id: list
project: string
origin_file: list
test_list: list
prob_info: list
type: list
node: list
language: string
toolfunc_count: int64
func_count: int64
pytest_info: dict
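The eleven fields above map one-to-one onto the per-record values listed below: `id` and `node` name the target functions, `origin_file` and `test_list` give the source and test paths, `prob_info` carries the per-function line ranges and source snippets, and `pytest_info` holds the aggregate test counts. As a minimal sketch of how a single record could be consumed, the snippet below assumes the dump is stored as JSON Lines with exactly these field names; the file name `records.jsonl` and the helper `summarize_record` are illustrative and not part of the dataset.

```python
import json
from typing import Any, Dict

# Field names are taken from the schema above; the rest of this sketch is illustrative.
EXPECTED_FIELDS = {
    "id", "project", "origin_file", "test_list", "prob_info",
    "type", "node", "language", "toolfunc_count", "func_count", "pytest_info",
}


def summarize_record(record: Dict[str, Any]) -> str:
    """Return a one-line summary of a record that follows the schema listed above."""
    missing = EXPECTED_FIELDS - record.keys()
    if missing:
        raise ValueError(f"record is missing fields: {sorted(missing)}")
    tests = record["pytest_info"]
    return (
        f"{record['project']}: {record['func_count']} target function(s), "
        f"{len(record['test_list'])} test file(s), "
        f"baseline {tests['base_passed_num']}/{tests['total_num']} tests passing"
    )


if __name__ == "__main__":
    # Hypothetical usage: iterate over a JSON Lines dump of these records.
    with open("records.jsonl", encoding="utf-8") as fh:
        for line in fh:
            print(summarize_record(json.loads(line)))
```

The `total_num` and `base_passed_num` keys are read directly from the `pytest_info` dictionaries shown in the records below.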
[ "datachain.src.datachain.func.func.Func::get_result_type", "datachain.src.datachain.lib.convert.python_to_sql.python_to_sql", "datachain.src.datachain.func.func.Func::get_column", "datachain.src.datachain.func.conditional.case", "datachain.src.datachain.func.conditional.ifelse" ]
datachain
[ "datachain/func/func.py", "datachain/lib/convert/python_to_sql.py", "datachain/func/func.py", "datachain/sql/selectable.py", "datachain/func/conditional.py", "datachain/func/conditional.py" ]
[ "tests/unit/sql/test_conditional.py" ]
[ { "class_start_lineno": 29, "class_end_lineno": 422, "func_start_lineno": 361, "func_end_lineno": 373, "func_code": " def get_result_type(\n self, signals_schema: Optional[\"SignalSchema\"] = None\n ) -> \"DataType\":\n if self.result_type:\n return self.result_type\n\n if signals_schema and (col_type := self._db_col_type(signals_schema)):\n return col_type\n\n raise DataChainColumnError(\n str(self),\n \"Column name is required to infer result type\",\n )" }, { "class_start_lineno": 1, "class_end_lineno": 117, "func_start_lineno": 37, "func_end_lineno": 82, "func_code": "def python_to_sql(typ): # noqa: PLR0911\n if inspect.isclass(typ):\n if issubclass(typ, SQLType):\n return typ\n if issubclass(typ, Enum):\n return str\n\n res = PYTHON_TO_SQL.get(typ)\n if res:\n return res\n\n orig = get_origin(typ)\n\n if orig in (Literal, LiteralEx):\n return String\n\n args = get_args(typ)\n if inspect.isclass(orig) and (issubclass(list, orig) or issubclass(tuple, orig)):\n if args is None:\n raise TypeError(f\"Cannot resolve type '{typ}' for flattening features\")\n\n args0 = args[0]\n if ModelStore.is_pydantic(args0):\n return Array(JSON())\n\n list_type = list_of_args_to_type(args)\n return Array(list_type)\n\n if orig is Annotated:\n # Ignoring annotations\n return python_to_sql(args[0])\n\n if inspect.isclass(orig) and issubclass(dict, orig):\n return JSON\n\n if orig == Union:\n if len(args) == 2 and (type(None) in args):\n return python_to_sql(args[0])\n\n if _is_union_str_literal(orig, args):\n return String\n\n if _is_json_inside_union(orig, args):\n return JSON\n\n raise TypeError(f\"Cannot recognize type {typ}\")" }, { "class_start_lineno": 29, "class_end_lineno": 422, "func_start_lineno": 375, "func_end_lineno": 422, "func_code": " def get_column(\n self,\n signals_schema: Optional[\"SignalSchema\"] = None,\n label: Optional[str] = None,\n table: Optional[\"TableClause\"] = None,\n ) -> Column:\n col_type = self.get_result_type(signals_schema)\n sql_type = python_to_sql(col_type)\n\n def get_col(col: ColT, string_as_literal=False) -> ColT:\n # string_as_literal is used only for conditionals like `case()` where\n # literals are nested inside ColT as we have tuples of condition - values\n # and if user wants to set some case value as column, explicit `C(\"col\")`\n # syntax must be used to distinguish from literals\n if isinstance(col, tuple):\n return tuple(get_col(x, string_as_literal=True) for x in col)\n if isinstance(col, Func):\n return col.get_column(signals_schema, table=table)\n if isinstance(col, str) and not string_as_literal:\n column = Column(col, sql_type)\n column.table = table\n return column\n return col\n\n cols = [get_col(col) for col in self._db_cols]\n kwargs = {k: get_col(v, string_as_literal=True) for k, v in self.kwargs.items()}\n func_col = self.inner(*cols, *self.args, **kwargs)\n\n if self.is_window:\n if not self.window:\n raise DataChainParamsError(\n f\"Window function {self} requires over() clause with a window spec\",\n )\n func_col = func_col.over(\n partition_by=self.window.partition_by,\n order_by=(\n desc(self.window.order_by)\n if self.window.desc\n else self.window.order_by\n ),\n )\n\n func_col.type = sql_type() if inspect.isclass(sql_type) else sql_type\n\n if col_name := self.get_col_name(label):\n func_col = func_col.label(col_name)\n\n return func_col" }, { "class_start_lineno": 1, "class_end_lineno": 56, "func_start_lineno": 24, "func_end_lineno": 29, "func_code": "def process_column_expression(col):\n if hasattr(col, \"get_column\"):\n return 
col.get_column()\n if isinstance(col, str):\n return expression.column(col)\n return col" }, { "class_start_lineno": 1, "class_end_lineno": 270, "func_start_lineno": 93, "func_end_lineno": 158, "func_code": "def case(\n *args: tuple[Union[ColumnElement, Func, bool], CaseT], else_: Optional[CaseT] = None\n) -> Func:\n \"\"\"\n Returns the case function that produces case expression which has a list of\n conditions and corresponding results. Results can be python primitives like string,\n numbers or booleans but can also be other nested functions (including case function)\n or columns.\n Result type is inferred from condition results.\n\n Args:\n args tuple((ColumnElement | Func | bool),(str | int | float | complex | bool, Func, ColumnElement)):\n Tuple of condition and values pair.\n else_ (str | int | float | complex | bool, Func): optional else value in case\n expression. If omitted, and no case conditions are satisfied, the result\n will be None (NULL in DB).\n\n Returns:\n Func: A Func object that represents the case function.\n\n Example:\n ```py\n dc.mutate(\n res=func.case((C(\"num\") > 0, \"P\"), (C(\"num\") < 0, \"N\"), else_=\"Z\"),\n )\n ```\n \"\"\" # noqa: E501\n supported_types = [int, float, complex, str, bool]\n\n def _get_type(val):\n from enum import Enum\n\n if isinstance(val, Func):\n # nested functions\n return val.result_type\n if isinstance(val, Column):\n # at this point we cannot know what is the type of a column\n return None\n if isinstance(val, Enum):\n return type(val.value)\n return type(val)\n\n if not args:\n raise DataChainParamsError(\"Missing statements\")\n\n type_ = _get_type(else_) if else_ is not None else None\n\n for arg in args:\n arg_type = _get_type(arg[1])\n if arg_type is None:\n # we couldn't figure out the type of case value\n continue\n if type_ and arg_type != type_:\n raise DataChainParamsError(\n f\"Statement values must be of the same type, got {type_} and {arg_type}\"\n )\n type_ = arg_type\n\n if type_ is not None and type_ not in supported_types:\n raise DataChainParamsError(\n f\"Only python literals ({supported_types}) are supported for values\"\n )\n\n kwargs = {\"else_\": else_}\n\n return Func(\"case\", inner=sql_case, cols=args, kwargs=kwargs, result_type=type_)" }, { "class_start_lineno": 1, "class_end_lineno": 270, "func_start_lineno": 161, "func_end_lineno": 187, "func_code": "def ifelse(\n condition: Union[ColumnElement, Func], if_val: CaseT, else_val: CaseT\n) -> Func:\n \"\"\"\n Returns the ifelse function that produces if expression which has a condition\n and values for true and false outcome. Results can be one of python primitives\n like string, numbers or booleans, but can also be nested functions or columns.\n Result type is inferred from the values.\n\n Args:\n condition (ColumnElement, Func): Condition which is evaluated.\n if_val (str | int | float | complex | bool, Func, ColumnElement): Value for true\n condition outcome.\n else_val (str | int | float | complex | bool, Func, ColumnElement): Value for\n false condition outcome.\n\n Returns:\n Func: A Func object that represents the ifelse function.\n\n Example:\n ```py\n dc.mutate(\n res=func.ifelse(isnone(\"col\"), \"EMPTY\", \"NOT_EMPTY\")\n )\n ```\n \"\"\"\n return case((condition, if_val), else_=else_val)" } ]
[ "function_empty", "TDD" ]
[ "datachain.func.func.Func.get_result_type", "datachain.lib.convert.python_to_sql.python_to_sql", "datachain.func.func.Func.get_column", "datachain.sql.selectable.process_column_expression", "datachain.func.conditional.case", "datachain.func.conditional.ifelse" ]
Python
1
5
{ "total_num": 34, "base_passed_num": 0 }
[ "haystack.haystack.dataclasses.chat_message.ChatMessage::__getattribute__", "haystack.haystack.components.builders.answer_builder.AnswerBuilder::run" ]
haystack
[ "haystack/dataclasses/chat_message.py", "haystack/components/builders/answer_builder.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py" ]
[ "test/components/builders/test_answer_builder.py" ]
[ { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 127, "func_end_lineno": 140, "func_code": " def __getattribute__(self, name):\n \"\"\"\n This method is reimplemented to make the `content` attribute removal more visible.\n \"\"\"\n\n if name == \"content\":\n msg = (\n \"The `content` attribute of `ChatMessage` has been removed. \"\n \"Use the `text` property to access the textual value. \"\n \"For more information about the new API and how to migrate, see the documentation: \"\n \"https://docs.haystack.deepset.ai/docs/chatmessage\"\n )\n raise AttributeError(msg)\n return object.__getattribute__(self, name)" }, { "class_start_lineno": 15, "class_end_lineno": 184, "func_start_lineno": 61, "func_end_lineno": 147, "func_code": " def run( # pylint: disable=too-many-positional-arguments\n self,\n query: str,\n replies: Union[List[str], List[ChatMessage]],\n meta: Optional[List[Dict[str, Any]]] = None,\n documents: Optional[List[Document]] = None,\n pattern: Optional[str] = None,\n reference_pattern: Optional[str] = None,\n ):\n \"\"\"\n Turns the output of a Generator into `GeneratedAnswer` objects using regular expressions.\n\n :param query:\n The input query used as the Generator prompt.\n :param replies:\n The output of the Generator. Can be a list of strings or a list of `ChatMessage` objects.\n :param meta:\n The metadata returned by the Generator. If not specified, the generated answer will contain no metadata.\n :param documents:\n The documents used as the Generator inputs. If specified, they are added to\n the`GeneratedAnswer` objects.\n If both `documents` and `reference_pattern` are specified, the documents referenced in the\n Generator output are extracted from the input documents and added to the `GeneratedAnswer` objects.\n :param pattern:\n The regular expression pattern to extract the answer text from the Generator.\n If not specified, the entire response is used as the answer.\n The regular expression can have one capture group at most.\n If present, the capture group text\n is used as the answer. If no capture group is present, the whole match is used as the answer.\n Examples:\n `[^\\\\n]+$` finds \"this is an answer\" in a string \"this is an argument.\\\\nthis is an answer\".\n `Answer: (.*)` finds \"this is an answer\" in a string\n \"this is an argument. Answer: this is an answer\".\n :param reference_pattern:\n The regular expression pattern used for parsing the document references.\n If not specified, no parsing is done, and all documents are referenced.\n References need to be specified as indices of the input documents and start at [1].\n Example: `\\\\[(\\\\d+)\\\\]` finds \"1\" in a string \"this is an answer[1]\".\n\n :returns: A dictionary with the following keys:\n - `answers`: The answers received from the output of the Generator.\n \"\"\"\n if not meta:\n meta = [{}] * len(replies)\n elif len(replies) != len(meta):\n raise ValueError(f\"Number of replies ({len(replies)}), and metadata ({len(meta)}) must match.\")\n\n if pattern:\n AnswerBuilder._check_num_groups_in_regex(pattern)\n\n pattern = pattern or self.pattern\n reference_pattern = reference_pattern or self.reference_pattern\n all_answers = []\n for reply, given_metadata in zip(replies, meta):\n # Extract content from ChatMessage objects if reply is a ChatMessages, else use the string as is\n if isinstance(reply, ChatMessage):\n if reply.text is None:\n raise ValueError(f\"The provided ChatMessage has no text. 
ChatMessage: {reply}\")\n extracted_reply = reply.text\n else:\n extracted_reply = str(reply)\n extracted_metadata = reply.meta if isinstance(reply, ChatMessage) else {}\n\n extracted_metadata = {**extracted_metadata, **given_metadata}\n\n referenced_docs = []\n if documents:\n if reference_pattern:\n reference_idxs = AnswerBuilder._extract_reference_idxs(extracted_reply, reference_pattern)\n else:\n reference_idxs = [doc_idx for doc_idx, _ in enumerate(documents)]\n\n for idx in reference_idxs:\n try:\n referenced_docs.append(documents[idx])\n except IndexError:\n logger.warning(\n \"Document index '{index}' referenced in Generator output is out of range. \", index=idx + 1\n )\n\n answer_string = AnswerBuilder._extract_answer_string(extracted_reply, pattern)\n answer = GeneratedAnswer(\n data=answer_string, query=query, documents=referenced_docs, meta=extracted_metadata\n )\n all_answers.append(answer)\n\n return {\"answers\": all_answers}" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 167, "func_end_lineno": 171, "func_code": " def texts(self) -> List[str]:\n \"\"\"\n Returns the list of all texts contained in the message.\n \"\"\"\n return [content.text for content in self._content if isinstance(content, TextContent)]" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 174, "func_end_lineno": 180, "func_code": " def text(self) -> Optional[str]:\n \"\"\"\n Returns the first text contained in the message.\n \"\"\"\n if texts := self.texts:\n return texts[0]\n return None" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 153, "func_end_lineno": 157, "func_code": " def meta(self) -> Dict[str, Any]:\n \"\"\"\n Returns the metadata associated with the message.\n \"\"\"\n return self._meta" } ]
[ "function_empty", "TDD" ]
[ "haystack.dataclasses.chat_message.ChatMessage.__getattribute__", "haystack.components.builders.answer_builder.AnswerBuilder.run", "haystack.dataclasses.chat_message.ChatMessage.texts", "haystack.dataclasses.chat_message.ChatMessage.text", "haystack.dataclasses.chat_message.ChatMessage.meta" ]
Python
1
2
{ "total_num": 19, "base_passed_num": 1 }
[ "haystack.haystack.dataclasses.chat_message.ChatMessage::__getattribute__", "haystack.haystack.core.type_utils._strict_types_are_compatible", "haystack.haystack.core.type_utils._types_are_compatible", "haystack.haystack.core.type_utils._type_name", "haystack.haystack.core.pipeline.base._connections_status" ]
haystack
[ "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/core/type_utils.py", "haystack/core/type_utils.py", "haystack/core/type_utils.py", "haystack/core/pipeline/base.py" ]
[ "test/components/builders/test_chat_prompt_builder.py" ]
[ { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 127, "func_end_lineno": 140, "func_code": " def __getattribute__(self, name):\n \"\"\"\n This method is reimplemented to make the `content` attribute removal more visible.\n \"\"\"\n\n if name == \"content\":\n msg = (\n \"The `content` attribute of `ChatMessage` has been removed. \"\n \"Use the `text` property to access the textual value. \"\n \"For more information about the new API and how to migrate, see the documentation: \"\n \"https://docs.haystack.deepset.ai/docs/chatmessage\"\n )\n raise AttributeError(msg)\n return object.__getattribute__(self, name)" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 167, "func_end_lineno": 171, "func_code": " def texts(self) -> List[str]:\n \"\"\"\n Returns the list of all texts contained in the message.\n \"\"\"\n return [content.text for content in self._content if isinstance(content, TextContent)]" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 174, "func_end_lineno": 180, "func_code": " def text(self) -> Optional[str]:\n \"\"\"\n Returns the first text contained in the message.\n \"\"\"\n if texts := self.texts:\n return texts[0]\n return None" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 153, "func_end_lineno": 157, "func_code": " def meta(self) -> Dict[str, Any]:\n \"\"\"\n Returns the metadata associated with the message.\n \"\"\"\n return self._meta" }, { "class_start_lineno": 1, "class_end_lineno": 105, "func_start_lineno": 29, "func_end_lineno": 76, "func_code": "def _strict_types_are_compatible(sender, receiver): # pylint: disable=too-many-return-statements\n \"\"\"\n Checks whether the sender type is equal to or a subtype of the receiver type under strict validation.\n\n Note: this method has no pretense to perform proper type matching. It especially does not deal with aliasing of\n typing classes such as `List` or `Dict` to their runtime counterparts `list` and `dict`. 
It also does not deal well\n with \"bare\" types, so `List` is treated differently from `List[Any]`, even though they should be the same.\n Consider simplifying the typing of your components if you observe unexpected errors during component connection.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :return: True if the sender type is strictly compatible with the receiver type, False otherwise.\n \"\"\"\n if sender == receiver or receiver is Any:\n return True\n\n if sender is Any:\n return False\n\n try:\n if issubclass(sender, receiver):\n return True\n except TypeError: # typing classes can't be used with issubclass, so we deal with them below\n pass\n\n sender_origin = get_origin(sender)\n receiver_origin = get_origin(receiver)\n\n if sender_origin is not Union and receiver_origin is Union:\n return any(_strict_types_are_compatible(sender, union_arg) for union_arg in get_args(receiver))\n\n # Both must have origins and they must be equal\n if not (sender_origin and receiver_origin and sender_origin == receiver_origin):\n return False\n\n # Compare generic type arguments\n sender_args = get_args(sender)\n receiver_args = get_args(receiver)\n\n # Handle bare types\n if not sender_args and sender_origin:\n sender_args = (Any,)\n if not receiver_args and receiver_origin:\n receiver_args = (Any,) * (len(sender_args) if sender_args else 1)\n if len(sender_args) > len(receiver_args):\n return False\n\n return all(_strict_types_are_compatible(*args) for args in zip(sender_args, receiver_args))" }, { "class_start_lineno": 1, "class_end_lineno": 105, "func_start_lineno": 14, "func_end_lineno": 26, "func_code": "def _types_are_compatible(sender, receiver, type_validation: bool = True) -> bool:\n \"\"\"\n Determines if two types are compatible based on the specified validation mode.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :param type_validation: Whether to perform strict type validation.\n :return: True if the types are compatible, False otherwise.\n \"\"\"\n if type_validation:\n return _strict_types_are_compatible(sender, receiver)\n else:\n return True" }, { "class_start_lineno": 1, "class_end_lineno": 105, "func_start_lineno": 79, "func_end_lineno": 105, "func_code": "def _type_name(type_):\n \"\"\"\n Util methods to get a nice readable representation of a type.\n\n Handles Optional and Literal in a special way to make it more readable.\n \"\"\"\n # Literal args are strings, so we wrap them in quotes to make it clear\n if isinstance(type_, str):\n return f\"'{type_}'\"\n\n name = getattr(type_, \"__name__\", str(type_))\n\n if name.startswith(\"typing.\"):\n name = name[7:]\n if \"[\" in name:\n name = name.split(\"[\")[0]\n args = get_args(type_)\n if name == \"Union\" and type(None) in args and len(args) == 2:\n # Optional is technically a Union of type and None\n # but we want to display it as Optional\n name = \"Optional\"\n\n if args:\n args = \", \".join([_type_name(a) for a in args if a is not type(None)])\n return f\"{name}[{args}]\"\n\n return f\"{name}\"" }, { "class_start_lineno": 1, "class_end_lineno": 1261, "func_start_lineno": 1207, "func_end_lineno": 1229, "func_code": "def _connections_status(\n sender_node: str, receiver_node: str, sender_sockets: List[OutputSocket], receiver_sockets: List[InputSocket]\n) -> str:\n \"\"\"\n Lists the status of the sockets, for error messages.\n \"\"\"\n sender_sockets_entries = []\n for sender_socket in sender_sockets:\n sender_sockets_entries.append(f\" - {sender_socket.name}: 
{_type_name(sender_socket.type)}\")\n sender_sockets_list = \"\\n\".join(sender_sockets_entries)\n\n receiver_sockets_entries = []\n for receiver_socket in receiver_sockets:\n if receiver_socket.senders:\n sender_status = f\"sent by {','.join(receiver_socket.senders)}\"\n else:\n sender_status = \"available\"\n receiver_sockets_entries.append(\n f\" - {receiver_socket.name}: {_type_name(receiver_socket.type)} ({sender_status})\"\n )\n receiver_sockets_list = \"\\n\".join(receiver_sockets_entries)\n\n return f\"'{sender_node}':\\n{sender_sockets_list}\\n'{receiver_node}':\\n{receiver_sockets_list}\"" } ]
[ "function_empty", "TDD" ]
[ "haystack.dataclasses.chat_message.ChatMessage.__getattribute__", "haystack.dataclasses.chat_message.ChatMessage.texts", "haystack.dataclasses.chat_message.ChatMessage.text", "haystack.dataclasses.chat_message.ChatMessage.meta", "haystack.core.type_utils._strict_types_are_compatible", "haystack.core.type_utils._types_are_compatible", "haystack.core.type_utils._type_name", "haystack.core.pipeline.base._connections_status" ]
Python
4
5
{ "total_num": 35, "base_passed_num": 7 }
[ "haystack.haystack.components.builders.prompt_builder.PromptBuilder::_validate_variables", "haystack.haystack.components.builders.prompt_builder.PromptBuilder::run", "haystack.haystack.core.type_utils._strict_types_are_compatible", "haystack.haystack.core.type_utils._types_are_compatible" ]
haystack
[ "haystack/components/builders/prompt_builder.py", "haystack/components/builders/prompt_builder.py", "haystack/core/type_utils.py", "haystack/core/type_utils.py" ]
[ "test/components/builders/test_prompt_builder.py" ]
[ { "class_start_lineno": 17, "class_end_lineno": 266, "func_start_lineno": 247, "func_end_lineno": 266, "func_code": " def _validate_variables(self, provided_variables: Set[str]):\n \"\"\"\n Checks if all the required template variables are provided.\n\n :param provided_variables:\n A set of provided template variables.\n :raises ValueError:\n If any of the required template variables is not provided.\n \"\"\"\n if self.required_variables == \"*\":\n required_variables = sorted(self.variables)\n else:\n required_variables = self.required_variables\n missing_variables = [var for var in required_variables if var not in provided_variables]\n if missing_variables:\n missing_vars_str = \", \".join(missing_variables)\n raise ValueError(\n f\"Missing required input variables in PromptBuilder: {missing_vars_str}. \"\n f\"Required variables: {required_variables}. Provided variables: {provided_variables}.\"\n )" }, { "class_start_lineno": 17, "class_end_lineno": 266, "func_start_lineno": 213, "func_end_lineno": 245, "func_code": " def run(self, template: Optional[str] = None, template_variables: Optional[Dict[str, Any]] = None, **kwargs):\n \"\"\"\n Renders the prompt template with the provided variables.\n\n It applies the template variables to render the final prompt. You can provide variables via pipeline kwargs.\n In order to overwrite the default template, you can set the `template` parameter.\n In order to overwrite pipeline kwargs, you can set the `template_variables` parameter.\n\n :param template:\n An optional string template to overwrite PromptBuilder's default template. If None, the default template\n provided at initialization is used.\n :param template_variables:\n An optional dictionary of template variables to overwrite the pipeline variables.\n :param kwargs:\n Pipeline variables used for rendering the prompt.\n\n :returns: A dictionary with the following keys:\n - `prompt`: The updated prompt text after rendering the prompt template.\n\n :raises ValueError:\n If any of the required template variables is not provided.\n \"\"\"\n kwargs = kwargs or {}\n template_variables = template_variables or {}\n template_variables_combined = {**kwargs, **template_variables}\n self._validate_variables(set(template_variables_combined.keys()))\n\n compiled_template = self.template\n if template is not None:\n compiled_template = self._env.from_string(template)\n\n result = compiled_template.render(template_variables_combined)\n return {\"prompt\": result}" }, { "class_start_lineno": 1, "class_end_lineno": 105, "func_start_lineno": 29, "func_end_lineno": 76, "func_code": "def _strict_types_are_compatible(sender, receiver): # pylint: disable=too-many-return-statements\n \"\"\"\n Checks whether the sender type is equal to or a subtype of the receiver type under strict validation.\n\n Note: this method has no pretense to perform proper type matching. It especially does not deal with aliasing of\n typing classes such as `List` or `Dict` to their runtime counterparts `list` and `dict`. It also does not deal well\n with \"bare\" types, so `List` is treated differently from `List[Any]`, even though they should be the same.\n Consider simplifying the typing of your components if you observe unexpected errors during component connection.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :return: True if the sender type is strictly compatible with the receiver type, False otherwise.\n \"\"\"\n if sender == receiver or receiver is Any:\n return True\n\n if sender is Any:\n return False\n\n try:\n if issubclass(sender, receiver):\n return True\n except TypeError: # typing classes can't be used with issubclass, so we deal with them below\n pass\n\n sender_origin = get_origin(sender)\n receiver_origin = get_origin(receiver)\n\n if sender_origin is not Union and receiver_origin is Union:\n return any(_strict_types_are_compatible(sender, union_arg) for union_arg in get_args(receiver))\n\n # Both must have origins and they must be equal\n if not (sender_origin and receiver_origin and sender_origin == receiver_origin):\n return False\n\n # Compare generic type arguments\n sender_args = get_args(sender)\n receiver_args = get_args(receiver)\n\n # Handle bare types\n if not sender_args and sender_origin:\n sender_args = (Any,)\n if not receiver_args and receiver_origin:\n receiver_args = (Any,) * (len(sender_args) if sender_args else 1)\n if len(sender_args) > len(receiver_args):\n return False\n\n return all(_strict_types_are_compatible(*args) for args in zip(sender_args, receiver_args))" }, { "class_start_lineno": 1, "class_end_lineno": 105, "func_start_lineno": 14, "func_end_lineno": 26, "func_code": "def _types_are_compatible(sender, receiver, type_validation: bool = True) -> bool:\n \"\"\"\n Determines if two types are compatible based on the specified validation mode.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :param type_validation: Whether to perform strict type validation.\n :return: True if the types are compatible, False otherwise.\n \"\"\"\n if type_validation:\n return _strict_types_are_compatible(sender, receiver)\n else:\n return True" } ]
[ "function_empty" ]
[ "haystack.components.builders.prompt_builder.PromptBuilder._validate_variables", "haystack.components.builders.prompt_builder.PromptBuilder.run", "haystack.core.type_utils._strict_types_are_compatible", "haystack.core.type_utils._types_are_compatible" ]
Python
4
4
{ "total_num": 29, "base_passed_num": 7 }
[ "haystack.haystack.core.type_utils._strict_types_are_compatible", "haystack.haystack.core.type_utils._types_are_compatible", "haystack.haystack.core.type_utils._type_name", "haystack.haystack.core.pipeline.base._connections_status" ]
haystack
[ "haystack/core/type_utils.py", "haystack/core/type_utils.py", "haystack/core/type_utils.py", "haystack/core/pipeline/base.py" ]
[ "test/components/classifiers/test_zero_shot_document_classifier.py", "test/components/joiners/test_list_joiner.py", "test/core/pipeline/test_pipeline.py", "test/tools/test_component_tool.py", "test/tracing/test_logging_tracer.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 105, "func_start_lineno": 29, "func_end_lineno": 76, "func_code": "def _strict_types_are_compatible(sender, receiver): # pylint: disable=too-many-return-statements\n \"\"\"\n Checks whether the sender type is equal to or a subtype of the receiver type under strict validation.\n\n Note: this method has no pretense to perform proper type matching. It especially does not deal with aliasing of\n typing classes such as `List` or `Dict` to their runtime counterparts `list` and `dict`. It also does not deal well\n with \"bare\" types, so `List` is treated differently from `List[Any]`, even though they should be the same.\n Consider simplifying the typing of your components if you observe unexpected errors during component connection.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :return: True if the sender type is strictly compatible with the receiver type, False otherwise.\n \"\"\"\n if sender == receiver or receiver is Any:\n return True\n\n if sender is Any:\n return False\n\n try:\n if issubclass(sender, receiver):\n return True\n except TypeError: # typing classes can't be used with issubclass, so we deal with them below\n pass\n\n sender_origin = get_origin(sender)\n receiver_origin = get_origin(receiver)\n\n if sender_origin is not Union and receiver_origin is Union:\n return any(_strict_types_are_compatible(sender, union_arg) for union_arg in get_args(receiver))\n\n # Both must have origins and they must be equal\n if not (sender_origin and receiver_origin and sender_origin == receiver_origin):\n return False\n\n # Compare generic type arguments\n sender_args = get_args(sender)\n receiver_args = get_args(receiver)\n\n # Handle bare types\n if not sender_args and sender_origin:\n sender_args = (Any,)\n if not receiver_args and receiver_origin:\n receiver_args = (Any,) * (len(sender_args) if sender_args else 1)\n if len(sender_args) > len(receiver_args):\n return False\n\n return all(_strict_types_are_compatible(*args) for args in zip(sender_args, receiver_args))" }, { "class_start_lineno": 1, "class_end_lineno": 105, "func_start_lineno": 14, "func_end_lineno": 26, "func_code": "def _types_are_compatible(sender, receiver, type_validation: bool = True) -> bool:\n \"\"\"\n Determines if two types are compatible based on the specified validation mode.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :param type_validation: Whether to perform strict type validation.\n :return: True if the types are compatible, False otherwise.\n \"\"\"\n if type_validation:\n return _strict_types_are_compatible(sender, receiver)\n else:\n return True" }, { "class_start_lineno": 1, "class_end_lineno": 105, "func_start_lineno": 79, "func_end_lineno": 105, "func_code": "def _type_name(type_):\n \"\"\"\n Util methods to get a nice readable representation of a type.\n\n Handles Optional and Literal in a special way to make it more readable.\n \"\"\"\n # Literal args are strings, so we wrap them in quotes to make it clear\n if isinstance(type_, str):\n return f\"'{type_}'\"\n\n name = getattr(type_, \"__name__\", str(type_))\n\n if name.startswith(\"typing.\"):\n name = name[7:]\n if \"[\" in name:\n name = name.split(\"[\")[0]\n args = get_args(type_)\n if name == \"Union\" and type(None) in args and len(args) == 2:\n # Optional is technically a Union of type and None\n # but we want to display it as Optional\n name = \"Optional\"\n\n if args:\n args = \", \".join([_type_name(a) for a in args if a is not type(None)])\n return f\"{name}[{args}]\"\n\n return f\"{name}\"" }, { "class_start_lineno": 1, "class_end_lineno": 1261, "func_start_lineno": 1207, "func_end_lineno": 1229, "func_code": "def _connections_status(\n sender_node: str, receiver_node: str, sender_sockets: List[OutputSocket], receiver_sockets: List[InputSocket]\n) -> str:\n \"\"\"\n Lists the status of the sockets, for error messages.\n \"\"\"\n sender_sockets_entries = []\n for sender_socket in sender_sockets:\n sender_sockets_entries.append(f\" - {sender_socket.name}: {_type_name(sender_socket.type)}\")\n sender_sockets_list = \"\\n\".join(sender_sockets_entries)\n\n receiver_sockets_entries = []\n for receiver_socket in receiver_sockets:\n if receiver_socket.senders:\n sender_status = f\"sent by {','.join(receiver_socket.senders)}\"\n else:\n sender_status = \"available\"\n receiver_sockets_entries.append(\n f\" - {receiver_socket.name}: {_type_name(receiver_socket.type)} ({sender_status})\"\n )\n receiver_sockets_list = \"\\n\".join(receiver_sockets_entries)\n\n return f\"'{sender_node}':\\n{sender_sockets_list}\\n'{receiver_node}':\\n{receiver_sockets_list}\"" } ]
[ "function_empty" ]
[ "haystack.core.type_utils._strict_types_are_compatible", "haystack.core.type_utils._types_are_compatible", "haystack.core.type_utils._type_name", "haystack.core.pipeline.base._connections_status" ]
Python
4
4
{ "total_num": 48, "base_passed_num": 41 }
[ "haystack.haystack.dataclasses.chat_message.ChatMessage::__getattribute__", "haystack.haystack.components.connectors.openapi_service.OpenAPIServiceConnector::run", "haystack.haystack.core.serialization.default_to_dict", "haystack.haystack.core.serialization.component_to_dict" ]
haystack
[ "haystack/dataclasses/chat_message.py", "haystack/components/connectors/openapi_service.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/core/serialization.py", "haystack/components/connectors/openapi_service.py", "haystack/core/serialization.py" ]
[ "test/components/connectors/test_openapi_service.py" ]
[ { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 127, "func_end_lineno": 140, "func_code": " def __getattribute__(self, name):\n \"\"\"\n This method is reimplemented to make the `content` attribute removal more visible.\n \"\"\"\n\n if name == \"content\":\n msg = (\n \"The `content` attribute of `ChatMessage` has been removed. \"\n \"Use the `text` property to access the textual value. \"\n \"For more information about the new API and how to migrate, see the documentation: \"\n \"https://docs.haystack.deepset.ai/docs/chatmessage\"\n )\n raise AttributeError(msg)\n return object.__getattribute__(self, name)" }, { "class_start_lineno": 149, "class_end_lineno": 399, "func_start_lineno": 211, "func_end_lineno": 263, "func_code": " def run(\n self,\n messages: List[ChatMessage],\n service_openapi_spec: Dict[str, Any],\n service_credentials: Optional[Union[dict, str]] = None,\n ) -> Dict[str, List[ChatMessage]]:\n \"\"\"\n Processes a list of chat messages to invoke a method on an OpenAPI service.\n\n It parses the last message in the list, expecting it to contain tool calls.\n\n :param messages: A list of `ChatMessage` objects containing the messages to be processed. The last message\n should contain the tool calls.\n :param service_openapi_spec: The OpenAPI JSON specification object of the service to be invoked. All the refs\n should already be resolved.\n :param service_credentials: The credentials to be used for authentication with the service.\n Currently, only the http and apiKey OpenAPI security schemes are supported.\n\n :return: A dictionary with the following keys:\n - `service_response`: a list of `ChatMessage` objects, each containing the response from the service. The\n response is in JSON format, and the `content` attribute of the `ChatMessage` contains\n the JSON string.\n\n :raises ValueError: If the last message is not from the assistant or if it does not contain tool calls.\n \"\"\"\n\n last_message = messages[-1]\n if not last_message.is_from(ChatRole.ASSISTANT):\n raise ValueError(f\"{last_message} is not from the assistant.\")\n\n tool_calls = last_message.tool_calls\n if not tool_calls:\n raise ValueError(f\"The provided ChatMessage has no tool calls.\\nChatMessage: {last_message}\")\n\n function_payloads = []\n for tool_call in tool_calls:\n function_payloads.append({\"arguments\": tool_call.arguments, \"name\": tool_call.tool_name})\n\n # instantiate the OpenAPI service for the given specification\n openapi_service = OpenAPI(service_openapi_spec, ssl_verify=self.ssl_verify)\n self._authenticate_service(openapi_service, service_credentials)\n\n response_messages = []\n for method_invocation_descriptor in function_payloads:\n service_response = self._invoke_method(openapi_service, method_invocation_descriptor)\n # openapi3 parses the JSON service response into a model object, which is not our focus at the moment.\n # Instead, we require direct access to the raw JSON data of the response, rather than the model objects\n # provided by the openapi3 library. This approach helps us avoid issues related to (de)serialization.\n # By accessing the raw JSON response through `service_response._raw_data`, we can serialize this data\n # into a string. 
Finally, we use this string to create a ChatMessage object.\n response_messages.append(ChatMessage.from_user(json.dumps(service_response)))\n\n return {\"service_response\": response_messages}" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 183, "func_end_lineno": 187, "func_code": " def tool_calls(self) -> List[ToolCall]:\n \"\"\"\n Returns the list of all Tool calls contained in the message.\n \"\"\"\n return [content for content in self._content if isinstance(content, ToolCall)]" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 167, "func_end_lineno": 171, "func_code": " def texts(self) -> List[str]:\n \"\"\"\n Returns the list of all texts contained in the message.\n \"\"\"\n return [content.text for content in self._content if isinstance(content, TextContent)]" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 174, "func_end_lineno": 180, "func_code": " def text(self) -> Optional[str]:\n \"\"\"\n Returns the first text contained in the message.\n \"\"\"\n if texts := self.texts:\n return texts[0]\n return None" }, { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 172, "func_end_lineno": 210, "func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}" }, { "class_start_lineno": 149, "class_end_lineno": 399, "func_start_lineno": 265, "func_end_lineno": 272, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(self, ssl_verify=self.ssl_verify)" }, { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for 
param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data" } ]
[ "function_empty", "TDD" ]
[ "haystack.dataclasses.chat_message.ChatMessage.__getattribute__", "haystack.components.connectors.openapi_service.OpenAPIServiceConnector.run", "haystack.dataclasses.chat_message.ChatMessage.tool_calls", "haystack.dataclasses.chat_message.ChatMessage.texts", "haystack.dataclasses.chat_message.ChatMessage.text", "haystack.core.serialization.default_to_dict", "haystack.components.connectors.openapi_service.OpenAPIServiceConnector.to_dict", "haystack.core.serialization.component_to_dict" ]
Python
3
4
{ "total_num": 12, "base_passed_num": 4 }
[ "haystack.haystack.core.serialization.default_to_dict", "haystack.haystack.components.converters.json.JSONConverter::to_dict", "haystack.haystack.components.converters.utils.normalize_metadata", "haystack.haystack.components.converters.utils.get_bytestream_from_source", "haystack.haystack.components.converters.json.JSONConverter::run" ]
haystack
[ "haystack/core/serialization.py", "haystack/components/converters/json.py", "haystack/components/converters/utils.py", "haystack/components/converters/utils.py", "haystack/components/converters/json.py" ]
[ "test/components/converters/test_json.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 172, "func_end_lineno": 210, "func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}" }, { "class_start_lineno": 22, "class_end_lineno": 291, "func_start_lineno": 152, "func_end_lineno": 165, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n jq_schema=self._jq_schema,\n content_key=self._content_key,\n extra_meta_fields=self._meta_fields,\n store_full_path=self._store_full_path,\n )" }, { "class_start_lineno": 1, "class_end_lineno": 51, "func_start_lineno": 30, "func_end_lineno": 51, "func_code": "def normalize_metadata(\n meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]], sources_count: int\n) -> List[Dict[str, Any]]:\n \"\"\"\n Normalize the metadata input for a converter.\n\n Given all the possible value of the meta input for a converter (None, dictionary or list of dicts),\n makes sure to return a list of dictionaries of the correct length for the converter to use.\n\n :param meta: the meta input of the converter, as-is\n :param sources_count: the number of sources the converter received\n :returns: a list of dictionaries of the make length as the sources list\n \"\"\"\n if meta is None:\n return [{}] * sources_count\n if isinstance(meta, dict):\n return [meta] * sources_count\n if isinstance(meta, list):\n if sources_count != len(meta):\n raise ValueError(\"The length of the metadata list must match the number of sources.\")\n return meta\n raise ValueError(\"meta must be either None, a dictionary or a list of dictionaries.\")" }, { "class_start_lineno": 1, "class_end_lineno": 51, "func_start_lineno": 11, "func_end_lineno": 27, "func_code": "def get_bytestream_from_source(source: Union[str, Path, ByteStream]) -> ByteStream:\n \"\"\"\n Creates a ByteStream object from a source.\n\n :param source:\n A source to convert to a ByteStream. Can be a string (path to a file), a Path object, or a ByteStream.\n :return:\n A ByteStream object.\n \"\"\"\n\n if isinstance(source, ByteStream):\n return source\n if isinstance(source, (str, Path)):\n bs = ByteStream.from_file_path(Path(source))\n bs.meta[\"file_path\"] = str(source)\n return bs\n raise ValueError(f\"Unsupported source type {type(source)}\")" }, { "class_start_lineno": 22, "class_end_lineno": 291, "func_start_lineno": 250, "func_end_lineno": 291, "func_code": " def run(\n self,\n sources: List[Union[str, Path, ByteStream]],\n meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,\n ):\n \"\"\"\n Converts a list of JSON files to documents.\n\n :param sources:\n A list of file paths or ByteStream objects.\n :param meta:\n Optional metadata to attach to the documents.\n This value can be either a list of dictionaries or a single dictionary.\n If it's a single dictionary, its content is added to the metadata of all produced documents.\n If it's a list, the length of the list must match the number of sources.\n If `sources` contain ByteStream objects, their `meta` will be added to the output documents.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: A list of created documents.\n \"\"\"\n documents = []\n meta_list = normalize_metadata(meta=meta, sources_count=len(sources))\n\n for source, metadata in zip(sources, meta_list):\n try:\n bytestream = get_bytestream_from_source(source)\n except Exception as exc:\n logger.warning(\"Could not read {source}. Skipping it. Error: {error}\", source=source, error=exc)\n continue\n\n data = self._get_content_and_meta(bytestream)\n\n for text, extra_meta in data:\n merged_metadata = {**bytestream.meta, **metadata, **extra_meta}\n\n if not self._store_full_path and (file_path := bytestream.meta.get(\"file_path\")):\n merged_metadata[\"file_path\"] = os.path.basename(file_path)\n document = Document(content=text, meta=merged_metadata)\n documents.append(document)\n\n return {\"documents\": documents}" } ]
[ "function_empty" ]
[ "haystack.core.serialization.default_to_dict", "haystack.components.converters.json.JSONConverter.to_dict", "haystack.components.converters.utils.normalize_metadata", "haystack.components.converters.utils.get_bytestream_from_source", "haystack.components.converters.json.JSONConverter.run" ]
Python
5
5
{ "total_num": 19, "base_passed_num": 5 }
[ "haystack.haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions::_parse_openapi_spec", "haystack.haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions::run", "haystack.haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions::_parse_property_attributes", "haystack.haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions::_openapi_to_functions" ]
haystack
[ "haystack/components/converters/openapi_functions.py", "haystack/components/converters/openapi_functions.py", "haystack/components/converters/openapi_functions.py", "haystack/components/converters/openapi_functions.py", "haystack/components/converters/openapi_functions.py" ]
[ "test/components/converters/test_openapi_functions.py" ]
[ { "class_start_lineno": 23, "class_end_lineno": 257, "func_start_lineno": 232, "func_end_lineno": 257, "func_code": " def _parse_openapi_spec(self, content: str) -> Dict[str, Any]:\n \"\"\"\n Parses OpenAPI specification content, supporting both JSON and YAML formats.\n\n :param content: The content of the OpenAPI specification.\n :return: The parsed OpenAPI specification.\n \"\"\"\n open_api_spec_content = None\n try:\n open_api_spec_content = json.loads(content)\n return jsonref.replace_refs(open_api_spec_content)\n except json.JSONDecodeError as json_error:\n # heuristic to confirm that the content is likely malformed JSON\n if content.strip().startswith((\"{\", \"[\")):\n raise json_error\n\n try:\n open_api_spec_content = yaml.safe_load(content)\n except yaml.YAMLError:\n error_message = (\n \"Failed to parse the OpenAPI specification. The content does not appear to be valid JSON or YAML.\\n\\n\"\n )\n raise RuntimeError(error_message, content)\n\n # Replace references in the object with their resolved values, if any\n return jsonref.replace_refs(open_api_spec_content)" }, { "class_start_lineno": 23, "class_end_lineno": 257, "func_start_lineno": 56, "func_end_lineno": 115, "func_code": " def run(self, sources: List[Union[str, Path, ByteStream]]) -> Dict[str, Any]:\n \"\"\"\n Converts OpenAPI definitions in OpenAI function calling format.\n\n :param sources:\n File paths or ByteStream objects of OpenAPI definitions (in JSON or YAML format).\n\n :returns:\n A dictionary with the following keys:\n - functions: Function definitions in JSON object format\n - openapi_specs: OpenAPI specs in JSON/YAML object format with resolved references\n\n :raises RuntimeError:\n If the OpenAPI definitions cannot be downloaded or processed.\n :raises ValueError:\n If the source type is not recognized or no functions are found in the OpenAPI definitions.\n \"\"\"\n all_extracted_fc_definitions: List[Dict[str, Any]] = []\n all_openapi_specs = []\n for source in sources:\n openapi_spec_content = None\n if isinstance(source, (str, Path)):\n if os.path.exists(source):\n try:\n with open(source, \"r\") as f:\n openapi_spec_content = f.read()\n except IOError as e:\n logger.warning(\n \"IO error reading OpenAPI specification file: {source}. Error: {e}\", source=source, e=e\n )\n else:\n logger.warning(f\"OpenAPI specification file not found: {source}\")\n elif isinstance(source, ByteStream):\n openapi_spec_content = source.data.decode(\"utf-8\")\n if not openapi_spec_content:\n logger.warning(\n \"Invalid OpenAPI specification content provided: {openapi_spec_content}\",\n openapi_spec_content=openapi_spec_content,\n )\n else:\n logger.warning(\n \"Invalid source type {source}. 
Only str, Path, and ByteStream are supported.\", source=type(source)\n )\n continue\n\n if openapi_spec_content:\n try:\n service_openapi_spec = self._parse_openapi_spec(openapi_spec_content)\n functions: List[Dict[str, Any]] = self._openapi_to_functions(service_openapi_spec)\n all_extracted_fc_definitions.extend(functions)\n all_openapi_specs.append(service_openapi_spec)\n except Exception as e:\n logger.error(\n \"Error processing OpenAPI specification from source {source}: {error}\", source=source, error=e\n )\n\n if not all_extracted_fc_definitions:\n logger.warning(\"No OpenAI function definitions extracted from the provided OpenAPI specification sources.\")\n\n return {\"functions\": all_extracted_fc_definitions, \"openapi_specs\": all_openapi_specs}" }, { "class_start_lineno": 23, "class_end_lineno": 257, "func_start_lineno": 193, "func_end_lineno": 230, "func_code": " def _parse_property_attributes(\n self, property_schema: Dict[str, Any], include_attributes: Optional[List[str]] = None\n ) -> Dict[str, Any]:\n \"\"\"\n Parses the attributes of a property schema.\n\n Recursively parses the attributes of a property schema, including nested objects and arrays,\n and includes specified attributes like description, pattern, etc.\n\n :param property_schema: The schema of the property to parse.\n :param include_attributes: The list of attributes to include in the parsed schema.\n :return: The parsed schema of the property including the specified attributes.\n \"\"\"\n include_attributes = include_attributes or [\"description\", \"pattern\", \"enum\"]\n\n schema_type = property_schema.get(\"type\")\n\n parsed_schema = {\"type\": schema_type} if schema_type else {}\n for attr in include_attributes:\n if attr in property_schema:\n parsed_schema[attr] = property_schema[attr]\n\n if schema_type == \"object\":\n properties = property_schema.get(\"properties\", {})\n parsed_properties = {\n prop_name: self._parse_property_attributes(prop, include_attributes)\n for prop_name, prop in properties.items()\n }\n parsed_schema[\"properties\"] = parsed_properties\n\n if \"required\" in property_schema:\n parsed_schema[\"required\"] = property_schema[\"required\"]\n\n elif schema_type == \"array\":\n items = property_schema.get(\"items\", {})\n parsed_schema[\"items\"] = self._parse_property_attributes(items, include_attributes)\n\n return parsed_schema" }, { "class_start_lineno": 23, "class_end_lineno": 257, "func_start_lineno": 153, "func_end_lineno": 191, "func_code": " def _parse_endpoint_spec(self, resolved_spec: Dict[str, Any]) -> Optional[Dict[str, Any]]:\n if not isinstance(resolved_spec, dict):\n logger.warning(\"Invalid OpenAPI spec format provided. 
Could not extract function.\")\n return {}\n\n function_name = resolved_spec.get(\"operationId\")\n description = resolved_spec.get(\"description\") or resolved_spec.get(\"summary\", \"\")\n\n schema: Dict[str, Any] = {\"type\": \"object\", \"properties\": {}}\n\n # requestBody section\n req_body_schema = (\n resolved_spec.get(\"requestBody\", {}).get(\"content\", {}).get(\"application/json\", {}).get(\"schema\", {})\n )\n if \"properties\" in req_body_schema:\n for prop_name, prop_schema in req_body_schema[\"properties\"].items():\n schema[\"properties\"][prop_name] = self._parse_property_attributes(prop_schema)\n\n if \"required\" in req_body_schema:\n schema.setdefault(\"required\", []).extend(req_body_schema[\"required\"])\n\n # parameters section\n for param in resolved_spec.get(\"parameters\", []):\n if \"schema\" in param:\n schema_dict = self._parse_property_attributes(param[\"schema\"])\n # these attributes are not in param[schema] level but on param level\n useful_attributes = [\"description\", \"pattern\", \"enum\"]\n schema_dict.update({key: param[key] for key in useful_attributes if param.get(key)})\n schema[\"properties\"][param[\"name\"]] = schema_dict\n if param.get(\"required\", False):\n schema.setdefault(\"required\", []).append(param[\"name\"])\n\n if function_name and description and schema[\"properties\"]:\n return {\"name\": function_name, \"description\": description, \"parameters\": schema}\n else:\n logger.warning(\n \"Invalid OpenAPI spec format provided. Could not extract function from {spec}\", spec=resolved_spec\n )\n return {}" }, { "class_start_lineno": 23, "class_end_lineno": 257, "func_start_lineno": 117, "func_end_lineno": 151, "func_code": " def _openapi_to_functions(self, service_openapi_spec: Dict[str, Any]) -> List[Dict[str, Any]]:\n \"\"\"\n OpenAPI to OpenAI function conversion.\n\n Extracts functions from the OpenAPI specification of the service and converts them into a format\n suitable for OpenAI function calling.\n\n :param service_openapi_spec: The OpenAPI specification from which functions are to be extracted.\n :type service_openapi_spec: Dict[str, Any]\n :return: A list of dictionaries, each representing a function. Each dictionary includes the function's\n name, description, and a schema of its parameters.\n :rtype: List[Dict[str, Any]]\n \"\"\"\n\n # Doesn't enforce rigid spec validation because that would require a lot of dependencies\n # We check the version and require minimal fields to be present, so we can extract functions\n spec_version = service_openapi_spec.get(\"openapi\")\n if not spec_version:\n raise ValueError(f\"Invalid OpenAPI spec provided. Could not extract version from {service_openapi_spec}\")\n service_openapi_spec_version = int(spec_version.split(\".\")[0])\n\n # Compare the versions\n if service_openapi_spec_version < OpenAPIServiceToFunctions.MIN_REQUIRED_OPENAPI_SPEC_VERSION:\n raise ValueError(\n f\"Invalid OpenAPI spec version {service_openapi_spec_version}. Must be \"\n f\"at least {OpenAPIServiceToFunctions.MIN_REQUIRED_OPENAPI_SPEC_VERSION}.\"\n )\n\n functions: List[Dict[str, Any]] = []\n for paths in service_openapi_spec[\"paths\"].values():\n for path_spec in paths.values():\n function_dict = self._parse_endpoint_spec(path_spec)\n if function_dict:\n functions.append(function_dict)\n return functions" } ]
[ "function_empty" ]
[ "haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions._parse_openapi_spec", "haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions.run", "haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions._parse_property_attributes", "haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions._parse_endpoint_spec", "haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions._openapi_to_functions" ]
Python
4
4
{ "total_num": 8, "base_passed_num": 0 }
[ "haystack.haystack.utils.callable_serialization.serialize_callable", "haystack.haystack.components.converters.output_adapter.OutputAdapter::to_dict", "haystack.haystack.utils.type_serialization.thread_safe_import", "haystack.haystack.utils.callable_serialization.deserialize_callable" ]
haystack
[ "haystack/utils/callable_serialization.py", "haystack/components/converters/output_adapter.py", "haystack/utils/type_serialization.py", "haystack/utils/callable_serialization.py" ]
[ "test/components/converters/test_output_adapter.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 12, "func_end_lineno": 42, "func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path" }, { "class_start_lineno": 25, "class_end_lineno": 184, "func_start_lineno": 139, "func_end_lineno": 153, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n se_filters = {name: serialize_callable(filter_func) for name, filter_func in self.custom_filters.items()}\n return default_to_dict(\n self,\n template=self.template,\n output_type=serialize_type(self.output_type),\n custom_filters=se_filters,\n unsafe=self._unsafe,\n )" }, { "class_start_lineno": 1, "class_end_lineno": 170, "func_start_lineno": 159, "func_end_lineno": 170, "func_code": "def thread_safe_import(module_name: str) -> ModuleType:\n \"\"\"\n Import a module in a thread-safe manner.\n\n Importing modules in a multi-threaded environment can lead to race conditions.\n This function ensures that the module is imported in a thread-safe manner without having impact\n on the performance of the import for single-threaded environments.\n\n :param module_name: the module to import\n \"\"\"\n with _import_lock:\n return importlib.import_module(module_name)" }, { "class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 45, "func_end_lineno": 80, "func_code": "def deserialize_callable(callable_handle: str) -> Callable:\n \"\"\"\n Deserializes a callable given its full import path as a string.\n\n :param callable_handle: The full path of the callable_handle\n :return: The callable\n :raises DeserializationError: If the callable cannot be found\n \"\"\"\n parts = callable_handle.split(\".\")\n\n for i in range(len(parts), 0, -1):\n module_name = \".\".join(parts[:i])\n try:\n mod: Any = thread_safe_import(module_name)\n except Exception:\n # keep reducing i until we find a valid module import\n continue\n\n attr_value = mod\n for part in parts[i:]:\n try:\n attr_value = getattr(attr_value, part)\n except AttributeError as e:\n raise DeserializationError(f\"Could not find attribute '{part}' in {attr_value.__name__}\") from e\n\n # when the attribute is a classmethod, we need the underlying function\n if isinstance(attr_value, (classmethod, staticmethod)):\n attr_value = attr_value.__func__\n\n if not callable(attr_value):\n raise DeserializationError(f\"The final 
attribute is not callable: {attr_value}\")\n\n return attr_value\n\n # Fallback if we never find anything\n raise DeserializationError(f\"Could not import '{callable_handle}' as a module or callable.\")" } ]
[ "function_empty", "TDD" ]
[ "haystack.utils.callable_serialization.serialize_callable", "haystack.components.converters.output_adapter.OutputAdapter.to_dict", "haystack.utils.type_serialization.thread_safe_import", "haystack.utils.callable_serialization.deserialize_callable" ]
Python
3
4
{ "total_num": 14, "base_passed_num": 10 }
[ "haystack.haystack.utils.callable_serialization.serialize_callable", "haystack.haystack.components.embedders.azure_document_embedder.AzureOpenAIDocumentEmbedder::to_dict", "haystack.haystack.utils.type_serialization.thread_safe_import", "haystack.haystack.utils.callable_serialization.deserialize_callable" ]
haystack
[ "haystack/utils/callable_serialization.py", "haystack/components/embedders/azure_document_embedder.py", "haystack/utils/type_serialization.py", "haystack/utils/callable_serialization.py" ]
[ "test/components/embedders/test_azure_document_embedder.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 12, "func_end_lineno": 42, "func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path" }, { "class_start_lineno": 20, "class_end_lineno": 281, "func_start_lineno": 154, "func_end_lineno": 183, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n azure_ad_token_provider_name = None\n if self.azure_ad_token_provider:\n azure_ad_token_provider_name = serialize_callable(self.azure_ad_token_provider)\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n dimensions=self.dimensions,\n organization=self.organization,\n api_version=self.api_version,\n prefix=self.prefix,\n suffix=self.suffix,\n batch_size=self.batch_size,\n progress_bar=self.progress_bar,\n meta_fields_to_embed=self.meta_fields_to_embed,\n embedding_separator=self.embedding_separator,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n timeout=self.timeout,\n max_retries=self.max_retries,\n default_headers=self.default_headers,\n azure_ad_token_provider=azure_ad_token_provider_name,\n )" }, { "class_start_lineno": 1, "class_end_lineno": 170, "func_start_lineno": 159, "func_end_lineno": 170, "func_code": "def thread_safe_import(module_name: str) -> ModuleType:\n \"\"\"\n Import a module in a thread-safe manner.\n\n Importing modules in a multi-threaded environment can lead to race conditions.\n This function ensures that the module is imported in a thread-safe manner without having impact\n on the performance of the import for single-threaded environments.\n\n :param module_name: the module to import\n \"\"\"\n with _import_lock:\n return importlib.import_module(module_name)" }, { "class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 45, "func_end_lineno": 80, "func_code": "def deserialize_callable(callable_handle: str) -> Callable:\n \"\"\"\n Deserializes a callable given its full import path as a string.\n\n :param callable_handle: The full path of the callable_handle\n :return: The callable\n :raises DeserializationError: If the callable cannot be found\n \"\"\"\n parts = callable_handle.split(\".\")\n\n for i in range(len(parts), 0, -1):\n module_name = 
\".\".join(parts[:i])\n try:\n mod: Any = thread_safe_import(module_name)\n except Exception:\n # keep reducing i until we find a valid module import\n continue\n\n attr_value = mod\n for part in parts[i:]:\n try:\n attr_value = getattr(attr_value, part)\n except AttributeError as e:\n raise DeserializationError(f\"Could not find attribute '{part}' in {attr_value.__name__}\") from e\n\n # when the attribute is a classmethod, we need the underlying function\n if isinstance(attr_value, (classmethod, staticmethod)):\n attr_value = attr_value.__func__\n\n if not callable(attr_value):\n raise DeserializationError(f\"The final attribute is not callable: {attr_value}\")\n\n return attr_value\n\n # Fallback if we never find anything\n raise DeserializationError(f\"Could not import '{callable_handle}' as a module or callable.\")" } ]
[ "function_empty", "TDD" ]
[ "haystack.utils.callable_serialization.serialize_callable", "haystack.components.embedders.azure_document_embedder.AzureOpenAIDocumentEmbedder.to_dict", "haystack.utils.type_serialization.thread_safe_import", "haystack.utils.callable_serialization.deserialize_callable" ]
Python
3
4
{ "total_num": 6, "base_passed_num": 3 }
[ "haystack.haystack.utils.callable_serialization.serialize_callable", "haystack.haystack.components.embedders.azure_text_embedder.AzureOpenAITextEmbedder::to_dict", "haystack.haystack.utils.type_serialization.thread_safe_import", "haystack.haystack.utils.callable_serialization.deserialize_callable" ]
haystack
[ "haystack/utils/callable_serialization.py", "haystack/components/embedders/azure_text_embedder.py", "haystack/utils/type_serialization.py", "haystack/utils/callable_serialization.py" ]
[ "test/components/embedders/test_azure_text_embedder.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 12, "func_end_lineno": 42, "func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path" }, { "class_start_lineno": 15, "class_end_lineno": 216, "func_start_lineno": 136, "func_end_lineno": 161, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n azure_ad_token_provider_name = None\n if self.azure_ad_token_provider:\n azure_ad_token_provider_name = serialize_callable(self.azure_ad_token_provider)\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n dimensions=self.dimensions,\n organization=self.organization,\n api_version=self.api_version,\n prefix=self.prefix,\n suffix=self.suffix,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n timeout=self.timeout,\n max_retries=self.max_retries,\n default_headers=self.default_headers,\n azure_ad_token_provider=azure_ad_token_provider_name,\n )" }, { "class_start_lineno": 1, "class_end_lineno": 170, "func_start_lineno": 159, "func_end_lineno": 170, "func_code": "def thread_safe_import(module_name: str) -> ModuleType:\n \"\"\"\n Import a module in a thread-safe manner.\n\n Importing modules in a multi-threaded environment can lead to race conditions.\n This function ensures that the module is imported in a thread-safe manner without having impact\n on the performance of the import for single-threaded environments.\n\n :param module_name: the module to import\n \"\"\"\n with _import_lock:\n return importlib.import_module(module_name)" }, { "class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 45, "func_end_lineno": 80, "func_code": "def deserialize_callable(callable_handle: str) -> Callable:\n \"\"\"\n Deserializes a callable given its full import path as a string.\n\n :param callable_handle: The full path of the callable_handle\n :return: The callable\n :raises DeserializationError: If the callable cannot be found\n \"\"\"\n parts = callable_handle.split(\".\")\n\n for i in range(len(parts), 0, -1):\n module_name = \".\".join(parts[:i])\n try:\n mod: Any = thread_safe_import(module_name)\n except Exception:\n # keep reducing i until we find a valid module import\n continue\n\n 
attr_value = mod\n for part in parts[i:]:\n try:\n attr_value = getattr(attr_value, part)\n except AttributeError as e:\n raise DeserializationError(f\"Could not find attribute '{part}' in {attr_value.__name__}\") from e\n\n # when the attribute is a classmethod, we need the underlying function\n if isinstance(attr_value, (classmethod, staticmethod)):\n attr_value = attr_value.__func__\n\n if not callable(attr_value):\n raise DeserializationError(f\"The final attribute is not callable: {attr_value}\")\n\n return attr_value\n\n # Fallback if we never find anything\n raise DeserializationError(f\"Could not import '{callable_handle}' as a module or callable.\")" } ]
[ "function_empty", "TDD" ]
[ "haystack.utils.callable_serialization.serialize_callable", "haystack.components.embedders.azure_text_embedder.AzureOpenAITextEmbedder.to_dict", "haystack.utils.type_serialization.thread_safe_import", "haystack.utils.callable_serialization.deserialize_callable" ]
Python
3
4
{ "total_num": 5, "base_passed_num": 2 }
[ "haystack.haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder::_prepare_texts_to_embed", "haystack.haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder::run" ]
haystack
[ "haystack/components/embedders/hugging_face_api_document_embedder.py", "haystack/components/embedders/hugging_face_api_document_embedder.py" ]
[ "test/components/embedders/test_hugging_face_api_document_embedder.py" ]
[ { "class_start_lineno": 24, "class_end_lineno": 298, "func_start_lineno": 219, "func_end_lineno": 234, "func_code": " def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:\n \"\"\"\n Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.\n \"\"\"\n texts_to_embed = []\n for doc in documents:\n meta_values_to_embed = [\n str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key] is not None\n ]\n\n text_to_embed = (\n self.prefix + self.embedding_separator.join(meta_values_to_embed + [doc.content or \"\"]) + self.suffix\n )\n\n texts_to_embed.append(text_to_embed)\n return texts_to_embed" }, { "class_start_lineno": 24, "class_end_lineno": 298, "func_start_lineno": 274, "func_end_lineno": 298, "func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Embeds a list of documents.\n\n :param documents:\n Documents to embed.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: A list of documents with embeddings.\n \"\"\"\n if not isinstance(documents, list) or documents and not isinstance(documents[0], Document):\n raise TypeError(\n \"HuggingFaceAPIDocumentEmbedder expects a list of Documents as input.\"\n \" In case you want to embed a string, please use the HuggingFaceAPITextEmbedder.\"\n )\n\n texts_to_embed = self._prepare_texts_to_embed(documents=documents)\n\n embeddings = self._embed_batch(texts_to_embed=texts_to_embed, batch_size=self.batch_size)\n\n for doc, emb in zip(documents, embeddings):\n doc.embedding = emb\n\n return {\"documents\": documents}" } ]
[ "function_empty" ]
[ "haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder._prepare_texts_to_embed", "haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder.run" ]
Python
2
2
{ "total_num": 17, "base_passed_num": 12 }
[ "haystack.haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder::_prepare_texts_to_embed", "haystack.haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder::run" ]
haystack
[ "haystack/components/embedders/openai_document_embedder.py", "haystack/components/embedders/openai_document_embedder.py" ]
[ "test/components/embedders/test_openai_document_embedder.py" ]
[ { "class_start_lineno": 19, "class_end_lineno": 245, "func_start_lineno": 164, "func_end_lineno": 181, "func_code": " def _prepare_texts_to_embed(self, documents: List[Document]) -> Dict[str, str]:\n \"\"\"\n Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.\n \"\"\"\n texts_to_embed = {}\n for doc in documents:\n meta_values_to_embed = [\n str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key] is not None\n ]\n\n text_to_embed = (\n self.prefix + self.embedding_separator.join(meta_values_to_embed + [doc.content or \"\"]) + self.suffix\n )\n\n # copied from OpenAI embedding_utils (https://github.com/openai/openai-python/blob/main/openai/embeddings_utils.py)\n # replace newlines, which can negatively affect performance.\n texts_to_embed[doc.id] = text_to_embed.replace(\"\\n\", \" \")\n return texts_to_embed" }, { "class_start_lineno": 19, "class_end_lineno": 245, "func_start_lineno": 220, "func_end_lineno": 245, "func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Embeds a list of documents.\n\n :param documents:\n A list of documents to embed.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: A list of documents with embeddings.\n - `meta`: Information about the usage of the model.\n \"\"\"\n if not isinstance(documents, list) or documents and not isinstance(documents[0], Document):\n raise TypeError(\n \"OpenAIDocumentEmbedder expects a list of Documents as input.\"\n \"In case you want to embed a string, please use the OpenAITextEmbedder.\"\n )\n\n texts_to_embed = self._prepare_texts_to_embed(documents=documents)\n\n embeddings, meta = self._embed_batch(texts_to_embed=texts_to_embed, batch_size=self.batch_size)\n\n for doc, emb in zip(documents, embeddings):\n doc.embedding = emb\n\n return {\"documents\": documents, \"meta\": meta}" } ]
[ "function_empty" ]
[ "haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder._prepare_texts_to_embed", "haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder.run" ]
Python
2
2
{ "total_num": 11, "base_passed_num": 7 }
[ "haystack.haystack.utils.device.ComponentDevice::to_dict", "haystack.haystack.components.embedders.sentence_transformers_document_embedder.SentenceTransformersDocumentEmbedder::to_dict" ]
haystack
[ "haystack/utils/device.py", "haystack/components/embedders/sentence_transformers_document_embedder.py" ]
[ "test/components/embedders/test_sentence_transformers_document_embedder.py" ]
[ { "class_start_lineno": 240, "class_end_lineno": 480, "func_start_lineno": 450, "func_end_lineno": 463, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Convert the component device representation to a JSON-serializable dictionary.\n\n :returns:\n The dictionary representation.\n \"\"\"\n if self._single_device is not None:\n return {\"type\": \"single\", \"device\": str(self._single_device)}\n elif self._multiple_devices is not None:\n return {\"type\": \"multiple\", \"device_map\": self._multiple_devices.to_dict()}\n else:\n # Unreachable\n assert False" }, { "class_start_lineno": 16, "class_end_lineno": 256, "func_start_lineno": 145, "func_end_lineno": 175, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialization_dict = default_to_dict(\n self,\n model=self.model,\n device=self.device.to_dict(),\n token=self.token.to_dict() if self.token else None,\n prefix=self.prefix,\n suffix=self.suffix,\n batch_size=self.batch_size,\n progress_bar=self.progress_bar,\n normalize_embeddings=self.normalize_embeddings,\n meta_fields_to_embed=self.meta_fields_to_embed,\n embedding_separator=self.embedding_separator,\n trust_remote_code=self.trust_remote_code,\n truncate_dim=self.truncate_dim,\n model_kwargs=self.model_kwargs,\n tokenizer_kwargs=self.tokenizer_kwargs,\n config_kwargs=self.config_kwargs,\n precision=self.precision,\n encode_kwargs=self.encode_kwargs,\n backend=self.backend,\n )\n if serialization_dict[\"init_parameters\"].get(\"model_kwargs\") is not None:\n serialize_hf_model_kwargs(serialization_dict[\"init_parameters\"][\"model_kwargs\"])\n return serialization_dict" } ]
[ "function_empty" ]
[ "haystack.utils.device.ComponentDevice.to_dict", "haystack.components.embedders.sentence_transformers_document_embedder.SentenceTransformersDocumentEmbedder.to_dict" ]
Python
2
2
{ "total_num": 18, "base_passed_num": 15 }
[ "haystack.haystack.utils.device.ComponentDevice::to_dict", "haystack.haystack.components.embedders.sentence_transformers_text_embedder.SentenceTransformersTextEmbedder::to_dict" ]
haystack
[ "haystack/utils/device.py", "haystack/components/embedders/sentence_transformers_text_embedder.py" ]
[ "test/components/embedders/test_sentence_transformers_text_embedder.py" ]
[ { "class_start_lineno": 240, "class_end_lineno": 480, "func_start_lineno": 450, "func_end_lineno": 463, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Convert the component device representation to a JSON-serializable dictionary.\n\n :returns:\n The dictionary representation.\n \"\"\"\n if self._single_device is not None:\n return {\"type\": \"single\", \"device\": str(self._single_device)}\n elif self._multiple_devices is not None:\n return {\"type\": \"multiple\", \"device_map\": self._multiple_devices.to_dict()}\n else:\n # Unreachable\n assert False" }, { "class_start_lineno": 16, "class_end_lineno": 229, "func_start_lineno": 133, "func_end_lineno": 161, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialization_dict = default_to_dict(\n self,\n model=self.model,\n device=self.device.to_dict(),\n token=self.token.to_dict() if self.token else None,\n prefix=self.prefix,\n suffix=self.suffix,\n batch_size=self.batch_size,\n progress_bar=self.progress_bar,\n normalize_embeddings=self.normalize_embeddings,\n trust_remote_code=self.trust_remote_code,\n truncate_dim=self.truncate_dim,\n model_kwargs=self.model_kwargs,\n tokenizer_kwargs=self.tokenizer_kwargs,\n config_kwargs=self.config_kwargs,\n precision=self.precision,\n encode_kwargs=self.encode_kwargs,\n backend=self.backend,\n )\n if serialization_dict[\"init_parameters\"].get(\"model_kwargs\") is not None:\n serialize_hf_model_kwargs(serialization_dict[\"init_parameters\"][\"model_kwargs\"])\n return serialization_dict" } ]
[ "function_empty" ]
[ "haystack.utils.device.ComponentDevice.to_dict", "haystack.components.embedders.sentence_transformers_text_embedder.SentenceTransformersTextEmbedder.to_dict" ]
Python
2
2
{ "total_num": 19, "base_passed_num": 16 }
[ "haystack.haystack.utils.type_serialization.serialize_type", "haystack.haystack.components.evaluators.llm_evaluator.LLMEvaluator::to_dict", "haystack.haystack.core.serialization.component_to_dict", "haystack.haystack.utils.type_serialization.deserialize_type", "haystack.haystack.core.serialization.component_from_dict" ]
haystack
[ "haystack/utils/type_serialization.py", "haystack/components/evaluators/llm_evaluator.py", "haystack/core/serialization.py", "haystack/utils/type_serialization.py", "haystack/core/serialization.py" ]
[ "test/components/evaluators/test_llm_evaluator.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 170, "func_start_lineno": 19, "func_end_lineno": 52, "func_code": "def serialize_type(target: Any) -> str:\n \"\"\"\n Serializes a type or an instance to its string representation, including the module name.\n\n This function handles types, instances of types, and special typing objects.\n It assumes that non-typing objects will have a '__name__' attribute.\n\n :param target:\n The object to serialize, can be an instance or a type.\n :return:\n The string representation of the type.\n \"\"\"\n name = getattr(target, \"__name__\", str(target))\n\n # Remove the 'typing.' prefix when using python <3.9\n if name.startswith(\"typing.\"):\n name = name[7:]\n # Remove the arguments from the name when using python <3.9\n if \"[\" in name:\n name = name.split(\"[\")[0]\n\n # Get module name\n module = inspect.getmodule(target)\n module_name = \"\"\n # We omit the module name for builtins to not clutter the output\n if module and hasattr(module, \"__name__\") and module.__name__ != \"builtins\":\n module_name = f\"{module.__name__}\"\n\n args = get_args(target)\n if args:\n args_str = \", \".join([serialize_type(a) for a in args if a is not type(None)])\n return f\"{module_name}.{name}[{args_str}]\" if module_name else f\"{name}[{args_str}]\"\n\n return f\"{module_name}.{name}\" if module_name else f\"{name}\"" }, { "class_start_lineno": 18, "class_end_lineno": 387, "func_start_lineno": 278, "func_end_lineno": 297, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n # Since we cannot currently serialize tuples, convert the inputs to a list.\n inputs = [[name, serialize_type(type_)] for name, type_ in self.inputs]\n return default_to_dict(\n self,\n instructions=self.instructions,\n inputs=inputs,\n outputs=self.outputs,\n examples=self.examples,\n api=self.api,\n api_key=self.api_key and self.api_key.to_dict(),\n api_params=self.api_params,\n progress_bar=self.progress_bar,\n )" }, { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix 
this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data" }, { "class_start_lineno": 1, "class_end_lineno": 170, "func_start_lineno": 78, "func_end_lineno": 156, "func_code": "def deserialize_type(type_str: str) -> Any: # pylint: disable=too-many-return-statements\n \"\"\"\n Deserializes a type given its full import path as a string, including nested generic types.\n\n This function will dynamically import the module if it's not already imported\n and then retrieve the type object from it. It also handles nested generic types like\n `typing.List[typing.Dict[int, str]]`.\n\n :param type_str:\n The string representation of the type's full import path.\n :returns:\n The deserialized type object.\n :raises DeserializationError:\n If the type cannot be deserialized due to missing module or type.\n \"\"\"\n\n type_mapping = {\n list: typing.List,\n dict: typing.Dict,\n set: typing.Set,\n tuple: typing.Tuple,\n frozenset: typing.FrozenSet,\n }\n\n # Handle generics\n if \"[\" in type_str and type_str.endswith(\"]\"):\n main_type_str, generics_str = type_str.split(\"[\", 1)\n generics_str = generics_str[:-1]\n\n main_type = deserialize_type(main_type_str)\n generic_args = [deserialize_type(arg) for arg in _parse_generic_args(generics_str)]\n\n # Reconstruct\n try:\n if sys.version_info >= (3, 9) or repr(main_type).startswith(\"typing.\"):\n return main_type[tuple(generic_args) if len(generic_args) > 1 else generic_args[0]]\n else:\n return type_mapping[main_type][tuple(generic_args) if len(generic_args) > 1 else generic_args[0]]\n except (TypeError, AttributeError) as e:\n raise DeserializationError(f\"Could not apply arguments {generic_args} to type {main_type}\") from e\n\n # Handle non-generic types\n # First, check if there's a module prefix\n if \".\" in type_str:\n parts = type_str.split(\".\")\n module_name = \".\".join(parts[:-1])\n type_name = parts[-1]\n\n module = sys.modules.get(module_name)\n if module is None:\n try:\n module = thread_safe_import(module_name)\n except ImportError as e:\n raise DeserializationError(f\"Could not import the module: {module_name}\") from e\n\n # Get the class from the module\n if hasattr(module, type_name):\n return getattr(module, type_name)\n\n raise DeserializationError(f\"Could not locate the type: {type_name} in the module: {module_name}\")\n\n # No module prefix, check builtins and typing\n # First check builtins\n if hasattr(builtins, type_str):\n return getattr(builtins, type_str)\n\n # Then check typing\n if hasattr(typing, type_str):\n return getattr(typing, type_str)\n\n # Special case for NoneType\n if type_str == \"NoneType\":\n return type(None)\n\n # Special case for None\n if type_str == \"None\":\n return None\n\n raise DeserializationError(f\"Could not deserialize type: {type_str}\")" }, { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 134, "func_end_lineno": 169, "func_code": "def component_from_dict(\n cls: Type[object], data: Dict[str, Any], name: str, callbacks: Optional[DeserializationCallbacks] = None\n) -> Any:\n \"\"\"\n Creates a component instance from a dictionary.\n\n If a `from_dict` method is present in the component class, that will be used instead 
of the default method.\n\n :param cls:\n The class to be used for deserialization.\n :param data:\n The serialized data.\n :param name:\n The name of the component.\n :param callbacks:\n Callbacks to invoke during deserialization.\n :returns:\n The deserialized component.\n \"\"\"\n\n def component_pre_init_callback(component_cls, init_params):\n assert callbacks is not None\n assert callbacks.component_pre_init is not None\n callbacks.component_pre_init(name, component_cls, init_params)\n\n def do_from_dict():\n if hasattr(cls, \"from_dict\"):\n return cls.from_dict(data)\n\n return default_from_dict(cls, data)\n\n if callbacks is None or callbacks.component_pre_init is None:\n return do_from_dict()\n\n with _hook_component_init(component_pre_init_callback):\n return do_from_dict()" } ]
[ "function_empty" ]
[ "haystack.utils.type_serialization.serialize_type", "haystack.components.evaluators.llm_evaluator.LLMEvaluator.to_dict", "haystack.core.serialization.component_to_dict", "haystack.utils.type_serialization.deserialize_type", "haystack.core.serialization.component_from_dict" ]
Python
5
5
{ "total_num": 17, "base_passed_num": 13 }
[ "haystack.haystack.utils.device.ComponentDevice::to_dict", "haystack.haystack.components.evaluators.sas_evaluator.SASEvaluator::to_dict" ]
haystack
[ "haystack/utils/device.py", "haystack/components/evaluators/sas_evaluator.py" ]
[ "test/components/evaluators/test_sas_evaluator.py" ]
[ { "class_start_lineno": 240, "class_end_lineno": 480, "func_start_lineno": 450, "func_end_lineno": 463, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Convert the component device representation to a JSON-serializable dictionary.\n\n :returns:\n The dictionary representation.\n \"\"\"\n if self._single_device is not None:\n return {\"type\": \"single\", \"device\": str(self._single_device)}\n elif self._multiple_devices is not None:\n return {\"type\": \"multiple\", \"device_map\": self._multiple_devices.to_dict()}\n else:\n # Unreachable\n assert False" }, { "class_start_lineno": 20, "class_end_lineno": 201, "func_start_lineno": 85, "func_end_lineno": 98, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n return default_to_dict(\n self,\n model=self._model,\n batch_size=self._batch_size,\n device=self._device.to_dict() if self._device else None,\n token=self._token.to_dict() if self._token else None,\n )" } ]
[ "function_empty" ]
[ "haystack.utils.device.ComponentDevice.to_dict", "haystack.components.evaluators.sas_evaluator.SASEvaluator.to_dict" ]
Python
2
2
{ "total_num": 12, "base_passed_num": 11 }
[ "haystack.haystack.components.generators.chat.openai.OpenAIChatGenerator::to_dict", "haystack.haystack.components.extractors.llm_metadata_extractor.LLMMetadataExtractor::to_dict", "haystack.haystack.components.builders.prompt_builder.PromptBuilder::_validate_variables", "haystack.haystack.components.builders.prompt_builder.PromptBuilder::run" ]
haystack
[ "haystack/components/generators/chat/openai.py", "haystack/components/extractors/llm_metadata_extractor.py", "haystack/components/builders/prompt_builder.py", "haystack/components/builders/prompt_builder.py", "haystack/components/extractors/llm_metadata_extractor.py" ]
[ "test/components/extractors/test_llm_metadata_extractor.py" ]
[ { "class_start_lineno": 32, "class_end_lineno": 571, "func_start_lineno": 170, "func_end_lineno": 190, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n return default_to_dict(\n self,\n model=self.model,\n streaming_callback=callback_name,\n api_base_url=self.api_base_url,\n organization=self.organization,\n generation_kwargs=self.generation_kwargs,\n api_key=self.api_key.to_dict(),\n timeout=self.timeout,\n max_retries=self.max_retries,\n tools=[tool.to_dict() for tool in self.tools] if self.tools else None,\n tools_strict=self.tools_strict,\n )" }, { "class_start_lineno": 61, "class_end_lineno": 442, "func_start_lineno": 239, "func_end_lineno": 258, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n\n llm_provider = self.llm_provider.to_dict()\n\n return default_to_dict(\n self,\n prompt=self.prompt,\n generator_api=self.generator_api.value,\n generator_api_params=llm_provider[\"init_parameters\"],\n expected_keys=self.expected_keys,\n page_range=self.expanded_range,\n raise_on_failure=self.raise_on_failure,\n max_workers=self.max_workers,\n )" }, { "class_start_lineno": 17, "class_end_lineno": 266, "func_start_lineno": 247, "func_end_lineno": 266, "func_code": " def _validate_variables(self, provided_variables: Set[str]):\n \"\"\"\n Checks if all the required template variables are provided.\n\n :param provided_variables:\n A set of provided template variables.\n :raises ValueError:\n If any of the required template variables is not provided.\n \"\"\"\n if self.required_variables == \"*\":\n required_variables = sorted(self.variables)\n else:\n required_variables = self.required_variables\n missing_variables = [var for var in required_variables if var not in provided_variables]\n if missing_variables:\n missing_vars_str = \", \".join(missing_variables)\n raise ValueError(\n f\"Missing required input variables in PromptBuilder: {missing_vars_str}. \"\n f\"Required variables: {required_variables}. Provided variables: {provided_variables}.\"\n )" }, { "class_start_lineno": 17, "class_end_lineno": 266, "func_start_lineno": 213, "func_end_lineno": 245, "func_code": " def run(self, template: Optional[str] = None, template_variables: Optional[Dict[str, Any]] = None, **kwargs):\n \"\"\"\n Renders the prompt template with the provided variables.\n\n It applies the template variables to render the final prompt. You can provide variables via pipeline kwargs.\n In order to overwrite the default template, you can set the `template` parameter.\n In order to overwrite pipeline kwargs, you can set the `template_variables` parameter.\n\n :param template:\n An optional string template to overwrite PromptBuilder's default template. 
If None, the default template\n provided at initialization is used.\n :param template_variables:\n An optional dictionary of template variables to overwrite the pipeline variables.\n :param kwargs:\n Pipeline variables used for rendering the prompt.\n\n :returns: A dictionary with the following keys:\n - `prompt`: The updated prompt text after rendering the prompt template.\n\n :raises ValueError:\n If any of the required template variables is not provided.\n \"\"\"\n kwargs = kwargs or {}\n template_variables = template_variables or {}\n template_variables_combined = {**kwargs, **template_variables}\n self._validate_variables(set(template_variables_combined.keys()))\n\n compiled_template = self.template\n if template is not None:\n compiled_template = self._env.from_string(template)\n\n result = compiled_template.render(template_variables_combined)\n return {\"prompt\": result}" }, { "class_start_lineno": 61, "class_end_lineno": 442, "func_start_lineno": 332, "func_end_lineno": 359, "func_code": " def _prepare_prompts(\n self, documents: List[Document], expanded_range: Optional[List[int]] = None\n ) -> List[Union[ChatMessage, None]]:\n all_prompts: List[Union[ChatMessage, None]] = []\n for document in documents:\n if not document.content:\n logger.warning(\"Document {doc_id} has no content. Skipping metadata extraction.\", doc_id=document.id)\n all_prompts.append(None)\n continue\n\n if expanded_range:\n doc_copy = copy.deepcopy(document)\n pages = self.splitter.run(documents=[doc_copy])\n content = \"\"\n for idx, page in enumerate(pages[\"documents\"]):\n if idx + 1 in expanded_range:\n content += page.content\n doc_copy.content = content\n else:\n doc_copy = document\n\n prompt_with_doc = self.builder.run(template=self.prompt, template_variables={\"document\": doc_copy})\n\n # build a ChatMessage with the prompt\n message = ChatMessage.from_user(prompt_with_doc[\"prompt\"])\n all_prompts.append(message)\n\n return all_prompts" } ]
[ "function_empty" ]
[ "haystack.components.generators.chat.openai.OpenAIChatGenerator.to_dict", "haystack.components.extractors.llm_metadata_extractor.LLMMetadataExtractor.to_dict", "haystack.components.builders.prompt_builder.PromptBuilder._validate_variables", "haystack.components.builders.prompt_builder.PromptBuilder.run", "haystack.components.extractors.llm_metadata_extractor.LLMMetadataExtractor._prepare_prompts" ]
Python
4
4
{ "total_num": 13, "base_passed_num": 9 }
[ "haystack.haystack.components.extractors.named_entity_extractor.NamedEntityExtractor::to_dict", "haystack.haystack.core.serialization.component_to_dict" ]
haystack
[ "haystack/components/extractors/named_entity_extractor.py", "haystack/core/serialization.py" ]
[ "test/components/extractors/test_named_entity_extractor.py" ]
[ { "class_start_lineno": 78, "class_end_lineno": 275, "func_start_lineno": 212, "func_end_lineno": 232, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialization_dict = default_to_dict(\n self,\n backend=self._backend.type.name,\n model=self._backend.model_name,\n device=self._backend.device.to_dict(),\n pipeline_kwargs=self._backend._pipeline_kwargs,\n token=self.token.to_dict() if self.token else None,\n )\n\n hf_pipeline_kwargs = serialization_dict[\"init_parameters\"][\"pipeline_kwargs\"]\n hf_pipeline_kwargs.pop(\"token\", None)\n\n serialize_hf_model_kwargs(hf_pipeline_kwargs)\n return serialization_dict" }, { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data" } ]
[ "function_empty", "TDD" ]
[ "haystack.components.extractors.named_entity_extractor.NamedEntityExtractor.to_dict", "haystack.core.serialization.component_to_dict" ]
Python
1
2
{ "total_num": 7, "base_passed_num": 2 }
[ "haystack.haystack.utils.callable_serialization.serialize_callable", "haystack.haystack.components.generators.azure.AzureOpenAIGenerator::to_dict", "haystack.haystack.core.serialization.component_to_dict" ]
haystack
[ "haystack/utils/callable_serialization.py", "haystack/components/generators/azure.py", "haystack/core/serialization.py" ]
[ "test/components/generators/test_azure.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 12, "func_end_lineno": 42, "func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path" }, { "class_start_lineno": 19, "class_end_lineno": 210, "func_start_lineno": 162, "func_end_lineno": 188, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n azure_ad_token_provider_name = None\n if self.azure_ad_token_provider:\n azure_ad_token_provider_name = serialize_callable(self.azure_ad_token_provider)\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n organization=self.organization,\n api_version=self.api_version,\n streaming_callback=callback_name,\n generation_kwargs=self.generation_kwargs,\n system_prompt=self.system_prompt,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n timeout=self.timeout,\n max_retries=self.max_retries,\n default_headers=self.default_headers,\n azure_ad_token_provider=azure_ad_token_provider_name,\n )" }, { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with 
the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data" } ]
[ "function_empty" ]
[ "haystack.utils.callable_serialization.serialize_callable", "haystack.components.generators.azure.AzureOpenAIGenerator.to_dict", "haystack.core.serialization.component_to_dict" ]
Python
3
3
{ "total_num": 7, "base_passed_num": 4 }
[ "haystack.haystack.utils.callable_serialization.serialize_callable", "haystack.haystack.components.generators.openai.OpenAIGenerator::to_dict", "haystack.haystack.utils.type_serialization.thread_safe_import", "haystack.haystack.utils.callable_serialization.deserialize_callable" ]
haystack
[ "haystack/utils/callable_serialization.py", "haystack/components/generators/openai.py", "haystack/utils/type_serialization.py", "haystack/utils/callable_serialization.py" ]
[ "test/components/generators/test_openai.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 12, "func_end_lineno": 42, "func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path" }, { "class_start_lineno": 20, "class_end_lineno": 335, "func_start_lineno": 133, "func_end_lineno": 150, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n return default_to_dict(\n self,\n model=self.model,\n streaming_callback=callback_name,\n api_base_url=self.api_base_url,\n organization=self.organization,\n generation_kwargs=self.generation_kwargs,\n system_prompt=self.system_prompt,\n api_key=self.api_key.to_dict(),\n )" }, { "class_start_lineno": 1, "class_end_lineno": 170, "func_start_lineno": 159, "func_end_lineno": 170, "func_code": "def thread_safe_import(module_name: str) -> ModuleType:\n \"\"\"\n Import a module in a thread-safe manner.\n\n Importing modules in a multi-threaded environment can lead to race conditions.\n This function ensures that the module is imported in a thread-safe manner without having impact\n on the performance of the import for single-threaded environments.\n\n :param module_name: the module to import\n \"\"\"\n with _import_lock:\n return importlib.import_module(module_name)" }, { "class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 45, "func_end_lineno": 80, "func_code": "def deserialize_callable(callable_handle: str) -> Callable:\n \"\"\"\n Deserializes a callable given its full import path as a string.\n\n :param callable_handle: The full path of the callable_handle\n :return: The callable\n :raises DeserializationError: If the callable cannot be found\n \"\"\"\n parts = callable_handle.split(\".\")\n\n for i in range(len(parts), 0, -1):\n module_name = \".\".join(parts[:i])\n try:\n mod: Any = thread_safe_import(module_name)\n except Exception:\n # keep reducing i until we find a valid module import\n continue\n\n attr_value = mod\n for part in parts[i:]:\n try:\n attr_value = getattr(attr_value, part)\n except AttributeError as e:\n raise DeserializationError(f\"Could not find attribute '{part}' in {attr_value.__name__}\") from e\n\n # when the attribute is a classmethod, we need the underlying function\n if isinstance(attr_value, (classmethod, staticmethod)):\n 
attr_value = attr_value.__func__\n\n if not callable(attr_value):\n raise DeserializationError(f\"The final attribute is not callable: {attr_value}\")\n\n return attr_value\n\n # Fallback if we never find anything\n raise DeserializationError(f\"Could not import '{callable_handle}' as a module or callable.\")" } ]
[ "function_empty", "TDD" ]
[ "haystack.utils.callable_serialization.serialize_callable", "haystack.components.generators.openai.OpenAIGenerator.to_dict", "haystack.utils.type_serialization.thread_safe_import", "haystack.utils.callable_serialization.deserialize_callable" ]
Python
3
4
{ "total_num": 12, "base_passed_num": 9 }
[ "haystack.haystack.utils.callable_serialization.serialize_callable", "haystack.haystack.components.generators.chat.azure.AzureOpenAIChatGenerator::to_dict", "haystack.haystack.core.serialization.import_class_by_name", "haystack.haystack.tools.tool.deserialize_tools_inplace" ]
haystack
[ "haystack/utils/callable_serialization.py", "haystack/components/generators/chat/azure.py", "haystack/core/serialization.py", "haystack/tools/tool.py" ]
[ "test/components/generators/chat/test_azure.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 12, "func_end_lineno": 42, "func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path" }, { "class_start_lineno": 20, "class_end_lineno": 226, "func_start_lineno": 177, "func_end_lineno": 204, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n azure_ad_token_provider_name = None\n if self.azure_ad_token_provider:\n azure_ad_token_provider_name = serialize_callable(self.azure_ad_token_provider)\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n organization=self.organization,\n api_version=self.api_version,\n streaming_callback=callback_name,\n generation_kwargs=self.generation_kwargs,\n timeout=self.timeout,\n max_retries=self.max_retries,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n default_headers=self.default_headers,\n tools=[tool.to_dict() for tool in self.tools] if self.tools else None,\n tools_strict=self.tools_strict,\n azure_ad_token_provider=azure_ad_token_provider_name,\n )" }, { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 243, "func_end_lineno": 264, "func_code": "def import_class_by_name(fully_qualified_name: str) -> Type[object]:\n \"\"\"\n Utility function to import (load) a class object based on its fully qualified class name.\n\n This function dynamically imports a class based on its string name.\n It splits the name into module path and class name, imports the module,\n and returns the class object.\n\n :param fully_qualified_name: the fully qualified class name as a string\n :returns: the class object.\n :raises ImportError: If the class cannot be imported or found.\n \"\"\"\n try:\n module_path, class_name = fully_qualified_name.rsplit(\".\", 1)\n logger.debug(\n \"Attempting to import class '{cls_name}' from module '{md_path}'\", cls_name=class_name, md_path=module_path\n )\n module = thread_safe_import(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as error:\n logger.error(\"Failed to import class '{full_name}'\", 
full_name=fully_qualified_name)\n raise ImportError(f\"Could not import class '{fully_qualified_name}'\") from error" }, { "class_start_lineno": 1, "class_end_lineno": 136, "func_start_lineno": 106, "func_end_lineno": 136, "func_code": "def deserialize_tools_inplace(data: Dict[str, Any], key: str = \"tools\"):\n \"\"\"\n Deserialize Tools in a dictionary inplace.\n\n :param data:\n The dictionary with the serialized data.\n :param key:\n The key in the dictionary where the Tools are stored.\n \"\"\"\n if key in data:\n serialized_tools = data[key]\n\n if serialized_tools is None:\n return\n\n if not isinstance(serialized_tools, list):\n raise TypeError(f\"The value of '{key}' is not a list\")\n\n deserialized_tools = []\n for tool in serialized_tools:\n if not isinstance(tool, dict):\n raise TypeError(f\"Serialized tool '{tool}' is not a dictionary\")\n\n # different classes are allowed: Tool, ComponentTool, etc.\n tool_class = import_class_by_name(tool[\"type\"])\n if not issubclass(tool_class, Tool):\n raise TypeError(f\"Class '{tool_class}' is not a subclass of Tool\")\n\n deserialized_tools.append(tool_class.from_dict(tool))\n\n data[key] = deserialized_tools" } ]
[ "function_empty" ]
[ "haystack.utils.callable_serialization.serialize_callable", "haystack.components.generators.chat.azure.AzureOpenAIChatGenerator.to_dict", "haystack.core.serialization.import_class_by_name", "haystack.tools.tool.deserialize_tools_inplace" ]
Python
4
4
{ "total_num": 8, "base_passed_num": 4 }
[ "haystack.haystack.utils.callable_serialization.serialize_callable", "haystack.haystack.components.generators.chat.openai.OpenAIChatGenerator::to_dict", "haystack.haystack.core.serialization.import_class_by_name", "haystack.haystack.tools.tool.deserialize_tools_inplace" ]
haystack
[ "haystack/utils/callable_serialization.py", "haystack/components/generators/chat/openai.py", "haystack/core/serialization.py", "haystack/tools/tool.py" ]
[ "test/components/generators/chat/test_openai.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 12, "func_end_lineno": 42, "func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path" }, { "class_start_lineno": 32, "class_end_lineno": 571, "func_start_lineno": 170, "func_end_lineno": 190, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n return default_to_dict(\n self,\n model=self.model,\n streaming_callback=callback_name,\n api_base_url=self.api_base_url,\n organization=self.organization,\n generation_kwargs=self.generation_kwargs,\n api_key=self.api_key.to_dict(),\n timeout=self.timeout,\n max_retries=self.max_retries,\n tools=[tool.to_dict() for tool in self.tools] if self.tools else None,\n tools_strict=self.tools_strict,\n )" }, { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 243, "func_end_lineno": 264, "func_code": "def import_class_by_name(fully_qualified_name: str) -> Type[object]:\n \"\"\"\n Utility function to import (load) a class object based on its fully qualified class name.\n\n This function dynamically imports a class based on its string name.\n It splits the name into module path and class name, imports the module,\n and returns the class object.\n\n :param fully_qualified_name: the fully qualified class name as a string\n :returns: the class object.\n :raises ImportError: If the class cannot be imported or found.\n \"\"\"\n try:\n module_path, class_name = fully_qualified_name.rsplit(\".\", 1)\n logger.debug(\n \"Attempting to import class '{cls_name}' from module '{md_path}'\", cls_name=class_name, md_path=module_path\n )\n module = thread_safe_import(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as error:\n logger.error(\"Failed to import class '{full_name}'\", full_name=fully_qualified_name)\n raise ImportError(f\"Could not import class '{fully_qualified_name}'\") from error" }, { "class_start_lineno": 1, "class_end_lineno": 136, "func_start_lineno": 106, "func_end_lineno": 136, "func_code": "def deserialize_tools_inplace(data: Dict[str, Any], key: str = \"tools\"):\n \"\"\"\n Deserialize Tools in a dictionary inplace.\n\n :param data:\n The dictionary with the serialized data.\n :param key:\n The key in the 
dictionary where the Tools are stored.\n \"\"\"\n if key in data:\n serialized_tools = data[key]\n\n if serialized_tools is None:\n return\n\n if not isinstance(serialized_tools, list):\n raise TypeError(f\"The value of '{key}' is not a list\")\n\n deserialized_tools = []\n for tool in serialized_tools:\n if not isinstance(tool, dict):\n raise TypeError(f\"Serialized tool '{tool}' is not a dictionary\")\n\n # different classes are allowed: Tool, ComponentTool, etc.\n tool_class = import_class_by_name(tool[\"type\"])\n if not issubclass(tool_class, Tool):\n raise TypeError(f\"Class '{tool_class}' is not a subclass of Tool\")\n\n deserialized_tools.append(tool_class.from_dict(tool))\n\n data[key] = deserialized_tools" } ]
[ "function_empty" ]
[ "haystack.utils.callable_serialization.serialize_callable", "haystack.components.generators.chat.openai.OpenAIChatGenerator.to_dict", "haystack.core.serialization.import_class_by_name", "haystack.tools.tool.deserialize_tools_inplace" ]
Python
4
4
{ "total_num": 19, "base_passed_num": 16 }
[ "haystack.haystack.components.joiners.document_joiner.DocumentJoiner::_reciprocal_rank_fusion", "haystack.haystack.components.joiners.document_joiner.DocumentJoiner::run" ]
haystack
[ "haystack/components/joiners/document_joiner.py", "haystack/components/joiners/document_joiner.py" ]
[ "test/components/joiners/test_document_joiner.py" ]
[ { "class_start_lineno": 44, "class_end_lineno": 290, "func_start_lineno": 201, "func_end_lineno": 232, "func_code": " def _reciprocal_rank_fusion(self, document_lists: List[List[Document]]) -> List[Document]:\n \"\"\"\n Merge multiple lists of Documents and assign scores based on reciprocal rank fusion.\n\n The constant k is set to 61 (60 was suggested by the original paper,\n plus 1 as python lists are 0-based and the paper used 1-based ranking).\n \"\"\"\n # This check prevents a division by zero when no documents are passed\n if not document_lists:\n return []\n\n k = 61\n\n scores_map: dict = defaultdict(int)\n documents_map = {}\n weights = self.weights if self.weights else [1 / len(document_lists)] * len(document_lists)\n\n # Calculate weighted reciprocal rank fusion score\n for documents, weight in zip(document_lists, weights):\n for rank, doc in enumerate(documents):\n scores_map[doc.id] += (weight * len(document_lists)) / (k + rank)\n documents_map[doc.id] = doc\n\n # Normalize scores. Note: len(results) / k is the maximum possible score,\n # achieved by being ranked first in all doc lists with non-zero weight.\n for _id in scores_map:\n scores_map[_id] /= len(document_lists) / k\n\n for doc in documents_map.values():\n doc.score = scores_map[doc.id]\n\n return list(documents_map.values())" }, { "class_start_lineno": 44, "class_end_lineno": 290, "func_start_lineno": 130, "func_end_lineno": 163, "func_code": " def run(self, documents: Variadic[List[Document]], top_k: Optional[int] = None):\n \"\"\"\n Joins multiple lists of Documents into a single list depending on the `join_mode` parameter.\n\n :param documents:\n List of list of documents to be merged.\n :param top_k:\n The maximum number of documents to return. Overrides the instance's `top_k` if provided.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: Merged list of Documents\n \"\"\"\n output_documents = []\n\n documents = list(documents)\n output_documents = self.join_mode_function(documents)\n\n if self.sort_by_score:\n output_documents = sorted(\n output_documents, key=lambda doc: doc.score if doc.score is not None else -inf, reverse=True\n )\n if any(doc.score is None for doc in output_documents):\n logger.info(\n \"Some of the Documents DocumentJoiner got have score=None. It was configured to sort Documents by \"\n \"score, so those with score=None were sorted as if they had a score of -infinity.\"\n )\n\n if top_k:\n output_documents = output_documents[:top_k]\n elif self.top_k:\n output_documents = output_documents[: self.top_k]\n\n return {\"documents\": output_documents}" } ]
[ "function_empty" ]
[ "haystack.components.joiners.document_joiner.DocumentJoiner._reciprocal_rank_fusion", "haystack.components.joiners.document_joiner.DocumentJoiner.run" ]
Python
2
2
{ "total_num": 29, "base_passed_num": 7 }
[ "haystack.haystack.components.preprocessors.csv_document_splitter.CSVDocumentSplitter::_split_dataframe", "haystack.haystack.components.preprocessors.csv_document_splitter.CSVDocumentSplitter::_recursive_split" ]
haystack
[ "haystack/components/preprocessors/csv_document_splitter.py", "haystack/components/preprocessors/csv_document_splitter.py" ]
[ "test/components/preprocessors/test_csv_document_splitter.py" ]
[ { "class_start_lineno": 18, "class_end_lineno": 244, "func_start_lineno": 174, "func_end_lineno": 207, "func_code": " def _split_dataframe(\n self, df: \"pd.DataFrame\", split_threshold: int, axis: Literal[\"row\", \"column\"]\n ) -> List[\"pd.DataFrame\"]:\n \"\"\"\n Splits a DataFrame into sub-tables based on consecutive empty rows or columns exceeding `split_threshold`.\n\n :param df: DataFrame to split.\n :param split_threshold: Minimum number of consecutive empty rows or columns to trigger a split.\n :param axis: Axis along which to split. Either \"row\" or \"column\".\n :return: List of split DataFrames.\n \"\"\"\n # Find indices of consecutive empty rows or columns\n split_indices = self._find_split_indices(df=df, split_threshold=split_threshold, axis=axis)\n\n # If no split_indices are found, return the original DataFrame\n if len(split_indices) == 0:\n return [df]\n\n # Split the DataFrame at identified indices\n sub_tables = []\n table_start_idx = 0\n df_length = df.shape[0] if axis == \"row\" else df.shape[1]\n for empty_start_idx, empty_end_idx in split_indices + [(df_length, df_length)]:\n # Avoid empty splits\n if empty_start_idx - table_start_idx >= 1:\n if axis == \"row\":\n sub_table = df.iloc[table_start_idx:empty_start_idx]\n else:\n sub_table = df.iloc[:, table_start_idx:empty_start_idx]\n if not sub_table.empty:\n sub_tables.append(sub_table)\n table_start_idx = empty_end_idx + 1\n\n return sub_tables" }, { "class_start_lineno": 18, "class_end_lineno": 244, "func_start_lineno": 209, "func_end_lineno": 244, "func_code": " def _recursive_split(\n self, df: \"pd.DataFrame\", row_split_threshold: int, column_split_threshold: int\n ) -> List[\"pd.DataFrame\"]:\n \"\"\"\n Recursively splits a DataFrame.\n\n Recursively splits a DataFrame first by empty rows, then by empty columns, and repeats the process\n until no more splits are possible. Returns a list of DataFrames, each representing a fully separated sub-table.\n\n :param df: A Pandas DataFrame representing a table (or multiple tables) extracted from a CSV.\n :param row_split_threshold: The minimum number of consecutive empty rows required to trigger a split.\n :param column_split_threshold: The minimum number of consecutive empty columns to trigger a split.\n \"\"\"\n\n # Step 1: Split by rows\n new_sub_tables = self._split_dataframe(df=df, split_threshold=row_split_threshold, axis=\"row\")\n\n # Step 2: Split by columns\n final_tables = []\n for table in new_sub_tables:\n final_tables.extend(self._split_dataframe(df=table, split_threshold=column_split_threshold, axis=\"column\"))\n\n # Step 3: Recursively reapply splitting checked by whether any new empty rows appear after column split\n result = []\n for table in final_tables:\n # Check if there are consecutive rows >= row_split_threshold now present\n if len(self._find_split_indices(df=table, split_threshold=row_split_threshold, axis=\"row\")) > 0:\n result.extend(\n self._recursive_split(\n df=table, row_split_threshold=row_split_threshold, column_split_threshold=column_split_threshold\n )\n )\n else:\n result.append(table)\n\n return result" } ]
[ "function_empty" ]
[ "haystack.components.preprocessors.csv_document_splitter.CSVDocumentSplitter._split_dataframe", "haystack.components.preprocessors.csv_document_splitter.CSVDocumentSplitter._recursive_split" ]
Python
2
2
{ "total_num": 23, "base_passed_num": 15 }
[ "haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::_find_and_remove_header_footer", "haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::_find_longest_common_ngram", "haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::_allngram", "haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::_ascii_only", "haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::run" ]
haystack
[ "haystack/components/preprocessors/document_cleaner.py", "haystack/components/preprocessors/document_cleaner.py", "haystack/components/preprocessors/document_cleaner.py", "haystack/components/preprocessors/document_cleaner.py", "haystack/components/preprocessors/document_cleaner.py", "haystack/components/preprocessors/document_cleaner.py" ]
[ "test/components/preprocessors/test_document_cleaner.py" ]
[ { "class_start_lineno": 18, "class_end_lineno": 325, "func_start_lineno": 231, "func_end_lineno": 267, "func_code": " def _find_and_remove_header_footer(\n self, text: str, n_chars: int, n_first_pages_to_ignore: int, n_last_pages_to_ignore: int\n ) -> str:\n \"\"\"\n Heuristic to find footers and headers across different pages by searching for the longest common string.\n\n Pages in the text need to be separated by form feed character \"\\f\".\n For headers, we only search in the first n_chars characters (for footer: last n_chars).\n Note: This heuristic uses exact matches and therefore works well for footers like \"Copyright 2019 by XXX\",\n but won't detect \"Page 3 of 4\" or similar.\n\n :param n_chars: The number of first/last characters where the header/footer shall be searched in.\n :param n_first_pages_to_ignore: The number of first pages to ignore\n (e.g. TOCs often don't contain footer/header).\n :param n_last_pages_to_ignore: The number of last pages to ignore.\n :returns: The text without the found headers and footers.\n \"\"\"\n\n pages = text.split(\"\\f\")\n\n # header\n start_of_pages = [p[:n_chars] for p in pages[n_first_pages_to_ignore:-n_last_pages_to_ignore]]\n found_header = self._find_longest_common_ngram(start_of_pages)\n if found_header:\n pages = [page.replace(found_header, \"\") for page in pages]\n\n # footer\n end_of_pages = [p[-n_chars:] for p in pages[n_first_pages_to_ignore:-n_last_pages_to_ignore]]\n found_footer = self._find_longest_common_ngram(end_of_pages)\n if found_footer:\n pages = [page.replace(found_footer, \"\") for page in pages]\n\n logger.debug(\n \"Removed header '{header}' and footer '{footer}' in document\", header=found_header, footer=found_footer\n )\n text = \"\\f\".join(pages)\n return text" }, { "class_start_lineno": 18, "class_end_lineno": 325, "func_start_lineno": 306, "func_end_lineno": 325, "func_code": " def _find_longest_common_ngram(self, sequences: List[str], min_ngram: int = 3, max_ngram: int = 30) -> str:\n \"\"\"\n Find the longest common ngram across a list of text sequences (e.g. start of pages).\n\n Considering all ngram lengths between the minimum and maximum length. 
Helpful for finding footers, headers etc.\n Empty sequences are ignored.\n\n :param sequences: The list of strings that shall be searched for common n_grams.\n :param max_ngram: The maximum length of ngram to consider.\n :param min_ngram: The minimum length of ngram to consider.\n :returns: The longest ngram that all sequences have in common.\n \"\"\"\n sequences = [s for s in sequences if s] # filter empty sequences\n if not sequences:\n return \"\"\n seqs_ngrams = map(partial(self._allngram, min_ngram=min_ngram, max_ngram=max_ngram), sequences)\n intersection = reduce(set.intersection, seqs_ngrams)\n\n longest = max(intersection, key=len, default=\"\")\n return longest if longest.strip() else \"\"" }, { "class_start_lineno": 18, "class_end_lineno": 325, "func_start_lineno": 290, "func_end_lineno": 304, "func_code": " def _allngram(self, seq: str, min_ngram: int, max_ngram: int) -> Set[str]:\n \"\"\"\n Generates all possible ngrams from a given sequence of text.\n\n Considering all ngram lengths between the minimum and maximum length.\n\n :param seq: The sequence to generate ngrams from.\n :param min_ngram: The minimum length of ngram to consider.\n :param max_ngram: The maximum length of ngram to consider.\n :returns: A set of all ngrams from the given sequence.\n \"\"\"\n lengths = range(min_ngram, max_ngram) if max_ngram else range(min_ngram, len(seq))\n ngrams = map(partial(self._ngram, seq), lengths)\n res = set(chain.from_iterable(ngrams))\n return res" }, { "class_start_lineno": 18, "class_end_lineno": 325, "func_start_lineno": 219, "func_end_lineno": 229, "func_code": " def _remove_repeated_substrings(self, text: str) -> str:\n \"\"\"\n Remove any substrings from the text that occur repeatedly on every page. For example headers or footers.\n\n Pages in the text need to be separated by form feed character \"\\f\".\n :param text: Text to clean.\n :returns: The text without the repeated substrings.\n \"\"\"\n return self._find_and_remove_header_footer(\n text, n_chars=300, n_first_pages_to_ignore=1, n_last_pages_to_ignore=1\n )" }, { "class_start_lineno": 18, "class_end_lineno": 325, "func_start_lineno": 158, "func_end_lineno": 171, "func_code": " def _ascii_only(self, text: str) -> str:\n \"\"\"\n Convert the text to ASCII only.\n\n Will remove accents from characters and replace them with ASCII characters.\n Other non-ASCII characters will be removed.\n\n :param text: Text to convert to ASCII only.\n :returns: The text in ASCII only.\n \"\"\"\n\n # First normalize the text to NFKD to separate the characters and their diacritics\n # Then encode it to ASCII and ignore any characters that can't be encoded\n return self._normalize_unicode(text, \"NFKD\").encode(\"ascii\", \"ignore\").decode(\"utf-8\")" }, { "class_start_lineno": 18, "class_end_lineno": 325, "func_start_lineno": 93, "func_end_lineno": 145, "func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Cleans up the documents.\n\n :param documents: List of Documents to clean.\n\n :returns: A dictionary with the following key:\n - `documents`: List of cleaned Documents.\n\n :raises TypeError: if documents is not a list of Documents.\n \"\"\"\n if not isinstance(documents, list) or documents and not isinstance(documents[0], Document):\n raise TypeError(\"DocumentCleaner expects a List of Documents as input.\")\n\n cleaned_docs = []\n for doc in documents:\n if doc.content is None:\n logger.warning(\n \"DocumentCleaner only cleans text documents but document.content for document ID\"\n \" %{document_id} is None.\",\n 
document_id=doc.id,\n )\n cleaned_docs.append(doc)\n continue\n text = doc.content\n\n if self.unicode_normalization:\n text = self._normalize_unicode(text, self.unicode_normalization)\n if self.ascii_only:\n text = self._ascii_only(text)\n if self.remove_extra_whitespaces:\n text = self._remove_extra_whitespaces(text)\n if self.remove_empty_lines:\n text = self._remove_empty_lines(text)\n if self.remove_substrings:\n text = self._remove_substrings(text, self.remove_substrings)\n if self.remove_regex:\n text = self._remove_regex(text, self.remove_regex)\n if self.remove_repeated_substrings:\n text = self._remove_repeated_substrings(text)\n\n clean_doc = Document(\n id=doc.id if self.keep_id else \"\",\n content=text,\n blob=doc.blob,\n meta=deepcopy(doc.meta),\n score=doc.score,\n embedding=doc.embedding,\n sparse_embedding=doc.sparse_embedding,\n )\n cleaned_docs.append(clean_doc)\n\n return {\"documents\": cleaned_docs}" } ]
[ "function_empty" ]
[ "haystack.components.preprocessors.document_cleaner.DocumentCleaner._find_and_remove_header_footer", "haystack.components.preprocessors.document_cleaner.DocumentCleaner._find_longest_common_ngram", "haystack.components.preprocessors.document_cleaner.DocumentCleaner._allngram", "haystack.components.preprocessors.document_cleaner.DocumentCleaner._remove_repeated_substrings", "haystack.components.preprocessors.document_cleaner.DocumentCleaner._ascii_only", "haystack.components.preprocessors.document_cleaner.DocumentCleaner.run" ]
Python
5
5
{ "total_num": 14, "base_passed_num": 1 }
[ "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_split_document", "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::run", "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_concatenate_units", "haystack.haystack.components.preprocessors.sentence_tokenizer.SentenceSplitter::split_sentences" ]
haystack
[ "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/sentence_tokenizer.py", "haystack/components/preprocessors/document_splitter.py" ]
[ "test/components/preprocessors/test_document_splitter.py" ]
[ { "class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 204, "func_end_lineno": 211, "func_code": " def _split_document(self, doc: Document) -> List[Document]:\n if self.split_by == \"sentence\" or self.respect_sentence_boundary:\n return self._split_by_nltk_sentence(doc)\n\n if self.split_by == \"function\" and self.splitting_function is not None:\n return self._split_by_function(doc)\n\n return self._split_by_character(doc)" }, { "class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 166, "func_end_lineno": 202, "func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Split documents into smaller parts.\n\n Splits documents by the unit expressed in `split_by`, with a length of `split_length`\n and an overlap of `split_overlap`.\n\n :param documents: The documents to split.\n :returns: A dictionary with the following key:\n - `documents`: List of documents with the split texts. Each document includes:\n - A metadata field `source_id` to track the original document.\n - A metadata field `page_number` to track the original page number.\n - All other metadata copied from the original document.\n\n :raises TypeError: if the input is not a list of Documents.\n :raises ValueError: if the content of a document is None.\n \"\"\"\n if self._use_sentence_splitter and self.sentence_splitter is None:\n raise RuntimeError(\n \"The component DocumentSplitter wasn't warmed up. Run 'warm_up()' before calling 'run()'.\"\n )\n\n if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):\n raise TypeError(\"DocumentSplitter expects a List of Documents as input.\")\n\n split_docs: List[Document] = []\n for doc in documents:\n if doc.content is None:\n raise ValueError(\n f\"DocumentSplitter only works with text documents but content for document ID {doc.id} is None.\"\n )\n if doc.content == \"\":\n logger.warning(\"Document ID {doc_id} has an empty content. Skipping this document.\", doc_id=doc.id)\n continue\n\n split_docs += self._split_document(doc)\n return {\"documents\": split_docs}" }, { "class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 263, "func_end_lineno": 306, "func_code": " def _concatenate_units(\n self, elements: List[str], split_length: int, split_overlap: int, split_threshold: int\n ) -> Tuple[List[str], List[int], List[int]]:\n \"\"\"\n Concatenates the elements into parts of split_length units.\n\n Keeps track of the original page number that each element belongs. If the length of the current units is less\n than the pre-defined `split_threshold`, it does not create a new split. 
Instead, it concatenates the current\n units with the last split, preventing the creation of excessively small splits.\n \"\"\"\n\n text_splits: List[str] = []\n splits_pages: List[int] = []\n splits_start_idxs: List[int] = []\n cur_start_idx = 0\n cur_page = 1\n segments = windowed(elements, n=split_length, step=split_length - split_overlap)\n\n for seg in segments:\n current_units = [unit for unit in seg if unit is not None]\n txt = \"\".join(current_units)\n\n # check if length of current units is below split_threshold\n if len(current_units) < split_threshold and len(text_splits) > 0:\n # concatenate the last split with the current one\n text_splits[-1] += txt\n\n # NOTE: This line skips documents that have content=\"\"\n elif len(txt) > 0:\n text_splits.append(txt)\n splits_pages.append(cur_page)\n splits_start_idxs.append(cur_start_idx)\n\n processed_units = current_units[: split_length - split_overlap]\n cur_start_idx += len(\"\".join(processed_units))\n\n if self.split_by == \"page\":\n num_page_breaks = len(processed_units)\n else:\n num_page_breaks = sum(processed_unit.count(\"\\f\") for processed_unit in processed_units)\n\n cur_page += num_page_breaks\n\n return text_splits, splits_pages, splits_start_idxs" }, { "class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 238, "func_end_lineno": 251, "func_code": " def _split_by_character(self, doc) -> List[Document]:\n split_at = _CHARACTER_SPLIT_BY_MAPPING[self.split_by]\n units = doc.content.split(split_at)\n # Add the delimiter back to all units except the last one\n for i in range(len(units) - 1):\n units[i] += split_at\n text_splits, splits_pages, splits_start_idxs = self._concatenate_units(\n units, self.split_length, self.split_overlap, self.split_threshold\n )\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n return self._create_docs_from_splits(\n text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata\n )" }, { "class_start_lineno": 116, "class_end_lineno": 238, "func_start_lineno": 147, "func_end_lineno": 159, "func_code": " def split_sentences(self, text: str) -> List[Dict[str, Any]]:\n \"\"\"\n Splits a text into sentences including references to original char positions for each split.\n\n :param text: The text to split.\n :returns: list of sentences with positions.\n \"\"\"\n sentence_spans = list(self.sentence_tokenizer.span_tokenize(text))\n if self.use_split_rules:\n sentence_spans = SentenceSplitter._apply_split_rules(text, sentence_spans)\n\n sentences = [{\"sentence\": text[start:end], \"start\": start, \"end\": end} for start, end in sentence_spans]\n return sentences" }, { "class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 213, "func_end_lineno": 236, "func_code": " def _split_by_nltk_sentence(self, doc: Document) -> List[Document]:\n split_docs = []\n\n result = self.sentence_splitter.split_sentences(doc.content) # type: ignore # None check is done in run()\n units = [sentence[\"sentence\"] for sentence in result]\n\n if self.respect_sentence_boundary:\n text_splits, splits_pages, splits_start_idxs = self._concatenate_sentences_based_on_word_amount(\n sentences=units, split_length=self.split_length, split_overlap=self.split_overlap\n )\n else:\n text_splits, splits_pages, splits_start_idxs = self._concatenate_units(\n elements=units,\n split_length=self.split_length,\n split_overlap=self.split_overlap,\n split_threshold=self.split_threshold,\n )\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = 
doc.id\n split_docs += self._create_docs_from_splits(\n text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata\n )\n\n return split_docs" } ]
[ "function_empty", "TDD" ]
[ "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_document", "haystack.components.preprocessors.document_splitter.DocumentSplitter.run", "haystack.components.preprocessors.document_splitter.DocumentSplitter._concatenate_units", "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_by_character", "haystack.components.preprocessors.sentence_tokenizer.SentenceSplitter.split_sentences", "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_by_nltk_sentence" ]
Python
2
4
{ "total_num": 53, "base_passed_num": 20 }
[ "haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_chunk_length", "haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_split_chunk", "haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_apply_overlap", "haystack.haystack.components.preprocessors.sentence_tokenizer.SentenceSplitter::split_sentences", "haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_chunk_text" ]
haystack
[ "haystack/components/preprocessors/recursive_splitter.py", "haystack/components/preprocessors/recursive_splitter.py", "haystack/components/preprocessors/recursive_splitter.py", "haystack/components/preprocessors/recursive_splitter.py", "haystack/components/preprocessors/sentence_tokenizer.py", "haystack/components/preprocessors/recursive_splitter.py" ]
[ "test/components/preprocessors/test_recursive_splitter.py" ]
[ { "class_start_lineno": 15, "class_end_lineno": 421, "func_start_lineno": 215, "func_end_lineno": 227, "func_code": " def _chunk_length(self, text: str) -> int:\n \"\"\"\n Split the text by whitespace and count non-empty elements.\n\n :param: The text to be split.\n :return: The number of words in the text.\n \"\"\"\n\n if self.split_units == \"word\":\n words = [word for word in text.split(\" \") if word]\n return len(words)\n\n return len(text)" }, { "class_start_lineno": 15, "class_end_lineno": 421, "func_start_lineno": 204, "func_end_lineno": 213, "func_code": " def _get_overlap(self, overlapped_chunks: List[str]) -> Tuple[str, str]:\n \"\"\"Get the previous overlapped chunk instead of the original chunk.\"\"\"\n prev_chunk = overlapped_chunks[-1]\n overlap_start = max(0, self._chunk_length(prev_chunk) - self.split_overlap)\n if self.split_units == \"word\":\n word_chunks = prev_chunk.split()\n overlap = \" \".join(word_chunks[overlap_start:])\n else:\n overlap = prev_chunk[overlap_start:]\n return overlap, prev_chunk" }, { "class_start_lineno": 15, "class_end_lineno": 421, "func_start_lineno": 114, "func_end_lineno": 133, "func_code": " def _split_chunk(self, current_chunk: str) -> Tuple[str, str]:\n \"\"\"\n Splits a chunk based on the split_length and split_units attribute.\n\n :param current_chunk: The current chunk to be split.\n :returns:\n A tuple containing the current chunk and the remaining words or characters.\n \"\"\"\n\n if self.split_units == \"word\":\n words = current_chunk.split()\n current_chunk = \" \".join(words[: self.split_length])\n remaining_words = words[self.split_length :]\n return current_chunk, \" \".join(remaining_words)\n\n # split by characters\n text = current_chunk\n current_chunk = text[: self.split_length]\n remaining_chars = text[self.split_length :]\n return current_chunk, remaining_chars" }, { "class_start_lineno": 15, "class_end_lineno": 421, "func_start_lineno": 135, "func_end_lineno": 202, "func_code": " def _apply_overlap(self, chunks: List[str]) -> List[str]:\n \"\"\"\n Applies an overlap between consecutive chunks if the chunk_overlap attribute is greater than zero.\n\n Works for both word- and character-level splitting. It trims the last chunk if it exceeds the split_length and\n adds the trimmed content to the next chunk. If the last chunk is still too long after trimming, it splits it\n and adds the first chunk to the list. This process continues until the last chunk is within the split_length.\n\n :param chunks: A list of text chunks.\n :returns:\n A list of text chunks with the overlap applied.\n \"\"\"\n overlapped_chunks: List[str] = []\n\n for idx, chunk in enumerate(chunks):\n if idx == 0:\n overlapped_chunks.append(chunk)\n continue\n\n # get the overlap between the current and previous chunk\n overlap, prev_chunk = self._get_overlap(overlapped_chunks)\n if overlap == prev_chunk:\n logger.warning(\n \"Overlap is the same as the previous chunk. 
\"\n \"Consider increasing the `split_length` parameter or decreasing the `split_overlap` parameter.\"\n )\n\n # create a new chunk starting with the overlap\n current_chunk = overlap + \" \" + chunk if self.split_units == \"word\" else overlap + chunk\n\n # if this new chunk exceeds 'split_length', trim it and move the remaining text to the next chunk\n # if this is the last chunk, another new chunk will contain the trimmed text preceded by the overlap\n # of the last chunk\n if self._chunk_length(current_chunk) > self.split_length:\n current_chunk, remaining_text = self._split_chunk(current_chunk)\n if idx < len(chunks) - 1:\n chunks[idx + 1] = remaining_text + (\" \" if self.split_units == \"word\" else \"\") + chunks[idx + 1]\n elif remaining_text:\n # create a new chunk with the trimmed text preceded by the overlap of the last chunk\n overlapped_chunks.append(current_chunk)\n chunk = remaining_text\n overlap, _ = self._get_overlap(overlapped_chunks)\n current_chunk = overlap + \" \" + chunk if self.split_units == \"word\" else overlap + chunk\n\n overlapped_chunks.append(current_chunk)\n\n # it can still be that the new last chunk exceeds the 'split_length'\n # continue splitting until the last chunk is within 'split_length'\n if idx == len(chunks) - 1 and self._chunk_length(current_chunk) > self.split_length:\n last_chunk = overlapped_chunks.pop()\n first_chunk, remaining_chunk = self._split_chunk(last_chunk)\n overlapped_chunks.append(first_chunk)\n\n while remaining_chunk:\n # combine overlap with remaining chunk\n overlap, _ = self._get_overlap(overlapped_chunks)\n current = overlap + (\" \" if self.split_units == \"word\" else \"\") + remaining_chunk\n\n # if it fits within split_length we are done\n if self._chunk_length(current) <= self.split_length:\n overlapped_chunks.append(current)\n break\n\n # otherwise split it again\n first_chunk, remaining_chunk = self._split_chunk(current)\n overlapped_chunks.append(first_chunk)\n\n return overlapped_chunks" }, { "class_start_lineno": 116, "class_end_lineno": 238, "func_start_lineno": 147, "func_end_lineno": 159, "func_code": " def split_sentences(self, text: str) -> List[Dict[str, Any]]:\n \"\"\"\n Splits a text into sentences including references to original char positions for each split.\n\n :param text: The text to split.\n :returns: list of sentences with positions.\n \"\"\"\n sentence_spans = list(self.sentence_tokenizer.span_tokenize(text))\n if self.use_split_rules:\n sentence_spans = SentenceSplitter._apply_split_rules(text, sentence_spans)\n\n sentences = [{\"sentence\": text[start:end], \"start\": start, \"end\": end} for start, end in sentence_spans]\n return sentences" }, { "class_start_lineno": 15, "class_end_lineno": 421, "func_start_lineno": 229, "func_end_lineno": 311, "func_code": " def _chunk_text(self, text: str) -> List[str]:\n \"\"\"\n Recursive chunking algorithm that divides text into smaller chunks based on a list of separator characters.\n\n It starts with a list of separator characters (e.g., [\"\\n\\n\", \"sentence\", \"\\n\", \" \"]) and attempts to divide\n the text using the first separator. If the resulting chunks are still larger than the specified chunk size,\n it moves to the next separator in the list. 
This process continues recursively, progressively applying each\n specific separator until the chunks meet the desired size criteria.\n\n :param text: The text to be split into chunks.\n :returns:\n A list of text chunks.\n \"\"\"\n if self._chunk_length(text) <= self.split_length:\n return [text]\n\n for curr_separator in self.separators: # type: ignore # the caller already checked that separators is not None\n if curr_separator == \"sentence\":\n # re. ignore: correct SentenceSplitter initialization is checked at the initialization of the component\n sentence_with_spans = self.nltk_tokenizer.split_sentences(text) # type: ignore\n splits = [sentence[\"sentence\"] for sentence in sentence_with_spans]\n else:\n # add escape \"\\\" to the separator and wrapped it in a group so that it's included in the splits as well\n escaped_separator = re.escape(curr_separator)\n escaped_separator = f\"({escaped_separator})\"\n\n # split the text and merge every two consecutive splits, i.e.: the text and the separator after it\n splits = re.split(escaped_separator, text)\n splits = [\n \"\".join([splits[i], splits[i + 1]]) if i < len(splits) - 1 else splits[i]\n for i in range(0, len(splits), 2)\n ]\n\n # remove last split if it's empty\n splits = splits[:-1] if splits[-1] == \"\" else splits\n\n if len(splits) == 1: # go to next separator, if current separator not found in the text\n continue\n\n chunks = []\n current_chunk: List[str] = []\n current_length = 0\n\n # check splits, if any is too long, recursively chunk it, otherwise add to current chunk\n for split in splits:\n split_text = split\n\n # if adding this split exceeds chunk_size, process current_chunk\n if current_length + self._chunk_length(split_text) > self.split_length:\n # process current_chunk\n if current_chunk: # keep the good splits\n chunks.append(\"\".join(current_chunk))\n current_chunk = []\n current_length = 0\n\n # recursively handle splits that are too large\n if self._chunk_length(split_text) > self.split_length:\n if curr_separator == self.separators[-1]:\n # tried last separator, can't split further, do a fixed-split based on word/character\n fall_back_chunks = self._fall_back_to_fixed_chunking(split_text, self.split_units)\n chunks.extend(fall_back_chunks)\n else:\n chunks.extend(self._chunk_text(split_text))\n current_length += self._chunk_length(split_text)\n\n else:\n current_chunk.append(split_text)\n current_length += self._chunk_length(split_text)\n else:\n current_chunk.append(split_text)\n current_length += self._chunk_length(split_text)\n\n if current_chunk:\n chunks.append(\"\".join(current_chunk))\n\n if self.split_overlap > 0:\n chunks = self._apply_overlap(chunks)\n\n if chunks:\n return chunks\n\n # if no separator worked, fall back to word- or character-level chunking\n return self._fall_back_to_fixed_chunking(text, self.split_units)" } ]
[ "function_empty", "TDD" ]
[ "haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._chunk_length", "haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._get_overlap", "haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._split_chunk", "haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._apply_overlap", "haystack.components.preprocessors.sentence_tokenizer.SentenceSplitter.split_sentences", "haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._chunk_text" ]
Python
4
5
{ "total_num": 35, "base_passed_num": 8 }
[ "haystack.haystack.utils.device.ComponentDevice::to_dict", "haystack.haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker::to_dict", "haystack.haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker::_prepare_texts_to_embed", "haystack.haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker::_greedy_diversity_order", "haystack.haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker::run" ]
haystack
[ "haystack/utils/device.py", "haystack/components/rankers/sentence_transformers_diversity.py", "haystack/components/rankers/sentence_transformers_diversity.py", "haystack/components/rankers/sentence_transformers_diversity.py", "haystack/components/rankers/sentence_transformers_diversity.py" ]
[ "test/components/rankers/test_sentence_transformers_diversity.py" ]
[ { "class_start_lineno": 240, "class_end_lineno": 480, "func_start_lineno": 450, "func_end_lineno": 463, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Convert the component device representation to a JSON-serializable dictionary.\n\n :returns:\n The dictionary representation.\n \"\"\"\n if self._single_device is not None:\n return {\"type\": \"single\", \"device\": str(self._single_device)}\n elif self._multiple_devices is not None:\n return {\"type\": \"multiple\", \"device_map\": self._multiple_devices.to_dict()}\n else:\n # Unreachable\n assert False" }, { "class_start_lineno": 76, "class_end_lineno": 435, "func_start_lineno": 212, "func_end_lineno": 241, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialization_dict = default_to_dict(\n self,\n model=self.model_name_or_path,\n top_k=self.top_k,\n device=self.device.to_dict(),\n token=self.token.to_dict() if self.token else None,\n similarity=str(self.similarity),\n query_prefix=self.query_prefix,\n query_suffix=self.query_suffix,\n document_prefix=self.document_prefix,\n document_suffix=self.document_suffix,\n meta_fields_to_embed=self.meta_fields_to_embed,\n embedding_separator=self.embedding_separator,\n strategy=str(self.strategy),\n lambda_threshold=self.lambda_threshold,\n model_kwargs=self.model_kwargs,\n tokenizer_kwargs=self.tokenizer_kwargs,\n config_kwargs=self.config_kwargs,\n backend=self.backend,\n )\n if serialization_dict[\"init_parameters\"].get(\"model_kwargs\") is not None:\n serialize_hf_model_kwargs(serialization_dict[\"init_parameters\"][\"model_kwargs\"])\n return serialization_dict" }, { "class_start_lineno": 76, "class_end_lineno": 435, "func_start_lineno": 261, "func_end_lineno": 277, "func_code": " def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:\n \"\"\"\n Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.\n \"\"\"\n texts_to_embed = []\n for doc in documents:\n meta_values_to_embed = [\n str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key]\n ]\n text_to_embed = (\n self.document_prefix\n + self.embedding_separator.join(meta_values_to_embed + [doc.content or \"\"])\n + self.document_suffix\n )\n texts_to_embed.append(text_to_embed)\n\n return texts_to_embed" }, { "class_start_lineno": 76, "class_end_lineno": 435, "func_start_lineno": 279, "func_end_lineno": 323, "func_code": " def _greedy_diversity_order(self, query: str, documents: List[Document]) -> List[Document]:\n \"\"\"\n Orders the given list of documents to maximize diversity.\n\n The algorithm first calculates embeddings for each document and the query. It starts by selecting the document\n that is semantically closest to the query. Then, for each remaining document, it selects the one that, on\n average, is least similar to the already selected documents. 
This process continues until all documents are\n selected, resulting in a list where each subsequent document contributes the most to the overall diversity of\n the selected set.\n\n :param query: The search query.\n :param documents: The list of Document objects to be ranked.\n\n :return: A list of documents ordered to maximize diversity.\n \"\"\"\n texts_to_embed = self._prepare_texts_to_embed(documents)\n\n doc_embeddings, query_embedding = self._embed_and_normalize(query, texts_to_embed)\n\n n = len(documents)\n selected: List[int] = []\n\n # Compute the similarity vector between the query and documents\n query_doc_sim = query_embedding @ doc_embeddings.T\n\n # Start with the document with the highest similarity to the query\n selected.append(int(torch.argmax(query_doc_sim).item()))\n\n selected_sum = doc_embeddings[selected[0]] / n\n\n while len(selected) < n:\n # Compute mean of dot products of all selected documents and all other documents\n similarities = selected_sum @ doc_embeddings.T\n # Mask documents that are already selected\n similarities[selected] = torch.inf\n # Select the document with the lowest total similarity score\n index_unselected = int(torch.argmin(similarities).item())\n selected.append(index_unselected)\n # It's enough just to add to the selected vectors because dot product is distributive\n # It's divided by n for numerical stability\n selected_sum += doc_embeddings[index_unselected] / n\n\n ranked_docs: List[Document] = [documents[i] for i in selected]\n\n return ranked_docs" }, { "class_start_lineno": 76, "class_end_lineno": 435, "func_start_lineno": 388, "func_end_lineno": 435, "func_code": " def run(\n self,\n query: str,\n documents: List[Document],\n top_k: Optional[int] = None,\n lambda_threshold: Optional[float] = None,\n ) -> Dict[str, List[Document]]:\n \"\"\"\n Rank the documents based on their diversity.\n\n :param query: The search query.\n :param documents: List of Document objects to be ranker.\n :param top_k: Optional. An integer to override the top_k set during initialization.\n :param lambda_threshold: Override the trade-off parameter between relevance and diversity. Only used when\n strategy is \"maximum_margin_relevance\".\n\n :returns: A dictionary with the following key:\n - `documents`: List of Document objects that have been selected based on the diversity ranking.\n\n :raises ValueError: If the top_k value is less than or equal to 0.\n :raises RuntimeError: If the component has not been warmed up.\n \"\"\"\n if self.model is None:\n error_msg = (\n \"The component SentenceTransformersDiversityRanker wasn't warmed up. \"\n \"Run 'warm_up()' before calling 'run()'.\"\n )\n raise RuntimeError(error_msg)\n\n if not documents:\n return {\"documents\": []}\n\n if top_k is None:\n top_k = self.top_k\n elif not 0 < top_k <= len(documents):\n raise ValueError(f\"top_k must be between 1 and {len(documents)}, but got {top_k}\")\n\n if self.strategy == DiversityRankingStrategy.MAXIMUM_MARGIN_RELEVANCE:\n if lambda_threshold is None:\n lambda_threshold = self.lambda_threshold\n self._check_lambda_threshold(lambda_threshold, self.strategy)\n re_ranked_docs = self._maximum_margin_relevance(\n query=query, documents=documents, lambda_threshold=lambda_threshold, top_k=top_k\n )\n else:\n re_ranked_docs = self._greedy_diversity_order(query=query, documents=documents)\n\n return {\"documents\": re_ranked_docs[:top_k]}" } ]
[ "function_empty", "TDD" ]
[ "haystack.utils.device.ComponentDevice.to_dict", "haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker.to_dict", "haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker._prepare_texts_to_embed", "haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker._greedy_diversity_order", "haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker.run" ]
Python
2
5
{ "total_num": 53, "base_passed_num": 17 }
[ "haystack.haystack.core.serialization.default_to_dict", "haystack.haystack.utils.hf.serialize_hf_model_kwargs", "haystack.haystack.components.rankers.transformers_similarity.TransformersSimilarityRanker::to_dict" ]
haystack
[ "haystack/core/serialization.py", "haystack/utils/hf.py", "haystack/components/rankers/transformers_similarity.py" ]
[ "test/components/rankers/test_transformers_similarity.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 172, "func_end_lineno": 210, "func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}" }, { "class_start_lineno": 1, "class_end_lineno": 395, "func_start_lineno": 98, "func_end_lineno": 112, "func_code": "def serialize_hf_model_kwargs(kwargs: Dict[str, Any]):\n \"\"\"\n Recursively serialize HuggingFace specific model keyword arguments in-place to make them JSON serializable.\n\n :param kwargs: The keyword arguments to serialize\n \"\"\"\n torch_import.check()\n\n for k, v in kwargs.items():\n # torch.dtype\n if isinstance(v, torch.dtype):\n kwargs[k] = str(v)\n\n if isinstance(v, dict):\n serialize_hf_model_kwargs(v)" }, { "class_start_lineno": 24, "class_end_lineno": 309, "func_start_lineno": 157, "func_end_lineno": 182, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialization_dict = default_to_dict(\n self,\n device=None,\n model=self.model_name_or_path,\n token=self.token.to_dict() if self.token else None,\n top_k=self.top_k,\n query_prefix=self.query_prefix,\n document_prefix=self.document_prefix,\n meta_fields_to_embed=self.meta_fields_to_embed,\n embedding_separator=self.embedding_separator,\n scale_score=self.scale_score,\n calibration_factor=self.calibration_factor,\n score_threshold=self.score_threshold,\n model_kwargs=self.model_kwargs,\n tokenizer_kwargs=self.tokenizer_kwargs,\n )\n\n serialize_hf_model_kwargs(serialization_dict[\"init_parameters\"][\"model_kwargs\"])\n return serialization_dict" } ]
[ "function_empty" ]
[ "haystack.core.serialization.default_to_dict", "haystack.utils.hf.serialize_hf_model_kwargs", "haystack.components.rankers.transformers_similarity.TransformersSimilarityRanker.to_dict" ]
Python
3
3
{ "total_num": 26, "base_passed_num": 20 }
[ "haystack.haystack.core.serialization.default_to_dict", "haystack.haystack.utils.hf.serialize_hf_model_kwargs", "haystack.haystack.components.readers.extractive.ExtractiveReader::to_dict", "haystack.haystack.components.readers.extractive.ExtractiveReader::_should_keep", "haystack.haystack.components.readers.extractive.ExtractiveReader::deduplicate_by_overlap" ]
haystack
[ "haystack/core/serialization.py", "haystack/utils/hf.py", "haystack/components/readers/extractive.py", "haystack/components/readers/extractive.py", "haystack/components/readers/extractive.py" ]
[ "test/components/readers/test_extractive.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 172, "func_end_lineno": 210, "func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}" }, { "class_start_lineno": 1, "class_end_lineno": 395, "func_start_lineno": 98, "func_end_lineno": 112, "func_code": "def serialize_hf_model_kwargs(kwargs: Dict[str, Any]):\n \"\"\"\n Recursively serialize HuggingFace specific model keyword arguments in-place to make them JSON serializable.\n\n :param kwargs: The keyword arguments to serialize\n \"\"\"\n torch_import.check()\n\n for k, v in kwargs.items():\n # torch.dtype\n if isinstance(v, torch.dtype):\n kwargs[k] = str(v)\n\n if isinstance(v, dict):\n serialize_hf_model_kwargs(v)" }, { "class_start_lineno": 26, "class_end_lineno": 660, "func_start_lineno": 136, "func_end_lineno": 160, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialization_dict = default_to_dict(\n self,\n model=self.model_name_or_path,\n device=None,\n token=self.token.to_dict() if self.token else None,\n max_seq_length=self.max_seq_length,\n top_k=self.top_k,\n score_threshold=self.score_threshold,\n stride=self.stride,\n max_batch_size=self.max_batch_size,\n answers_per_seq=self.answers_per_seq,\n no_answer=self.no_answer,\n calibration_factor=self.calibration_factor,\n model_kwargs=self.model_kwargs,\n )\n\n serialize_hf_model_kwargs(serialization_dict[\"init_parameters\"][\"model_kwargs\"])\n return serialization_dict" }, { "class_start_lineno": 26, "class_end_lineno": 660, "func_start_lineno": 432, "func_end_lineno": 492, "func_code": " def _should_keep(\n self, candidate_answer: ExtractedAnswer, current_answers: List[ExtractedAnswer], overlap_threshold: float\n ) -> bool:\n \"\"\"\n Determines if the answer should be kept based on how much it overlaps with previous answers.\n\n NOTE: We might want to avoid throwing away answers that only have a few character (or word) overlap:\n - E.g. 
The answers \"the river in\" and \"in Maine\" from the context \"I want to go to the river in Maine.\"\n might both want to be kept.\n\n :param candidate_answer:\n Candidate answer that will be checked if it should be kept.\n :param current_answers:\n Current list of answers that will be kept.\n :param overlap_threshold:\n If the overlap between two answers is greater than this threshold then return False.\n \"\"\"\n keep = True\n\n # If the candidate answer doesn't have a document keep it\n if not candidate_answer.document:\n return keep\n\n for ans in current_answers:\n # If an answer in current_answers doesn't have a document skip the comparison\n if not ans.document:\n continue\n\n # If offset is missing then keep both\n if ans.document_offset is None:\n continue\n\n # If offset is missing then keep both\n if candidate_answer.document_offset is None:\n continue\n\n # If the answers come from different documents then keep both\n if candidate_answer.document.id != ans.document.id:\n continue\n\n overlap_len = self._calculate_overlap(\n answer1_start=ans.document_offset.start,\n answer1_end=ans.document_offset.end,\n answer2_start=candidate_answer.document_offset.start,\n answer2_end=candidate_answer.document_offset.end,\n )\n\n # If overlap is 0 then keep\n if overlap_len == 0:\n continue\n\n overlap_frac_answer1 = overlap_len / (ans.document_offset.end - ans.document_offset.start)\n overlap_frac_answer2 = overlap_len / (\n candidate_answer.document_offset.end - candidate_answer.document_offset.start\n )\n\n if overlap_frac_answer1 > overlap_threshold or overlap_frac_answer2 > overlap_threshold:\n keep = False\n break\n\n return keep" }, { "class_start_lineno": 26, "class_end_lineno": 660, "func_start_lineno": 494, "func_end_lineno": 529, "func_code": " def deduplicate_by_overlap(\n self, answers: List[ExtractedAnswer], overlap_threshold: Optional[float]\n ) -> List[ExtractedAnswer]:\n \"\"\"\n De-duplicates overlapping Extractive Answers.\n\n De-duplicates overlapping Extractive Answers from the same document based on how much the spans of the\n answers overlap.\n\n :param answers:\n List of answers to be deduplicated.\n :param overlap_threshold:\n If set this will remove duplicate answers if they have an overlap larger than the\n supplied threshold. For example, for the answers \"in the river in Maine\" and \"the river\" we would remove\n one of these answers since the second answer has a 100% (1.0) overlap with the first answer.\n However, for the answers \"the river in\" and \"in Maine\" there is only a max overlap percentage of 25% so\n both of these answers could be kept if this variable is set to 0.24 or lower.\n If None is provided then all answers are kept.\n :returns:\n List of deduplicated answers.\n \"\"\"\n if overlap_threshold is None:\n return answers\n\n # Initialize with the first answer and its offsets_in_document\n deduplicated_answers = [answers[0]]\n\n # Loop over remaining answers to check for overlaps\n for ans in answers[1:]:\n keep = self._should_keep(\n candidate_answer=ans, current_answers=deduplicated_answers, overlap_threshold=overlap_threshold\n )\n if keep:\n deduplicated_answers.append(ans)\n\n return deduplicated_answers" } ]
[ "function_empty", "TDD" ]
[ "haystack.core.serialization.default_to_dict", "haystack.utils.hf.serialize_hf_model_kwargs", "haystack.components.readers.extractive.ExtractiveReader.to_dict", "haystack.components.readers.extractive.ExtractiveReader._should_keep", "haystack.components.readers.extractive.ExtractiveReader.deduplicate_by_overlap" ]
Python
4
5
{ "total_num": 34, "base_passed_num": 20 }
[ "haystack.haystack.core.serialization.default_to_dict", "haystack.haystack.document_stores.in_memory.document_store.InMemoryDocumentStore::to_dict" ]
haystack
[ "haystack/core/serialization.py", "haystack/document_stores/in_memory/document_store.py", "haystack/components/retrievers/filter_retriever.py" ]
[ "test/components/retrievers/test_filter_retriever.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 172, "func_end_lineno": 210, "func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}" }, { "class_start_lineno": 58, "class_end_lineno": 738, "func_start_lineno": 344, "func_end_lineno": 358, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n bm25_tokenization_regex=self.bm25_tokenization_regex,\n bm25_algorithm=self.bm25_algorithm,\n bm25_parameters=self.bm25_parameters,\n embedding_similarity_function=self.embedding_similarity_function,\n index=self.index,\n )" }, { "class_start_lineno": 15, "class_end_lineno": 96, "func_start_lineno": 60, "func_end_lineno": 68, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(self, document_store=docstore, filters=self.filters)" } ]
[ "function_empty" ]
[ "haystack.core.serialization.default_to_dict", "haystack.document_stores.in_memory.document_store.InMemoryDocumentStore.to_dict", "haystack.components.retrievers.filter_retriever.FilterRetriever.to_dict" ]
Python
2
2
{ "total_num": 10, "base_passed_num": 8 }
[ "haystack.haystack.core.serialization.default_to_dict", "haystack.haystack.document_stores.in_memory.document_store.InMemoryDocumentStore::to_dict", "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_split_document", "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::run" ]
haystack
[ "haystack/core/serialization.py", "haystack/document_stores/in_memory/document_store.py", "haystack/components/retrievers/sentence_window_retriever.py", "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py" ]
[ "test/components/retrievers/test_sentence_window_retriever.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 172, "func_end_lineno": 210, "func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}" }, { "class_start_lineno": 58, "class_end_lineno": 738, "func_start_lineno": 344, "func_end_lineno": 358, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n bm25_tokenization_regex=self.bm25_tokenization_regex,\n bm25_algorithm=self.bm25_algorithm,\n bm25_parameters=self.bm25_parameters,\n embedding_similarity_function=self.embedding_similarity_function,\n index=self.index,\n )" }, { "class_start_lineno": 13, "class_end_lineno": 198, "func_start_lineno": 122, "func_end_lineno": 130, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(self, document_store=docstore, window_size=self.window_size)" }, { "class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 204, "func_end_lineno": 211, "func_code": " def _split_document(self, doc: Document) -> List[Document]:\n if self.split_by == \"sentence\" or self.respect_sentence_boundary:\n return self._split_by_nltk_sentence(doc)\n\n if self.split_by == \"function\" and self.splitting_function is not None:\n return self._split_by_function(doc)\n\n return self._split_by_character(doc)" }, { "class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 166, "func_end_lineno": 202, "func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Split documents into smaller parts.\n\n Splits documents by the unit expressed in `split_by`, with a length of `split_length`\n and an overlap of `split_overlap`.\n\n :param documents: The documents to split.\n :returns: A dictionary with the following key:\n - `documents`: List of documents with the split texts. 
Each document includes:\n - A metadata field `source_id` to track the original document.\n - A metadata field `page_number` to track the original page number.\n - All other metadata copied from the original document.\n\n :raises TypeError: if the input is not a list of Documents.\n :raises ValueError: if the content of a document is None.\n \"\"\"\n if self._use_sentence_splitter and self.sentence_splitter is None:\n raise RuntimeError(\n \"The component DocumentSplitter wasn't warmed up. Run 'warm_up()' before calling 'run()'.\"\n )\n\n if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):\n raise TypeError(\"DocumentSplitter expects a List of Documents as input.\")\n\n split_docs: List[Document] = []\n for doc in documents:\n if doc.content is None:\n raise ValueError(\n f\"DocumentSplitter only works with text documents but content for document ID {doc.id} is None.\"\n )\n if doc.content == \"\":\n logger.warning(\"Document ID {doc_id} has an empty content. Skipping this document.\", doc_id=doc.id)\n continue\n\n split_docs += self._split_document(doc)\n return {\"documents\": split_docs}" } ]
[ "function_empty", "TDD" ]
[ "haystack.core.serialization.default_to_dict", "haystack.document_stores.in_memory.document_store.InMemoryDocumentStore.to_dict", "haystack.components.retrievers.sentence_window_retriever.SentenceWindowRetriever.to_dict", "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_document", "haystack.components.preprocessors.document_splitter.DocumentSplitter.run" ]
Python
3
4
{ "total_num": 16, "base_passed_num": 13 }
[ "haystack.haystack.components.routers.conditional_router.ConditionalRouter::_validate_template", "haystack.haystack.components.routers.conditional_router.ConditionalRouter::_validate_routes", "haystack.haystack.utils.type_serialization.serialize_type", "haystack.haystack.components.routers.conditional_router.ConditionalRouter::to_dict" ]
haystack
[ "haystack/components/routers/conditional_router.py", "haystack/components/routers/conditional_router.py", "haystack/utils/type_serialization.py", "haystack/components/routers/conditional_router.py" ]
[ "test/components/routers/test_conditional_router.py" ]
[ { "class_start_lineno": 29, "class_end_lineno": 433, "func_start_lineno": 371, "func_end_lineno": 383, "func_code": " def _validate_template(self, env: Environment, template_text: str):\n \"\"\"\n Validates a template string by parsing it with Jinja.\n\n :param env: A Jinja environment.\n :param template_text: A Jinja template string.\n :returns: `True` if the template is valid, `False` otherwise.\n \"\"\"\n try:\n env.parse(template_text)\n return True\n except TemplateSyntaxError:\n return False" }, { "class_start_lineno": 29, "class_end_lineno": 433, "func_start_lineno": 335, "func_end_lineno": 355, "func_code": " def _validate_routes(self, routes: List[Dict]):\n \"\"\"\n Validates a list of routes.\n\n :param routes: A list of routes.\n \"\"\"\n for route in routes:\n try:\n keys = set(route.keys())\n except AttributeError:\n raise ValueError(f\"Route must be a dictionary, got: {route}\")\n\n mandatory_fields = {\"condition\", \"output\", \"output_type\", \"output_name\"}\n has_all_mandatory_fields = mandatory_fields.issubset(keys)\n if not has_all_mandatory_fields:\n raise ValueError(\n f\"Route must contain 'condition', 'output', 'output_type' and 'output_name' fields: {route}\"\n )\n for field in [\"condition\", \"output\"]:\n if not self._validate_template(self._env, route[field]):\n raise ValueError(f\"Invalid template for field '{field}': {route[field]}\")" }, { "class_start_lineno": 1, "class_end_lineno": 170, "func_start_lineno": 19, "func_end_lineno": 52, "func_code": "def serialize_type(target: Any) -> str:\n \"\"\"\n Serializes a type or an instance to its string representation, including the module name.\n\n This function handles types, instances of types, and special typing objects.\n It assumes that non-typing objects will have a '__name__' attribute.\n\n :param target:\n The object to serialize, can be an instance or a type.\n :return:\n The string representation of the type.\n \"\"\"\n name = getattr(target, \"__name__\", str(target))\n\n # Remove the 'typing.' 
prefix when using python <3.9\n if name.startswith(\"typing.\"):\n name = name[7:]\n # Remove the arguments from the name when using python <3.9\n if \"[\" in name:\n name = name.split(\"[\")[0]\n\n # Get module name\n module = inspect.getmodule(target)\n module_name = \"\"\n # We omit the module name for builtins to not clutter the output\n if module and hasattr(module, \"__name__\") and module.__name__ != \"builtins\":\n module_name = f\"{module.__name__}\"\n\n args = get_args(target)\n if args:\n args_str = \", \".join([serialize_type(a) for a in args if a is not type(None)])\n return f\"{module_name}.{name}[{args_str}]\" if module_name else f\"{name}[{args_str}]\"\n\n return f\"{module_name}.{name}\" if module_name else f\"{name}\"" }, { "class_start_lineno": 29, "class_end_lineno": 433, "func_start_lineno": 237, "func_end_lineno": 256, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialized_routes = []\n for route in self.routes:\n # output_type needs to be serialized to a string\n serialized_routes.append({**route, \"output_type\": serialize_type(route[\"output_type\"])})\n se_filters = {name: serialize_callable(filter_func) for name, filter_func in self.custom_filters.items()}\n return default_to_dict(\n self,\n routes=serialized_routes,\n custom_filters=se_filters,\n unsafe=self._unsafe,\n validate_output_type=self._validate_output_type,\n optional_variables=self.optional_variables,\n )" } ]
[ "function_empty" ]
[ "haystack.components.routers.conditional_router.ConditionalRouter._validate_template", "haystack.components.routers.conditional_router.ConditionalRouter._validate_routes", "haystack.utils.type_serialization.serialize_type", "haystack.components.routers.conditional_router.ConditionalRouter.to_dict" ]
Python
4
4
{ "total_num": 23, "base_passed_num": 15 }
[ "haystack.haystack.components.samplers.top_p.TopPSampler::_get_documents_and_scores", "haystack.haystack.components.samplers.top_p.TopPSampler::run" ]
haystack
[ "haystack/components/samplers/top_p.py", "haystack/components/samplers/top_p.py" ]
[ "test/components/samplers/test_top_p.py" ]
[ { "class_start_lineno": 18, "class_end_lineno": 177, "func_start_lineno": 144, "func_end_lineno": 177, "func_code": " def _get_documents_and_scores(self, documents: List[Document]) -> Tuple[List[Document], List[float]]:\n \"\"\"\n Checks if documents have scores in their metadata or score field and returns the documents with scores.\n\n :param documents: List of Documents.\n :return: List of scores.\n \"\"\"\n docs_with_scores = []\n scores = []\n docs_missing_scores = []\n for doc in documents:\n score = self._get_doc_score(doc=doc, score_field=self.score_field)\n if score is None:\n docs_missing_scores.append(doc)\n else:\n scores.append(score)\n docs_with_scores.append(doc)\n\n if len(docs_missing_scores) > 0:\n missing_scores_docs_ids = [d.id for d in docs_missing_scores if d.id]\n if self.score_field:\n logger.warning(\n \"Score field '{score_field}' not found in metadata of documents with IDs: {doc_ids}.\"\n \"Make sure that all documents have a score field '{score_field_2}' in their metadata.\",\n score_field=self.score_field,\n doc_ids=\",\".join(missing_scores_docs_ids),\n score_field_2=self.score_field,\n )\n else:\n logger.warning(\n \"Ensure all documents have a valid score value. These documents {doc_ids} are missing scores.\",\n doc_ids=\",\".join(missing_scores_docs_ids),\n )\n return docs_with_scores, scores" }, { "class_start_lineno": 18, "class_end_lineno": 177, "func_start_lineno": 65, "func_end_lineno": 122, "func_code": " def run(self, documents: List[Document], top_p: Optional[float] = None):\n \"\"\"\n Filters documents using top-p sampling based on their scores.\n\n If the specified top_p results in no documents being selected (especially in cases of a low top_p value), the\n method returns the document with the highest score.\n\n :param documents: List of Document objects to be filtered.\n :param top_p: If specified, a float to override the cumulative probability threshold set during initialization.\n\n :returns: A dictionary with the following key:\n - `documents`: List of Document objects that have been selected based on the top-p sampling.\n :raises ValueError: If the top_p value is not within the range [0, 1].\n \"\"\"\n if not documents:\n return {\"documents\": []}\n\n top_p = top_p or self.top_p\n if not 0 <= top_p <= 1:\n raise ValueError(f\"top_p must be between 0 and 1. Got {top_p}.\")\n\n documents_with_scores, scores = self._get_documents_and_scores(documents)\n if len(documents_with_scores) == 0:\n logger.warning(\"No documents with scores found. 
Returning the original documents.\")\n return {\"documents\": documents}\n\n sorted_docs_with_scores = sorted(zip(documents_with_scores, scores), key=lambda x: x[1], reverse=True)\n sorted_documents, sorted_scores = [list(t) for t in zip(*sorted_docs_with_scores)]\n\n tensor_scores = torch.tensor(sorted_scores, dtype=torch.float32)\n probs = torch.nn.functional.softmax(tensor_scores, dim=-1)\n cumulative_probs = torch.cumsum(probs, dim=-1)\n\n # Check if the cumulative probabilities are close to top_p with a 1e-6 tolerance\n close_to_top_p = torch.isclose(cumulative_probs, torch.tensor(top_p, device=cumulative_probs.device), atol=1e-6)\n\n # Combine the close_to_top_p with original condition using logical OR\n condition = (cumulative_probs <= top_p) | close_to_top_p\n\n # Find the indices with cumulative probabilities that exceed top_p\n top_p_indices = torch.where(torch.BoolTensor(condition))[0]\n\n # Map the selected indices back to their original indices\n selected_docs = [sorted_documents[i.item()] for i in top_p_indices]\n\n if self.min_top_k and len(selected_docs) < self.min_top_k:\n selected_docs = sorted_documents[: self.min_top_k]\n\n # If low p resulted in no documents being selected, then return at least one document\n if len(selected_docs) == 0:\n logger.warning(\n \"Top-p sampling with p={top_p} resulted in no documents being selected. \"\n \"Returning the document with the highest score.\",\n top_p=top_p,\n )\n selected_docs = [sorted_documents[0]]\n\n return {\"documents\": selected_docs}" } ]
[ "function_empty" ]
[ "haystack.components.samplers.top_p.TopPSampler._get_documents_and_scores", "haystack.components.samplers.top_p.TopPSampler.run" ]
Python
2
2
{ "total_num": 11, "base_passed_num": 1 }
[ "haystack.haystack.dataclasses.chat_message.ChatMessage::__getattribute__", "haystack.haystack.components.tools.tool_invoker.ToolInvoker::run", "haystack.haystack.core.type_utils._strict_types_are_compatible", "haystack.haystack.core.type_utils._types_are_compatible" ]
haystack
[ "haystack/dataclasses/chat_message.py", "haystack/components/tools/tool_invoker.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/core/type_utils.py", "haystack/core/type_utils.py" ]
[ "test/components/tools/test_tool_invoker.py" ]
[ { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 127, "func_end_lineno": 140, "func_code": " def __getattribute__(self, name):\n \"\"\"\n This method is reimplemented to make the `content` attribute removal more visible.\n \"\"\"\n\n if name == \"content\":\n msg = (\n \"The `content` attribute of `ChatMessage` has been removed. \"\n \"Use the `text` property to access the textual value. \"\n \"For more information about the new API and how to migrate, see the documentation: \"\n \"https://docs.haystack.deepset.ai/docs/chatmessage\"\n )\n raise AttributeError(msg)\n return object.__getattribute__(self, name)" }, { "class_start_lineno": 38, "class_end_lineno": 242, "func_start_lineno": 166, "func_end_lineno": 214, "func_code": " def run(self, messages: List[ChatMessage]) -> Dict[str, Any]:\n \"\"\"\n Processes ChatMessage objects containing tool calls and invokes the corresponding tools, if available.\n\n :param messages:\n A list of ChatMessage objects.\n :returns:\n A dictionary with the key `tool_messages` containing a list of ChatMessage objects with tool role.\n Each ChatMessage objects wraps the result of a tool invocation.\n\n :raises ToolNotFoundException:\n If the tool is not found in the list of available tools and `raise_on_failure` is True.\n :raises ToolInvocationError:\n If the tool invocation fails and `raise_on_failure` is True.\n :raises StringConversionError:\n If the conversion of the tool result to a string fails and `raise_on_failure` is True.\n \"\"\"\n tool_messages = []\n\n for message in messages:\n tool_calls = message.tool_calls\n if not tool_calls:\n continue\n\n for tool_call in tool_calls:\n tool_name = tool_call.tool_name\n tool_arguments = tool_call.arguments\n\n if not tool_name in self._tools_with_names:\n msg = _TOOL_NOT_FOUND.format(tool_name=tool_name, available_tools=self._tools_with_names.keys())\n if self.raise_on_failure:\n raise ToolNotFoundException(msg)\n tool_messages.append(ChatMessage.from_tool(tool_result=msg, origin=tool_call, error=True))\n continue\n\n tool_to_invoke = self._tools_with_names[tool_name]\n try:\n tool_result = tool_to_invoke.invoke(**tool_arguments)\n except ToolInvocationError as e:\n if self.raise_on_failure:\n raise e\n msg = _TOOL_INVOCATION_FAILURE.format(error=e)\n tool_messages.append(ChatMessage.from_tool(tool_result=msg, origin=tool_call, error=True))\n continue\n\n tool_message = self._prepare_tool_result_message(tool_result, tool_call)\n tool_messages.append(tool_message)\n\n return {\"tool_messages\": tool_messages}" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 183, "func_end_lineno": 187, "func_code": " def tool_calls(self) -> List[ToolCall]:\n \"\"\"\n Returns the list of all Tool calls contained in the message.\n \"\"\"\n return [content for content in self._content if isinstance(content, ToolCall)]" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 199, "func_end_lineno": 203, "func_code": " def tool_call_results(self) -> List[ToolCallResult]:\n \"\"\"\n Returns the list of all Tool call results contained in the message.\n \"\"\"\n return [content for content in self._content if isinstance(content, ToolCallResult)]" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 206, "func_end_lineno": 212, "func_code": " def tool_call_result(self) -> Optional[ToolCallResult]:\n \"\"\"\n Returns the first Tool call result contained in the message.\n \"\"\"\n if tool_call_results := self.tool_call_results:\n 
return tool_call_results[0]\n return None" }, { "class_start_lineno": 1, "class_end_lineno": 105, "func_start_lineno": 29, "func_end_lineno": 76, "func_code": "def _strict_types_are_compatible(sender, receiver): # pylint: disable=too-many-return-statements\n \"\"\"\n Checks whether the sender type is equal to or a subtype of the receiver type under strict validation.\n\n Note: this method has no pretense to perform proper type matching. It especially does not deal with aliasing of\n typing classes such as `List` or `Dict` to their runtime counterparts `list` and `dict`. It also does not deal well\n with \"bare\" types, so `List` is treated differently from `List[Any]`, even though they should be the same.\n Consider simplifying the typing of your components if you observe unexpected errors during component connection.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :return: True if the sender type is strictly compatible with the receiver type, False otherwise.\n \"\"\"\n if sender == receiver or receiver is Any:\n return True\n\n if sender is Any:\n return False\n\n try:\n if issubclass(sender, receiver):\n return True\n except TypeError: # typing classes can't be used with issubclass, so we deal with them below\n pass\n\n sender_origin = get_origin(sender)\n receiver_origin = get_origin(receiver)\n\n if sender_origin is not Union and receiver_origin is Union:\n return any(_strict_types_are_compatible(sender, union_arg) for union_arg in get_args(receiver))\n\n # Both must have origins and they must be equal\n if not (sender_origin and receiver_origin and sender_origin == receiver_origin):\n return False\n\n # Compare generic type arguments\n sender_args = get_args(sender)\n receiver_args = get_args(receiver)\n\n # Handle bare types\n if not sender_args and sender_origin:\n sender_args = (Any,)\n if not receiver_args and receiver_origin:\n receiver_args = (Any,) * (len(sender_args) if sender_args else 1)\n if len(sender_args) > len(receiver_args):\n return False\n\n return all(_strict_types_are_compatible(*args) for args in zip(sender_args, receiver_args))" }, { "class_start_lineno": 1, "class_end_lineno": 105, "func_start_lineno": 14, "func_end_lineno": 26, "func_code": "def _types_are_compatible(sender, receiver, type_validation: bool = True) -> bool:\n \"\"\"\n Determines if two types are compatible based on the specified validation mode.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :param type_validation: Whether to perform strict type validation.\n :return: True if the types are compatible, False otherwise.\n \"\"\"\n if type_validation:\n return _strict_types_are_compatible(sender, receiver)\n else:\n return True" } ]
[ "function_empty", "TDD" ]
[ "haystack.dataclasses.chat_message.ChatMessage.__getattribute__", "haystack.components.tools.tool_invoker.ToolInvoker.run", "haystack.dataclasses.chat_message.ChatMessage.tool_calls", "haystack.dataclasses.chat_message.ChatMessage.tool_call_results", "haystack.dataclasses.chat_message.ChatMessage.tool_call_result", "haystack.core.type_utils._strict_types_are_compatible", "haystack.core.type_utils._types_are_compatible" ]
Python
3
4
{ "total_num": 16, "base_passed_num": 6 }
[ "haystack.haystack.dataclasses.chat_message.ChatMessage::__getattribute__", "haystack.haystack.components.validators.json_schema.JsonSchemaValidator::run", "haystack.haystack.core.type_utils._strict_types_are_compatible", "haystack.haystack.core.type_utils._types_are_compatible" ]
haystack
[ "haystack/dataclasses/chat_message.py", "haystack/components/validators/json_schema.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/core/type_utils.py", "haystack/core/type_utils.py" ]
[ "test/components/validators/test_json_schema.py" ]
[ { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 127, "func_end_lineno": 140, "func_code": " def __getattribute__(self, name):\n \"\"\"\n This method is reimplemented to make the `content` attribute removal more visible.\n \"\"\"\n\n if name == \"content\":\n msg = (\n \"The `content` attribute of `ChatMessage` has been removed. \"\n \"Use the `text` property to access the textual value. \"\n \"For more information about the new API and how to migrate, see the documentation: \"\n \"https://docs.haystack.deepset.ai/docs/chatmessage\"\n )\n raise AttributeError(msg)\n return object.__getattribute__(self, name)" }, { "class_start_lineno": 29, "class_end_lineno": 257, "func_start_lineno": 115, "func_end_lineno": 186, "func_code": " def run(\n self,\n messages: List[ChatMessage],\n json_schema: Optional[Dict[str, Any]] = None,\n error_template: Optional[str] = None,\n ) -> Dict[str, List[ChatMessage]]:\n \"\"\"\n Validates the last of the provided messages against the specified json schema.\n\n If it does, the message is passed along the \"validated\" output. If it does not, the message is passed along\n the \"validation_error\" output.\n\n :param messages: A list of ChatMessage instances to be validated. The last message in this list is the one\n that is validated.\n :param json_schema: A dictionary representing the [JSON schema](https://json-schema.org/)\n against which the messages' content is validated. If not provided, the schema from the component init\n is used.\n :param error_template: A custom template string for formatting the error message in case of validation. If not\n provided, the `error_template` from the component init is used.\n :return: A dictionary with the following keys:\n - \"validated\": A list of messages if the last message is valid.\n - \"validation_error\": A list of messages if the last message is invalid.\n :raises ValueError: If no JSON schema is provided or if the message content is not a dictionary or a list of\n dictionaries.\n \"\"\"\n last_message = messages[-1]\n if last_message.text is None:\n raise ValueError(f\"The provided ChatMessage has no text. ChatMessage: {last_message}\")\n if not is_valid_json(last_message.text):\n return {\n \"validation_error\": [\n ChatMessage.from_user(\n f\"The message '{last_message.text}' is not a valid JSON object. 
\"\n f\"Please provide only a valid JSON object in string format.\"\n f\"Don't use any markdown and don't add any comment.\"\n )\n ]\n }\n\n last_message_content = json.loads(last_message.text)\n json_schema = json_schema or self.json_schema\n error_template = error_template or self.error_template or self.default_error_template\n\n if not json_schema:\n raise ValueError(\"Provide a JSON schema for validation either in the run method or in the component init.\")\n # fc payload is json object but subtree `parameters` is string - we need to convert to json object\n # we need complete json to validate it against schema\n last_message_json = self._recursive_json_to_object(last_message_content)\n using_openai_schema: bool = self._is_openai_function_calling_schema(json_schema)\n if using_openai_schema:\n validation_schema = json_schema[\"parameters\"]\n else:\n validation_schema = json_schema\n try:\n last_message_json = [last_message_json] if not isinstance(last_message_json, list) else last_message_json\n for content in last_message_json:\n if using_openai_schema:\n validate(instance=content[\"function\"][\"arguments\"], schema=validation_schema)\n else:\n validate(instance=content, schema=validation_schema)\n\n return {\"validated\": [last_message]}\n except ValidationError as e:\n error_path = \" -> \".join(map(str, e.absolute_path)) if e.absolute_path else \"N/A\"\n error_schema_path = \" -> \".join(map(str, e.absolute_schema_path)) if e.absolute_schema_path else \"N/A\"\n\n error_template = error_template or self.default_error_template\n\n recovery_prompt = self._construct_error_recovery_message(\n error_template, str(e), error_path, error_schema_path, validation_schema, failing_json=last_message.text\n )\n return {\"validation_error\": [ChatMessage.from_user(recovery_prompt)]}" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 167, "func_end_lineno": 171, "func_code": " def texts(self) -> List[str]:\n \"\"\"\n Returns the list of all texts contained in the message.\n \"\"\"\n return [content.text for content in self._content if isinstance(content, TextContent)]" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 174, "func_end_lineno": 180, "func_code": " def text(self) -> Optional[str]:\n \"\"\"\n Returns the first text contained in the message.\n \"\"\"\n if texts := self.texts:\n return texts[0]\n return None" }, { "class_start_lineno": 1, "class_end_lineno": 105, "func_start_lineno": 29, "func_end_lineno": 76, "func_code": "def _strict_types_are_compatible(sender, receiver): # pylint: disable=too-many-return-statements\n \"\"\"\n Checks whether the sender type is equal to or a subtype of the receiver type under strict validation.\n\n Note: this method has no pretense to perform proper type matching. It especially does not deal with aliasing of\n typing classes such as `List` or `Dict` to their runtime counterparts `list` and `dict`. 
It also does not deal well\n with \"bare\" types, so `List` is treated differently from `List[Any]`, even though they should be the same.\n Consider simplifying the typing of your components if you observe unexpected errors during component connection.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :return: True if the sender type is strictly compatible with the receiver type, False otherwise.\n \"\"\"\n if sender == receiver or receiver is Any:\n return True\n\n if sender is Any:\n return False\n\n try:\n if issubclass(sender, receiver):\n return True\n except TypeError: # typing classes can't be used with issubclass, so we deal with them below\n pass\n\n sender_origin = get_origin(sender)\n receiver_origin = get_origin(receiver)\n\n if sender_origin is not Union and receiver_origin is Union:\n return any(_strict_types_are_compatible(sender, union_arg) for union_arg in get_args(receiver))\n\n # Both must have origins and they must be equal\n if not (sender_origin and receiver_origin and sender_origin == receiver_origin):\n return False\n\n # Compare generic type arguments\n sender_args = get_args(sender)\n receiver_args = get_args(receiver)\n\n # Handle bare types\n if not sender_args and sender_origin:\n sender_args = (Any,)\n if not receiver_args and receiver_origin:\n receiver_args = (Any,) * (len(sender_args) if sender_args else 1)\n if len(sender_args) > len(receiver_args):\n return False\n\n return all(_strict_types_are_compatible(*args) for args in zip(sender_args, receiver_args))" }, { "class_start_lineno": 1, "class_end_lineno": 105, "func_start_lineno": 14, "func_end_lineno": 26, "func_code": "def _types_are_compatible(sender, receiver, type_validation: bool = True) -> bool:\n \"\"\"\n Determines if two types are compatible based on the specified validation mode.\n\n :param sender: The sender type.\n :param receiver: The receiver type.\n :param type_validation: Whether to perform strict type validation.\n :return: True if the types are compatible, False otherwise.\n \"\"\"\n if type_validation:\n return _strict_types_are_compatible(sender, receiver)\n else:\n return True" } ]
[ "function_empty", "TDD" ]
[ "haystack.dataclasses.chat_message.ChatMessage.__getattribute__", "haystack.components.validators.json_schema.JsonSchemaValidator.run", "haystack.dataclasses.chat_message.ChatMessage.texts", "haystack.dataclasses.chat_message.ChatMessage.text", "haystack.core.type_utils._strict_types_are_compatible", "haystack.core.type_utils._types_are_compatible" ]
Python
3
4
{ "total_num": 8, "base_passed_num": 2 }
[ "haystack.haystack.document_stores.in_memory.document_store.InMemoryDocumentStore::write_documents", "haystack.haystack.components.writers.document_writer.DocumentWriter::run" ]
haystack
[ "haystack/document_stores/in_memory/document_store.py", "haystack/components/writers/document_writer.py" ]
[ "test/components/writers/test_document_writer.py" ]
[ { "class_start_lineno": 58, "class_end_lineno": 738, "func_start_lineno": 432, "func_end_lineno": 473, "func_code": " def write_documents(self, documents: List[Document], policy: DuplicatePolicy = DuplicatePolicy.NONE) -> int:\n \"\"\"\n Refer to the DocumentStore.write_documents() protocol documentation.\n\n If `policy` is set to `DuplicatePolicy.NONE` defaults to `DuplicatePolicy.FAIL`.\n \"\"\"\n if (\n not isinstance(documents, Iterable)\n or isinstance(documents, str)\n or any(not isinstance(doc, Document) for doc in documents)\n ):\n raise ValueError(\"Please provide a list of Documents.\")\n\n if policy == DuplicatePolicy.NONE:\n policy = DuplicatePolicy.FAIL\n\n written_documents = len(documents)\n for document in documents:\n if policy != DuplicatePolicy.OVERWRITE and document.id in self.storage.keys():\n if policy == DuplicatePolicy.FAIL:\n raise DuplicateDocumentError(f\"ID '{document.id}' already exists.\")\n if policy == DuplicatePolicy.SKIP:\n logger.warning(\"ID '{document_id}' already exists\", document_id=document.id)\n written_documents -= 1\n continue\n\n # Since the statistics are updated in an incremental manner,\n # we need to explicitly remove the existing document to revert\n # the statistics before updating them with the new document.\n if document.id in self.storage.keys():\n self.delete_documents([document.id])\n\n tokens = []\n if document.content is not None:\n tokens = self._tokenize_bm25(document.content)\n\n self.storage[document.id] = document\n\n self._bm25_attr[document.id] = BM25DocumentStats(Counter(tokens), len(tokens))\n self._freq_vocab_for_idf.update(set(tokens))\n self._avg_doc_len = (len(tokens) + self._avg_doc_len * len(self._bm25_attr)) / (len(self._bm25_attr) + 1)\n return written_documents" }, { "class_start_lineno": 15, "class_end_lineno": 134, "func_start_lineno": 85, "func_end_lineno": 103, "func_code": " def run(self, documents: List[Document], policy: Optional[DuplicatePolicy] = None):\n \"\"\"\n Run the DocumentWriter on the given input data.\n\n :param documents:\n A list of documents to write to the document store.\n :param policy:\n The policy to use when encountering duplicate documents.\n :returns:\n Number of documents written to the document store.\n\n :raises ValueError:\n If the specified document store is not found.\n \"\"\"\n if policy is None:\n policy = self.policy\n\n documents_written = self.document_store.write_documents(documents=documents, policy=policy)\n return {\"documents_written\": documents_written}" } ]
[ "function_empty" ]
[ "haystack.document_stores.in_memory.document_store.InMemoryDocumentStore.write_documents", "haystack.components.writers.document_writer.DocumentWriter.run" ]
Python
2
2
{ "total_num": 11, "base_passed_num": 7 }
[ "haystack.haystack.core.pipeline.component_checks.are_all_sockets_ready", "haystack.haystack.core.pipeline.component_checks.has_lazy_variadic_socket_received_all_inputs", "haystack.haystack.core.pipeline.component_checks.has_socket_received_all_inputs", "haystack.haystack.core.pipeline.component_checks.can_component_run" ]
haystack
[ "haystack/core/pipeline/component_checks.py", "haystack/core/pipeline/component_checks.py", "haystack/core/pipeline/component_checks.py", "haystack/core/pipeline/component_checks.py" ]
[ "test/core/pipeline/test_component_checks.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 251, "func_start_lineno": 52, "func_end_lineno": 83, "func_code": "def are_all_sockets_ready(component: Dict, inputs: Dict, only_check_mandatory: bool = False) -> bool:\n \"\"\"\n Checks if all sockets of a component have enough inputs for the component to execute.\n\n :param component: Component metadata and the component instance.\n :param inputs: Inputs for the component.\n :param only_check_mandatory: If only mandatory sockets should be checked.\n \"\"\"\n filled_sockets = set()\n expected_sockets = set()\n if only_check_mandatory:\n sockets_to_check = {\n socket_name: socket for socket_name, socket in component[\"input_sockets\"].items() if socket.is_mandatory\n }\n else:\n sockets_to_check = {\n socket_name: socket\n for socket_name, socket in component[\"input_sockets\"].items()\n if socket.is_mandatory or len(socket.senders)\n }\n\n for socket_name, socket in sockets_to_check.items():\n socket_inputs = inputs.get(socket_name, [])\n expected_sockets.add(socket_name)\n\n # Check if socket has all required inputs or is a lazy variadic socket with any input\n if has_socket_received_all_inputs(socket, socket_inputs) or (\n is_socket_lazy_variadic(socket) and any_socket_input_received(socket_inputs)\n ):\n filled_sockets.add(socket_name)\n\n return filled_sockets == expected_sockets" }, { "class_start_lineno": 1, "class_end_lineno": 251, "func_start_lineno": 149, "func_end_lineno": 163, "func_code": "def has_lazy_variadic_socket_received_all_inputs(socket: InputSocket, socket_inputs: List[Dict]) -> bool:\n \"\"\"\n Checks if a lazy variadic socket has received all expected inputs from other components in the pipeline.\n\n :param socket: The InputSocket of a component.\n :param socket_inputs: Inputs for the socket.\n \"\"\"\n expected_senders = set(socket.senders)\n actual_senders = {\n sock[\"sender\"]\n for sock in socket_inputs\n if sock[\"value\"] is not _NO_OUTPUT_PRODUCED and sock[\"sender\"] is not None\n }\n\n return expected_senders == actual_senders" }, { "class_start_lineno": 1, "class_end_lineno": 251, "func_start_lineno": 175, "func_end_lineno": 199, "func_code": "def has_socket_received_all_inputs(socket: InputSocket, socket_inputs: List[Dict]) -> bool:\n \"\"\"\n Checks if a socket has received all expected inputs.\n\n :param socket: The InputSocket of a component.\n :param socket_inputs: Inputs for the socket.\n \"\"\"\n # No inputs received for the socket, it is not filled.\n if len(socket_inputs) == 0:\n return False\n\n # The socket is greedy variadic and at least one input was produced, it is complete.\n if (\n socket.is_variadic\n and socket.is_greedy\n and any(sock[\"value\"] is not _NO_OUTPUT_PRODUCED for sock in socket_inputs)\n ):\n return True\n\n # The socket is lazy variadic and all expected inputs were produced.\n if is_socket_lazy_variadic(socket) and has_lazy_variadic_socket_received_all_inputs(socket, socket_inputs):\n return True\n\n # The socket is not variadic and the only expected input is complete.\n return not socket.is_variadic and socket_inputs[0][\"value\"] is not _NO_OUTPUT_PRODUCED" }, { "class_start_lineno": 1, "class_end_lineno": 251, "func_start_lineno": 12, "func_end_lineno": 25, "func_code": "def can_component_run(component: Dict, inputs: Dict) -> bool:\n \"\"\"\n Checks if the component can run, given the current state of its inputs.\n\n A component needs to pass two gates so that it is ready to run:\n 1. It has received all mandatory inputs.\n 2. 
It has received a trigger.\n :param component: Component metadata and the component instance.\n :param inputs: Inputs for the component.\n \"\"\"\n received_all_mandatory_inputs = are_all_sockets_ready(component, inputs, only_check_mandatory=True)\n received_trigger = has_any_trigger(component, inputs)\n\n return received_all_mandatory_inputs and received_trigger" } ]
[ "function_empty" ]
[ "haystack.core.pipeline.component_checks.are_all_sockets_ready", "haystack.core.pipeline.component_checks.has_lazy_variadic_socket_received_all_inputs", "haystack.core.pipeline.component_checks.has_socket_received_all_inputs", "haystack.core.pipeline.component_checks.can_component_run" ]
Python
4
4
{ "total_num": 78, "base_passed_num": 44 }
[ "haystack.haystack.dataclasses.document.Document::to_dict", "haystack.haystack.dataclasses.answer.ExtractedAnswer::to_dict" ]
haystack
[ "haystack/dataclasses/document.py", "haystack/dataclasses/answer.py" ]
[ "test/dataclasses/test_answer.py" ]
[ { "class_start_lineno": 49, "class_end_lineno": 186, "func_start_lineno": 123, "func_end_lineno": 140, "func_code": " def to_dict(self, flatten=True) -> Dict[str, Any]:\n \"\"\"\n Converts Document into a dictionary.\n\n `blob` field is converted to a JSON-serializable type.\n\n :param flatten:\n Whether to flatten `meta` field or not. Defaults to `True` to be backward-compatible with Haystack 1.x.\n \"\"\"\n data = asdict(self)\n if (blob := data.get(\"blob\")) is not None:\n data[\"blob\"] = {\"data\": list(blob[\"data\"]), \"mime_type\": blob[\"mime_type\"]}\n\n if flatten:\n meta = data.pop(\"meta\")\n return {**data, **meta}\n\n return data" }, { "class_start_lineno": 28, "class_end_lineno": 84, "func_start_lineno": 43, "func_end_lineno": 63, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize the object to a dictionary.\n\n :returns:\n Serialized dictionary representation of the object.\n \"\"\"\n document = self.document.to_dict(flatten=False) if self.document is not None else None\n document_offset = asdict(self.document_offset) if self.document_offset is not None else None\n context_offset = asdict(self.context_offset) if self.context_offset is not None else None\n return default_to_dict(\n self,\n data=self.data,\n query=self.query,\n document=document,\n context=self.context,\n score=self.score,\n document_offset=document_offset,\n context_offset=context_offset,\n meta=self.meta,\n )" } ]
[ "function_empty" ]
[ "haystack.dataclasses.document.Document.to_dict", "haystack.dataclasses.answer.ExtractedAnswer.to_dict" ]
Python
2
2
{ "total_num": 8, "base_passed_num": 7 }
[ "haystack.haystack.dataclasses.chat_message.ChatMessage::__getattribute__", "haystack.haystack.dataclasses.chat_message.ChatMessage::to_dict", "haystack.haystack.dataclasses.chat_message.ChatMessage::to_openai_dict_format" ]
haystack
[ "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py" ]
[ "test/dataclasses/test_chat_message.py" ]
[ { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 127, "func_end_lineno": 140, "func_code": " def __getattribute__(self, name):\n \"\"\"\n This method is reimplemented to make the `content` attribute removal more visible.\n \"\"\"\n\n if name == \"content\":\n msg = (\n \"The `content` attribute of `ChatMessage` has been removed. \"\n \"Use the `text` property to access the textual value. \"\n \"For more information about the new API and how to migrate, see the documentation: \"\n \"https://docs.haystack.deepset.ai/docs/chatmessage\"\n )\n raise AttributeError(msg)\n return object.__getattribute__(self, name)" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 146, "func_end_lineno": 150, "func_code": " def role(self) -> ChatRole:\n \"\"\"\n Returns the role of the entity sending the message.\n \"\"\"\n return self._role" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 160, "func_end_lineno": 164, "func_code": " def name(self) -> Optional[str]:\n \"\"\"\n Returns the name associated with the message.\n \"\"\"\n return self._name" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 167, "func_end_lineno": 171, "func_code": " def texts(self) -> List[str]:\n \"\"\"\n Returns the list of all texts contained in the message.\n \"\"\"\n return [content.text for content in self._content if isinstance(content, TextContent)]" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 174, "func_end_lineno": 180, "func_code": " def text(self) -> Optional[str]:\n \"\"\"\n Returns the first text contained in the message.\n \"\"\"\n if texts := self.texts:\n return texts[0]\n return None" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 183, "func_end_lineno": 187, "func_code": " def tool_calls(self) -> List[ToolCall]:\n \"\"\"\n Returns the list of all Tool calls contained in the message.\n \"\"\"\n return [content for content in self._content if isinstance(content, ToolCall)]" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 190, "func_end_lineno": 196, "func_code": " def tool_call(self) -> Optional[ToolCall]:\n \"\"\"\n Returns the first Tool call contained in the message.\n \"\"\"\n if tool_calls := self.tool_calls:\n return tool_calls[0]\n return None" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 199, "func_end_lineno": 203, "func_code": " def tool_call_results(self) -> List[ToolCallResult]:\n \"\"\"\n Returns the list of all Tool call results contained in the message.\n \"\"\"\n return [content for content in self._content if isinstance(content, ToolCallResult)]" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 206, "func_end_lineno": 212, "func_code": " def tool_call_result(self) -> Optional[ToolCallResult]:\n \"\"\"\n Returns the first Tool call result contained in the message.\n \"\"\"\n if tool_call_results := self.tool_call_results:\n return tool_call_results[0]\n return None" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 293, "func_end_lineno": 316, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Converts ChatMessage into a dictionary.\n\n :returns:\n Serialized version of the object.\n \"\"\"\n serialized: Dict[str, Any] = {}\n serialized[\"_role\"] = self._role.value\n serialized[\"_meta\"] = self._meta\n serialized[\"_name\"] = self._name\n content: List[Dict[str, Any]] = []\n for part in self._content:\n if 
isinstance(part, TextContent):\n content.append({\"text\": part.text})\n elif isinstance(part, ToolCall):\n content.append({\"tool_call\": asdict(part)})\n elif isinstance(part, ToolCallResult):\n content.append({\"tool_call_result\": asdict(part)})\n else:\n raise TypeError(f\"Unsupported type in ChatMessage content: `{type(part).__name__}` for `{part}`.\")\n\n serialized[\"_content\"] = content\n return serialized" }, { "class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 357, "func_end_lineno": 403, "func_code": " def to_openai_dict_format(self) -> Dict[str, Any]:\n \"\"\"\n Convert a ChatMessage to the dictionary format expected by OpenAI's Chat API.\n \"\"\"\n text_contents = self.texts\n tool_calls = self.tool_calls\n tool_call_results = self.tool_call_results\n\n if not text_contents and not tool_calls and not tool_call_results:\n raise ValueError(\n \"A `ChatMessage` must contain at least one `TextContent`, `ToolCall`, or `ToolCallResult`.\"\n )\n if len(text_contents) + len(tool_call_results) > 1:\n raise ValueError(\"A `ChatMessage` can only contain one `TextContent` or one `ToolCallResult`.\")\n\n openai_msg: Dict[str, Any] = {\"role\": self._role.value}\n\n # Add name field if present\n if self._name is not None:\n openai_msg[\"name\"] = self._name\n\n if tool_call_results:\n result = tool_call_results[0]\n if result.origin.id is None:\n raise ValueError(\"`ToolCall` must have a non-null `id` attribute to be used with OpenAI.\")\n openai_msg[\"content\"] = result.result\n openai_msg[\"tool_call_id\"] = result.origin.id\n # OpenAI does not provide a way to communicate errors in tool invocations, so we ignore the error field\n return openai_msg\n\n if text_contents:\n openai_msg[\"content\"] = text_contents[0]\n if tool_calls:\n openai_tool_calls = []\n for tc in tool_calls:\n if tc.id is None:\n raise ValueError(\"`ToolCall` must have a non-null `id` attribute to be used with OpenAI.\")\n openai_tool_calls.append(\n {\n \"id\": tc.id,\n \"type\": \"function\",\n # We disable ensure_ascii so special chars like emojis are not converted\n \"function\": {\"name\": tc.tool_name, \"arguments\": json.dumps(tc.arguments, ensure_ascii=False)},\n }\n )\n openai_msg[\"tool_calls\"] = openai_tool_calls\n return openai_msg" } ]
[ "function_empty", "TDD" ]
[ "haystack.dataclasses.chat_message.ChatMessage.__getattribute__", "haystack.dataclasses.chat_message.ChatMessage.role", "haystack.dataclasses.chat_message.ChatMessage.name", "haystack.dataclasses.chat_message.ChatMessage.texts", "haystack.dataclasses.chat_message.ChatMessage.text", "haystack.dataclasses.chat_message.ChatMessage.tool_calls", "haystack.dataclasses.chat_message.ChatMessage.tool_call", "haystack.dataclasses.chat_message.ChatMessage.tool_call_results", "haystack.dataclasses.chat_message.ChatMessage.tool_call_result", "haystack.dataclasses.chat_message.ChatMessage.to_dict", "haystack.dataclasses.chat_message.ChatMessage.to_openai_dict_format" ]
Python
1
3
{ "total_num": 35, "base_passed_num": 15 }
[ "haystack.haystack.evaluation.eval_run_result.EvaluationRunResult::detailed_report", "haystack.haystack.evaluation.eval_run_result.EvaluationRunResult::comparative_detailed_report" ]
haystack
[ "haystack/evaluation/eval_run_result.py", "haystack/evaluation/eval_run_result.py" ]
[ "test/evaluation/test_eval_run_result.py" ]
[ { "class_start_lineno": 16, "class_end_lineno": 222, "func_start_lineno": 138, "func_end_lineno": 162, "func_code": " def detailed_report(\n self, output_format: Literal[\"json\", \"csv\", \"df\"] = \"json\", csv_file: Optional[str] = None\n ) -> Union[Dict[str, List[Any]], \"DataFrame\", str]:\n \"\"\"\n Generates a report with detailed scores for each metric.\n\n :param output_format: The output format for the report, \"json\", \"csv\", or \"df\", default to \"json\".\n :param csv_file: Filepath to save CSV output if `output_format` is \"csv\", must be provided.\n\n :returns:\n JSON or DataFrame with the detailed scores, in case the output is set to a CSV file, a message confirming\n the successful write or an error message.\n \"\"\"\n\n combined_data = {col: self.inputs[col] for col in self.inputs}\n\n # enforce columns type consistency\n scores_columns = list(self.results.keys())\n for col in scores_columns:\n col_values = self.results[col][\"individual_scores\"]\n if any(isinstance(v, float) for v in col_values):\n col_values = [float(v) for v in col_values]\n combined_data[col] = col_values\n\n return self._handle_output(combined_data, output_format, csv_file)" }, { "class_start_lineno": 16, "class_end_lineno": 222, "func_start_lineno": 164, "func_end_lineno": 222, "func_code": " def comparative_detailed_report(\n self,\n other: \"EvaluationRunResult\",\n keep_columns: Optional[List[str]] = None,\n output_format: Literal[\"json\", \"csv\", \"df\"] = \"json\",\n csv_file: Optional[str] = None,\n ) -> Union[str, \"DataFrame\", None]:\n \"\"\"\n Generates a report with detailed scores for each metric from two evaluation runs for comparison.\n\n :param other: Results of another evaluation run to compare with.\n :param keep_columns: List of common column names to keep from the inputs of the evaluation runs to compare.\n :param output_format: The output format for the report, \"json\", \"csv\", or \"df\", default to \"json\".\n :param csv_file: Filepath to save CSV output if `output_format` is \"csv\", must be provided.\n\n :returns:\n JSON or DataFrame with a comparison of the detailed scores, in case the output is set to a CSV file,\n a message confirming the successful write or an error message.\n \"\"\"\n\n if not isinstance(other, EvaluationRunResult):\n raise ValueError(\"Comparative scores can only be computed between EvaluationRunResults.\")\n\n if not hasattr(other, \"run_name\") or not hasattr(other, \"inputs\") or not hasattr(other, \"results\"):\n raise ValueError(\"The 'other' parameter must have 'run_name', 'inputs', and 'results' attributes.\")\n\n if self.run_name == other.run_name:\n warn(f\"The run names of the two evaluation results are the same ('{self.run_name}')\")\n\n if self.inputs.keys() != other.inputs.keys():\n warn(f\"The input columns differ between the results; using the input columns of '{self.run_name}'.\")\n\n # got both detailed reports\n detailed_a = self.detailed_report(output_format=\"json\")\n detailed_b = other.detailed_report(output_format=\"json\")\n\n # ensure both detailed reports are in dictionaries format\n if not isinstance(detailed_a, dict) or not isinstance(detailed_b, dict):\n raise ValueError(\"Detailed reports must be dictionaries.\")\n\n # determine which columns to ignore\n if keep_columns is None:\n ignore = list(self.inputs.keys())\n else:\n ignore = [col for col in list(self.inputs.keys()) if col not in keep_columns]\n\n # filter out ignored columns from pipe_b_dict\n filtered_detailed_b = {\n f\"{other.run_name}_{key}\": value for 
key, value in detailed_b.items() if key not in ignore\n }\n\n # rename columns in pipe_a_dict based on ignore list\n renamed_detailed_a = {\n (key if key in ignore else f\"{self.run_name}_{key}\"): value for key, value in detailed_a.items()\n }\n\n # combine both detailed reports\n combined_results = {**renamed_detailed_a, **filtered_detailed_b}\n return self._handle_output(combined_results, output_format, csv_file)" } ]
[ "function_empty" ]
[ "haystack.evaluation.eval_run_result.EvaluationRunResult.detailed_report", "haystack.evaluation.eval_run_result.EvaluationRunResult.comparative_detailed_report" ]
Python
2
2
{ "total_num": 4, "base_passed_num": 2 }
[ "haystack.haystack.tools.from_function.create_tool_from_function", "haystack.haystack.tools.from_function.tool" ]
haystack
[ "haystack/tools/from_function.py", "haystack/tools/from_function.py" ]
[ "test/tools/test_from_function.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 166, "func_start_lineno": 14, "func_end_lineno": 112, "func_code": "def create_tool_from_function(\n function: Callable, name: Optional[str] = None, description: Optional[str] = None\n) -> \"Tool\":\n \"\"\"\n Create a Tool instance from a function.\n\n Allows customizing the Tool name and description.\n For simpler use cases, consider using the `@tool` decorator.\n\n ### Usage example\n\n ```python\n from typing import Annotated, Literal\n from haystack.tools import create_tool_from_function\n\n def get_weather(\n city: Annotated[str, \"the city for which to get the weather\"] = \"Munich\",\n unit: Annotated[Literal[\"Celsius\", \"Fahrenheit\"], \"the unit for the temperature\"] = \"Celsius\"):\n '''A simple function to get the current weather for a location.'''\n return f\"Weather report for {city}: 20 {unit}, sunny\"\n\n tool = create_tool_from_function(get_weather)\n\n print(tool)\n >>> Tool(name='get_weather', description='A simple function to get the current weather for a location.',\n >>> parameters={\n >>> 'type': 'object',\n >>> 'properties': {\n >>> 'city': {'type': 'string', 'description': 'the city for which to get the weather', 'default': 'Munich'},\n >>> 'unit': {\n >>> 'type': 'string',\n >>> 'enum': ['Celsius', 'Fahrenheit'],\n >>> 'description': 'the unit for the temperature',\n >>> 'default': 'Celsius',\n >>> },\n >>> }\n >>> },\n >>> function=<function get_weather at 0x7f7b3a8a9b80>)\n ```\n\n :param function:\n The function to be converted into a Tool.\n The function must include type hints for all parameters.\n The function is expected to have basic python input types (str, int, float, bool, list, dict, tuple).\n Other input types may work but are not guaranteed.\n If a parameter is annotated using `typing.Annotated`, its metadata will be used as parameter description.\n :param name:\n The name of the Tool. If not provided, the name of the function will be used.\n :param description:\n The description of the Tool. 
If not provided, the docstring of the function will be used.\n To intentionally leave the description empty, pass an empty string.\n\n :returns:\n The Tool created from the function.\n\n :raises ValueError:\n If any parameter of the function lacks a type hint.\n :raises SchemaGenerationError:\n If there is an error generating the JSON schema for the Tool.\n \"\"\"\n\n tool_description = description if description is not None else (function.__doc__ or \"\")\n\n signature = inspect.signature(function)\n\n # collect fields (types and defaults) and descriptions from function parameters\n fields: Dict[str, Any] = {}\n descriptions = {}\n\n for param_name, param in signature.parameters.items():\n if param.annotation is param.empty:\n raise ValueError(f\"Function '{function.__name__}': parameter '{param_name}' does not have a type hint.\")\n\n # if the parameter has not a default value, Pydantic requires an Ellipsis (...)\n # to explicitly indicate that the parameter is required\n default = param.default if param.default is not param.empty else ...\n fields[param_name] = (param.annotation, default)\n\n if hasattr(param.annotation, \"__metadata__\"):\n descriptions[param_name] = param.annotation.__metadata__[0]\n\n # create Pydantic model and generate JSON schema\n try:\n model = create_model(function.__name__, **fields)\n schema = model.model_json_schema()\n except Exception as e:\n raise SchemaGenerationError(f\"Failed to create JSON schema for function '{function.__name__}'\") from e\n\n # we don't want to include title keywords in the schema, as they contain redundant information\n # there is no programmatic way to prevent Pydantic from adding them, so we remove them later\n # see https://github.com/pydantic/pydantic/discussions/8504\n _remove_title_from_schema(schema)\n\n # add parameters descriptions to the schema\n for param_name, param_description in descriptions.items():\n if param_name in schema[\"properties\"]:\n schema[\"properties\"][param_name][\"description\"] = param_description\n\n return Tool(name=name or function.__name__, description=tool_description, parameters=schema, function=function)" }, { "class_start_lineno": 1, "class_end_lineno": 166, "func_start_lineno": 115, "func_end_lineno": 151, "func_code": "def tool(function: Callable) -> Tool:\n \"\"\"\n Decorator to convert a function into a Tool.\n\n Tool name, description, and parameters are inferred from the function.\n If you need to customize more the Tool, use `create_tool_from_function` instead.\n\n ### Usage example\n ```python\n from typing import Annotated, Literal\n from haystack.tools import tool\n\n @tool\n def get_weather(\n city: Annotated[str, \"the city for which to get the weather\"] = \"Munich\",\n unit: Annotated[Literal[\"Celsius\", \"Fahrenheit\"], \"the unit for the temperature\"] = \"Celsius\"):\n '''A simple function to get the current weather for a location.'''\n return f\"Weather report for {city}: 20 {unit}, sunny\"\n\n print(get_weather)\n >>> Tool(name='get_weather', description='A simple function to get the current weather for a location.',\n >>> parameters={\n >>> 'type': 'object',\n >>> 'properties': {\n >>> 'city': {'type': 'string', 'description': 'the city for which to get the weather', 'default': 'Munich'},\n >>> 'unit': {\n >>> 'type': 'string',\n >>> 'enum': ['Celsius', 'Fahrenheit'],\n >>> 'description': 'the unit for the temperature',\n >>> 'default': 'Celsius',\n >>> },\n >>> }\n >>> },\n >>> function=<function get_weather at 0x7f7b3a8a9b80>)\n ```\n \"\"\"\n return 
create_tool_from_function(function)" } ]
[ "function_empty" ]
[ "haystack.tools.from_function.create_tool_from_function", "haystack.tools.from_function.tool" ]
Python
2
2
{ "total_num": 12, "base_passed_num": 3 }
[ "haystack.haystack.core.serialization.import_class_by_name", "haystack.haystack.tools.tool.deserialize_tools_inplace" ]
haystack
[ "haystack/core/serialization.py", "haystack/tools/tool.py" ]
[ "test/tools/test_tool.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 243, "func_end_lineno": 264, "func_code": "def import_class_by_name(fully_qualified_name: str) -> Type[object]:\n \"\"\"\n Utility function to import (load) a class object based on its fully qualified class name.\n\n This function dynamically imports a class based on its string name.\n It splits the name into module path and class name, imports the module,\n and returns the class object.\n\n :param fully_qualified_name: the fully qualified class name as a string\n :returns: the class object.\n :raises ImportError: If the class cannot be imported or found.\n \"\"\"\n try:\n module_path, class_name = fully_qualified_name.rsplit(\".\", 1)\n logger.debug(\n \"Attempting to import class '{cls_name}' from module '{md_path}'\", cls_name=class_name, md_path=module_path\n )\n module = thread_safe_import(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as error:\n logger.error(\"Failed to import class '{full_name}'\", full_name=fully_qualified_name)\n raise ImportError(f\"Could not import class '{fully_qualified_name}'\") from error" }, { "class_start_lineno": 1, "class_end_lineno": 136, "func_start_lineno": 106, "func_end_lineno": 136, "func_code": "def deserialize_tools_inplace(data: Dict[str, Any], key: str = \"tools\"):\n \"\"\"\n Deserialize Tools in a dictionary inplace.\n\n :param data:\n The dictionary with the serialized data.\n :param key:\n The key in the dictionary where the Tools are stored.\n \"\"\"\n if key in data:\n serialized_tools = data[key]\n\n if serialized_tools is None:\n return\n\n if not isinstance(serialized_tools, list):\n raise TypeError(f\"The value of '{key}' is not a list\")\n\n deserialized_tools = []\n for tool in serialized_tools:\n if not isinstance(tool, dict):\n raise TypeError(f\"Serialized tool '{tool}' is not a dictionary\")\n\n # different classes are allowed: Tool, ComponentTool, etc.\n tool_class = import_class_by_name(tool[\"type\"])\n if not issubclass(tool_class, Tool):\n raise TypeError(f\"Class '{tool_class}' is not a subclass of Tool\")\n\n deserialized_tools.append(tool_class.from_dict(tool))\n\n data[key] = deserialized_tools" } ]
[ "function_empty" ]
[ "haystack.core.serialization.import_class_by_name", "haystack.tools.tool.deserialize_tools_inplace" ]
Python
2
2
{ "total_num": 10, "base_passed_num": 8 }
[ "haystack.haystack.dataclasses.document.Document::to_dict", "haystack.haystack.tracing.utils.coerce_tag_value" ]
haystack
[ "haystack/dataclasses/document.py", "haystack/tracing/utils.py", "haystack/tracing/utils.py" ]
[ "test/tracing/test_utils.py" ]
[ { "class_start_lineno": 49, "class_end_lineno": 186, "func_start_lineno": 123, "func_end_lineno": 140, "func_code": " def to_dict(self, flatten=True) -> Dict[str, Any]:\n \"\"\"\n Converts Document into a dictionary.\n\n `blob` field is converted to a JSON-serializable type.\n\n :param flatten:\n Whether to flatten `meta` field or not. Defaults to `True` to be backward-compatible with Haystack 1.x.\n \"\"\"\n data = asdict(self)\n if (blob := data.get(\"blob\")) is not None:\n data[\"blob\"] = {\"data\": list(blob[\"data\"]), \"mime_type\": blob[\"mime_type\"]}\n\n if flatten:\n meta = data.pop(\"meta\")\n return {**data, **meta}\n\n return data" }, { "class_start_lineno": 1, "class_end_lineno": 52, "func_start_lineno": 42, "func_end_lineno": 52, "func_code": "def _serializable_value(value: Any) -> Any:\n if isinstance(value, list):\n return [_serializable_value(v) for v in value]\n\n if isinstance(value, dict):\n return {k: _serializable_value(v) for k, v in value.items()}\n\n if getattr(value, \"to_dict\", None):\n return _serializable_value(value.to_dict())\n\n return value" }, { "class_start_lineno": 1, "class_end_lineno": 52, "func_start_lineno": 15, "func_end_lineno": 39, "func_code": "def coerce_tag_value(value: Any) -> Union[bool, str, int, float]:\n \"\"\"\n Coerces span tag values to compatible types for the tracing backend.\n\n Most tracing libraries don't support sending complex types to the backend. Hence, we need to convert them to\n compatible types.\n\n :param value: an arbitrary value which should be coerced to a compatible type\n :return: the value coerced to a compatible type\n \"\"\"\n if isinstance(value, PRIMITIVE_TYPES):\n return value\n\n if value is None:\n return \"\"\n\n try:\n # do that with-in try-except because who knows what kind of objects are being passed\n serializable = _serializable_value(value)\n return json.dumps(serializable)\n except Exception as error:\n logger.debug(\"Failed to coerce tag value to string: {error}\", error=error)\n\n # Our last resort is to convert the value to a string\n return str(value)" } ]
[ "function_empty" ]
[ "haystack.dataclasses.document.Document.to_dict", "haystack.tracing.utils._serializable_value", "haystack.tracing.utils.coerce_tag_value" ]
Python
2
2
{ "total_num": 11, "base_passed_num": 0 }
[ "haystack.haystack.core.serialization.import_class_by_name", "haystack.haystack.utils.base_serialization.deserialize_class_instance" ]
haystack
[ "haystack/core/serialization.py", "haystack/utils/base_serialization.py" ]
[ "test/utils/test_base_serialization.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 243, "func_end_lineno": 264, "func_code": "def import_class_by_name(fully_qualified_name: str) -> Type[object]:\n \"\"\"\n Utility function to import (load) a class object based on its fully qualified class name.\n\n This function dynamically imports a class based on its string name.\n It splits the name into module path and class name, imports the module,\n and returns the class object.\n\n :param fully_qualified_name: the fully qualified class name as a string\n :returns: the class object.\n :raises ImportError: If the class cannot be imported or found.\n \"\"\"\n try:\n module_path, class_name = fully_qualified_name.rsplit(\".\", 1)\n logger.debug(\n \"Attempting to import class '{cls_name}' from module '{md_path}'\", cls_name=class_name, md_path=module_path\n )\n module = thread_safe_import(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as error:\n logger.error(\"Failed to import class '{full_name}'\", full_name=fully_qualified_name)\n raise ImportError(f\"Could not import class '{fully_qualified_name}'\") from error" }, { "class_start_lineno": 1, "class_end_lineno": 54, "func_start_lineno": 29, "func_end_lineno": 54, "func_code": "def deserialize_class_instance(data: Dict[str, Any]) -> Any:\n \"\"\"\n Deserializes an object from a dictionary representation generated by `auto_serialize_class_instance`.\n\n :param data:\n The dictionary to deserialize from.\n :returns:\n The deserialized object.\n :raises DeserializationError:\n If the serialization data is malformed, the class type cannot be imported, or the\n class does not have a `from_dict` method.\n \"\"\"\n if \"type\" not in data:\n raise DeserializationError(\"Missing 'type' in serialization data\")\n if \"data\" not in data:\n raise DeserializationError(\"Missing 'data' in serialization data\")\n\n try:\n obj_class = import_class_by_name(data[\"type\"])\n except ImportError as e:\n raise DeserializationError(f\"Class '{data['type']}' not correctly imported\") from e\n\n if not hasattr(obj_class, \"from_dict\"):\n raise DeserializationError(f\"Class '{data['type']}' does not have a 'from_dict' method\")\n\n return obj_class.from_dict(data[\"data\"])" } ]
[ "function_empty" ]
[ "haystack.core.serialization.import_class_by_name", "haystack.utils.base_serialization.deserialize_class_instance" ]
Python
2
2
{ "total_num": 4, "base_passed_num": 2 }
[ "haystack.haystack.utils.type_serialization.thread_safe_import", "haystack.haystack.utils.callable_serialization.deserialize_callable" ]
haystack
[ "haystack/utils/type_serialization.py", "haystack/utils/callable_serialization.py" ]
[ "test/utils/test_callable_serialization.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 170, "func_start_lineno": 159, "func_end_lineno": 170, "func_code": "def thread_safe_import(module_name: str) -> ModuleType:\n \"\"\"\n Import a module in a thread-safe manner.\n\n Importing modules in a multi-threaded environment can lead to race conditions.\n This function ensures that the module is imported in a thread-safe manner without having impact\n on the performance of the import for single-threaded environments.\n\n :param module_name: the module to import\n \"\"\"\n with _import_lock:\n return importlib.import_module(module_name)" }, { "class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 45, "func_end_lineno": 80, "func_code": "def deserialize_callable(callable_handle: str) -> Callable:\n \"\"\"\n Deserializes a callable given its full import path as a string.\n\n :param callable_handle: The full path of the callable_handle\n :return: The callable\n :raises DeserializationError: If the callable cannot be found\n \"\"\"\n parts = callable_handle.split(\".\")\n\n for i in range(len(parts), 0, -1):\n module_name = \".\".join(parts[:i])\n try:\n mod: Any = thread_safe_import(module_name)\n except Exception:\n # keep reducing i until we find a valid module import\n continue\n\n attr_value = mod\n for part in parts[i:]:\n try:\n attr_value = getattr(attr_value, part)\n except AttributeError as e:\n raise DeserializationError(f\"Could not find attribute '{part}' in {attr_value.__name__}\") from e\n\n # when the attribute is a classmethod, we need the underlying function\n if isinstance(attr_value, (classmethod, staticmethod)):\n attr_value = attr_value.__func__\n\n if not callable(attr_value):\n raise DeserializationError(f\"The final attribute is not callable: {attr_value}\")\n\n return attr_value\n\n # Fallback if we never find anything\n raise DeserializationError(f\"Could not import '{callable_handle}' as a module or callable.\")" } ]
[ "function_empty", "TDD" ]
[ "haystack.utils.type_serialization.thread_safe_import", "haystack.utils.callable_serialization.deserialize_callable" ]
Python
1
2
{ "total_num": 11, "base_passed_num": 5 }
[ "haystack.haystack.utils.device.ComponentDevice::to_hf", "haystack.haystack.utils.device.ComponentDevice::update_hf_kwargs" ]
haystack
[ "haystack/utils/device.py", "haystack/utils/device.py" ]
[ "test/utils/test_device.py" ]
[ { "class_start_lineno": 240, "class_end_lineno": 480, "func_start_lineno": 359, "func_end_lineno": 379, "func_code": " def to_hf(self) -> Union[Union[int, str], Dict[str, Union[int, str]]]:\n \"\"\"\n Convert the component device representation to HuggingFace format.\n\n :returns:\n The HuggingFace device representation.\n \"\"\"\n self._validate()\n\n def convert_device(device: Device, *, gpu_id_only: bool = False) -> Union[int, str]:\n if gpu_id_only and device.type == DeviceType.GPU:\n assert device.id is not None\n return device.id\n else:\n return str(device)\n\n if self._single_device is not None:\n return convert_device(self._single_device)\n\n assert self._multiple_devices is not None\n return {key: convert_device(device, gpu_id_only=True) for key, device in self._multiple_devices.mapping.items()}" }, { "class_start_lineno": 240, "class_end_lineno": 480, "func_start_lineno": 381, "func_end_lineno": 402, "func_code": " def update_hf_kwargs(self, hf_kwargs: Dict[str, Any], *, overwrite: bool) -> Dict[str, Any]:\n \"\"\"\n Convert the component device representation to HuggingFace format.\n\n Add them as canonical keyword arguments to the keyword arguments dictionary.\n\n :param hf_kwargs:\n The HuggingFace keyword arguments dictionary.\n :param overwrite:\n Whether to overwrite existing device arguments.\n :returns:\n The HuggingFace keyword arguments dictionary.\n \"\"\"\n self._validate()\n\n if not overwrite and any(x in hf_kwargs for x in (\"device\", \"device_map\")):\n return hf_kwargs\n\n converted = self.to_hf()\n key = \"device_map\" if self.has_multiple_devices else \"device\"\n hf_kwargs[key] = converted\n return hf_kwargs" } ]
[ "function_empty" ]
[ "haystack.utils.device.ComponentDevice.to_hf", "haystack.utils.device.ComponentDevice.update_hf_kwargs" ]
Python
2
2
{ "total_num": 7, "base_passed_num": 4 }
[ "haystack.haystack.core.serialization.import_class_by_name", "haystack.haystack.utils.docstore_deserialization.deserialize_document_store_in_init_params_inplace" ]
haystack
[ "haystack/core/serialization.py", "haystack/utils/docstore_deserialization.py" ]
[ "test/utils/test_docstore_deserialization.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 243, "func_end_lineno": 264, "func_code": "def import_class_by_name(fully_qualified_name: str) -> Type[object]:\n \"\"\"\n Utility function to import (load) a class object based on its fully qualified class name.\n\n This function dynamically imports a class based on its string name.\n It splits the name into module path and class name, imports the module,\n and returns the class object.\n\n :param fully_qualified_name: the fully qualified class name as a string\n :returns: the class object.\n :raises ImportError: If the class cannot be imported or found.\n \"\"\"\n try:\n module_path, class_name = fully_qualified_name.rsplit(\".\", 1)\n logger.debug(\n \"Attempting to import class '{cls_name}' from module '{md_path}'\", cls_name=class_name, md_path=module_path\n )\n module = thread_safe_import(module_path)\n return getattr(module, class_name)\n except (ImportError, AttributeError) as error:\n logger.error(\"Failed to import class '{full_name}'\", full_name=fully_qualified_name)\n raise ImportError(f\"Could not import class '{fully_qualified_name}'\") from error" }, { "class_start_lineno": 1, "class_end_lineno": 39, "func_start_lineno": 11, "func_end_lineno": 39, "func_code": "def deserialize_document_store_in_init_params_inplace(data: Dict[str, Any], key: str = \"document_store\"):\n \"\"\"\n Deserializes a generic document store from the init_parameters of a serialized component in place.\n\n :param data:\n The dictionary to deserialize from.\n :param key:\n The key in the `data[\"init_parameters\"]` dictionary where the document store is specified.\n :returns:\n The dictionary, with the document store deserialized.\n\n :raises DeserializationError:\n If the document store is not properly specified in the serialization data or its type cannot be imported.\n \"\"\"\n init_params = data.get(\"init_parameters\", {})\n if key not in init_params:\n raise DeserializationError(f\"Missing '{key}' in serialization data\")\n if \"type\" not in init_params[key]:\n raise DeserializationError(f\"Missing 'type' in {key} serialization data\")\n\n doc_store_data = data[\"init_parameters\"][key]\n try:\n doc_store_class = import_class_by_name(doc_store_data[\"type\"])\n except ImportError as e:\n raise DeserializationError(f\"Class '{doc_store_data['type']}' not correctly imported\") from e\n if hasattr(doc_store_class, \"from_dict\"):\n data[\"init_parameters\"][key] = doc_store_class.from_dict(doc_store_data)\n else:\n data[\"init_parameters\"][key] = default_from_dict(doc_store_class, doc_store_data)" } ]
[ "function_empty" ]
[ "haystack.core.serialization.import_class_by_name", "haystack.utils.docstore_deserialization.deserialize_document_store_in_init_params_inplace" ]
Python
2
2
{ "total_num": 6, "base_passed_num": 0 }
[ "inference.inference.core.utils.image_utils.validate_numpy_image", "inference.inference.core.utils.image_utils.load_image_with_inferred_type" ]
inference
[ "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py" ]
[ "tests/inference/unit_tests/core/active_learning/test_middlewares.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 353, "func_end_lineno": 377, "func_code": "def validate_numpy_image(data: np.ndarray) -> None:\n \"\"\"\n Validate if the provided data is a valid numpy image.\n\n Args:\n data (np.ndarray): The numpy array representing an image.\n\n Raises:\n InvalidNumpyInput: If the provided data is not a valid numpy image.\n \"\"\"\n if not issubclass(type(data), np.ndarray):\n raise InvalidNumpyInput(\n message=f\"Data provided as input could not be decoded into np.ndarray object.\",\n public_message=f\"Data provided as input could not be decoded into np.ndarray object.\",\n )\n if len(data.shape) != 3 and len(data.shape) != 2:\n raise InvalidNumpyInput(\n message=f\"For image given as np.ndarray expected 2 or 3 dimensions, got {len(data.shape)} dimensions.\",\n public_message=f\"For image given as np.ndarray expected 2 or 3 dimensions.\",\n )\n if data.shape[-1] != 3 and data.shape[-1] != 1:\n raise InvalidNumpyInput(\n message=f\"For image given as np.ndarray expected 1 or 3 channels, got {data.shape[-1]} channels.\",\n public_message=\"For image given as np.ndarray expected 1 or 3 channels.\",\n )" }, { "class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 180, "func_end_lineno": 212, "func_code": "def load_image_with_inferred_type(\n value: Any,\n cv_imread_flags: int = cv2.IMREAD_COLOR,\n) -> Tuple[np.ndarray, bool]:\n \"\"\"Load an image by inferring its type.\n\n Args:\n value (Any): The image data.\n cv_imread_flags (int): Flags used for OpenCV's imread function.\n\n Returns:\n Tuple[np.ndarray, bool]: Loaded image as a numpy array and a boolean indicating if the image is in BGR format.\n\n Raises:\n NotImplementedError: If the image type could not be inferred.\n \"\"\"\n if isinstance(value, (np.ndarray, np.generic)):\n validate_numpy_image(data=value)\n return value, True\n elif isinstance(value, Image.Image):\n return np.asarray(value.convert(\"RGB\")), False\n elif isinstance(value, str) and (value.startswith(\"http\")):\n return load_image_from_url(value=value, cv_imread_flags=cv_imread_flags), True\n elif (\n isinstance(value, str)\n and ALLOW_LOADING_IMAGES_FROM_LOCAL_FILESYSTEM\n and os.path.isfile(value)\n ):\n return cv2.imread(value, cv_imread_flags), True\n else:\n return attempt_loading_image_from_string(\n value=value, cv_imread_flags=cv_imread_flags\n )" } ]
[ "function_empty" ]
[ "inference.core.utils.image_utils.validate_numpy_image", "inference.core.utils.image_utils.load_image_with_inferred_type" ]
Python
2
2
{ "total_num": 7, "base_passed_num": 4 }
[ "inference.inference.core.interfaces.camera.video_source.VideoSource::read_frame", "inference.inference.core.interfaces.camera.video_source.VideoSource::__next__", "inference.inference.core.interfaces.camera.utils.get_video_frames_generator", "inference.inference.core.interfaces.camera.video_source.get_from_queue" ]
inference
[ "inference/core/interfaces/camera/video_source.py", "inference/core/interfaces/camera/video_source.py", "inference/core/interfaces/camera/utils.py", "inference/core/interfaces/camera/utils.py", "inference/core/interfaces/camera/video_source.py" ]
[ "tests/inference/unit_tests/core/interfaces/camera/test_utils.py" ]
[ { "class_start_lineno": 181, "class_end_lineno": 738, "func_start_lineno": 526, "func_end_lineno": 555, "func_code": " def read_frame(self, timeout: Optional[float] = None) -> Optional[VideoFrame]:\n \"\"\"\n Method to be used by the consumer to get decoded source frame.\n\n Returns: VideoFrame object with decoded frame and its metadata.\n Throws:\n * EndOfStreamError: when trying to get the frame from closed source.\n \"\"\"\n video_frame: Optional[Union[VideoFrame, str]] = get_from_queue(\n queue=self._frames_buffer,\n on_successful_read=self._video_consumer.notify_frame_consumed,\n timeout=timeout,\n purge=self._buffer_consumption_strategy is BufferConsumptionStrategy.EAGER,\n )\n if video_frame == POISON_PILL:\n raise EndOfStreamError(\n \"Attempted to retrieve frame from stream that already ended.\"\n )\n if video_frame is not None:\n send_video_source_status_update(\n severity=UpdateSeverity.DEBUG,\n event_type=FRAME_CONSUMED_EVENT,\n payload={\n \"frame_timestamp\": video_frame.frame_timestamp,\n \"frame_id\": video_frame.frame_id,\n \"source_id\": video_frame.source_id,\n },\n status_update_handlers=self._status_update_handlers,\n )\n return video_frame" }, { "class_start_lineno": 181, "class_end_lineno": 738, "func_start_lineno": 720, "func_end_lineno": 738, "func_code": " def __next__(self) -> VideoFrame:\n \"\"\"\n Method allowing to use `VideoSource` convenient to read frames\n\n Returns: VideoFrame\n\n Example:\n ```python\n source = VideoSource.init(video_reference=\"./some.mp4\")\n source.start()\n\n for frame in source:\n pass\n ```\n \"\"\"\n try:\n return self.read_frame()\n except EndOfStreamError:\n raise StopIteration()" }, { "class_start_lineno": 1, "class_end_lineno": 516, "func_start_lineno": 479, "func_end_lineno": 494, "func_code": "def limit_frame_rate(\n frames_generator: Iterable[T],\n max_fps: Union[float, int],\n strategy: FPSLimiterStrategy,\n) -> Generator[T, None, None]:\n rate_limiter = RateLimiter(desired_fps=max_fps)\n for frame_data in frames_generator:\n delay = rate_limiter.estimate_next_action_delay()\n if delay <= 0.0:\n rate_limiter.tick()\n yield frame_data\n continue\n if strategy is FPSLimiterStrategy.WAIT:\n time.sleep(delay)\n rate_limiter.tick()\n yield frame_data" }, { "class_start_lineno": 1, "class_end_lineno": 516, "func_start_lineno": 46, "func_end_lineno": 97, "func_code": "def get_video_frames_generator(\n video: Union[VideoSource, str, int],\n max_fps: Optional[Union[float, int]] = None,\n limiter_strategy: Optional[FPSLimiterStrategy] = None,\n) -> Generator[VideoFrame, None, None]:\n \"\"\"\n Util function to create a frames generator from `VideoSource` with possibility to\n limit FPS of consumed frames and dictate what to do if frames are produced to fast.\n\n Args:\n video (Union[VideoSource, str, int]): Either instance of VideoSource or video reference accepted\n by VideoSource.init(...)\n max_fps (Optional[Union[float, int]]): value of maximum FPS rate of generated frames - can be used to limit\n generation frequency\n limiter_strategy (Optional[FPSLimiterStrategy]): strategy used to deal with frames decoding exceeding\n limit of `max_fps`. 
By default - for files, in the interest of processing all frames -\n generation will be awaited, for streams - frames will be dropped on the floor.\n Returns: generator of `VideoFrame`\n\n Example:\n ```python\n from inference.core.interfaces.camera.utils import get_video_frames_generator\n\n for frame in get_video_frames_generator(\n video=\"./some.mp4\",\n max_fps=50,\n ):\n pass\n ```\n \"\"\"\n is_managed_source = False\n if issubclass(type(video), str) or issubclass(type(video), int):\n video = VideoSource.init(\n video_reference=video,\n )\n video.start()\n is_managed_source = True\n if max_fps is None:\n yield from video\n if is_managed_source:\n video.terminate(purge_frames_buffer=True)\n return None\n limiter_strategy = resolve_limiter_strategy(\n explicitly_defined_strategy=limiter_strategy,\n source_properties=video.describe_source().source_properties,\n )\n yield from limit_frame_rate(\n frames_generator=video, max_fps=max_fps, strategy=limiter_strategy\n )\n if is_managed_source:\n video.terminate(purge_frames_buffer=True)\n return None" }, { "class_start_lineno": 1, "class_end_lineno": 1209, "func_start_lineno": 1064, "func_end_lineno": 1089, "func_code": "def get_from_queue(\n queue: Queue,\n timeout: Optional[float] = None,\n on_successful_read: Callable[[], None] = lambda: None,\n purge: bool = False,\n) -> Optional[Any]:\n \"\"\"\n Function is supposed to take element from the queue waiting on the first element to appear using `timeout`\n parameter. One may ask to go to the very last element of the queue and return it - then `purge` should be set\n to True. No additional wait on new elements to appear happen and the purge stops once queue is free returning last\n element consumed.\n queue.task_done() and on_successful_read(...) will be called on each received element.\n \"\"\"\n result = None\n if queue.empty() or not purge:\n try:\n result = queue.get(timeout=timeout)\n queue.task_done()\n on_successful_read()\n except Empty:\n pass\n while not queue.empty() and purge:\n result = queue.get()\n queue.task_done()\n on_successful_read()\n return result" } ]
[ "function_empty" ]
[ "inference.core.interfaces.camera.video_source.VideoSource.read_frame", "inference.core.interfaces.camera.video_source.VideoSource.__next__", "inference.core.interfaces.camera.utils.limit_frame_rate", "inference.core.interfaces.camera.utils.get_video_frames_generator", "inference.core.interfaces.camera.video_source.get_from_queue" ]
Python
4
4
{ "total_num": 42, "base_passed_num": 0 }
[ "inference.inference.core.interfaces.camera.video_source.get_from_queue", "inference.inference.core.interfaces.camera.video_source.VideoSource::read_frame", "inference.inference.core.interfaces.camera.video_source.VideoSource::__next__" ]
inference
[ "inference/core/interfaces/camera/video_source.py", "inference/core/interfaces/camera/video_source.py", "inference/core/interfaces/camera/video_source.py" ]
[ "tests/inference/unit_tests/core/interfaces/camera/test_video_source.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 1209, "func_start_lineno": 1064, "func_end_lineno": 1089, "func_code": "def get_from_queue(\n queue: Queue,\n timeout: Optional[float] = None,\n on_successful_read: Callable[[], None] = lambda: None,\n purge: bool = False,\n) -> Optional[Any]:\n \"\"\"\n Function is supposed to take element from the queue waiting on the first element to appear using `timeout`\n parameter. One may ask to go to the very last element of the queue and return it - then `purge` should be set\n to True. No additional wait on new elements to appear happen and the purge stops once queue is free returning last\n element consumed.\n queue.task_done() and on_successful_read(...) will be called on each received element.\n \"\"\"\n result = None\n if queue.empty() or not purge:\n try:\n result = queue.get(timeout=timeout)\n queue.task_done()\n on_successful_read()\n except Empty:\n pass\n while not queue.empty() and purge:\n result = queue.get()\n queue.task_done()\n on_successful_read()\n return result" }, { "class_start_lineno": 181, "class_end_lineno": 738, "func_start_lineno": 526, "func_end_lineno": 555, "func_code": " def read_frame(self, timeout: Optional[float] = None) -> Optional[VideoFrame]:\n \"\"\"\n Method to be used by the consumer to get decoded source frame.\n\n Returns: VideoFrame object with decoded frame and its metadata.\n Throws:\n * EndOfStreamError: when trying to get the frame from closed source.\n \"\"\"\n video_frame: Optional[Union[VideoFrame, str]] = get_from_queue(\n queue=self._frames_buffer,\n on_successful_read=self._video_consumer.notify_frame_consumed,\n timeout=timeout,\n purge=self._buffer_consumption_strategy is BufferConsumptionStrategy.EAGER,\n )\n if video_frame == POISON_PILL:\n raise EndOfStreamError(\n \"Attempted to retrieve frame from stream that already ended.\"\n )\n if video_frame is not None:\n send_video_source_status_update(\n severity=UpdateSeverity.DEBUG,\n event_type=FRAME_CONSUMED_EVENT,\n payload={\n \"frame_timestamp\": video_frame.frame_timestamp,\n \"frame_id\": video_frame.frame_id,\n \"source_id\": video_frame.source_id,\n },\n status_update_handlers=self._status_update_handlers,\n )\n return video_frame" }, { "class_start_lineno": 181, "class_end_lineno": 738, "func_start_lineno": 720, "func_end_lineno": 738, "func_code": " def __next__(self) -> VideoFrame:\n \"\"\"\n Method allowing to use `VideoSource` convenient to read frames\n\n Returns: VideoFrame\n\n Example:\n ```python\n source = VideoSource.init(video_reference=\"./some.mp4\")\n source.start()\n\n for frame in source:\n pass\n ```\n \"\"\"\n try:\n return self.read_frame()\n except EndOfStreamError:\n raise StopIteration()" } ]
[ "function_empty" ]
[ "inference.core.interfaces.camera.video_source.get_from_queue", "inference.core.interfaces.camera.video_source.VideoSource.read_frame", "inference.core.interfaces.camera.video_source.VideoSource.__next__" ]
Python
3
3
{ "total_num": 45, "base_passed_num": 0 }
[ "inference.inference.core.utils.preprocess.resize_image_keeping_aspect_ratio", "inference.inference.core.utils.preprocess.letterbox_image" ]
inference
[ "inference/core/utils/preprocess.py", "inference/core/utils/preprocess.py", "inference/core/interfaces/stream/sinks.py" ]
[ "tests/inference/unit_tests/core/interfaces/stream/test_sinks.py", "tests/inference/unit_tests/core/utils/test_drawing.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 298, "func_start_lineno": 253, "func_end_lineno": 298, "func_code": "def resize_image_keeping_aspect_ratio(\n image: ImageMetaType,\n desired_size: Tuple[int, int],\n) -> ImageMetaType:\n \"\"\"\n Resize reserving its aspect ratio.\n\n Parameters:\n - image: numpy array representing the image.\n - desired_size: tuple (width, height) representing the target dimensions.\n \"\"\"\n if isinstance(image, np.ndarray):\n img_ratio = image.shape[1] / image.shape[0]\n elif USE_PYTORCH_FOR_PREPROCESSING:\n img_ratio = image.shape[-1] / image.shape[-2]\n else:\n raise ValueError(\n f\"Received an image of unknown type, {type(image)}; \"\n \"This is most likely a bug. Contact Roboflow team through github issues \"\n \"(https://github.com/roboflow/inference/issues) providing full context of the problem\"\n )\n desired_ratio = desired_size[0] / desired_size[1]\n\n # Determine the new dimensions\n if img_ratio >= desired_ratio:\n # Resize by width\n new_width = desired_size[0]\n new_height = int(desired_size[0] / img_ratio)\n else:\n # Resize by height\n new_height = desired_size[1]\n new_width = int(desired_size[1] * img_ratio)\n\n # Resize the image to new dimensions\n if isinstance(image, np.ndarray):\n return cv2.resize(image, (new_width, new_height))\n elif USE_PYTORCH_FOR_PREPROCESSING:\n return torch.nn.functional.interpolate(\n image, size=(new_height, new_width), mode=\"bilinear\"\n )\n else:\n raise ValueError(\n f\"Received an image of unknown type, {type(image)}; \"\n \"This is most likely a bug. Contact Roboflow team through github issues \"\n \"(https://github.com/roboflow/inference/issues) providing full context of the problem\"\n )" }, { "class_start_lineno": 1, "class_end_lineno": 298, "func_start_lineno": 190, "func_end_lineno": 241, "func_code": "def letterbox_image(\n image: ImageMetaType,\n desired_size: Tuple[int, int],\n color: Tuple[int, int, int] = (0, 0, 0),\n) -> ImageMetaType:\n \"\"\"\n Resize and pad image to fit the desired size, preserving its aspect ratio.\n\n Parameters:\n - image: numpy array representing the image.\n - desired_size: tuple (width, height) representing the target dimensions.\n - color: tuple (B, G, R) representing the color to pad with.\n\n Returns:\n - letterboxed image.\n \"\"\"\n resized_img = resize_image_keeping_aspect_ratio(\n image=image,\n desired_size=desired_size,\n )\n new_height, new_width = (\n resized_img.shape[:2]\n if isinstance(resized_img, np.ndarray)\n else resized_img.shape[-2:]\n )\n top_padding = (desired_size[1] - new_height) // 2\n bottom_padding = desired_size[1] - new_height - top_padding\n left_padding = (desired_size[0] - new_width) // 2\n right_padding = desired_size[0] - new_width - left_padding\n if isinstance(resized_img, np.ndarray):\n return cv2.copyMakeBorder(\n resized_img,\n top_padding,\n bottom_padding,\n left_padding,\n right_padding,\n cv2.BORDER_CONSTANT,\n value=color,\n )\n elif USE_PYTORCH_FOR_PREPROCESSING:\n return torch.nn.functional.pad(\n resized_img,\n (left_padding, right_padding, top_padding, bottom_padding),\n \"constant\",\n color[0],\n )\n else:\n raise ValueError(\n f\"Received an image of unknown type, {type(resized_img)}; \"\n \"This is most likely a bug. 
Contact Roboflow team through github issues \"\n \"(https://github.com/roboflow/inference/issues) providing full context of the problem\"\n )" }, { "class_start_lineno": 1, "class_end_lineno": 570, "func_start_lineno": 155, "func_end_lineno": 196, "func_code": "def _handle_frame_rendering(\n frame: Optional[VideoFrame],\n prediction: dict,\n annotators: List[BaseAnnotator],\n display_size: Optional[Tuple[int, int]],\n display_statistics: bool,\n fps_value: Optional[float],\n) -> np.ndarray:\n if frame is None:\n image = np.zeros((256, 256, 3), dtype=np.uint8)\n else:\n try:\n labels = [p[\"class\"] for p in prediction[\"predictions\"]]\n if hasattr(sv.Detections, \"from_inference\"):\n detections = sv.Detections.from_inference(prediction)\n else:\n detections = sv.Detections.from_inference(prediction)\n image = frame.image.copy()\n for annotator in annotators:\n kwargs = {\n \"scene\": image,\n \"detections\": detections,\n }\n if isinstance(annotator, sv.LabelAnnotator):\n kwargs[\"labels\"] = labels\n image = annotator.annotate(**kwargs)\n except (TypeError, KeyError):\n logger.warning(\n f\"Used `render_boxes(...)` sink, but predictions that were provided do not match the expected \"\n f\"format of object detection prediction that could be accepted by \"\n f\"`supervision.Detection.from_inference(...)\"\n )\n image = frame.image.copy()\n if display_size is not None:\n image = letterbox_image(image, desired_size=display_size)\n if display_statistics:\n image = render_statistics(\n image=image,\n frame_timestamp=(frame.frame_timestamp if frame is not None else None),\n fps=fps_value,\n )\n return image" } ]
[ "function_empty" ]
[ "inference.core.utils.preprocess.resize_image_keeping_aspect_ratio", "inference.core.utils.preprocess.letterbox_image", "inference.core.interfaces.stream.sinks._handle_frame_rendering" ]
Python
2
2
{ "total_num": 21, "base_passed_num": 10 }
[ "inference.inference.core.utils.image_utils.validate_numpy_image", "inference.inference.core.utils.image_utils.load_image_from_numpy_str", "inference.inference.core.utils.image_utils.load_image_base64", "inference.inference.core.utils.image_utils.load_image_from_encoded_bytes", "inference.inference.core.utils.image_utils.attempt_loading_image_from_string", "inference.inference.core.utils.image_utils.load_image_with_inferred_type" ]
inference
[ "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py" ]
[ "tests/inference/unit_tests/core/utils/test_image_utils.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 353, "func_end_lineno": 377, "func_code": "def validate_numpy_image(data: np.ndarray) -> None:\n \"\"\"\n Validate if the provided data is a valid numpy image.\n\n Args:\n data (np.ndarray): The numpy array representing an image.\n\n Raises:\n InvalidNumpyInput: If the provided data is not a valid numpy image.\n \"\"\"\n if not issubclass(type(data), np.ndarray):\n raise InvalidNumpyInput(\n message=f\"Data provided as input could not be decoded into np.ndarray object.\",\n public_message=f\"Data provided as input could not be decoded into np.ndarray object.\",\n )\n if len(data.shape) != 3 and len(data.shape) != 2:\n raise InvalidNumpyInput(\n message=f\"For image given as np.ndarray expected 2 or 3 dimensions, got {len(data.shape)} dimensions.\",\n public_message=f\"For image given as np.ndarray expected 2 or 3 dimensions.\",\n )\n if data.shape[-1] != 3 and data.shape[-1] != 1:\n raise InvalidNumpyInput(\n message=f\"For image given as np.ndarray expected 1 or 3 channels, got {data.shape[-1]} channels.\",\n public_message=\"For image given as np.ndarray expected 1 or 3 channels.\",\n )" }, { "class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 318, "func_end_lineno": 345, "func_code": "def load_image_from_numpy_str(value: Union[bytes, str]) -> np.ndarray:\n \"\"\"Loads an image from a numpy array string.\n\n Args:\n value (Union[bytes, str]): Base64 string or byte sequence representing the pickled numpy array of the image.\n\n Returns:\n Image.Image: The loaded PIL image.\n\n Raises:\n InvalidNumpyInput: If the numpy data is invalid.\n \"\"\"\n if not ALLOW_NUMPY_INPUT:\n raise InvalidImageTypeDeclared(\n message=f\"NumPy image type is not supported in this configuration of `inference`.\",\n public_message=f\"NumPy image type is not supported in this configuration of `inference`.\",\n )\n try:\n if isinstance(value, str):\n value = pybase64.b64decode(value)\n data = pickle.loads(value)\n except (EOFError, TypeError, pickle.UnpicklingError, binascii.Error) as error:\n raise InvalidNumpyInput(\n message=f\"Could not unpickle image data. 
Cause: {error}\",\n public_message=\"Could not deserialize pickle payload.\",\n ) from error\n validate_numpy_image(data=data)\n return data" }, { "class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 258, "func_end_lineno": 292, "func_code": "def load_image_base64(\n value: Union[str, bytes], cv_imread_flags=cv2.IMREAD_COLOR\n) -> np.ndarray:\n \"\"\"Loads an image from a base64 encoded string using OpenCV.\n\n Args:\n value (str): Base64 encoded string representing the image.\n\n Returns:\n np.ndarray: The loaded image as a numpy array.\n \"\"\"\n # New routes accept images via json body (str), legacy routes accept bytes which need to be decoded as strings\n if not isinstance(value, str):\n value = value.decode(\"utf-8\")\n value = BASE64_DATA_TYPE_PATTERN.sub(\"\", value)\n try:\n value = pybase64.b64decode(value)\n except binascii.Error as error:\n raise InputImageLoadError(\n message=\"Could not load valid image from base64 string.\",\n public_message=\"Malformed base64 input image.\",\n ) from error\n if len(value) == 0:\n raise InputImageLoadError(\n message=\"Could not load valid image from base64 string.\",\n public_message=\"Empty image payload.\",\n )\n image_np = np.frombuffer(value, np.uint8)\n result = cv2.imdecode(image_np, cv_imread_flags)\n if result is None:\n raise InputImageLoadError(\n message=\"Could not load valid image from base64 string.\",\n public_message=\"Malformed base64 input image.\",\n )\n return result" }, { "class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 496, "func_end_lineno": 516, "func_code": "def load_image_from_encoded_bytes(\n value: bytes, cv_imread_flags: int = cv2.IMREAD_COLOR\n) -> np.ndarray:\n \"\"\"\n Load an image from encoded bytes.\n\n Args:\n value (bytes): The byte sequence representing the image.\n cv_imread_flags (int): OpenCV flags used for image reading.\n\n Returns:\n np.ndarray: The loaded image as a numpy array.\n \"\"\"\n image_np = np.asarray(bytearray(value), dtype=np.uint8)\n image = cv2.imdecode(image_np, cv_imread_flags)\n if image is None:\n raise InputImageLoadError(\n message=f\"Could not decode bytes as image.\",\n public_message=\"Data is not image.\",\n )\n return image" }, { "class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 215, "func_end_lineno": 255, "func_code": "def attempt_loading_image_from_string(\n value: Union[str, bytes, bytearray, _IOBase],\n cv_imread_flags: int = cv2.IMREAD_COLOR,\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Attempt to load an image from a string.\n\n Args:\n value (Union[str, bytes, bytearray, _IOBase]): The image data in string format.\n cv_imread_flags (int): OpenCV flags used for image reading.\n\n Returns:\n Tuple[np.ndarray, bool]: A tuple of the loaded image in numpy array format and a boolean flag indicating if the image is in BGR format.\n \"\"\"\n try:\n return load_image_base64(value=value, cv_imread_flags=cv_imread_flags), True\n except:\n pass\n try:\n return (\n load_image_from_encoded_bytes(value=value, cv_imread_flags=cv_imread_flags),\n True,\n )\n except:\n pass\n try:\n return (\n load_image_from_buffer(value=value, cv_imread_flags=cv_imread_flags),\n True,\n )\n except:\n pass\n try:\n return load_image_from_numpy_str(value=value), True\n except InvalidImageTypeDeclared as error:\n raise error\n except InvalidNumpyInput as error:\n raise InputFormatInferenceFailed(\n message=\"Input image format could not be inferred from string.\",\n public_message=\"Input image format could not be inferred from string.\",\n ) 
from error" }, { "class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 180, "func_end_lineno": 212, "func_code": "def load_image_with_inferred_type(\n value: Any,\n cv_imread_flags: int = cv2.IMREAD_COLOR,\n) -> Tuple[np.ndarray, bool]:\n \"\"\"Load an image by inferring its type.\n\n Args:\n value (Any): The image data.\n cv_imread_flags (int): Flags used for OpenCV's imread function.\n\n Returns:\n Tuple[np.ndarray, bool]: Loaded image as a numpy array and a boolean indicating if the image is in BGR format.\n\n Raises:\n NotImplementedError: If the image type could not be inferred.\n \"\"\"\n if isinstance(value, (np.ndarray, np.generic)):\n validate_numpy_image(data=value)\n return value, True\n elif isinstance(value, Image.Image):\n return np.asarray(value.convert(\"RGB\")), False\n elif isinstance(value, str) and (value.startswith(\"http\")):\n return load_image_from_url(value=value, cv_imread_flags=cv_imread_flags), True\n elif (\n isinstance(value, str)\n and ALLOW_LOADING_IMAGES_FROM_LOCAL_FILESYSTEM\n and os.path.isfile(value)\n ):\n return cv2.imread(value, cv_imread_flags), True\n else:\n return attempt_loading_image_from_string(\n value=value, cv_imread_flags=cv_imread_flags\n )" } ]
[ "function_empty" ]
[ "inference.core.utils.image_utils.validate_numpy_image", "inference.core.utils.image_utils.load_image_from_numpy_str", "inference.core.utils.image_utils.load_image_base64", "inference.core.utils.image_utils.load_image_from_encoded_bytes", "inference.core.utils.image_utils.attempt_loading_image_from_string", "inference.core.utils.image_utils.load_image_with_inferred_type" ]
Python
6
6
{ "total_num": 152, "base_passed_num": 77 }
[ "inference.inference.core.utils.postprocess.get_static_crop_dimensions", "inference.inference.core.utils.postprocess.post_process_bboxes", "inference.inference.core.utils.postprocess.post_process_polygons", "inference.inference.core.utils.postprocess.post_process_keypoints" ]
inference
[ "inference/core/utils/postprocess.py", "inference/core/utils/postprocess.py", "inference/core/utils/postprocess.py", "inference/core/utils/postprocess.py" ]
[ "tests/inference/unit_tests/core/utils/test_postprocess.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 658, "func_start_lineno": 473, "func_end_lineno": 513, "func_code": "def get_static_crop_dimensions(\n orig_shape: Tuple[int, int],\n preproc: dict,\n disable_preproc_static_crop: bool = False,\n) -> Tuple[Tuple[int, int], Tuple[int, int]]:\n \"\"\"\n Generates a transformation based on preprocessing configuration.\n\n Args:\n orig_shape (tuple): The original shape of the object (e.g., image) - (height, width).\n preproc (dict): Preprocessing configuration dictionary, containing information such as static cropping.\n disable_preproc_static_crop (bool, optional): If true, the static crop preprocessing step is disabled for this call. Default is False.\n\n Returns:\n tuple: A tuple containing the shift in the x and y directions, and the updated original shape after cropping.\n \"\"\"\n try:\n if static_crop_should_be_applied(\n preprocessing_config=preproc,\n disable_preproc_static_crop=disable_preproc_static_crop,\n ):\n x_min, y_min, x_max, y_max = standardise_static_crop(\n static_crop_config=preproc[STATIC_CROP_KEY]\n )\n else:\n x_min, y_min, x_max, y_max = 0, 0, 1, 1\n crop_shift_x, crop_shift_y = (\n round(x_min * orig_shape[1]),\n round(y_min * orig_shape[0]),\n )\n cropped_percent_x = x_max - x_min\n cropped_percent_y = y_max - y_min\n orig_shape = (\n round(orig_shape[0] * cropped_percent_y),\n round(orig_shape[1] * cropped_percent_x),\n )\n return (crop_shift_x, crop_shift_y), orig_shape\n except KeyError as error:\n raise PostProcessingError(\n f\"Could not find a proper configuration key {error} in post-processing.\"\n )" }, { "class_start_lineno": 1, "class_end_lineno": 658, "func_start_lineno": 98, "func_end_lineno": 163, "func_code": "def post_process_bboxes(\n predictions: List[List[List[float]]],\n infer_shape: Tuple[int, int],\n img_dims: List[Tuple[int, int]],\n preproc: dict,\n disable_preproc_static_crop: bool = False,\n resize_method: str = \"Stretch to\",\n) -> List[List[List[float]]]:\n \"\"\"\n Postprocesses each patch of detections by scaling them to the original image coordinates and by shifting them based on a static crop preproc (if applied).\n\n Args:\n predictions (List[List[List[float]]]): The predictions output from NMS, indices are: batch x prediction x [x1, y1, x2, y2, ...].\n infer_shape (Tuple[int, int]): The shape of the inference image.\n img_dims (List[Tuple[int, int]]): The dimensions of the original image for each batch, indices are: batch x [height, width].\n preproc (dict): Preprocessing configuration dictionary.\n disable_preproc_static_crop (bool, optional): If true, the static crop preprocessing step is disabled for this call. Default is False.\n resize_method (str, optional): Resize method for image. 
Defaults to \"Stretch to\".\n\n Returns:\n List[List[List[float]]]: The scaled and shifted predictions, indices are: batch x prediction x [x1, y1, x2, y2, ...].\n \"\"\"\n\n # Get static crop params\n scaled_predictions = []\n # Loop through batches\n for i, batch_predictions in enumerate(predictions):\n if len(batch_predictions) == 0:\n scaled_predictions.append([])\n continue\n np_batch_predictions = np.array(batch_predictions)\n # Get bboxes from predictions (x1,y1,x2,y2)\n predicted_bboxes = np_batch_predictions[:, :4]\n (crop_shift_x, crop_shift_y), origin_shape = get_static_crop_dimensions(\n img_dims[i],\n preproc,\n disable_preproc_static_crop=disable_preproc_static_crop,\n )\n if resize_method == \"Stretch to\":\n predicted_bboxes = stretch_bboxes(\n predicted_bboxes=predicted_bboxes,\n infer_shape=infer_shape,\n origin_shape=origin_shape,\n )\n elif (\n resize_method == \"Fit (black edges) in\"\n or resize_method == \"Fit (white edges) in\"\n or resize_method == \"Fit (grey edges) in\"\n ):\n predicted_bboxes = undo_image_padding_for_predicted_boxes(\n predicted_bboxes=predicted_bboxes,\n infer_shape=infer_shape,\n origin_shape=origin_shape,\n )\n predicted_bboxes = clip_boxes_coordinates(\n predicted_bboxes=predicted_bboxes,\n origin_shape=origin_shape,\n )\n predicted_bboxes = shift_bboxes(\n bboxes=predicted_bboxes,\n shift_x=crop_shift_x,\n shift_y=crop_shift_y,\n )\n np_batch_predictions[:, :4] = predicted_bboxes\n scaled_predictions.append(np_batch_predictions.tolist())\n return scaled_predictions" }, { "class_start_lineno": 1, "class_end_lineno": 658, "func_start_lineno": 393, "func_end_lineno": 441, "func_code": "def post_process_polygons(\n origin_shape: Tuple[int, int],\n polys: List[List[Tuple[float, float]]],\n infer_shape: Tuple[int, int],\n preproc: dict,\n resize_method: str = \"Stretch to\",\n) -> List[List[Tuple[float, float]]]:\n \"\"\"Scales and shifts polygons based on the given image shapes and preprocessing method.\n\n This function performs polygon scaling and shifting based on the specified resizing method and\n pre-processing steps. The polygons are transformed according to the ratio and padding between two images.\n\n Args:\n origin_shape (tuple of int): Shape of the source image (height, width).\n infer_shape (tuple of int): Shape of the target image (height, width).\n polys (list of list of tuple): List of polygons, where each polygon is represented by a list of (x, y) coordinates.\n preproc (object): Preprocessing details used for generating the transformation.\n resize_method (str, optional): Resizing method, either \"Stretch to\", \"Fit (black edges) in\", \"Fit (white edges) in\", or \"Fit (grey edges) in\". 
Defaults to \"Stretch to\".\n\n Returns:\n list of list of tuple: A list of shifted and scaled polygons.\n \"\"\"\n (crop_shift_x, crop_shift_y), origin_shape = get_static_crop_dimensions(\n origin_shape, preproc\n )\n new_polys = []\n if resize_method == \"Stretch to\":\n width_ratio = origin_shape[1] / infer_shape[1]\n height_ratio = origin_shape[0] / infer_shape[0]\n new_polys = scale_polygons(\n polygons=polys,\n x_scale=width_ratio,\n y_scale=height_ratio,\n )\n elif resize_method in {\n \"Fit (black edges) in\",\n \"Fit (white edges) in\",\n \"Fit (grey edges) in\",\n }:\n new_polys = undo_image_padding_for_predicted_polygons(\n polygons=polys,\n infer_shape=infer_shape,\n origin_shape=origin_shape,\n )\n shifted_polys = []\n for poly in new_polys:\n poly = [(p[0] + crop_shift_x, p[1] + crop_shift_y) for p in poly]\n shifted_polys.append(poly)\n return shifted_polys" }, { "class_start_lineno": 1, "class_end_lineno": 658, "func_start_lineno": 522, "func_end_lineno": 585, "func_code": "def post_process_keypoints(\n predictions: List[List[List[float]]],\n keypoints_start_index: int,\n infer_shape: Tuple[int, int],\n img_dims: List[Tuple[int, int]],\n preproc: dict,\n disable_preproc_static_crop: bool = False,\n resize_method: str = \"Stretch to\",\n) -> List[List[List[float]]]:\n \"\"\"Scales and shifts keypoints based on the given image shapes and preprocessing method.\n\n This function performs polygon scaling and shifting based on the specified resizing method and\n pre-processing steps. The polygons are transformed according to the ratio and padding between two images.\n\n Args:\n predictions: predictions from model\n keypoints_start_index: offset in the 3rd dimension pointing where in the prediction start keypoints [(x, y, cfg), ...] for each keypoint class\n img_dims list of (tuple of int): Shape of the source image (height, width).\n infer_shape (tuple of int): Shape of the target image (height, width).\n preproc (object): Preprocessing details used for generating the transformation.\n resize_method (str, optional): Resizing method, either \"Stretch to\", \"Fit (black edges) in\", \"Fit (white edges) in\", or \"Fit (grey edges) in\". 
Defaults to \"Stretch to\".\n disable_preproc_static_crop: flag to disable static crop\n Returns:\n list of list of list: predictions with post-processed keypoints\n \"\"\"\n # Get static crop params\n scaled_predictions = []\n # Loop through batches\n for i, batch_predictions in enumerate(predictions):\n if len(batch_predictions) == 0:\n scaled_predictions.append([])\n continue\n np_batch_predictions = np.array(batch_predictions)\n keypoints = np_batch_predictions[:, keypoints_start_index:]\n (crop_shift_x, crop_shift_y), origin_shape = get_static_crop_dimensions(\n img_dims[i],\n preproc,\n disable_preproc_static_crop=disable_preproc_static_crop,\n )\n if resize_method == \"Stretch to\":\n keypoints = stretch_keypoints(\n keypoints=keypoints,\n infer_shape=infer_shape,\n origin_shape=origin_shape,\n )\n elif (\n resize_method == \"Fit (black edges) in\"\n or resize_method == \"Fit (white edges) in\"\n or resize_method == \"Fit (grey edges) in\"\n ):\n keypoints = undo_image_padding_for_predicted_keypoints(\n keypoints=keypoints,\n infer_shape=infer_shape,\n origin_shape=origin_shape,\n )\n keypoints = clip_keypoints_coordinates(\n keypoints=keypoints, origin_shape=origin_shape\n )\n keypoints = shift_keypoints(\n keypoints=keypoints, shift_x=crop_shift_x, shift_y=crop_shift_y\n )\n np_batch_predictions[:, keypoints_start_index:] = keypoints\n scaled_predictions.append(np_batch_predictions.tolist())\n return scaled_predictions" } ]
[ "function_empty" ]
[ "inference.core.utils.postprocess.get_static_crop_dimensions", "inference.core.utils.postprocess.post_process_bboxes", "inference.core.utils.postprocess.post_process_polygons", "inference.core.utils.postprocess.post_process_keypoints" ]
Python
4
4
{ "total_num": 54, "base_passed_num": 24 }
[ "open-iris.src.iris.utils.math.estimate_diameter", "open-iris.src.iris.nodes.normalization.nonlinear_normalization.NonlinearNormalization::_generate_correspondences" ]
open-iris
[ "iris/utils/math.py", "iris/nodes/normalization/nonlinear_normalization.py" ]
[ "tests/unit_tests/nodes/normalization/test_nonlinear_normalization.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 203, "func_start_lineno": 38, "func_end_lineno": 50, "func_code": "def estimate_diameter(polygon: np.ndarray) -> float:\n \"\"\"Estimates the diameter of an arbitrary arc by evaluating the maximum distance between any two points on the arc.\n\n Args:\n polygon (np.ndarray): Polygon points.\n\n Returns:\n float: Estimated diameter length.\n\n Reference:\n [1] https://sparrow.dev/pairwise-distance-in-numpy/\n \"\"\"\n return float(np.linalg.norm(polygon[:, None, :] - polygon[None, :, :], axis=-1).max())" }, { "class_start_lineno": 13, "class_end_lineno": 113, "func_start_lineno": 89, "func_end_lineno": 113, "func_code": " def _generate_correspondences(self, pupil_points: np.ndarray, iris_points: np.ndarray) -> np.ndarray:\n \"\"\"Generate corresponding positions in original image.\n\n Args:\n pupil_points (np.ndarray): Pupil bounding points. NumPy array of shape (num_points x 2).\n iris_points (np.ndarray): Iris bounding points. NumPy array of shape (num_points x 2).\n\n Returns:\n np.ndarray: generated corresponding points.\n \"\"\"\n pupil_diameter = math.estimate_diameter(pupil_points)\n iris_diameter = math.estimate_diameter(iris_points)\n p2i_ratio = pupil_diameter / iris_diameter\n\n if p2i_ratio <= 0 or p2i_ratio >= 1:\n raise NormalizationError(f\"Invalid pupil to iris ratio, not in the range (0,1): {p2i_ratio}.\")\n\n src_points = np.array(\n [\n pupil_points + x * (iris_points - pupil_points)\n for x in self.params.intermediate_radiuses[round(100 * (p2i_ratio))]\n ]\n )\n\n return np.round(src_points).astype(int)" } ]
[ "function_empty" ]
[ "iris.utils.math.estimate_diameter", "iris.nodes.normalization.nonlinear_normalization.NonlinearNormalization._generate_correspondences" ]
Python
2
2
{ "total_num": 3, "base_passed_num": 2 }
[ "open-iris.src.iris.utils.math.area", "open-iris.src.iris.nodes.vectorization.contouring.filter_polygon_areas", "open-iris.src.iris.nodes.vectorization.contouring.ContouringAlgorithm::_filter_contours" ]
open-iris
[ "iris/utils/math.py", "iris/nodes/vectorization/contouring.py", "iris/nodes/vectorization/contouring.py" ]
[ "tests/unit_tests/nodes/vectorization/test_contouring.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 203, "func_start_lineno": 7, "func_end_lineno": 35, "func_code": "def area(array: np.ndarray, signed: bool = False) -> float:\n \"\"\"Shoelace formula for simple polygon area calculation.\n\n WARNING: This formula only works for \"simple polygons\", i.e planar polygon without self-intersection nor holes.\n These conditions are not checked within this function.\n\n Args:\n array (np.ndarray): np array representing a polygon as a list of points, i.e. of shape (_, 2).\n signed (bool): If True, the area is signed, i.e. negative if the polygon is oriented clockwise.\n\n Returns:\n float: Polygon area\n\n Raises:\n ValueError: if the input array does not have shape (_, 2)\n\n References:\n [1] https://en.wikipedia.org/wiki/Shoelace_formula\n [2] https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\n \"\"\"\n if len(array.shape) != 2 or array.shape[1] != 2:\n raise ValueError(f\"Unable to determine the area of a polygon with shape {array.shape}. Expecting (_, 2).\")\n\n xs, ys = array.T\n area = 0.5 * (np.dot(xs, np.roll(ys, 1)) - np.dot(ys, np.roll(xs, 1)))\n if not signed:\n area = abs(area)\n\n return float(area)" }, { "class_start_lineno": 1, "class_end_lineno": 133, "func_start_lineno": 13, "func_end_lineno": 35, "func_code": "def filter_polygon_areas(\n polygons: List[np.ndarray], rel_tr: NonNegativeFloat = 0.03, abs_tr: NonNegativeFloat = 0.0\n) -> List[np.ndarray]:\n \"\"\"Filter out polygons whose area is below either an absolute threshold or a fraction of the largest area.\n\n Args:\n polygons (List[np.ndarray]): List of polygons to filter.\n rel_tr (NonNegativeFloat, optional): Relative threshold. Defaults to 0.03.\n abs_tr (NonNegativeFloat, optional): Absolute threshold. Defaults to 0.0.\n\n Returns:\n List[np.ndarray]: Filtered polygons' list.\n \"\"\"\n areas = [area(polygon) if len(polygon) > 2 else 1.0 for polygon in polygons]\n area_factors = np.array(areas) / np.max(areas)\n\n filtered_polygons = [\n polygon\n for area, area_factor, polygon in zip(areas, area_factors, polygons)\n if area > abs_tr and area_factor > rel_tr\n ]\n\n return filtered_polygons" }, { "class_start_lineno": 38, "class_end_lineno": 133, "func_start_lineno": 121, "func_end_lineno": 133, "func_code": " def _filter_contours(self, contours: List[np.ndarray]) -> List[np.ndarray]:\n \"\"\"Filter contours based on predefined filters.\n\n Args:\n contours (List[np.ndarray]): Contours list.\n\n Returns:\n List[np.ndarray]: Filtered list of contours.\n \"\"\"\n for filter_func in self.params.contour_filters:\n contours = filter_func(contours)\n\n return contours" } ]
[ "function_empty", "TDD" ]
[ "iris.utils.math.area", "iris.nodes.vectorization.contouring.filter_polygon_areas", "iris.nodes.vectorization.contouring.ContouringAlgorithm._filter_contours" ]
Python
2
3
{ "total_num": 12, "base_passed_num": 9 }
[ "open-iris.src.iris.utils.math.cartesian2polar", "open-iris.src.iris.nodes.geometry_estimation.linear_extrapolation.LinearExtrapolation::_estimate" ]
open-iris
[ "iris/utils/math.py", "iris/nodes/geometry_estimation/linear_extrapolation.py" ]
[ "tests/unit_tests/nodes/geometry_estimation/test_linear_extrapolation.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 203, "func_start_lineno": 53, "func_end_lineno": 73, "func_code": "def cartesian2polar(xs: np.ndarray, ys: np.ndarray, center_x: float, center_y: float) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Convert xs and ys cartesian coordinates to polar coordinates.\n\n Args:\n xs (np.ndarray): x values.\n ys (np.ndarray): y values.\n center_x (float): center's x.\n center_y (float): center's y.\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: Converted coordinates (rhos, phis).\n \"\"\"\n x_rel: np.ndarray = xs - center_x\n y_rel: np.ndarray = ys - center_y\n\n C = np.vectorize(complex)(x_rel, y_rel)\n\n rho = np.abs(C)\n phi = np.angle(C) % (2 * np.pi)\n\n return rho, phi" }, { "class_start_lineno": 12, "class_end_lineno": 82, "func_start_lineno": 58, "func_end_lineno": 82, "func_code": " def _estimate(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:\n \"\"\"Estimate a circle fit for a single contour.\n\n Args:\n vertices (np.ndarray): Contour's vertices.\n center_xy (Tuple[float, float]): Contour's center position.\n\n Returns:\n np.ndarray: Estimated polygon.\n \"\"\"\n rhos, phis = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)\n\n padded_rhos = np.concatenate([rhos, rhos, rhos])\n padded_phis = np.concatenate([phis - 2 * np.pi, phis, phis + 2 * np.pi])\n\n interpolated_phis = np.arange(padded_phis.min(), padded_phis.max(), np.radians(self.params.dphi))\n interpolated_rhos = np.interp(interpolated_phis, xp=padded_phis, fp=padded_rhos, period=2 * np.pi)\n\n mask = (interpolated_phis >= 0) & (interpolated_phis < 2 * np.pi)\n interpolated_phis, interpolated_rhos = interpolated_phis[mask], interpolated_rhos[mask]\n\n xs, ys = math.polar2cartesian(interpolated_rhos, interpolated_phis, *center_xy)\n estimated_vertices = np.column_stack([xs, ys])\n\n return estimated_vertices" } ]
[ "function_empty", "TDD" ]
[ "iris.utils.math.cartesian2polar", "iris.nodes.geometry_estimation.linear_extrapolation.LinearExtrapolation._estimate" ]
Python
1
2
{ "total_num": 12, "base_passed_num": 0 }
[ "open-iris.src.iris.callbacks.pipeline_trace.PipelineCallTraceStorage::get", "open-iris.src.iris.callbacks.pipeline_trace.PipelineCallTraceStorage::__getitem__" ]
open-iris
[ "iris/callbacks/pipeline_trace.py", "iris/callbacks/pipeline_trace.py" ]
[ "tests/unit_tests/callbacks/test_pipeline_trace.py" ]
[ { "class_start_lineno": 16, "class_end_lineno": 146, "func_start_lineno": 52, "func_end_lineno": 67, "func_code": " def get(self, result_name: str) -> Any:\n \"\"\"Get result_name result.\n\n Args:\n result_name (str): Result name.\n\n Raises:\n PipelineCallTraceStorageError: Raised if result_name is not found.\n\n Returns:\n Any: Result object.\n \"\"\"\n if result_name not in self._storage.keys():\n raise PipelineCallTraceStorageError(f\"Unknown result name: {result_name}\")\n\n return self._storage[result_name]" }, { "class_start_lineno": 16, "class_end_lineno": 146, "func_start_lineno": 30, "func_end_lineno": 42, "func_code": " def __getitem__(self, result_name: str) -> Any:\n \"\"\"Get result_name result.\n\n Args:\n result_name (str): Result name.\n\n Raises:\n PipelineCallTraceStorageError: Raised if result_name is not found.\n\n Returns:\n Any: Result object.\n \"\"\"\n return self.get(result_name)" } ]
[ "function_empty" ]
[ "iris.callbacks.pipeline_trace.PipelineCallTraceStorage.get", "iris.callbacks.pipeline_trace.PipelineCallTraceStorage.__getitem__" ]
Python
2
2
{ "total_num": 8, "base_passed_num": 2 }
[ "open-iris.src.iris.utils.math.cartesian2polar", "open-iris.src.iris.nodes.eye_properties_estimation.occlusion_calculator.OcclusionCalculator::_get_quantile_points" ]
open-iris
[ "iris/utils/math.py", "iris/nodes/eye_properties_estimation/occlusion_calculator.py" ]
[ "tests/unit_tests/nodes/eye_properties_estimation/test_occlusion_calculator.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 203, "func_start_lineno": 53, "func_end_lineno": 73, "func_code": "def cartesian2polar(xs: np.ndarray, ys: np.ndarray, center_x: float, center_y: float) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Convert xs and ys cartesian coordinates to polar coordinates.\n\n Args:\n xs (np.ndarray): x values.\n ys (np.ndarray): y values.\n center_x (float): center's x.\n center_y (float): center's y.\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: Converted coordinates (rhos, phis).\n \"\"\"\n x_rel: np.ndarray = xs - center_x\n y_rel: np.ndarray = ys - center_y\n\n C = np.vectorize(complex)(x_rel, y_rel)\n\n rho = np.abs(C)\n phi = np.angle(C) % (2 * np.pi)\n\n return rho, phi" }, { "class_start_lineno": 12, "class_end_lineno": 142, "func_start_lineno": 99, "func_end_lineno": 142, "func_code": " def _get_quantile_points(\n self, iris_coords: np.ndarray, eye_orientation: EyeOrientation, eye_centers: EyeCenters\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Get those iris's points which fall into a specified quantile.\n\n Args:\n iris_coords (np.ndarray): Iris polygon coordinates.\n eye_orientation: (EyeOrientation): Eye orientation.\n eye_centers: (EyeCenters): Eye centers.\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: Tuple with xs and ys that falls into quantile region.\n \"\"\"\n orientation_angle = np.degrees(eye_orientation.angle)\n num_rotations = -round(orientation_angle * len(iris_coords) / 360.0)\n\n iris_xs, iris_ys = iris_coords[:, 0], iris_coords[:, 1]\n iris_rhos, iris_phis = math.cartesian2polar(iris_xs, iris_ys, eye_centers.iris_x, eye_centers.iris_y)\n\n iris_phis = np.roll(iris_phis, num_rotations, axis=0)\n iris_rhos = np.roll(iris_rhos, num_rotations, axis=0)\n\n scaled_quantile = round(self.params.quantile_angle * len(iris_coords) / 360.0)\n\n phis2mask = np.concatenate(\n [\n iris_phis[:scaled_quantile],\n iris_phis[-scaled_quantile:],\n iris_phis[len(iris_phis) // 2 : len(iris_phis) // 2 + scaled_quantile],\n iris_phis[len(iris_phis) // 2 - scaled_quantile : len(iris_phis) // 2],\n ]\n )\n rhos2mask = np.concatenate(\n [\n iris_rhos[:scaled_quantile],\n iris_rhos[-scaled_quantile:],\n iris_rhos[len(iris_rhos) // 2 : len(iris_rhos) // 2 + scaled_quantile],\n iris_rhos[len(iris_rhos) // 2 - scaled_quantile : len(iris_rhos) // 2],\n ]\n )\n phis2mask, rhos2mask = zip(*sorted(zip(phis2mask, rhos2mask)))\n xs2mask, ys2mask = math.polar2cartesian(rhos2mask, phis2mask, eye_centers.iris_x, eye_centers.iris_y)\n\n return xs2mask, ys2mask" } ]
[ "function_empty", "TDD" ]
[ "iris.utils.math.cartesian2polar", "iris.nodes.eye_properties_estimation.occlusion_calculator.OcclusionCalculator._get_quantile_points" ]
Python
1
2
{ "total_num": 19, "base_passed_num": 1 }
[ "open-iris.src.iris.nodes.eye_properties_estimation.bisectors_method.BisectorsMethod::_calculate_perpendicular_bisectors", "open-iris.src.iris.nodes.eye_properties_estimation.bisectors_method.BisectorsMethod::_find_center_coords" ]
open-iris
[ "iris/nodes/eye_properties_estimation/bisectors_method.py", "iris/nodes/eye_properties_estimation/bisectors_method.py" ]
[ "tests/unit_tests/nodes/eye_properties_estimation/test_bisectors_method.py", "tests/unit_tests/nodes/eye_properties_estimation/test_pupil_iris_property_calculator.py" ]
[ { "class_start_lineno": 11, "class_end_lineno": 170, "func_start_lineno": 84, "func_end_lineno": 140, "func_code": " def _calculate_perpendicular_bisectors(\n self, polygon: np.ndarray, min_distance_between_sector_points_in_px: float\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Calculate the perpendicular bisector of self.params.num_bisectors randomly chosen points from a polygon's vertices.\n A pair of points is used if their distance is larger then min_distance_between_sector_points_in_px.\n\n Args:\n polygon (np.ndarray): np.ndarray based on which we are searching the center of a circular shape.\n min_distance_between_sector_points_in_px (float): Minimum distance between sector points.\n\n Raises:\n EyeCentersEstimationError: Raised if not able to find enough random pairs of points on the arc with a large enough distance!\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: Calculated perpendicular bisectors.\n \"\"\"\n np.random.seed(142857)\n\n bisectors_first_points = np.empty([0, 2])\n bisectors_second_points = np.empty([0, 2])\n for _ in range(self.params.max_iterations):\n random_indices = np.random.choice(len(polygon), size=(self.params.num_bisectors, 2))\n\n first_drawn_points = polygon[random_indices[:, 0]]\n second_drawn_points = polygon[random_indices[:, 1]]\n\n norms = np.linalg.norm(first_drawn_points - second_drawn_points, axis=1)\n mask = norms > min_distance_between_sector_points_in_px\n\n bisectors_first_points = np.vstack([bisectors_first_points, first_drawn_points[mask]])\n bisectors_second_points = np.vstack([bisectors_second_points, second_drawn_points[mask]])\n\n if len(bisectors_first_points) >= self.params.num_bisectors:\n break\n else:\n raise EyeCentersEstimationError(\n \"Not able to find enough random pairs of points on the arc with a large enough distance!\"\n )\n\n bisectors_first_points = bisectors_first_points[: self.params.num_bisectors]\n bisectors_second_points = bisectors_second_points[: self.params.num_bisectors]\n\n bisectors_center = (bisectors_first_points + bisectors_second_points) / 2\n\n # Flip xs with ys and flip sign of on of them to create a 90deg rotation\n inv_bisectors_center_slope = np.fliplr(bisectors_second_points - bisectors_first_points)\n inv_bisectors_center_slope[:, 1] = -inv_bisectors_center_slope[:, 1]\n\n # Add perpendicular vector to center and normalize\n norm = np.linalg.norm(inv_bisectors_center_slope, axis=1)\n inv_bisectors_center_slope[:, 0] /= norm\n inv_bisectors_center_slope[:, 1] /= norm\n\n first_bisectors_point = bisectors_center - inv_bisectors_center_slope\n second_bisectors_point = bisectors_center + inv_bisectors_center_slope\n\n return first_bisectors_point, second_bisectors_point" }, { "class_start_lineno": 11, "class_end_lineno": 170, "func_start_lineno": 66, "func_end_lineno": 82, "func_code": " def _find_center_coords(self, polygon: np.ndarray, diameter: float) -> Tuple[float, float]:\n \"\"\"Find center coordinates of a polygon.\n\n Args:\n polygon (np.ndarray): np.ndarray.\n diameter (float): diameter of the polygon.\n\n Returns:\n Tuple[float, float]: Tuple with the center location coordinates (x, y).\n \"\"\"\n min_distance_between_sector_points_in_px = self.params.min_distance_between_sector_points * diameter\n\n first_bisectors_point, second_bisectors_point = self._calculate_perpendicular_bisectors(\n polygon, min_distance_between_sector_points_in_px\n )\n\n return self._find_best_intersection(first_bisectors_point, second_bisectors_point)" } ]
[ "function_empty", "TDD" ]
[ "iris.nodes.eye_properties_estimation.bisectors_method.BisectorsMethod._calculate_perpendicular_bisectors", "iris.nodes.eye_properties_estimation.bisectors_method.BisectorsMethod._find_center_coords" ]
Python
1
2
{ "total_num": 20, "base_passed_num": 12 }
[ "open-iris.src.iris.utils.math.cartesian2polar", "open-iris.src.iris.nodes.geometry_refinement.smoothing.Smoothing::_cut_into_arcs", "open-iris.src.iris.nodes.geometry_refinement.smoothing.Smoothing::_smooth_arc", "open-iris.src.iris.nodes.geometry_refinement.smoothing.Smoothing::_smooth_circular_shape" ]
open-iris
[ "iris/utils/math.py", "iris/nodes/geometry_refinement/smoothing.py", "iris/nodes/geometry_refinement/smoothing.py", "iris/nodes/geometry_refinement/smoothing.py" ]
[ "tests/unit_tests/nodes/geometry_refinement/test_smoothing.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 203, "func_start_lineno": 53, "func_end_lineno": 73, "func_code": "def cartesian2polar(xs: np.ndarray, ys: np.ndarray, center_x: float, center_y: float) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"Convert xs and ys cartesian coordinates to polar coordinates.\n\n Args:\n xs (np.ndarray): x values.\n ys (np.ndarray): y values.\n center_x (float): center's x.\n center_y (float): center's y.\n\n Returns:\n Tuple[np.ndarray, np.ndarray]: Converted coordinates (rhos, phis).\n \"\"\"\n x_rel: np.ndarray = xs - center_x\n y_rel: np.ndarray = ys - center_y\n\n C = np.vectorize(complex)(x_rel, y_rel)\n\n rho = np.abs(C)\n phi = np.angle(C) % (2 * np.pi)\n\n return rho, phi" }, { "class_start_lineno": 12, "class_end_lineno": 256, "func_start_lineno": 84, "func_end_lineno": 119, "func_code": " def _cut_into_arcs(self, polygon: np.ndarray, center_xy: Tuple[float, float]) -> Tuple[List[np.ndarray], int]:\n \"\"\"Cut contour into arcs.\n\n Args:\n polygon (np.ndarray): Contour polygon.\n center_xy (Tuple[float, float]): Polygon's center.\n\n Returns:\n Tuple[List[np.ndarray], int]: Tuple with: (list of list of vertices, number of gaps detected in a contour).\n \"\"\"\n rho, phi = math.cartesian2polar(polygon[:, 0], polygon[:, 1], *center_xy)\n phi, rho = self._sort_two_arrays(phi, rho)\n\n differences = np.abs(phi - np.roll(phi, -1))\n # True distance between first and last point\n differences[-1] = 2 * np.pi - differences[-1]\n\n gap_indices = np.argwhere(differences > np.radians(self.params.gap_threshold)).flatten()\n\n if gap_indices.size < 2:\n return [polygon], gap_indices.size\n\n gap_indices += 1\n phi, rho = np.split(phi, gap_indices), np.split(rho, gap_indices)\n\n arcs = [\n np.column_stack(math.polar2cartesian(rho_coords, phi_coords, *center_xy))\n for rho_coords, phi_coords in zip(rho, phi)\n ]\n\n # Connect arc which lies between 0 and 2π.\n if len(arcs) == gap_indices.size + 1:\n arcs[0] = np.vstack([arcs[0], arcs[-1]])\n arcs = arcs[:-1]\n\n return arcs, gap_indices.size" }, { "class_start_lineno": 12, "class_end_lineno": 256, "func_start_lineno": 121, "func_end_lineno": 144, "func_code": " def _smooth_arc(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:\n \"\"\"Smooth a single contour arc.\n\n Args:\n vertices (np.ndarray): Arc's vertices.\n center_xy (Tuple[float, float]): Center of an entire contour.\n\n Returns:\n np.ndarray: Smoothed arc's vertices.\n \"\"\"\n rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)\n phi, rho = self._sort_two_arrays(phi, rho)\n\n idx = self._find_start_index(phi)\n offset = phi[idx]\n relative_phi = (phi - offset) % (2 * np.pi)\n\n smoothed_relative_phi, smoothed_rho = self._smooth_array(relative_phi, rho)\n\n smoothed_phi = (smoothed_relative_phi + offset) % (2 * np.pi)\n\n x_smoothed, y_smoothed = math.polar2cartesian(smoothed_rho, smoothed_phi, *center_xy)\n\n return np.column_stack([x_smoothed, y_smoothed])" }, { "class_start_lineno": 12, "class_end_lineno": 256, "func_start_lineno": 146, "func_end_lineno": 168, "func_code": " def _smooth_circular_shape(self, vertices: np.ndarray, center_xy: Tuple[float, float]) -> np.ndarray:\n \"\"\"Smooth arc in a form of a circular shape.\n\n Args:\n vertices (np.ndarray): Arc's vertices.\n center_xy (Tuple[float, float]): Center of an entire contour.\n\n Returns:\n np.ndarray: Smoothed arc's vertices.\n \"\"\"\n rho, phi = math.cartesian2polar(vertices[:, 0], vertices[:, 1], *center_xy)\n\n padded_phi = 
np.concatenate([phi - 2 * np.pi, phi, phi + 2 * np.pi])\n padded_rho = np.concatenate([rho, rho, rho])\n\n smoothed_phi, smoothed_rho = self._smooth_array(padded_phi, padded_rho)\n\n mask = (smoothed_phi >= 0) & (smoothed_phi < 2 * np.pi)\n rho_smoothed, phi_smoothed = smoothed_rho[mask], smoothed_phi[mask]\n\n x_smoothed, y_smoothed = math.polar2cartesian(rho_smoothed, phi_smoothed, *center_xy)\n\n return np.column_stack([x_smoothed, y_smoothed])" } ]
[ "function_empty", "TDD" ]
[ "iris.utils.math.cartesian2polar", "iris.nodes.geometry_refinement.smoothing.Smoothing._cut_into_arcs", "iris.nodes.geometry_refinement.smoothing.Smoothing._smooth_arc", "iris.nodes.geometry_refinement.smoothing.Smoothing._smooth_circular_shape" ]
Python
2
4
{ "total_num": 12, "base_passed_num": 5 }
[ "open-iris.src.iris.pipelines.iris_pipeline.IRISPipeline::instanciate_class", "open-iris.src.iris.pipelines.iris_pipeline.IRISPipeline::instanciate_pipeline", "open-iris.src.iris.pipelines.iris_pipeline.IRISPipeline::instanciate_node", "open-iris.src.iris.pipelines.iris_pipeline.IRISPipeline::instanciate_nodes" ]
open-iris
[ "iris/pipelines/iris_pipeline.py", "iris/pipelines/iris_pipeline.py", "iris/pipelines/iris_pipeline.py", "iris/pipelines/iris_pipeline.py" ]
[ "tests/unit_tests/pipelines/test_iris_pipeline.py" ]
[ { "class_start_lineno": 27, "class_end_lineno": 324, "func_start_lineno": 225, "func_end_lineno": 245, "func_code": " def instanciate_class(self, class_name: str, kwargs: Dict[str, Any]) -> Callable:\n \"\"\"Instanciate a class from its string definition and its kwargs.\n\n This function relies on pydoc.locate, a safe way to instanciate a class from its string definition, which itself relies on pydoc.safe_import.\n\n Args:\n class_name (str): name of the class.\n kwargs (Dict): kwargs to pass to the class at instanciation time\n\n Returns:\n Callable: the instanciated class\n\n Raises:\n IRISPipelineError: Raised if the class cannot be located.\n \"\"\"\n object_class = pydoc.locate(class_name)\n\n if object_class is None:\n raise IRISPipelineError(f\"Could not locate class {class_name}\")\n\n return object_class(**kwargs)" }, { "class_start_lineno": 27, "class_end_lineno": 324, "func_start_lineno": 179, "func_end_lineno": 200, "func_code": " def instanciate_pipeline(self) -> List[PipelineNode]:\n \"\"\"Given a list of PipelineNodes, crawl the parameters and instanciate the PipelineClass available.\n\n Returns:\n List[PipelineNode]: pipeline with instanciated parameters\n \"\"\"\n instanciated_pipeline = []\n for node in self.params.pipeline:\n current_node = node\n for param_name, param_value in node.algorithm.params.items():\n if isinstance(param_value, (tuple, list)):\n for i, value in enumerate(param_value):\n if isinstance(value, PipelineClass):\n current_node.algorithm.params[param_name][i] = self.instanciate_class(\n class_name=value.class_name, kwargs=value.params\n )\n elif isinstance(param_value, PipelineClass):\n current_node.algorithm.params[param_name] = self.instanciate_class(\n class_name=param_value.class_name, kwargs=param_value.params\n )\n instanciated_pipeline.append(current_node)\n return instanciated_pipeline" }, { "class_start_lineno": 27, "class_end_lineno": 324, "func_start_lineno": 202, "func_end_lineno": 223, "func_code": " def instanciate_node(\n self, node_class: str, algorithm_params: Dict[str, Any], callbacks: Optional[List[PipelineClass]]\n ) -> Algorithm:\n \"\"\"Instanciate an Algorithm from its class, kwargs and optional Callbacks.\n\n NOTE: All callbacks of type listed in self.env.disabled_qa will be filtered out. This allows one config file to be used in various QA standards levels.\n\n Args:\n node_class (str): Node's class.\n algorithm_params (Dict[str, Any]): Node's kwargs.\n callbacks (Optional[List[PipelineClass]]): list of callbacks.\n\n Returns:\n Algorithm: instanciated node.\n \"\"\"\n if callbacks is not None and len(callbacks):\n instanciated_callbacks = [self.instanciate_class(cb.class_name, cb.params) for cb in callbacks]\n instanciated_callbacks = [cb for cb in instanciated_callbacks if type(cb) not in self.env.disabled_qa]\n\n algorithm_params = {**algorithm_params, **{\"callbacks\": instanciated_callbacks}}\n\n return self.instanciate_class(node_class, algorithm_params)" }, { "class_start_lineno": 27, "class_end_lineno": 324, "func_start_lineno": 159, "func_end_lineno": 177, "func_code": " def instanciate_nodes(self) -> Dict[str, Algorithm]:\n \"\"\"Given a list of PipelineNode, return the associated instanciated nodes.\n\n NOTE: All nodes of type listed in self.env.disabled_qa will be filtered out. 
This allows one config file to be used in various QA standards levels.\n\n Returns:\n Dict[str, Algorithm]: instanciated nodes.\n \"\"\"\n instanciated_pipeline = self.instanciate_pipeline()\n nodes = {\n node.name: self.instanciate_node(\n node_class=node.algorithm.class_name,\n algorithm_params=node.algorithm.params,\n callbacks=node.callbacks,\n )\n for node in instanciated_pipeline\n }\n nodes = {node_name: node for node_name, node in nodes.items() if type(node) not in self.env.disabled_qa}\n return nodes" } ]
[ "function_empty", "TDD" ]
[ "iris.pipelines.iris_pipeline.IRISPipeline.instanciate_class", "iris.pipelines.iris_pipeline.IRISPipeline.instanciate_pipeline", "iris.pipelines.iris_pipeline.IRISPipeline.instanciate_node", "iris.pipelines.iris_pipeline.IRISPipeline.instanciate_nodes" ]
Python
1
4
{ "total_num": 33, "base_passed_num": 0 }
[ "open-iris.src.iris.io.validators.is_binary", "open-iris.src.iris.nodes.binarization.specular_reflection_detection.SpecularReflectionDetection::run" ]
open-iris
[ "iris/io/validators.py", "iris/nodes/binarization/specular_reflection_detection.py" ]
[ "tests/unit_tests/nodes/binarization/test_specular_reflection_detection.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 277, "func_start_lineno": 40, "func_end_lineno": 57, "func_code": "def is_binary(cls: type, v: np.ndarray, field: fields.ModelField) -> np.ndarray:\n \"\"\"Check if array has only boolean values, i.e. is binary.\n\n Args:\n cls (type): Class type.\n v (np.ndarray): Value to check.\n field (fields.ModelField): Field descriptor.\n\n Raises:\n ValueError: Exception raised if array doesn't contain bool datatypes.\n\n Returns:\n np.ndarray: `v` sent for further processing.\n \"\"\"\n if v.dtype != np.dtype(\"bool\"):\n raise ValueError(f\"{cls.__name__}: {field.name} must be binary. got dtype {v.dtype}\")\n\n return v" }, { "class_start_lineno": 8, "class_end_lineno": 40, "func_start_lineno": 26, "func_end_lineno": 40, "func_code": " def run(self, ir_image: IRImage) -> NoiseMask:\n \"\"\"Thresholds an IRImage to detect Specular Reflection.\n\n Args:\n ir_image (IRImage): Infrared image object.\n\n Returns:\n NoiseMask: a binary map of the thresholded IRImage.\n \"\"\"\n _, reflection_segmap = cv2.threshold(\n ir_image.img_data, self.params.reflection_threshold, 255, cv2.THRESH_BINARY\n )\n reflection_segmap = (reflection_segmap / 255.0).astype(bool)\n\n return NoiseMask(mask=reflection_segmap)" } ]
[ "function_empty" ]
[ "iris.io.validators.is_binary", "iris.nodes.binarization.specular_reflection_detection.SpecularReflectionDetection.run" ]
Python
2
2
{ "total_num": 4, "base_passed_num": 0 }
[ "transformers.src.transformers.image_utils.infer_channel_dimension_format", "transformers.src.transformers.image_transforms.to_channel_dimension_format", "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.flip_channel_order" ]
transformers
[ "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/image_utils.py", "transformers/image_transforms.py" ]
[ "tests/test_image_transforms.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 220, "func_end_lineno": 254, "func_code": "def infer_channel_dimension_format(\n image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None\n) -> ChannelDimension:\n \"\"\"\n Infers the channel dimension format of `image`.\n\n Args:\n image (`np.ndarray`):\n The image to infer the channel dimension of.\n num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):\n The number of channels of the image.\n\n Returns:\n The channel dimension of the image.\n \"\"\"\n num_channels = num_channels if num_channels is not None else (1, 3)\n num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels\n\n if image.ndim == 3:\n first_dim, last_dim = 0, 2\n elif image.ndim == 4:\n first_dim, last_dim = 1, 3\n else:\n raise ValueError(f\"Unsupported number of image dimensions: {image.ndim}\")\n\n if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:\n logger.warning(\n f\"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension.\"\n )\n return ChannelDimension.FIRST\n elif image.shape[first_dim] in num_channels:\n return ChannelDimension.FIRST\n elif image.shape[last_dim] in num_channels:\n return ChannelDimension.LAST\n raise ValueError(\"Unable to infer channel dimension format\")" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 58, "func_end_lineno": 94, "func_code": "def to_channel_dimension_format(\n image: np.ndarray,\n channel_dim: Union[ChannelDimension, str],\n input_channel_dim: Optional[Union[ChannelDimension, str]] = None,\n) -> np.ndarray:\n \"\"\"\n Converts `image` to the channel dimension format specified by `channel_dim`.\n\n Args:\n image (`numpy.ndarray`):\n The image to have its channel dimension set.\n channel_dim (`ChannelDimension`):\n The channel dimension format to use.\n input_channel_dim (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred from the input image.\n\n Returns:\n `np.ndarray`: The image with the channel dimension set to `channel_dim`.\n \"\"\"\n if not isinstance(image, np.ndarray):\n raise TypeError(f\"Input image must be of type np.ndarray, got {type(image)}\")\n\n if input_channel_dim is None:\n input_channel_dim = infer_channel_dimension_format(image)\n\n target_channel_dim = ChannelDimension(channel_dim)\n if input_channel_dim == target_channel_dim:\n return image\n\n if target_channel_dim == ChannelDimension.FIRST:\n image = image.transpose((2, 0, 1))\n elif target_channel_dim == ChannelDimension.LAST:\n image = image.transpose((1, 2, 0))\n else:\n raise ValueError(\"Unsupported channel dimension format: {}\".format(channel_dim))\n\n return image" }, { "class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. 
If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 774, "func_end_lineno": 809, "func_code": "def flip_channel_order(\n image: np.ndarray,\n data_format: Optional[ChannelDimension] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Flips the channel order of the image.\n\n If the image is in RGB format, it will be converted to BGR and vice versa.\n\n Args:\n image (`np.ndarray`):\n The image to flip.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format for the input image. Can be one of:\n - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n \"\"\"\n input_data_format = infer_channel_dimension_format(image) if input_data_format is None else input_data_format\n\n if input_data_format == ChannelDimension.LAST:\n image = image[..., ::-1]\n elif input_data_format == ChannelDimension.FIRST:\n image = image[::-1, ...]\n else:\n raise ValueError(f\"Unsupported channel dimension: {input_data_format}\")\n\n if data_format is not None:\n image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n return image" } ]
[ "function_empty" ]
[ "transformers.image_utils.infer_channel_dimension_format", "transformers.image_transforms.to_channel_dimension_format", "transformers.image_utils.get_image_size", "transformers.image_transforms.flip_channel_order" ]
Python
4
4
{ "total_num": 24, "base_passed_num": 5 }
[ "transformers.src.transformers.utils.logging._configure_library_root_logger", "transformers.src.transformers.utils.logging.get_logger", "transformers.src.transformers.generation.configuration_utils.GenerationConfig::to_diff_dict", "transformers.src.transformers.generation.configuration_utils.GenerationConfig::to_json_string" ]
transformers
[ "transformers/utils/logging.py", "transformers/utils/logging.py", "transformers/generation/configuration_utils.py", "transformers/generation/configuration_utils.py", "transformers/generation/configuration_utils.py" ]
[ "tests/benchmark/test_benchmark.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 410, "func_start_lineno": 81, "func_end_lineno": 104, "func_code": "def _configure_library_root_logger() -> None:\n global _default_handler\n\n with _lock:\n if _default_handler:\n # This library has already configured the library root logger.\n return\n _default_handler = logging.StreamHandler() # Set sys.stderr as stream.\n # set defaults based on https://github.com/pyinstaller/pyinstaller/issues/7334#issuecomment-1357447176\n if sys.stderr is None:\n sys.stderr = open(os.devnull, \"w\")\n\n _default_handler.flush = sys.stderr.flush\n\n # Apply our default configuration to the library root logger.\n library_root_logger = _get_library_root_logger()\n library_root_logger.addHandler(_default_handler)\n library_root_logger.setLevel(_get_default_logging_level())\n # if logging level is debug, we add pathname and lineno to formatter for easy debugging\n if os.getenv(\"TRANSFORMERS_VERBOSITY\", None) == \"detail\":\n formatter = logging.Formatter(\"[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s\")\n _default_handler.setFormatter(formatter)\n\n library_root_logger.propagate = False" }, { "class_start_lineno": 1, "class_end_lineno": 410, "func_start_lineno": 147, "func_end_lineno": 158, "func_code": "def get_logger(name: Optional[str] = None) -> logging.Logger:\n \"\"\"\n Return a logger with the specified name.\n\n This function is not supposed to be directly accessed unless you are writing a custom transformers module.\n \"\"\"\n\n if name is None:\n name = _get_library_name()\n\n _configure_library_root_logger()\n return logging.getLogger(name)" }, { "class_start_lineno": 94, "class_end_lineno": 1274, "func_start_lineno": 1109, "func_end_lineno": 1130, "func_code": " def to_diff_dict(self) -> Dict[str, Any]:\n \"\"\"\n Removes all attributes from config which correspond to the default config attributes for better readability and\n serializes to a Python dictionary.\n\n Returns:\n `Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n config_dict = self.to_dict()\n\n # get the default config dict\n default_config_dict = GenerationConfig().to_dict()\n\n serializable_config_dict = {}\n\n # only serialize values that differ from the default config\n for key, value in config_dict.items():\n if key not in default_config_dict or key == \"transformers_version\" or value != default_config_dict[key]:\n serializable_config_dict[key] = value\n\n self.dict_torch_dtype_to_str(serializable_config_dict)\n return serializable_config_dict" }, { "class_start_lineno": 94, "class_end_lineno": 1274, "func_start_lineno": 1153, "func_end_lineno": 1195, "func_code": " def to_json_string(self, use_diff: bool = True, ignore_metadata: bool = False) -> str:\n \"\"\"\n Serializes this instance to a JSON string.\n\n Args:\n use_diff (`bool`, *optional*, defaults to `True`):\n If set to `True`, only the difference between the config instance and the default `GenerationConfig()`\n is serialized to JSON string.\n ignore_metadata (`bool`, *optional*, defaults to `False`):\n Whether to ignore the metadata fields present in the instance\n\n Returns:\n `str`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n if use_diff is True:\n config_dict = self.to_diff_dict()\n else:\n config_dict = self.to_dict()\n\n if ignore_metadata:\n for metadata_field in METADATA_FIELDS:\n config_dict.pop(metadata_field, None)\n\n def convert_keys_to_string(obj):\n if 
isinstance(obj, dict):\n return {str(key): convert_keys_to_string(value) for key, value in obj.items()}\n elif isinstance(obj, list):\n return [convert_keys_to_string(item) for item in obj]\n else:\n return obj\n\n def convert_dataclass_to_dict(obj):\n if isinstance(obj, dict):\n return {key: convert_dataclass_to_dict(value) for key, value in obj.items()}\n elif is_dataclass(obj):\n return obj.to_dict()\n else:\n return obj\n\n config_dict = convert_keys_to_string(config_dict)\n config_dict = convert_dataclass_to_dict(config_dict)\n\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"" }, { "class_start_lineno": 94, "class_end_lineno": 1274, "func_start_lineno": 480, "func_end_lineno": 481, "func_code": " def __hash__(self):\n return hash(self.to_json_string(ignore_metadata=True))" } ]
[ "function_empty", "TDD" ]
[ "transformers.utils.logging._configure_library_root_logger", "transformers.utils.logging.get_logger", "transformers.generation.configuration_utils.GenerationConfig.to_diff_dict", "transformers.generation.configuration_utils.GenerationConfig.to_json_string", "transformers.generation.configuration_utils.GenerationConfig.__hash__" ]
Python
3
4
{ "total_num": 11, "base_passed_num": 0 }
[ "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.blip.image_processing_blip.BlipImageProcessor::resize" ]
transformers
[ "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_processing_utils.py", "transformers/image_transforms.py", "transformers/models/blip/image_processing_blip.py" ]
[ "tests/models/blip/test_image_processing_blip.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\"<class 'torch.\"):\n return \"pt\"\n elif representation.startswith(\"<class 'tensorflow.\"):\n return \"tf\"\n elif representation.startswith(\"<class 'jax\"):\n return \"jax\"\n elif representation.startswith(\"<class 'numpy.\"):\n return \"np\"\n elif representation.startswith(\"<class 'mlx.\"):\n return \"mlx\"" }, { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 98, "func_end_lineno": 116, "func_code": "def _get_frameworks_and_test_func(x):\n \"\"\"\n Returns an (ordered since we are in Python 3.7+) dictionary framework to test function, which places the framework\n we can guess from the repr first, then Numpy, then the others.\n \"\"\"\n framework_to_test = {\n \"pt\": is_torch_tensor,\n \"tf\": is_tf_tensor,\n \"jax\": is_jax_tensor,\n \"np\": is_numpy_array,\n \"mlx\": is_mlx_array,\n }\n preferred_framework = infer_framework_from_repr(x)\n # We will test this one first, then numpy, then the others.\n frameworks = [] if preferred_framework is None else [preferred_framework]\n if preferred_framework != \"np\":\n frameworks.append(\"np\")\n frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, \"np\"]])\n return {f: framework_to_test[f] for f in frameworks}" }, { "class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image" }, { "class_start_lineno": 46, "class_end_lineno": 294, "func_start_lineno": 111, "func_end_lineno": 157, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BICUBIC,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image to `(size[\"height\"], size[\"width\"])`.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Dictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.\n data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n size = get_size_dict(size)\n if \"height\" not in size or \"width\" not in size:\n raise ValueError(f\"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}\")\n output_size = (size[\"height\"], size[\"width\"])\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )" } ]
[ "function_empty", "TDD" ]
[ "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_processing_utils.get_size_dict", "transformers.image_transforms.resize", "transformers.models.blip.image_processing_blip.BlipImageProcessor.resize" ]
Python
3
5
{ "total_num": 20, "base_passed_num": 12 }
[ "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_transforms.get_resize_output_image_size", "transformers.src.transformers.models.chinese_clip.image_processing_chinese_clip.ChineseCLIPImageProcessor::resize" ]
transformers
[ "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_processing_utils.py", "transformers/image_transforms.py", "transformers/models/chinese_clip/image_processing_chinese_clip.py" ]
[ "tests/models/chinese_clip/test_image_processing_chinese_clip.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\"<class 'torch.\"):\n return \"pt\"\n elif representation.startswith(\"<class 'tensorflow.\"):\n return \"tf\"\n elif representation.startswith(\"<class 'jax\"):\n return \"jax\"\n elif representation.startswith(\"<class 'numpy.\"):\n return \"np\"\n elif representation.startswith(\"<class 'mlx.\"):\n return \"mlx\"" }, { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 98, "func_end_lineno": 116, "func_code": "def _get_frameworks_and_test_func(x):\n \"\"\"\n Returns an (ordered since we are in Python 3.7+) dictionary framework to test function, which places the framework\n we can guess from the repr first, then Numpy, then the others.\n \"\"\"\n framework_to_test = {\n \"pt\": is_torch_tensor,\n \"tf\": is_tf_tensor,\n \"jax\": is_jax_tensor,\n \"np\": is_numpy_array,\n \"mlx\": is_mlx_array,\n }\n preferred_framework = infer_framework_from_repr(x)\n # We will test this one first, then numpy, then the others.\n frameworks = [] if preferred_framework is None else [preferred_framework]\n if preferred_framework != \"np\":\n frameworks.append(\"np\")\n frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, \"np\"]])\n return {f: framework_to_test[f] for f in frameworks}" }, { "class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 214, "func_end_lineno": 278, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square\n (`size`,`size`). If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)" }, { "class_start_lineno": 51, "class_end_lineno": 306, "func_start_lineno": 125, "func_end_lineno": 162, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BICUBIC,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\n resized to keep the input aspect ratio.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred from the input\n image.\n \"\"\"\n size = get_size_dict(size, default_to_square=False)\n output_size = get_resize_output_image_size(\n image, size=(size[\"height\"], size[\"width\"]), default_to_square=False, input_data_format=input_data_format\n )\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )" } ]
[ "function_empty", "TDD" ]
[ "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_processing_utils.get_size_dict", "transformers.image_transforms.get_resize_output_image_size", "transformers.models.chinese_clip.image_processing_chinese_clip.ChineseCLIPImageProcessor.resize" ]
Python
3
5
{ "total_num": 21, "base_passed_num": 12 }
[ "transformers.src.transformers.utils.logging._configure_library_root_logger", "transformers.src.transformers.utils.logging.get_logger" ]
transformers
[ "transformers/utils/logging.py", "transformers/utils/logging.py" ]
[ "tests/models/esm/test_tokenization_esm.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 410, "func_start_lineno": 81, "func_end_lineno": 104, "func_code": "def _configure_library_root_logger() -> None:\n global _default_handler\n\n with _lock:\n if _default_handler:\n # This library has already configured the library root logger.\n return\n _default_handler = logging.StreamHandler() # Set sys.stderr as stream.\n # set defaults based on https://github.com/pyinstaller/pyinstaller/issues/7334#issuecomment-1357447176\n if sys.stderr is None:\n sys.stderr = open(os.devnull, \"w\")\n\n _default_handler.flush = sys.stderr.flush\n\n # Apply our default configuration to the library root logger.\n library_root_logger = _get_library_root_logger()\n library_root_logger.addHandler(_default_handler)\n library_root_logger.setLevel(_get_default_logging_level())\n # if logging level is debug, we add pathname and lineno to formatter for easy debugging\n if os.getenv(\"TRANSFORMERS_VERBOSITY\", None) == \"detail\":\n formatter = logging.Formatter(\"[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s\")\n _default_handler.setFormatter(formatter)\n\n library_root_logger.propagate = False" }, { "class_start_lineno": 1, "class_end_lineno": 410, "func_start_lineno": 147, "func_end_lineno": 158, "func_code": "def get_logger(name: Optional[str] = None) -> logging.Logger:\n \"\"\"\n Return a logger with the specified name.\n\n This function is not supposed to be directly accessed unless you are writing a custom transformers module.\n \"\"\"\n\n if name is None:\n name = _get_library_name()\n\n _configure_library_root_logger()\n return logging.getLogger(name)" } ]
[ "function_empty", "TDD" ]
[ "transformers.utils.logging._configure_library_root_logger", "transformers.utils.logging.get_logger" ]
Python
1
2
{ "total_num": 6, "base_passed_num": 0 }
[ "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.flava.image_processing_flava.FlavaImageProcessor::resize" ]
transformers
[ "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_processing_utils.py", "transformers/image_transforms.py", "transformers/models/flava/image_processing_flava.py" ]
[ "tests/models/flava/test_image_processing_flava.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\"<class 'torch.\"):\n return \"pt\"\n elif representation.startswith(\"<class 'tensorflow.\"):\n return \"tf\"\n elif representation.startswith(\"<class 'jax\"):\n return \"jax\"\n elif representation.startswith(\"<class 'numpy.\"):\n return \"np\"\n elif representation.startswith(\"<class 'mlx.\"):\n return \"mlx\"" }, { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 98, "func_end_lineno": 116, "func_code": "def _get_frameworks_and_test_func(x):\n \"\"\"\n Returns an (ordered since we are in Python 3.7+) dictionary framework to test function, which places the framework\n we can guess from the repr first, then Numpy, then the others.\n \"\"\"\n framework_to_test = {\n \"pt\": is_torch_tensor,\n \"tf\": is_tf_tensor,\n \"jax\": is_jax_tensor,\n \"np\": is_numpy_array,\n \"mlx\": is_mlx_array,\n }\n preferred_framework = infer_framework_from_repr(x)\n # We will test this one first, then numpy, then the others.\n frameworks = [] if preferred_framework is None else [preferred_framework]\n if preferred_framework != \"np\":\n frameworks.append(\"np\")\n frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, \"np\"]])\n return {f: framework_to_test[f] for f in frameworks}" }, { "class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image" }, { "class_start_lineno": 136, "class_end_lineno": 700, "func_start_lineno": 338, "func_end_lineno": 384, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BICUBIC,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image to `(size[\"height\"], size[\"width\"])`.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Dictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.\n data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n size = get_size_dict(size)\n if \"height\" not in size or \"width\" not in size:\n raise ValueError(f\"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}\")\n output_size = (size[\"height\"], size[\"width\"])\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )" } ]
[ "function_empty", "TDD" ]
[ "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_processing_utils.get_size_dict", "transformers.image_transforms.resize", "transformers.models.flava.image_processing_flava.FlavaImageProcessor.resize" ]
Python
3
5
{ "total_num": 15, "base_passed_num": 6 }
[ "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.image_utils.infer_channel_dimension_format", "transformers.src.transformers.image_utils.get_image_size" ]
transformers
[ "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_utils.py", "transformers/image_utils.py" ]
[ "tests/models/fuyu/test_image_processing_fuyu.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\"<class 'torch.\"):\n return \"pt\"\n elif representation.startswith(\"<class 'tensorflow.\"):\n return \"tf\"\n elif representation.startswith(\"<class 'jax\"):\n return \"jax\"\n elif representation.startswith(\"<class 'numpy.\"):\n return \"np\"\n elif representation.startswith(\"<class 'mlx.\"):\n return \"mlx\"" }, { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 98, "func_end_lineno": 116, "func_code": "def _get_frameworks_and_test_func(x):\n \"\"\"\n Returns an (ordered since we are in Python 3.7+) dictionary framework to test function, which places the framework\n we can guess from the repr first, then Numpy, then the others.\n \"\"\"\n framework_to_test = {\n \"pt\": is_torch_tensor,\n \"tf\": is_tf_tensor,\n \"jax\": is_jax_tensor,\n \"np\": is_numpy_array,\n \"mlx\": is_mlx_array,\n }\n preferred_framework = infer_framework_from_repr(x)\n # We will test this one first, then numpy, then the others.\n frameworks = [] if preferred_framework is None else [preferred_framework]\n if preferred_framework != \"np\":\n frameworks.append(\"np\")\n frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, \"np\"]])\n return {f: framework_to_test[f] for f in frameworks}" }, { "class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 220, "func_end_lineno": 254, "func_code": "def infer_channel_dimension_format(\n image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None\n) -> ChannelDimension:\n \"\"\"\n Infers the channel dimension format of `image`.\n\n Args:\n image (`np.ndarray`):\n The image to infer the channel dimension of.\n num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):\n The number of channels of the image.\n\n Returns:\n The channel dimension of the image.\n \"\"\"\n num_channels = num_channels if num_channels is not None else (1, 3)\n num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels\n\n if image.ndim == 3:\n first_dim, last_dim = 0, 2\n elif image.ndim == 4:\n first_dim, last_dim = 1, 3\n else:\n raise ValueError(f\"Unsupported number of image dimensions: {image.ndim}\")\n\n if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:\n logger.warning(\n f\"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension.\"\n )\n return ChannelDimension.FIRST\n elif image.shape[first_dim] in num_channels:\n return ChannelDimension.FIRST\n elif image.shape[last_dim] in num_channels:\n return ChannelDimension.LAST\n raise ValueError(\"Unable to infer channel dimension format\")" }, { "class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. 
If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")" } ]
[ "function_empty", "TDD" ]
[ "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_utils.infer_channel_dimension_format", "transformers.image_utils.get_image_size" ]
Python
2
4
{ "total_num": 4, "base_passed_num": 1 }
[ "transformers.src.transformers.utils.backbone_utils.verify_out_features_out_indices", "transformers.src.transformers.utils.backbone_utils._align_output_features_output_indices", "transformers.src.transformers.utils.backbone_utils.get_aligned_output_features_output_indices", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.utils.generic.is_tensor" ]
transformers
[ "transformers/utils/backbone_utils.py", "transformers/utils/backbone_utils.py", "transformers/utils/backbone_utils.py", "transformers/models/rt_detr/configuration_rt_detr_resnet.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py" ]
[ "tests/models/rt_detr/test_modeling_rt_detr_resnet.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 377, "func_start_lineno": 32, "func_end_lineno": 74, "func_code": "def verify_out_features_out_indices(\n out_features: Optional[Iterable[str]], out_indices: Optional[Iterable[int]], stage_names: Optional[Iterable[str]]\n):\n \"\"\"\n Verify that out_indices and out_features are valid for the given stage_names.\n \"\"\"\n if stage_names is None:\n raise ValueError(\"Stage_names must be set for transformers backbones\")\n\n if out_features is not None:\n if not isinstance(out_features, (list,)):\n raise ValueError(f\"out_features must be a list got {type(out_features)}\")\n if any(feat not in stage_names for feat in out_features):\n raise ValueError(f\"out_features must be a subset of stage_names: {stage_names} got {out_features}\")\n if len(out_features) != len(set(out_features)):\n raise ValueError(f\"out_features must not contain any duplicates, got {out_features}\")\n if out_features != (sorted_feats := [feat for feat in stage_names if feat in out_features]):\n raise ValueError(\n f\"out_features must be in the same order as stage_names, expected {sorted_feats} got {out_features}\"\n )\n\n if out_indices is not None:\n if not isinstance(out_indices, list):\n raise ValueError(f\"out_indices must be a list, got {type(out_indices)}\")\n # Convert negative indices to their positive equivalent: [-1,] -> [len(stage_names) - 1,]\n positive_indices = tuple(idx % len(stage_names) if idx < 0 else idx for idx in out_indices)\n if any(idx for idx in positive_indices if idx not in range(len(stage_names))):\n raise ValueError(f\"out_indices must be valid indices for stage_names {stage_names}, got {out_indices}\")\n if len(positive_indices) != len(set(positive_indices)):\n msg = f\"out_indices must not contain any duplicates, got {out_indices}\"\n msg += f\"(equivalent to {positive_indices}))\" if positive_indices != out_indices else \"\"\n raise ValueError(msg)\n if positive_indices != tuple(sorted(positive_indices)):\n sorted_negative = [idx for _, idx in sorted(zip(positive_indices, out_indices), key=lambda x: x[0])]\n raise ValueError(\n f\"out_indices must be in the same order as stage_names, expected {sorted_negative} got {out_indices}\"\n )\n\n if out_features is not None and out_indices is not None:\n if len(out_features) != len(out_indices):\n raise ValueError(\"out_features and out_indices should have the same length if both are set\")\n if out_features != [stage_names[idx] for idx in out_indices]:\n raise ValueError(\"out_features and out_indices should correspond to the same stages if both are set\")" }, { "class_start_lineno": 1, "class_end_lineno": 377, "func_start_lineno": 77, "func_end_lineno": 105, "func_code": "def _align_output_features_output_indices(\n out_features: Optional[List[str]],\n out_indices: Optional[Union[List[int], Tuple[int]]],\n stage_names: List[str],\n):\n \"\"\"\n Finds the corresponding `out_features` and `out_indices` for the given `stage_names`.\n\n The logic is as follows:\n - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the\n `out_indices`.\n - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the\n `out_features`.\n - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.\n - `out_indices` and `out_features` set: input `out_indices` and `out_features` are returned.\n\n Args:\n out_features (`List[str]`): The names of the features for the backbone to 
output.\n out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.\n stage_names (`List[str]`): The names of the stages of the backbone.\n \"\"\"\n if out_indices is None and out_features is None:\n out_indices = [len(stage_names) - 1]\n out_features = [stage_names[-1]]\n elif out_indices is None and out_features is not None:\n out_indices = [stage_names.index(layer) for layer in out_features]\n elif out_features is None and out_indices is not None:\n out_features = [stage_names[idx] for idx in out_indices]\n return out_features, out_indices" }, { "class_start_lineno": 1, "class_end_lineno": 377, "func_start_lineno": 108, "func_end_lineno": 137, "func_code": "def get_aligned_output_features_output_indices(\n out_features: Optional[List[str]],\n out_indices: Optional[Union[List[int], Tuple[int]]],\n stage_names: List[str],\n) -> Tuple[List[str], List[int]]:\n \"\"\"\n Get the `out_features` and `out_indices` so that they are aligned.\n\n The logic is as follows:\n - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the\n `out_indices`.\n - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the\n `out_features`.\n - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.\n - `out_indices` and `out_features` set: they are verified to be aligned.\n\n Args:\n out_features (`List[str]`): The names of the features for the backbone to output.\n out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.\n stage_names (`List[str]`): The names of the stages of the backbone.\n \"\"\"\n out_indices = list(out_indices) if out_indices is not None else None\n # First verify that the out_features and out_indices are valid\n verify_out_features_out_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names)\n output_features, output_indices = _align_output_features_output_indices(\n out_features=out_features, out_indices=out_indices, stage_names=stage_names\n )\n # Verify that the aligned out_features and out_indices are valid\n verify_out_features_out_indices(out_features=output_features, out_indices=output_indices, stage_names=stage_names)\n return output_features, output_indices" }, { "class_start_lineno": 25, "class_end_lineno": 111, "func_start_lineno": 83, "func_end_lineno": 111, "func_code": " def __init__(\n self,\n num_channels=3,\n embedding_size=64,\n hidden_sizes=[256, 512, 1024, 2048],\n depths=[3, 4, 6, 3],\n layer_type=\"bottleneck\",\n hidden_act=\"relu\",\n downsample_in_first_stage=False,\n downsample_in_bottleneck=False,\n out_features=None,\n out_indices=None,\n **kwargs,\n ):\n super().__init__(**kwargs)\n if layer_type not in self.layer_types:\n raise ValueError(f\"layer_type={layer_type} is not one of {','.join(self.layer_types)}\")\n self.num_channels = num_channels\n self.embedding_size = embedding_size\n self.hidden_sizes = hidden_sizes\n self.depths = depths\n self.layer_type = layer_type\n self.hidden_act = hidden_act\n self.downsample_in_first_stage = downsample_in_first_stage\n self.downsample_in_bottleneck = downsample_in_bottleneck\n self.stage_names = [\"stem\"] + [f\"stage{idx}\" for idx in range(1, len(depths) + 1)]\n self._out_features, self._out_indices = get_aligned_output_features_output_indices(\n out_features=out_features, out_indices=out_indices, stage_names=self.stage_names\n )" }, { "class_start_lineno": 1, 
"class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\"<class 'torch.\"):\n return \"pt\"\n elif representation.startswith(\"<class 'tensorflow.\"):\n return \"tf\"\n elif representation.startswith(\"<class 'jax\"):\n return \"jax\"\n elif representation.startswith(\"<class 'numpy.\"):\n return \"np\"\n elif representation.startswith(\"<class 'mlx.\"):\n return \"mlx\"" }, { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 98, "func_end_lineno": 116, "func_code": "def _get_frameworks_and_test_func(x):\n \"\"\"\n Returns an (ordered since we are in Python 3.7+) dictionary framework to test function, which places the framework\n we can guess from the repr first, then Numpy, then the others.\n \"\"\"\n framework_to_test = {\n \"pt\": is_torch_tensor,\n \"tf\": is_tf_tensor,\n \"jax\": is_jax_tensor,\n \"np\": is_numpy_array,\n \"mlx\": is_mlx_array,\n }\n preferred_framework = infer_framework_from_repr(x)\n # We will test this one first, then numpy, then the others.\n frameworks = [] if preferred_framework is None else [preferred_framework]\n if preferred_framework != \"np\":\n frameworks.append(\"np\")\n frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, \"np\"]])\n return {f: framework_to_test[f] for f in frameworks}" }, { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 119, "func_end_lineno": 140, "func_code": "def is_tensor(x):\n \"\"\"\n Tests if `x` is a `torch.Tensor`, `tf.Tensor`, `jaxlib.xla_extension.DeviceArray`, `np.ndarray` or `mlx.array`\n in the order defined by `infer_framework_from_repr`\n \"\"\"\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(x)\n for test_func in framework_to_test_func.values():\n if test_func(x):\n return True\n\n # Tracers\n if is_torch_fx_proxy(x):\n return True\n\n if is_flax_available():\n from jax.core import Tracer\n\n if isinstance(x, Tracer):\n return True\n\n return False" } ]
[ "function_empty", "TDD" ]
[ "transformers.utils.backbone_utils.verify_out_features_out_indices", "transformers.utils.backbone_utils._align_output_features_output_indices", "transformers.utils.backbone_utils.get_aligned_output_features_output_indices", "transformers.models.rt_detr.configuration_rt_detr_resnet.RTDetrResNetConfig.__init__", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.utils.generic.is_tensor" ]
Python
4
6
{ "total_num": 8, "base_passed_num": 0 }
[ "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.get_resize_output_image_size", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.video_llava.image_processing_video_llava.VideoLlavaImageProcessor::resize" ]
transformers
[ "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/image_transforms.py", "transformers/models/video_llava/image_processing_video_llava.py" ]
[ "tests/models/video_llava/test_image_processing_video_llava.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 214, "func_end_lineno": 278, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square\n (`size`,`size`). If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image" }, { "class_start_lineno": 69, "class_end_lineno": 404, "func_start_lineno": 143, "func_end_lineno": 190, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BICUBIC,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\n resized to keep the input aspect ratio.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred.\n \"\"\"\n default_to_square = True\n if \"shortest_edge\" in size:\n size = size[\"shortest_edge\"]\n default_to_square = False\n elif \"height\" in size and \"width\" in size:\n size = (size[\"height\"], size[\"width\"])\n else:\n raise ValueError(\"Size must contain either 'shortest_edge' or 'height' and 'width'.\")\n\n output_size = get_resize_output_image_size(\n image,\n size=size,\n default_to_square=default_to_square,\n input_data_format=input_data_format,\n )\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )" } ]
[ "function_empty" ]
[ "transformers.image_utils.get_image_size", "transformers.image_transforms.get_resize_output_image_size", "transformers.image_transforms.resize", "transformers.models.video_llava.image_processing_video_llava.VideoLlavaImageProcessor.resize" ]
Python
4
4
{ "total_num": 18, "base_passed_num": 8 }
[ "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.get_resize_output_image_size", "transformers.src.transformers.models.videomae.image_processing_videomae.VideoMAEImageProcessor::resize" ]
transformers
[ "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/videomae/image_processing_videomae.py" ]
[ "tests/models/videomae/test_image_processing_videomae.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\"<class 'torch.\"):\n return \"pt\"\n elif representation.startswith(\"<class 'tensorflow.\"):\n return \"tf\"\n elif representation.startswith(\"<class 'jax\"):\n return \"jax\"\n elif representation.startswith(\"<class 'numpy.\"):\n return \"np\"\n elif representation.startswith(\"<class 'mlx.\"):\n return \"mlx\"" }, { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 98, "func_end_lineno": 116, "func_code": "def _get_frameworks_and_test_func(x):\n \"\"\"\n Returns an (ordered since we are in Python 3.7+) dictionary framework to test function, which places the framework\n we can guess from the repr first, then Numpy, then the others.\n \"\"\"\n framework_to_test = {\n \"pt\": is_torch_tensor,\n \"tf\": is_tf_tensor,\n \"jax\": is_jax_tensor,\n \"np\": is_numpy_array,\n \"mlx\": is_mlx_array,\n }\n preferred_framework = infer_framework_from_repr(x)\n # We will test this one first, then numpy, then the others.\n frameworks = [] if preferred_framework is None else [preferred_framework]\n if preferred_framework != \"np\":\n frameworks.append(\"np\")\n frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, \"np\"]])\n return {f: framework_to_test[f] for f in frameworks}" }, { "class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict" }, { "class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 214, "func_end_lineno": 278, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square\n (`size`,`size`). 
If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)" }, { "class_start_lineno": 63, "class_end_lineno": 345, "func_start_lineno": 134, "func_end_lineno": 176, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BILINEAR,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image. If `size` is of the form `{\"height\": h, \"width\": w}`, the output image will\n have the size `(h, w)`. If `size` is of the form `{\"shortest_edge\": s}`, the output image will have its\n shortest edge of length `s` while keeping the aspect ratio of the original image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred.\n \"\"\"\n size = get_size_dict(size, default_to_square=False)\n if \"shortest_edge\" in size:\n output_size = get_resize_output_image_size(\n image, size[\"shortest_edge\"], default_to_square=False, input_data_format=input_data_format\n )\n elif \"height\" in size and \"width\" in size:\n output_size = (size[\"height\"], size[\"width\"])\n else:\n raise ValueError(f\"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}\")\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )" } ]
[ "function_empty", "TDD" ]
[ "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_processing_utils.get_size_dict", "transformers.image_utils.get_image_size", "transformers.image_transforms.get_resize_output_image_size", "transformers.models.videomae.image_processing_videomae.VideoMAEImageProcessor.resize" ]
Python
4
6
{ "total_num": 13, "base_passed_num": 6 }
[ "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.get_resize_output_image_size", "transformers.src.transformers.models.vivit.image_processing_vivit.VivitImageProcessor::resize" ]
transformers
[ "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/vivit/image_processing_vivit.py" ]
[ "tests/models/vivit/test_image_processing_vivit.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\"<class 'torch.\"):\n return \"pt\"\n elif representation.startswith(\"<class 'tensorflow.\"):\n return \"tf\"\n elif representation.startswith(\"<class 'jax\"):\n return \"jax\"\n elif representation.startswith(\"<class 'numpy.\"):\n return \"np\"\n elif representation.startswith(\"<class 'mlx.\"):\n return \"mlx\"" }, { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 98, "func_end_lineno": 116, "func_code": "def _get_frameworks_and_test_func(x):\n \"\"\"\n Returns an (ordered since we are in Python 3.7+) dictionary framework to test function, which places the framework\n we can guess from the repr first, then Numpy, then the others.\n \"\"\"\n framework_to_test = {\n \"pt\": is_torch_tensor,\n \"tf\": is_tf_tensor,\n \"jax\": is_jax_tensor,\n \"np\": is_numpy_array,\n \"mlx\": is_mlx_array,\n }\n preferred_framework = infer_framework_from_repr(x)\n # We will test this one first, then numpy, then the others.\n frameworks = [] if preferred_framework is None else [preferred_framework]\n if preferred_framework != \"np\":\n frameworks.append(\"np\")\n frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, \"np\"]])\n return {f: framework_to_test[f] for f in frameworks}" }, { "class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict" }, { "class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 214, "func_end_lineno": 278, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square\n (`size`,`size`). 
If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)" }, { "class_start_lineno": 66, "class_end_lineno": 404, "func_start_lineno": 142, "func_end_lineno": 184, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BILINEAR,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image. If `size` is of the form `{\"height\": h, \"width\": w}`, the output image will\n have the size `(h, w)`. If `size` is of the form `{\"shortest_edge\": s}`, the output image will have its\n shortest edge of length `s` while keeping the aspect ratio of the original image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred.\n \"\"\"\n size = get_size_dict(size, default_to_square=False)\n if \"shortest_edge\" in size:\n output_size = get_resize_output_image_size(\n image, size[\"shortest_edge\"], default_to_square=False, input_data_format=input_data_format\n )\n elif \"height\" in size and \"width\" in size:\n output_size = (size[\"height\"], size[\"width\"])\n else:\n raise ValueError(f\"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}\")\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )" } ]
[ "function_empty", "TDD" ]
[ "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_processing_utils.get_size_dict", "transformers.image_utils.get_image_size", "transformers.image_transforms.get_resize_output_image_size", "transformers.models.vivit.image_processing_vivit.VivitImageProcessor.resize" ]
Python
4
6
{ "total_num": 14, "base_passed_num": 7 }
[ "transformers.src.transformers.utils.logging._configure_library_root_logger", "transformers.src.transformers.utils.logging.get_logger", "transformers.src.transformers.utils.generic.to_py_obj", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer::decode" ]
transformers
[ "transformers/utils/logging.py", "transformers/utils/logging.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/models/wav2vec2/tokenization_wav2vec2.py" ]
[ "tests/models/wav2vec2/test_tokenization_wav2vec2.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 410, "func_start_lineno": 81, "func_end_lineno": 104, "func_code": "def _configure_library_root_logger() -> None:\n global _default_handler\n\n with _lock:\n if _default_handler:\n # This library has already configured the library root logger.\n return\n _default_handler = logging.StreamHandler() # Set sys.stderr as stream.\n # set defaults based on https://github.com/pyinstaller/pyinstaller/issues/7334#issuecomment-1357447176\n if sys.stderr is None:\n sys.stderr = open(os.devnull, \"w\")\n\n _default_handler.flush = sys.stderr.flush\n\n # Apply our default configuration to the library root logger.\n library_root_logger = _get_library_root_logger()\n library_root_logger.addHandler(_default_handler)\n library_root_logger.setLevel(_get_default_logging_level())\n # if logging level is debug, we add pathname and lineno to formatter for easy debugging\n if os.getenv(\"TRANSFORMERS_VERBOSITY\", None) == \"detail\":\n formatter = logging.Formatter(\"[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s\")\n _default_handler.setFormatter(formatter)\n\n library_root_logger.propagate = False" }, { "class_start_lineno": 1, "class_end_lineno": 410, "func_start_lineno": 147, "func_end_lineno": 158, "func_code": "def get_logger(name: Optional[str] = None) -> logging.Logger:\n \"\"\"\n Return a logger with the specified name.\n\n This function is not supposed to be directly accessed unless you are writing a custom transformers module.\n \"\"\"\n\n if name is None:\n name = _get_library_name()\n\n _configure_library_root_logger()\n return logging.getLogger(name)" }, { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 254, "func_end_lineno": 281, "func_code": "def to_py_obj(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.\n \"\"\"\n\n framework_to_py_obj = {\n \"pt\": lambda obj: obj.detach().cpu().tolist(),\n \"tf\": lambda obj: obj.numpy().tolist(),\n \"jax\": lambda obj: np.asarray(obj).tolist(),\n \"np\": lambda obj: obj.tolist(),\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_py_obj(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return [to_py_obj(o) for o in obj]\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_py_obj[framework](obj)\n\n # tolist also works on 0d np arrays\n if isinstance(obj, np.number):\n return obj.tolist()\n else:\n return obj" }, { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\"<class 'torch.\"):\n return \"pt\"\n elif representation.startswith(\"<class 'tensorflow.\"):\n return \"tf\"\n elif representation.startswith(\"<class 'jax\"):\n return \"jax\"\n elif representation.startswith(\"<class 'numpy.\"):\n return \"np\"\n elif representation.startswith(\"<class 'mlx.\"):\n return \"mlx\"" }, { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 98, "func_end_lineno": 116, "func_code": "def 
_get_frameworks_and_test_func(x):\n \"\"\"\n Returns an (ordered since we are in Python 3.7+) dictionary framework to test function, which places the framework\n we can guess from the repr first, then Numpy, then the others.\n \"\"\"\n framework_to_test = {\n \"pt\": is_torch_tensor,\n \"tf\": is_tf_tensor,\n \"jax\": is_jax_tensor,\n \"np\": is_numpy_array,\n \"mlx\": is_mlx_array,\n }\n preferred_framework = infer_framework_from_repr(x)\n # We will test this one first, then numpy, then the others.\n frameworks = [] if preferred_framework is None else [preferred_framework]\n if preferred_framework != \"np\":\n frameworks.append(\"np\")\n frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, \"np\"]])\n return {f: framework_to_test[f] for f in frameworks}" }, { "class_start_lineno": 115, "class_end_lineno": 644, "func_start_lineno": 528, "func_end_lineno": 631, "func_code": " def decode(\n self,\n token_ids: Union[int, List[int], \"np.ndarray\", \"torch.Tensor\", \"tf.Tensor\"],\n skip_special_tokens: bool = False,\n clean_up_tokenization_spaces: bool = None,\n output_char_offsets: bool = False,\n output_word_offsets: bool = False,\n **kwargs,\n ) -> str:\n \"\"\"\n Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special\n tokens and clean up tokenization spaces.\n\n Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.\n\n Args:\n token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):\n List of tokenized input ids. Can be obtained using the `__call__` method.\n skip_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not to remove special tokens in the decoding.\n clean_up_tokenization_spaces (`bool`, *optional*):\n Whether or not to clean up the tokenization spaces.\n output_char_offsets (`bool`, *optional*, defaults to `False`):\n Whether or not to output character offsets. Character offsets can be used in combination with the\n sampling rate and model downsampling rate to compute the time-stamps of transcribed characters.\n\n <Tip>\n\n Please take a look at the example below to better understand how to make use of `output_char_offsets`.\n\n </Tip>\n\n output_word_offsets (`bool`, *optional*, defaults to `False`):\n Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate\n and model downsampling rate to compute the time-stamps of transcribed words.\n\n <Tip>\n\n Please take a look at the example below to better understand how to make use of `output_word_offsets`.\n\n </Tip>\n\n kwargs (additional keyword arguments, *optional*):\n Will be passed to the underlying model specific decode method.\n\n Returns:\n `str` or [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`]: The list of decoded\n sentences. 
Will be a [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`] when\n `output_char_offsets == True` or `output_word_offsets == True`.\n\n Example:\n\n ```python\n >>> # Let's see how to retrieve time steps for a model\n >>> from transformers import AutoTokenizer, AutoFeatureExtractor, AutoModelForCTC\n >>> from datasets import load_dataset\n >>> import datasets\n >>> import torch\n\n >>> # import model, feature extractor, tokenizer\n >>> model = AutoModelForCTC.from_pretrained(\"facebook/wav2vec2-base-960h\")\n >>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/wav2vec2-base-960h\")\n >>> feature_extractor = AutoFeatureExtractor.from_pretrained(\"facebook/wav2vec2-base-960h\")\n\n >>> # load first sample of English common_voice\n >>> dataset = load_dataset(\"mozilla-foundation/common_voice_11_0\", \"en\", split=\"train\", streaming=True, trust_remote_code=True)\n >>> dataset = dataset.cast_column(\"audio\", datasets.Audio(sampling_rate=16_000))\n >>> dataset_iter = iter(dataset)\n >>> sample = next(dataset_iter)\n\n >>> # forward sample through model to get greedily predicted transcription ids\n >>> input_values = feature_extractor(sample[\"audio\"][\"array\"], return_tensors=\"pt\").input_values\n >>> logits = model(input_values).logits[0]\n >>> pred_ids = torch.argmax(logits, axis=-1)\n\n >>> # retrieve word stamps (analogous commands for `output_char_offsets`)\n >>> outputs = tokenizer.decode(pred_ids, output_word_offsets=True)\n >>> # compute `time_offset` in seconds as product of downsampling ratio and sampling_rate\n >>> time_offset = model.config.inputs_to_logits_ratio / feature_extractor.sampling_rate\n\n >>> word_offsets = [\n ... {\n ... \"word\": d[\"word\"],\n ... \"start_time\": round(d[\"start_offset\"] * time_offset, 2),\n ... \"end_time\": round(d[\"end_offset\"] * time_offset, 2),\n ... }\n ... for d in outputs.word_offsets\n ... ]\n >>> # compare word offsets with audio `en_train_0/common_voice_en_19121553.mp3` online on the dataset viewer:\n >>> # https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/viewer/en\n >>> word_offsets[:3]\n [{'word': 'THE', 'start_time': 0.7, 'end_time': 0.78}, {'word': 'TRICK', 'start_time': 0.88, 'end_time': 1.08}, {'word': 'APPEARS', 'start_time': 1.2, 'end_time': 1.64}]\n ```\"\"\"\n # Convert inputs to python lists\n token_ids = to_py_obj(token_ids)\n\n return self._decode(\n token_ids=token_ids,\n skip_special_tokens=skip_special_tokens,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n output_char_offsets=output_char_offsets,\n output_word_offsets=output_word_offsets,\n **kwargs,\n )" } ]
[ "function_empty", "TDD" ]
[ "transformers.utils.logging._configure_library_root_logger", "transformers.utils.logging.get_logger", "transformers.utils.generic.to_py_obj", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer.decode" ]
Python
1
6
{ "total_num": 102, "base_passed_num": 0 }
[ "transformers.src.transformers.utils.logging._configure_library_root_logger", "transformers.src.transformers.utils.logging.get_logger", "transformers.src.transformers.utils.import_utils.create_import_structure_from_path", "transformers.src.transformers.utils.import_utils.define_import_structure" ]
transformers
[ "transformers/utils/logging.py", "transformers/utils/logging.py", "transformers/utils/import_utils.py", "transformers/utils/import_utils.py" ]
[ "tests/utils/test_dynamic_module_utils.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 410, "func_start_lineno": 81, "func_end_lineno": 104, "func_code": "def _configure_library_root_logger() -> None:\n global _default_handler\n\n with _lock:\n if _default_handler:\n # This library has already configured the library root logger.\n return\n _default_handler = logging.StreamHandler() # Set sys.stderr as stream.\n # set defaults based on https://github.com/pyinstaller/pyinstaller/issues/7334#issuecomment-1357447176\n if sys.stderr is None:\n sys.stderr = open(os.devnull, \"w\")\n\n _default_handler.flush = sys.stderr.flush\n\n # Apply our default configuration to the library root logger.\n library_root_logger = _get_library_root_logger()\n library_root_logger.addHandler(_default_handler)\n library_root_logger.setLevel(_get_default_logging_level())\n # if logging level is debug, we add pathname and lineno to formatter for easy debugging\n if os.getenv(\"TRANSFORMERS_VERBOSITY\", None) == \"detail\":\n formatter = logging.Formatter(\"[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s\")\n _default_handler.setFormatter(formatter)\n\n library_root_logger.propagate = False" }, { "class_start_lineno": 1, "class_end_lineno": 410, "func_start_lineno": 147, "func_end_lineno": 158, "func_code": "def get_logger(name: Optional[str] = None) -> logging.Logger:\n \"\"\"\n Return a logger with the specified name.\n\n This function is not supposed to be directly accessed unless you are writing a custom transformers module.\n \"\"\"\n\n if name is None:\n name = _get_library_name()\n\n _configure_library_root_logger()\n return logging.getLogger(name)" }, { "class_start_lineno": 1, "class_end_lineno": 2158, "func_start_lineno": 1846, "func_end_lineno": 2037, "func_code": "def create_import_structure_from_path(module_path):\n \"\"\"\n This method takes the path to a file/a folder and returns the import structure.\n If a file is given, it will return the import structure of the parent folder.\n\n Import structures are designed to be digestible by `_LazyModule` objects. They are\n created from the __all__ definitions in each files as well as the `@export` decorators\n above methods and objects.\n\n The import structure allows explicit display of the required backends for a given object.\n These backends are specified in two ways:\n\n 1. Through their `@export`, if they are exported with that decorator. This `@export` decorator\n accepts a `backend` tuple kwarg mentioning which backends are required to run this object.\n\n 2. If an object is defined in a file with \"default\" backends, it will have, at a minimum, this\n backend specified. 
The default backends are defined according to the filename:\n\n - If a file is named like `modeling_*.py`, it will have a `torch` backend\n - If a file is named like `modeling_tf_*.py`, it will have a `tf` backend\n - If a file is named like `modeling_flax_*.py`, it will have a `flax` backend\n - If a file is named like `tokenization_*_fast.py`, it will have a `tokenizers` backend\n\n Backends serve the purpose of displaying a clear error message to the user in case the backends are not installed.\n Should an object be imported without its required backends being in the environment, any attempt to use the\n object will raise an error mentioning which backend(s) should be added to the environment in order to use\n that object.\n\n Here's an example of an input import structure at the src.transformers.models level:\n\n {\n 'albert': {\n frozenset(): {\n 'configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'}\n },\n frozenset({'tokenizers'}): {\n 'tokenization_albert_fast': {'AlbertTokenizerFast'}\n },\n },\n 'align': {\n frozenset(): {\n 'configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},\n 'processing_align': {'AlignProcessor'}\n },\n },\n 'altclip': {\n frozenset(): {\n 'configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},\n 'processing_altclip': {'AltCLIPProcessor'},\n }\n }\n }\n \"\"\"\n import_structure = {}\n if os.path.isdir(module_path):\n directory = module_path\n adjacent_modules = []\n\n for f in os.listdir(module_path):\n if f != \"__pycache__\" and os.path.isdir(os.path.join(module_path, f)):\n import_structure[f] = create_import_structure_from_path(os.path.join(module_path, f))\n\n elif not os.path.isdir(os.path.join(directory, f)):\n adjacent_modules.append(f)\n\n else:\n directory = os.path.dirname(module_path)\n adjacent_modules = [f for f in os.listdir(directory) if not os.path.isdir(os.path.join(directory, f))]\n\n # We're only taking a look at files different from __init__.py\n # We could theoretically export things directly from the __init__.py\n # files, but this is not supported at this time.\n if \"__init__.py\" in adjacent_modules:\n adjacent_modules.remove(\"__init__.py\")\n\n module_requirements = {}\n for module_name in adjacent_modules:\n # Only modules ending in `.py` are accepted here.\n if not module_name.endswith(\".py\"):\n continue\n\n with open(os.path.join(directory, module_name)) as f:\n file_content = f.read()\n\n # Remove the .py suffix\n module_name = module_name[:-3]\n\n previous_line = \"\"\n previous_index = 0\n\n # Some files have some requirements by default.\n # For example, any file named `modeling_tf_xxx.py`\n # should have TensorFlow as a required backend.\n base_requirements = ()\n for string_check, requirements in BASE_FILE_REQUIREMENTS.items():\n if string_check(module_name):\n base_requirements = requirements\n break\n\n # Objects that have a `@export` assigned to them will get exported\n # with the backends specified in the decorator as well as the file backends.\n exported_objects = set()\n if \"@export\" in file_content:\n lines = file_content.split(\"\\n\")\n for index, line in enumerate(lines):\n # This allows exporting items with other decorators. 
We'll take a look\n # at the line that follows at the same indentation level.\n if line.startswith((\" \", \"\\t\", \"@\", \")\")) and not line.startswith(\"@export\"):\n continue\n\n # Skipping line enables putting whatever we want between the\n # export() call and the actual class/method definition.\n # This is what enables having # Copied from statements, docs, etc.\n skip_line = False\n\n if \"@export\" in previous_line:\n skip_line = False\n\n # Backends are defined on the same line as export\n if \"backends\" in previous_line:\n backends_string = previous_line.split(\"backends=\")[1].split(\"(\")[1].split(\")\")[0]\n backends = tuple(sorted([b.strip(\"'\\\",\") for b in backends_string.split(\", \") if b]))\n\n # Backends are defined in the lines following export, for example such as:\n # @export(\n # backends=(\n # \"sentencepiece\",\n # \"torch\",\n # \"tf\",\n # )\n # )\n #\n # or\n #\n # @export(\n # backends=(\n # \"sentencepiece\", \"tf\"\n # )\n # )\n elif \"backends\" in lines[previous_index + 1]:\n backends = []\n for backend_line in lines[previous_index:index]:\n if \"backends\" in backend_line:\n backend_line = backend_line.split(\"=\")[1]\n if '\"' in backend_line or \"'\" in backend_line:\n if \", \" in backend_line:\n backends.extend(backend.strip(\"()\\\"', \") for backend in backend_line.split(\", \"))\n else:\n backends.append(backend_line.strip(\"()\\\"', \"))\n\n # If the line is only a ')', then we reached the end of the backends and we break.\n if backend_line.strip() == \")\":\n break\n backends = tuple(backends)\n\n # No backends are registered for export\n else:\n backends = ()\n\n backends = frozenset(backends + base_requirements)\n if backends not in module_requirements:\n module_requirements[backends] = {}\n if module_name not in module_requirements[backends]:\n module_requirements[backends][module_name] = set()\n\n if not line.startswith(\"class\") and not line.startswith(\"def\"):\n skip_line = True\n else:\n start_index = 6 if line.startswith(\"class\") else 4\n object_name = line[start_index:].split(\"(\")[0].strip(\":\")\n module_requirements[backends][module_name].add(object_name)\n exported_objects.add(object_name)\n\n if not skip_line:\n previous_line = line\n previous_index = index\n\n # All objects that are in __all__ should be exported by default.\n # These objects are exported with the file backends.\n if \"__all__\" in file_content:\n for _all_object in fetch__all__(file_content):\n if _all_object not in exported_objects:\n backends = frozenset(base_requirements)\n if backends not in module_requirements:\n module_requirements[backends] = {}\n if module_name not in module_requirements[backends]:\n module_requirements[backends][module_name] = set()\n\n module_requirements[backends][module_name].add(_all_object)\n\n import_structure = {**module_requirements, **import_structure}\n return import_structure" }, { "class_start_lineno": 1, "class_end_lineno": 2158, "func_start_lineno": 2136, "func_end_lineno": 2158, "func_code": "def define_import_structure(module_path: str) -> IMPORT_STRUCTURE_T:\n \"\"\"\n This method takes a module_path as input and creates an import structure digestible by a _LazyModule.\n\n Here's an example of an output import structure at the src.transformers.models level:\n\n {\n frozenset({'tokenizers'}): {\n 'albert.tokenization_albert_fast': {'AlbertTokenizerFast'}\n },\n frozenset(): {\n 'albert.configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'},\n 'align.processing_align': {'AlignProcessor'},\n 
'align.configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},\n 'altclip.configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},\n 'altclip.processing_altclip': {'AltCLIPProcessor'}\n }\n }\n\n The import structure is a dict defined with frozensets as keys, and dicts of strings to sets of objects.\n \"\"\"\n import_structure = create_import_structure_from_path(module_path)\n return spread_import_structure(import_structure)" } ]
[ "function_empty", "TDD" ]
[ "transformers.utils.logging._configure_library_root_logger", "transformers.utils.logging.get_logger", "transformers.utils.import_utils.create_import_structure_from_path", "transformers.utils.import_utils.define_import_structure" ]
Python
3
4
{ "total_num": 10, "base_passed_num": 0 }
[ "transformers.src.transformers.utils.generic.infer_framework", "transformers.src.transformers.utils.generic.find_labels" ]
transformers
[ "transformers/utils/generic.py", "transformers/utils/generic.py" ]
[ "tests/utils/test_file_utils.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 738, "func_end_lineno": 753, "func_code": "def infer_framework(model_class):\n \"\"\"\n Infers the framework of a given model without using isinstance(), because we cannot guarantee that the relevant\n classes are imported or available.\n \"\"\"\n for base_class in inspect.getmro(model_class):\n module = base_class.__module__\n name = base_class.__name__\n if module.startswith(\"tensorflow\") or module.startswith(\"keras\") or name == \"TFPreTrainedModel\":\n return \"tf\"\n elif module.startswith(\"torch\") or name == \"PreTrainedModel\":\n return \"pt\"\n elif module.startswith(\"flax\") or module.startswith(\"jax\") or name == \"FlaxPreTrainedModel\":\n return \"flax\"\n else:\n raise TypeError(f\"Could not infer framework from class {model_class}.\")" }, { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 565, "func_end_lineno": 584, "func_code": "def find_labels(model_class):\n \"\"\"\n Find the labels used by a given model.\n\n Args:\n model_class (`type`): The class of the model.\n \"\"\"\n model_name = model_class.__name__\n framework = infer_framework(model_class)\n if framework == \"tf\":\n signature = inspect.signature(model_class.call) # TensorFlow models\n elif framework == \"pt\":\n signature = inspect.signature(model_class.forward) # PyTorch models\n else:\n signature = inspect.signature(model_class.__call__) # Flax models\n\n if \"QuestionAnswering\" in model_name:\n return [p for p in signature.parameters if \"label\" in p or p in (\"start_positions\", \"end_positions\")]\n else:\n return [p for p in signature.parameters if \"label\" in p]" } ]
[ "function_empty", "TDD" ]
[ "transformers.utils.generic.infer_framework", "transformers.utils.generic.find_labels" ]
Python
1
2
{ "total_num": 5, "base_passed_num": 4 }
[ "transformers.src.transformers.utils.logging._configure_library_root_logger", "transformers.src.transformers.utils.logging.get_logger", "transformers.src.transformers.utils.logging.get_verbosity", "transformers.src.transformers.utils.logging.set_verbosity" ]
transformers
[ "transformers/utils/logging.py", "transformers/utils/logging.py", "transformers/utils/logging.py", "transformers/utils/logging.py", "transformers/utils/logging.py" ]
[ "tests/utils/test_logging.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 410, "func_start_lineno": 81, "func_end_lineno": 104, "func_code": "def _configure_library_root_logger() -> None:\n global _default_handler\n\n with _lock:\n if _default_handler:\n # This library has already configured the library root logger.\n return\n _default_handler = logging.StreamHandler() # Set sys.stderr as stream.\n # set defaults based on https://github.com/pyinstaller/pyinstaller/issues/7334#issuecomment-1357447176\n if sys.stderr is None:\n sys.stderr = open(os.devnull, \"w\")\n\n _default_handler.flush = sys.stderr.flush\n\n # Apply our default configuration to the library root logger.\n library_root_logger = _get_library_root_logger()\n library_root_logger.addHandler(_default_handler)\n library_root_logger.setLevel(_get_default_logging_level())\n # if logging level is debug, we add pathname and lineno to formatter for easy debugging\n if os.getenv(\"TRANSFORMERS_VERBOSITY\", None) == \"detail\":\n formatter = logging.Formatter(\"[%(levelname)s|%(pathname)s:%(lineno)s] %(asctime)s >> %(message)s\")\n _default_handler.setFormatter(formatter)\n\n library_root_logger.propagate = False" }, { "class_start_lineno": 1, "class_end_lineno": 410, "func_start_lineno": 147, "func_end_lineno": 158, "func_code": "def get_logger(name: Optional[str] = None) -> logging.Logger:\n \"\"\"\n Return a logger with the specified name.\n\n This function is not supposed to be directly accessed unless you are writing a custom transformers module.\n \"\"\"\n\n if name is None:\n name = _get_library_name()\n\n _configure_library_root_logger()\n return logging.getLogger(name)" }, { "class_start_lineno": 1, "class_end_lineno": 410, "func_start_lineno": 161, "func_end_lineno": 181, "func_code": "def get_verbosity() -> int:\n \"\"\"\n Return the current level for the 🤗 Transformers's root logger as an int.\n\n Returns:\n `int`: The logging level.\n\n <Tip>\n\n 🤗 Transformers has following logging levels:\n\n - 50: `transformers.logging.CRITICAL` or `transformers.logging.FATAL`\n - 40: `transformers.logging.ERROR`\n - 30: `transformers.logging.WARNING` or `transformers.logging.WARN`\n - 20: `transformers.logging.INFO`\n - 10: `transformers.logging.DEBUG`\n\n </Tip>\"\"\"\n\n _configure_library_root_logger()\n return _get_library_root_logger().getEffectiveLevel()" }, { "class_start_lineno": 1, "class_end_lineno": 410, "func_start_lineno": 184, "func_end_lineno": 200, "func_code": "def set_verbosity(verbosity: int) -> None:\n \"\"\"\n Set the verbosity level for the 🤗 Transformers's root logger.\n\n Args:\n verbosity (`int`):\n Logging level, e.g., one of:\n\n - `transformers.logging.CRITICAL` or `transformers.logging.FATAL`\n - `transformers.logging.ERROR`\n - `transformers.logging.WARNING` or `transformers.logging.WARN`\n - `transformers.logging.INFO`\n - `transformers.logging.DEBUG`\n \"\"\"\n\n _configure_library_root_logger()\n _get_library_root_logger().setLevel(verbosity)" }, { "class_start_lineno": 1, "class_end_lineno": 410, "func_start_lineno": 218, "func_end_lineno": 220, "func_code": "def set_verbosity_error():\n \"\"\"Set the verbosity to the `ERROR` level.\"\"\"\n return set_verbosity(ERROR)" } ]
[ "function_empty", "TDD" ]
[ "transformers.utils.logging._configure_library_root_logger", "transformers.utils.logging.get_logger", "transformers.utils.logging.get_verbosity", "transformers.utils.logging.set_verbosity", "transformers.utils.logging.set_verbosity_error" ]
Python
3
4
{ "total_num": 6, "base_passed_num": 0 }
[ "transformers.src.transformers.modeling_rope_utils._compute_default_rope_parameters", "transformers.src.transformers.modeling_rope_utils._compute_linear_scaling_rope_parameters", "transformers.src.transformers.modeling_rope_utils._compute_llama3_parameters", "transformers.src.transformers.modeling_rope_utils._check_received_keys", "transformers.src.transformers.modeling_rope_utils.rope_config_validation" ]
transformers
[ "transformers/modeling_rope_utils.py", "transformers/modeling_rope_utils.py", "transformers/modeling_rope_utils.py", "transformers/modeling_rope_utils.py", "transformers/modeling_rope_utils.py", "transformers/modeling_rope_utils.py" ]
[ "tests/utils/test_modeling_rope_utils.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 29, "func_end_lineno": 68, "func_code": "def _compute_default_rope_parameters(\n config: Optional[PretrainedConfig] = None,\n device: Optional[\"torch.device\"] = None,\n seq_len: Optional[int] = None,\n **rope_kwargs,\n) -> Tuple[\"torch.Tensor\", float]:\n \"\"\"\n Computes the inverse frequencies according to the original RoPE implementation\n Args:\n config ([`~transformers.PretrainedConfig`]):\n The model configuration.\n device (`torch.device`):\n The device to use for initialization of the inverse frequencies.\n seq_len (`int`, *optional*):\n The current sequence length. Unused for this type of RoPE.\n rope_kwargs (`Dict`, *optional*):\n BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.\n Returns:\n Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the\n post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).\n \"\"\"\n if config is not None and len(rope_kwargs) > 0:\n raise ValueError(\n \"Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in \"\n f\"`_compute_default_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}\"\n )\n if len(rope_kwargs) > 0:\n base = rope_kwargs[\"base\"]\n dim = rope_kwargs[\"dim\"]\n elif config is not None:\n base = config.rope_theta\n partial_rotary_factor = config.partial_rotary_factor if hasattr(config, \"partial_rotary_factor\") else 1.0\n head_dim = getattr(config, \"head_dim\", config.hidden_size // config.num_attention_heads)\n dim = int(head_dim * partial_rotary_factor)\n\n attention_factor = 1.0 # Unused in this type of RoPE\n\n # Compute the inverse frequencies\n inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float().to(device) / dim))\n return inv_freq, attention_factor" }, { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 71, "func_end_lineno": 109, "func_code": "def _compute_linear_scaling_rope_parameters(\n config: Optional[PretrainedConfig] = None,\n device: Optional[\"torch.device\"] = None,\n seq_len: Optional[int] = None,\n **rope_kwargs,\n) -> Tuple[\"torch.Tensor\", float]:\n \"\"\"\n Computes the inverse frequencies with linear scaling. Credits to the Reddit user /u/kaiokendev\n Args:\n config ([`~transformers.PretrainedConfig`]):\n The model configuration.\n device (`torch.device`):\n The device to use for initialization of the inverse frequencies.\n seq_len (`int`, *optional*):\n The current sequence length. 
Unused for this type of RoPE.\n rope_kwargs (`Dict`, *optional*):\n BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.\n Returns:\n Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the\n post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).\n \"\"\"\n if config is not None and len(rope_kwargs) > 0:\n raise ValueError(\n \"Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in \"\n f\"`_compute_linear_scaling_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}\"\n )\n if len(rope_kwargs) > 0:\n factor = rope_kwargs[\"factor\"]\n elif config is not None:\n factor = config.rope_scaling[\"factor\"]\n\n # Gets the default RoPE parameters\n inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs)\n\n # Then applies linear scaling to the frequencies.\n # NOTE: originally, scaling was applied to the position_ids. However, we get `embs = inv_freq @ position_ids`, so\n # applying scaling to the inverse frequencies is equivalent.\n inv_freq /= factor\n return inv_freq, attention_factor" }, { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 307, "func_end_lineno": 347, "func_code": "def _compute_llama3_parameters(\n config: PretrainedConfig, device: \"torch.device\", seq_len: Optional[int] = None, **rope_kwargs\n) -> Tuple[\"torch.Tensor\", float]:\n \"\"\"\n Computes the inverse frequencies for llama 3.1.\n\n Args:\n config ([`~transformers.PretrainedConfig`]):\n The model configuration.\n device (`torch.device`):\n The device to use for initialization of the inverse frequencies.\n seq_len (`int`, *optional*):\n The current sequence length. 
Unused for this type of RoPE.\n rope_kwargs (`Dict`, *optional*):\n BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.\n Returns:\n Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the\n post-processing scaling factor applied to the computed cos/sin.\n \"\"\"\n # Gets the default RoPE parameters\n inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs)\n\n factor = config.rope_scaling[\"factor\"] # `8` in the original implementation\n low_freq_factor = config.rope_scaling[\"low_freq_factor\"] # `1` in the original implementation\n high_freq_factor = config.rope_scaling[\"high_freq_factor\"] # `4` in the original implementation\n old_context_len = config.rope_scaling[\"original_max_position_embeddings\"] # `8192` in the original implementation\n\n low_freq_wavelen = old_context_len / low_freq_factor\n high_freq_wavelen = old_context_len / high_freq_factor\n\n wavelen = 2 * math.pi / inv_freq\n # wavelen < high_freq_wavelen: do nothing\n # wavelen > low_freq_wavelen: divide by factor\n inv_freq_llama = torch.where(wavelen > low_freq_wavelen, inv_freq / factor, inv_freq)\n # otherwise: interpolate between the two, using a smooth factor\n smooth_factor = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)\n smoothed_inv_freq = (1 - smooth_factor) * inv_freq_llama / factor + smooth_factor * inv_freq_llama\n is_medium_freq = ~(wavelen < high_freq_wavelen) * ~(wavelen > low_freq_wavelen)\n inv_freq_llama = torch.where(is_medium_freq, smoothed_inv_freq, inv_freq_llama)\n\n return inv_freq_llama, attention_factor" }, { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 363, "func_end_lineno": 379, "func_code": "def _check_received_keys(rope_type: str, received_keys: set, required_keys: set, optional_keys: Optional[set] = None):\n \"\"\"Compare the received keys in `config.rope_scaling` against the expected and optional keys\"\"\"\n # BC: \"rope_type\" was originally \"type\" -- let's check for \"rope_type\" when \"type\" is present\n if \"type\" in received_keys:\n received_keys -= {\"type\"}\n required_keys.add(\"rope_type\")\n\n missing_keys = required_keys - received_keys\n if missing_keys:\n raise KeyError(f\"Missing required keys in `rope_scaling` for 'rope_type'='{rope_type}': {missing_keys}\")\n\n if optional_keys is not None:\n unused_keys = received_keys - required_keys - optional_keys\n else:\n unused_keys = received_keys - required_keys\n if unused_keys:\n logger.warning(f\"Unrecognized keys in `rope_scaling` for 'rope_type'='{rope_type}': {unused_keys}\")" }, { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 447, "func_end_lineno": 494, "func_code": "def _validate_longrope_parameters(config: PretrainedConfig):\n rope_scaling = config.rope_scaling\n rope_type = rope_scaling.get(\"rope_type\", rope_scaling.get(\"type\", None)) # BC: \"rope_type\" was originally \"type\"\n required_keys = {\"rope_type\", \"short_factor\", \"long_factor\"}\n # TODO (joao): update logic for the inclusion of `original_max_position_embeddings`\n optional_keys = {\"attention_factor\", \"factor\", \"original_max_position_embeddings\"}\n received_keys = set(rope_scaling.keys())\n _check_received_keys(rope_type, received_keys, required_keys, optional_keys)\n\n partial_rotary_factor = config.partial_rotary_factor if hasattr(config, \"partial_rotary_factor\") else 1.0\n head_dim = getattr(config, \"head_dim\", 
config.hidden_size // config.num_attention_heads)\n dim = int(head_dim * partial_rotary_factor)\n\n short_factor = rope_scaling.get(\"short_factor\")\n if not isinstance(short_factor, list) and all(isinstance(x, (int, float)) for x in short_factor):\n logger.warning(f\"`rope_scaling`'s short_factor field must be a list of numbers, got {short_factor}\")\n if not len(short_factor) == dim // 2:\n logger.warning(f\"`rope_scaling`'s short_factor field must have length {dim // 2}, got {len(short_factor)}\")\n\n long_factor = rope_scaling.get(\"long_factor\")\n if not isinstance(long_factor, list) and all(isinstance(x, (int, float)) for x in long_factor):\n logger.warning(f\"`rope_scaling`'s long_factor field must be a list of numbers, got {long_factor}\")\n if not len(long_factor) == dim // 2:\n logger.warning(f\"`rope_scaling`'s long_factor field must have length {dim // 2}, got {len(long_factor)}\")\n\n # Handle Phi3 divergence: prefer the use of `attention_factor` and/or `factor` over\n # `original_max_position_embeddings` to compute internal variables. The latter lives outside `rope_scaling` and is\n # unique to longrope (= undesirable)\n if hasattr(config, \"original_max_position_embeddings\"):\n logger.warning_once(\n \"This model has set a `original_max_position_embeddings` field, to be used together with \"\n \"`max_position_embeddings` to determine a scaling factor. Please set the `factor` field of `rope_scaling`\"\n \"with this ratio instead -- we recommend the use of this field over `original_max_position_embeddings`, \"\n \"as it is compatible with most model architectures.\"\n )\n else:\n factor = rope_scaling.get(\"factor\")\n if factor is None:\n logger.warning(\"Missing required keys in `rope_scaling`: 'factor'\")\n elif not isinstance(factor, float) or factor < 1.0:\n logger.warning(f\"`rope_scaling`'s factor field must be a float >= 1, got {factor}\")\n\n attention_factor = rope_scaling.get(\"attention_factor\")\n if attention_factor is not None:\n if not isinstance(attention_factor, float) or attention_factor < 0.0:\n logger.warning(\n f\"`rope_scaling`'s attention_factor field must be a float greater than 0, got {attention_factor}\"\n )" }, { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 544, "func_end_lineno": 560, "func_code": "def rope_config_validation(config: PretrainedConfig):\n \"\"\"\n Validate the RoPE config arguments, given a `PretrainedConfig` object\n \"\"\"\n rope_scaling = getattr(config, \"rope_scaling\", None) # not a default parameter in `PretrainedConfig`\n if rope_scaling is None:\n return\n\n # BC: \"rope_type\" was originally \"type\"\n rope_type = rope_scaling.get(\"rope_type\", rope_scaling.get(\"type\", \"default\"))\n validation_fn = ROPE_VALIDATION_FUNCTIONS.get(rope_type)\n if validation_fn is not None:\n validation_fn(config)\n else:\n logger.warning(\n f\"Missing validation function mapping in `ROPE_VALIDATION_FUNCTIONS` for 'rope_type'='{rope_type}'\"\n )" } ]
[ "function_empty", "TDD" ]
[ "transformers.modeling_rope_utils._compute_default_rope_parameters", "transformers.modeling_rope_utils._compute_linear_scaling_rope_parameters", "transformers.modeling_rope_utils._compute_llama3_parameters", "transformers.modeling_rope_utils._check_received_keys", "transformers.modeling_rope_utils._validate_longrope_parameters", "transformers.modeling_rope_utils.rope_config_validation" ]
Python
4
5
{ "total_num": 10, "base_passed_num": 1 }
[ "langchain_core.libs.core.langchain_core.load.dump.dumps", "langchain_core.libs.core.langchain_core.load.dump.default", "langchain_core.libs.core.langchain_core.load.dump.dumpd" ]
langchain_core
[ "langchain_core/load/dump.py", "langchain_core/load/dump.py", "langchain_core/load/dump.py" ]
[ "libs/core/tests/unit_tests/load/test_serializable.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 70, "func_start_lineno": 23, "func_end_lineno": 53, "func_code": "def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:\n \"\"\"Return a json string representation of an object.\n\n Args:\n obj: The object to dump.\n pretty: Whether to pretty print the json. If true, the json will be\n indented with 2 spaces (if no indent is provided as part of kwargs).\n Default is False.\n kwargs: Additional arguments to pass to json.dumps\n\n Returns:\n A json string representation of the object.\n\n Raises:\n ValueError: If `default` is passed as a kwarg.\n \"\"\"\n if \"default\" in kwargs:\n msg = \"`default` should not be passed to dumps\"\n raise ValueError(msg)\n try:\n if pretty:\n indent = kwargs.pop(\"indent\", 2)\n return json.dumps(obj, default=default, indent=indent, **kwargs)\n else:\n return json.dumps(obj, default=default, **kwargs)\n except TypeError:\n if pretty:\n indent = kwargs.pop(\"indent\", 2)\n return json.dumps(to_json_not_implemented(obj), indent=indent, **kwargs)\n else:\n return json.dumps(to_json_not_implemented(obj), **kwargs)" }, { "class_start_lineno": 1, "class_end_lineno": 70, "func_start_lineno": 7, "func_end_lineno": 20, "func_code": "def default(obj: Any) -> Any:\n \"\"\"Return a default value for a Serializable object or\n a SerializedNotImplemented object.\n\n Args:\n obj: The object to serialize to json if it is a Serializable object.\n\n Returns:\n A json serializable object or a SerializedNotImplemented object.\n \"\"\"\n if isinstance(obj, Serializable):\n return obj.to_json()\n else:\n return to_json_not_implemented(obj)" }, { "class_start_lineno": 1, "class_end_lineno": 70, "func_start_lineno": 56, "func_end_lineno": 70, "func_code": "def dumpd(obj: Any) -> Any:\n \"\"\"Return a dict representation of an object.\n\n Note:\n Unfortunately this function is not as efficient as it could be\n because it first dumps the object to a json string and then loads it\n back into a dictionary.\n\n Args:\n obj: The object to dump.\n\n Returns:\n dictionary that can be serialized to json using json.dumps\n \"\"\"\n return json.loads(dumps(obj))" } ]
[ "function_empty" ]
[ "langchain_core.load.dump.dumps", "langchain_core.load.dump.default", "langchain_core.load.dump.dumpd" ]
Python
3
3
{ "total_num": 8, "base_passed_num": 3 }
[ "langchain_core.libs.core.langchain_core.load.dump.dumps", "langchain_core.libs.core.langchain_core.load.dump.default", "langchain_core.libs.core.langchain_core.load.dump.dumpd", "langchain_core.libs.core.langchain_core.utils.json.parse_partial_json", "langchain_core.libs.core.langchain_core.messages.ai.AIMessageChunk::init_tool_calls" ]
langchain_core
[ "langchain_core/load/dump.py", "langchain_core/load/dump.py", "langchain_core/load/dump.py", "langchain_core/utils/json.py", "langchain_core/messages/ai.py" ]
[ "libs/core/tests/unit_tests/messages/test_ai.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 70, "func_start_lineno": 23, "func_end_lineno": 53, "func_code": "def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:\n \"\"\"Return a json string representation of an object.\n\n Args:\n obj: The object to dump.\n pretty: Whether to pretty print the json. If true, the json will be\n indented with 2 spaces (if no indent is provided as part of kwargs).\n Default is False.\n kwargs: Additional arguments to pass to json.dumps\n\n Returns:\n A json string representation of the object.\n\n Raises:\n ValueError: If `default` is passed as a kwarg.\n \"\"\"\n if \"default\" in kwargs:\n msg = \"`default` should not be passed to dumps\"\n raise ValueError(msg)\n try:\n if pretty:\n indent = kwargs.pop(\"indent\", 2)\n return json.dumps(obj, default=default, indent=indent, **kwargs)\n else:\n return json.dumps(obj, default=default, **kwargs)\n except TypeError:\n if pretty:\n indent = kwargs.pop(\"indent\", 2)\n return json.dumps(to_json_not_implemented(obj), indent=indent, **kwargs)\n else:\n return json.dumps(to_json_not_implemented(obj), **kwargs)" }, { "class_start_lineno": 1, "class_end_lineno": 70, "func_start_lineno": 7, "func_end_lineno": 20, "func_code": "def default(obj: Any) -> Any:\n \"\"\"Return a default value for a Serializable object or\n a SerializedNotImplemented object.\n\n Args:\n obj: The object to serialize to json if it is a Serializable object.\n\n Returns:\n A json serializable object or a SerializedNotImplemented object.\n \"\"\"\n if isinstance(obj, Serializable):\n return obj.to_json()\n else:\n return to_json_not_implemented(obj)" }, { "class_start_lineno": 1, "class_end_lineno": 70, "func_start_lineno": 56, "func_end_lineno": 70, "func_code": "def dumpd(obj: Any) -> Any:\n \"\"\"Return a dict representation of an object.\n\n Note:\n Unfortunately this function is not as efficient as it could be\n because it first dumps the object to a json string and then loads it\n back into a dictionary.\n\n Args:\n obj: The object to dump.\n\n Returns:\n dictionary that can be serialized to json using json.dumps\n \"\"\"\n return json.loads(dumps(obj))" }, { "class_start_lineno": 1, "class_end_lineno": 191, "func_start_lineno": 43, "func_end_lineno": 119, "func_code": "def parse_partial_json(s: str, *, strict: bool = False) -> Any:\n \"\"\"Parse a JSON string that may be missing closing braces.\n\n Args:\n s: The JSON string to parse.\n strict: Whether to use strict parsing. 
Defaults to False.\n\n Returns:\n The parsed JSON object as a Python dictionary.\n \"\"\"\n # Attempt to parse the string as-is.\n try:\n return json.loads(s, strict=strict)\n except json.JSONDecodeError:\n pass\n\n # Initialize variables.\n new_chars = []\n stack = []\n is_inside_string = False\n escaped = False\n\n # Process each character in the string one at a time.\n for char in s:\n if is_inside_string:\n if char == '\"' and not escaped:\n is_inside_string = False\n elif char == \"\\n\" and not escaped:\n char = \"\\\\n\" # Replace the newline character with the escape sequence.\n elif char == \"\\\\\":\n escaped = not escaped\n else:\n escaped = False\n else:\n if char == '\"':\n is_inside_string = True\n escaped = False\n elif char == \"{\":\n stack.append(\"}\")\n elif char == \"[\":\n stack.append(\"]\")\n elif char == \"}\" or char == \"]\":\n if stack and stack[-1] == char:\n stack.pop()\n else:\n # Mismatched closing character; the input is malformed.\n return None\n\n # Append the processed character to the new string.\n new_chars.append(char)\n\n # If we're still inside a string at the end of processing,\n # we need to close the string.\n if is_inside_string:\n if escaped: # Remoe unterminated escape character\n new_chars.pop()\n new_chars.append('\"')\n\n # Reverse the stack to get the closing characters.\n stack.reverse()\n\n # Try to parse mods of string until we succeed or run out of characters.\n while new_chars:\n # Close any remaining open structures in the reverse\n # order that they were opened.\n # Attempt to parse the modified string as JSON.\n try:\n return json.loads(\"\".join(new_chars + stack), strict=strict)\n except json.JSONDecodeError:\n # If we still can't parse the string as JSON,\n # try removing the last character\n new_chars.pop()\n\n # If we got here, we ran out of characters to remove\n # and still couldn't parse the string as JSON, so return the parse error\n # for the original string.\n return json.loads(s, strict=strict)" }, { "class_start_lineno": 296, "class_end_lineno": 403, "func_start_lineno": 328, "func_end_lineno": 394, "func_code": " def init_tool_calls(self) -> Self:\n \"\"\"Initialize tool calls from tool call chunks.\n\n Args:\n values: The values to validate.\n\n Returns:\n The values with tool calls initialized.\n\n Raises:\n ValueError: If the tool call chunks are malformed.\n \"\"\"\n if not self.tool_call_chunks:\n if self.tool_calls:\n self.tool_call_chunks = [\n create_tool_call_chunk(\n name=tc[\"name\"],\n args=json.dumps(tc[\"args\"]),\n id=tc[\"id\"],\n index=None,\n )\n for tc in self.tool_calls\n ]\n if self.invalid_tool_calls:\n tool_call_chunks = self.tool_call_chunks\n tool_call_chunks.extend(\n [\n create_tool_call_chunk(\n name=tc[\"name\"], args=tc[\"args\"], id=tc[\"id\"], index=None\n )\n for tc in self.invalid_tool_calls\n ]\n )\n self.tool_call_chunks = tool_call_chunks\n\n return self\n tool_calls = []\n invalid_tool_calls = []\n\n def add_chunk_to_invalid_tool_calls(chunk: ToolCallChunk) -> None:\n invalid_tool_calls.append(\n create_invalid_tool_call(\n name=chunk[\"name\"],\n args=chunk[\"args\"],\n id=chunk[\"id\"],\n error=None,\n )\n )\n\n for chunk in self.tool_call_chunks:\n try:\n args_ = parse_partial_json(chunk[\"args\"]) if chunk[\"args\"] != \"\" else {} # type: ignore[arg-type]\n if isinstance(args_, dict):\n tool_calls.append(\n create_tool_call(\n name=chunk[\"name\"] or \"\",\n args=args_,\n id=chunk[\"id\"],\n )\n )\n else:\n add_chunk_to_invalid_tool_calls(chunk)\n except Exception:\n 
add_chunk_to_invalid_tool_calls(chunk)\n self.tool_calls = tool_calls\n self.invalid_tool_calls = invalid_tool_calls\n return self" } ]
[ "function_empty" ]
[ "langchain_core.load.dump.dumps", "langchain_core.load.dump.default", "langchain_core.load.dump.dumpd", "langchain_core.utils.json.parse_partial_json", "langchain_core.messages.ai.AIMessageChunk.init_tool_calls" ]
Python
5
5
{ "total_num": 11, "base_passed_num": 9 }
[ "langchain_core.libs.core.langchain_core.runnables.config.ensure_config", "langchain_core.libs.core.langchain_core.runnables.config.patch_config", "langchain_core.libs.core.langchain_core.utils.json.parse_json_markdown", "langchain_core.libs.core.langchain_core.output_parsers.json.JsonOutputParser::parse_result" ]
langchain_core
[ "langchain_core/runnables/config.py", "langchain_core/runnables/config.py", "langchain_core/utils/json.py", "langchain_core/output_parsers/json.py" ]
[ "libs/core/tests/unit_tests/output_parsers/test_json.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 149, "func_end_lineno": 199, "func_code": "def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:\n \"\"\"Ensure that a config is a dict with all keys present.\n\n Args:\n config (Optional[RunnableConfig], optional): The config to ensure.\n Defaults to None.\n\n Returns:\n RunnableConfig: The ensured config.\n \"\"\"\n empty = RunnableConfig(\n tags=[],\n metadata={},\n callbacks=None,\n recursion_limit=DEFAULT_RECURSION_LIMIT,\n configurable={},\n )\n if var_config := var_child_runnable_config.get():\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in var_config.items()\n if v is not None\n },\n )\n )\n if config is not None:\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in config.items()\n if v is not None and k in CONFIG_KEYS\n },\n )\n )\n if config is not None:\n for k, v in config.items():\n if k not in CONFIG_KEYS and v is not None:\n empty[\"configurable\"][k] = v\n for key, value in empty.get(\"configurable\", {}).items():\n if (\n not key.startswith(\"__\")\n and isinstance(value, (str, int, float, bool))\n and key not in empty[\"metadata\"]\n ):\n empty[\"metadata\"][key] = value\n return empty" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 249, "func_end_lineno": 292, "func_code": "def patch_config(\n config: Optional[RunnableConfig],\n *,\n callbacks: Optional[BaseCallbackManager] = None,\n recursion_limit: Optional[int] = None,\n max_concurrency: Optional[int] = None,\n run_name: Optional[str] = None,\n configurable: Optional[dict[str, Any]] = None,\n) -> RunnableConfig:\n \"\"\"Patch a config with new values.\n\n Args:\n config (Optional[RunnableConfig]): The config to patch.\n callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.\n Defaults to None.\n recursion_limit (Optional[int], optional): The recursion limit to set.\n Defaults to None.\n max_concurrency (Optional[int], optional): The max concurrency to set.\n Defaults to None.\n run_name (Optional[str], optional): The run name to set. 
Defaults to None.\n configurable (Optional[Dict[str, Any]], optional): The configurable to set.\n Defaults to None.\n\n Returns:\n RunnableConfig: The patched config.\n \"\"\"\n config = ensure_config(config)\n if callbacks is not None:\n # If we're replacing callbacks, we need to unset run_name\n # As that should apply only to the same run as the original callbacks\n config[\"callbacks\"] = callbacks\n if \"run_name\" in config:\n del config[\"run_name\"]\n if \"run_id\" in config:\n del config[\"run_id\"]\n if recursion_limit is not None:\n config[\"recursion_limit\"] = recursion_limit\n if max_concurrency is not None:\n config[\"max_concurrency\"] = max_concurrency\n if run_name is not None:\n config[\"run_name\"] = run_name\n if configurable is not None:\n config[\"configurable\"] = {**config.get(\"configurable\", {}), **configurable}\n return config" }, { "class_start_lineno": 1, "class_end_lineno": 191, "func_start_lineno": 125, "func_end_lineno": 145, "func_code": "def parse_json_markdown(\n json_string: str, *, parser: Callable[[str], Any] = parse_partial_json\n) -> dict:\n \"\"\"Parse a JSON string from a Markdown string.\n\n Args:\n json_string: The Markdown string.\n\n Returns:\n The parsed JSON object as a Python dictionary.\n \"\"\"\n try:\n return _parse_json(json_string, parser=parser)\n except json.JSONDecodeError:\n # Try to find JSON string within triple backticks\n match = _json_markdown_re.search(json_string)\n\n # If no match found, assume the entire string is a JSON string\n # Else, use the content within the backticks\n json_str = json_string if match is None else match.group(2)\n return _parse_json(json_str, parser=parser)" }, { "class_start_lineno": 34, "class_end_lineno": 123, "func_start_lineno": 57, "func_end_lineno": 86, "func_code": " def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:\n \"\"\"Parse the result of an LLM call to a JSON object.\n\n Args:\n result: The result of the LLM call.\n partial: Whether to parse partial JSON objects.\n If True, the output will be a JSON object containing\n all the keys that have been returned so far.\n If False, the output will be the full JSON object.\n Default is False.\n\n Returns:\n The parsed JSON object.\n\n Raises:\n OutputParserException: If the output is not valid JSON.\n \"\"\"\n text = result[0].text\n text = text.strip()\n if partial:\n try:\n return parse_json_markdown(text)\n except JSONDecodeError:\n return None\n else:\n try:\n return parse_json_markdown(text)\n except JSONDecodeError as e:\n msg = f\"Invalid json output: {text}\"\n raise OutputParserException(msg, llm_output=text) from e" } ]
[ "function_empty" ]
[ "langchain_core.runnables.config.ensure_config", "langchain_core.runnables.config.patch_config", "langchain_core.utils.json.parse_json_markdown", "langchain_core.output_parsers.json.JsonOutputParser.parse_result" ]
Python
4
4
{ "total_num": 36, "base_passed_num": 11 }
[ "langchain_core.libs.core.langchain_core.utils.json.parse_partial_json", "langchain_core.libs.core.langchain_core.messages.ai.AIMessageChunk::init_tool_calls", "langchain_core.libs.core.langchain_core.utils._merge.merge_lists", "langchain_core.libs.core.langchain_core.utils._merge.merge_dicts" ]
langchain_core
[ "langchain_core/utils/json.py", "langchain_core/messages/ai.py", "langchain_core/utils/_merge.py", "langchain_core/utils/_merge.py", "langchain_core/runnables/base.py" ]
[ "libs/core/tests/unit_tests/output_parsers/test_openai_tools.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 191, "func_start_lineno": 43, "func_end_lineno": 119, "func_code": "def parse_partial_json(s: str, *, strict: bool = False) -> Any:\n \"\"\"Parse a JSON string that may be missing closing braces.\n\n Args:\n s: The JSON string to parse.\n strict: Whether to use strict parsing. Defaults to False.\n\n Returns:\n The parsed JSON object as a Python dictionary.\n \"\"\"\n # Attempt to parse the string as-is.\n try:\n return json.loads(s, strict=strict)\n except json.JSONDecodeError:\n pass\n\n # Initialize variables.\n new_chars = []\n stack = []\n is_inside_string = False\n escaped = False\n\n # Process each character in the string one at a time.\n for char in s:\n if is_inside_string:\n if char == '\"' and not escaped:\n is_inside_string = False\n elif char == \"\\n\" and not escaped:\n char = \"\\\\n\" # Replace the newline character with the escape sequence.\n elif char == \"\\\\\":\n escaped = not escaped\n else:\n escaped = False\n else:\n if char == '\"':\n is_inside_string = True\n escaped = False\n elif char == \"{\":\n stack.append(\"}\")\n elif char == \"[\":\n stack.append(\"]\")\n elif char == \"}\" or char == \"]\":\n if stack and stack[-1] == char:\n stack.pop()\n else:\n # Mismatched closing character; the input is malformed.\n return None\n\n # Append the processed character to the new string.\n new_chars.append(char)\n\n # If we're still inside a string at the end of processing,\n # we need to close the string.\n if is_inside_string:\n if escaped: # Remoe unterminated escape character\n new_chars.pop()\n new_chars.append('\"')\n\n # Reverse the stack to get the closing characters.\n stack.reverse()\n\n # Try to parse mods of string until we succeed or run out of characters.\n while new_chars:\n # Close any remaining open structures in the reverse\n # order that they were opened.\n # Attempt to parse the modified string as JSON.\n try:\n return json.loads(\"\".join(new_chars + stack), strict=strict)\n except json.JSONDecodeError:\n # If we still can't parse the string as JSON,\n # try removing the last character\n new_chars.pop()\n\n # If we got here, we ran out of characters to remove\n # and still couldn't parse the string as JSON, so return the parse error\n # for the original string.\n return json.loads(s, strict=strict)" }, { "class_start_lineno": 296, "class_end_lineno": 403, "func_start_lineno": 328, "func_end_lineno": 394, "func_code": " def init_tool_calls(self) -> Self:\n \"\"\"Initialize tool calls from tool call chunks.\n\n Args:\n values: The values to validate.\n\n Returns:\n The values with tool calls initialized.\n\n Raises:\n ValueError: If the tool call chunks are malformed.\n \"\"\"\n if not self.tool_call_chunks:\n if self.tool_calls:\n self.tool_call_chunks = [\n create_tool_call_chunk(\n name=tc[\"name\"],\n args=json.dumps(tc[\"args\"]),\n id=tc[\"id\"],\n index=None,\n )\n for tc in self.tool_calls\n ]\n if self.invalid_tool_calls:\n tool_call_chunks = self.tool_call_chunks\n tool_call_chunks.extend(\n [\n create_tool_call_chunk(\n name=tc[\"name\"], args=tc[\"args\"], id=tc[\"id\"], index=None\n )\n for tc in self.invalid_tool_calls\n ]\n )\n self.tool_call_chunks = tool_call_chunks\n\n return self\n tool_calls = []\n invalid_tool_calls = []\n\n def add_chunk_to_invalid_tool_calls(chunk: ToolCallChunk) -> None:\n invalid_tool_calls.append(\n create_invalid_tool_call(\n name=chunk[\"name\"],\n args=chunk[\"args\"],\n id=chunk[\"id\"],\n error=None,\n )\n )\n\n for chunk in self.tool_call_chunks:\n try:\n 
args_ = parse_partial_json(chunk[\"args\"]) if chunk[\"args\"] != \"\" else {} # type: ignore[arg-type]\n if isinstance(args_, dict):\n tool_calls.append(\n create_tool_call(\n name=chunk[\"name\"] or \"\",\n args=args_,\n id=chunk[\"id\"],\n )\n )\n else:\n add_chunk_to_invalid_tool_calls(chunk)\n except Exception:\n add_chunk_to_invalid_tool_calls(chunk)\n self.tool_calls = tool_calls\n self.invalid_tool_calls = invalid_tool_calls\n return self" }, { "class_start_lineno": 1, "class_end_lineno": 148, "func_start_lineno": 72, "func_end_lineno": 106, "func_code": "def merge_lists(left: Optional[list], *others: Optional[list]) -> Optional[list]:\n \"\"\"Add many lists, handling None.\n\n Args:\n left: The first list to merge.\n others: The other lists to merge.\n\n Returns:\n The merged list.\n \"\"\"\n merged = left.copy() if left is not None else None\n for other in others:\n if other is None:\n continue\n elif merged is None:\n merged = other.copy()\n else:\n for e in other:\n if isinstance(e, dict) and \"index\" in e and isinstance(e[\"index\"], int):\n to_merge = [\n i\n for i, e_left in enumerate(merged)\n if e_left[\"index\"] == e[\"index\"]\n ]\n if to_merge:\n # TODO: Remove this once merge_dict is updated with special\n # handling for 'type'.\n if \"type\" in e:\n e = {k: v for k, v in e.items() if k != \"type\"}\n merged[to_merge[0]] = merge_dicts(merged[to_merge[0]], e)\n else:\n merged.append(e)\n else:\n merged.append(e)\n return merged" }, { "class_start_lineno": 1, "class_end_lineno": 148, "func_start_lineno": 6, "func_end_lineno": 69, "func_code": "def merge_dicts(left: dict[str, Any], *others: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Merge many dicts, handling specific scenarios where a key exists in both\n dictionaries but has a value of None in 'left'. In such cases, the method uses the\n value from 'right' for that key in the merged dictionary.\n\n Args:\n left: The first dictionary to merge.\n others: The other dictionaries to merge.\n\n Returns:\n The merged dictionary.\n\n Raises:\n TypeError: If the key exists in both dictionaries but has a different type.\n TypeError: If the value has an unsupported type.\n\n Example:\n If left = {\"function_call\": {\"arguments\": None}} and\n right = {\"function_call\": {\"arguments\": \"{\\n\"}}\n then, after merging, for the key \"function_call\",\n the value from 'right' is used,\n resulting in merged = {\"function_call\": {\"arguments\": \"{\\n\"}}.\n \"\"\"\n merged = left.copy()\n for right in others:\n for right_k, right_v in right.items():\n if right_k not in merged or right_v is not None and merged[right_k] is None:\n merged[right_k] = right_v\n elif right_v is None:\n continue\n elif type(merged[right_k]) is not type(right_v):\n msg = (\n f'additional_kwargs[\"{right_k}\"] already exists in this message,'\n \" but with a different type.\"\n )\n raise TypeError(msg)\n elif isinstance(merged[right_k], str):\n # TODO: Add below special handling for 'type' key in 0.3 and remove\n # merge_lists 'type' logic.\n #\n # if right_k == \"type\":\n # if merged[right_k] == right_v:\n # continue\n # else:\n # raise ValueError(\n # \"Unable to merge. Two different values seen for special \"\n # f\"key 'type': {merged[right_k]} and {right_v}. 
'type' \"\n # \"should either occur once or have the same value across \"\n # \"all dicts.\"\n # )\n merged[right_k] += right_v\n elif isinstance(merged[right_k], dict):\n merged[right_k] = merge_dicts(merged[right_k], right_v)\n elif isinstance(merged[right_k], list):\n merged[right_k] = merge_lists(merged[right_k], right_v)\n elif merged[right_k] == right_v:\n continue\n else:\n msg = (\n f\"Additional kwargs key {right_k} already exists in left dict and \"\n f\"value has unsupported type {type(merged[right_k])}.\"\n )\n raise TypeError(msg)\n return merged" }, { "class_start_lineno": 2659, "class_end_lineno": 3435, "func_start_lineno": 3403, "func_end_lineno": 3409, "func_code": " def stream(\n self,\n input: Input,\n config: Optional[RunnableConfig] = None,\n **kwargs: Optional[Any],\n ) -> Iterator[Output]:\n yield from self.transform(iter([input]), config, **kwargs)" } ]
[ "function_empty" ]
[ "langchain_core.utils.json.parse_partial_json", "langchain_core.messages.ai.AIMessageChunk.init_tool_calls", "langchain_core.utils._merge.merge_lists", "langchain_core.utils._merge.merge_dicts", "langchain_core.runnables.base.RunnableSequence.stream" ]
Python
4
4
{ "total_num": 11, "base_passed_num": 2 }
[ "langchain_core.libs.core.langchain_core.utils.formatting.StrictFormatter::validate_input_variables", "langchain_core.libs.core.langchain_core.prompts.string.check_valid_template" ]
langchain_core
[ "langchain_core/utils/formatting.py", "langchain_core/prompts/string.py", "langchain_core/prompts/few_shot.py" ]
[ "libs/core/tests/unit_tests/prompts/test_few_shot.py" ]
[ { "class_start_lineno": 8, "class_end_lineno": 48, "func_start_lineno": 35, "func_end_lineno": 48, "func_code": " def validate_input_variables(\n self, format_string: str, input_variables: list[str]\n ) -> None:\n \"\"\"Check that all input variables are used in the format string.\n\n Args:\n format_string: The format string.\n input_variables: The input variables.\n\n Raises:\n ValueError: If any input variables are not used in the format string.\n \"\"\"\n dummy_inputs = dict.fromkeys(input_variables, \"foo\")\n super().format(format_string, **dummy_inputs)" }, { "class_start_lineno": 1, "class_end_lineno": 319, "func_start_lineno": 207, "func_end_lineno": 236, "func_code": "def check_valid_template(\n template: str, template_format: str, input_variables: list[str]\n) -> None:\n \"\"\"Check that template string is valid.\n\n Args:\n template: The template string.\n template_format: The template format. Should be one of \"f-string\" or \"jinja2\".\n input_variables: The input variables.\n\n Raises:\n ValueError: If the template format is not supported.\n ValueError: If the prompt schema is invalid.\n \"\"\"\n try:\n validator_func = DEFAULT_VALIDATOR_MAPPING[template_format]\n except KeyError as exc:\n msg = (\n f\"Invalid template format {template_format!r}, should be one of\"\n f\" {list(DEFAULT_FORMATTER_MAPPING)}.\"\n )\n raise ValueError(msg) from exc\n try:\n validator_func(template, input_variables)\n except (KeyError, IndexError) as exc:\n msg = (\n \"Invalid prompt schema; check for mismatched or missing input parameters\"\n f\" from {input_variables}.\"\n )\n raise ValueError(msg) from exc" }, { "class_start_lineno": 115, "class_end_lineno": 244, "func_start_lineno": 148, "func_end_lineno": 164, "func_code": " def template_is_valid(self) -> Self:\n \"\"\"Check that prefix, suffix, and input variables are consistent.\"\"\"\n if self.validate_template:\n check_valid_template(\n self.prefix + self.suffix,\n self.template_format,\n self.input_variables + list(self.partial_variables),\n )\n elif self.template_format or None:\n self.input_variables = [\n var\n for var in get_template_variables(\n self.prefix + self.suffix, self.template_format\n )\n if var not in self.partial_variables\n ]\n return self" } ]
[ "function_empty" ]
[ "langchain_core.utils.formatting.StrictFormatter.validate_input_variables", "langchain_core.prompts.string.check_valid_template", "langchain_core.prompts.few_shot.FewShotPromptTemplate.template_is_valid" ]
Python
2
2
{ "total_num": 16, "base_passed_num": 13 }
[ "langchain_core.libs.core.langchain_core.prompts.loading.load_prompt_from_config", "langchain_core.libs.core.langchain_core.prompts.loading.load_prompt" ]
langchain_core
[ "langchain_core/prompts/loading.py", "langchain_core/prompts/loading.py", "langchain_core/prompts/loading.py" ]
[ "libs/core/tests/unit_tests/prompts/test_loading.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 203, "func_start_lineno": 20, "func_end_lineno": 41, "func_code": "def load_prompt_from_config(config: dict) -> BasePromptTemplate:\n \"\"\"Load prompt from Config Dict.\n\n Args:\n config: Dict containing the prompt configuration.\n\n Returns:\n A PromptTemplate object.\n\n Raises:\n ValueError: If the prompt type is not supported.\n \"\"\"\n if \"_type\" not in config:\n logger.warning(\"No `_type` key found, defaulting to `prompt`.\")\n config_type = config.pop(\"_type\", \"prompt\")\n\n if config_type not in type_to_loader_dict:\n msg = f\"Loading {config_type} prompt not supported\"\n raise ValueError(msg)\n\n prompt_loader = type_to_loader_dict[config_type]\n return prompt_loader(config)" }, { "class_start_lineno": 1, "class_end_lineno": 203, "func_start_lineno": 141, "func_end_lineno": 163, "func_code": "def load_prompt(\n path: Union[str, Path], encoding: Optional[str] = None\n) -> BasePromptTemplate:\n \"\"\"Unified method for loading a prompt from LangChainHub or local fs.\n\n Args:\n path: Path to the prompt file.\n encoding: Encoding of the file. Defaults to None.\n\n Returns:\n A PromptTemplate object.\n\n Raises:\n RuntimeError: If the path is a Lang Chain Hub path.\n \"\"\"\n if isinstance(path, str) and path.startswith(\"lc://\"):\n msg = (\n \"Loading from the deprecated github-based Hub is no longer supported. \"\n \"Please use the new LangChain Hub at https://smith.langchain.com/hub \"\n \"instead.\"\n )\n raise RuntimeError(msg)\n return _load_prompt_from_file(path, encoding)" }, { "class_start_lineno": 1, "class_end_lineno": 203, "func_start_lineno": 99, "func_end_lineno": 118, "func_code": "def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:\n \"\"\"Load the \"few shot\" prompt from the config.\"\"\"\n # Load the suffix and prefix templates.\n config = _load_template(\"suffix\", config)\n config = _load_template(\"prefix\", config)\n # Load the example prompt.\n if \"example_prompt_path\" in config:\n if \"example_prompt\" in config:\n msg = (\n \"Only one of example_prompt and example_prompt_path should \"\n \"be specified.\"\n )\n raise ValueError(msg)\n config[\"example_prompt\"] = load_prompt(config.pop(\"example_prompt_path\"))\n else:\n config[\"example_prompt\"] = load_prompt_from_config(config[\"example_prompt\"])\n # Load the examples.\n config = _load_examples(config)\n config = _load_output_parser(config)\n return FewShotPromptTemplate(**config)" } ]
[ "function_empty" ]
[ "langchain_core.prompts.loading.load_prompt_from_config", "langchain_core.prompts.loading.load_prompt", "langchain_core.prompts.loading._load_few_shot_prompt" ]
Python
2
2
{ "total_num": 10, "base_passed_num": 0 }
[ "langchain_core.libs.core.langchain_core.runnables.base.RunnableLambda::deps", "langchain_core.libs.core.langchain_core.beta.runnables.context.config_with_context", "langchain_core.libs.core.langchain_core.runnables.config.ensure_config", "langchain_core.libs.core.langchain_core.runnables.config.patch_config" ]
langchain_core
[ "langchain_core/runnables/base.py", "langchain_core/runnables/base.py", "langchain_core/beta/runnables/context.py", "langchain_core/runnables/config.py", "langchain_core/runnables/config.py" ]
[ "libs/core/tests/unit_tests/prompts/test_structured.py" ]
[ { "class_start_lineno": 4230, "class_end_lineno": 4967, "func_start_lineno": 4471, "func_end_lineno": 4491, "func_code": " def deps(self) -> list[Runnable]:\n \"\"\"The dependencies of this Runnable.\n\n Returns:\n The dependencies of this Runnable. If the function has nonlocal\n variables that are Runnables, they are considered dependencies.\n \"\"\"\n if hasattr(self, \"func\"):\n objects = get_function_nonlocals(self.func)\n elif hasattr(self, \"afunc\"):\n objects = get_function_nonlocals(self.afunc)\n else:\n objects = []\n\n deps: list[Runnable] = []\n for obj in objects:\n if isinstance(obj, Runnable):\n deps.append(obj)\n elif isinstance(getattr(obj, \"__self__\", None), Runnable):\n deps.append(obj.__self__)\n return deps" }, { "class_start_lineno": 4230, "class_end_lineno": 4967, "func_start_lineno": 4494, "func_end_lineno": 4497, "func_code": " def config_specs(self) -> list[ConfigurableFieldSpec]:\n return get_unique_config_specs(\n spec for dep in self.deps for spec in dep.config_specs\n )" }, { "class_start_lineno": 1, "class_end_lineno": 401, "func_start_lineno": 140, "func_end_lineno": 153, "func_code": "def config_with_context(\n config: RunnableConfig,\n steps: list[Runnable],\n) -> RunnableConfig:\n \"\"\"Patch a runnable config with context getters and setters.\n\n Args:\n config: The runnable config.\n steps: The runnable steps.\n\n Returns:\n The patched runnable config.\n \"\"\"\n return _config_with_context(config, steps, _setter, _getter, threading.Event)" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 149, "func_end_lineno": 199, "func_code": "def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:\n \"\"\"Ensure that a config is a dict with all keys present.\n\n Args:\n config (Optional[RunnableConfig], optional): The config to ensure.\n Defaults to None.\n\n Returns:\n RunnableConfig: The ensured config.\n \"\"\"\n empty = RunnableConfig(\n tags=[],\n metadata={},\n callbacks=None,\n recursion_limit=DEFAULT_RECURSION_LIMIT,\n configurable={},\n )\n if var_config := var_child_runnable_config.get():\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in var_config.items()\n if v is not None\n },\n )\n )\n if config is not None:\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in config.items()\n if v is not None and k in CONFIG_KEYS\n },\n )\n )\n if config is not None:\n for k, v in config.items():\n if k not in CONFIG_KEYS and v is not None:\n empty[\"configurable\"][k] = v\n for key, value in empty.get(\"configurable\", {}).items():\n if (\n not key.startswith(\"__\")\n and isinstance(value, (str, int, float, bool))\n and key not in empty[\"metadata\"]\n ):\n empty[\"metadata\"][key] = value\n return empty" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 249, "func_end_lineno": 292, "func_code": "def patch_config(\n config: Optional[RunnableConfig],\n *,\n callbacks: Optional[BaseCallbackManager] = None,\n recursion_limit: Optional[int] = None,\n max_concurrency: Optional[int] = None,\n run_name: Optional[str] = None,\n configurable: Optional[dict[str, Any]] = None,\n) -> RunnableConfig:\n \"\"\"Patch a config with new values.\n\n Args:\n config (Optional[RunnableConfig]): The config to patch.\n callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.\n Defaults to None.\n recursion_limit (Optional[int], 
optional): The recursion limit to set.\n Defaults to None.\n max_concurrency (Optional[int], optional): The max concurrency to set.\n Defaults to None.\n run_name (Optional[str], optional): The run name to set. Defaults to None.\n configurable (Optional[Dict[str, Any]], optional): The configurable to set.\n Defaults to None.\n\n Returns:\n RunnableConfig: The patched config.\n \"\"\"\n config = ensure_config(config)\n if callbacks is not None:\n # If we're replacing callbacks, we need to unset run_name\n # As that should apply only to the same run as the original callbacks\n config[\"callbacks\"] = callbacks\n if \"run_name\" in config:\n del config[\"run_name\"]\n if \"run_id\" in config:\n del config[\"run_id\"]\n if recursion_limit is not None:\n config[\"recursion_limit\"] = recursion_limit\n if max_concurrency is not None:\n config[\"max_concurrency\"] = max_concurrency\n if run_name is not None:\n config[\"run_name\"] = run_name\n if configurable is not None:\n config[\"configurable\"] = {**config.get(\"configurable\", {}), **configurable}\n return config" } ]
[ "function_empty" ]
[ "langchain_core.runnables.base.RunnableLambda.deps", "langchain_core.runnables.base.RunnableLambda.config_specs", "langchain_core.beta.runnables.context.config_with_context", "langchain_core.runnables.config.ensure_config", "langchain_core.runnables.config.patch_config" ]
Python
4
4
{ "total_num": 4, "base_passed_num": 0 }
[ "langchain_core.libs.core.langchain_core.runnables.config.ensure_config", "langchain_core.libs.core.langchain_core.runnables.config.patch_config", "langchain_core.libs.core.langchain_core.callbacks.manager.handle_event", "langchain_core.libs.core.langchain_core.callbacks.manager.CallbackManagerForChainRun::on_chain_end" ]
langchain_core
[ "langchain_core/runnables/config.py", "langchain_core/runnables/config.py", "langchain_core/callbacks/manager.py", "langchain_core/callbacks/manager.py" ]
[ "libs/core/tests/unit_tests/runnables/test_context.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 149, "func_end_lineno": 199, "func_code": "def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:\n \"\"\"Ensure that a config is a dict with all keys present.\n\n Args:\n config (Optional[RunnableConfig], optional): The config to ensure.\n Defaults to None.\n\n Returns:\n RunnableConfig: The ensured config.\n \"\"\"\n empty = RunnableConfig(\n tags=[],\n metadata={},\n callbacks=None,\n recursion_limit=DEFAULT_RECURSION_LIMIT,\n configurable={},\n )\n if var_config := var_child_runnable_config.get():\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in var_config.items()\n if v is not None\n },\n )\n )\n if config is not None:\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in config.items()\n if v is not None and k in CONFIG_KEYS\n },\n )\n )\n if config is not None:\n for k, v in config.items():\n if k not in CONFIG_KEYS and v is not None:\n empty[\"configurable\"][k] = v\n for key, value in empty.get(\"configurable\", {}).items():\n if (\n not key.startswith(\"__\")\n and isinstance(value, (str, int, float, bool))\n and key not in empty[\"metadata\"]\n ):\n empty[\"metadata\"][key] = value\n return empty" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 249, "func_end_lineno": 292, "func_code": "def patch_config(\n config: Optional[RunnableConfig],\n *,\n callbacks: Optional[BaseCallbackManager] = None,\n recursion_limit: Optional[int] = None,\n max_concurrency: Optional[int] = None,\n run_name: Optional[str] = None,\n configurable: Optional[dict[str, Any]] = None,\n) -> RunnableConfig:\n \"\"\"Patch a config with new values.\n\n Args:\n config (Optional[RunnableConfig]): The config to patch.\n callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.\n Defaults to None.\n recursion_limit (Optional[int], optional): The recursion limit to set.\n Defaults to None.\n max_concurrency (Optional[int], optional): The max concurrency to set.\n Defaults to None.\n run_name (Optional[str], optional): The run name to set. 
Defaults to None.\n configurable (Optional[Dict[str, Any]], optional): The configurable to set.\n Defaults to None.\n\n Returns:\n RunnableConfig: The patched config.\n \"\"\"\n config = ensure_config(config)\n if callbacks is not None:\n # If we're replacing callbacks, we need to unset run_name\n # As that should apply only to the same run as the original callbacks\n config[\"callbacks\"] = callbacks\n if \"run_name\" in config:\n del config[\"run_name\"]\n if \"run_id\" in config:\n del config[\"run_id\"]\n if recursion_limit is not None:\n config[\"recursion_limit\"] = recursion_limit\n if max_concurrency is not None:\n config[\"max_concurrency\"] = max_concurrency\n if run_name is not None:\n config[\"run_name\"] = run_name\n if configurable is not None:\n config[\"configurable\"] = {**config.get(\"configurable\", {}), **configurable}\n return config" }, { "class_start_lineno": 1, "class_end_lineno": 2606, "func_start_lineno": 236, "func_end_lineno": 312, "func_code": "def handle_event(\n handlers: list[BaseCallbackHandler],\n event_name: str,\n ignore_condition_name: Optional[str],\n *args: Any,\n **kwargs: Any,\n) -> None:\n \"\"\"Generic event handler for CallbackManager.\n\n Note: This function is used by LangServe to handle events.\n\n Args:\n handlers: The list of handlers that will handle the event.\n event_name: The name of the event (e.g., \"on_llm_start\").\n ignore_condition_name: Name of the attribute defined on handler\n that if True will cause the handler to be skipped for the given event.\n *args: The arguments to pass to the event handler.\n **kwargs: The keyword arguments to pass to the event handler\n \"\"\"\n coros: list[Coroutine[Any, Any, Any]] = []\n\n try:\n message_strings: Optional[list[str]] = None\n for handler in handlers:\n try:\n if ignore_condition_name is None or not getattr(\n handler, ignore_condition_name\n ):\n event = getattr(handler, event_name)(*args, **kwargs)\n if asyncio.iscoroutine(event):\n coros.append(event)\n except NotImplementedError as e:\n if event_name == \"on_chat_model_start\":\n if message_strings is None:\n message_strings = [get_buffer_string(m) for m in args[1]]\n handle_event(\n [handler],\n \"on_llm_start\",\n \"ignore_llm\",\n args[0],\n message_strings,\n *args[2:],\n **kwargs,\n )\n else:\n handler_name = handler.__class__.__name__\n logger.warning(\n f\"NotImplementedError in {handler_name}.{event_name}\"\n f\" callback: {repr(e)}\"\n )\n except Exception as e:\n logger.warning(\n f\"Error in {handler.__class__.__name__}.{event_name} callback:\"\n f\" {repr(e)}\"\n )\n if handler.raise_error:\n raise\n finally:\n if coros:\n try:\n # Raises RuntimeError if there is no current event loop.\n asyncio.get_running_loop()\n loop_running = True\n except RuntimeError:\n loop_running = False\n\n if loop_running:\n # If we try to submit this coroutine to the running loop\n # we end up in a deadlock, as we'd have gotten here from a\n # running coroutine, which we cannot interrupt to run this one.\n # The solution is to create a new loop in a new thread.\n with ThreadPoolExecutor(1) as executor:\n executor.submit(\n cast(Callable, copy_context().run), _run_coros, coros\n ).result()\n else:\n _run_coros(coros)" }, { "class_start_lineno": 817, "class_end_lineno": 900, "func_start_lineno": 820, "func_end_lineno": 836, "func_code": " def on_chain_end(self, outputs: Union[dict[str, Any], Any], **kwargs: Any) -> None:\n \"\"\"Run when chain ends running.\n\n Args:\n outputs (Union[Dict[str, Any], Any]): The outputs of the chain.\n **kwargs 
(Any): Additional keyword arguments.\n \"\"\"\n handle_event(\n self.handlers,\n \"on_chain_end\",\n \"ignore_chain\",\n outputs,\n run_id=self.run_id,\n parent_run_id=self.parent_run_id,\n tags=self.tags,\n **kwargs,\n )" } ]
[ "function_empty" ]
[ "langchain_core.runnables.config.ensure_config", "langchain_core.runnables.config.patch_config", "langchain_core.callbacks.manager.handle_event", "langchain_core.callbacks.manager.CallbackManagerForChainRun.on_chain_end" ]
Python
4
4
{ "total_num": 27, "base_passed_num": 0 }
[ "langchain_core.libs.core.langchain_core.runnables.config.ensure_config", "langchain_core.libs.core.langchain_core.runnables.config.patch_config", "langchain_core.libs.core.langchain_core.globals.get_llm_cache", "langchain_core.libs.core.langchain_core.language_models.llms.get_prompts" ]
langchain_core
[ "langchain_core/runnables/config.py", "langchain_core/runnables/config.py", "langchain_core/globals.py", "langchain_core/language_models/llms.py" ]
[ "libs/core/tests/unit_tests/runnables/test_fallbacks.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 149, "func_end_lineno": 199, "func_code": "def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:\n \"\"\"Ensure that a config is a dict with all keys present.\n\n Args:\n config (Optional[RunnableConfig], optional): The config to ensure.\n Defaults to None.\n\n Returns:\n RunnableConfig: The ensured config.\n \"\"\"\n empty = RunnableConfig(\n tags=[],\n metadata={},\n callbacks=None,\n recursion_limit=DEFAULT_RECURSION_LIMIT,\n configurable={},\n )\n if var_config := var_child_runnable_config.get():\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in var_config.items()\n if v is not None\n },\n )\n )\n if config is not None:\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in config.items()\n if v is not None and k in CONFIG_KEYS\n },\n )\n )\n if config is not None:\n for k, v in config.items():\n if k not in CONFIG_KEYS and v is not None:\n empty[\"configurable\"][k] = v\n for key, value in empty.get(\"configurable\", {}).items():\n if (\n not key.startswith(\"__\")\n and isinstance(value, (str, int, float, bool))\n and key not in empty[\"metadata\"]\n ):\n empty[\"metadata\"][key] = value\n return empty" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 249, "func_end_lineno": 292, "func_code": "def patch_config(\n config: Optional[RunnableConfig],\n *,\n callbacks: Optional[BaseCallbackManager] = None,\n recursion_limit: Optional[int] = None,\n max_concurrency: Optional[int] = None,\n run_name: Optional[str] = None,\n configurable: Optional[dict[str, Any]] = None,\n) -> RunnableConfig:\n \"\"\"Patch a config with new values.\n\n Args:\n config (Optional[RunnableConfig]): The config to patch.\n callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.\n Defaults to None.\n recursion_limit (Optional[int], optional): The recursion limit to set.\n Defaults to None.\n max_concurrency (Optional[int], optional): The max concurrency to set.\n Defaults to None.\n run_name (Optional[str], optional): The run name to set. 
Defaults to None.\n configurable (Optional[Dict[str, Any]], optional): The configurable to set.\n Defaults to None.\n\n Returns:\n RunnableConfig: The patched config.\n \"\"\"\n config = ensure_config(config)\n if callbacks is not None:\n # If we're replacing callbacks, we need to unset run_name\n # As that should apply only to the same run as the original callbacks\n config[\"callbacks\"] = callbacks\n if \"run_name\" in config:\n del config[\"run_name\"]\n if \"run_id\" in config:\n del config[\"run_id\"]\n if recursion_limit is not None:\n config[\"recursion_limit\"] = recursion_limit\n if max_concurrency is not None:\n config[\"max_concurrency\"] = max_concurrency\n if run_name is not None:\n config[\"run_name\"] = run_name\n if configurable is not None:\n config[\"configurable\"] = {**config.get(\"configurable\", {}), **configurable}\n return config" }, { "class_start_lineno": 1, "class_end_lineno": 222, "func_start_lineno": 186, "func_end_lineno": 222, "func_code": "def get_llm_cache() -> \"BaseCache\":\n \"\"\"Get the value of the `llm_cache` global setting.\n\n Returns:\n The value of the `llm_cache` global setting.\n \"\"\"\n try:\n import langchain # type: ignore[import]\n\n # We're about to run some deprecated code, don't report warnings from it.\n # The user called the correct (non-deprecated) code path and shouldn't get warnings.\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\",\n message=(\n \"Importing llm_cache from langchain root module is no longer supported\"\n ),\n )\n # N.B.: This is a workaround for an unfortunate quirk of Python's\n # module-level `__getattr__()` implementation:\n # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004\n #\n # Remove it once `langchain.llm_cache` is no longer supported, and\n # once all users have migrated to using `set_llm_cache()` here.\n #\n # In the meantime, the `llm_cache` setting returns whichever of\n # its two backing sources is truthy (not `None` and non-empty),\n # or the old value if both are falsy. This accommodates users\n # who haven't migrated to using `set_llm_cache()` yet.\n # Those users are getting deprecation warnings directing them\n # to use `set_llm_cache()` when they import `langchain.llm_cache`.\n old_llm_cache = langchain.llm_cache\n except ImportError:\n old_llm_cache = None\n\n global _llm_cache\n return _llm_cache or old_llm_cache" }, { "class_start_lineno": 1, "class_end_lineno": 1547, "func_start_lineno": 151, "func_end_lineno": 184, "func_code": "def get_prompts(\n params: dict[str, Any],\n prompts: list[str],\n cache: Optional[Union[BaseCache, bool, None]] = None,\n) -> tuple[dict[int, list], str, list[int], list[str]]:\n \"\"\"Get prompts that are already cached.\n\n Args:\n params: Dictionary of parameters.\n prompts: List of prompts.\n cache: Cache object. Default is None.\n\n Returns:\n A tuple of existing prompts, llm_string, missing prompt indexes,\n and missing prompts.\n\n Raises:\n ValueError: If the cache is not set and cache is True.\n \"\"\"\n llm_string = str(sorted(params.items()))\n missing_prompts = []\n missing_prompt_idxs = []\n existing_prompts = {}\n\n llm_cache = _resolve_cache(cache)\n for i, prompt in enumerate(prompts):\n if llm_cache:\n cache_val = llm_cache.lookup(prompt, llm_string)\n if isinstance(cache_val, list):\n existing_prompts[i] = cache_val\n else:\n missing_prompts.append(prompt)\n missing_prompt_idxs.append(i)\n return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts" } ]
[ "function_empty" ]
[ "langchain_core.runnables.config.ensure_config", "langchain_core.runnables.config.patch_config", "langchain_core.globals.get_llm_cache", "langchain_core.language_models.llms.get_prompts" ]
Python
4
4
{ "total_num": 16, "base_passed_num": 2 }
[ "langchain_core.libs.core.langchain_core.runnables.graph.is_uuid", "langchain_core.libs.core.langchain_core.runnables.graph.node_data_str", "langchain_core.libs.core.langchain_core.runnables.graph.Graph::add_node", "langchain_core.libs.core.langchain_core.runnables.graph_ascii.AsciiCanvas::point", "langchain_core.libs.core.langchain_core.runnables.graph_ascii.AsciiCanvas::line" ]
langchain_core
[ "langchain_core/runnables/graph.py", "langchain_core/runnables/graph.py", "langchain_core/runnables/graph.py", "langchain_core/runnables/graph_ascii.py", "langchain_core/runnables/graph_ascii.py" ]
[ "libs/core/tests/unit_tests/runnables/test_graph.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 664, "func_start_lineno": 42, "func_end_lineno": 55, "func_code": "def is_uuid(value: str) -> bool:\n \"\"\"Check if a string is a valid UUID.\n\n Args:\n value: The string to check.\n\n Returns:\n True if the string is a valid UUID, False otherwise.\n \"\"\"\n try:\n UUID(value)\n except ValueError:\n return False\n return True" }, { "class_start_lineno": 1, "class_end_lineno": 664, "func_start_lineno": 178, "func_end_lineno": 196, "func_code": "def node_data_str(id: str, data: Union[type[BaseModel], RunnableType]) -> str:\n \"\"\"Convert the data of a node to a string.\n\n Args:\n id: The node id.\n data: The node data.\n\n Returns:\n A string representation of the data.\n \"\"\"\n from langchain_core.runnables.base import Runnable\n\n if not is_uuid(id):\n return id\n elif isinstance(data, Runnable):\n data_str = data.get_name()\n else:\n data_str = data.__name__\n return data_str if not data_str.startswith(\"Runnable\") else data_str[8:]" }, { "class_start_lineno": 256, "class_end_lineno": 636, "func_start_lineno": 313, "func_end_lineno": 339, "func_code": " def add_node(\n self,\n data: Union[type[BaseModel], RunnableType],\n id: Optional[str] = None,\n *,\n metadata: Optional[dict[str, Any]] = None,\n ) -> Node:\n \"\"\"Add a node to the graph and return it.\n\n Args:\n data: The data of the node.\n id: The id of the node. Defaults to None.\n metadata: Optional metadata for the node. Defaults to None.\n\n Returns:\n The node that was added to the graph.\n\n Raises:\n ValueError: If a node with the same id already exists.\n \"\"\"\n if id is not None and id in self.nodes:\n msg = f\"Node with id {id} already exists\"\n raise ValueError(msg)\n id = id or self.next_id()\n node = Node(id=id, data=data, metadata=metadata, name=node_data_str(id, data))\n self.nodes[node.id] = node\n return node" }, { "class_start_lineno": 39, "class_end_lineno": 157, "func_start_lineno": 64, "func_end_lineno": 85, "func_code": " def point(self, x: int, y: int, char: str) -> None:\n \"\"\"Create a point on ASCII canvas.\n\n Args:\n x (int): x coordinate. Should be >= 0 and < number of columns in\n the canvas.\n y (int): y coordinate. 
Should be >= 0 an < number of lines in the\n canvas.\n char (str): character to place in the specified point on the\n canvas.\n \"\"\"\n if len(char) != 1:\n msg = \"char should be a single character\"\n raise ValueError(msg)\n if x >= self.cols or x < 0:\n msg = \"x should be >= 0 and < number of columns\"\n raise ValueError(msg)\n if y >= self.lines or y < 0:\n msg = \"y should be >= 0 and < number of lines\"\n raise ValueError(msg)\n\n self.canvas[y][x] = char" }, { "class_start_lineno": 39, "class_end_lineno": 157, "func_start_lineno": 87, "func_end_lineno": 117, "func_code": " def line(self, x0: int, y0: int, x1: int, y1: int, char: str) -> None:\n \"\"\"Create a line on ASCII canvas.\n\n Args:\n x0 (int): x coordinate where the line should start.\n y0 (int): y coordinate where the line should start.\n x1 (int): x coordinate where the line should end.\n y1 (int): y coordinate where the line should end.\n char (str): character to draw the line with.\n \"\"\"\n if x0 > x1:\n x1, x0 = x0, x1\n y1, y0 = y0, y1\n\n dx = x1 - x0\n dy = y1 - y0\n\n if dx == 0 and dy == 0:\n self.point(x0, y0, char)\n elif abs(dx) >= abs(dy):\n for x in range(x0, x1 + 1):\n y = y0 if dx == 0 else y0 + int(round((x - x0) * dy / float(dx)))\n self.point(x, y, char)\n elif y0 < y1:\n for y in range(y0, y1 + 1):\n x = x0 if dy == 0 else x0 + int(round((y - y0) * dx / float(dy)))\n self.point(x, y, char)\n else:\n for y in range(y1, y0 + 1):\n x = x0 if dy == 0 else x1 + int(round((y - y1) * dx / float(dy)))\n self.point(x, y, char)" } ]
[ "function_empty" ]
[ "langchain_core.runnables.graph.is_uuid", "langchain_core.runnables.graph.node_data_str", "langchain_core.runnables.graph.Graph.add_node", "langchain_core.runnables.graph_ascii.AsciiCanvas.point", "langchain_core.runnables.graph_ascii.AsciiCanvas.line" ]
Python
5
5
{ "total_num": 11, "base_passed_num": 3 }
[ "langchain_core.libs.core.langchain_core.runnables.config.ensure_config", "langchain_core.libs.core.langchain_core.runnables.config.merge_configs", "langchain_core.libs.core.langchain_core.runnables.config.patch_config", "langchain_core.libs.core.langchain_core.runnables.base.RunnableLambda::invoke" ]
langchain_core
[ "langchain_core/runnables/config.py", "langchain_core/runnables/config.py", "langchain_core/runnables/config.py", "langchain_core/runnables/base.py", "langchain_core/runnables/base.py" ]
[ "libs/core/tests/unit_tests/runnables/test_history.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 149, "func_end_lineno": 199, "func_code": "def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:\n \"\"\"Ensure that a config is a dict with all keys present.\n\n Args:\n config (Optional[RunnableConfig], optional): The config to ensure.\n Defaults to None.\n\n Returns:\n RunnableConfig: The ensured config.\n \"\"\"\n empty = RunnableConfig(\n tags=[],\n metadata={},\n callbacks=None,\n recursion_limit=DEFAULT_RECURSION_LIMIT,\n configurable={},\n )\n if var_config := var_child_runnable_config.get():\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in var_config.items()\n if v is not None\n },\n )\n )\n if config is not None:\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in config.items()\n if v is not None and k in CONFIG_KEYS\n },\n )\n )\n if config is not None:\n for k, v in config.items():\n if k not in CONFIG_KEYS and v is not None:\n empty[\"configurable\"][k] = v\n for key, value in empty.get(\"configurable\", {}).items():\n if (\n not key.startswith(\"__\")\n and isinstance(value, (str, int, float, bool))\n and key not in empty[\"metadata\"]\n ):\n empty[\"metadata\"][key] = value\n return empty" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 295, "func_end_lineno": 358, "func_code": "def merge_configs(*configs: Optional[RunnableConfig]) -> RunnableConfig:\n \"\"\"Merge multiple configs into one.\n\n Args:\n *configs (Optional[RunnableConfig]): The configs to merge.\n\n Returns:\n RunnableConfig: The merged config.\n \"\"\"\n base: RunnableConfig = {}\n # Even though the keys aren't literals, this is correct\n # because both dicts are the same type\n for config in (ensure_config(c) for c in configs if c is not None):\n for key in config:\n if key == \"metadata\":\n base[key] = { # type: ignore\n **base.get(key, {}), # type: ignore\n **(config.get(key) or {}), # type: ignore\n }\n elif key == \"tags\":\n base[key] = sorted( # type: ignore\n set(base.get(key, []) + (config.get(key) or [])), # type: ignore\n )\n elif key == \"configurable\":\n base[key] = { # type: ignore\n **base.get(key, {}), # type: ignore\n **(config.get(key) or {}), # type: ignore\n }\n elif key == \"callbacks\":\n base_callbacks = base.get(\"callbacks\")\n these_callbacks = config[\"callbacks\"]\n # callbacks can be either None, list[handler] or manager\n # so merging two callbacks values has 6 cases\n if isinstance(these_callbacks, list):\n if base_callbacks is None:\n base[\"callbacks\"] = these_callbacks.copy()\n elif isinstance(base_callbacks, list):\n base[\"callbacks\"] = base_callbacks + these_callbacks\n else:\n # base_callbacks is a manager\n mngr = base_callbacks.copy()\n for callback in these_callbacks:\n mngr.add_handler(callback, inherit=True)\n base[\"callbacks\"] = mngr\n elif these_callbacks is not None:\n # these_callbacks is a manager\n if base_callbacks is None:\n base[\"callbacks\"] = these_callbacks.copy()\n elif isinstance(base_callbacks, list):\n mngr = these_callbacks.copy()\n for callback in base_callbacks:\n mngr.add_handler(callback, inherit=True)\n base[\"callbacks\"] = mngr\n else:\n # base_callbacks is also a manager\n base[\"callbacks\"] = base_callbacks.merge(these_callbacks)\n elif key == \"recursion_limit\":\n if config[\"recursion_limit\"] != DEFAULT_RECURSION_LIMIT:\n 
base[\"recursion_limit\"] = config[\"recursion_limit\"]\n elif key in COPIABLE_KEYS and config[key] is not None: # type: ignore[literal-required]\n base[key] = config[key].copy() # type: ignore[literal-required]\n else:\n base[key] = config[key] or base.get(key) # type: ignore\n return base" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 249, "func_end_lineno": 292, "func_code": "def patch_config(\n config: Optional[RunnableConfig],\n *,\n callbacks: Optional[BaseCallbackManager] = None,\n recursion_limit: Optional[int] = None,\n max_concurrency: Optional[int] = None,\n run_name: Optional[str] = None,\n configurable: Optional[dict[str, Any]] = None,\n) -> RunnableConfig:\n \"\"\"Patch a config with new values.\n\n Args:\n config (Optional[RunnableConfig]): The config to patch.\n callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.\n Defaults to None.\n recursion_limit (Optional[int], optional): The recursion limit to set.\n Defaults to None.\n max_concurrency (Optional[int], optional): The max concurrency to set.\n Defaults to None.\n run_name (Optional[str], optional): The run name to set. Defaults to None.\n configurable (Optional[Dict[str, Any]], optional): The configurable to set.\n Defaults to None.\n\n Returns:\n RunnableConfig: The patched config.\n \"\"\"\n config = ensure_config(config)\n if callbacks is not None:\n # If we're replacing callbacks, we need to unset run_name\n # As that should apply only to the same run as the original callbacks\n config[\"callbacks\"] = callbacks\n if \"run_name\" in config:\n del config[\"run_name\"]\n if \"run_id\" in config:\n del config[\"run_id\"]\n if recursion_limit is not None:\n config[\"recursion_limit\"] = recursion_limit\n if max_concurrency is not None:\n config[\"max_concurrency\"] = max_concurrency\n if run_name is not None:\n config[\"run_name\"] = run_name\n if configurable is not None:\n config[\"configurable\"] = {**config.get(\"configurable\", {}), **configurable}\n return config" }, { "class_start_lineno": 4230, "class_end_lineno": 4967, "func_start_lineno": 4696, "func_end_lineno": 4699, "func_code": " def _config(\n self, config: Optional[RunnableConfig], callable: Callable[..., Any]\n ) -> RunnableConfig:\n return ensure_config(config)" }, { "class_start_lineno": 4230, "class_end_lineno": 4967, "func_start_lineno": 4701, "func_end_lineno": 4732, "func_code": " def invoke(\n self,\n input: Input,\n config: Optional[RunnableConfig] = None,\n **kwargs: Optional[Any],\n ) -> Output:\n \"\"\"Invoke this Runnable synchronously.\n\n Args:\n input: The input to this Runnable.\n config: The config to use. Defaults to None.\n kwargs: Additional keyword arguments.\n\n Returns:\n The output of this Runnable.\n\n Raises:\n TypeError: If the Runnable is a coroutine function.\n \"\"\"\n if hasattr(self, \"func\"):\n return self._call_with_config(\n self._invoke,\n input,\n self._config(config, self.func),\n **kwargs,\n )\n else:\n msg = (\n \"Cannot invoke a coroutine function synchronously.\"\n \"Use `ainvoke` instead.\"\n )\n raise TypeError(msg)" } ]
[ "function_empty" ]
[ "langchain_core.runnables.config.ensure_config", "langchain_core.runnables.config.merge_configs", "langchain_core.runnables.config.patch_config", "langchain_core.runnables.base.RunnableLambda._config", "langchain_core.runnables.base.RunnableLambda.invoke" ]
Python
4
4
{ "total_num": 23, "base_passed_num": 4 }
[ "langchain_core.libs.core.langchain_core.utils.env.get_from_env", "langchain_core.libs.core.langchain_core.utils.env.get_from_dict_or_env" ]
langchain_core
[ "langchain_core/utils/env.py", "langchain_core/utils/env.py" ]
[ "libs/core/tests/unit_tests/utils/test_env.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 81, "func_start_lineno": 54, "func_end_lineno": 81, "func_code": "def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:\n \"\"\"Get a value from a dictionary or an environment variable.\n\n Args:\n key: The key to look up in the dictionary.\n env_key: The environment variable to look up if the key is not\n in the dictionary.\n default: The default value to return if the key is not in the dictionary\n or the environment. Defaults to None.\n\n Returns:\n str: The value of the key.\n\n Raises:\n ValueError: If the key is not in the dictionary and no default value is\n provided or if the environment variable is not set.\n \"\"\"\n if env_key in os.environ and os.environ[env_key]:\n return os.environ[env_key]\n elif default is not None:\n return default\n else:\n msg = (\n f\"Did not find {key}, please add an environment variable\"\n f\" `{env_key}` which contains it, or pass\"\n f\" `{key}` as a named parameter.\"\n )\n raise ValueError(msg)" }, { "class_start_lineno": 1, "class_end_lineno": 81, "func_start_lineno": 24, "func_end_lineno": 51, "func_code": "def get_from_dict_or_env(\n data: dict[str, Any],\n key: Union[str, list[str]],\n env_key: str,\n default: Optional[str] = None,\n) -> str:\n \"\"\"Get a value from a dictionary or an environment variable.\n\n Args:\n data: The dictionary to look up the key in.\n key: The key to look up in the dictionary. This can be a list of keys to try\n in order.\n env_key: The environment variable to look up if the key is not\n in the dictionary.\n default: The default value to return if the key is not in the dictionary\n or the environment. Defaults to None.\n \"\"\"\n if isinstance(key, (list, tuple)):\n for k in key:\n if k in data and data[k]:\n return data[k]\n\n if isinstance(key, str) and key in data and data[key]:\n return data[key]\n\n key_for_err = key[0] if isinstance(key, (list, tuple)) else key\n\n return get_from_env(key_for_err, env_key, default=default)" } ]
[ "function_empty" ]
[ "langchain_core.utils.env.get_from_env", "langchain_core.utils.env.get_from_dict_or_env" ]
Python
2
2
{ "total_num": 1, "base_passed_num": 0 }
[ "langchain_core.libs.14core.langchain_core.tools.base.create_schema_from_function", "langchain_core.libs.core.langchain_core.utils.function_calling._convert_python_function_to_openai_function", "langchain_core.libs.core.langchain_core.tools.base._parse_python_function_docstring", "langchain_core.libs.core.langchain_core.tools.base._infer_arg_descriptions" ]
langchain_core
[ "langchain_core/tools/base.py", "langchain_core/utils/function_calling.py", "langchain_core/tools/base.py", "langchain_core/tools/base.py" ]
[ "libs/core/tests/unit_tests/utils/test_function_calling.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 1112, "func_start_lineno": 210, "func_end_lineno": 307, "func_code": "def create_schema_from_function(\n model_name: str,\n func: Callable,\n *,\n filter_args: Optional[Sequence[str]] = None,\n parse_docstring: bool = False,\n error_on_invalid_docstring: bool = False,\n include_injected: bool = True,\n) -> type[BaseModel]:\n \"\"\"Create a pydantic schema from a function's signature.\n\n Args:\n model_name: Name to assign to the generated pydantic schema.\n func: Function to generate the schema from.\n filter_args: Optional list of arguments to exclude from the schema.\n Defaults to FILTERED_ARGS.\n parse_docstring: Whether to parse the function's docstring for descriptions\n for each argument. Defaults to False.\n error_on_invalid_docstring: if ``parse_docstring`` is provided, configure\n whether to raise ValueError on invalid Google Style docstrings.\n Defaults to False.\n include_injected: Whether to include injected arguments in the schema.\n Defaults to True, since we want to include them in the schema\n when *validating* tool inputs.\n\n Returns:\n A pydantic model with the same arguments as the function.\n \"\"\"\n sig = inspect.signature(func)\n\n if _function_annotations_are_pydantic_v1(sig, func):\n validated = validate_arguments_v1(func, config=_SchemaConfig) # type: ignore\n else:\n # https://docs.pydantic.dev/latest/usage/validation_decorator/\n with warnings.catch_warnings():\n # We are using deprecated functionality here.\n # This code should be re-written to simply construct a pydantic model\n # using inspect.signature and create_model.\n warnings.simplefilter(\"ignore\", category=PydanticDeprecationWarning)\n validated = validate_arguments(func, config=_SchemaConfig) # type: ignore\n\n # Let's ignore `self` and `cls` arguments for class and instance methods\n # If qualified name has a \".\", then it likely belongs in a class namespace\n in_class = bool(func.__qualname__ and \".\" in func.__qualname__)\n\n has_args = False\n has_kwargs = False\n\n for param in sig.parameters.values():\n if param.kind == param.VAR_POSITIONAL:\n has_args = True\n elif param.kind == param.VAR_KEYWORD:\n has_kwargs = True\n\n inferred_model = validated.model # type: ignore\n\n if filter_args:\n filter_args_ = filter_args\n else:\n # Handle classmethods and instance methods\n existing_params: list[str] = list(sig.parameters.keys())\n if existing_params and existing_params[0] in (\"self\", \"cls\") and in_class:\n filter_args_ = [existing_params[0]] + list(FILTERED_ARGS)\n else:\n filter_args_ = list(FILTERED_ARGS)\n\n for existing_param in existing_params:\n if not include_injected and _is_injected_arg_type(\n sig.parameters[existing_param].annotation\n ):\n filter_args_.append(existing_param)\n\n description, arg_descriptions = _infer_arg_descriptions(\n func,\n parse_docstring=parse_docstring,\n error_on_invalid_docstring=error_on_invalid_docstring,\n )\n # Pydantic adds placeholder virtual fields we need to strip\n valid_properties = []\n for field in get_fields(inferred_model):\n if not has_args and field == \"args\":\n continue\n if not has_kwargs and field == \"kwargs\":\n continue\n\n if field == \"v__duplicate_kwargs\": # Internal pydantic field\n continue\n\n if field not in filter_args_:\n valid_properties.append(field)\n\n return _create_subset_model(\n model_name,\n inferred_model,\n list(valid_properties),\n descriptions=arg_descriptions,\n fn_description=description,\n )" }, { "class_start_lineno": 1, "class_end_lineno": 704, 
"func_start_lineno": 160, "func_end_lineno": 190, "func_code": "def _convert_python_function_to_openai_function(\n function: Callable,\n) -> FunctionDescription:\n \"\"\"Convert a Python function to an OpenAI function-calling API compatible dict.\n\n Assumes the Python function has type hints and a docstring with a description. If\n the docstring has Google Python style argument descriptions, these will be\n included as well.\n\n Args:\n function: The Python function to convert.\n\n Returns:\n The OpenAI function description.\n \"\"\"\n from langchain_core.tools.base import create_schema_from_function\n\n func_name = _get_python_function_name(function)\n model = create_schema_from_function(\n func_name,\n function,\n filter_args=(),\n parse_docstring=True,\n error_on_invalid_docstring=False,\n include_injected=False,\n )\n return _convert_pydantic_to_openai_function(\n model,\n name=func_name,\n description=model.__doc__,\n )" }, { "class_start_lineno": 1, "class_end_lineno": 1112, "func_start_lineno": 110, "func_end_lineno": 122, "func_code": "def _parse_python_function_docstring(\n function: Callable, annotations: dict, error_on_invalid_docstring: bool = False\n) -> tuple[str, dict]:\n \"\"\"Parse the function and argument descriptions from the docstring of a function.\n\n Assumes the function docstring follows Google Python style guide.\n \"\"\"\n docstring = inspect.getdoc(function)\n return _parse_google_docstring(\n docstring,\n list(annotations),\n error_on_invalid_docstring=error_on_invalid_docstring,\n )" }, { "class_start_lineno": 1, "class_end_lineno": 1112, "func_start_lineno": 135, "func_end_lineno": 161, "func_code": "def _infer_arg_descriptions(\n fn: Callable,\n *,\n parse_docstring: bool = False,\n error_on_invalid_docstring: bool = False,\n) -> tuple[str, dict]:\n \"\"\"Infer argument descriptions from a function's docstring.\"\"\"\n if hasattr(inspect, \"get_annotations\"):\n # This is for python < 3.10\n annotations = inspect.get_annotations(fn) # type: ignore\n else:\n annotations = getattr(fn, \"__annotations__\", {})\n if parse_docstring:\n description, arg_descriptions = _parse_python_function_docstring(\n fn, annotations, error_on_invalid_docstring=error_on_invalid_docstring\n )\n else:\n description = inspect.getdoc(fn) or \"\"\n arg_descriptions = {}\n if parse_docstring:\n _validate_docstring_args_against_annotations(arg_descriptions, annotations)\n for arg, arg_type in annotations.items():\n if arg in arg_descriptions:\n continue\n if desc := _get_annotation_description(arg_type):\n arg_descriptions[arg] = desc\n return description, arg_descriptions" } ]
[ "function_empty" ]
[ "langchain_core.tools.base.create_schema_from_function", "langchain_core.utils.function_calling._convert_python_function_to_openai_function", "langchain_core.tools.base._parse_python_function_docstring", "langchain_core.tools.base._infer_arg_descriptions" ]
Python
4
4
{ "total_num": 20, "base_passed_num": 15 }
[ "langchain_core.libs.core.langchain_core.utils._merge.merge_lists", "langchain_core.libs.core.langchain_core.utils._merge.merge_dicts" ]
langchain_core
[ "langchain_core/utils/_merge.py", "langchain_core/utils/_merge.py" ]
[ "libs/core/tests/unit_tests/utils/test_utils.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 148, "func_start_lineno": 72, "func_end_lineno": 106, "func_code": "def merge_lists(left: Optional[list], *others: Optional[list]) -> Optional[list]:\n \"\"\"Add many lists, handling None.\n\n Args:\n left: The first list to merge.\n others: The other lists to merge.\n\n Returns:\n The merged list.\n \"\"\"\n merged = left.copy() if left is not None else None\n for other in others:\n if other is None:\n continue\n elif merged is None:\n merged = other.copy()\n else:\n for e in other:\n if isinstance(e, dict) and \"index\" in e and isinstance(e[\"index\"], int):\n to_merge = [\n i\n for i, e_left in enumerate(merged)\n if e_left[\"index\"] == e[\"index\"]\n ]\n if to_merge:\n # TODO: Remove this once merge_dict is updated with special\n # handling for 'type'.\n if \"type\" in e:\n e = {k: v for k, v in e.items() if k != \"type\"}\n merged[to_merge[0]] = merge_dicts(merged[to_merge[0]], e)\n else:\n merged.append(e)\n else:\n merged.append(e)\n return merged" }, { "class_start_lineno": 1, "class_end_lineno": 148, "func_start_lineno": 6, "func_end_lineno": 69, "func_code": "def merge_dicts(left: dict[str, Any], *others: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Merge many dicts, handling specific scenarios where a key exists in both\n dictionaries but has a value of None in 'left'. In such cases, the method uses the\n value from 'right' for that key in the merged dictionary.\n\n Args:\n left: The first dictionary to merge.\n others: The other dictionaries to merge.\n\n Returns:\n The merged dictionary.\n\n Raises:\n TypeError: If the key exists in both dictionaries but has a different type.\n TypeError: If the value has an unsupported type.\n\n Example:\n If left = {\"function_call\": {\"arguments\": None}} and\n right = {\"function_call\": {\"arguments\": \"{\\n\"}}\n then, after merging, for the key \"function_call\",\n the value from 'right' is used,\n resulting in merged = {\"function_call\": {\"arguments\": \"{\\n\"}}.\n \"\"\"\n merged = left.copy()\n for right in others:\n for right_k, right_v in right.items():\n if right_k not in merged or right_v is not None and merged[right_k] is None:\n merged[right_k] = right_v\n elif right_v is None:\n continue\n elif type(merged[right_k]) is not type(right_v):\n msg = (\n f'additional_kwargs[\"{right_k}\"] already exists in this message,'\n \" but with a different type.\"\n )\n raise TypeError(msg)\n elif isinstance(merged[right_k], str):\n # TODO: Add below special handling for 'type' key in 0.3 and remove\n # merge_lists 'type' logic.\n #\n # if right_k == \"type\":\n # if merged[right_k] == right_v:\n # continue\n # else:\n # raise ValueError(\n # \"Unable to merge. Two different values seen for special \"\n # f\"key 'type': {merged[right_k]} and {right_v}. 'type' \"\n # \"should either occur once or have the same value across \"\n # \"all dicts.\"\n # )\n merged[right_k] += right_v\n elif isinstance(merged[right_k], dict):\n merged[right_k] = merge_dicts(merged[right_k], right_v)\n elif isinstance(merged[right_k], list):\n merged[right_k] = merge_lists(merged[right_k], right_v)\n elif merged[right_k] == right_v:\n continue\n else:\n msg = (\n f\"Additional kwargs key {right_k} already exists in left dict and \"\n f\"value has unsupported type {type(merged[right_k])}.\"\n )\n raise TypeError(msg)\n return merged" } ]
[ "function_empty" ]
[ "langchain_core.utils._merge.merge_lists", "langchain_core.utils._merge.merge_dicts" ]
Python
2
2
{ "total_num": 47, "base_passed_num": 26 }
[ "finam.src.finam.data.grid_spec.NoGrid::compatible_with", "finam.src.finam.data.tools.mask.mask_specified", "finam.src.finam.data.tools.mask.masks_compatible", "finam.src.finam.data.tools.info.Info::accepts" ]
finam
[ "finam/data/grid_spec.py", "finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/mask.py", "finam/data/tools/info.py" ]
[ "tests/adapters/test_probe.py", "tests/adapters/test_time.py", "tests/components/test_debug.py", "tests/core/test_pull_based_component.py" ]
[ { "class_start_lineno": 30, "class_end_lineno": 90, "func_start_lineno": 71, "func_end_lineno": 87, "func_code": " def compatible_with(self, other, check_location=True):\n \"\"\"\n Check for compatibility with other Grid.\n\n Parameters\n ----------\n other : instance of Grid\n Other grid to compatibility with.\n check_location : bool, optional\n Whether to check location for equality, by default True\n\n Returns\n -------\n bool\n compatibility\n \"\"\"\n return isinstance(other, NoGrid) and self.data_shape == other.data_shape" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 364, "func_end_lineno": 378, "func_code": "def mask_specified(mask):\n \"\"\"\n Determine whether given mask selection indicates a masked array.\n\n Parameters\n ----------\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to check\n\n Returns\n -------\n bool\n False if mask is Mask.FLEX or Mask.NONE, True otherwise\n \"\"\"\n return not any(mask is val for val in list(Mask))" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 87, "func_end_lineno": 89, "func_code": " def mask(self):\n \"\"\"Mask or ndarray: data mask.\"\"\"\n return self._mask" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 243, "func_end_lineno": 285, "func_code": "def masks_compatible(\n this, incoming, incoming_donwstream, this_grid=None, incoming_grid=None\n):\n \"\"\"\n Check if an incoming mask is compatible with a given mask.\n\n Parameters\n ----------\n this : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n mask specification to check against\n incoming : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n incoming mask to check for compatibility\n incoming_donwstream : bool\n Whether the incoming mask is from downstream data\n this_grid : Grid or NoGrid or None, optional\n grid for first mask (to check shape and value equality)\n incoming_grid : Grid or NoGrid or None, optional\n grid for second mask (to check shape and value equality)\n\n Returns\n -------\n bool\n mask compatibility\n \"\"\"\n if incoming_donwstream:\n upstream, downstream = this, incoming\n up_grid, down_grid = this_grid, incoming_grid\n else:\n upstream, downstream = incoming, this\n up_grid, down_grid = incoming_grid, this_grid\n # None is incompatible\n if upstream is None:\n return False\n # Mask.FLEX accepts anything, Mask.NONE only Mask.NONE\n if not mask_specified(downstream):\n if not mask_specified(upstream):\n return downstream == Mask.FLEX or upstream == Mask.NONE\n return downstream == Mask.FLEX\n # if mask is specified, upstream mask must also be specified\n if not mask_specified(upstream):\n return False\n # if both mask given, compare them\n return masks_equal(downstream, upstream, down_grid, up_grid)" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 157, "func_end_lineno": 201, "func_code": " def accepts(self, incoming, fail_info, incoming_donwstream=False):\n \"\"\"\n Tests whether this info can accept/is compatible with an incoming info.\n\n Tested attributes are: \"grid\", \"mask\" and \"units\"\n\n Parameters\n ----------\n incoming : Info\n Incoming/source info to check. This is the info from upstream.\n fail_info : dict\n Dictionary that will be filled with failed properties; name: (source, target).\n incoming_donwstream : bool, optional\n Whether the incoming info is from downstream data. 
Default: False\n\n Returns\n -------\n bool\n Whether the incoming info is accepted\n \"\"\"\n if not isinstance(incoming, Info):\n fail_info[\"type\"] = (incoming.__class__, self.__class__)\n return False\n\n success = True\n if self.grid is not None and not self.grid.compatible_with(incoming.grid):\n if not (incoming_donwstream and incoming.grid is None):\n fail_info[\"grid\"] = (incoming.grid, self.grid)\n success = False\n\n if self.mask is not None and not masks_compatible(\n self.mask, incoming.mask, incoming_donwstream, self.grid, incoming.grid\n ):\n if not (incoming_donwstream and incoming.mask is None):\n fail_info[\"mask\"] = (incoming.mask, self.mask)\n success = False\n\n u1_none = (u1 := self.units) is None\n u2_none = (u2 := incoming.units) is None\n if not u1_none and (u2_none or not compatible_units(u1, u2)):\n if not (incoming_donwstream and u2_none):\n fail_info[\"units\"] = (u2, u1)\n success = False\n\n return success" } ]
[ "function_empty" ]
[ "finam.data.grid_spec.NoGrid.compatible_with", "finam.data.tools.mask.mask_specified", "finam.data.tools.info.Info.mask", "finam.data.tools.mask.masks_compatible", "finam.data.tools.info.Info.accepts" ]
Python
4
4
{ "total_num": 15, "base_passed_num": 2 }
[ "finam.src.finam.data.tools.mask.mask_specified", "finam.src.finam.data.tools.mask.masks_compatible", "finam.src.finam.data.tools.info.Info::accepts", "finam.src.finam.data.tools.mask.from_compressed" ]
finam
[ "finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/mask.py" ]
[ "tests/adapters/test_regrid.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 364, "func_end_lineno": 378, "func_code": "def mask_specified(mask):\n \"\"\"\n Determine whether given mask selection indicates a masked array.\n\n Parameters\n ----------\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to check\n\n Returns\n -------\n bool\n False if mask is Mask.FLEX or Mask.NONE, True otherwise\n \"\"\"\n return not any(mask is val for val in list(Mask))" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 87, "func_end_lineno": 89, "func_code": " def mask(self):\n \"\"\"Mask or ndarray: data mask.\"\"\"\n return self._mask" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 243, "func_end_lineno": 285, "func_code": "def masks_compatible(\n this, incoming, incoming_donwstream, this_grid=None, incoming_grid=None\n):\n \"\"\"\n Check if an incoming mask is compatible with a given mask.\n\n Parameters\n ----------\n this : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n mask specification to check against\n incoming : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n incoming mask to check for compatibility\n incoming_donwstream : bool\n Whether the incoming mask is from downstream data\n this_grid : Grid or NoGrid or None, optional\n grid for first mask (to check shape and value equality)\n incoming_grid : Grid or NoGrid or None, optional\n grid for second mask (to check shape and value equality)\n\n Returns\n -------\n bool\n mask compatibility\n \"\"\"\n if incoming_donwstream:\n upstream, downstream = this, incoming\n up_grid, down_grid = this_grid, incoming_grid\n else:\n upstream, downstream = incoming, this\n up_grid, down_grid = incoming_grid, this_grid\n # None is incompatible\n if upstream is None:\n return False\n # Mask.FLEX accepts anything, Mask.NONE only Mask.NONE\n if not mask_specified(downstream):\n if not mask_specified(upstream):\n return downstream == Mask.FLEX or upstream == Mask.NONE\n return downstream == Mask.FLEX\n # if mask is specified, upstream mask must also be specified\n if not mask_specified(upstream):\n return False\n # if both mask given, compare them\n return masks_equal(downstream, upstream, down_grid, up_grid)" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 157, "func_end_lineno": 201, "func_code": " def accepts(self, incoming, fail_info, incoming_donwstream=False):\n \"\"\"\n Tests whether this info can accept/is compatible with an incoming info.\n\n Tested attributes are: \"grid\", \"mask\" and \"units\"\n\n Parameters\n ----------\n incoming : Info\n Incoming/source info to check. This is the info from upstream.\n fail_info : dict\n Dictionary that will be filled with failed properties; name: (source, target).\n incoming_donwstream : bool, optional\n Whether the incoming info is from downstream data. 
Default: False\n\n Returns\n -------\n bool\n Whether the incoming info is accepted\n \"\"\"\n if not isinstance(incoming, Info):\n fail_info[\"type\"] = (incoming.__class__, self.__class__)\n return False\n\n success = True\n if self.grid is not None and not self.grid.compatible_with(incoming.grid):\n if not (incoming_donwstream and incoming.grid is None):\n fail_info[\"grid\"] = (incoming.grid, self.grid)\n success = False\n\n if self.mask is not None and not masks_compatible(\n self.mask, incoming.mask, incoming_donwstream, self.grid, incoming.grid\n ):\n if not (incoming_donwstream and incoming.mask is None):\n fail_info[\"mask\"] = (incoming.mask, self.mask)\n success = False\n\n u1_none = (u1 := self.units) is None\n u2_none = (u2 := incoming.units) is None\n if not u1_none and (u2_none or not compatible_units(u1, u2)):\n if not (incoming_donwstream and u2_none):\n fail_info[\"units\"] = (u2, u1)\n success = False\n\n return success" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 151, "func_end_lineno": 205, "func_code": "def from_compressed(xdata, shape, order=\"C\", mask=None, **kwargs):\n \"\"\"\n Fill a (masked) array following a given mask or shape with the provided data.\n\n This will only create a masked array if kwargs are given (especially a mask).\n Otherwise this is simply reshaping the given data.\n Filling is performed in the given array order.\n\n Parameters\n ----------\n data : :class:`pint.Quantity` or :class:`numpy.ndarray` or :class:`numpy.ma.MaskedArray`\n The reference object input.\n shape : str\n shape argument for :any:`numpy.reshape`\n order : str\n order argument for :any:`numpy.reshape`\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to use\n **kwargs\n keyword arguments forwarded to :any:`numpy.ma.array`\n\n Returns\n -------\n :class:`pint.Quantity` or :class:`numpy.ndarray` or :class:`numpy.ma.MaskedArray`\n New object with the desired shape and same type as input.\n Units will be taken from the input if present.\n Will only be a masked array if kwargs are given.\n\n See also\n --------\n to_compressed:\n Inverse operation.\n :any:`numpy.ma.array`:\n Routine consuming kwargs to create a masked array.\n :any:`numpy.reshape`:\n Equivalent routine if no mask is provided.\n\n Notes\n -----\n If both `mask` and `shape` are given, they need to match in size.\n \"\"\"\n if mask is None or mask is np.ma.nomask or not mask_specified(mask):\n if kwargs and mask is Mask.NONE:\n msg = \"from_compressed: Can't create masked array with mask=Mask.NONE\"\n raise FinamDataError(msg)\n data = np.reshape(xdata, shape, order=order)\n return to_masked(data, **kwargs) if kwargs or mask is np.ma.nomask else data\n if is_quantified(xdata):\n # pylint: disable-next=unexpected-keyword-arg\n data = quantify(np.empty_like(xdata, shape=np.prod(shape)), xdata.units)\n else:\n # pylint: disable-next=unexpected-keyword-arg\n data = np.empty_like(xdata, shape=np.prod(shape))\n data[np.logical_not(np.ravel(mask, order=order))] = xdata\n return to_masked(np.reshape(data, shape, order=order), mask=mask, **kwargs)" } ]
[ "function_empty" ]
[ "finam.data.tools.mask.mask_specified", "finam.data.tools.info.Info.mask", "finam.data.tools.mask.masks_compatible", "finam.data.tools.info.Info.accepts", "finam.data.tools.mask.from_compressed" ]
Python
4
4
{ "total_num": 11, "base_passed_num": 1 }
[ "finam.src.finam.data.tools.mask.mask_specified", "finam.src.finam.data.tools.mask.from_compressed", "finam.src.finam.data.tools.mask.masks_compatible", "finam.src.finam.data.tools.info.Info::accepts" ]
finam
[ "finam/data/tools/mask.py", "finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/mask.py", "finam/data/tools/info.py" ]
[ "tests/adapters/test_regrid_mask.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 364, "func_end_lineno": 378, "func_code": "def mask_specified(mask):\n \"\"\"\n Determine whether given mask selection indicates a masked array.\n\n Parameters\n ----------\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to check\n\n Returns\n -------\n bool\n False if mask is Mask.FLEX or Mask.NONE, True otherwise\n \"\"\"\n return not any(mask is val for val in list(Mask))" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 151, "func_end_lineno": 205, "func_code": "def from_compressed(xdata, shape, order=\"C\", mask=None, **kwargs):\n \"\"\"\n Fill a (masked) array following a given mask or shape with the provided data.\n\n This will only create a masked array if kwargs are given (especially a mask).\n Otherwise this is simply reshaping the given data.\n Filling is performed in the given array order.\n\n Parameters\n ----------\n data : :class:`pint.Quantity` or :class:`numpy.ndarray` or :class:`numpy.ma.MaskedArray`\n The reference object input.\n shape : str\n shape argument for :any:`numpy.reshape`\n order : str\n order argument for :any:`numpy.reshape`\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to use\n **kwargs\n keyword arguments forwarded to :any:`numpy.ma.array`\n\n Returns\n -------\n :class:`pint.Quantity` or :class:`numpy.ndarray` or :class:`numpy.ma.MaskedArray`\n New object with the desired shape and same type as input.\n Units will be taken from the input if present.\n Will only be a masked array if kwargs are given.\n\n See also\n --------\n to_compressed:\n Inverse operation.\n :any:`numpy.ma.array`:\n Routine consuming kwargs to create a masked array.\n :any:`numpy.reshape`:\n Equivalent routine if no mask is provided.\n\n Notes\n -----\n If both `mask` and `shape` are given, they need to match in size.\n \"\"\"\n if mask is None or mask is np.ma.nomask or not mask_specified(mask):\n if kwargs and mask is Mask.NONE:\n msg = \"from_compressed: Can't create masked array with mask=Mask.NONE\"\n raise FinamDataError(msg)\n data = np.reshape(xdata, shape, order=order)\n return to_masked(data, **kwargs) if kwargs or mask is np.ma.nomask else data\n if is_quantified(xdata):\n # pylint: disable-next=unexpected-keyword-arg\n data = quantify(np.empty_like(xdata, shape=np.prod(shape)), xdata.units)\n else:\n # pylint: disable-next=unexpected-keyword-arg\n data = np.empty_like(xdata, shape=np.prod(shape))\n data[np.logical_not(np.ravel(mask, order=order))] = xdata\n return to_masked(np.reshape(data, shape, order=order), mask=mask, **kwargs)" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 87, "func_end_lineno": 89, "func_code": " def mask(self):\n \"\"\"Mask or ndarray: data mask.\"\"\"\n return self._mask" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 243, "func_end_lineno": 285, "func_code": "def masks_compatible(\n this, incoming, incoming_donwstream, this_grid=None, incoming_grid=None\n):\n \"\"\"\n Check if an incoming mask is compatible with a given mask.\n\n Parameters\n ----------\n this : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n mask specification to check against\n incoming : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n incoming mask to check for compatibility\n incoming_donwstream : bool\n Whether the incoming mask is from downstream data\n this_grid : Grid or NoGrid or None, optional\n 
grid for first mask (to check shape and value equality)\n incoming_grid : Grid or NoGrid or None, optional\n grid for second mask (to check shape and value equality)\n\n Returns\n -------\n bool\n mask compatibility\n \"\"\"\n if incoming_donwstream:\n upstream, downstream = this, incoming\n up_grid, down_grid = this_grid, incoming_grid\n else:\n upstream, downstream = incoming, this\n up_grid, down_grid = incoming_grid, this_grid\n # None is incompatible\n if upstream is None:\n return False\n # Mask.FLEX accepts anything, Mask.NONE only Mask.NONE\n if not mask_specified(downstream):\n if not mask_specified(upstream):\n return downstream == Mask.FLEX or upstream == Mask.NONE\n return downstream == Mask.FLEX\n # if mask is specified, upstream mask must also be specified\n if not mask_specified(upstream):\n return False\n # if both mask given, compare them\n return masks_equal(downstream, upstream, down_grid, up_grid)" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 157, "func_end_lineno": 201, "func_code": " def accepts(self, incoming, fail_info, incoming_donwstream=False):\n \"\"\"\n Tests whether this info can accept/is compatible with an incoming info.\n\n Tested attributes are: \"grid\", \"mask\" and \"units\"\n\n Parameters\n ----------\n incoming : Info\n Incoming/source info to check. This is the info from upstream.\n fail_info : dict\n Dictionary that will be filled with failed properties; name: (source, target).\n incoming_donwstream : bool, optional\n Whether the incoming info is from downstream data. Default: False\n\n Returns\n -------\n bool\n Whether the incoming info is accepted\n \"\"\"\n if not isinstance(incoming, Info):\n fail_info[\"type\"] = (incoming.__class__, self.__class__)\n return False\n\n success = True\n if self.grid is not None and not self.grid.compatible_with(incoming.grid):\n if not (incoming_donwstream and incoming.grid is None):\n fail_info[\"grid\"] = (incoming.grid, self.grid)\n success = False\n\n if self.mask is not None and not masks_compatible(\n self.mask, incoming.mask, incoming_donwstream, self.grid, incoming.grid\n ):\n if not (incoming_donwstream and incoming.mask is None):\n fail_info[\"mask\"] = (incoming.mask, self.mask)\n success = False\n\n u1_none = (u1 := self.units) is None\n u2_none = (u2 := incoming.units) is None\n if not u1_none and (u2_none or not compatible_units(u1, u2)):\n if not (incoming_donwstream and u2_none):\n fail_info[\"units\"] = (u2, u1)\n success = False\n\n return success" } ]
[ "function_empty" ]
[ "finam.data.tools.mask.mask_specified", "finam.data.tools.mask.from_compressed", "finam.data.tools.info.Info.mask", "finam.data.tools.mask.masks_compatible", "finam.data.tools.info.Info.accepts" ]
Python
4
4
{ "total_num": 6, "base_passed_num": 0 }
[ "finam.src.finam.data.tools.mask.mask_specified", "finam.src.finam.data.tools.mask.masks_compatible", "finam.src.finam.data.tools.info.Info::accepts", "finam.src.finam.data.tools.units.is_quantified", "finam.src.finam.data.tools.core.prepare" ]
finam
[ "finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/units.py", "finam/data/tools/core.py" ]
[ "tests/adapters/test_stats.py", "tests/components/test_simplex_noise.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 364, "func_end_lineno": 378, "func_code": "def mask_specified(mask):\n \"\"\"\n Determine whether given mask selection indicates a masked array.\n\n Parameters\n ----------\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to check\n\n Returns\n -------\n bool\n False if mask is Mask.FLEX or Mask.NONE, True otherwise\n \"\"\"\n return not any(mask is val for val in list(Mask))" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 87, "func_end_lineno": 89, "func_code": " def mask(self):\n \"\"\"Mask or ndarray: data mask.\"\"\"\n return self._mask" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 243, "func_end_lineno": 285, "func_code": "def masks_compatible(\n this, incoming, incoming_donwstream, this_grid=None, incoming_grid=None\n):\n \"\"\"\n Check if an incoming mask is compatible with a given mask.\n\n Parameters\n ----------\n this : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n mask specification to check against\n incoming : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n incoming mask to check for compatibility\n incoming_donwstream : bool\n Whether the incoming mask is from downstream data\n this_grid : Grid or NoGrid or None, optional\n grid for first mask (to check shape and value equality)\n incoming_grid : Grid or NoGrid or None, optional\n grid for second mask (to check shape and value equality)\n\n Returns\n -------\n bool\n mask compatibility\n \"\"\"\n if incoming_donwstream:\n upstream, downstream = this, incoming\n up_grid, down_grid = this_grid, incoming_grid\n else:\n upstream, downstream = incoming, this\n up_grid, down_grid = incoming_grid, this_grid\n # None is incompatible\n if upstream is None:\n return False\n # Mask.FLEX accepts anything, Mask.NONE only Mask.NONE\n if not mask_specified(downstream):\n if not mask_specified(upstream):\n return downstream == Mask.FLEX or upstream == Mask.NONE\n return downstream == Mask.FLEX\n # if mask is specified, upstream mask must also be specified\n if not mask_specified(upstream):\n return False\n # if both mask given, compare them\n return masks_equal(downstream, upstream, down_grid, up_grid)" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 157, "func_end_lineno": 201, "func_code": " def accepts(self, incoming, fail_info, incoming_donwstream=False):\n \"\"\"\n Tests whether this info can accept/is compatible with an incoming info.\n\n Tested attributes are: \"grid\", \"mask\" and \"units\"\n\n Parameters\n ----------\n incoming : Info\n Incoming/source info to check. This is the info from upstream.\n fail_info : dict\n Dictionary that will be filled with failed properties; name: (source, target).\n incoming_donwstream : bool, optional\n Whether the incoming info is from downstream data. 
Default: False\n\n Returns\n -------\n bool\n Whether the incoming info is accepted\n \"\"\"\n if not isinstance(incoming, Info):\n fail_info[\"type\"] = (incoming.__class__, self.__class__)\n return False\n\n success = True\n if self.grid is not None and not self.grid.compatible_with(incoming.grid):\n if not (incoming_donwstream and incoming.grid is None):\n fail_info[\"grid\"] = (incoming.grid, self.grid)\n success = False\n\n if self.mask is not None and not masks_compatible(\n self.mask, incoming.mask, incoming_donwstream, self.grid, incoming.grid\n ):\n if not (incoming_donwstream and incoming.mask is None):\n fail_info[\"mask\"] = (incoming.mask, self.mask)\n success = False\n\n u1_none = (u1 := self.units) is None\n u2_none = (u2 := incoming.units) is None\n if not u1_none and (u2_none or not compatible_units(u1, u2)):\n if not (incoming_donwstream and u2_none):\n fail_info[\"units\"] = (u2, u1)\n success = False\n\n return success" }, { "class_start_lineno": 1, "class_end_lineno": 249, "func_start_lineno": 115, "func_end_lineno": 129, "func_code": "def is_quantified(xdata):\n \"\"\"\n Check if data is a quantified DataArray.\n\n Parameters\n ----------\n xdata : Any\n The given data array.\n\n Returns\n -------\n bool\n Whether the data is a quantified DataArray.\n \"\"\"\n return isinstance(xdata, pint.Quantity)" }, { "class_start_lineno": 1, "class_end_lineno": 363, "func_start_lineno": 26, "func_end_lineno": 113, "func_code": "def prepare(data, info, time_entries=1, force_copy=False, report_conversion=False):\n \"\"\"\n Prepares data in FINAM's internal transmission format.\n\n Checks tha shape of the data.\n Checks or adds units and time dimension.\n\n Parameters\n ----------\n data : arraylike\n The input data.\n info : Info\n Info associated with the data.\n time_entries : int, optional\n Number of time slices in the data. Default 1.\n force_copy : bool, optional\n Forces the result to be a copy of the passed data. Default ``False``.\n\n If not used, the result is a view of the data if no units conversion needs to be done.\n report_conversion : bool, optional\n If true, returns a tuple with the second element indicating the unit conversion if it was required.\n\n Returns\n -------\n pint.Quantity or tuple(pint.Quantity, tuple(pint.Unit, pint.Unit) or None)\n The prepared data as a numpy array, wrapped into a :class:`pint.Quantity`.\n\n If ``report_conversion`` is ``True``, a tuple is returned with the second element\n indicating the unit conversion if it was required.\n\n The second element is ``None`` if no conversion was required,\n and a tuple of two :class:`pint.Unit` objects otherwise.\n\n Raises\n ------\n FinamDataError\n If the data doesn't match its info.\n \"\"\"\n units_converted = None\n units = info.units\n if is_quantified(data):\n if not compatible_units(data.units, units):\n raise FinamDataError(\n f\"Given data has incompatible units. 
\"\n f\"Got {data.units}, expected {units}.\"\n )\n if info.is_masked and not np.ma.isarray(data.magnitude):\n data = UNITS.Quantity(\n np.ma.array(\n data=data.magnitude,\n mask=info.mask,\n shrink=False,\n fill_value=info.fill_value,\n ),\n data.units,\n )\n if not equivalent_units(data.units, units):\n units_converted = data.units, units\n data = data.to(units)\n elif force_copy:\n data = data.copy()\n else:\n if info.is_masked and not np.ma.isarray(data):\n data = UNITS.Quantity(\n np.ma.array(\n data=data,\n mask=info.mask,\n shrink=False,\n fill_value=info.fill_value,\n copy=force_copy,\n ),\n units,\n )\n # this covers masked arrays as well\n elif isinstance(data, np.ndarray):\n if force_copy:\n data = data.copy()\n data = UNITS.Quantity(data, units)\n else:\n if force_copy:\n data = copy.copy(data)\n data = UNITS.Quantity(np.asarray(data), units)\n\n data = _check_input_shape(data, info, time_entries)\n\n if report_conversion:\n return data, units_converted\n return data" } ]
[ "function_empty" ]
[ "finam.data.tools.mask.mask_specified", "finam.data.tools.info.Info.mask", "finam.data.tools.mask.masks_compatible", "finam.data.tools.info.Info.accepts", "finam.data.tools.units.is_quantified", "finam.data.tools.core.prepare" ]
Python
5
5
{ "total_num": 3, "base_passed_num": 0 }
[ "finam.src.finam.sdk.output.Output::push_info", "finam.src.finam.sdk.component.IOList::add", "finam.src.finam.data.grid_tools.gen_axes", "finam.src.finam.components.generators.CallbackGenerator::_connect" ]
finam
[ "finam/sdk/output.py", "finam/sdk/output.py", "finam/sdk/component.py", "finam/data/grid_tools.py", "finam/data/grid_spec.py", "finam/components/generators.py" ]
[ "tests/adapters/test_time_integration.py" ]
[ { "class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 204, "func_end_lineno": 216, "func_code": " def push_info(self, info):\n \"\"\"Push data info into the output.\n\n Parameters\n ----------\n info : :class:`.Info`\n Delivered data info\n \"\"\"\n self.logger.trace(\"push info\")\n if not isinstance(info, Info):\n with ErrorLogger(self.logger):\n raise FinamMetaDataError(\"Metadata must be of type Info\")\n self._output_info = info" }, { "class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 28, "func_end_lineno": 53, "func_code": " def __init__(self, name=None, info=None, static=False, **info_kwargs):\n Loggable.__init__(self)\n self._targets = []\n self.data = []\n self._output_info = None\n self.base_logger_name = None\n if name is None:\n raise ValueError(\"Output: needs a name.\")\n self._name = name\n self._static = static\n\n if info_kwargs:\n if info is not None:\n raise ValueError(\"Output: can't use **kwargs in combination with info\")\n info = Info(**info_kwargs)\n if info is not None:\n self.push_info(info)\n\n self._connected_inputs = {}\n self._out_infos_exchanged = 0\n\n self._time = None\n self._mem_limit = None\n self._mem_location = None\n self._total_mem = 0\n self._mem_counter = 0" }, { "class_start_lineno": 572, "class_end_lineno": 711, "func_start_lineno": 602, "func_end_lineno": 635, "func_code": " def add(self, io=None, *, name=None, info=None, static=False, **info_kwargs):\n \"\"\"\n Add a new IO object either directly ob by attributes.\n\n Parameters\n ----------\n io : :class:`.IInput` or :class:`.IOutput`, optional\n IO object to add, by default None\n name : str, optional\n Name of the new IO object to add, by default None\n info : :class:`.Info`, optional\n Info of the new IO object to add, by default None\n static : bool, optional\n Whether the new IO object in static, by default False\n **info_kwargs\n Optional keyword arguments to instantiate an Info object\n\n Raises\n ------\n ValueError\n If io is not of the correct type.\n \"\"\"\n if self.frozen:\n raise ValueError(\"IO.add: list is frozen.\")\n io = (\n self.cls(name=name, info=info, static=static, **info_kwargs)\n if io is None\n else io\n )\n if not isinstance(io, self.icls):\n raise ValueError(f\"IO.add: {self.name} is not of type {self.iname}\")\n if io.name in self._dict:\n raise ValueError(f\"IO.add: {self.name} '{io.name}' already exists.\")\n self._dict[io.name] = io" }, { "class_start_lineno": 1, "class_end_lineno": 526, "func_start_lineno": 78, "func_end_lineno": 107, "func_code": "def gen_axes(dims, spacing, origin, axes_increase=None):\n \"\"\"\n Generate uniform axes.\n\n Parameters\n ----------\n dims : iterable\n Dimensions of the uniform grid for each direction.\n spacing : iterable\n Spacing of the uniform in each dimension. 
Must be positive.\n origin : iterable\n Origin of the uniform grid.\n axes_increase : arraylike or None, optional\n False to indicate a bottom up axis (in xyz order), by default None\n\n Returns\n -------\n list of np.ndarray\n Axes of the uniform grid.\n \"\"\"\n if axes_increase is None:\n axes_increase = np.full(len(dims), True, dtype=bool)\n if len(axes_increase) != len(dims):\n raise ValueError(\"gen_axes: wrong length of 'axes_increase'\")\n axes = []\n for i, d in enumerate(dims):\n axes.append(np.arange(d) * spacing[i] + origin[i])\n if not axes_increase[i]:\n axes[i] = axes[i][::-1]\n return axes" }, { "class_start_lineno": 236, "class_end_lineno": 364, "func_start_lineno": 267, "func_end_lineno": 296, "func_code": " def __init__(\n self,\n dims,\n spacing=(1.0, 1.0, 1.0),\n origin=(0.0, 0.0, 0.0),\n data_location=Location.CELLS,\n order=\"F\",\n axes_reversed=False,\n axes_increase=None,\n axes_attributes=None,\n axes_names=None,\n crs=None,\n ):\n # at most 3 axes\n dims = tuple(dims)[:3]\n self.spacing = tuple(spacing)[: len(dims)]\n if len(self.spacing) < len(dims):\n raise ValueError(\"UniformGrid: wrong length of 'spacing'\")\n self.origin = tuple(origin)[: len(dims)]\n if len(self.origin) < len(dims):\n raise ValueError(\"UniformGrid: wrong length of 'origin'\")\n super().__init__(\n axes=gen_axes(dims, self.spacing, self.origin, axes_increase),\n data_location=data_location,\n order=order,\n axes_reversed=axes_reversed,\n axes_attributes=axes_attributes,\n axes_names=axes_names,\n crs=crs,\n )" }, { "class_start_lineno": 11, "class_end_lineno": 127, "func_start_lineno": 82, "func_end_lineno": 102, "func_code": " def _connect(self, start_time):\n \"\"\"Push initial values to outputs.\n\n After the method call, the component should have status CONNECTED.\n \"\"\"\n if self._initial_data is None:\n self._initial_data = {\n key: callback(self._time)\n for key, (callback, _) in self._callbacks.items()\n }\n\n push_data = {}\n for name, req in self.connector.data_required.items():\n if req:\n push_data[name] = self._initial_data[name]\n\n self.try_connect(start_time, push_data=push_data)\n\n if self.status == ComponentStatus.CONNECTED:\n del self._initial_data\n del self._connector" } ]
[ "function_empty", "TDD" ]
[ "finam.sdk.output.Output.push_info", "finam.sdk.output.Output.__init__", "finam.sdk.component.IOList.add", "finam.data.grid_tools.gen_axes", "finam.data.grid_spec.UniformGrid.__init__", "finam.components.generators.CallbackGenerator._connect" ]
Python
3
4
{ "total_num": 5, "base_passed_num": 0 }
[ "finam.src.finam.sdk.output.Output::push_info", "finam.src.finam.sdk.component.IOList::add", "finam.src.finam.data.grid_spec.NoGrid::compatible_with", "finam.src.finam.data.tools.mask.mask_specified", "finam.src.finam.data.tools.info.Info::accepts" ]
finam
[ "finam/sdk/output.py", "finam/sdk/output.py", "finam/sdk/component.py", "finam/data/grid_spec.py", "finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/info.py" ]
[ "tests/components/test_callback.py", "tests/components/test_noise.py", "tests/core/test_propagate_info.py", "tests/core/test_schedule.py" ]
[ { "class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 204, "func_end_lineno": 216, "func_code": " def push_info(self, info):\n \"\"\"Push data info into the output.\n\n Parameters\n ----------\n info : :class:`.Info`\n Delivered data info\n \"\"\"\n self.logger.trace(\"push info\")\n if not isinstance(info, Info):\n with ErrorLogger(self.logger):\n raise FinamMetaDataError(\"Metadata must be of type Info\")\n self._output_info = info" }, { "class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 28, "func_end_lineno": 53, "func_code": " def __init__(self, name=None, info=None, static=False, **info_kwargs):\n Loggable.__init__(self)\n self._targets = []\n self.data = []\n self._output_info = None\n self.base_logger_name = None\n if name is None:\n raise ValueError(\"Output: needs a name.\")\n self._name = name\n self._static = static\n\n if info_kwargs:\n if info is not None:\n raise ValueError(\"Output: can't use **kwargs in combination with info\")\n info = Info(**info_kwargs)\n if info is not None:\n self.push_info(info)\n\n self._connected_inputs = {}\n self._out_infos_exchanged = 0\n\n self._time = None\n self._mem_limit = None\n self._mem_location = None\n self._total_mem = 0\n self._mem_counter = 0" }, { "class_start_lineno": 572, "class_end_lineno": 711, "func_start_lineno": 602, "func_end_lineno": 635, "func_code": " def add(self, io=None, *, name=None, info=None, static=False, **info_kwargs):\n \"\"\"\n Add a new IO object either directly ob by attributes.\n\n Parameters\n ----------\n io : :class:`.IInput` or :class:`.IOutput`, optional\n IO object to add, by default None\n name : str, optional\n Name of the new IO object to add, by default None\n info : :class:`.Info`, optional\n Info of the new IO object to add, by default None\n static : bool, optional\n Whether the new IO object in static, by default False\n **info_kwargs\n Optional keyword arguments to instantiate an Info object\n\n Raises\n ------\n ValueError\n If io is not of the correct type.\n \"\"\"\n if self.frozen:\n raise ValueError(\"IO.add: list is frozen.\")\n io = (\n self.cls(name=name, info=info, static=static, **info_kwargs)\n if io is None\n else io\n )\n if not isinstance(io, self.icls):\n raise ValueError(f\"IO.add: {self.name} is not of type {self.iname}\")\n if io.name in self._dict:\n raise ValueError(f\"IO.add: {self.name} '{io.name}' already exists.\")\n self._dict[io.name] = io" }, { "class_start_lineno": 30, "class_end_lineno": 90, "func_start_lineno": 71, "func_end_lineno": 87, "func_code": " def compatible_with(self, other, check_location=True):\n \"\"\"\n Check for compatibility with other Grid.\n\n Parameters\n ----------\n other : instance of Grid\n Other grid to compatibility with.\n check_location : bool, optional\n Whether to check location for equality, by default True\n\n Returns\n -------\n bool\n compatibility\n \"\"\"\n return isinstance(other, NoGrid) and self.data_shape == other.data_shape" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 364, "func_end_lineno": 378, "func_code": "def mask_specified(mask):\n \"\"\"\n Determine whether given mask selection indicates a masked array.\n\n Parameters\n ----------\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to check\n\n Returns\n -------\n bool\n False if mask is Mask.FLEX or Mask.NONE, True otherwise\n \"\"\"\n return not any(mask is val for val in list(Mask))" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 87, 
"func_end_lineno": 89, "func_code": " def mask(self):\n \"\"\"Mask or ndarray: data mask.\"\"\"\n return self._mask" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 157, "func_end_lineno": 201, "func_code": " def accepts(self, incoming, fail_info, incoming_donwstream=False):\n \"\"\"\n Tests whether this info can accept/is compatible with an incoming info.\n\n Tested attributes are: \"grid\", \"mask\" and \"units\"\n\n Parameters\n ----------\n incoming : Info\n Incoming/source info to check. This is the info from upstream.\n fail_info : dict\n Dictionary that will be filled with failed properties; name: (source, target).\n incoming_donwstream : bool, optional\n Whether the incoming info is from downstream data. Default: False\n\n Returns\n -------\n bool\n Whether the incoming info is accepted\n \"\"\"\n if not isinstance(incoming, Info):\n fail_info[\"type\"] = (incoming.__class__, self.__class__)\n return False\n\n success = True\n if self.grid is not None and not self.grid.compatible_with(incoming.grid):\n if not (incoming_donwstream and incoming.grid is None):\n fail_info[\"grid\"] = (incoming.grid, self.grid)\n success = False\n\n if self.mask is not None and not masks_compatible(\n self.mask, incoming.mask, incoming_donwstream, self.grid, incoming.grid\n ):\n if not (incoming_donwstream and incoming.mask is None):\n fail_info[\"mask\"] = (incoming.mask, self.mask)\n success = False\n\n u1_none = (u1 := self.units) is None\n u2_none = (u2 := incoming.units) is None\n if not u1_none and (u2_none or not compatible_units(u1, u2)):\n if not (incoming_donwstream and u2_none):\n fail_info[\"units\"] = (u2, u1)\n success = False\n\n return success" } ]
[ "function_empty" ]
[ "finam.sdk.output.Output.push_info", "finam.sdk.output.Output.__init__", "finam.sdk.component.IOList.add", "finam.data.grid_spec.NoGrid.compatible_with", "finam.data.tools.mask.mask_specified", "finam.data.tools.info.Info.mask", "finam.data.tools.info.Info.accepts" ]
Python
5
5
{ "total_num": 46, "base_passed_num": 7 }