{"id": ["cloudnetpy.cloudnetpy.categorize.atmos_utils.get_lwc_change_rate_at_bases", "cloudnetpy.cloudnetpy.categorize.atmos_utils.fill_clouds_with_lwc_dz", "cloudnetpy.cloudnetpy.products.lwc.Lwc::_init_lwc_adiabatic"], "project": "cloudnetpy", "origin_file": ["cloudnetpy/categorize/atmos_utils.py", "cloudnetpy/categorize/atmos_utils.py", "cloudnetpy/products/lwc.py", "cloudnetpy/products/lwc.py"], "test_list": ["tests/unit/test_lwc.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 357, "func_start_lineno": 175, "func_end_lineno": 198, "func_code": "def get_lwc_change_rate_at_bases(\n temperature: np.ndarray,\n pressure: np.ndarray,\n is_liquid: np.ndarray,\n) -> np.ndarray:\n \"\"\"Finds LWC change rate in liquid cloud bases.\n\n Args:\n temperature: 2D temperature array (K).\n pressure: 2D pressure array (Pa).\n is_liquid: Boolean array indicating presence of liquid clouds.\n\n Returns:\n Liquid water content change rate at cloud bases (kg m-3 m-1).\n\n \"\"\"\n liquid_bases = find_cloud_bases(is_liquid)\n lwc_dz = ma.zeros(liquid_bases.shape)\n lwc_dz[liquid_bases] = calc_lwc_change_rate(\n temperature[liquid_bases],\n pressure[liquid_bases],\n )\n\n return lwc_dz"}, {"class_start_lineno": 1, "class_end_lineno": 357, "func_start_lineno": 154, "func_end_lineno": 172, "func_code": "def fill_clouds_with_lwc_dz(\n temperature: np.ndarray, pressure: np.ndarray, is_liquid: np.ndarray\n) -> np.ndarray:\n \"\"\"Fills liquid clouds with lwc change rate at the cloud bases.\n\n Args:\n temperature: 2D temperature array (K).\n pressure: 2D pressure array (Pa).\n is_liquid: Boolean array indicating presence of liquid clouds.\n\n Returns:\n Liquid water content change rate (kg m-3 m-1), so that for each cloud the base\n value is filled for the whole cloud.\n\n \"\"\"\n lwc_dz = get_lwc_change_rate_at_bases(temperature, pressure, is_liquid)\n lwc_dz_filled = ma.zeros(lwc_dz.shape)\n lwc_dz_filled[is_liquid] = utils.ffill(lwc_dz[is_liquid])\n return lwc_dz_filled"}, {"class_start_lineno": 120, "class_end_lineno": 167, "func_start_lineno": 146, "func_end_lineno": 152, "func_code": " def _init_lwc_adiabatic(self) -> np.ndarray:\n \"\"\"Returns theoretical adiabatic lwc in liquid clouds (kg/m3).\"\"\"\n lwc_dz = atmos_utils.fill_clouds_with_lwc_dz(\n *self.lwc_source.atmosphere,\n self.is_liquid,\n )\n return atmos_utils.calc_adiabatic_lwc(lwc_dz, self.height)"}, {"class_start_lineno": 120, "class_end_lineno": 167, "func_start_lineno": 134, "func_end_lineno": 140, "func_code": " def __init__(self, lwc_source: LwcSource):\n self.lwc_source = lwc_source\n self.height = lwc_source.getvar(\"height\")\n self.is_liquid = self._get_liquid()\n self.lwc_adiabatic = self._init_lwc_adiabatic()\n self.lwc = self._adiabatic_lwc_to_lwc()\n self._mask_rain()"}], "type": ["function_empty", "Development"], "node": ["cloudnetpy.categorize.atmos_utils.get_lwc_change_rate_at_bases", "cloudnetpy.categorize.atmos_utils.fill_clouds_with_lwc_dz", "cloudnetpy.products.lwc.Lwc._init_lwc_adiabatic", "cloudnetpy.products.lwc.Lwc.__init__"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 37, "base_passed_num": 0}} {"id": ["cloudnetpy.cloudnetpy.categorize.atmos_utils.calc_saturation_vapor_pressure", "cloudnetpy.cloudnetpy.categorize.atmos_utils.calc_mixing_ratio", "cloudnetpy.cloudnetpy.categorize.atmos_utils.calc_air_density", "cloudnetpy.cloudnetpy.categorize.atmos_utils.calc_lwc_change_rate"], "project": "cloudnetpy", "origin_file": ["cloudnetpy/categorize/atmos_utils.py", 
"cloudnetpy/categorize/atmos_utils.py", "cloudnetpy/categorize/atmos_utils.py", "cloudnetpy/categorize/atmos_utils.py"], "test_list": ["tests/unit/test_lwc.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 357, "func_start_lineno": 245, "func_end_lineno": 266, "func_code": "def calc_saturation_vapor_pressure(temperature: np.ndarray) -> np.ndarray:\n \"\"\"Goff-Gratch formula for saturation vapor pressure over water adopted by WMO.\n\n Args:\n temperature: Temperature (K).\n\n Returns:\n Saturation vapor pressure (Pa).\n\n \"\"\"\n ratio = con.T0 / temperature\n inv_ratio = ratio**-1\n return (\n 10\n ** (\n 10.79574 * (1 - ratio)\n - 5.028 * np.log10(inv_ratio)\n + 1.50475e-4 * (1 - (10 ** (-8.2969 * (inv_ratio - 1))))\n + 0.42873e-3 * (10 ** (4.76955 * (1 - ratio)) - 1)\n + 0.78614\n )\n ) * con.HPA_TO_PA"}, {"class_start_lineno": 1, "class_end_lineno": 357, "func_start_lineno": 269, "func_end_lineno": 280, "func_code": "def calc_mixing_ratio(vapor_pressure: np.ndarray, pressure: np.ndarray) -> np.ndarray:\n \"\"\"Calculates mixing ratio from partial vapor pressure and pressure.\n\n Args:\n vapor_pressure: Partial pressure of water vapor (Pa).\n pressure: Atmospheric pressure (Pa).\n\n Returns:\n Mixing ratio (kg kg-1).\n\n \"\"\"\n return con.MW_RATIO * vapor_pressure / (pressure - vapor_pressure)"}, {"class_start_lineno": 1, "class_end_lineno": 357, "func_start_lineno": 283, "func_end_lineno": 299, "func_code": "def calc_air_density(\n pressure: np.ndarray,\n temperature: np.ndarray,\n svp_mixing_ratio: np.ndarray,\n) -> np.ndarray:\n \"\"\"Calculates air density (kg m-3).\n\n Args:\n pressure: Pressure (Pa).\n temperature: Temperature (K).\n svp_mixing_ratio: Saturation vapor pressure mixing ratio (kg kg-1).\n\n Returns:\n Air density (kg m-3).\n\n \"\"\"\n return pressure / (con.RS * temperature * (0.6 * svp_mixing_ratio + 1))"}, {"class_start_lineno": 1, "class_end_lineno": 357, "func_start_lineno": 201, "func_end_lineno": 242, "func_code": "def calc_lwc_change_rate(temperature: np.ndarray, pressure: np.ndarray) -> np.ndarray:\n \"\"\"Returns rate of change of condensable water (LWC).\n\n Calculates the theoretical adiabatic rate of increase of LWC\n with height, given the cloud base temperature and pressure.\n\n Args:\n temperature: Temperature of cloud base (K).\n pressure: Pressure of cloud base (Pa).\n\n Returns:\n dlwc/dz (kg m-3 m-1)\n\n References:\n Brenguier, 1991, https://doi.org/10.1175/1520-0469(1991)048<0264:POTCPA>2.0.CO;2\n\n \"\"\"\n svp = calc_saturation_vapor_pressure(temperature)\n svp_mixing_ratio = calc_mixing_ratio(svp, pressure)\n air_density = calc_air_density(pressure, temperature, svp_mixing_ratio)\n\n e = 0.622\n Cp = 1004 # J kg-1 K-1\n Lv = 2.45e6 # J kg-1 = Pa m3 kg-1\n qs = svp_mixing_ratio # kg kg-1\n pa = air_density # kg m-3\n es = svp # Pa\n P = pressure # Pa\n T = temperature # K\n\n # See Appendix B in Brenguier (1991) for the derivation of the following equation\n dqs_dp = (\n -(1 - (Cp * T) / (e * Lv))\n * (((Cp * T) / (e * Lv)) + ((Lv * qs * pa) / (P - es))) ** -1\n * (e * es)\n * (P - es) ** -2\n )\n\n # Using hydrostatic equation to convert dqs_dp to dqs_dz\n dqs_dz = dqs_dp * air_density * -scipy.constants.g\n\n return dqs_dz * air_density"}], "type": ["function_empty", "Development"], "node": ["cloudnetpy.categorize.atmos_utils.calc_saturation_vapor_pressure", "cloudnetpy.categorize.atmos_utils.calc_mixing_ratio", "cloudnetpy.categorize.atmos_utils.calc_air_density", "cloudnetpy.categorize.atmos_utils.calc_lwc_change_rate"], 
"language": "Python", "toolfunc_count": 3, "func_count": 4, "pytest_info": {"total_num": 37, "base_passed_num": 35}} {"id": ["d3rlpy.d3rlpy.dataset.transition_pickers.BasicTransitionPicker::__call__", "d3rlpy.d3rlpy.metrics.evaluators.make_batches", "d3rlpy.d3rlpy.metrics.evaluators.TDErrorEvaluator::__call__"], "project": "d3rlpy", "origin_file": ["d3rlpy/dataset/transition_pickers.py", "d3rlpy/metrics/evaluators.py", "d3rlpy/metrics/evaluators.py"], "test_list": ["tests_copy/metrics/test_evaluators.py"], "prob_info": [{"class_start_lineno": 43, "class_end_lineno": 72, "func_start_lineno": 49, "func_end_lineno": 72, "func_code": " def __call__(self, episode: EpisodeBase, index: int) -> Transition:\n _validate_index(episode, index)\n\n observation = retrieve_observation(episode.observations, index)\n is_terminal = episode.terminated and index == episode.size() - 1\n if is_terminal:\n next_observation = create_zero_observation(observation)\n next_action = np.zeros_like(episode.actions[index])\n else:\n next_observation = retrieve_observation(\n episode.observations, index + 1\n )\n next_action = episode.actions[index + 1]\n\n return Transition(\n observation=observation,\n action=episode.actions[index],\n reward=episode.rewards[index],\n next_observation=next_observation,\n next_action=next_action,\n terminal=float(is_terminal),\n interval=1,\n rewards_to_go=episode.rewards[index:],\n )"}, {"class_start_lineno": 1, "class_end_lineno": 548, "func_start_lineno": 52, "func_end_lineno": 68, "func_code": "def make_batches(\n episode: EpisodeBase,\n window_size: int,\n transition_picker: TransitionPickerProtocol,\n) -> Iterator[TransitionMiniBatch]:\n n_batches = len(episode) // window_size\n if len(episode) % window_size != 0:\n n_batches += 1\n for i in range(n_batches):\n head_index = i * window_size\n last_index = min(head_index + window_size, episode.transition_count)\n transitions = [\n transition_picker(episode, index)\n for index in range(head_index, last_index)\n ]\n batch = TransitionMiniBatch.from_transitions(transitions)\n yield batch"}, {"class_start_lineno": 71, "class_end_lineno": 121, "func_start_lineno": 93, "func_end_lineno": 121, "func_code": " def __call__(\n self,\n algo: QLearningAlgoProtocol,\n dataset: ReplayBufferBase,\n ) -> float:\n total_errors = []\n episodes = self._episodes if self._episodes else dataset.episodes\n for episode in episodes:\n for batch in make_batches(\n episode, WINDOW_SIZE, dataset.transition_picker\n ):\n # estimate values for current observations\n values = algo.predict_value(batch.observations, batch.actions)\n\n # estimate values for next observations\n next_actions = algo.predict(batch.next_observations)\n next_values = algo.predict_value(\n batch.next_observations, next_actions\n )\n\n # calculate td errors\n mask = (1.0 - batch.terminals).reshape(-1)\n rewards = np.asarray(batch.rewards).reshape(-1)\n if algo.reward_scaler:\n rewards = algo.reward_scaler.transform_numpy(rewards)\n y = rewards + algo.gamma * next_values * mask\n total_errors += ((values - y) ** 2).tolist()\n\n return float(np.mean(total_errors))"}], "type": ["Development"], "node": ["d3rlpy.dataset.transition_pickers.BasicTransitionPicker.__call__", "d3rlpy.metrics.evaluators.make_batches", "d3rlpy.metrics.evaluators.TDErrorEvaluator.__call__"], "language": "Python", "toolfunc_count": 0, "func_count": 3, "pytest_info": {"total_num": 19, "base_passed_num": 0}} {"id": ["d3rlpy.d3rlpy.dataset.transition_pickers.BasicTransitionPicker::__call__", 
"d3rlpy.d3rlpy.metrics.evaluators.make_batches", "d3rlpy.d3rlpy.metrics.evaluators.DiscountedSumOfAdvantageEvaluator::__call__"], "project": "d3rlpy", "origin_file": ["d3rlpy/dataset/transition_pickers.py", "d3rlpy/metrics/evaluators.py", "d3rlpy/metrics/evaluators.py"], "test_list": ["tests_copy/metrics/test_evaluators.py"], "prob_info": [{"class_start_lineno": 43, "class_end_lineno": 72, "func_start_lineno": 49, "func_end_lineno": 72, "func_code": " def __call__(self, episode: EpisodeBase, index: int) -> Transition:\n _validate_index(episode, index)\n\n observation = retrieve_observation(episode.observations, index)\n is_terminal = episode.terminated and index == episode.size() - 1\n if is_terminal:\n next_observation = create_zero_observation(observation)\n next_action = np.zeros_like(episode.actions[index])\n else:\n next_observation = retrieve_observation(\n episode.observations, index + 1\n )\n next_action = episode.actions[index + 1]\n\n return Transition(\n observation=observation,\n action=episode.actions[index],\n reward=episode.rewards[index],\n next_observation=next_observation,\n next_action=next_action,\n terminal=float(is_terminal),\n interval=1,\n rewards_to_go=episode.rewards[index:],\n )"}, {"class_start_lineno": 1, "class_end_lineno": 548, "func_start_lineno": 52, "func_end_lineno": 68, "func_code": "def make_batches(\n episode: EpisodeBase,\n window_size: int,\n transition_picker: TransitionPickerProtocol,\n) -> Iterator[TransitionMiniBatch]:\n n_batches = len(episode) // window_size\n if len(episode) % window_size != 0:\n n_batches += 1\n for i in range(n_batches):\n head_index = i * window_size\n last_index = min(head_index + window_size, episode.transition_count)\n transitions = [\n transition_picker(episode, index)\n for index in range(head_index, last_index)\n ]\n batch = TransitionMiniBatch.from_transitions(transitions)\n yield batch"}, {"class_start_lineno": 124, "class_end_lineno": 188, "func_start_lineno": 154, "func_end_lineno": 188, "func_code": " def __call__(\n self,\n algo: QLearningAlgoProtocol,\n dataset: ReplayBufferBase,\n ) -> float:\n total_sums = []\n episodes = self._episodes if self._episodes else dataset.episodes\n for episode in episodes:\n for batch in make_batches(\n episode, WINDOW_SIZE, dataset.transition_picker\n ):\n # estimate values for dataset actions\n dataset_values = algo.predict_value(\n batch.observations, batch.actions\n )\n\n # estimate values for the current policy\n actions = algo.predict(batch.observations)\n on_policy_values = algo.predict_value(\n batch.observations, actions\n )\n\n # calculate advantages\n advantages = (dataset_values - on_policy_values).tolist()\n\n # calculate discounted sum of advantages\n A = advantages[-1]\n sum_advantages = [A]\n for advantage in reversed(advantages[:-1]):\n A = advantage + algo.gamma * A\n sum_advantages.append(A)\n\n total_sums += sum_advantages\n # smaller is better\n return float(np.mean(total_sums))"}], "type": ["Development"], "node": ["d3rlpy.dataset.transition_pickers.BasicTransitionPicker.__call__", "d3rlpy.metrics.evaluators.make_batches", "d3rlpy.metrics.evaluators.DiscountedSumOfAdvantageEvaluator.__call__"], "language": "Python", "toolfunc_count": 0, "func_count": 3, "pytest_info": {"total_num": 19, "base_passed_num": 0}} {"id": ["d3rlpy.d3rlpy.dataset.transition_pickers.BasicTransitionPicker::__call__", "d3rlpy.d3rlpy.metrics.evaluators.make_batches", "d3rlpy.d3rlpy.metrics.evaluators.AverageValueEstimationEvaluator::__call__"], "project": "d3rlpy", "origin_file": 
["d3rlpy/dataset/transition_pickers.py", "d3rlpy/metrics/evaluators.py", "d3rlpy/metrics/evaluators.py"], "test_list": ["tests_copy/metrics/test_evaluators.py"], "prob_info": [{"class_start_lineno": 43, "class_end_lineno": 72, "func_start_lineno": 49, "func_end_lineno": 72, "func_code": " def __call__(self, episode: EpisodeBase, index: int) -> Transition:\n _validate_index(episode, index)\n\n observation = retrieve_observation(episode.observations, index)\n is_terminal = episode.terminated and index == episode.size() - 1\n if is_terminal:\n next_observation = create_zero_observation(observation)\n next_action = np.zeros_like(episode.actions[index])\n else:\n next_observation = retrieve_observation(\n episode.observations, index + 1\n )\n next_action = episode.actions[index + 1]\n\n return Transition(\n observation=observation,\n action=episode.actions[index],\n reward=episode.rewards[index],\n next_observation=next_observation,\n next_action=next_action,\n terminal=float(is_terminal),\n interval=1,\n rewards_to_go=episode.rewards[index:],\n )"}, {"class_start_lineno": 1, "class_end_lineno": 548, "func_start_lineno": 52, "func_end_lineno": 68, "func_code": "def make_batches(\n episode: EpisodeBase,\n window_size: int,\n transition_picker: TransitionPickerProtocol,\n) -> Iterator[TransitionMiniBatch]:\n n_batches = len(episode) // window_size\n if len(episode) % window_size != 0:\n n_batches += 1\n for i in range(n_batches):\n head_index = i * window_size\n last_index = min(head_index + window_size, episode.transition_count)\n transitions = [\n transition_picker(episode, index)\n for index in range(head_index, last_index)\n ]\n batch = TransitionMiniBatch.from_transitions(transitions)\n yield batch"}, {"class_start_lineno": 191, "class_end_lineno": 226, "func_start_lineno": 212, "func_end_lineno": 226, "func_code": " def __call__(\n self,\n algo: QLearningAlgoProtocol,\n dataset: ReplayBufferBase,\n ) -> float:\n total_values = []\n episodes = self._episodes if self._episodes else dataset.episodes\n for episode in episodes:\n for batch in make_batches(\n episode, WINDOW_SIZE, dataset.transition_picker\n ):\n actions = algo.predict(batch.observations)\n values = algo.predict_value(batch.observations, actions)\n total_values += values.tolist()\n return float(np.mean(total_values))"}], "type": ["Development"], "node": ["d3rlpy.dataset.transition_pickers.BasicTransitionPicker.__call__", "d3rlpy.metrics.evaluators.make_batches", "d3rlpy.metrics.evaluators.AverageValueEstimationEvaluator.__call__"], "language": "Python", "toolfunc_count": 0, "func_count": 3, "pytest_info": {"total_num": 19, "base_passed_num": 0}} {"id": ["d3rlpy.d3rlpy.dataset.transition_pickers.BasicTransitionPicker::__call__", "d3rlpy.d3rlpy.metrics.evaluators.make_batches", "d3rlpy.d3rlpy.metrics.evaluators.ContinuousActionDiffEvaluator::__call__"], "project": "d3rlpy", "origin_file": ["d3rlpy/dataset/transition_pickers.py", "d3rlpy/metrics/evaluators.py", "d3rlpy/metrics/evaluators.py"], "test_list": ["tests_copy/metrics/test_evaluators.py"], "prob_info": [{"class_start_lineno": 43, "class_end_lineno": 72, "func_start_lineno": 49, "func_end_lineno": 72, "func_code": " def __call__(self, episode: EpisodeBase, index: int) -> Transition:\n _validate_index(episode, index)\n\n observation = retrieve_observation(episode.observations, index)\n is_terminal = episode.terminated and index == episode.size() - 1\n if is_terminal:\n next_observation = create_zero_observation(observation)\n next_action = 
np.zeros_like(episode.actions[index])\n else:\n next_observation = retrieve_observation(\n episode.observations, index + 1\n )\n next_action = episode.actions[index + 1]\n\n return Transition(\n observation=observation,\n action=episode.actions[index],\n reward=episode.rewards[index],\n next_observation=next_observation,\n next_action=next_action,\n terminal=float(is_terminal),\n interval=1,\n rewards_to_go=episode.rewards[index:],\n )"}, {"class_start_lineno": 1, "class_end_lineno": 548, "func_start_lineno": 52, "func_end_lineno": 68, "func_code": "def make_batches(\n episode: EpisodeBase,\n window_size: int,\n transition_picker: TransitionPickerProtocol,\n) -> Iterator[TransitionMiniBatch]:\n n_batches = len(episode) // window_size\n if len(episode) % window_size != 0:\n n_batches += 1\n for i in range(n_batches):\n head_index = i * window_size\n last_index = min(head_index + window_size, episode.transition_count)\n transitions = [\n transition_picker(episode, index)\n for index in range(head_index, last_index)\n ]\n batch = TransitionMiniBatch.from_transitions(transitions)\n yield batch"}, {"class_start_lineno": 330, "class_end_lineno": 366, "func_start_lineno": 352, "func_end_lineno": 366, "func_code": " def __call__(\n self,\n algo: QLearningAlgoProtocol,\n dataset: ReplayBufferBase,\n ) -> float:\n total_diffs = []\n episodes = self._episodes if self._episodes else dataset.episodes\n for episode in episodes:\n for batch in make_batches(\n episode, WINDOW_SIZE, dataset.transition_picker\n ):\n actions = algo.predict(batch.observations)\n diff = ((batch.actions - actions) ** 2).sum(axis=1).tolist()\n total_diffs += diff\n return float(np.mean(total_diffs))"}], "type": ["Development"], "node": ["d3rlpy.dataset.transition_pickers.BasicTransitionPicker.__call__", "d3rlpy.metrics.evaluators.make_batches", "d3rlpy.metrics.evaluators.ContinuousActionDiffEvaluator.__call__"], "language": "Python", "toolfunc_count": 0, "func_count": 3, "pytest_info": {"total_num": 19, "base_passed_num": 0}} {"id": ["d3rlpy.d3rlpy.dataset.transition_pickers.BasicTransitionPicker::__call__", "d3rlpy.d3rlpy.metrics.evaluators.make_batches", "d3rlpy.d3rlpy.metrics.evaluators.CompareContinuousActionDiffEvaluator::__call__"], "project": "d3rlpy", "origin_file": ["d3rlpy/dataset/transition_pickers.py", "d3rlpy/metrics/evaluators.py", "d3rlpy/metrics/evaluators.py"], "test_list": ["tests_copy/metrics/test_evaluators.py"], "prob_info": [{"class_start_lineno": 43, "class_end_lineno": 72, "func_start_lineno": 49, "func_end_lineno": 72, "func_code": " def __call__(self, episode: EpisodeBase, index: int) -> Transition:\n _validate_index(episode, index)\n\n observation = retrieve_observation(episode.observations, index)\n is_terminal = episode.terminated and index == episode.size() - 1\n if is_terminal:\n next_observation = create_zero_observation(observation)\n next_action = np.zeros_like(episode.actions[index])\n else:\n next_observation = retrieve_observation(\n episode.observations, index + 1\n )\n next_action = episode.actions[index + 1]\n\n return Transition(\n observation=observation,\n action=episode.actions[index],\n reward=episode.rewards[index],\n next_observation=next_observation,\n next_action=next_action,\n terminal=float(is_terminal),\n interval=1,\n rewards_to_go=episode.rewards[index:],\n )"}, {"class_start_lineno": 1, "class_end_lineno": 548, "func_start_lineno": 52, "func_end_lineno": 68, "func_code": "def make_batches(\n episode: EpisodeBase,\n window_size: int,\n transition_picker: 
TransitionPickerProtocol,\n) -> Iterator[TransitionMiniBatch]:\n n_batches = len(episode) // window_size\n if len(episode) % window_size != 0:\n n_batches += 1\n for i in range(n_batches):\n head_index = i * window_size\n last_index = min(head_index + window_size, episode.transition_count)\n transitions = [\n transition_picker(episode, index)\n for index in range(head_index, last_index)\n ]\n batch = TransitionMiniBatch.from_transitions(transitions)\n yield batch"}, {"class_start_lineno": 409, "class_end_lineno": 455, "func_start_lineno": 439, "func_end_lineno": 455, "func_code": " def __call__(\n self,\n algo: QLearningAlgoProtocol,\n dataset: ReplayBufferBase,\n ) -> float:\n total_diffs = []\n episodes = self._episodes if self._episodes else dataset.episodes\n for episode in episodes:\n # TODO: handle different n_frames\n for batch in make_batches(\n episode, WINDOW_SIZE, dataset.transition_picker\n ):\n base_actions = self._base_algo.predict(batch.observations)\n actions = algo.predict(batch.observations)\n diff = ((actions - base_actions) ** 2).sum(axis=1).tolist()\n total_diffs += diff\n return float(np.mean(total_diffs))"}], "type": ["Development"], "node": ["d3rlpy.dataset.transition_pickers.BasicTransitionPicker.__call__", "d3rlpy.metrics.evaluators.make_batches", "d3rlpy.metrics.evaluators.CompareContinuousActionDiffEvaluator.__call__"], "language": "Python", "toolfunc_count": 0, "func_count": 3, "pytest_info": {"total_num": 19, "base_passed_num": 0}} {"id": ["d3rlpy.d3rlpy.dataset.transition_pickers.BasicTransitionPicker::__call__", "d3rlpy.d3rlpy.metrics.evaluators.make_batches", "d3rlpy.d3rlpy.metrics.evaluators.CompareDiscreteActionMatchEvaluator::__call__"], "project": "d3rlpy", "origin_file": ["d3rlpy/dataset/transition_pickers.py", "d3rlpy/metrics/evaluators.py", "d3rlpy/metrics/evaluators.py"], "test_list": ["tests_copy/metrics/test_evaluators.py"], "prob_info": [{"class_start_lineno": 43, "class_end_lineno": 72, "func_start_lineno": 49, "func_end_lineno": 72, "func_code": " def __call__(self, episode: EpisodeBase, index: int) -> Transition:\n _validate_index(episode, index)\n\n observation = retrieve_observation(episode.observations, index)\n is_terminal = episode.terminated and index == episode.size() - 1\n if is_terminal:\n next_observation = create_zero_observation(observation)\n next_action = np.zeros_like(episode.actions[index])\n else:\n next_observation = retrieve_observation(\n episode.observations, index + 1\n )\n next_action = episode.actions[index + 1]\n\n return Transition(\n observation=observation,\n action=episode.actions[index],\n reward=episode.rewards[index],\n next_observation=next_observation,\n next_action=next_action,\n terminal=float(is_terminal),\n interval=1,\n rewards_to_go=episode.rewards[index:],\n )"}, {"class_start_lineno": 1, "class_end_lineno": 548, "func_start_lineno": 52, "func_end_lineno": 68, "func_code": "def make_batches(\n episode: EpisodeBase,\n window_size: int,\n transition_picker: TransitionPickerProtocol,\n) -> Iterator[TransitionMiniBatch]:\n n_batches = len(episode) // window_size\n if len(episode) % window_size != 0:\n n_batches += 1\n for i in range(n_batches):\n head_index = i * window_size\n last_index = min(head_index + window_size, episode.transition_count)\n transitions = [\n transition_picker(episode, index)\n for index in range(head_index, last_index)\n ]\n batch = TransitionMiniBatch.from_transitions(transitions)\n yield batch"}, {"class_start_lineno": 458, "class_end_lineno": 503, "func_start_lineno": 489, 
"func_end_lineno": 503, "func_code": " def __call__(\n self, algo: QLearningAlgoProtocol, dataset: ReplayBufferBase\n ) -> float:\n total_matches = []\n episodes = self._episodes if self._episodes else dataset.episodes\n for episode in episodes:\n # TODO: handle different n_frames\n for batch in make_batches(\n episode, WINDOW_SIZE, dataset.transition_picker\n ):\n base_actions = self._base_algo.predict(batch.observations)\n actions = algo.predict(batch.observations)\n match = (base_actions == actions).tolist()\n total_matches += match\n return float(np.mean(total_matches))"}], "type": ["Development"], "node": ["d3rlpy.dataset.transition_pickers.BasicTransitionPicker.__call__", "d3rlpy.metrics.evaluators.make_batches", "d3rlpy.metrics.evaluators.CompareDiscreteActionMatchEvaluator.__call__"], "language": "Python", "toolfunc_count": 0, "func_count": 3, "pytest_info": {"total_num": 19, "base_passed_num": 0}} {"id": ["d3rlpy.d3rlpy.models.torch.q_functions.mean_q_function.DiscreteMeanQFunctionForwarder::compute_error", "d3rlpy.d3rlpy.models.torch.q_functions.iqn_q_function.DiscreteIQNQFunctionForwarder::compute_error", "d3rlpy.d3rlpy.models.torch.q_functions.ensemble_q_function.compute_ensemble_q_function_error"], "project": "d3rlpy", "origin_file": ["d3rlpy/models/torch/q_functions/mean_q_function.py", "d3rlpy/models/torch/q_functions/iqn_q_function.py", "d3rlpy/models/torch/q_functions/ensemble_q_function.py", "d3rlpy/models/torch/q_functions/ensemble_q_function.py", "d3rlpy/models/torch/q_functions/ensemble_q_function.py"], "test_list": ["tests_copy/models/torch/q_functions/test_ensemble_q_function.py", "tests_copy/models/torch/q_functions/test_ensemble_q_function.py"], "prob_info": [{"class_start_lineno": 47, "class_end_lineno": 86, "func_start_lineno": 58, "func_end_lineno": 74, "func_code": " def compute_error(\n self,\n observations: TorchObservation,\n actions: torch.Tensor,\n rewards: torch.Tensor,\n target: torch.Tensor,\n terminals: torch.Tensor,\n gamma: Union[float, torch.Tensor] = 0.99,\n reduction: str = \"mean\",\n ) -> torch.Tensor:\n one_hot = F.one_hot(actions.view(-1), num_classes=self._action_size)\n value = (self._q_func(observations).q_value * one_hot.float()).sum(\n dim=1, keepdim=True\n )\n y = rewards + gamma * target * (1 - terminals)\n loss = compute_huber_loss(value, y)\n return compute_reduce(loss, reduction)"}, {"class_start_lineno": 122, "class_end_lineno": 174, "func_start_lineno": 133, "func_end_lineno": 162, "func_code": " def compute_error(\n self,\n observations: TorchObservation,\n actions: torch.Tensor,\n rewards: torch.Tensor,\n target: torch.Tensor,\n terminals: torch.Tensor,\n gamma: Union[float, torch.Tensor] = 0.99,\n reduction: str = \"mean\",\n ) -> torch.Tensor:\n batch_size = get_batch_size(observations)\n assert target.shape == (batch_size, self._n_quantiles)\n\n # extraect quantiles corresponding to act_t\n output = self._q_func(observations)\n taus = output.taus\n all_quantiles = output.quantiles\n assert taus is not None and all_quantiles is not None\n quantiles = pick_quantile_value_by_action(all_quantiles, actions)\n\n loss = compute_quantile_loss(\n quantiles=quantiles,\n rewards=rewards,\n target=target,\n terminals=terminals,\n taus=taus,\n gamma=gamma,\n )\n\n return compute_reduce(loss, reduction)"}, {"class_start_lineno": 1, "class_end_lineno": 367, "func_start_lineno": 77, "func_end_lineno": 109, "func_code": "def compute_ensemble_q_function_error(\n forwarders: Union[\n Sequence[DiscreteQFunctionForwarder],\n 
Sequence[ContinuousQFunctionForwarder],\n ],\n observations: TorchObservation,\n actions: torch.Tensor,\n rewards: torch.Tensor,\n target: torch.Tensor,\n terminals: torch.Tensor,\n gamma: Union[float, torch.Tensor] = 0.99,\n masks: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n assert target.ndim == 2\n td_sum = torch.tensor(\n 0.0,\n dtype=torch.float32,\n device=get_device(observations),\n )\n for forwarder in forwarders:\n loss = forwarder.compute_error(\n observations=observations,\n actions=actions,\n rewards=rewards,\n target=target,\n terminals=terminals,\n gamma=gamma,\n reduction=\"none\",\n )\n if masks is not None:\n loss = loss * masks\n td_sum += loss.mean()\n return td_sum"}, {"class_start_lineno": 150, "class_end_lineno": 218, "func_start_lineno": 179, "func_end_lineno": 198, "func_code": " def compute_error(\n self,\n observations: TorchObservation,\n actions: torch.Tensor,\n rewards: torch.Tensor,\n target: torch.Tensor,\n terminals: torch.Tensor,\n gamma: Union[float, torch.Tensor] = 0.99,\n masks: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n return compute_ensemble_q_function_error(\n forwarders=self._forwarders,\n observations=observations,\n actions=actions,\n rewards=rewards,\n target=target,\n terminals=terminals,\n gamma=gamma,\n masks=masks,\n )"}, {"class_start_lineno": 221, "class_end_lineno": 291, "func_start_lineno": 252, "func_end_lineno": 271, "func_code": " def compute_error(\n self,\n observations: TorchObservation,\n actions: torch.Tensor,\n rewards: torch.Tensor,\n target: torch.Tensor,\n terminals: torch.Tensor,\n gamma: Union[float, torch.Tensor] = 0.99,\n masks: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n return compute_ensemble_q_function_error(\n forwarders=self._forwarders,\n observations=observations,\n actions=actions,\n rewards=rewards,\n target=target,\n terminals=terminals,\n gamma=gamma,\n masks=masks,\n )"}], "type": ["Development"], "node": ["d3rlpy.models.torch.q_functions.mean_q_function.DiscreteMeanQFunctionForwarder.compute_error", "d3rlpy.models.torch.q_functions.iqn_q_function.DiscreteIQNQFunctionForwarder.compute_error", "d3rlpy.models.torch.q_functions.ensemble_q_function.compute_ensemble_q_function_error", "d3rlpy.models.torch.q_functions.ensemble_q_function.DiscreteEnsembleQFunctionForwarder.compute_error", "d3rlpy.models.torch.q_functions.ensemble_q_function.ContinuousEnsembleQFunctionForwarder.compute_error"], "language": "Python", "toolfunc_count": 0, "func_count": 3, "pytest_info": {"total_num": 60, "base_passed_num": 20}} {"id": ["datachain.src.datachain.lib.convert.python_to_sql.python_to_sql", "datachain.src.datachain.lib.signal_schema.SignalSchema::_get_flat_tree", "datachain.src.datachain.lib.signal_schema.SignalSchema::get_column_type"], "project": "datachain", "origin_file": ["datachain/lib/convert/python_to_sql.py", "datachain/lib/signal_schema.py", "datachain/lib/signal_schema.py", "datachain/func/func.py", "datachain/func/func.py"], "test_list": ["tests/unit/test_func.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 117, "func_start_lineno": 37, "func_end_lineno": 82, "func_code": "def python_to_sql(typ): # noqa: PLR0911\n if inspect.isclass(typ):\n if issubclass(typ, SQLType):\n return typ\n if issubclass(typ, Enum):\n return str\n\n res = PYTHON_TO_SQL.get(typ)\n if res:\n return res\n\n orig = get_origin(typ)\n\n if orig in (Literal, LiteralEx):\n return String\n\n args = get_args(typ)\n if inspect.isclass(orig) and (issubclass(list, orig) or issubclass(tuple, orig)):\n if 
args is None:\n raise TypeError(f\"Cannot resolve type '{typ}' for flattening features\")\n\n args0 = args[0]\n if ModelStore.is_pydantic(args0):\n return Array(JSON())\n\n list_type = list_of_args_to_type(args)\n return Array(list_type)\n\n if orig is Annotated:\n # Ignoring annotations\n return python_to_sql(args[0])\n\n if inspect.isclass(orig) and issubclass(dict, orig):\n return JSON\n\n if orig == Union:\n if len(args) == 2 and (type(None) in args):\n return python_to_sql(args[0])\n\n if _is_union_str_literal(orig, args):\n return String\n\n if _is_json_inside_union(orig, args):\n return JSON\n\n raise TypeError(f\"Cannot recognize type {typ}\")"}, {"class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 630, "func_end_lineno": 639, "func_code": " def _get_flat_tree(\n self, tree: dict, prefix: list[str], depth: int\n ) -> Iterator[tuple[list[str], DataType, bool, int]]:\n for name, (type_, substree) in tree.items():\n suffix = name.split(\".\")\n new_prefix = prefix + suffix\n has_subtree = substree is not None\n yield new_prefix, type_, has_subtree, depth\n if substree is not None:\n yield from self._get_flat_tree(substree, new_prefix, depth + 1)"}, {"class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 464, "func_end_lineno": 479, "func_code": " def get_column_type(self, col_name: str, with_subtree: bool = False) -> DataType:\n \"\"\"\n Returns column type by column name.\n\n If `with_subtree` is True, then it will return the type of the column\n even if it has a subtree (e.g. model with nested fields), otherwise it will\n return the type of the column (standard type field, not the model).\n\n If column is not found, raises `SignalResolvingError`.\n \"\"\"\n for path, _type, has_subtree, _ in self.get_flat_tree():\n if (with_subtree or not has_subtree) and DEFAULT_DELIMITER.join(\n path\n ) == col_name:\n return _type\n raise SignalResolvingError([col_name], \"is not found\")"}, {"class_start_lineno": 1, "class_end_lineno": 449, "func_start_lineno": 425, "func_end_lineno": 438, "func_code": "def get_db_col_type(signals_schema: \"SignalSchema\", col: ColT) -> \"DataType\":\n if isinstance(col, tuple):\n # we can only get tuple from case statement where the first tuple item\n # is condition, and second one is value which type is important\n col = col[1]\n if isinstance(col, Func):\n return col.get_result_type(signals_schema)\n\n if isinstance(col, ColumnElement) and not hasattr(col, \"name\"):\n return sql_to_python(col)\n\n return signals_schema.get_column_type(\n col.name if isinstance(col, ColumnElement) else col # type: ignore[arg-type]\n )"}, {"class_start_lineno": 29, "class_end_lineno": 422, "func_start_lineno": 375, "func_end_lineno": 422, "func_code": " def get_column(\n self,\n signals_schema: Optional[\"SignalSchema\"] = None,\n label: Optional[str] = None,\n table: Optional[\"TableClause\"] = None,\n ) -> Column:\n col_type = self.get_result_type(signals_schema)\n sql_type = python_to_sql(col_type)\n\n def get_col(col: ColT, string_as_literal=False) -> ColT:\n # string_as_literal is used only for conditionals like `case()` where\n # literals are nested inside ColT as we have tuples of condition - values\n # and if user wants to set some case value as column, explicit `C(\"col\")`\n # syntax must be used to distinguish from literals\n if isinstance(col, tuple):\n return tuple(get_col(x, string_as_literal=True) for x in col)\n if isinstance(col, Func):\n return col.get_column(signals_schema, table=table)\n if isinstance(col, str) and not 
string_as_literal:\n column = Column(col, sql_type)\n column.table = table\n return column\n return col\n\n cols = [get_col(col) for col in self._db_cols]\n kwargs = {k: get_col(v, string_as_literal=True) for k, v in self.kwargs.items()}\n func_col = self.inner(*cols, *self.args, **kwargs)\n\n if self.is_window:\n if not self.window:\n raise DataChainParamsError(\n f\"Window function {self} requires over() clause with a window spec\",\n )\n func_col = func_col.over(\n partition_by=self.window.partition_by,\n order_by=(\n desc(self.window.order_by)\n if self.window.desc\n else self.window.order_by\n ),\n )\n\n func_col.type = sql_type() if inspect.isclass(sql_type) else sql_type\n\n if col_name := self.get_col_name(label):\n func_col = func_col.label(col_name)\n\n return func_col"}], "type": ["Development"], "node": ["datachain.lib.convert.python_to_sql.python_to_sql", "datachain.lib.signal_schema.SignalSchema._get_flat_tree", "datachain.lib.signal_schema.SignalSchema.get_column_type", "datachain.func.func.get_db_col_type", "datachain.func.func.Func.get_column"], "language": "Python", "toolfunc_count": 0, "func_count": 3, "pytest_info": {"total_num": 94, "base_passed_num": 39}} {"id": ["datachain.src.datachain.lib.convert.python_to_sql.python_to_sql", "datachain.src.datachain.lib.signal_schema.SignalSchema::get_column_type", "datachain.src.datachain.lib.signal_schema.SignalSchema::_get_flat_tree", "datachain.src.datachain.lib.dc.DataChain::mutate"], "project": "datachain", "origin_file": ["datachain/lib/convert/python_to_sql.py", "datachain/lib/signal_schema.py", "datachain/func/func.py", "datachain/func/func.py", "datachain/lib/signal_schema.py", "datachain/lib/dc.py"], "test_list": ["tests/unit/test_func.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 117, "func_start_lineno": 37, "func_end_lineno": 82, "func_code": "def python_to_sql(typ): # noqa: PLR0911\n if inspect.isclass(typ):\n if issubclass(typ, SQLType):\n return typ\n if issubclass(typ, Enum):\n return str\n\n res = PYTHON_TO_SQL.get(typ)\n if res:\n return res\n\n orig = get_origin(typ)\n\n if orig in (Literal, LiteralEx):\n return String\n\n args = get_args(typ)\n if inspect.isclass(orig) and (issubclass(list, orig) or issubclass(tuple, orig)):\n if args is None:\n raise TypeError(f\"Cannot resolve type '{typ}' for flattening features\")\n\n args0 = args[0]\n if ModelStore.is_pydantic(args0):\n return Array(JSON())\n\n list_type = list_of_args_to_type(args)\n return Array(list_type)\n\n if orig is Annotated:\n # Ignoring annotations\n return python_to_sql(args[0])\n\n if inspect.isclass(orig) and issubclass(dict, orig):\n return JSON\n\n if orig == Union:\n if len(args) == 2 and (type(None) in args):\n return python_to_sql(args[0])\n\n if _is_union_str_literal(orig, args):\n return String\n\n if _is_json_inside_union(orig, args):\n return JSON\n\n raise TypeError(f\"Cannot recognize type {typ}\")"}, {"class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 464, "func_end_lineno": 479, "func_code": " def get_column_type(self, col_name: str, with_subtree: bool = False) -> DataType:\n \"\"\"\n Returns column type by column name.\n\n If `with_subtree` is True, then it will return the type of the column\n even if it has a subtree (e.g. 
model with nested fields), otherwise it will\n return the type of the column (standard type field, not the model).\n\n If column is not found, raises `SignalResolvingError`.\n \"\"\"\n for path, _type, has_subtree, _ in self.get_flat_tree():\n if (with_subtree or not has_subtree) and DEFAULT_DELIMITER.join(\n path\n ) == col_name:\n return _type\n raise SignalResolvingError([col_name], \"is not found\")"}, {"class_start_lineno": 1, "class_end_lineno": 449, "func_start_lineno": 425, "func_end_lineno": 438, "func_code": "def get_db_col_type(signals_schema: \"SignalSchema\", col: ColT) -> \"DataType\":\n if isinstance(col, tuple):\n # we can only get tuple from case statement where the first tuple item\n # is condition, and second one is value which type is important\n col = col[1]\n if isinstance(col, Func):\n return col.get_result_type(signals_schema)\n\n if isinstance(col, ColumnElement) and not hasattr(col, \"name\"):\n return sql_to_python(col)\n\n return signals_schema.get_column_type(\n col.name if isinstance(col, ColumnElement) else col # type: ignore[arg-type]\n )"}, {"class_start_lineno": 29, "class_end_lineno": 422, "func_start_lineno": 375, "func_end_lineno": 422, "func_code": " def get_column(\n self,\n signals_schema: Optional[\"SignalSchema\"] = None,\n label: Optional[str] = None,\n table: Optional[\"TableClause\"] = None,\n ) -> Column:\n col_type = self.get_result_type(signals_schema)\n sql_type = python_to_sql(col_type)\n\n def get_col(col: ColT, string_as_literal=False) -> ColT:\n # string_as_literal is used only for conditionals like `case()` where\n # literals are nested inside ColT as we have tuples of condition - values\n # and if user wants to set some case value as column, explicit `C(\"col\")`\n # syntax must be used to distinguish from literals\n if isinstance(col, tuple):\n return tuple(get_col(x, string_as_literal=True) for x in col)\n if isinstance(col, Func):\n return col.get_column(signals_schema, table=table)\n if isinstance(col, str) and not string_as_literal:\n column = Column(col, sql_type)\n column.table = table\n return column\n return col\n\n cols = [get_col(col) for col in self._db_cols]\n kwargs = {k: get_col(v, string_as_literal=True) for k, v in self.kwargs.items()}\n func_col = self.inner(*cols, *self.args, **kwargs)\n\n if self.is_window:\n if not self.window:\n raise DataChainParamsError(\n f\"Window function {self} requires over() clause with a window spec\",\n )\n func_col = func_col.over(\n partition_by=self.window.partition_by,\n order_by=(\n desc(self.window.order_by)\n if self.window.desc\n else self.window.order_by\n ),\n )\n\n func_col.type = sql_type() if inspect.isclass(sql_type) else sql_type\n\n if col_name := self.get_col_name(label):\n func_col = func_col.label(col_name)\n\n return func_col"}, {"class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 630, "func_end_lineno": 639, "func_code": " def _get_flat_tree(\n self, tree: dict, prefix: list[str], depth: int\n ) -> Iterator[tuple[list[str], DataType, bool, int]]:\n for name, (type_, substree) in tree.items():\n suffix = name.split(\".\")\n new_prefix = prefix + suffix\n has_subtree = substree is not None\n yield new_prefix, type_, has_subtree, depth\n if substree is not None:\n yield from self._get_flat_tree(substree, new_prefix, depth + 1)"}, {"class_start_lineno": 174, "class_end_lineno": 2625, "func_start_lineno": 1136, "func_end_lineno": 1215, "func_code": " def mutate(self, **kwargs) -> \"Self\":\n \"\"\"Create new signals based on existing signals.\n\n This 
method cannot modify existing columns. If you need to modify an\n existing column, use a different name for the new column and then use\n `select()` to choose which columns to keep.\n\n This method is vectorized and more efficient compared to map(), and it does not\n extract or download any data from the internal database. However, it can only\n utilize predefined built-in functions and their combinations.\n\n The supported functions:\n Numerical: +, -, *, /, rand(), avg(), count(), func(),\n greatest(), least(), max(), min(), sum()\n String: length(), split(), replace(), regexp_replace()\n Filename: name(), parent(), file_stem(), file_ext()\n Array: length(), sip_hash_64(), euclidean_distance(),\n cosine_distance()\n Window: row_number(), rank(), dense_rank(), first()\n\n Example:\n ```py\n dc.mutate(\n area=Column(\"image.height\") * Column(\"image.width\"),\n extension=file_ext(Column(\"file.name\")),\n dist=cosine_distance(embedding_text, embedding_image)\n )\n ```\n\n Window function example:\n ```py\n window = func.window(partition_by=\"file.parent\", order_by=\"file.size\")\n dc.mutate(\n row_number=func.row_number().over(window),\n )\n ```\n\n This method can be also used to rename signals. If the Column(\"name\") provided\n as value for the new signal - the old column will be dropped. Otherwise a new\n column is created.\n\n Example:\n ```py\n dc.mutate(\n newkey=Column(\"oldkey\")\n )\n ```\n \"\"\"\n primitives = (bool, str, int, float)\n\n for col_name, expr in kwargs.items():\n if not isinstance(expr, (*primitives, Column, Func)) and isinstance(\n expr.type, NullType\n ):\n raise DataChainColumnError(\n col_name, f\"Cannot infer type with expression {expr}\"\n )\n\n mutated = {}\n schema = self.signals_schema\n for name, value in kwargs.items():\n if isinstance(value, Column):\n # renaming existing column\n for signal in schema.db_signals(name=value.name, as_columns=True):\n mutated[signal.name.replace(value.name, name, 1)] = signal # type: ignore[union-attr]\n elif isinstance(value, Func):\n # adding new signal\n mutated[name] = value.get_column(schema)\n elif isinstance(value, primitives):\n # adding simple python constant primitives like str, int, float, bool\n val = literal(value)\n val.type = python_to_sql(type(value))()\n mutated[name] = val # type: ignore[assignment]\n else:\n # adding new signal\n mutated[name] = value\n\n return self._evolve(\n query=self._query.mutate(**mutated), signal_schema=schema.mutate(kwargs)\n )"}], "type": ["function_empty", "Development"], "node": ["datachain.lib.convert.python_to_sql.python_to_sql", "datachain.lib.signal_schema.SignalSchema.get_column_type", "datachain.func.func.get_db_col_type", "datachain.func.func.Func.get_column", "datachain.lib.signal_schema.SignalSchema._get_flat_tree", "datachain.lib.dc.DataChain.mutate"], "language": "Python", "toolfunc_count": 1, "func_count": 4, "pytest_info": {"total_num": 94, "base_passed_num": 39}} {"id": ["datachain.src.datachain.lib.signal_schema.SignalSchema::_get_flat_tree", "datachain.src.datachain.lib.signal_schema.SignalSchema::get_column_type", "datachain.src.datachain.lib.signal_schema.SignalSchema::mutate"], "project": "datachain", "origin_file": ["datachain/lib/signal_schema.py", "datachain/lib/signal_schema.py", "datachain/func/func.py", "datachain/lib/signal_schema.py", "datachain/lib/signal_schema.py"], "test_list": ["tests/unit/test_func.py", "tests/unit/lib/test_signal_schema.py"], "prob_info": [{"class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 630, 
"func_end_lineno": 639, "func_code": " def _get_flat_tree(\n self, tree: dict, prefix: list[str], depth: int\n ) -> Iterator[tuple[list[str], DataType, bool, int]]:\n for name, (type_, substree) in tree.items():\n suffix = name.split(\".\")\n new_prefix = prefix + suffix\n has_subtree = substree is not None\n yield new_prefix, type_, has_subtree, depth\n if substree is not None:\n yield from self._get_flat_tree(substree, new_prefix, depth + 1)"}, {"class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 464, "func_end_lineno": 479, "func_code": " def get_column_type(self, col_name: str, with_subtree: bool = False) -> DataType:\n \"\"\"\n Returns column type by column name.\n\n If `with_subtree` is True, then it will return the type of the column\n even if it has a subtree (e.g. model with nested fields), otherwise it will\n return the type of the column (standard type field, not the model).\n\n If column is not found, raises `SignalResolvingError`.\n \"\"\"\n for path, _type, has_subtree, _ in self.get_flat_tree():\n if (with_subtree or not has_subtree) and DEFAULT_DELIMITER.join(\n path\n ) == col_name:\n return _type\n raise SignalResolvingError([col_name], \"is not found\")"}, {"class_start_lineno": 1, "class_end_lineno": 449, "func_start_lineno": 425, "func_end_lineno": 438, "func_code": "def get_db_col_type(signals_schema: \"SignalSchema\", col: ColT) -> \"DataType\":\n if isinstance(col, tuple):\n # we can only get tuple from case statement where the first tuple item\n # is condition, and second one is value which type is important\n col = col[1]\n if isinstance(col, Func):\n return col.get_result_type(signals_schema)\n\n if isinstance(col, ColumnElement) and not hasattr(col, \"name\"):\n return sql_to_python(col)\n\n return signals_schema.get_column_type(\n col.name if isinstance(col, ColumnElement) else col # type: ignore[arg-type]\n )"}, {"class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 557, "func_end_lineno": 585, "func_code": " def mutate(self, args_map: dict) -> \"SignalSchema\":\n new_values = self.values.copy()\n\n for name, value in args_map.items():\n if isinstance(value, Column) and value.name in self.values:\n # renaming existing signal\n del new_values[value.name]\n new_values[name] = self.values[value.name]\n continue\n if isinstance(value, Column):\n # adding new signal from existing signal field\n try:\n new_values[name] = self.get_column_type(\n value.name, with_subtree=True\n )\n continue\n except SignalResolvingError:\n pass\n if isinstance(value, Func):\n # adding new signal with function\n new_values[name] = value.get_result_type(self)\n continue\n if isinstance(value, ColumnElement):\n # adding new signal\n new_values[name] = sql_to_python(value)\n continue\n new_values[name] = value\n\n return SignalSchema(new_values)"}, {"class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 627, "func_end_lineno": 628, "func_code": " def get_flat_tree(self) -> Iterator[tuple[list[str], DataType, bool, int]]:\n yield from self._get_flat_tree(self.tree, [], 0)"}], "type": ["Development"], "node": ["datachain.lib.signal_schema.SignalSchema._get_flat_tree", "datachain.lib.signal_schema.SignalSchema.get_column_type", "datachain.func.func.get_db_col_type", "datachain.lib.signal_schema.SignalSchema.mutate", "datachain.lib.signal_schema.SignalSchema.get_flat_tree"], "language": "Python", "toolfunc_count": 0, "func_count": 3, "pytest_info": {"total_num": 152, "base_passed_num": 84}} {"id": 
["datachain.src.datachain.lib.signal_schema.SignalSchema::_get_flat_tree", "datachain.src.datachain.lib.convert.python_to_sql.python_to_sql", "datachain.src.datachain.lib.signal_schema.SignalSchema::db_signals"], "project": "datachain", "origin_file": ["datachain/lib/signal_schema.py", "datachain/lib/signal_schema.py", "datachain/lib/convert/python_to_sql.py", "datachain/lib/signal_schema.py"], "test_list": ["tests/unit/lib/test_arrow.py", "tests/unit/lib/test_signal_schema.py"], "prob_info": [{"class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 630, "func_end_lineno": 639, "func_code": " def _get_flat_tree(\n self, tree: dict, prefix: list[str], depth: int\n ) -> Iterator[tuple[list[str], DataType, bool, int]]:\n for name, (type_, substree) in tree.items():\n suffix = name.split(\".\")\n new_prefix = prefix + suffix\n has_subtree = substree is not None\n yield new_prefix, type_, has_subtree, depth\n if substree is not None:\n yield from self._get_flat_tree(substree, new_prefix, depth + 1)"}, {"class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 627, "func_end_lineno": 628, "func_code": " def get_flat_tree(self) -> Iterator[tuple[list[str], DataType, bool, int]]:\n yield from self._get_flat_tree(self.tree, [], 0)"}, {"class_start_lineno": 1, "class_end_lineno": 117, "func_start_lineno": 37, "func_end_lineno": 82, "func_code": "def python_to_sql(typ): # noqa: PLR0911\n if inspect.isclass(typ):\n if issubclass(typ, SQLType):\n return typ\n if issubclass(typ, Enum):\n return str\n\n res = PYTHON_TO_SQL.get(typ)\n if res:\n return res\n\n orig = get_origin(typ)\n\n if orig in (Literal, LiteralEx):\n return String\n\n args = get_args(typ)\n if inspect.isclass(orig) and (issubclass(list, orig) or issubclass(tuple, orig)):\n if args is None:\n raise TypeError(f\"Cannot resolve type '{typ}' for flattening features\")\n\n args0 = args[0]\n if ModelStore.is_pydantic(args0):\n return Array(JSON())\n\n list_type = list_of_args_to_type(args)\n return Array(list_type)\n\n if orig is Annotated:\n # Ignoring annotations\n return python_to_sql(args[0])\n\n if inspect.isclass(orig) and issubclass(dict, orig):\n return JSON\n\n if orig == Union:\n if len(args) == 2 and (type(None) in args):\n return python_to_sql(args[0])\n\n if _is_union_str_literal(orig, args):\n return String\n\n if _is_json_inside_union(orig, args):\n return JSON\n\n raise TypeError(f\"Cannot recognize type {typ}\")"}, {"class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 481, "func_end_lineno": 503, "func_code": " def db_signals(\n self, name: Optional[str] = None, as_columns=False\n ) -> Union[list[str], list[Column]]:\n \"\"\"\n Returns DB columns as strings or Column objects with proper types\n Optionally, it can filter results by specific object, returning only his signals\n \"\"\"\n signals = [\n DEFAULT_DELIMITER.join(path)\n if not as_columns\n else Column(DEFAULT_DELIMITER.join(path), python_to_sql(_type))\n for path, _type, has_subtree, _ in self.get_flat_tree()\n if not has_subtree\n ]\n\n if name:\n signals = [\n s\n for s in signals\n if str(s) == name or str(s).startswith(f\"{name}{DEFAULT_DELIMITER}\")\n ]\n\n return signals # type: ignore[return-value]"}], "type": ["function_empty", "Development"], "node": ["datachain.lib.signal_schema.SignalSchema._get_flat_tree", "datachain.lib.signal_schema.SignalSchema.get_flat_tree", "datachain.lib.convert.python_to_sql.python_to_sql", "datachain.lib.signal_schema.SignalSchema.db_signals"], "language": "Python", 
"toolfunc_count": 1, "func_count": 3, "pytest_info": {"total_num": 90, "base_passed_num": 78}} {"id": ["haystack.haystack.dataclasses.chat_message.ChatMessage::__getattribute__", "haystack.haystack.dataclasses.chat_message.ChatMessage::to_dict", "haystack.haystack.components.builders.chat_prompt_builder.ChatPromptBuilder::to_dict"], "project": "haystack", "origin_file": ["haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/components/builders/chat_prompt_builder.py"], "test_list": ["test/components/builders/test_chat_prompt_builder.py"], "prob_info": [{"class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 127, "func_end_lineno": 140, "func_code": " def __getattribute__(self, name):\n \"\"\"\n This method is reimplemented to make the `content` attribute removal more visible.\n \"\"\"\n\n if name == \"content\":\n msg = (\n \"The `content` attribute of `ChatMessage` has been removed. \"\n \"Use the `text` property to access the textual value. \"\n \"For more information about the new API and how to migrate, see the documentation: \"\n \"https://docs.haystack.deepset.ai/docs/chatmessage\"\n )\n raise AttributeError(msg)\n return object.__getattribute__(self, name)"}, {"class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 167, "func_end_lineno": 171, "func_code": " def texts(self) -> List[str]:\n \"\"\"\n Returns the list of all texts contained in the message.\n \"\"\"\n return [content.text for content in self._content if isinstance(content, TextContent)]"}, {"class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 174, "func_end_lineno": 180, "func_code": " def text(self) -> Optional[str]:\n \"\"\"\n Returns the first text contained in the message.\n \"\"\"\n if texts := self.texts:\n return texts[0]\n return None"}, {"class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 153, "func_end_lineno": 157, "func_code": " def meta(self) -> Dict[str, Any]:\n \"\"\"\n Returns the metadata associated with the message.\n \"\"\"\n return self._meta"}, {"class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 293, "func_end_lineno": 316, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Converts ChatMessage into a dictionary.\n\n :returns:\n Serialized version of the object.\n \"\"\"\n serialized: Dict[str, Any] = {}\n serialized[\"_role\"] = self._role.value\n serialized[\"_meta\"] = self._meta\n serialized[\"_name\"] = self._name\n content: List[Dict[str, Any]] = []\n for part in self._content:\n if isinstance(part, TextContent):\n content.append({\"text\": part.text})\n elif isinstance(part, ToolCall):\n content.append({\"tool_call\": asdict(part)})\n elif isinstance(part, ToolCallResult):\n content.append({\"tool_call_result\": asdict(part)})\n else:\n raise TypeError(f\"Unsupported type in ChatMessage content: `{type(part).__name__}` for `{part}`.\")\n\n serialized[\"_content\"] = content\n return serialized"}, {"class_start_lineno": 18, "class_end_lineno": 272, "func_start_lineno": 240, "func_end_lineno": 254, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Returns a dictionary representation of the component.\n\n :returns:\n Serialized dictionary representation of the component.\n \"\"\"\n if self.template is not None:\n template = [m.to_dict() for m in self.template]\n else:\n template = None\n\n return default_to_dict(\n self, 
template=template, variables=self._variables, required_variables=self._required_variables\n )"}], "type": ["function_empty", "Development"], "node": ["haystack.dataclasses.chat_message.ChatMessage.__getattribute__", "haystack.dataclasses.chat_message.ChatMessage.texts", "haystack.dataclasses.chat_message.ChatMessage.text", "haystack.dataclasses.chat_message.ChatMessage.meta", "haystack.dataclasses.chat_message.ChatMessage.to_dict", "haystack.components.builders.chat_prompt_builder.ChatPromptBuilder.to_dict"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 35, "base_passed_num": 7}} {"id": ["haystack.haystack.core.serialization.default_to_dict", "haystack.haystack.document_stores.in_memory.document_store.InMemoryDocumentStore::to_dict", "haystack.haystack.components.retrievers.in_memory.bm25_retriever.InMemoryBM25Retriever::to_dict", "haystack.haystack.components.classifiers.zero_shot_document_classifier.TransformersZeroShotDocumentClassifier::to_dict", "haystack.haystack.core.serialization.component_to_dict"], "project": "haystack", "origin_file": ["haystack/core/serialization.py", "haystack/document_stores/in_memory/document_store.py", "haystack/components/retrievers/in_memory/bm25_retriever.py", "haystack/components/classifiers/zero_shot_document_classifier.py", "haystack/core/serialization.py"], "test_list": ["test/components/classifiers/test_zero_shot_document_classifier.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 172, "func_end_lineno": 210, "func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. 
Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}"}, {"class_start_lineno": 58, "class_end_lineno": 738, "func_start_lineno": 344, "func_end_lineno": 358, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n bm25_tokenization_regex=self.bm25_tokenization_regex,\n bm25_algorithm=self.bm25_algorithm,\n bm25_parameters=self.bm25_parameters,\n embedding_similarity_function=self.embedding_similarity_function,\n index=self.index,\n )"}, {"class_start_lineno": 13, "class_end_lineno": 203, "func_start_lineno": 88, "func_end_lineno": 103, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(\n self,\n document_store=docstore,\n filters=self.filters,\n top_k=self.top_k,\n scale_score=self.scale_score,\n filter_policy=self.filter_policy.value,\n )"}, {"class_start_lineno": 20, "class_end_lineno": 245, "func_start_lineno": 152, "func_end_lineno": 171, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialization_dict = default_to_dict(\n self,\n labels=self.labels,\n model=self.huggingface_pipeline_kwargs[\"model\"],\n huggingface_pipeline_kwargs=self.huggingface_pipeline_kwargs,\n token=self.token.to_dict() if self.token else None,\n )\n\n huggingface_pipeline_kwargs = serialization_dict[\"init_parameters\"][\"huggingface_pipeline_kwargs\"]\n huggingface_pipeline_kwargs.pop(\"token\", None)\n\n serialize_hf_model_kwargs(huggingface_pipeline_kwargs)\n return serialization_dict"}, {"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n 
continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"}], "type": ["function_empty", "Development"], "node": ["haystack.core.serialization.default_to_dict", "haystack.document_stores.in_memory.document_store.InMemoryDocumentStore.to_dict", "haystack.components.retrievers.in_memory.bm25_retriever.InMemoryBM25Retriever.to_dict", "haystack.components.classifiers.zero_shot_document_classifier.TransformersZeroShotDocumentClassifier.to_dict", "haystack.core.serialization.component_to_dict"], "language": "Python", "toolfunc_count": 4, "func_count": 5, "pytest_info": {"total_num": 10, "base_passed_num": 8}} {"id": ["haystack.haystack.components.converters.utils.normalize_metadata", "haystack.haystack.components.converters.utils.get_bytestream_from_source", "haystack.haystack.components.converters.json.JSONConverter::run"], "project": "haystack", "origin_file": ["haystack/components/converters/utils.py", "haystack/components/converters/utils.py", "haystack/components/converters/json.py"], "test_list": ["test/components/converters/test_json.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 51, "func_start_lineno": 30, "func_end_lineno": 51, "func_code": "def normalize_metadata(\n meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]], sources_count: int\n) -> List[Dict[str, Any]]:\n \"\"\"\n Normalize the metadata input for a converter.\n\n Given all the possible value of the meta input for a converter (None, dictionary or list of dicts),\n makes sure to return a list of dictionaries of the correct length for the converter to use.\n\n :param meta: the meta input of the converter, as-is\n :param sources_count: the number of sources the converter received\n :returns: a list of dictionaries of the make length as the sources list\n \"\"\"\n if meta is None:\n return [{}] * sources_count\n if isinstance(meta, dict):\n return [meta] * sources_count\n if isinstance(meta, list):\n if sources_count != len(meta):\n raise ValueError(\"The length of the metadata list must match the number of sources.\")\n return meta\n raise ValueError(\"meta must be either None, a dictionary or a list of dictionaries.\")"}, {"class_start_lineno": 1, "class_end_lineno": 51, "func_start_lineno": 11, "func_end_lineno": 27, "func_code": "def get_bytestream_from_source(source: Union[str, Path, ByteStream]) -> ByteStream:\n \"\"\"\n Creates a ByteStream object from a source.\n\n :param source:\n A source to convert to a ByteStream. 
Can be a string (path to a file), a Path object, or a ByteStream.\n :return:\n A ByteStream object.\n \"\"\"\n\n if isinstance(source, ByteStream):\n return source\n if isinstance(source, (str, Path)):\n bs = ByteStream.from_file_path(Path(source))\n bs.meta[\"file_path\"] = str(source)\n return bs\n raise ValueError(f\"Unsupported source type {type(source)}\")"}, {"class_start_lineno": 22, "class_end_lineno": 291, "func_start_lineno": 250, "func_end_lineno": 291, "func_code": " def run(\n self,\n sources: List[Union[str, Path, ByteStream]],\n meta: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,\n ):\n \"\"\"\n Converts a list of JSON files to documents.\n\n :param sources:\n A list of file paths or ByteStream objects.\n :param meta:\n Optional metadata to attach to the documents.\n This value can be either a list of dictionaries or a single dictionary.\n If it's a single dictionary, its content is added to the metadata of all produced documents.\n If it's a list, the length of the list must match the number of sources.\n If `sources` contain ByteStream objects, their `meta` will be added to the output documents.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: A list of created documents.\n \"\"\"\n documents = []\n meta_list = normalize_metadata(meta=meta, sources_count=len(sources))\n\n for source, metadata in zip(sources, meta_list):\n try:\n bytestream = get_bytestream_from_source(source)\n except Exception as exc:\n logger.warning(\"Could not read {source}. Skipping it. Error: {error}\", source=source, error=exc)\n continue\n\n data = self._get_content_and_meta(bytestream)\n\n for text, extra_meta in data:\n merged_metadata = {**bytestream.meta, **metadata, **extra_meta}\n\n if not self._store_full_path and (file_path := bytestream.meta.get(\"file_path\")):\n merged_metadata[\"file_path\"] = os.path.basename(file_path)\n document = Document(content=text, meta=merged_metadata)\n documents.append(document)\n\n return {\"documents\": documents}"}], "type": ["function_empty", "Development"], "node": ["haystack.components.converters.utils.normalize_metadata", "haystack.components.converters.utils.get_bytestream_from_source", "haystack.components.converters.json.JSONConverter.run"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 19, "base_passed_num": 6}} {"id": ["haystack.haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions::_parse_property_attributes", "haystack.haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions::_parse_endpoint_spec", "haystack.haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions::_openapi_to_functions"], "project": "haystack", "origin_file": ["haystack/components/converters/openapi_functions.py", "haystack/components/converters/openapi_functions.py", "haystack/components/converters/openapi_functions.py"], "test_list": ["test/components/converters/test_openapi_functions.py"], "prob_info": [{"class_start_lineno": 23, "class_end_lineno": 257, "func_start_lineno": 193, "func_end_lineno": 230, "func_code": " def _parse_property_attributes(\n self, property_schema: Dict[str, Any], include_attributes: Optional[List[str]] = None\n ) -> Dict[str, Any]:\n \"\"\"\n Parses the attributes of a property schema.\n\n Recursively parses the attributes of a property schema, including nested objects and arrays,\n and includes specified attributes like description, pattern, etc.\n\n :param property_schema: The schema of the property to parse.\n 
:param include_attributes: The list of attributes to include in the parsed schema.\n :return: The parsed schema of the property including the specified attributes.\n \"\"\"\n include_attributes = include_attributes or [\"description\", \"pattern\", \"enum\"]\n\n schema_type = property_schema.get(\"type\")\n\n parsed_schema = {\"type\": schema_type} if schema_type else {}\n for attr in include_attributes:\n if attr in property_schema:\n parsed_schema[attr] = property_schema[attr]\n\n if schema_type == \"object\":\n properties = property_schema.get(\"properties\", {})\n parsed_properties = {\n prop_name: self._parse_property_attributes(prop, include_attributes)\n for prop_name, prop in properties.items()\n }\n parsed_schema[\"properties\"] = parsed_properties\n\n if \"required\" in property_schema:\n parsed_schema[\"required\"] = property_schema[\"required\"]\n\n elif schema_type == \"array\":\n items = property_schema.get(\"items\", {})\n parsed_schema[\"items\"] = self._parse_property_attributes(items, include_attributes)\n\n return parsed_schema"}, {"class_start_lineno": 23, "class_end_lineno": 257, "func_start_lineno": 153, "func_end_lineno": 191, "func_code": " def _parse_endpoint_spec(self, resolved_spec: Dict[str, Any]) -> Optional[Dict[str, Any]]:\n if not isinstance(resolved_spec, dict):\n logger.warning(\"Invalid OpenAPI spec format provided. Could not extract function.\")\n return {}\n\n function_name = resolved_spec.get(\"operationId\")\n description = resolved_spec.get(\"description\") or resolved_spec.get(\"summary\", \"\")\n\n schema: Dict[str, Any] = {\"type\": \"object\", \"properties\": {}}\n\n # requestBody section\n req_body_schema = (\n resolved_spec.get(\"requestBody\", {}).get(\"content\", {}).get(\"application/json\", {}).get(\"schema\", {})\n )\n if \"properties\" in req_body_schema:\n for prop_name, prop_schema in req_body_schema[\"properties\"].items():\n schema[\"properties\"][prop_name] = self._parse_property_attributes(prop_schema)\n\n if \"required\" in req_body_schema:\n schema.setdefault(\"required\", []).extend(req_body_schema[\"required\"])\n\n # parameters section\n for param in resolved_spec.get(\"parameters\", []):\n if \"schema\" in param:\n schema_dict = self._parse_property_attributes(param[\"schema\"])\n # these attributes are not in param[schema] level but on param level\n useful_attributes = [\"description\", \"pattern\", \"enum\"]\n schema_dict.update({key: param[key] for key in useful_attributes if param.get(key)})\n schema[\"properties\"][param[\"name\"]] = schema_dict\n if param.get(\"required\", False):\n schema.setdefault(\"required\", []).append(param[\"name\"])\n\n if function_name and description and schema[\"properties\"]:\n return {\"name\": function_name, \"description\": description, \"parameters\": schema}\n else:\n logger.warning(\n \"Invalid OpenAPI spec format provided. 
Could not extract function from {spec}\", spec=resolved_spec\n )\n return {}"}, {"class_start_lineno": 23, "class_end_lineno": 257, "func_start_lineno": 117, "func_end_lineno": 151, "func_code": " def _openapi_to_functions(self, service_openapi_spec: Dict[str, Any]) -> List[Dict[str, Any]]:\n \"\"\"\n OpenAPI to OpenAI function conversion.\n\n Extracts functions from the OpenAPI specification of the service and converts them into a format\n suitable for OpenAI function calling.\n\n :param service_openapi_spec: The OpenAPI specification from which functions are to be extracted.\n :type service_openapi_spec: Dict[str, Any]\n :return: A list of dictionaries, each representing a function. Each dictionary includes the function's\n name, description, and a schema of its parameters.\n :rtype: List[Dict[str, Any]]\n \"\"\"\n\n # Doesn't enforce rigid spec validation because that would require a lot of dependencies\n # We check the version and require minimal fields to be present, so we can extract functions\n spec_version = service_openapi_spec.get(\"openapi\")\n if not spec_version:\n raise ValueError(f\"Invalid OpenAPI spec provided. Could not extract version from {service_openapi_spec}\")\n service_openapi_spec_version = int(spec_version.split(\".\")[0])\n\n # Compare the versions\n if service_openapi_spec_version < OpenAPIServiceToFunctions.MIN_REQUIRED_OPENAPI_SPEC_VERSION:\n raise ValueError(\n f\"Invalid OpenAPI spec version {service_openapi_spec_version}. Must be \"\n f\"at least {OpenAPIServiceToFunctions.MIN_REQUIRED_OPENAPI_SPEC_VERSION}.\"\n )\n\n functions: List[Dict[str, Any]] = []\n for paths in service_openapi_spec[\"paths\"].values():\n for path_spec in paths.values():\n function_dict = self._parse_endpoint_spec(path_spec)\n if function_dict:\n functions.append(function_dict)\n return functions"}], "type": ["function_empty", "Development"], "node": ["haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions._parse_property_attributes", "haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions._parse_endpoint_spec", "haystack.components.converters.openapi_functions.OpenAPIServiceToFunctions._openapi_to_functions"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 8, "base_passed_num": 4}} {"id": ["haystack.haystack.utils.callable_serialization.serialize_callable", "haystack.haystack.components.converters.output_adapter.OutputAdapter::to_dict", "haystack.haystack.utils.type_serialization.serialize_type", "haystack.haystack.core.serialization.component_to_dict"], "project": "haystack", "origin_file": ["haystack/utils/callable_serialization.py", "haystack/components/converters/output_adapter.py", "haystack/utils/type_serialization.py", "haystack/core/serialization.py"], "test_list": ["test/components/converters/test_output_adapter.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 12, "func_end_lineno": 42, "func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully 
qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path"}, {"class_start_lineno": 25, "class_end_lineno": 184, "func_start_lineno": 139, "func_end_lineno": 153, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n se_filters = {name: serialize_callable(filter_func) for name, filter_func in self.custom_filters.items()}\n return default_to_dict(\n self,\n template=self.template,\n output_type=serialize_type(self.output_type),\n custom_filters=se_filters,\n unsafe=self._unsafe,\n )"}, {"class_start_lineno": 1, "class_end_lineno": 170, "func_start_lineno": 19, "func_end_lineno": 52, "func_code": "def serialize_type(target: Any) -> str:\n \"\"\"\n Serializes a type or an instance to its string representation, including the module name.\n\n This function handles types, instances of types, and special typing objects.\n It assumes that non-typing objects will have a '__name__' attribute.\n\n :param target:\n The object to serialize, can be an instance or a type.\n :return:\n The string representation of the type.\n \"\"\"\n name = getattr(target, \"__name__\", str(target))\n\n # Remove the 'typing.' prefix when using python <3.9\n if name.startswith(\"typing.\"):\n name = name[7:]\n # Remove the arguments from the name when using python <3.9\n if \"[\" in name:\n name = name.split(\"[\")[0]\n\n # Get module name\n module = inspect.getmodule(target)\n module_name = \"\"\n # We omit the module name for builtins to not clutter the output\n if module and hasattr(module, \"__name__\") and module.__name__ != \"builtins\":\n module_name = f\"{module.__name__}\"\n\n args = get_args(target)\n if args:\n args_str = \", \".join([serialize_type(a) for a in args if a is not type(None)])\n return f\"{module_name}.{name}[{args_str}]\" if module_name else f\"{name}[{args_str}]\"\n\n return f\"{module_name}.{name}\" if module_name else f\"{name}\""}, {"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor 
assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"}], "type": ["function_empty", "Development"], "node": ["haystack.utils.callable_serialization.serialize_callable", "haystack.components.converters.output_adapter.OutputAdapter.to_dict", "haystack.utils.type_serialization.serialize_type", "haystack.core.serialization.component_to_dict"], "language": "Python", "toolfunc_count": 2, "func_count": 4, "pytest_info": {"total_num": 14, "base_passed_num": 10}} {"id": ["haystack.haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder::_prepare_texts_to_embed", "haystack.haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder::_embed_batch", "haystack.haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder::run"], "project": "haystack", "origin_file": ["haystack/components/embedders/hugging_face_api_document_embedder.py", "haystack/components/embedders/hugging_face_api_document_embedder.py", "haystack/components/embedders/hugging_face_api_document_embedder.py"], "test_list": ["test/components/embedders/test_hugging_face_api_document_embedder.py"], "prob_info": [{"class_start_lineno": 24, "class_end_lineno": 298, "func_start_lineno": 219, "func_end_lineno": 234, "func_code": " def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:\n \"\"\"\n Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.\n \"\"\"\n texts_to_embed = []\n for doc in documents:\n meta_values_to_embed = [\n str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key] is not None\n ]\n\n text_to_embed = (\n self.prefix + self.embedding_separator.join(meta_values_to_embed + [doc.content or \"\"]) + self.suffix\n )\n\n texts_to_embed.append(text_to_embed)\n return texts_to_embed"}, {"class_start_lineno": 24, "class_end_lineno": 298, "func_start_lineno": 236, "func_end_lineno": 271, "func_code": " def _embed_batch(self, texts_to_embed: List[str], batch_size: int) -> List[List[float]]:\n \"\"\"\n Embed a list of texts in batches.\n \"\"\"\n truncate = self.truncate\n normalize = self.normalize\n\n if self.api_type == HFEmbeddingAPIType.SERVERLESS_INFERENCE_API:\n if truncate is not None:\n msg = \"`truncate` parameter is not supported for Serverless Inference API. It will be ignored.\"\n warnings.warn(msg)\n truncate = None\n if normalize is not None:\n msg = \"`normalize` parameter is not supported for Serverless Inference API. 
It will be ignored.\"\n warnings.warn(msg)\n normalize = None\n\n all_embeddings = []\n for i in tqdm(\n range(0, len(texts_to_embed), batch_size), disable=not self.progress_bar, desc=\"Calculating embeddings\"\n ):\n batch = texts_to_embed[i : i + batch_size]\n\n np_embeddings = self._client.feature_extraction(\n # this method does not officially support list of strings, but works as expected\n text=batch, # type: ignore[arg-type]\n truncate=truncate,\n normalize=normalize,\n )\n\n if np_embeddings.ndim != 2 or np_embeddings.shape[0] != len(batch):\n raise ValueError(f\"Expected embedding shape ({batch_size}, embedding_dim), got {np_embeddings.shape}\")\n\n all_embeddings.extend(np_embeddings.tolist())\n\n return all_embeddings"}, {"class_start_lineno": 24, "class_end_lineno": 298, "func_start_lineno": 274, "func_end_lineno": 298, "func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Embeds a list of documents.\n\n :param documents:\n Documents to embed.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: A list of documents with embeddings.\n \"\"\"\n if not isinstance(documents, list) or documents and not isinstance(documents[0], Document):\n raise TypeError(\n \"HuggingFaceAPIDocumentEmbedder expects a list of Documents as input.\"\n \" In case you want to embed a string, please use the HuggingFaceAPITextEmbedder.\"\n )\n\n texts_to_embed = self._prepare_texts_to_embed(documents=documents)\n\n embeddings = self._embed_batch(texts_to_embed=texts_to_embed, batch_size=self.batch_size)\n\n for doc, emb in zip(documents, embeddings):\n doc.embedding = emb\n\n return {\"documents\": documents}"}], "type": ["function_empty", "Development"], "node": ["haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder._prepare_texts_to_embed", "haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder._embed_batch", "haystack.components.embedders.hugging_face_api_document_embedder.HuggingFaceAPIDocumentEmbedder.run"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 17, "base_passed_num": 10}} {"id": ["haystack.haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder::_prepare_texts_to_embed", "haystack.haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder::_embed_batch", "haystack.haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder::run"], "project": "haystack", "origin_file": ["haystack/components/embedders/openai_document_embedder.py", "haystack/components/embedders/openai_document_embedder.py", "haystack/components/embedders/openai_document_embedder.py"], "test_list": ["test/components/embedders/test_openai_document_embedder.py"], "prob_info": [{"class_start_lineno": 19, "class_end_lineno": 245, "func_start_lineno": 164, "func_end_lineno": 181, "func_code": " def _prepare_texts_to_embed(self, documents: List[Document]) -> Dict[str, str]:\n \"\"\"\n Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.\n \"\"\"\n texts_to_embed = {}\n for doc in documents:\n meta_values_to_embed = [\n str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta[key] is not None\n ]\n\n text_to_embed = (\n self.prefix + self.embedding_separator.join(meta_values_to_embed + [doc.content or \"\"]) + self.suffix\n )\n\n # copied from OpenAI embedding_utils (https://github.com/openai/openai-python/blob/main/openai/embeddings_utils.py)\n # 
replace newlines, which can negatively affect performance.\n texts_to_embed[doc.id] = text_to_embed.replace(\"\\n\", \" \")\n return texts_to_embed"}, {"class_start_lineno": 19, "class_end_lineno": 245, "func_start_lineno": 183, "func_end_lineno": 217, "func_code": " def _embed_batch(self, texts_to_embed: Dict[str, str], batch_size: int) -> Tuple[List[List[float]], Dict[str, Any]]:\n \"\"\"\n Embed a list of texts in batches.\n \"\"\"\n\n all_embeddings = []\n meta: Dict[str, Any] = {}\n for batch in tqdm(\n batched(texts_to_embed.items(), batch_size), disable=not self.progress_bar, desc=\"Calculating embeddings\"\n ):\n args: Dict[str, Any] = {\"model\": self.model, \"input\": [b[1] for b in batch]}\n\n if self.dimensions is not None:\n args[\"dimensions\"] = self.dimensions\n\n try:\n response = self.client.embeddings.create(**args)\n except APIError as exc:\n ids = \", \".join(b[0] for b in batch)\n msg = \"Failed embedding of documents {ids} caused by {exc}\"\n logger.exception(msg, ids=ids, exc=exc)\n continue\n\n embeddings = [el.embedding for el in response.data]\n all_embeddings.extend(embeddings)\n\n if \"model\" not in meta:\n meta[\"model\"] = response.model\n if \"usage\" not in meta:\n meta[\"usage\"] = dict(response.usage)\n else:\n meta[\"usage\"][\"prompt_tokens\"] += response.usage.prompt_tokens\n meta[\"usage\"][\"total_tokens\"] += response.usage.total_tokens\n\n return all_embeddings, meta"}, {"class_start_lineno": 19, "class_end_lineno": 245, "func_start_lineno": 220, "func_end_lineno": 245, "func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Embeds a list of documents.\n\n :param documents:\n A list of documents to embed.\n\n :returns:\n A dictionary with the following keys:\n - `documents`: A list of documents with embeddings.\n - `meta`: Information about the usage of the model.\n \"\"\"\n if not isinstance(documents, list) or documents and not isinstance(documents[0], Document):\n raise TypeError(\n \"OpenAIDocumentEmbedder expects a list of Documents as input.\"\n \"In case you want to embed a string, please use the OpenAITextEmbedder.\"\n )\n\n texts_to_embed = self._prepare_texts_to_embed(documents=documents)\n\n embeddings, meta = self._embed_batch(texts_to_embed=texts_to_embed, batch_size=self.batch_size)\n\n for doc, emb in zip(documents, embeddings):\n doc.embedding = emb\n\n return {\"documents\": documents, \"meta\": meta}"}], "type": ["function_empty", "Development"], "node": ["haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder._prepare_texts_to_embed", "haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder._embed_batch", "haystack.components.embedders.openai_document_embedder.OpenAIDocumentEmbedder.run"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 11, "base_passed_num": 6}} {"id": ["haystack.haystack.utils.type_serialization.serialize_type", "haystack.haystack.components.evaluators.llm_evaluator.LLMEvaluator::to_dict", "haystack.haystack.core.serialization.component_to_dict"], "project": "haystack", "origin_file": ["haystack/utils/type_serialization.py", "haystack/components/evaluators/llm_evaluator.py", "haystack/core/serialization.py"], "test_list": ["test/components/evaluators/test_llm_evaluator.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 170, "func_start_lineno": 19, "func_end_lineno": 52, "func_code": "def serialize_type(target: Any) -> str:\n \"\"\"\n Serializes a type or an instance to its string representation, 
including the module name.\n\n This function handles types, instances of types, and special typing objects.\n It assumes that non-typing objects will have a '__name__' attribute.\n\n :param target:\n The object to serialize, can be an instance or a type.\n :return:\n The string representation of the type.\n \"\"\"\n name = getattr(target, \"__name__\", str(target))\n\n # Remove the 'typing.' prefix when using python <3.9\n if name.startswith(\"typing.\"):\n name = name[7:]\n # Remove the arguments from the name when using python <3.9\n if \"[\" in name:\n name = name.split(\"[\")[0]\n\n # Get module name\n module = inspect.getmodule(target)\n module_name = \"\"\n # We omit the module name for builtins to not clutter the output\n if module and hasattr(module, \"__name__\") and module.__name__ != \"builtins\":\n module_name = f\"{module.__name__}\"\n\n args = get_args(target)\n if args:\n args_str = \", \".join([serialize_type(a) for a in args if a is not type(None)])\n return f\"{module_name}.{name}[{args_str}]\" if module_name else f\"{name}[{args_str}]\"\n\n return f\"{module_name}.{name}\" if module_name else f\"{name}\""}, {"class_start_lineno": 18, "class_end_lineno": 387, "func_start_lineno": 278, "func_end_lineno": 297, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n # Since we cannot currently serialize tuples, convert the inputs to a list.\n inputs = [[name, serialize_type(type_)] for name, type_ in self.inputs]\n return default_to_dict(\n self,\n instructions=self.instructions,\n inputs=inputs,\n outputs=self.outputs,\n examples=self.examples,\n api=self.api,\n api_key=self.api_key and self.api_key.to_dict(),\n api_params=self.api_params,\n progress_bar=self.progress_bar,\n )"}, {"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = 
param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"}], "type": ["function_empty", "Development"], "node": ["haystack.utils.type_serialization.serialize_type", "haystack.components.evaluators.llm_evaluator.LLMEvaluator.to_dict", "haystack.core.serialization.component_to_dict"], "language": "Python", "toolfunc_count": 1, "func_count": 3, "pytest_info": {"total_num": 17, "base_passed_num": 14}} {"id": ["haystack.haystack.components.builders.prompt_builder.PromptBuilder::_validate_variables", "haystack.haystack.components.builders.prompt_builder.PromptBuilder::run", "haystack.haystack.components.extractors.llm_metadata_extractor.LLMMetadataExtractor::_prepare_prompts"], "project": "haystack", "origin_file": ["haystack/components/builders/prompt_builder.py", "haystack/components/builders/prompt_builder.py", "haystack/components/extractors/llm_metadata_extractor.py"], "test_list": ["test/components/extractors/test_llm_metadata_extractor.py"], "prob_info": [{"class_start_lineno": 17, "class_end_lineno": 266, "func_start_lineno": 247, "func_end_lineno": 266, "func_code": " def _validate_variables(self, provided_variables: Set[str]):\n \"\"\"\n Checks if all the required template variables are provided.\n\n :param provided_variables:\n A set of provided template variables.\n :raises ValueError:\n If any of the required template variables is not provided.\n \"\"\"\n if self.required_variables == \"*\":\n required_variables = sorted(self.variables)\n else:\n required_variables = self.required_variables\n missing_variables = [var for var in required_variables if var not in provided_variables]\n if missing_variables:\n missing_vars_str = \", \".join(missing_variables)\n raise ValueError(\n f\"Missing required input variables in PromptBuilder: {missing_vars_str}. \"\n f\"Required variables: {required_variables}. Provided variables: {provided_variables}.\"\n )"}, {"class_start_lineno": 17, "class_end_lineno": 266, "func_start_lineno": 213, "func_end_lineno": 245, "func_code": " def run(self, template: Optional[str] = None, template_variables: Optional[Dict[str, Any]] = None, **kwargs):\n \"\"\"\n Renders the prompt template with the provided variables.\n\n It applies the template variables to render the final prompt. You can provide variables via pipeline kwargs.\n In order to overwrite the default template, you can set the `template` parameter.\n In order to overwrite pipeline kwargs, you can set the `template_variables` parameter.\n\n :param template:\n An optional string template to overwrite PromptBuilder's default template. 
If None, the default template\n provided at initialization is used.\n :param template_variables:\n An optional dictionary of template variables to overwrite the pipeline variables.\n :param kwargs:\n Pipeline variables used for rendering the prompt.\n\n :returns: A dictionary with the following keys:\n - `prompt`: The updated prompt text after rendering the prompt template.\n\n :raises ValueError:\n If any of the required template variables is not provided.\n \"\"\"\n kwargs = kwargs or {}\n template_variables = template_variables or {}\n template_variables_combined = {**kwargs, **template_variables}\n self._validate_variables(set(template_variables_combined.keys()))\n\n compiled_template = self.template\n if template is not None:\n compiled_template = self._env.from_string(template)\n\n result = compiled_template.render(template_variables_combined)\n return {\"prompt\": result}"}, {"class_start_lineno": 61, "class_end_lineno": 442, "func_start_lineno": 332, "func_end_lineno": 359, "func_code": " def _prepare_prompts(\n self, documents: List[Document], expanded_range: Optional[List[int]] = None\n ) -> List[Union[ChatMessage, None]]:\n all_prompts: List[Union[ChatMessage, None]] = []\n for document in documents:\n if not document.content:\n logger.warning(\"Document {doc_id} has no content. Skipping metadata extraction.\", doc_id=document.id)\n all_prompts.append(None)\n continue\n\n if expanded_range:\n doc_copy = copy.deepcopy(document)\n pages = self.splitter.run(documents=[doc_copy])\n content = \"\"\n for idx, page in enumerate(pages[\"documents\"]):\n if idx + 1 in expanded_range:\n content += page.content\n doc_copy.content = content\n else:\n doc_copy = document\n\n prompt_with_doc = self.builder.run(template=self.prompt, template_variables={\"document\": doc_copy})\n\n # build a ChatMessage with the prompt\n message = ChatMessage.from_user(prompt_with_doc[\"prompt\"])\n all_prompts.append(message)\n\n return all_prompts"}], "type": ["function_empty", "Development"], "node": ["haystack.components.builders.prompt_builder.PromptBuilder._validate_variables", "haystack.components.builders.prompt_builder.PromptBuilder.run", "haystack.components.extractors.llm_metadata_extractor.LLMMetadataExtractor._prepare_prompts"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 13, "base_passed_num": 10}} {"id": ["haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_concatenate_units", "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_split_by_character", "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::run"], "project": "haystack", "origin_file": ["haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py"], "test_list": ["test/components/extractors/test_llm_metadata_extractor.py", "test/components/retrievers/test_sentence_window_retriever.py"], "prob_info": [{"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 263, "func_end_lineno": 306, "func_code": " def _concatenate_units(\n self, elements: List[str], split_length: int, split_overlap: int, split_threshold: int\n ) -> Tuple[List[str], List[int], List[int]]:\n \"\"\"\n Concatenates the elements into parts of split_length units.\n\n Keeps track of the original page number that each element belongs. 
If the length of the current units is less\n than the pre-defined `split_threshold`, it does not create a new split. Instead, it concatenates the current\n units with the last split, preventing the creation of excessively small splits.\n \"\"\"\n\n text_splits: List[str] = []\n splits_pages: List[int] = []\n splits_start_idxs: List[int] = []\n cur_start_idx = 0\n cur_page = 1\n segments = windowed(elements, n=split_length, step=split_length - split_overlap)\n\n for seg in segments:\n current_units = [unit for unit in seg if unit is not None]\n txt = \"\".join(current_units)\n\n # check if length of current units is below split_threshold\n if len(current_units) < split_threshold and len(text_splits) > 0:\n # concatenate the last split with the current one\n text_splits[-1] += txt\n\n # NOTE: This line skips documents that have content=\"\"\n elif len(txt) > 0:\n text_splits.append(txt)\n splits_pages.append(cur_page)\n splits_start_idxs.append(cur_start_idx)\n\n processed_units = current_units[: split_length - split_overlap]\n cur_start_idx += len(\"\".join(processed_units))\n\n if self.split_by == \"page\":\n num_page_breaks = len(processed_units)\n else:\n num_page_breaks = sum(processed_unit.count(\"\\f\") for processed_unit in processed_units)\n\n cur_page += num_page_breaks\n\n return text_splits, splits_pages, splits_start_idxs"}, {"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 238, "func_end_lineno": 251, "func_code": " def _split_by_character(self, doc) -> List[Document]:\n split_at = _CHARACTER_SPLIT_BY_MAPPING[self.split_by]\n units = doc.content.split(split_at)\n # Add the delimiter back to all units except the last one\n for i in range(len(units) - 1):\n units[i] += split_at\n text_splits, splits_pages, splits_start_idxs = self._concatenate_units(\n units, self.split_length, self.split_overlap, self.split_threshold\n )\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n return self._create_docs_from_splits(\n text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata\n )"}, {"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 204, "func_end_lineno": 211, "func_code": " def _split_document(self, doc: Document) -> List[Document]:\n if self.split_by == \"sentence\" or self.respect_sentence_boundary:\n return self._split_by_nltk_sentence(doc)\n\n if self.split_by == \"function\" and self.splitting_function is not None:\n return self._split_by_function(doc)\n\n return self._split_by_character(doc)"}, {"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 166, "func_end_lineno": 202, "func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Split documents into smaller parts.\n\n Splits documents by the unit expressed in `split_by`, with a length of `split_length`\n and an overlap of `split_overlap`.\n\n :param documents: The documents to split.\n :returns: A dictionary with the following key:\n - `documents`: List of documents with the split texts. Each document includes:\n - A metadata field `source_id` to track the original document.\n - A metadata field `page_number` to track the original page number.\n - All other metadata copied from the original document.\n\n :raises TypeError: if the input is not a list of Documents.\n :raises ValueError: if the content of a document is None.\n \"\"\"\n if self._use_sentence_splitter and self.sentence_splitter is None:\n raise RuntimeError(\n \"The component DocumentSplitter wasn't warmed up. 
Run 'warm_up()' before calling 'run()'.\"\n )\n\n if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):\n raise TypeError(\"DocumentSplitter expects a List of Documents as input.\")\n\n split_docs: List[Document] = []\n for doc in documents:\n if doc.content is None:\n raise ValueError(\n f\"DocumentSplitter only works with text documents but content for document ID {doc.id} is None.\"\n )\n if doc.content == \"\":\n logger.warning(\"Document ID {doc_id} has an empty content. Skipping this document.\", doc_id=doc.id)\n continue\n\n split_docs += self._split_document(doc)\n return {\"documents\": split_docs}"}], "type": ["function_empty", "Development"], "node": ["haystack.components.preprocessors.document_splitter.DocumentSplitter._concatenate_units", "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_by_character", "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_document", "haystack.components.preprocessors.document_splitter.DocumentSplitter.run"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 29, "base_passed_num": 27}} {"id": ["haystack.haystack.utils.callable_serialization.serialize_callable", "haystack.haystack.components.generators.azure.AzureOpenAIGenerator::to_dict", "haystack.haystack.core.serialization.component_to_dict"], "project": "haystack", "origin_file": ["haystack/utils/callable_serialization.py", "haystack/components/generators/azure.py", "haystack/core/serialization.py"], "test_list": ["test/components/generators/test_azure.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 12, "func_end_lineno": 42, "func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path"}, {"class_start_lineno": 19, "class_end_lineno": 210, "func_start_lineno": 162, "func_end_lineno": 188, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n azure_ad_token_provider_name = None\n if self.azure_ad_token_provider:\n azure_ad_token_provider_name = serialize_callable(self.azure_ad_token_provider)\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n 
organization=self.organization,\n api_version=self.api_version,\n streaming_callback=callback_name,\n generation_kwargs=self.generation_kwargs,\n system_prompt=self.system_prompt,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n timeout=self.timeout,\n max_retries=self.max_retries,\n default_headers=self.default_headers,\n azure_ad_token_provider=azure_ad_token_provider_name,\n )"}, {"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"}], "type": ["function_empty", "Development"], "node": ["haystack.utils.callable_serialization.serialize_callable", "haystack.components.generators.azure.AzureOpenAIGenerator.to_dict", "haystack.core.serialization.component_to_dict"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 7, "base_passed_num": 4}} {"id": ["haystack.haystack.utils.callable_serialization.serialize_callable", "haystack.haystack.components.generators.chat.azure.AzureOpenAIChatGenerator::to_dict", "haystack.haystack.core.serialization.component_to_dict"], "project": "haystack", "origin_file": ["haystack/utils/callable_serialization.py", "haystack/components/generators/chat/azure.py", "haystack/core/serialization.py"], "test_list": ["test/components/generators/chat/test_azure.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 80, "func_start_lineno": 12, "func_end_lineno": 42, "func_code": "def serialize_callable(callable_handle: Callable) -> str:\n \"\"\"\n Serializes a callable to its full path.\n\n :param callable_handle: The callable to serialize\n :return: The full path of the callable\n \"\"\"\n try:\n 
full_arg_spec = inspect.getfullargspec(callable_handle)\n is_instance_method = bool(full_arg_spec.args and full_arg_spec.args[0] == \"self\")\n except TypeError:\n is_instance_method = False\n if is_instance_method:\n raise SerializationError(\"Serialization of instance methods is not supported.\")\n\n # __qualname__ contains the fully qualified path we need for classmethods and staticmethods\n qualname = getattr(callable_handle, \"__qualname__\", \"\")\n if \"<lambda>\" in qualname:\n raise SerializationError(\"Serialization of lambdas is not supported.\")\n if \"<locals>\" in qualname:\n raise SerializationError(\"Serialization of nested functions is not supported.\")\n\n name = qualname or callable_handle.__name__\n\n # Get the full package path of the function\n module = inspect.getmodule(callable_handle)\n if module is not None:\n full_path = f\"{module.__name__}.{name}\"\n else:\n full_path = name\n return full_path"}, {"class_start_lineno": 20, "class_end_lineno": 226, "func_start_lineno": 177, "func_end_lineno": 204, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n azure_ad_token_provider_name = None\n if self.azure_ad_token_provider:\n azure_ad_token_provider_name = serialize_callable(self.azure_ad_token_provider)\n return default_to_dict(\n self,\n azure_endpoint=self.azure_endpoint,\n azure_deployment=self.azure_deployment,\n organization=self.organization,\n api_version=self.api_version,\n streaming_callback=callback_name,\n generation_kwargs=self.generation_kwargs,\n timeout=self.timeout,\n max_retries=self.max_retries,\n api_key=self.api_key.to_dict() if self.api_key is not None else None,\n azure_ad_token=self.azure_ad_token.to_dict() if self.azure_ad_token is not None else None,\n default_headers=self.default_headers,\n tools=[tool.to_dict() for tool in self.tools] if self.tools else None,\n tools_strict=self.tools_strict,\n azure_ad_token_provider=azure_ad_token_provider_name,\n )"}, {"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' 
\"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"}], "type": ["function_empty", "Development"], "node": ["haystack.utils.callable_serialization.serialize_callable", "haystack.components.generators.chat.azure.AzureOpenAIChatGenerator.to_dict", "haystack.core.serialization.component_to_dict"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 8, "base_passed_num": 5}} {"id": ["haystack.haystack.dataclasses.chat_message.ChatMessage::__getattribute__", "haystack.haystack.dataclasses.chat_message.ChatMessage::to_openai_dict_format", "haystack.haystack.components.generators.chat.openai.OpenAIChatGenerator::run"], "project": "haystack", "origin_file": ["haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/components/generators/chat/openai.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/dataclasses/chat_message.py", "haystack/components/generators/chat/openai.py"], "test_list": ["test/components/generators/chat/test_openai.py"], "prob_info": [{"class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 127, "func_end_lineno": 140, "func_code": " def __getattribute__(self, name):\n \"\"\"\n This method is reimplemented to make the `content` attribute removal more visible.\n \"\"\"\n\n if name == \"content\":\n msg = (\n \"The `content` attribute of `ChatMessage` has been removed. \"\n \"Use the `text` property to access the textual value. 
\"\n \"For more information about the new API and how to migrate, see the documentation: \"\n \"https://docs.haystack.deepset.ai/docs/chatmessage\"\n )\n raise AttributeError(msg)\n return object.__getattribute__(self, name)"}, {"class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 357, "func_end_lineno": 403, "func_code": " def to_openai_dict_format(self) -> Dict[str, Any]:\n \"\"\"\n Convert a ChatMessage to the dictionary format expected by OpenAI's Chat API.\n \"\"\"\n text_contents = self.texts\n tool_calls = self.tool_calls\n tool_call_results = self.tool_call_results\n\n if not text_contents and not tool_calls and not tool_call_results:\n raise ValueError(\n \"A `ChatMessage` must contain at least one `TextContent`, `ToolCall`, or `ToolCallResult`.\"\n )\n if len(text_contents) + len(tool_call_results) > 1:\n raise ValueError(\"A `ChatMessage` can only contain one `TextContent` or one `ToolCallResult`.\")\n\n openai_msg: Dict[str, Any] = {\"role\": self._role.value}\n\n # Add name field if present\n if self._name is not None:\n openai_msg[\"name\"] = self._name\n\n if tool_call_results:\n result = tool_call_results[0]\n if result.origin.id is None:\n raise ValueError(\"`ToolCall` must have a non-null `id` attribute to be used with OpenAI.\")\n openai_msg[\"content\"] = result.result\n openai_msg[\"tool_call_id\"] = result.origin.id\n # OpenAI does not provide a way to communicate errors in tool invocations, so we ignore the error field\n return openai_msg\n\n if text_contents:\n openai_msg[\"content\"] = text_contents[0]\n if tool_calls:\n openai_tool_calls = []\n for tc in tool_calls:\n if tc.id is None:\n raise ValueError(\"`ToolCall` must have a non-null `id` attribute to be used with OpenAI.\")\n openai_tool_calls.append(\n {\n \"id\": tc.id,\n \"type\": \"function\",\n # We disable ensure_ascii so special chars like emojis are not converted\n \"function\": {\"name\": tc.tool_name, \"arguments\": json.dumps(tc.arguments, ensure_ascii=False)},\n }\n )\n openai_msg[\"tool_calls\"] = openai_tool_calls\n return openai_msg"}, {"class_start_lineno": 32, "class_end_lineno": 571, "func_start_lineno": 355, "func_end_lineno": 397, "func_code": " def _prepare_api_call( # noqa: PLR0913\n self,\n *,\n messages: List[ChatMessage],\n streaming_callback: Optional[StreamingCallbackT] = None,\n generation_kwargs: Optional[Dict[str, Any]] = None,\n tools: Optional[List[Tool]] = None,\n tools_strict: Optional[bool] = None,\n ) -> Dict[str, Any]:\n # update generation kwargs by merging with the generation kwargs passed to the run method\n generation_kwargs = {**self.generation_kwargs, **(generation_kwargs or {})}\n\n # adapt ChatMessage(s) to the format expected by the OpenAI API\n openai_formatted_messages = [message.to_openai_dict_format() for message in messages]\n\n tools = tools or self.tools\n tools_strict = tools_strict if tools_strict is not None else self.tools_strict\n _check_duplicate_tool_names(tools)\n\n openai_tools = {}\n if tools:\n tool_definitions = []\n for t in tools:\n function_spec = {**t.tool_spec}\n if tools_strict:\n function_spec[\"strict\"] = True\n function_spec[\"parameters\"][\"additionalProperties\"] = False\n tool_definitions.append({\"type\": \"function\", \"function\": function_spec})\n openai_tools = {\"tools\": tool_definitions}\n\n is_streaming = streaming_callback is not None\n num_responses = generation_kwargs.pop(\"n\", 1)\n if is_streaming and num_responses > 1:\n raise ValueError(\"Cannot stream multiple responses, please set n=1.\")\n\n 
return {\n \"model\": self.model,\n \"messages\": openai_formatted_messages, # type: ignore[arg-type] # openai expects list of specific message types\n \"stream\": streaming_callback is not None,\n \"n\": num_responses,\n **openai_tools,\n **generation_kwargs,\n }"}, {"class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 167, "func_end_lineno": 171, "func_code": " def texts(self) -> List[str]:\n \"\"\"\n Returns the list of all texts contained in the message.\n \"\"\"\n return [content.text for content in self._content if isinstance(content, TextContent)]"}, {"class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 183, "func_end_lineno": 187, "func_code": " def tool_calls(self) -> List[ToolCall]:\n \"\"\"\n Returns the list of all Tool calls contained in the message.\n \"\"\"\n return [content for content in self._content if isinstance(content, ToolCall)]"}, {"class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 199, "func_end_lineno": 203, "func_code": " def tool_call_results(self) -> List[ToolCallResult]:\n \"\"\"\n Returns the list of all Tool call results contained in the message.\n \"\"\"\n return [content for content in self._content if isinstance(content, ToolCallResult)]"}, {"class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 153, "func_end_lineno": 157, "func_code": " def meta(self) -> Dict[str, Any]:\n \"\"\"\n Returns the metadata associated with the message.\n \"\"\"\n return self._meta"}, {"class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 174, "func_end_lineno": 180, "func_code": " def text(self) -> Optional[str]:\n \"\"\"\n Returns the first text contained in the message.\n \"\"\"\n if texts := self.texts:\n return texts[0]\n return None"}, {"class_start_lineno": 88, "class_end_lineno": 481, "func_start_lineno": 190, "func_end_lineno": 196, "func_code": " def tool_call(self) -> Optional[ToolCall]:\n \"\"\"\n Returns the first Tool call contained in the message.\n \"\"\"\n if tool_calls := self.tool_calls:\n return tool_calls[0]\n return None"}, {"class_start_lineno": 32, "class_end_lineno": 571, "func_start_lineno": 210, "func_end_lineno": 277, "func_code": " def run(\n self,\n messages: List[ChatMessage],\n streaming_callback: Optional[StreamingCallbackT] = None,\n generation_kwargs: Optional[Dict[str, Any]] = None,\n *,\n tools: Optional[List[Tool]] = None,\n tools_strict: Optional[bool] = None,\n ):\n \"\"\"\n Invokes chat completion based on the provided messages and generation parameters.\n\n :param messages:\n A list of ChatMessage instances representing the input messages.\n :param streaming_callback:\n A callback function that is called when a new token is received from the stream.\n :param generation_kwargs:\n Additional keyword arguments for text generation. These parameters will\n override the parameters passed during component initialization.\n For details on OpenAI API parameters, see [OpenAI documentation](https://platform.openai.com/docs/api-reference/chat/create).\n :param tools:\n A list of tools for which the model can prepare calls. If set, it will override the `tools` parameter set\n during component initialization.\n :param tools_strict:\n Whether to enable strict schema adherence for tool calls. 
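The `to_openai_dict_format` logic captured above maps a chat message's text and tool calls onto the wire format OpenAI's Chat API expects. Below is a minimal, hedged sketch of that mapping; `SimpleToolCall` and `build_openai_message` are hypothetical stand-ins for illustration, not part of Haystack's API.

```python
import json
from dataclasses import dataclass
from typing import Any, Dict, List, Optional


@dataclass
class SimpleToolCall:
    # Hypothetical container: tool name, JSON-serializable arguments, provider-assigned id.
    id: str
    tool_name: str
    arguments: Dict[str, Any]


def build_openai_message(role: str, text: Optional[str] = None,
                         tool_calls: Optional[List[SimpleToolCall]] = None) -> Dict[str, Any]:
    """Assemble a dict shaped like a message for OpenAI's chat.completions endpoint."""
    msg: Dict[str, Any] = {"role": role}
    if text is not None:
        msg["content"] = text
    if tool_calls:
        msg["tool_calls"] = [
            {
                "id": tc.id,
                "type": "function",
                # ensure_ascii=False keeps non-ASCII argument values (e.g. emojis) intact
                "function": {"name": tc.tool_name, "arguments": json.dumps(tc.arguments, ensure_ascii=False)},
            }
            for tc in tool_calls
        ]
    return msg


if __name__ == "__main__":
    call = SimpleToolCall(id="call_1", tool_name="weather", arguments={"city": "Berlin"})
    print(build_openai_message("assistant", tool_calls=[call]))
```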
If set to `True`, the model will follow exactly\n the schema provided in the `parameters` field of the tool definition, but this may increase latency.\n If set, it will override the `tools_strict` parameter set during component initialization.\n\n :returns:\n A dictionary with the following key:\n - `replies`: A list containing the generated responses as ChatMessage instances.\n \"\"\"\n if len(messages) == 0:\n return {\"replies\": []}\n\n streaming_callback = streaming_callback or self.streaming_callback\n\n api_args = self._prepare_api_call(\n messages=messages,\n streaming_callback=streaming_callback,\n generation_kwargs=generation_kwargs,\n tools=tools,\n tools_strict=tools_strict,\n )\n chat_completion: Union[Stream[ChatCompletionChunk], ChatCompletion] = self.client.chat.completions.create(\n **api_args\n )\n\n is_streaming = isinstance(chat_completion, Stream)\n assert is_streaming or streaming_callback is None\n\n if is_streaming:\n completions = self._handle_stream_response(\n chat_completion, # type: ignore\n streaming_callback, # type: ignore\n )\n else:\n assert isinstance(chat_completion, ChatCompletion), \"Unexpected response type for non-streaming request.\"\n completions = [\n self._convert_chat_completion_to_chat_message(chat_completion, choice)\n for choice in chat_completion.choices\n ]\n\n # before returning, do post-processing of the completions\n for message in completions:\n self._check_finish_reason(message.meta)\n\n return {\"replies\": completions}"}], "type": ["function_empty", "Development"], "node": ["haystack.dataclasses.chat_message.ChatMessage.__getattribute__", "haystack.dataclasses.chat_message.ChatMessage.to_openai_dict_format", "haystack.components.generators.chat.openai.OpenAIChatGenerator._prepare_api_call", "haystack.dataclasses.chat_message.ChatMessage.texts", "haystack.dataclasses.chat_message.ChatMessage.tool_calls", "haystack.dataclasses.chat_message.ChatMessage.tool_call_results", "haystack.dataclasses.chat_message.ChatMessage.meta", "haystack.dataclasses.chat_message.ChatMessage.text", "haystack.dataclasses.chat_message.ChatMessage.tool_call", "haystack.components.generators.chat.openai.OpenAIChatGenerator.run"], "language": "Python", "toolfunc_count": 1, "func_count": 3, "pytest_info": {"total_num": 19, "base_passed_num": 10}} {"id": ["haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::_find_and_remove_header_footer", "haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::_find_longest_common_ngram", "haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::_ngram", "haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::_allngram", "haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::_ascii_only", "haystack.haystack.components.preprocessors.document_cleaner.DocumentCleaner::run"], "project": "haystack", "origin_file": ["haystack/components/preprocessors/document_cleaner.py", "haystack/components/preprocessors/document_cleaner.py", "haystack/components/preprocessors/document_cleaner.py", "haystack/components/preprocessors/document_cleaner.py", "haystack/components/preprocessors/document_cleaner.py", "haystack/components/preprocessors/document_cleaner.py", "haystack/components/preprocessors/document_cleaner.py"], "test_list": ["test/components/preprocessors/test_document_cleaner.py"], "prob_info": [{"class_start_lineno": 18, "class_end_lineno": 325, "func_start_lineno": 231, "func_end_lineno": 267, "func_code": " def 
_find_and_remove_header_footer(\n self, text: str, n_chars: int, n_first_pages_to_ignore: int, n_last_pages_to_ignore: int\n ) -> str:\n \"\"\"\n Heuristic to find footers and headers across different pages by searching for the longest common string.\n\n Pages in the text need to be separated by form feed character \"\\f\".\n For headers, we only search in the first n_chars characters (for footer: last n_chars).\n Note: This heuristic uses exact matches and therefore works well for footers like \"Copyright 2019 by XXX\",\n but won't detect \"Page 3 of 4\" or similar.\n\n :param n_chars: The number of first/last characters where the header/footer shall be searched in.\n :param n_first_pages_to_ignore: The number of first pages to ignore\n (e.g. TOCs often don't contain footer/header).\n :param n_last_pages_to_ignore: The number of last pages to ignore.\n :returns: The text without the found headers and footers.\n \"\"\"\n\n pages = text.split(\"\\f\")\n\n # header\n start_of_pages = [p[:n_chars] for p in pages[n_first_pages_to_ignore:-n_last_pages_to_ignore]]\n found_header = self._find_longest_common_ngram(start_of_pages)\n if found_header:\n pages = [page.replace(found_header, \"\") for page in pages]\n\n # footer\n end_of_pages = [p[-n_chars:] for p in pages[n_first_pages_to_ignore:-n_last_pages_to_ignore]]\n found_footer = self._find_longest_common_ngram(end_of_pages)\n if found_footer:\n pages = [page.replace(found_footer, \"\") for page in pages]\n\n logger.debug(\n \"Removed header '{header}' and footer '{footer}' in document\", header=found_header, footer=found_footer\n )\n text = \"\\f\".join(pages)\n return text"}, {"class_start_lineno": 18, "class_end_lineno": 325, "func_start_lineno": 306, "func_end_lineno": 325, "func_code": " def _find_longest_common_ngram(self, sequences: List[str], min_ngram: int = 3, max_ngram: int = 30) -> str:\n \"\"\"\n Find the longest common ngram across a list of text sequences (e.g. start of pages).\n\n Considering all ngram lengths between the minimum and maximum length. Helpful for finding footers, headers etc.\n Empty sequences are ignored.\n\n :param sequences: The list of strings that shall be searched for common n_grams.\n :param max_ngram: The maximum length of ngram to consider.\n :param min_ngram: The minimum length of ngram to consider.\n :returns: The longest ngram that all sequences have in common.\n \"\"\"\n sequences = [s for s in sequences if s] # filter empty sequences\n if not sequences:\n return \"\"\n seqs_ngrams = map(partial(self._allngram, min_ngram=min_ngram, max_ngram=max_ngram), sequences)\n intersection = reduce(set.intersection, seqs_ngrams)\n\n longest = max(intersection, key=len, default=\"\")\n return longest if longest.strip() else \"\""}, {"class_start_lineno": 18, "class_end_lineno": 325, "func_start_lineno": 269, "func_end_lineno": 288, "func_code": " def _ngram(self, seq: str, n: int) -> Generator[str, None, None]:\n \"\"\"\n Return all ngrams of length n from a text sequence. 
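The header/footer heuristic recorded above intersects the sets of all word n-grams taken from the first (or last) few hundred characters of every page and keeps the longest string they all share. A rough, hedged sketch of the same idea follows; the helper names are mine and, unlike the original, it does not preserve the exact whitespace of the matched span.

```python
from functools import reduce
from typing import List, Set


def word_ngrams(text: str, n: int) -> Set[str]:
    # All contiguous runs of n whitespace-separated words.
    words = text.split()
    return {" ".join(words[i:i + n]) for i in range(len(words) - n + 1)}


def all_ngrams(text: str, min_n: int = 3, max_n: int = 30) -> Set[str]:
    result: Set[str] = set()
    for n in range(min_n, max_n):
        result |= word_ngrams(text, n)
    return result


def longest_common_ngram(page_starts: List[str]) -> str:
    """Longest word n-gram shared by every non-empty page start (candidate header)."""
    page_starts = [p for p in page_starts if p]
    if not page_starts:
        return ""
    common = reduce(set.intersection, (all_ngrams(p) for p in page_starts))
    return max(common, key=len, default="")


if __name__ == "__main__":
    pages = ["ACME Corp Annual Report\nIntroduction ...", "ACME Corp Annual Report\nChapter 2 ..."]
    print(longest_common_ngram([p[:300] for p in pages]))  # -> "ACME Corp Annual Report"
```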
Each ngram consists of n words split by whitespace.\n\n :param seq: The sequence to generate ngrams from.\n :param n: The length of the ngrams to generate.\n :returns: A Generator generating all ngrams of length n from the given sequence.\n \"\"\"\n\n # In order to maintain the original whitespace, but still consider \\n and \\t for n-gram tokenization,\n # we add a space here and remove it after creation of the ngrams again (see below)\n seq = seq.replace(\"\\n\", \" \\n\")\n seq = seq.replace(\"\\t\", \" \\t\")\n\n words = seq.split(\" \")\n ngrams = (\n \" \".join(words[i : i + n]).replace(\" \\n\", \"\\n\").replace(\" \\t\", \"\\t\") for i in range(0, len(words) - n + 1)\n )\n\n return ngrams"}, {"class_start_lineno": 18, "class_end_lineno": 325, "func_start_lineno": 290, "func_end_lineno": 304, "func_code": " def _allngram(self, seq: str, min_ngram: int, max_ngram: int) -> Set[str]:\n \"\"\"\n Generates all possible ngrams from a given sequence of text.\n\n Considering all ngram lengths between the minimum and maximum length.\n\n :param seq: The sequence to generate ngrams from.\n :param min_ngram: The minimum length of ngram to consider.\n :param max_ngram: The maximum length of ngram to consider.\n :returns: A set of all ngrams from the given sequence.\n \"\"\"\n lengths = range(min_ngram, max_ngram) if max_ngram else range(min_ngram, len(seq))\n ngrams = map(partial(self._ngram, seq), lengths)\n res = set(chain.from_iterable(ngrams))\n return res"}, {"class_start_lineno": 18, "class_end_lineno": 325, "func_start_lineno": 219, "func_end_lineno": 229, "func_code": " def _remove_repeated_substrings(self, text: str) -> str:\n \"\"\"\n Remove any substrings from the text that occur repeatedly on every page. For example headers or footers.\n\n Pages in the text need to be separated by form feed character \"\\f\".\n :param text: Text to clean.\n :returns: The text without the repeated substrings.\n \"\"\"\n return self._find_and_remove_header_footer(\n text, n_chars=300, n_first_pages_to_ignore=1, n_last_pages_to_ignore=1\n )"}, {"class_start_lineno": 18, "class_end_lineno": 325, "func_start_lineno": 158, "func_end_lineno": 171, "func_code": " def _ascii_only(self, text: str) -> str:\n \"\"\"\n Convert the text to ASCII only.\n\n Will remove accents from characters and replace them with ASCII characters.\n Other non-ASCII characters will be removed.\n\n :param text: Text to convert to ASCII only.\n :returns: The text in ASCII only.\n \"\"\"\n\n # First normalize the text to NFKD to separate the characters and their diacritics\n # Then encode it to ASCII and ignore any characters that can't be encoded\n return self._normalize_unicode(text, \"NFKD\").encode(\"ascii\", \"ignore\").decode(\"utf-8\")"}, {"class_start_lineno": 18, "class_end_lineno": 325, "func_start_lineno": 93, "func_end_lineno": 145, "func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Cleans up the documents.\n\n :param documents: List of Documents to clean.\n\n :returns: A dictionary with the following key:\n - `documents`: List of cleaned Documents.\n\n :raises TypeError: if documents is not a list of Documents.\n \"\"\"\n if not isinstance(documents, list) or documents and not isinstance(documents[0], Document):\n raise TypeError(\"DocumentCleaner expects a List of Documents as input.\")\n\n cleaned_docs = []\n for doc in documents:\n if doc.content is None:\n logger.warning(\n \"DocumentCleaner only cleans text documents but document.content for document ID\"\n \" %{document_id} is None.\",\n 
document_id=doc.id,\n )\n cleaned_docs.append(doc)\n continue\n text = doc.content\n\n if self.unicode_normalization:\n text = self._normalize_unicode(text, self.unicode_normalization)\n if self.ascii_only:\n text = self._ascii_only(text)\n if self.remove_extra_whitespaces:\n text = self._remove_extra_whitespaces(text)\n if self.remove_empty_lines:\n text = self._remove_empty_lines(text)\n if self.remove_substrings:\n text = self._remove_substrings(text, self.remove_substrings)\n if self.remove_regex:\n text = self._remove_regex(text, self.remove_regex)\n if self.remove_repeated_substrings:\n text = self._remove_repeated_substrings(text)\n\n clean_doc = Document(\n id=doc.id if self.keep_id else \"\",\n content=text,\n blob=doc.blob,\n meta=deepcopy(doc.meta),\n score=doc.score,\n embedding=doc.embedding,\n sparse_embedding=doc.sparse_embedding,\n )\n cleaned_docs.append(clean_doc)\n\n return {\"documents\": cleaned_docs}"}], "type": ["function_empty", "Development"], "node": ["haystack.components.preprocessors.document_cleaner.DocumentCleaner._find_and_remove_header_footer", "haystack.components.preprocessors.document_cleaner.DocumentCleaner._find_longest_common_ngram", "haystack.components.preprocessors.document_cleaner.DocumentCleaner._ngram", "haystack.components.preprocessors.document_cleaner.DocumentCleaner._allngram", "haystack.components.preprocessors.document_cleaner.DocumentCleaner._remove_repeated_substrings", "haystack.components.preprocessors.document_cleaner.DocumentCleaner._ascii_only", "haystack.components.preprocessors.document_cleaner.DocumentCleaner.run"], "language": "Python", "toolfunc_count": 4, "func_count": 6, "pytest_info": {"total_num": 14, "base_passed_num": 3}} {"id": ["haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_concatenate_units", "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_split_by_character", "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_split_by_function", "haystack.haystack.components.preprocessors.sentence_tokenizer.SentenceSplitter::split_sentences", "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_split_by_nltk_sentence", "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::run"], "project": "haystack", "origin_file": ["haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/sentence_tokenizer.py", "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py"], "test_list": ["test/components/preprocessors/test_document_splitter.py"], "prob_info": [{"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 263, "func_end_lineno": 306, "func_code": " def _concatenate_units(\n self, elements: List[str], split_length: int, split_overlap: int, split_threshold: int\n ) -> Tuple[List[str], List[int], List[int]]:\n \"\"\"\n Concatenates the elements into parts of split_length units.\n\n Keeps track of the original page number that each element belongs. If the length of the current units is less\n than the pre-defined `split_threshold`, it does not create a new split. 
Instead, it concatenates the current\n units with the last split, preventing the creation of excessively small splits.\n \"\"\"\n\n text_splits: List[str] = []\n splits_pages: List[int] = []\n splits_start_idxs: List[int] = []\n cur_start_idx = 0\n cur_page = 1\n segments = windowed(elements, n=split_length, step=split_length - split_overlap)\n\n for seg in segments:\n current_units = [unit for unit in seg if unit is not None]\n txt = \"\".join(current_units)\n\n # check if length of current units is below split_threshold\n if len(current_units) < split_threshold and len(text_splits) > 0:\n # concatenate the last split with the current one\n text_splits[-1] += txt\n\n # NOTE: This line skips documents that have content=\"\"\n elif len(txt) > 0:\n text_splits.append(txt)\n splits_pages.append(cur_page)\n splits_start_idxs.append(cur_start_idx)\n\n processed_units = current_units[: split_length - split_overlap]\n cur_start_idx += len(\"\".join(processed_units))\n\n if self.split_by == \"page\":\n num_page_breaks = len(processed_units)\n else:\n num_page_breaks = sum(processed_unit.count(\"\\f\") for processed_unit in processed_units)\n\n cur_page += num_page_breaks\n\n return text_splits, splits_pages, splits_start_idxs"}, {"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 238, "func_end_lineno": 251, "func_code": " def _split_by_character(self, doc) -> List[Document]:\n split_at = _CHARACTER_SPLIT_BY_MAPPING[self.split_by]\n units = doc.content.split(split_at)\n # Add the delimiter back to all units except the last one\n for i in range(len(units) - 1):\n units[i] += split_at\n text_splits, splits_pages, splits_start_idxs = self._concatenate_units(\n units, self.split_length, self.split_overlap, self.split_threshold\n )\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n return self._create_docs_from_splits(\n text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata\n )"}, {"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 253, "func_end_lineno": 261, "func_code": " def _split_by_function(self, doc) -> List[Document]:\n # the check for None is done already in the run method\n splits = self.splitting_function(doc.content) # type: ignore\n docs: List[Document] = []\n for s in splits:\n meta = deepcopy(doc.meta)\n meta[\"source_id\"] = doc.id\n docs.append(Document(content=s, meta=meta))\n return docs"}, {"class_start_lineno": 116, "class_end_lineno": 238, "func_start_lineno": 147, "func_end_lineno": 159, "func_code": " def split_sentences(self, text: str) -> List[Dict[str, Any]]:\n \"\"\"\n Splits a text into sentences including references to original char positions for each split.\n\n :param text: The text to split.\n :returns: list of sentences with positions.\n \"\"\"\n sentence_spans = list(self.sentence_tokenizer.span_tokenize(text))\n if self.use_split_rules:\n sentence_spans = SentenceSplitter._apply_split_rules(text, sentence_spans)\n\n sentences = [{\"sentence\": text[start:end], \"start\": start, \"end\": end} for start, end in sentence_spans]\n return sentences"}, {"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 213, "func_end_lineno": 236, "func_code": " def _split_by_nltk_sentence(self, doc: Document) -> List[Document]:\n split_docs = []\n\n result = self.sentence_splitter.split_sentences(doc.content) # type: ignore # None check is done in run()\n units = [sentence[\"sentence\"] for sentence in result]\n\n if self.respect_sentence_boundary:\n 
text_splits, splits_pages, splits_start_idxs = self._concatenate_sentences_based_on_word_amount(\n sentences=units, split_length=self.split_length, split_overlap=self.split_overlap\n )\n else:\n text_splits, splits_pages, splits_start_idxs = self._concatenate_units(\n elements=units,\n split_length=self.split_length,\n split_overlap=self.split_overlap,\n split_threshold=self.split_threshold,\n )\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n split_docs += self._create_docs_from_splits(\n text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata\n )\n\n return split_docs"}, {"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 204, "func_end_lineno": 211, "func_code": " def _split_document(self, doc: Document) -> List[Document]:\n if self.split_by == \"sentence\" or self.respect_sentence_boundary:\n return self._split_by_nltk_sentence(doc)\n\n if self.split_by == \"function\" and self.splitting_function is not None:\n return self._split_by_function(doc)\n\n return self._split_by_character(doc)"}, {"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 166, "func_end_lineno": 202, "func_code": " def run(self, documents: List[Document]):\n \"\"\"\n Split documents into smaller parts.\n\n Splits documents by the unit expressed in `split_by`, with a length of `split_length`\n and an overlap of `split_overlap`.\n\n :param documents: The documents to split.\n :returns: A dictionary with the following key:\n - `documents`: List of documents with the split texts. Each document includes:\n - A metadata field `source_id` to track the original document.\n - A metadata field `page_number` to track the original page number.\n - All other metadata copied from the original document.\n\n :raises TypeError: if the input is not a list of Documents.\n :raises ValueError: if the content of a document is None.\n \"\"\"\n if self._use_sentence_splitter and self.sentence_splitter is None:\n raise RuntimeError(\n \"The component DocumentSplitter wasn't warmed up. Run 'warm_up()' before calling 'run()'.\"\n )\n\n if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):\n raise TypeError(\"DocumentSplitter expects a List of Documents as input.\")\n\n split_docs: List[Document] = []\n for doc in documents:\n if doc.content is None:\n raise ValueError(\n f\"DocumentSplitter only works with text documents but content for document ID {doc.id} is None.\"\n )\n if doc.content == \"\":\n logger.warning(\"Document ID {doc_id} has an empty content. 
Skipping this document.\", doc_id=doc.id)\n continue\n\n split_docs += self._split_document(doc)\n return {\"documents\": split_docs}"}], "type": ["function_empty", "Development"], "node": ["haystack.components.preprocessors.document_splitter.DocumentSplitter._concatenate_units", "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_by_character", "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_by_function", "haystack.components.preprocessors.sentence_tokenizer.SentenceSplitter.split_sentences", "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_by_nltk_sentence", "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_document", "haystack.components.preprocessors.document_splitter.DocumentSplitter.run"], "language": "Python", "toolfunc_count": 3, "func_count": 6, "pytest_info": {"total_num": 53, "base_passed_num": 20}} {"id": ["haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_concatenate_units", "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_split_by_character", "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_split_by_function", "haystack.haystack.components.preprocessors.sentence_tokenizer.SentenceSplitter::split_sentences", "haystack.haystack.components.preprocessors.document_splitter.DocumentSplitter::_split_by_nltk_sentence"], "project": "haystack", "origin_file": ["haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/sentence_tokenizer.py", "haystack/components/preprocessors/document_splitter.py", "haystack/components/preprocessors/document_splitter.py"], "test_list": ["test/components/preprocessors/test_document_splitter.py"], "prob_info": [{"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 263, "func_end_lineno": 306, "func_code": " def _concatenate_units(\n self, elements: List[str], split_length: int, split_overlap: int, split_threshold: int\n ) -> Tuple[List[str], List[int], List[int]]:\n \"\"\"\n Concatenates the elements into parts of split_length units.\n\n Keeps track of the original page number that each element belongs. If the length of the current units is less\n than the pre-defined `split_threshold`, it does not create a new split. 
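The concatenation step described here walks the unit list with a sliding window (`windowed`, assumed to be `more_itertools.windowed`), gluing `split_length` units into one chunk and stepping forward by `split_length - split_overlap` so consecutive chunks share an overlap. A small hedged sketch of that grouping, with a made-up helper name:

```python
from typing import List

from more_itertools import windowed


def concatenate_units(units: List[str], split_length: int, split_overlap: int) -> List[str]:
    """Join `split_length` units per chunk, stepping so consecutive chunks overlap."""
    chunks: List[str] = []
    step = split_length - split_overlap
    for window in windowed(units, n=split_length, step=step):
        text = "".join(u for u in window if u is not None)  # trailing window may be padded with None
        if text:
            chunks.append(text)
    return chunks


if __name__ == "__main__":
    words = ["one ", "two ", "three ", "four ", "five "]
    print(concatenate_units(words, split_length=3, split_overlap=1))
    # -> ['one two three ', 'three four five ']
```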
Instead, it concatenates the current\n units with the last split, preventing the creation of excessively small splits.\n \"\"\"\n\n text_splits: List[str] = []\n splits_pages: List[int] = []\n splits_start_idxs: List[int] = []\n cur_start_idx = 0\n cur_page = 1\n segments = windowed(elements, n=split_length, step=split_length - split_overlap)\n\n for seg in segments:\n current_units = [unit for unit in seg if unit is not None]\n txt = \"\".join(current_units)\n\n # check if length of current units is below split_threshold\n if len(current_units) < split_threshold and len(text_splits) > 0:\n # concatenate the last split with the current one\n text_splits[-1] += txt\n\n # NOTE: This line skips documents that have content=\"\"\n elif len(txt) > 0:\n text_splits.append(txt)\n splits_pages.append(cur_page)\n splits_start_idxs.append(cur_start_idx)\n\n processed_units = current_units[: split_length - split_overlap]\n cur_start_idx += len(\"\".join(processed_units))\n\n if self.split_by == \"page\":\n num_page_breaks = len(processed_units)\n else:\n num_page_breaks = sum(processed_unit.count(\"\\f\") for processed_unit in processed_units)\n\n cur_page += num_page_breaks\n\n return text_splits, splits_pages, splits_start_idxs"}, {"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 238, "func_end_lineno": 251, "func_code": " def _split_by_character(self, doc) -> List[Document]:\n split_at = _CHARACTER_SPLIT_BY_MAPPING[self.split_by]\n units = doc.content.split(split_at)\n # Add the delimiter back to all units except the last one\n for i in range(len(units) - 1):\n units[i] += split_at\n text_splits, splits_pages, splits_start_idxs = self._concatenate_units(\n units, self.split_length, self.split_overlap, self.split_threshold\n )\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n return self._create_docs_from_splits(\n text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata\n )"}, {"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 253, "func_end_lineno": 261, "func_code": " def _split_by_function(self, doc) -> List[Document]:\n # the check for None is done already in the run method\n splits = self.splitting_function(doc.content) # type: ignore\n docs: List[Document] = []\n for s in splits:\n meta = deepcopy(doc.meta)\n meta[\"source_id\"] = doc.id\n docs.append(Document(content=s, meta=meta))\n return docs"}, {"class_start_lineno": 116, "class_end_lineno": 238, "func_start_lineno": 147, "func_end_lineno": 159, "func_code": " def split_sentences(self, text: str) -> List[Dict[str, Any]]:\n \"\"\"\n Splits a text into sentences including references to original char positions for each split.\n\n :param text: The text to split.\n :returns: list of sentences with positions.\n \"\"\"\n sentence_spans = list(self.sentence_tokenizer.span_tokenize(text))\n if self.use_split_rules:\n sentence_spans = SentenceSplitter._apply_split_rules(text, sentence_spans)\n\n sentences = [{\"sentence\": text[start:end], \"start\": start, \"end\": end} for start, end in sentence_spans]\n return sentences"}, {"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 213, "func_end_lineno": 236, "func_code": " def _split_by_nltk_sentence(self, doc: Document) -> List[Document]:\n split_docs = []\n\n result = self.sentence_splitter.split_sentences(doc.content) # type: ignore # None check is done in run()\n units = [sentence[\"sentence\"] for sentence in result]\n\n if self.respect_sentence_boundary:\n 
text_splits, splits_pages, splits_start_idxs = self._concatenate_sentences_based_on_word_amount(\n sentences=units, split_length=self.split_length, split_overlap=self.split_overlap\n )\n else:\n text_splits, splits_pages, splits_start_idxs = self._concatenate_units(\n elements=units,\n split_length=self.split_length,\n split_overlap=self.split_overlap,\n split_threshold=self.split_threshold,\n )\n metadata = deepcopy(doc.meta)\n metadata[\"source_id\"] = doc.id\n split_docs += self._create_docs_from_splits(\n text_splits=text_splits, splits_pages=splits_pages, splits_start_idxs=splits_start_idxs, meta=metadata\n )\n\n return split_docs"}, {"class_start_lineno": 22, "class_end_lineno": 490, "func_start_lineno": 204, "func_end_lineno": 211, "func_code": " def _split_document(self, doc: Document) -> List[Document]:\n if self.split_by == \"sentence\" or self.respect_sentence_boundary:\n return self._split_by_nltk_sentence(doc)\n\n if self.split_by == \"function\" and self.splitting_function is not None:\n return self._split_by_function(doc)\n\n return self._split_by_character(doc)"}], "type": ["function_empty", "Development"], "node": ["haystack.components.preprocessors.document_splitter.DocumentSplitter._concatenate_units", "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_by_character", "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_by_function", "haystack.components.preprocessors.sentence_tokenizer.SentenceSplitter.split_sentences", "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_by_nltk_sentence", "haystack.components.preprocessors.document_splitter.DocumentSplitter._split_document"], "language": "Python", "toolfunc_count": 2, "func_count": 5, "pytest_info": {"total_num": 53, "base_passed_num": 20}} {"id": ["haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_chunk_length", "haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_split_chunk", "haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_apply_overlap"], "project": "haystack", "origin_file": ["haystack/components/preprocessors/recursive_splitter.py", "haystack/components/preprocessors/recursive_splitter.py", "haystack/components/preprocessors/recursive_splitter.py", "haystack/components/preprocessors/recursive_splitter.py"], "test_list": ["test/components/preprocessors/test_recursive_splitter.py"], "prob_info": [{"class_start_lineno": 15, "class_end_lineno": 421, "func_start_lineno": 215, "func_end_lineno": 227, "func_code": " def _chunk_length(self, text: str) -> int:\n \"\"\"\n Split the text by whitespace and count non-empty elements.\n\n :param: The text to be split.\n :return: The number of words in the text.\n \"\"\"\n\n if self.split_units == \"word\":\n words = [word for word in text.split(\" \") if word]\n return len(words)\n\n return len(text)"}, {"class_start_lineno": 15, "class_end_lineno": 421, "func_start_lineno": 204, "func_end_lineno": 213, "func_code": " def _get_overlap(self, overlapped_chunks: List[str]) -> Tuple[str, str]:\n \"\"\"Get the previous overlapped chunk instead of the original chunk.\"\"\"\n prev_chunk = overlapped_chunks[-1]\n overlap_start = max(0, self._chunk_length(prev_chunk) - self.split_overlap)\n if self.split_units == \"word\":\n word_chunks = prev_chunk.split()\n overlap = \" \".join(word_chunks[overlap_start:])\n else:\n overlap = prev_chunk[overlap_start:]\n return overlap, prev_chunk"}, 
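The word-level helpers recorded just above count words and take the last `split_overlap` words of the previous chunk, while the next entry trims a chunk to `split_length` words and carries the remainder forward. A hedged, standalone sketch of that bookkeeping (helper names are mine, not the component's API):

```python
from typing import Tuple


def split_words(chunk: str, split_length: int) -> Tuple[str, str]:
    """Keep the first `split_length` words, return the rest as the remainder."""
    words = chunk.split()
    return " ".join(words[:split_length]), " ".join(words[split_length:])


def tail_overlap(chunk: str, split_overlap: int) -> str:
    """Last `split_overlap` words of a chunk, used to prefix the next one."""
    words = chunk.split()
    return " ".join(words[max(0, len(words) - split_overlap):])


if __name__ == "__main__":
    head, rest = split_words("alpha beta gamma delta epsilon", split_length=3)
    print(head)                                 # alpha beta gamma
    print(tail_overlap(head, 1) + " " + rest)   # gamma delta epsilon
```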
{"class_start_lineno": 15, "class_end_lineno": 421, "func_start_lineno": 114, "func_end_lineno": 133, "func_code": " def _split_chunk(self, current_chunk: str) -> Tuple[str, str]:\n \"\"\"\n Splits a chunk based on the split_length and split_units attribute.\n\n :param current_chunk: The current chunk to be split.\n :returns:\n A tuple containing the current chunk and the remaining words or characters.\n \"\"\"\n\n if self.split_units == \"word\":\n words = current_chunk.split()\n current_chunk = \" \".join(words[: self.split_length])\n remaining_words = words[self.split_length :]\n return current_chunk, \" \".join(remaining_words)\n\n # split by characters\n text = current_chunk\n current_chunk = text[: self.split_length]\n remaining_chars = text[self.split_length :]\n return current_chunk, remaining_chars"}, {"class_start_lineno": 15, "class_end_lineno": 421, "func_start_lineno": 135, "func_end_lineno": 202, "func_code": " def _apply_overlap(self, chunks: List[str]) -> List[str]:\n \"\"\"\n Applies an overlap between consecutive chunks if the chunk_overlap attribute is greater than zero.\n\n Works for both word- and character-level splitting. It trims the last chunk if it exceeds the split_length and\n adds the trimmed content to the next chunk. If the last chunk is still too long after trimming, it splits it\n and adds the first chunk to the list. This process continues until the last chunk is within the split_length.\n\n :param chunks: A list of text chunks.\n :returns:\n A list of text chunks with the overlap applied.\n \"\"\"\n overlapped_chunks: List[str] = []\n\n for idx, chunk in enumerate(chunks):\n if idx == 0:\n overlapped_chunks.append(chunk)\n continue\n\n # get the overlap between the current and previous chunk\n overlap, prev_chunk = self._get_overlap(overlapped_chunks)\n if overlap == prev_chunk:\n logger.warning(\n \"Overlap is the same as the previous chunk. 
\"\n \"Consider increasing the `split_length` parameter or decreasing the `split_overlap` parameter.\"\n )\n\n # create a new chunk starting with the overlap\n current_chunk = overlap + \" \" + chunk if self.split_units == \"word\" else overlap + chunk\n\n # if this new chunk exceeds 'split_length', trim it and move the remaining text to the next chunk\n # if this is the last chunk, another new chunk will contain the trimmed text preceded by the overlap\n # of the last chunk\n if self._chunk_length(current_chunk) > self.split_length:\n current_chunk, remaining_text = self._split_chunk(current_chunk)\n if idx < len(chunks) - 1:\n chunks[idx + 1] = remaining_text + (\" \" if self.split_units == \"word\" else \"\") + chunks[idx + 1]\n elif remaining_text:\n # create a new chunk with the trimmed text preceded by the overlap of the last chunk\n overlapped_chunks.append(current_chunk)\n chunk = remaining_text\n overlap, _ = self._get_overlap(overlapped_chunks)\n current_chunk = overlap + \" \" + chunk if self.split_units == \"word\" else overlap + chunk\n\n overlapped_chunks.append(current_chunk)\n\n # it can still be that the new last chunk exceeds the 'split_length'\n # continue splitting until the last chunk is within 'split_length'\n if idx == len(chunks) - 1 and self._chunk_length(current_chunk) > self.split_length:\n last_chunk = overlapped_chunks.pop()\n first_chunk, remaining_chunk = self._split_chunk(last_chunk)\n overlapped_chunks.append(first_chunk)\n\n while remaining_chunk:\n # combine overlap with remaining chunk\n overlap, _ = self._get_overlap(overlapped_chunks)\n current = overlap + (\" \" if self.split_units == \"word\" else \"\") + remaining_chunk\n\n # if it fits within split_length we are done\n if self._chunk_length(current) <= self.split_length:\n overlapped_chunks.append(current)\n break\n\n # otherwise split it again\n first_chunk, remaining_chunk = self._split_chunk(current)\n overlapped_chunks.append(first_chunk)\n\n return overlapped_chunks"}], "type": ["function_empty", "Development"], "node": ["haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._chunk_length", "haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._get_overlap", "haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._split_chunk", "haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._apply_overlap"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 35, "base_passed_num": 9}} {"id": ["haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_chunk_length", "haystack.haystack.components.preprocessors.sentence_tokenizer.SentenceSplitter::split_sentences", "haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_chunk_text", "haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::_run_one", "haystack.haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter::run"], "project": "haystack", "origin_file": ["haystack/components/preprocessors/recursive_splitter.py", "haystack/components/preprocessors/sentence_tokenizer.py", "haystack/components/preprocessors/recursive_splitter.py", "haystack/components/preprocessors/recursive_splitter.py", "haystack/components/preprocessors/recursive_splitter.py"], "test_list": ["test/components/preprocessors/test_recursive_splitter.py"], "prob_info": [{"class_start_lineno": 15, "class_end_lineno": 421, 
"func_start_lineno": 215, "func_end_lineno": 227, "func_code": " def _chunk_length(self, text: str) -> int:\n \"\"\"\n Split the text by whitespace and count non-empty elements.\n\n :param: The text to be split.\n :return: The number of words in the text.\n \"\"\"\n\n if self.split_units == \"word\":\n words = [word for word in text.split(\" \") if word]\n return len(words)\n\n return len(text)"}, {"class_start_lineno": 116, "class_end_lineno": 238, "func_start_lineno": 147, "func_end_lineno": 159, "func_code": " def split_sentences(self, text: str) -> List[Dict[str, Any]]:\n \"\"\"\n Splits a text into sentences including references to original char positions for each split.\n\n :param text: The text to split.\n :returns: list of sentences with positions.\n \"\"\"\n sentence_spans = list(self.sentence_tokenizer.span_tokenize(text))\n if self.use_split_rules:\n sentence_spans = SentenceSplitter._apply_split_rules(text, sentence_spans)\n\n sentences = [{\"sentence\": text[start:end], \"start\": start, \"end\": end} for start, end in sentence_spans]\n return sentences"}, {"class_start_lineno": 15, "class_end_lineno": 421, "func_start_lineno": 229, "func_end_lineno": 311, "func_code": " def _chunk_text(self, text: str) -> List[str]:\n \"\"\"\n Recursive chunking algorithm that divides text into smaller chunks based on a list of separator characters.\n\n It starts with a list of separator characters (e.g., [\"\\n\\n\", \"sentence\", \"\\n\", \" \"]) and attempts to divide\n the text using the first separator. If the resulting chunks are still larger than the specified chunk size,\n it moves to the next separator in the list. This process continues recursively, progressively applying each\n specific separator until the chunks meet the desired size criteria.\n\n :param text: The text to be split into chunks.\n :returns:\n A list of text chunks.\n \"\"\"\n if self._chunk_length(text) <= self.split_length:\n return [text]\n\n for curr_separator in self.separators: # type: ignore # the caller already checked that separators is not None\n if curr_separator == \"sentence\":\n # re. 
ignore: correct SentenceSplitter initialization is checked at the initialization of the component\n sentence_with_spans = self.nltk_tokenizer.split_sentences(text) # type: ignore\n splits = [sentence[\"sentence\"] for sentence in sentence_with_spans]\n else:\n # add escape \"\\\" to the separator and wrapped it in a group so that it's included in the splits as well\n escaped_separator = re.escape(curr_separator)\n escaped_separator = f\"({escaped_separator})\"\n\n # split the text and merge every two consecutive splits, i.e.: the text and the separator after it\n splits = re.split(escaped_separator, text)\n splits = [\n \"\".join([splits[i], splits[i + 1]]) if i < len(splits) - 1 else splits[i]\n for i in range(0, len(splits), 2)\n ]\n\n # remove last split if it's empty\n splits = splits[:-1] if splits[-1] == \"\" else splits\n\n if len(splits) == 1: # go to next separator, if current separator not found in the text\n continue\n\n chunks = []\n current_chunk: List[str] = []\n current_length = 0\n\n # check splits, if any is too long, recursively chunk it, otherwise add to current chunk\n for split in splits:\n split_text = split\n\n # if adding this split exceeds chunk_size, process current_chunk\n if current_length + self._chunk_length(split_text) > self.split_length:\n # process current_chunk\n if current_chunk: # keep the good splits\n chunks.append(\"\".join(current_chunk))\n current_chunk = []\n current_length = 0\n\n # recursively handle splits that are too large\n if self._chunk_length(split_text) > self.split_length:\n if curr_separator == self.separators[-1]:\n # tried last separator, can't split further, do a fixed-split based on word/character\n fall_back_chunks = self._fall_back_to_fixed_chunking(split_text, self.split_units)\n chunks.extend(fall_back_chunks)\n else:\n chunks.extend(self._chunk_text(split_text))\n current_length += self._chunk_length(split_text)\n\n else:\n current_chunk.append(split_text)\n current_length += self._chunk_length(split_text)\n else:\n current_chunk.append(split_text)\n current_length += self._chunk_length(split_text)\n\n if current_chunk:\n chunks.append(\"\".join(current_chunk))\n\n if self.split_overlap > 0:\n chunks = self._apply_overlap(chunks)\n\n if chunks:\n return chunks\n\n # if no separator worked, fall back to word- or character-level chunking\n return self._fall_back_to_fixed_chunking(text, self.split_units)"}, {"class_start_lineno": 15, "class_end_lineno": 421, "func_start_lineno": 368, "func_end_lineno": 402, "func_code": " def _run_one(self, doc: Document) -> List[Document]:\n chunks = self._chunk_text(doc.content) # type: ignore # the caller already check for a non-empty doc.content\n chunks = chunks[:-1] if len(chunks[-1]) == 0 else chunks # remove last empty chunk if it exists\n current_position = 0\n current_page = 1\n\n new_docs: List[Document] = []\n\n for split_nr, chunk in enumerate(chunks):\n new_doc = Document(content=chunk, meta=deepcopy(doc.meta))\n new_doc.meta[\"split_id\"] = split_nr\n new_doc.meta[\"split_idx_start\"] = current_position\n new_doc.meta[\"_split_overlap\"] = [] if self.split_overlap > 0 else None\n\n # add overlap information to the previous and current doc\n if split_nr > 0 and self.split_overlap > 0:\n self._add_overlap_info(current_position, new_doc, new_docs)\n\n # count page breaks in the chunk\n current_page += chunk.count(\"\\f\")\n\n # if there are consecutive page breaks at the end with no more text, adjust the page number\n # e.g: \"text\\f\\f\\f\" -> 3 page breaks, but current_page should 
be 1\n consecutive_page_breaks = len(chunk) - len(chunk.rstrip(\"\\f\"))\n\n if consecutive_page_breaks > 0:\n new_doc.meta[\"page_number\"] = current_page - consecutive_page_breaks\n else:\n new_doc.meta[\"page_number\"] = current_page\n\n # keep the new chunk doc and update the current position\n new_docs.append(new_doc)\n current_position += len(chunk) - (self.split_overlap if split_nr < len(chunks) - 1 else 0)\n\n return new_docs"}, {"class_start_lineno": 15, "class_end_lineno": 421, "func_start_lineno": 405, "func_end_lineno": 421, "func_code": " def run(self, documents: List[Document]) -> Dict[str, List[Document]]:\n \"\"\"\n Split a list of documents into documents with smaller chunks of text.\n\n :param documents: List of Documents to split.\n :returns:\n A dictionary containing a key \"documents\" with a List of Documents with smaller chunks of text corresponding\n to the input documents.\n \"\"\"\n docs = []\n for doc in documents:\n if not doc.content or doc.content == \"\":\n logger.warning(\"Document ID {doc_id} has an empty content. Skipping this document.\", doc_id=doc.id)\n continue\n docs.extend(self._run_one(doc))\n\n return {\"documents\": docs}"}], "type": ["function_empty", "Development"], "node": ["haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._chunk_length", "haystack.components.preprocessors.sentence_tokenizer.SentenceSplitter.split_sentences", "haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._chunk_text", "haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter._run_one", "haystack.components.preprocessors.recursive_splitter.RecursiveDocumentSplitter.run"], "language": "Python", "toolfunc_count": 4, "func_count": 5, "pytest_info": {"total_num": 35, "base_passed_num": 8}} {"id": ["haystack.haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker::_greedy_diversity_order", "haystack.haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker::_maximum_margin_relevance", "haystack.haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker::run"], "project": "haystack", "origin_file": ["haystack/components/rankers/sentence_transformers_diversity.py", "haystack/components/rankers/sentence_transformers_diversity.py", "haystack/components/rankers/sentence_transformers_diversity.py"], "test_list": ["test/components/rankers/test_sentence_transformers_diversity.py"], "prob_info": [{"class_start_lineno": 76, "class_end_lineno": 435, "func_start_lineno": 279, "func_end_lineno": 323, "func_code": " def _greedy_diversity_order(self, query: str, documents: List[Document]) -> List[Document]:\n \"\"\"\n Orders the given list of documents to maximize diversity.\n\n The algorithm first calculates embeddings for each document and the query. It starts by selecting the document\n that is semantically closest to the query. Then, for each remaining document, it selects the one that, on\n average, is least similar to the already selected documents. 
This process continues until all documents are\n selected, resulting in a list where each subsequent document contributes the most to the overall diversity of\n the selected set.\n\n :param query: The search query.\n :param documents: The list of Document objects to be ranked.\n\n :return: A list of documents ordered to maximize diversity.\n \"\"\"\n texts_to_embed = self._prepare_texts_to_embed(documents)\n\n doc_embeddings, query_embedding = self._embed_and_normalize(query, texts_to_embed)\n\n n = len(documents)\n selected: List[int] = []\n\n # Compute the similarity vector between the query and documents\n query_doc_sim = query_embedding @ doc_embeddings.T\n\n # Start with the document with the highest similarity to the query\n selected.append(int(torch.argmax(query_doc_sim).item()))\n\n selected_sum = doc_embeddings[selected[0]] / n\n\n while len(selected) < n:\n # Compute mean of dot products of all selected documents and all other documents\n similarities = selected_sum @ doc_embeddings.T\n # Mask documents that are already selected\n similarities[selected] = torch.inf\n # Select the document with the lowest total similarity score\n index_unselected = int(torch.argmin(similarities).item())\n selected.append(index_unselected)\n # It's enough just to add to the selected vectors because dot product is distributive\n # It's divided by n for numerical stability\n selected_sum += doc_embeddings[index_unselected] / n\n\n ranked_docs: List[Document] = [documents[i] for i in selected]\n\n return ranked_docs"}, {"class_start_lineno": 76, "class_end_lineno": 435, "func_start_lineno": 336, "func_end_lineno": 380, "func_code": " def _maximum_margin_relevance(\n self, query: str, documents: List[Document], lambda_threshold: float, top_k: int\n ) -> List[Document]:\n \"\"\"\n Orders the given list of documents according to the Maximum Margin Relevance (MMR) scores.\n\n MMR scores are calculated for each document based on their relevance to the query and diversity from already\n selected documents.\n\n The algorithm iteratively selects documents based on their MMR scores, balancing between relevance to the query\n and diversity from already selected documents. 
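The greedy ordering shown above can be paraphrased in plain NumPy: normalise the vectors, seed with the document closest to the query, then repeatedly take the document whose mean similarity to the already-selected set is lowest. This is a sketch with toy random embeddings, assuming NumPy as a stand-in for the ranker's actual torch implementation.

```python
import numpy as np


def greedy_diversity_order(query_emb: np.ndarray, doc_embs: np.ndarray) -> list:
    """Return document indices ordered to maximise diversity (greedy heuristic)."""
    # Normalise so dot products are cosine similarities.
    query_emb = query_emb / np.linalg.norm(query_emb)
    doc_embs = doc_embs / np.linalg.norm(doc_embs, axis=1, keepdims=True)

    n = len(doc_embs)
    selected = [int(np.argmax(doc_embs @ query_emb))]  # start with the doc closest to the query
    selected_sum = doc_embs[selected[0]] / n

    while len(selected) < n:
        sims = doc_embs @ selected_sum          # mean similarity to selected docs (scaled by 1/n)
        sims[selected] = np.inf                 # never re-pick an already selected document
        pick = int(np.argmin(sims))
        selected.append(pick)
        selected_sum += doc_embs[pick] / n
    return selected


if __name__ == "__main__":
    rng = np.random.default_rng(0)
    docs = rng.normal(size=(5, 8))
    query = rng.normal(size=8)
    print(greedy_diversity_order(query, docs))
```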
The 'lambda_threshold' controls the trade-off between relevance\n and diversity.\n\n A closer value to 0 favors diversity, while a closer value to 1 favors relevance to the query.\n\n See : \"The Use of MMR, Diversity-Based Reranking for Reordering Documents and Producing Summaries\"\n https://www.cs.cmu.edu/~jgc/publication/The_Use_MMR_Diversity_Based_LTMIR_1998.pdf\n \"\"\"\n\n texts_to_embed = self._prepare_texts_to_embed(documents)\n doc_embeddings, query_embedding = self._embed_and_normalize(query, texts_to_embed)\n top_k = top_k if top_k else len(documents)\n\n selected: List[int] = []\n query_similarities_as_tensor = query_embedding @ doc_embeddings.T\n query_similarities = query_similarities_as_tensor.reshape(-1)\n idx = int(torch.argmax(query_similarities))\n selected.append(idx)\n while len(selected) < top_k:\n best_idx = None\n best_score = -float(\"inf\")\n for idx, _ in enumerate(documents):\n if idx in selected:\n continue\n relevance_score = query_similarities[idx]\n diversity_score = max(doc_embeddings[idx] @ doc_embeddings[j].T for j in selected)\n mmr_score = lambda_threshold * relevance_score - (1 - lambda_threshold) * diversity_score\n if mmr_score > best_score:\n best_score = mmr_score\n best_idx = idx\n if best_idx is None:\n raise ValueError(\"No best document found, check if the documents list contains any documents.\")\n selected.append(best_idx)\n\n return [documents[i] for i in selected]"}, {"class_start_lineno": 76, "class_end_lineno": 435, "func_start_lineno": 388, "func_end_lineno": 435, "func_code": " def run(\n self,\n query: str,\n documents: List[Document],\n top_k: Optional[int] = None,\n lambda_threshold: Optional[float] = None,\n ) -> Dict[str, List[Document]]:\n \"\"\"\n Rank the documents based on their diversity.\n\n :param query: The search query.\n :param documents: List of Document objects to be ranker.\n :param top_k: Optional. An integer to override the top_k set during initialization.\n :param lambda_threshold: Override the trade-off parameter between relevance and diversity. Only used when\n strategy is \"maximum_margin_relevance\".\n\n :returns: A dictionary with the following key:\n - `documents`: List of Document objects that have been selected based on the diversity ranking.\n\n :raises ValueError: If the top_k value is less than or equal to 0.\n :raises RuntimeError: If the component has not been warmed up.\n \"\"\"\n if self.model is None:\n error_msg = (\n \"The component SentenceTransformersDiversityRanker wasn't warmed up. 
\"\n \"Run 'warm_up()' before calling 'run()'.\"\n )\n raise RuntimeError(error_msg)\n\n if not documents:\n return {\"documents\": []}\n\n if top_k is None:\n top_k = self.top_k\n elif not 0 < top_k <= len(documents):\n raise ValueError(f\"top_k must be between 1 and {len(documents)}, but got {top_k}\")\n\n if self.strategy == DiversityRankingStrategy.MAXIMUM_MARGIN_RELEVANCE:\n if lambda_threshold is None:\n lambda_threshold = self.lambda_threshold\n self._check_lambda_threshold(lambda_threshold, self.strategy)\n re_ranked_docs = self._maximum_margin_relevance(\n query=query, documents=documents, lambda_threshold=lambda_threshold, top_k=top_k\n )\n else:\n re_ranked_docs = self._greedy_diversity_order(query=query, documents=documents)\n\n return {\"documents\": re_ranked_docs[:top_k]}"}], "type": ["function_empty", "Development"], "node": ["haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker._greedy_diversity_order", "haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker._maximum_margin_relevance", "haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker.run"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 53, "base_passed_num": 23}} {"id": ["haystack.haystack.utils.device.ComponentDevice::to_dict", "haystack.haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker::to_dict", "haystack.haystack.core.serialization.component_to_dict"], "project": "haystack", "origin_file": ["haystack/utils/device.py", "haystack/components/rankers/sentence_transformers_diversity.py", "haystack/core/serialization.py"], "test_list": ["test/components/rankers/test_sentence_transformers_diversity.py"], "prob_info": [{"class_start_lineno": 240, "class_end_lineno": 480, "func_start_lineno": 450, "func_end_lineno": 463, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Convert the component device representation to a JSON-serializable dictionary.\n\n :returns:\n The dictionary representation.\n \"\"\"\n if self._single_device is not None:\n return {\"type\": \"single\", \"device\": str(self._single_device)}\n elif self._multiple_devices is not None:\n return {\"type\": \"multiple\", \"device_map\": self._multiple_devices.to_dict()}\n else:\n # Unreachable\n assert False"}, {"class_start_lineno": 76, "class_end_lineno": 435, "func_start_lineno": 212, "func_end_lineno": 241, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialization_dict = default_to_dict(\n self,\n model=self.model_name_or_path,\n top_k=self.top_k,\n device=self.device.to_dict(),\n token=self.token.to_dict() if self.token else None,\n similarity=str(self.similarity),\n query_prefix=self.query_prefix,\n query_suffix=self.query_suffix,\n document_prefix=self.document_prefix,\n document_suffix=self.document_suffix,\n meta_fields_to_embed=self.meta_fields_to_embed,\n embedding_separator=self.embedding_separator,\n strategy=str(self.strategy),\n lambda_threshold=self.lambda_threshold,\n model_kwargs=self.model_kwargs,\n tokenizer_kwargs=self.tokenizer_kwargs,\n config_kwargs=self.config_kwargs,\n backend=self.backend,\n )\n if serialization_dict[\"init_parameters\"].get(\"model_kwargs\") is not None:\n serialize_hf_model_kwargs(serialization_dict[\"init_parameters\"][\"model_kwargs\"])\n return serialization_dict"}, 
{"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"}], "type": ["function_empty", "Development"], "node": ["haystack.utils.device.ComponentDevice.to_dict", "haystack.components.rankers.sentence_transformers_diversity.SentenceTransformersDiversityRanker.to_dict", "haystack.core.serialization.component_to_dict"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 53, "base_passed_num": 49}} {"id": ["haystack.haystack.core.serialization.default_to_dict", "haystack.haystack.document_stores.in_memory.document_store.InMemoryDocumentStore::to_dict", "haystack.haystack.components.retrievers.in_memory.bm25_retriever.InMemoryBM25Retriever::to_dict", "haystack.haystack.core.serialization.component_to_dict"], "project": "haystack", "origin_file": ["haystack/core/serialization.py", "haystack/document_stores/in_memory/document_store.py", "haystack/components/retrievers/in_memory/bm25_retriever.py", "haystack/components/retrievers/sentence_window_retriever.py", "haystack/core/serialization.py"], "test_list": ["test/components/retrievers/test_sentence_window_retriever.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 172, "func_end_lineno": 210, "func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. 
Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}"}, {"class_start_lineno": 58, "class_end_lineno": 738, "func_start_lineno": 344, "func_end_lineno": 358, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n bm25_tokenization_regex=self.bm25_tokenization_regex,\n bm25_algorithm=self.bm25_algorithm,\n bm25_parameters=self.bm25_parameters,\n embedding_similarity_function=self.embedding_similarity_function,\n index=self.index,\n )"}, {"class_start_lineno": 13, "class_end_lineno": 203, "func_start_lineno": 88, "func_end_lineno": 103, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(\n self,\n document_store=docstore,\n filters=self.filters,\n top_k=self.top_k,\n scale_score=self.scale_score,\n filter_policy=self.filter_policy.value,\n )"}, {"class_start_lineno": 13, "class_end_lineno": 198, "func_start_lineno": 122, "func_end_lineno": 130, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n docstore = self.document_store.to_dict()\n return default_to_dict(self, document_store=docstore, window_size=self.window_size)"}, {"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot 
determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"}], "type": ["function_empty", "Development"], "node": ["haystack.core.serialization.default_to_dict", "haystack.document_stores.in_memory.document_store.InMemoryDocumentStore.to_dict", "haystack.components.retrievers.in_memory.bm25_retriever.InMemoryBM25Retriever.to_dict", "haystack.components.retrievers.sentence_window_retriever.SentenceWindowRetriever.to_dict", "haystack.core.serialization.component_to_dict"], "language": "Python", "toolfunc_count": 3, "func_count": 4, "pytest_info": {"total_num": 16, "base_passed_num": 14}} {"id": ["haystack.haystack.components.tools.tool_invoker.ToolInvoker::to_dict", "haystack.haystack.components.generators.chat.openai.OpenAIChatGenerator::to_dict", "haystack.haystack.core.serialization.component_to_dict"], "project": "haystack", "origin_file": ["haystack/components/tools/tool_invoker.py", "haystack/components/generators/chat/openai.py", "haystack/core/serialization.py"], "test_list": ["test/components/tools/test_tool_invoker.py"], "prob_info": [{"class_start_lineno": 38, "class_end_lineno": 242, "func_start_lineno": 216, "func_end_lineno": 229, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialized_tools = [tool.to_dict() for tool in self.tools]\n return default_to_dict(\n self,\n tools=serialized_tools,\n raise_on_failure=self.raise_on_failure,\n convert_result_to_json_string=self.convert_result_to_json_string,\n )"}, {"class_start_lineno": 32, "class_end_lineno": 571, "func_start_lineno": 170, "func_end_lineno": 190, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n return default_to_dict(\n self,\n model=self.model,\n streaming_callback=callback_name,\n api_base_url=self.api_base_url,\n organization=self.organization,\n generation_kwargs=self.generation_kwargs,\n api_key=self.api_key.to_dict(),\n timeout=self.timeout,\n max_retries=self.max_retries,\n tools=[tool.to_dict() for tool in self.tools] if self.tools else None,\n tools_strict=self.tools_strict,\n )"}, {"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, 
\"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"}], "type": ["function_empty", "Development"], "node": ["haystack.components.tools.tool_invoker.ToolInvoker.to_dict", "haystack.components.generators.chat.openai.OpenAIChatGenerator.to_dict", "haystack.core.serialization.component_to_dict"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 16, "base_passed_num": 14}} {"id": ["haystack.haystack.core.pipeline.component_checks.are_all_sockets_ready", "haystack.haystack.core.pipeline.component_checks.has_lazy_variadic_socket_received_all_inputs", "haystack.haystack.core.pipeline.component_checks.has_socket_received_all_inputs", "haystack.haystack.core.pipeline.component_checks.can_component_run"], "project": "haystack", "origin_file": ["haystack/core/pipeline/component_checks.py", "haystack/core/pipeline/component_checks.py", "haystack/core/pipeline/component_checks.py", "haystack/core/pipeline/component_checks.py"], "test_list": ["test/core/pipeline/test_component_checks.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 251, "func_start_lineno": 52, "func_end_lineno": 83, "func_code": "def are_all_sockets_ready(component: Dict, inputs: Dict, only_check_mandatory: bool = False) -> bool:\n \"\"\"\n Checks if all sockets of a component have enough inputs for the component to execute.\n\n :param component: Component metadata and the component instance.\n :param inputs: Inputs for the component.\n :param only_check_mandatory: If only mandatory sockets should be checked.\n \"\"\"\n filled_sockets = set()\n expected_sockets = set()\n if only_check_mandatory:\n sockets_to_check = {\n socket_name: socket for socket_name, socket in component[\"input_sockets\"].items() if socket.is_mandatory\n }\n else:\n sockets_to_check = {\n socket_name: socket\n for socket_name, socket in component[\"input_sockets\"].items()\n if socket.is_mandatory or len(socket.senders)\n }\n\n for socket_name, socket in sockets_to_check.items():\n socket_inputs = inputs.get(socket_name, [])\n expected_sockets.add(socket_name)\n\n # Check if socket has all required inputs or is a lazy variadic socket with any input\n if has_socket_received_all_inputs(socket, socket_inputs) or (\n is_socket_lazy_variadic(socket) and any_socket_input_received(socket_inputs)\n ):\n filled_sockets.add(socket_name)\n\n return filled_sockets == expected_sockets"}, {"class_start_lineno": 1, "class_end_lineno": 251, 
"func_start_lineno": 149, "func_end_lineno": 163, "func_code": "def has_lazy_variadic_socket_received_all_inputs(socket: InputSocket, socket_inputs: List[Dict]) -> bool:\n \"\"\"\n Checks if a lazy variadic socket has received all expected inputs from other components in the pipeline.\n\n :param socket: The InputSocket of a component.\n :param socket_inputs: Inputs for the socket.\n \"\"\"\n expected_senders = set(socket.senders)\n actual_senders = {\n sock[\"sender\"]\n for sock in socket_inputs\n if sock[\"value\"] is not _NO_OUTPUT_PRODUCED and sock[\"sender\"] is not None\n }\n\n return expected_senders == actual_senders"}, {"class_start_lineno": 1, "class_end_lineno": 251, "func_start_lineno": 175, "func_end_lineno": 199, "func_code": "def has_socket_received_all_inputs(socket: InputSocket, socket_inputs: List[Dict]) -> bool:\n \"\"\"\n Checks if a socket has received all expected inputs.\n\n :param socket: The InputSocket of a component.\n :param socket_inputs: Inputs for the socket.\n \"\"\"\n # No inputs received for the socket, it is not filled.\n if len(socket_inputs) == 0:\n return False\n\n # The socket is greedy variadic and at least one input was produced, it is complete.\n if (\n socket.is_variadic\n and socket.is_greedy\n and any(sock[\"value\"] is not _NO_OUTPUT_PRODUCED for sock in socket_inputs)\n ):\n return True\n\n # The socket is lazy variadic and all expected inputs were produced.\n if is_socket_lazy_variadic(socket) and has_lazy_variadic_socket_received_all_inputs(socket, socket_inputs):\n return True\n\n # The socket is not variadic and the only expected input is complete.\n return not socket.is_variadic and socket_inputs[0][\"value\"] is not _NO_OUTPUT_PRODUCED"}, {"class_start_lineno": 1, "class_end_lineno": 251, "func_start_lineno": 12, "func_end_lineno": 25, "func_code": "def can_component_run(component: Dict, inputs: Dict) -> bool:\n \"\"\"\n Checks if the component can run, given the current state of its inputs.\n\n A component needs to pass two gates so that it is ready to run:\n 1. It has received all mandatory inputs.\n 2. 
It has received a trigger.\n :param component: Component metadata and the component instance.\n :param inputs: Inputs for the component.\n \"\"\"\n received_all_mandatory_inputs = are_all_sockets_ready(component, inputs, only_check_mandatory=True)\n received_trigger = has_any_trigger(component, inputs)\n\n return received_all_mandatory_inputs and received_trigger"}], "type": ["function_empty", "Development"], "node": ["haystack.core.pipeline.component_checks.are_all_sockets_ready", "haystack.core.pipeline.component_checks.has_lazy_variadic_socket_received_all_inputs", "haystack.core.pipeline.component_checks.has_socket_received_all_inputs", "haystack.core.pipeline.component_checks.can_component_run"], "language": "Python", "toolfunc_count": 3, "func_count": 4, "pytest_info": {"total_num": 78, "base_passed_num": 48}} {"id": ["haystack.haystack.components.websearch.serper_dev.SerperDevWebSearch::to_dict", "haystack.haystack.core.serialization.component_to_dict", "haystack.haystack.components.tools.tool_invoker.ToolInvoker::to_dict", "haystack.haystack.core.serialization.default_to_dict", "haystack.haystack.components.generators.chat.openai.OpenAIChatGenerator::to_dict"], "project": "haystack", "origin_file": ["haystack/components/websearch/serper_dev.py", "haystack/core/serialization.py", "haystack/tools/component_tool.py", "haystack/components/tools/tool_invoker.py", "haystack/core/serialization.py", "haystack/components/generators/chat/openai.py"], "test_list": ["test/tools/test_component_tool.py"], "prob_info": [{"class_start_lineno": 23, "class_end_lineno": 175, "func_start_lineno": 67, "func_end_lineno": 80, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n top_k=self.top_k,\n allowed_domains=self.allowed_domains,\n search_params=self.search_params,\n api_key=self.api_key.to_dict(),\n )"}, {"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization 
method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"}, {"class_start_lineno": 30, "class_end_lineno": 328, "func_start_lineno": 161, "func_end_lineno": 168, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the ComponentTool to a dictionary.\n \"\"\"\n # we do not serialize the function in this case: it can be recreated from the component at deserialization time\n serialized = {\"name\": self.name, \"description\": self.description, \"parameters\": self.parameters}\n serialized[\"component\"] = component_to_dict(obj=self._component, name=self.name)\n return {\"type\": generate_qualified_class_name(type(self)), \"data\": serialized}"}, {"class_start_lineno": 38, "class_end_lineno": 242, "func_start_lineno": 216, "func_end_lineno": 229, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialized_tools = [tool.to_dict() for tool in self.tools]\n return default_to_dict(\n self,\n tools=serialized_tools,\n raise_on_failure=self.raise_on_failure,\n convert_result_to_json_string=self.convert_result_to_json_string,\n )"}, {"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 172, "func_end_lineno": 210, "func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. 
Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}"}, {"class_start_lineno": 32, "class_end_lineno": 571, "func_start_lineno": 170, "func_end_lineno": 190, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n return default_to_dict(\n self,\n model=self.model,\n streaming_callback=callback_name,\n api_base_url=self.api_base_url,\n organization=self.organization,\n generation_kwargs=self.generation_kwargs,\n api_key=self.api_key.to_dict(),\n timeout=self.timeout,\n max_retries=self.max_retries,\n tools=[tool.to_dict() for tool in self.tools] if self.tools else None,\n tools_strict=self.tools_strict,\n )"}], "type": ["function_empty", "Development"], "node": ["haystack.components.websearch.serper_dev.SerperDevWebSearch.to_dict", "haystack.core.serialization.component_to_dict", "haystack.tools.component_tool.ComponentTool.to_dict", "haystack.components.tools.tool_invoker.ToolInvoker.to_dict", "haystack.core.serialization.default_to_dict", "haystack.components.generators.chat.openai.OpenAIChatGenerator.to_dict"], "language": "Python", "toolfunc_count": 4, "func_count": 5, "pytest_info": {"total_num": 10, "base_passed_num": 8}} {"id": ["haystack.haystack.components.websearch.serper_dev.SerperDevWebSearch::to_dict", "haystack.haystack.components.tools.tool_invoker.ToolInvoker::to_dict", "haystack.haystack.core.serialization.default_to_dict", "haystack.haystack.components.generators.chat.openai.OpenAIChatGenerator::to_dict", "haystack.haystack.core.serialization.component_to_dict"], "project": "haystack", "origin_file": ["haystack/components/websearch/serper_dev.py", "haystack/components/tools/tool_invoker.py", "haystack/core/serialization.py", "haystack/components/generators/chat/openai.py", "haystack/core/serialization.py", "haystack/tools/component_tool.py"], "test_list": ["test/tools/test_component_tool.py"], "prob_info": [{"class_start_lineno": 23, "class_end_lineno": 175, "func_start_lineno": 67, "func_end_lineno": 80, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n return default_to_dict(\n self,\n top_k=self.top_k,\n allowed_domains=self.allowed_domains,\n search_params=self.search_params,\n api_key=self.api_key.to_dict(),\n )"}, {"class_start_lineno": 38, "class_end_lineno": 242, "func_start_lineno": 216, "func_end_lineno": 229, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the component to a dictionary.\n\n :returns:\n Dictionary with serialized data.\n \"\"\"\n serialized_tools = [tool.to_dict() for tool in 
self.tools]\n return default_to_dict(\n self,\n tools=serialized_tools,\n raise_on_failure=self.raise_on_failure,\n convert_result_to_json_string=self.convert_result_to_json_string,\n )"}, {"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 172, "func_end_lineno": 210, "func_code": "def default_to_dict(obj: Any, **init_parameters) -> Dict[str, Any]:\n \"\"\"\n Utility function to serialize an object to a dictionary.\n\n This is mostly necessary for components but can be used by any object.\n `init_parameters` are parameters passed to the object class `__init__`.\n They must be defined explicitly as they'll be used when creating a new\n instance of `obj` with `from_dict`. Omitting them might cause deserialisation\n errors or unexpected behaviours later, when calling `from_dict`.\n\n An example usage:\n\n ```python\n class MyClass:\n def __init__(self, my_param: int = 10):\n self.my_param = my_param\n\n def to_dict(self):\n return default_to_dict(self, my_param=self.my_param)\n\n\n obj = MyClass(my_param=1000)\n data = obj.to_dict()\n assert data == {\n \"type\": \"MyClass\",\n \"init_parameters\": {\n \"my_param\": 1000,\n },\n }\n ```\n\n :param obj:\n The object to be serialized.\n :param init_parameters:\n The parameters used to create a new instance of the class.\n :returns:\n A dictionary representation of the instance.\n \"\"\"\n return {\"type\": generate_qualified_class_name(type(obj)), \"init_parameters\": init_parameters}"}, {"class_start_lineno": 32, "class_end_lineno": 571, "func_start_lineno": 170, "func_end_lineno": 190, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serialize this component to a dictionary.\n\n :returns:\n The serialized component as a dictionary.\n \"\"\"\n callback_name = serialize_callable(self.streaming_callback) if self.streaming_callback else None\n return default_to_dict(\n self,\n model=self.model,\n streaming_callback=callback_name,\n api_base_url=self.api_base_url,\n organization=self.organization,\n generation_kwargs=self.generation_kwargs,\n api_key=self.api_key.to_dict(),\n timeout=self.timeout,\n max_retries=self.max_retries,\n tools=[tool.to_dict() for tool in self.tools] if self.tools else None,\n tools_strict=self.tools_strict,\n )"}, {"class_start_lineno": 1, "class_end_lineno": 264, "func_start_lineno": 36, "func_end_lineno": 82, "func_code": "def component_to_dict(obj: Any, name: str) -> Dict[str, Any]:\n \"\"\"\n Converts a component instance into a dictionary.\n\n If a `to_dict` method is present in the component instance, that will be used instead of the default method.\n\n :param obj:\n The component to be serialized.\n :param name:\n The name of the component.\n :returns:\n A dictionary representation of the component.\n\n :raises SerializationError:\n If the component doesn't have a `to_dict` method.\n If the values of the init parameters can't be determined.\n If a non-basic Python type is used in the serialized data.\n \"\"\"\n if hasattr(obj, \"to_dict\"):\n data = obj.to_dict()\n else:\n init_parameters = {}\n for param_name, param in inspect.signature(obj.__init__).parameters.items():\n # Ignore `args` and `kwargs`, used by the default constructor\n if param_name in (\"args\", \"kwargs\"):\n continue\n try:\n # This only works if the Component constructor assigns the init\n # parameter to an instance variable or property with the same name\n param_value = getattr(obj, param_name)\n except AttributeError as e:\n # If the parameter doesn't have a default value, raise an error\n if param.default 
is param.empty:\n raise SerializationError(\n f\"Cannot determine the value of the init parameter '{param_name}' \"\n f\"for the class {obj.__class__.__name__}.\"\n f\"You can fix this error by assigning 'self.{param_name} = {param_name}' or adding a \"\n f\"custom serialization method 'to_dict' to the class.\"\n ) from e\n # In case the init parameter was not assigned, we use the default value\n param_value = param.default\n init_parameters[param_name] = param_value\n\n data = default_to_dict(obj, **init_parameters)\n\n _validate_component_to_dict_output(obj, name, data)\n return data"}, {"class_start_lineno": 30, "class_end_lineno": 328, "func_start_lineno": 161, "func_end_lineno": 168, "func_code": " def to_dict(self) -> Dict[str, Any]:\n \"\"\"\n Serializes the ComponentTool to a dictionary.\n \"\"\"\n # we do not serialize the function in this case: it can be recreated from the component at deserialization time\n serialized = {\"name\": self.name, \"description\": self.description, \"parameters\": self.parameters}\n serialized[\"component\"] = component_to_dict(obj=self._component, name=self.name)\n return {\"type\": generate_qualified_class_name(type(self)), \"data\": serialized}"}], "type": ["function_empty", "Development"], "node": ["haystack.components.websearch.serper_dev.SerperDevWebSearch.to_dict", "haystack.components.tools.tool_invoker.ToolInvoker.to_dict", "haystack.core.serialization.default_to_dict", "haystack.components.generators.chat.openai.OpenAIChatGenerator.to_dict", "haystack.core.serialization.component_to_dict", "haystack.tools.component_tool.ComponentTool.to_dict"], "language": "Python", "toolfunc_count": 4, "func_count": 5, "pytest_info": {"total_num": 10, "base_passed_num": 8}} {"id": ["inference.inference.core.utils.preprocess.resize_image_keeping_aspect_ratio", "inference.inference.core.utils.preprocess.letterbox_image", "inference.inference.core.interfaces.stream.sinks._handle_frame_rendering"], "project": "inference", "origin_file": ["inference/core/utils/preprocess.py", "inference/core/utils/preprocess.py", "inference/core/interfaces/stream/sinks.py"], "test_list": ["tests/inference/unit_tests/core/interfaces/stream/test_sinks.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 298, "func_start_lineno": 253, "func_end_lineno": 298, "func_code": "def resize_image_keeping_aspect_ratio(\n image: ImageMetaType,\n desired_size: Tuple[int, int],\n) -> ImageMetaType:\n \"\"\"\n Resize reserving its aspect ratio.\n\n Parameters:\n - image: numpy array representing the image.\n - desired_size: tuple (width, height) representing the target dimensions.\n \"\"\"\n if isinstance(image, np.ndarray):\n img_ratio = image.shape[1] / image.shape[0]\n elif USE_PYTORCH_FOR_PREPROCESSING:\n img_ratio = image.shape[-1] / image.shape[-2]\n else:\n raise ValueError(\n f\"Received an image of unknown type, {type(image)}; \"\n \"This is most likely a bug. 
Contact Roboflow team through github issues \"\n \"(https://github.com/roboflow/inference/issues) providing full context of the problem\"\n )\n desired_ratio = desired_size[0] / desired_size[1]\n\n # Determine the new dimensions\n if img_ratio >= desired_ratio:\n # Resize by width\n new_width = desired_size[0]\n new_height = int(desired_size[0] / img_ratio)\n else:\n # Resize by height\n new_height = desired_size[1]\n new_width = int(desired_size[1] * img_ratio)\n\n # Resize the image to new dimensions\n if isinstance(image, np.ndarray):\n return cv2.resize(image, (new_width, new_height))\n elif USE_PYTORCH_FOR_PREPROCESSING:\n return torch.nn.functional.interpolate(\n image, size=(new_height, new_width), mode=\"bilinear\"\n )\n else:\n raise ValueError(\n f\"Received an image of unknown type, {type(image)}; \"\n \"This is most likely a bug. Contact Roboflow team through github issues \"\n \"(https://github.com/roboflow/inference/issues) providing full context of the problem\"\n )"}, {"class_start_lineno": 1, "class_end_lineno": 298, "func_start_lineno": 190, "func_end_lineno": 241, "func_code": "def letterbox_image(\n image: ImageMetaType,\n desired_size: Tuple[int, int],\n color: Tuple[int, int, int] = (0, 0, 0),\n) -> ImageMetaType:\n \"\"\"\n Resize and pad image to fit the desired size, preserving its aspect ratio.\n\n Parameters:\n - image: numpy array representing the image.\n - desired_size: tuple (width, height) representing the target dimensions.\n - color: tuple (B, G, R) representing the color to pad with.\n\n Returns:\n - letterboxed image.\n \"\"\"\n resized_img = resize_image_keeping_aspect_ratio(\n image=image,\n desired_size=desired_size,\n )\n new_height, new_width = (\n resized_img.shape[:2]\n if isinstance(resized_img, np.ndarray)\n else resized_img.shape[-2:]\n )\n top_padding = (desired_size[1] - new_height) // 2\n bottom_padding = desired_size[1] - new_height - top_padding\n left_padding = (desired_size[0] - new_width) // 2\n right_padding = desired_size[0] - new_width - left_padding\n if isinstance(resized_img, np.ndarray):\n return cv2.copyMakeBorder(\n resized_img,\n top_padding,\n bottom_padding,\n left_padding,\n right_padding,\n cv2.BORDER_CONSTANT,\n value=color,\n )\n elif USE_PYTORCH_FOR_PREPROCESSING:\n return torch.nn.functional.pad(\n resized_img,\n (left_padding, right_padding, top_padding, bottom_padding),\n \"constant\",\n color[0],\n )\n else:\n raise ValueError(\n f\"Received an image of unknown type, {type(resized_img)}; \"\n \"This is most likely a bug. 
Contact Roboflow team through github issues \"\n \"(https://github.com/roboflow/inference/issues) providing full context of the problem\"\n )"}, {"class_start_lineno": 1, "class_end_lineno": 570, "func_start_lineno": 155, "func_end_lineno": 196, "func_code": "def _handle_frame_rendering(\n frame: Optional[VideoFrame],\n prediction: dict,\n annotators: List[BaseAnnotator],\n display_size: Optional[Tuple[int, int]],\n display_statistics: bool,\n fps_value: Optional[float],\n) -> np.ndarray:\n if frame is None:\n image = np.zeros((256, 256, 3), dtype=np.uint8)\n else:\n try:\n labels = [p[\"class\"] for p in prediction[\"predictions\"]]\n if hasattr(sv.Detections, \"from_inference\"):\n detections = sv.Detections.from_inference(prediction)\n else:\n detections = sv.Detections.from_inference(prediction)\n image = frame.image.copy()\n for annotator in annotators:\n kwargs = {\n \"scene\": image,\n \"detections\": detections,\n }\n if isinstance(annotator, sv.LabelAnnotator):\n kwargs[\"labels\"] = labels\n image = annotator.annotate(**kwargs)\n except (TypeError, KeyError):\n logger.warning(\n f\"Used `render_boxes(...)` sink, but predictions that were provided do not match the expected \"\n f\"format of object detection prediction that could be accepted by \"\n f\"`supervision.Detection.from_inference(...)\"\n )\n image = frame.image.copy()\n if display_size is not None:\n image = letterbox_image(image, desired_size=display_size)\n if display_statistics:\n image = render_statistics(\n image=image,\n frame_timestamp=(frame.frame_timestamp if frame is not None else None),\n fps=fps_value,\n )\n return image"}], "type": ["function_empty", "Development"], "node": ["inference.core.utils.preprocess.resize_image_keeping_aspect_ratio", "inference.core.utils.preprocess.letterbox_image", "inference.core.interfaces.stream.sinks._handle_frame_rendering"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 11, "base_passed_num": 8}} {"id": ["inference.inference.core.utils.image_utils.validate_numpy_image", "inference.inference.core.utils.image_utils.load_image_base64", "inference.inference.core.utils.image_utils.load_image_from_encoded_bytes", "inference.inference.core.utils.image_utils.load_image_from_buffer", "inference.inference.core.utils.image_utils.load_image_from_numpy_str", "inference.inference.core.utils.image_utils.attempt_loading_image_from_string", "inference.inference.core.utils.image_utils.load_image_with_inferred_type"], "project": "inference", "origin_file": ["inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py"], "test_list": ["tests/inference/unit_tests/core/utils/test_image_utils.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 353, "func_end_lineno": 377, "func_code": "def validate_numpy_image(data: np.ndarray) -> None:\n \"\"\"\n Validate if the provided data is a valid numpy image.\n\n Args:\n data (np.ndarray): The numpy array representing an image.\n\n Raises:\n InvalidNumpyInput: If the provided data is not a valid numpy image.\n \"\"\"\n if not issubclass(type(data), np.ndarray):\n raise InvalidNumpyInput(\n message=f\"Data provided as input could not be decoded into np.ndarray object.\",\n public_message=f\"Data provided as input could not be decoded into np.ndarray object.\",\n )\n if 
len(data.shape) != 3 and len(data.shape) != 2:\n raise InvalidNumpyInput(\n message=f\"For image given as np.ndarray expected 2 or 3 dimensions, got {len(data.shape)} dimensions.\",\n public_message=f\"For image given as np.ndarray expected 2 or 3 dimensions.\",\n )\n if data.shape[-1] != 3 and data.shape[-1] != 1:\n raise InvalidNumpyInput(\n message=f\"For image given as np.ndarray expected 1 or 3 channels, got {data.shape[-1]} channels.\",\n public_message=\"For image given as np.ndarray expected 1 or 3 channels.\",\n )"}, {"class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 258, "func_end_lineno": 292, "func_code": "def load_image_base64(\n value: Union[str, bytes], cv_imread_flags=cv2.IMREAD_COLOR\n) -> np.ndarray:\n \"\"\"Loads an image from a base64 encoded string using OpenCV.\n\n Args:\n value (str): Base64 encoded string representing the image.\n\n Returns:\n np.ndarray: The loaded image as a numpy array.\n \"\"\"\n # New routes accept images via json body (str), legacy routes accept bytes which need to be decoded as strings\n if not isinstance(value, str):\n value = value.decode(\"utf-8\")\n value = BASE64_DATA_TYPE_PATTERN.sub(\"\", value)\n try:\n value = pybase64.b64decode(value)\n except binascii.Error as error:\n raise InputImageLoadError(\n message=\"Could not load valid image from base64 string.\",\n public_message=\"Malformed base64 input image.\",\n ) from error\n if len(value) == 0:\n raise InputImageLoadError(\n message=\"Could not load valid image from base64 string.\",\n public_message=\"Empty image payload.\",\n )\n image_np = np.frombuffer(value, np.uint8)\n result = cv2.imdecode(image_np, cv_imread_flags)\n if result is None:\n raise InputImageLoadError(\n message=\"Could not load valid image from base64 string.\",\n public_message=\"Malformed base64 input image.\",\n )\n return result"}, {"class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 496, "func_end_lineno": 516, "func_code": "def load_image_from_encoded_bytes(\n value: bytes, cv_imread_flags: int = cv2.IMREAD_COLOR\n) -> np.ndarray:\n \"\"\"\n Load an image from encoded bytes.\n\n Args:\n value (bytes): The byte sequence representing the image.\n cv_imread_flags (int): OpenCV flags used for image reading.\n\n Returns:\n np.ndarray: The loaded image as a numpy array.\n \"\"\"\n image_np = np.asarray(bytearray(value), dtype=np.uint8)\n image = cv2.imdecode(image_np, cv_imread_flags)\n if image is None:\n raise InputImageLoadError(\n message=f\"Could not decode bytes as image.\",\n public_message=\"Data is not image.\",\n )\n return image"}, {"class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 295, "func_end_lineno": 315, "func_code": "def load_image_from_buffer(\n value: _IOBase,\n cv_imread_flags: int = cv2.IMREAD_COLOR,\n) -> np.ndarray:\n \"\"\"Loads an image from a multipart-encoded input.\n\n Args:\n value (Any): Multipart-encoded input representing the image.\n\n Returns:\n Image.Image: The loaded PIL image.\n \"\"\"\n value.seek(0)\n image_np = np.frombuffer(value.read(), np.uint8)\n result = cv2.imdecode(image_np, cv_imread_flags)\n if result is None:\n raise InputImageLoadError(\n message=\"Could not load valid image from buffer.\",\n public_message=\"Could not decode bytes into image.\",\n )\n return result"}, {"class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 318, "func_end_lineno": 345, "func_code": "def load_image_from_numpy_str(value: Union[bytes, str]) -> np.ndarray:\n \"\"\"Loads an image from a numpy array 
string.\n\n Args:\n value (Union[bytes, str]): Base64 string or byte sequence representing the pickled numpy array of the image.\n\n Returns:\n Image.Image: The loaded PIL image.\n\n Raises:\n InvalidNumpyInput: If the numpy data is invalid.\n \"\"\"\n if not ALLOW_NUMPY_INPUT:\n raise InvalidImageTypeDeclared(\n message=f\"NumPy image type is not supported in this configuration of `inference`.\",\n public_message=f\"NumPy image type is not supported in this configuration of `inference`.\",\n )\n try:\n if isinstance(value, str):\n value = pybase64.b64decode(value)\n data = pickle.loads(value)\n except (EOFError, TypeError, pickle.UnpicklingError, binascii.Error) as error:\n raise InvalidNumpyInput(\n message=f\"Could not unpickle image data. Cause: {error}\",\n public_message=\"Could not deserialize pickle payload.\",\n ) from error\n validate_numpy_image(data=data)\n return data"}, {"class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 215, "func_end_lineno": 255, "func_code": "def attempt_loading_image_from_string(\n value: Union[str, bytes, bytearray, _IOBase],\n cv_imread_flags: int = cv2.IMREAD_COLOR,\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Attempt to load an image from a string.\n\n Args:\n value (Union[str, bytes, bytearray, _IOBase]): The image data in string format.\n cv_imread_flags (int): OpenCV flags used for image reading.\n\n Returns:\n Tuple[np.ndarray, bool]: A tuple of the loaded image in numpy array format and a boolean flag indicating if the image is in BGR format.\n \"\"\"\n try:\n return load_image_base64(value=value, cv_imread_flags=cv_imread_flags), True\n except:\n pass\n try:\n return (\n load_image_from_encoded_bytes(value=value, cv_imread_flags=cv_imread_flags),\n True,\n )\n except:\n pass\n try:\n return (\n load_image_from_buffer(value=value, cv_imread_flags=cv_imread_flags),\n True,\n )\n except:\n pass\n try:\n return load_image_from_numpy_str(value=value), True\n except InvalidImageTypeDeclared as error:\n raise error\n except InvalidNumpyInput as error:\n raise InputFormatInferenceFailed(\n message=\"Input image format could not be inferred from string.\",\n public_message=\"Input image format could not be inferred from string.\",\n ) from error"}, {"class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 180, "func_end_lineno": 212, "func_code": "def load_image_with_inferred_type(\n value: Any,\n cv_imread_flags: int = cv2.IMREAD_COLOR,\n) -> Tuple[np.ndarray, bool]:\n \"\"\"Load an image by inferring its type.\n\n Args:\n value (Any): The image data.\n cv_imread_flags (int): Flags used for OpenCV's imread function.\n\n Returns:\n Tuple[np.ndarray, bool]: Loaded image as a numpy array and a boolean indicating if the image is in BGR format.\n\n Raises:\n NotImplementedError: If the image type could not be inferred.\n \"\"\"\n if isinstance(value, (np.ndarray, np.generic)):\n validate_numpy_image(data=value)\n return value, True\n elif isinstance(value, Image.Image):\n return np.asarray(value.convert(\"RGB\")), False\n elif isinstance(value, str) and (value.startswith(\"http\")):\n return load_image_from_url(value=value, cv_imread_flags=cv_imread_flags), True\n elif (\n isinstance(value, str)\n and ALLOW_LOADING_IMAGES_FROM_LOCAL_FILESYSTEM\n and os.path.isfile(value)\n ):\n return cv2.imread(value, cv_imread_flags), True\n else:\n return attempt_loading_image_from_string(\n value=value, cv_imread_flags=cv_imread_flags\n )"}], "type": ["function_empty", "Development"], "node": 
["inference.core.utils.image_utils.validate_numpy_image", "inference.core.utils.image_utils.load_image_base64", "inference.core.utils.image_utils.load_image_from_encoded_bytes", "inference.core.utils.image_utils.load_image_from_buffer", "inference.core.utils.image_utils.load_image_from_numpy_str", "inference.core.utils.image_utils.attempt_loading_image_from_string", "inference.core.utils.image_utils.load_image_with_inferred_type"], "language": "Python", "toolfunc_count": 6, "func_count": 7, "pytest_info": {"total_num": 152, "base_passed_num": 71}} {"id": ["inference.inference.core.utils.image_utils.load_image_base64", "inference.inference.core.utils.image_utils.load_image_from_encoded_bytes", "inference.inference.core.utils.image_utils.load_image_from_buffer", "inference.inference.core.utils.image_utils.validate_numpy_image", "inference.inference.core.utils.image_utils.load_image_from_numpy_str", "inference.inference.core.utils.image_utils.attempt_loading_image_from_string"], "project": "inference", "origin_file": ["inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py", "inference/core/utils/image_utils.py"], "test_list": ["tests/inference/unit_tests/core/utils/test_image_utils.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 258, "func_end_lineno": 292, "func_code": "def load_image_base64(\n value: Union[str, bytes], cv_imread_flags=cv2.IMREAD_COLOR\n) -> np.ndarray:\n \"\"\"Loads an image from a base64 encoded string using OpenCV.\n\n Args:\n value (str): Base64 encoded string representing the image.\n\n Returns:\n np.ndarray: The loaded image as a numpy array.\n \"\"\"\n # New routes accept images via json body (str), legacy routes accept bytes which need to be decoded as strings\n if not isinstance(value, str):\n value = value.decode(\"utf-8\")\n value = BASE64_DATA_TYPE_PATTERN.sub(\"\", value)\n try:\n value = pybase64.b64decode(value)\n except binascii.Error as error:\n raise InputImageLoadError(\n message=\"Could not load valid image from base64 string.\",\n public_message=\"Malformed base64 input image.\",\n ) from error\n if len(value) == 0:\n raise InputImageLoadError(\n message=\"Could not load valid image from base64 string.\",\n public_message=\"Empty image payload.\",\n )\n image_np = np.frombuffer(value, np.uint8)\n result = cv2.imdecode(image_np, cv_imread_flags)\n if result is None:\n raise InputImageLoadError(\n message=\"Could not load valid image from base64 string.\",\n public_message=\"Malformed base64 input image.\",\n )\n return result"}, {"class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 496, "func_end_lineno": 516, "func_code": "def load_image_from_encoded_bytes(\n value: bytes, cv_imread_flags: int = cv2.IMREAD_COLOR\n) -> np.ndarray:\n \"\"\"\n Load an image from encoded bytes.\n\n Args:\n value (bytes): The byte sequence representing the image.\n cv_imread_flags (int): OpenCV flags used for image reading.\n\n Returns:\n np.ndarray: The loaded image as a numpy array.\n \"\"\"\n image_np = np.asarray(bytearray(value), dtype=np.uint8)\n image = cv2.imdecode(image_np, cv_imread_flags)\n if image is None:\n raise InputImageLoadError(\n message=f\"Could not decode bytes as image.\",\n public_message=\"Data is not image.\",\n )\n return image"}, {"class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 295, "func_end_lineno": 315, "func_code": "def 
load_image_from_buffer(\n value: _IOBase,\n cv_imread_flags: int = cv2.IMREAD_COLOR,\n) -> np.ndarray:\n \"\"\"Loads an image from a multipart-encoded input.\n\n Args:\n value (Any): Multipart-encoded input representing the image.\n\n Returns:\n Image.Image: The loaded PIL image.\n \"\"\"\n value.seek(0)\n image_np = np.frombuffer(value.read(), np.uint8)\n result = cv2.imdecode(image_np, cv_imread_flags)\n if result is None:\n raise InputImageLoadError(\n message=\"Could not load valid image from buffer.\",\n public_message=\"Could not decode bytes into image.\",\n )\n return result"}, {"class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 353, "func_end_lineno": 377, "func_code": "def validate_numpy_image(data: np.ndarray) -> None:\n \"\"\"\n Validate if the provided data is a valid numpy image.\n\n Args:\n data (np.ndarray): The numpy array representing an image.\n\n Raises:\n InvalidNumpyInput: If the provided data is not a valid numpy image.\n \"\"\"\n if not issubclass(type(data), np.ndarray):\n raise InvalidNumpyInput(\n message=f\"Data provided as input could not be decoded into np.ndarray object.\",\n public_message=f\"Data provided as input could not be decoded into np.ndarray object.\",\n )\n if len(data.shape) != 3 and len(data.shape) != 2:\n raise InvalidNumpyInput(\n message=f\"For image given as np.ndarray expected 2 or 3 dimensions, got {len(data.shape)} dimensions.\",\n public_message=f\"For image given as np.ndarray expected 2 or 3 dimensions.\",\n )\n if data.shape[-1] != 3 and data.shape[-1] != 1:\n raise InvalidNumpyInput(\n message=f\"For image given as np.ndarray expected 1 or 3 channels, got {data.shape[-1]} channels.\",\n public_message=\"For image given as np.ndarray expected 1 or 3 channels.\",\n )"}, {"class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 318, "func_end_lineno": 345, "func_code": "def load_image_from_numpy_str(value: Union[bytes, str]) -> np.ndarray:\n \"\"\"Loads an image from a numpy array string.\n\n Args:\n value (Union[bytes, str]): Base64 string or byte sequence representing the pickled numpy array of the image.\n\n Returns:\n Image.Image: The loaded PIL image.\n\n Raises:\n InvalidNumpyInput: If the numpy data is invalid.\n \"\"\"\n if not ALLOW_NUMPY_INPUT:\n raise InvalidImageTypeDeclared(\n message=f\"NumPy image type is not supported in this configuration of `inference`.\",\n public_message=f\"NumPy image type is not supported in this configuration of `inference`.\",\n )\n try:\n if isinstance(value, str):\n value = pybase64.b64decode(value)\n data = pickle.loads(value)\n except (EOFError, TypeError, pickle.UnpicklingError, binascii.Error) as error:\n raise InvalidNumpyInput(\n message=f\"Could not unpickle image data. 
Cause: {error}\",\n public_message=\"Could not deserialize pickle payload.\",\n ) from error\n validate_numpy_image(data=data)\n return data"}, {"class_start_lineno": 1, "class_end_lineno": 599, "func_start_lineno": 215, "func_end_lineno": 255, "func_code": "def attempt_loading_image_from_string(\n value: Union[str, bytes, bytearray, _IOBase],\n cv_imread_flags: int = cv2.IMREAD_COLOR,\n) -> Tuple[np.ndarray, bool]:\n \"\"\"\n Attempt to load an image from a string.\n\n Args:\n value (Union[str, bytes, bytearray, _IOBase]): The image data in string format.\n cv_imread_flags (int): OpenCV flags used for image reading.\n\n Returns:\n Tuple[np.ndarray, bool]: A tuple of the loaded image in numpy array format and a boolean flag indicating if the image is in BGR format.\n \"\"\"\n try:\n return load_image_base64(value=value, cv_imread_flags=cv_imread_flags), True\n except:\n pass\n try:\n return (\n load_image_from_encoded_bytes(value=value, cv_imread_flags=cv_imread_flags),\n True,\n )\n except:\n pass\n try:\n return (\n load_image_from_buffer(value=value, cv_imread_flags=cv_imread_flags),\n True,\n )\n except:\n pass\n try:\n return load_image_from_numpy_str(value=value), True\n except InvalidImageTypeDeclared as error:\n raise error\n except InvalidNumpyInput as error:\n raise InputFormatInferenceFailed(\n message=\"Input image format could not be inferred from string.\",\n public_message=\"Input image format could not be inferred from string.\",\n ) from error"}], "type": ["function_empty", "Development"], "node": ["inference.core.utils.image_utils.load_image_base64", "inference.core.utils.image_utils.load_image_from_encoded_bytes", "inference.core.utils.image_utils.load_image_from_buffer", "inference.core.utils.image_utils.validate_numpy_image", "inference.core.utils.image_utils.load_image_from_numpy_str", "inference.core.utils.image_utils.attempt_loading_image_from_string"], "language": "Python", "toolfunc_count": 5, "func_count": 6, "pytest_info": {"total_num": 152, "base_passed_num": 82}} {"id": ["langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_action_agent", "langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_perform_agent_action", "langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_iter_next_step", "langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_take_next_step"], "project": "langchain", "origin_file": ["langchain/agents/agent.py", "langchain/agents/agent.py", "langchain/agents/agent.py", "langchain/agents/agent.py"], "test_list": ["libs/langchain/tests/unit_tests/agents/test_agent.py"], "prob_info": [{"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1177, "func_end_lineno": 1189, "func_code": " def _action_agent(self) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:\n \"\"\"Type cast self.agent.\n\n If the `agent` attribute is a Runnable, it will be converted one of\n RunnableAgentType in the validate_runnable_agent root_validator.\n\n To support instantiating with a Runnable, here we explicitly cast the type\n to reflect the changes made in the root_validator.\n \"\"\"\n if isinstance(self.agent, Runnable):\n return cast(RunnableAgentType, self.agent)\n else:\n return self.agent"}, {"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1419, "func_end_lineno": 1456, "func_code": " def _perform_agent_action(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n agent_action: AgentAction,\n run_manager: Optional[CallbackManagerForChainRun] = 
None,\n ) -> AgentStep:\n if run_manager:\n run_manager.on_agent_action(agent_action, color=\"green\")\n # Otherwise we lookup the tool\n if agent_action.tool in name_to_tool_map:\n tool = name_to_tool_map[agent_action.tool]\n return_direct = tool.return_direct\n color = color_mapping[agent_action.tool]\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n if return_direct:\n tool_run_kwargs[\"llm_prefix\"] = \"\"\n # We then call the tool on the tool input to get an observation\n observation = tool.run(\n agent_action.tool_input,\n verbose=self.verbose,\n color=color,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n else:\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = InvalidTool().run(\n {\n \"requested_tool_name\": agent_action.tool,\n \"available_tool_names\": list(name_to_tool_map.keys()),\n },\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n return AgentStep(action=agent_action, observation=observation)"}, {"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1342, "func_end_lineno": 1417, "func_code": " def _iter_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]:\n \"\"\"Take a single step in the thought-action-observation loop.\n\n Override this to take control of how the agent makes and acts on choices.\n \"\"\"\n try:\n intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)\n\n # Call the LLM to see what to do.\n output = self._action_agent.plan(\n intermediate_steps,\n callbacks=run_manager.get_child() if run_manager else None,\n **inputs,\n )\n except OutputParserException as e:\n if isinstance(self.handle_parsing_errors, bool):\n raise_error = not self.handle_parsing_errors\n else:\n raise_error = False\n if raise_error:\n raise ValueError(\n \"An output parsing error occurred. \"\n \"In order to pass this error back to the agent and have it try \"\n \"again, pass `handle_parsing_errors=True` to the AgentExecutor. 
\"\n f\"This is the error: {str(e)}\"\n )\n text = str(e)\n if isinstance(self.handle_parsing_errors, bool):\n if e.send_to_llm:\n observation = str(e.observation)\n text = str(e.llm_output)\n else:\n observation = \"Invalid or incomplete response\"\n elif isinstance(self.handle_parsing_errors, str):\n observation = self.handle_parsing_errors\n elif callable(self.handle_parsing_errors):\n observation = self.handle_parsing_errors(e)\n else:\n raise ValueError(\"Got unexpected type of `handle_parsing_errors`\")\n output = AgentAction(\"_Exception\", observation, text)\n if run_manager:\n run_manager.on_agent_action(output, color=\"green\")\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = ExceptionTool().run(\n output.tool_input,\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n yield AgentStep(action=output, observation=observation)\n return\n\n # If the tool chosen is the finishing tool, then we end and return.\n if isinstance(output, AgentFinish):\n yield output\n return\n\n actions: List[AgentAction]\n if isinstance(output, AgentAction):\n actions = [output]\n else:\n actions = output\n for agent_action in actions:\n yield agent_action\n for agent_action in actions:\n yield self._perform_agent_action(\n name_to_tool_map, color_mapping, agent_action, run_manager\n )"}, {"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1321, "func_end_lineno": 1340, "func_code": " def _take_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Union[AgentFinish, List[Tuple[AgentAction, str]]]:\n return self._consume_next_step(\n [\n a\n for a in self._iter_next_step(\n name_to_tool_map,\n color_mapping,\n inputs,\n intermediate_steps,\n run_manager,\n )\n ]\n )"}], "type": ["function_empty", "Development"], "node": ["langchain.agents.agent.AgentExecutor._action_agent", "langchain.agents.agent.AgentExecutor._perform_agent_action", "langchain.agents.agent.AgentExecutor._iter_next_step", "langchain.agents.agent.AgentExecutor._take_next_step"], "language": "Python", "toolfunc_count": 1, "func_count": 4, "pytest_info": {"total_num": 14, "base_passed_num": 2}} {"id": ["langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_action_agent", "langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_perform_agent_action", "langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_iter_next_step"], "project": "langchain", "origin_file": ["langchain/agents/agent.py", "langchain/agents/agent.py", "langchain/agents/agent.py"], "test_list": ["libs/langchain/tests/unit_tests/agents/test_agent.py", "libs/langchain/tests/unit_tests/agents/test_agent_iterator.py"], "prob_info": [{"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1177, "func_end_lineno": 1189, "func_code": " def _action_agent(self) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:\n \"\"\"Type cast self.agent.\n\n If the `agent` attribute is a Runnable, it will be converted one of\n RunnableAgentType in the validate_runnable_agent root_validator.\n\n To support instantiating with a Runnable, here we explicitly cast the type\n to reflect the changes made in the root_validator.\n \"\"\"\n if isinstance(self.agent, Runnable):\n return cast(RunnableAgentType, self.agent)\n else:\n return 
self.agent"}, {"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1419, "func_end_lineno": 1456, "func_code": " def _perform_agent_action(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n agent_action: AgentAction,\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> AgentStep:\n if run_manager:\n run_manager.on_agent_action(agent_action, color=\"green\")\n # Otherwise we lookup the tool\n if agent_action.tool in name_to_tool_map:\n tool = name_to_tool_map[agent_action.tool]\n return_direct = tool.return_direct\n color = color_mapping[agent_action.tool]\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n if return_direct:\n tool_run_kwargs[\"llm_prefix\"] = \"\"\n # We then call the tool on the tool input to get an observation\n observation = tool.run(\n agent_action.tool_input,\n verbose=self.verbose,\n color=color,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n else:\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = InvalidTool().run(\n {\n \"requested_tool_name\": agent_action.tool,\n \"available_tool_names\": list(name_to_tool_map.keys()),\n },\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n return AgentStep(action=agent_action, observation=observation)"}, {"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1342, "func_end_lineno": 1417, "func_code": " def _iter_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]:\n \"\"\"Take a single step in the thought-action-observation loop.\n\n Override this to take control of how the agent makes and acts on choices.\n \"\"\"\n try:\n intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)\n\n # Call the LLM to see what to do.\n output = self._action_agent.plan(\n intermediate_steps,\n callbacks=run_manager.get_child() if run_manager else None,\n **inputs,\n )\n except OutputParserException as e:\n if isinstance(self.handle_parsing_errors, bool):\n raise_error = not self.handle_parsing_errors\n else:\n raise_error = False\n if raise_error:\n raise ValueError(\n \"An output parsing error occurred. \"\n \"In order to pass this error back to the agent and have it try \"\n \"again, pass `handle_parsing_errors=True` to the AgentExecutor. 
\"\n f\"This is the error: {str(e)}\"\n )\n text = str(e)\n if isinstance(self.handle_parsing_errors, bool):\n if e.send_to_llm:\n observation = str(e.observation)\n text = str(e.llm_output)\n else:\n observation = \"Invalid or incomplete response\"\n elif isinstance(self.handle_parsing_errors, str):\n observation = self.handle_parsing_errors\n elif callable(self.handle_parsing_errors):\n observation = self.handle_parsing_errors(e)\n else:\n raise ValueError(\"Got unexpected type of `handle_parsing_errors`\")\n output = AgentAction(\"_Exception\", observation, text)\n if run_manager:\n run_manager.on_agent_action(output, color=\"green\")\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = ExceptionTool().run(\n output.tool_input,\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n yield AgentStep(action=output, observation=observation)\n return\n\n # If the tool chosen is the finishing tool, then we end and return.\n if isinstance(output, AgentFinish):\n yield output\n return\n\n actions: List[AgentAction]\n if isinstance(output, AgentAction):\n actions = [output]\n else:\n actions = output\n for agent_action in actions:\n yield agent_action\n for agent_action in actions:\n yield self._perform_agent_action(\n name_to_tool_map, color_mapping, agent_action, run_manager\n )"}], "type": ["function_empty", "Development"], "node": ["langchain.agents.agent.AgentExecutor._action_agent", "langchain.agents.agent.AgentExecutor._perform_agent_action", "langchain.agents.agent.AgentExecutor._iter_next_step"], "language": "Python", "toolfunc_count": 1, "func_count": 3, "pytest_info": {"total_num": 28, "base_passed_num": 2}} {"id": ["langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_action_agent", "langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_perform_agent_action", "langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_iter_next_step", "langchain.libs.langchain.langchain.agents.agent_iterator.AgentExecutorIterator::__iter__", "langchain.libs.langchain.langchain.agents.agent.AgentExecutor::stream"], "project": "langchain", "origin_file": ["langchain/agents/agent.py", "langchain/agents/agent.py", "langchain/agents/agent.py", "langchain/agents/agent_iterator.py", "langchain/agents/agent.py"], "test_list": ["libs/langchain/tests/unit_tests/agents/test_agent.py"], "prob_info": [{"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1177, "func_end_lineno": 1189, "func_code": " def _action_agent(self) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:\n \"\"\"Type cast self.agent.\n\n If the `agent` attribute is a Runnable, it will be converted one of\n RunnableAgentType in the validate_runnable_agent root_validator.\n\n To support instantiating with a Runnable, here we explicitly cast the type\n to reflect the changes made in the root_validator.\n \"\"\"\n if isinstance(self.agent, Runnable):\n return cast(RunnableAgentType, self.agent)\n else:\n return self.agent"}, {"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1419, "func_end_lineno": 1456, "func_code": " def _perform_agent_action(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n agent_action: AgentAction,\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> AgentStep:\n if run_manager:\n run_manager.on_agent_action(agent_action, color=\"green\")\n # Otherwise we lookup the tool\n if agent_action.tool in 
name_to_tool_map:\n tool = name_to_tool_map[agent_action.tool]\n return_direct = tool.return_direct\n color = color_mapping[agent_action.tool]\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n if return_direct:\n tool_run_kwargs[\"llm_prefix\"] = \"\"\n # We then call the tool on the tool input to get an observation\n observation = tool.run(\n agent_action.tool_input,\n verbose=self.verbose,\n color=color,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n else:\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = InvalidTool().run(\n {\n \"requested_tool_name\": agent_action.tool,\n \"available_tool_names\": list(name_to_tool_map.keys()),\n },\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n return AgentStep(action=agent_action, observation=observation)"}, {"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1342, "func_end_lineno": 1417, "func_code": " def _iter_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]:\n \"\"\"Take a single step in the thought-action-observation loop.\n\n Override this to take control of how the agent makes and acts on choices.\n \"\"\"\n try:\n intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)\n\n # Call the LLM to see what to do.\n output = self._action_agent.plan(\n intermediate_steps,\n callbacks=run_manager.get_child() if run_manager else None,\n **inputs,\n )\n except OutputParserException as e:\n if isinstance(self.handle_parsing_errors, bool):\n raise_error = not self.handle_parsing_errors\n else:\n raise_error = False\n if raise_error:\n raise ValueError(\n \"An output parsing error occurred. \"\n \"In order to pass this error back to the agent and have it try \"\n \"again, pass `handle_parsing_errors=True` to the AgentExecutor. 
\"\n f\"This is the error: {str(e)}\"\n )\n text = str(e)\n if isinstance(self.handle_parsing_errors, bool):\n if e.send_to_llm:\n observation = str(e.observation)\n text = str(e.llm_output)\n else:\n observation = \"Invalid or incomplete response\"\n elif isinstance(self.handle_parsing_errors, str):\n observation = self.handle_parsing_errors\n elif callable(self.handle_parsing_errors):\n observation = self.handle_parsing_errors(e)\n else:\n raise ValueError(\"Got unexpected type of `handle_parsing_errors`\")\n output = AgentAction(\"_Exception\", observation, text)\n if run_manager:\n run_manager.on_agent_action(output, color=\"green\")\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = ExceptionTool().run(\n output.tool_input,\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n yield AgentStep(action=output, observation=observation)\n return\n\n # If the tool chosen is the finishing tool, then we end and return.\n if isinstance(output, AgentFinish):\n yield output\n return\n\n actions: List[AgentAction]\n if isinstance(output, AgentAction):\n actions = [output]\n else:\n actions = output\n for agent_action in actions:\n yield agent_action\n for agent_action in actions:\n yield self._perform_agent_action(\n name_to_tool_map, color_mapping, agent_action, run_manager\n )"}, {"class_start_lineno": 46, "class_end_lineno": 418, "func_start_lineno": 174, "func_end_lineno": 234, "func_code": " def __iter__(self: \"AgentExecutorIterator\") -> Iterator[AddableDict]:\n logger.debug(\"Initialising AgentExecutorIterator\")\n self.reset()\n callback_manager = CallbackManager.configure(\n self.callbacks,\n self.agent_executor.callbacks,\n self.agent_executor.verbose,\n self.tags,\n self.agent_executor.tags,\n self.metadata,\n self.agent_executor.metadata,\n )\n run_manager = callback_manager.on_chain_start(\n dumpd(self.agent_executor),\n self.inputs,\n self.run_id,\n name=self.run_name,\n )\n try:\n while self.agent_executor._should_continue(\n self.iterations, self.time_elapsed\n ):\n # take the next step: this plans next action, executes it,\n # yielding action and observation as they are generated\n next_step_seq: NextStepOutput = []\n for chunk in self.agent_executor._iter_next_step(\n self.name_to_tool_map,\n self.color_mapping,\n self.inputs,\n self.intermediate_steps,\n run_manager,\n ):\n next_step_seq.append(chunk)\n # if we're yielding actions, yield them as they come\n # do not yield AgentFinish, which will be handled below\n if self.yield_actions:\n if isinstance(chunk, AgentAction):\n yield AddableDict(actions=[chunk], messages=chunk.messages)\n elif isinstance(chunk, AgentStep):\n yield AddableDict(steps=[chunk], messages=chunk.messages)\n\n # convert iterator output to format handled by _process_next_step_output\n next_step = self.agent_executor._consume_next_step(next_step_seq)\n # update iterations and time elapsed\n self.update_iterations()\n # decide if this is the final output\n output = self._process_next_step_output(next_step, run_manager)\n is_final = \"intermediate_step\" not in output\n # yield the final output always\n # for backwards compat, yield int. 
output if not yielding actions\n if not self.yield_actions or is_final:\n yield output\n # if final output reached, stop iteration\n if is_final:\n return\n except BaseException as e:\n run_manager.on_chain_error(e)\n raise\n\n # if we got here means we exhausted iterations or time\n yield self._stop(run_manager)"}, {"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1745, "func_end_lineno": 1774, "func_code": " def stream(\n self,\n input: Union[Dict[str, Any], Any],\n config: Optional[RunnableConfig] = None,\n **kwargs: Any,\n ) -> Iterator[AddableDict]:\n \"\"\"Enables streaming over steps taken to reach final output.\n\n Args:\n input: Input to the agent.\n config: Config to use.\n kwargs: Additional arguments.\n\n Yields:\n AddableDict: Addable dictionary.\n \"\"\"\n config = ensure_config(config)\n iterator = AgentExecutorIterator(\n self,\n input,\n config.get(\"callbacks\"),\n tags=config.get(\"tags\"),\n metadata=config.get(\"metadata\"),\n run_name=config.get(\"run_name\"),\n run_id=config.get(\"run_id\"),\n yield_actions=True,\n **kwargs,\n )\n for step in iterator:\n yield step"}], "type": ["function_empty", "Development"], "node": ["langchain.agents.agent.AgentExecutor._action_agent", "langchain.agents.agent.AgentExecutor._perform_agent_action", "langchain.agents.agent.AgentExecutor._iter_next_step", "langchain.agents.agent_iterator.AgentExecutorIterator.__iter__", "langchain.agents.agent.AgentExecutor.stream"], "language": "Python", "toolfunc_count": 2, "func_count": 5, "pytest_info": {"total_num": 14, "base_passed_num": 2}} {"id": ["langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_action_agent", "langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_perform_agent_action", "langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_iter_next_step", "langchain.libs.langchain.langchain.agents.agent_iterator.AgentExecutorIterator::__iter__"], "project": "langchain", "origin_file": ["langchain/agents/agent.py", "langchain/agents/agent.py", "langchain/agents/agent.py", "langchain/agents/agent_iterator.py"], "test_list": ["libs/langchain/tests/unit_tests/agents/test_agent.py"], "prob_info": [{"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1177, "func_end_lineno": 1189, "func_code": " def _action_agent(self) -> Union[BaseSingleActionAgent, BaseMultiActionAgent]:\n \"\"\"Type cast self.agent.\n\n If the `agent` attribute is a Runnable, it will be converted one of\n RunnableAgentType in the validate_runnable_agent root_validator.\n\n To support instantiating with a Runnable, here we explicitly cast the type\n to reflect the changes made in the root_validator.\n \"\"\"\n if isinstance(self.agent, Runnable):\n return cast(RunnableAgentType, self.agent)\n else:\n return self.agent"}, {"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1419, "func_end_lineno": 1456, "func_code": " def _perform_agent_action(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n agent_action: AgentAction,\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> AgentStep:\n if run_manager:\n run_manager.on_agent_action(agent_action, color=\"green\")\n # Otherwise we lookup the tool\n if agent_action.tool in name_to_tool_map:\n tool = name_to_tool_map[agent_action.tool]\n return_direct = tool.return_direct\n color = color_mapping[agent_action.tool]\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n if return_direct:\n tool_run_kwargs[\"llm_prefix\"] 
= \"\"\n # We then call the tool on the tool input to get an observation\n observation = tool.run(\n agent_action.tool_input,\n verbose=self.verbose,\n color=color,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n else:\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = InvalidTool().run(\n {\n \"requested_tool_name\": agent_action.tool,\n \"available_tool_names\": list(name_to_tool_map.keys()),\n },\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n return AgentStep(action=agent_action, observation=observation)"}, {"class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1342, "func_end_lineno": 1417, "func_code": " def _iter_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]:\n \"\"\"Take a single step in the thought-action-observation loop.\n\n Override this to take control of how the agent makes and acts on choices.\n \"\"\"\n try:\n intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)\n\n # Call the LLM to see what to do.\n output = self._action_agent.plan(\n intermediate_steps,\n callbacks=run_manager.get_child() if run_manager else None,\n **inputs,\n )\n except OutputParserException as e:\n if isinstance(self.handle_parsing_errors, bool):\n raise_error = not self.handle_parsing_errors\n else:\n raise_error = False\n if raise_error:\n raise ValueError(\n \"An output parsing error occurred. \"\n \"In order to pass this error back to the agent and have it try \"\n \"again, pass `handle_parsing_errors=True` to the AgentExecutor. 
\"\n f\"This is the error: {str(e)}\"\n )\n text = str(e)\n if isinstance(self.handle_parsing_errors, bool):\n if e.send_to_llm:\n observation = str(e.observation)\n text = str(e.llm_output)\n else:\n observation = \"Invalid or incomplete response\"\n elif isinstance(self.handle_parsing_errors, str):\n observation = self.handle_parsing_errors\n elif callable(self.handle_parsing_errors):\n observation = self.handle_parsing_errors(e)\n else:\n raise ValueError(\"Got unexpected type of `handle_parsing_errors`\")\n output = AgentAction(\"_Exception\", observation, text)\n if run_manager:\n run_manager.on_agent_action(output, color=\"green\")\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = ExceptionTool().run(\n output.tool_input,\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n yield AgentStep(action=output, observation=observation)\n return\n\n # If the tool chosen is the finishing tool, then we end and return.\n if isinstance(output, AgentFinish):\n yield output\n return\n\n actions: List[AgentAction]\n if isinstance(output, AgentAction):\n actions = [output]\n else:\n actions = output\n for agent_action in actions:\n yield agent_action\n for agent_action in actions:\n yield self._perform_agent_action(\n name_to_tool_map, color_mapping, agent_action, run_manager\n )"}, {"class_start_lineno": 46, "class_end_lineno": 418, "func_start_lineno": 174, "func_end_lineno": 234, "func_code": " def __iter__(self: \"AgentExecutorIterator\") -> Iterator[AddableDict]:\n logger.debug(\"Initialising AgentExecutorIterator\")\n self.reset()\n callback_manager = CallbackManager.configure(\n self.callbacks,\n self.agent_executor.callbacks,\n self.agent_executor.verbose,\n self.tags,\n self.agent_executor.tags,\n self.metadata,\n self.agent_executor.metadata,\n )\n run_manager = callback_manager.on_chain_start(\n dumpd(self.agent_executor),\n self.inputs,\n self.run_id,\n name=self.run_name,\n )\n try:\n while self.agent_executor._should_continue(\n self.iterations, self.time_elapsed\n ):\n # take the next step: this plans next action, executes it,\n # yielding action and observation as they are generated\n next_step_seq: NextStepOutput = []\n for chunk in self.agent_executor._iter_next_step(\n self.name_to_tool_map,\n self.color_mapping,\n self.inputs,\n self.intermediate_steps,\n run_manager,\n ):\n next_step_seq.append(chunk)\n # if we're yielding actions, yield them as they come\n # do not yield AgentFinish, which will be handled below\n if self.yield_actions:\n if isinstance(chunk, AgentAction):\n yield AddableDict(actions=[chunk], messages=chunk.messages)\n elif isinstance(chunk, AgentStep):\n yield AddableDict(steps=[chunk], messages=chunk.messages)\n\n # convert iterator output to format handled by _process_next_step_output\n next_step = self.agent_executor._consume_next_step(next_step_seq)\n # update iterations and time elapsed\n self.update_iterations()\n # decide if this is the final output\n output = self._process_next_step_output(next_step, run_manager)\n is_final = \"intermediate_step\" not in output\n # yield the final output always\n # for backwards compat, yield int. 
output if not yielding actions\n if not self.yield_actions or is_final:\n yield output\n # if final output reached, stop iteration\n if is_final:\n return\n except BaseException as e:\n run_manager.on_chain_error(e)\n raise\n\n # if we got here means we exhausted iterations or time\n yield self._stop(run_manager)"}], "type": ["function_empty", "Development"], "node": ["langchain.agents.agent.AgentExecutor._action_agent", "langchain.agents.agent.AgentExecutor._perform_agent_action", "langchain.agents.agent.AgentExecutor._iter_next_step", "langchain.agents.agent_iterator.AgentExecutorIterator.__iter__"], "language": "Python", "toolfunc_count": 1, "func_count": 4, "pytest_info": {"total_num": 14, "base_passed_num": 2}} {"id": ["open-iris.src.iris.pipelines.iris_pipeline.IRISPipeline::instanciate_class", "open-iris.src.iris.pipelines.iris_pipeline.IRISPipeline::instanciate_pipeline", "open-iris.src.iris.pipelines.iris_pipeline.IRISPipeline::instanciate_node", "open-iris.src.iris.pipelines.iris_pipeline.IRISPipeline::instanciate_nodes"], "project": "open-iris", "origin_file": ["iris/pipelines/iris_pipeline.py", "iris/pipelines/iris_pipeline.py", "iris/pipelines/iris_pipeline.py", "iris/pipelines/iris_pipeline.py"], "test_list": ["tests/unit_tests/pipelines/test_iris_pipeline.py"], "prob_info": [{"class_start_lineno": 27, "class_end_lineno": 324, "func_start_lineno": 225, "func_end_lineno": 245, "func_code": " def instanciate_class(self, class_name: str, kwargs: Dict[str, Any]) -> Callable:\n \"\"\"Instanciate a class from its string definition and its kwargs.\n\n This function relies on pydoc.locate, a safe way to instanciate a class from its string definition, which itself relies on pydoc.safe_import.\n\n Args:\n class_name (str): name of the class.\n kwargs (Dict): kwargs to pass to the class at instanciation time\n\n Returns:\n Callable: the instanciated class\n\n Raises:\n IRISPipelineError: Raised if the class cannot be located.\n \"\"\"\n object_class = pydoc.locate(class_name)\n\n if object_class is None:\n raise IRISPipelineError(f\"Could not locate class {class_name}\")\n\n return object_class(**kwargs)"}, {"class_start_lineno": 27, "class_end_lineno": 324, "func_start_lineno": 179, "func_end_lineno": 200, "func_code": " def instanciate_pipeline(self) -> List[PipelineNode]:\n \"\"\"Given a list of PipelineNodes, crawl the parameters and instanciate the PipelineClass available.\n\n Returns:\n List[PipelineNode]: pipeline with instanciated parameters\n \"\"\"\n instanciated_pipeline = []\n for node in self.params.pipeline:\n current_node = node\n for param_name, param_value in node.algorithm.params.items():\n if isinstance(param_value, (tuple, list)):\n for i, value in enumerate(param_value):\n if isinstance(value, PipelineClass):\n current_node.algorithm.params[param_name][i] = self.instanciate_class(\n class_name=value.class_name, kwargs=value.params\n )\n elif isinstance(param_value, PipelineClass):\n current_node.algorithm.params[param_name] = self.instanciate_class(\n class_name=param_value.class_name, kwargs=param_value.params\n )\n instanciated_pipeline.append(current_node)\n return instanciated_pipeline"}, {"class_start_lineno": 27, "class_end_lineno": 324, "func_start_lineno": 202, "func_end_lineno": 223, "func_code": " def instanciate_node(\n self, node_class: str, algorithm_params: Dict[str, Any], callbacks: Optional[List[PipelineClass]]\n ) -> Algorithm:\n \"\"\"Instanciate an Algorithm from its class, kwargs and optional Callbacks.\n\n NOTE: All callbacks of type listed in 
self.env.disabled_qa will be filtered out. This allows one config file to be used in various QA standards levels.\n\n Args:\n node_class (str): Node's class.\n algorithm_params (Dict[str, Any]): Node's kwargs.\n callbacks (Optional[List[PipelineClass]]): list of callbacks.\n\n Returns:\n Algorithm: instanciated node.\n \"\"\"\n if callbacks is not None and len(callbacks):\n instanciated_callbacks = [self.instanciate_class(cb.class_name, cb.params) for cb in callbacks]\n instanciated_callbacks = [cb for cb in instanciated_callbacks if type(cb) not in self.env.disabled_qa]\n\n algorithm_params = {**algorithm_params, **{\"callbacks\": instanciated_callbacks}}\n\n return self.instanciate_class(node_class, algorithm_params)"}, {"class_start_lineno": 27, "class_end_lineno": 324, "func_start_lineno": 159, "func_end_lineno": 177, "func_code": " def instanciate_nodes(self) -> Dict[str, Algorithm]:\n \"\"\"Given a list of PipelineNode, return the associated instanciated nodes.\n\n NOTE: All nodes of type listed in self.env.disabled_qa will be filtered out. This allows one config file to be used in various QA standards levels.\n\n Returns:\n Dict[str, Algorithm]: instanciated nodes.\n \"\"\"\n instanciated_pipeline = self.instanciate_pipeline()\n nodes = {\n node.name: self.instanciate_node(\n node_class=node.algorithm.class_name,\n algorithm_params=node.algorithm.params,\n callbacks=node.callbacks,\n )\n for node in instanciated_pipeline\n }\n nodes = {node_name: node for node_name, node in nodes.items() if type(node) not in self.env.disabled_qa}\n return nodes"}], "type": ["function_empty", "Development"], "node": ["iris.pipelines.iris_pipeline.IRISPipeline.instanciate_class", "iris.pipelines.iris_pipeline.IRISPipeline.instanciate_pipeline", "iris.pipelines.iris_pipeline.IRISPipeline.instanciate_node", "iris.pipelines.iris_pipeline.IRISPipeline.instanciate_nodes"], "language": "Python", "toolfunc_count": 3, "func_count": 4, "pytest_info": {"total_num": 33, "base_passed_num": 0}} {"id": ["rdt.rdt.transformers.null.NullTransformer::_get_missing_value_replacement", "rdt.rdt.transformers.null.NullTransformer::fit", "rdt.rdt.transformers.boolean.BinaryEncoder::_fit"], "project": "rdt", "origin_file": ["rdt/transformers/null.py", "rdt/transformers/null.py", "rdt/transformers/boolean.py"], "test_list": ["tests/unit/transformers/test_boolean.py"], "prob_info": [{"class_start_lineno": 13, "class_end_lineno": 194, "func_start_lineno": 60, "func_end_lineno": 93, "func_code": " def _get_missing_value_replacement(self, data):\n \"\"\"Get the fill value to use for the given data.\n\n Args:\n data (pd.Series):\n The data that is being transformed.\n\n Return:\n object:\n The fill value that needs to be used.\n\n Raise:\n TransformerInputError:\n Error raised when data only contains nans and ``_missing_value_replacement``\n is set to 'mean' or 'mode'.\n \"\"\"\n if self._missing_value_replacement is None:\n return None\n\n if self._missing_value_replacement in {'mean', 'mode', 'random'} and pd.isna(data).all():\n msg = (\n f\"'missing_value_replacement' cannot be set to '{self._missing_value_replacement}'\"\n ' when the provided data only contains NaNs. 
Using 0 instead.'\n )\n LOGGER.info(msg)\n return 0\n\n if self._missing_value_replacement == 'mean':\n return data.mean()\n\n if self._missing_value_replacement == 'mode':\n return data.mode(dropna=True)[0]\n\n return self._missing_value_replacement"}, {"class_start_lineno": 13, "class_end_lineno": 194, "func_start_lineno": 95, "func_end_lineno": 122, "func_code": " def fit(self, data):\n \"\"\"Fit the transformer to the data.\n\n Evaluate if the transformer has to create the null column or not.\n\n Args:\n data (pandas.Series):\n Data to transform.\n \"\"\"\n self._missing_value_replacement = self._get_missing_value_replacement(data)\n if self._missing_value_replacement == 'random':\n self._min_value = data.min()\n self._max_value = data.max()\n\n if self._missing_value_generation is not None:\n null_values = data.isna().to_numpy()\n self.nulls = null_values.any()\n\n if not self.nulls and self.models_missing_values():\n self._missing_value_generation = None\n guidance_message = (\n f'Guidance: There are no missing values in column {data.name}. '\n 'Extra column not created.'\n )\n LOGGER.info(guidance_message)\n\n if self._missing_value_generation == 'random':\n self._null_percentage = null_values.sum() / len(data)"}, {"class_start_lineno": 10, "class_end_lineno": 129, "func_start_lineno": 54, "func_end_lineno": 69, "func_code": " def _fit(self, data):\n \"\"\"Fit the transformer to the data.\n\n Args:\n data (pandas.Series):\n Data to fit to.\n \"\"\"\n self.null_transformer = NullTransformer(\n self.missing_value_replacement, self.missing_value_generation\n )\n self.null_transformer.fit(data)\n if self.null_transformer.models_missing_values():\n self.output_properties['is_null'] = {\n 'sdtype': 'float',\n 'next_transformer': None,\n }"}], "type": ["function_empty", "Development"], "node": ["rdt.transformers.null.NullTransformer._get_missing_value_replacement", "rdt.transformers.null.NullTransformer.fit", "rdt.transformers.boolean.BinaryEncoder._fit"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 16, "base_passed_num": 13}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.make_list_of_images", "transformers.src.transformers.image_utils.validate_preprocess_arguments", "transformers.src.transformers.image_transforms.convert_to_rgb", "transformers.src.transformers.models.blip.image_processing_blip.BlipImageProcessor::preprocess"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/blip/image_processing_blip.py"], "test_list": ["tests/models/blip/test_image_processing_blip.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. 
This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 172, "func_end_lineno": 208, "func_code": "def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:\n \"\"\"\n Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.\n If the input is a batch of images, it is converted to a list of images.\n\n Args:\n images (`ImageInput`):\n Image of images to turn into a list of images.\n expected_ndims (`int`, *optional*, defaults to 3):\n Expected number of dimensions for a single input image. If the input image has a different number of\n dimensions, an error is raised.\n \"\"\"\n if is_batched(images):\n return images\n\n # Either the input is a single image, in which case we create a list of length 1\n if isinstance(images, PIL.Image.Image):\n # PIL images are never batched\n return [images]\n\n if is_valid_image(images):\n if images.ndim == expected_ndims + 1:\n # Batch of images\n images = list(images)\n elif images.ndim == expected_ndims:\n # Single image\n images = [images]\n else:\n raise ValueError(\n f\"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got\"\n f\" {images.ndim} dimensions.\"\n )\n return images\n raise ValueError(\n \"Invalid image type. 
Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or \"\n f\"jax.ndarray, but got {type(images)}.\"\n )"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 388, "func_end_lineno": 426, "func_code": "def validate_preprocess_arguments(\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n size_divisibility: Optional[int] = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: Optional[\"PILImageResampling\"] = None,\n):\n \"\"\"\n Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.\n Raises `ValueError` if arguments incompatibility is caught.\n Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,\n sometimes `size_divisibility`, and sometimes `size`. New models and processors added should follow\n existing arguments when possible.\n\n \"\"\"\n if do_rescale and rescale_factor is None:\n raise ValueError(\"`rescale_factor` must be specified if `do_rescale` is `True`.\")\n\n if do_pad and size_divisibility is None:\n # Here, size_divisor might be passed as the value of size\n raise ValueError(\n \"Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if `do_pad` is `True`.\"\n )\n\n if do_normalize and (image_mean is None or image_std is None):\n raise ValueError(\"`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.\")\n\n if do_center_crop and crop_size is None:\n raise ValueError(\"`crop_size` must be specified if `do_center_crop` is `True`.\")\n\n if do_resize and (size is None or resample is None):\n raise ValueError(\"`size` and `resample` must be specified if `do_resize` is `True`.\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 754, "func_end_lineno": 771, "func_code": "def convert_to_rgb(image: ImageInput) -> ImageInput:\n \"\"\"\n Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image\n as is.\n Args:\n image (Image):\n The image to convert.\n \"\"\"\n requires_backends(convert_to_rgb, [\"vision\"])\n\n if not isinstance(image, PIL.Image.Image):\n return image\n\n if image.mode == \"RGB\":\n return image\n\n image = image.convert(\"RGB\")\n return image"}, {"class_start_lineno": 46, "class_end_lineno": 294, "func_start_lineno": 160, "func_end_lineno": 294, "func_code": " def preprocess(\n self,\n images: ImageInput,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: PILImageResampling = None,\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n do_convert_rgb: bool = None,\n data_format: ChannelDimension = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> PIL.Image.Image:\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. 
If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Controls the size of the image after `resize`. The shortest edge of the image is resized to\n `size[\"shortest_edge\"]` whilst preserving the aspect ratio. If the longest edge of this resized image\n is > `int(size[\"shortest_edge\"] * (1333 / 800))`, then the image is resized again to make the longest\n edge equal to `int(size[\"shortest_edge\"] * (1333 / 800))`.\n resample (`PILImageResampling`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image values between [0 - 1].\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to normalize the image by if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to normalize the image by if `do_normalize` is set to `True`.\n do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):\n Whether to convert the image to RGB.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n resample = resample if resample is not None else self.resample\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb\n\n size = size if size is not None else self.size\n size = get_size_dict(size, default_to_square=False)\n\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n\n validate_preprocess_arguments(\n do_rescale=do_rescale,\n rescale_factor=rescale_factor,\n do_normalize=do_normalize,\n image_mean=image_mean,\n image_std=image_std,\n do_resize=do_resize,\n size=size,\n resample=resample,\n )\n # PIL RGBA images are converted to RGB\n if do_convert_rgb:\n images = [convert_to_rgb(image) for image in images]\n\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n if do_resize:\n images = [\n self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_rescale:\n images = [\n self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_normalize:\n images = [\n self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n for image in images\n ]\n\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images\n ]\n\n encoded_outputs = BatchFeature(data={\"pixel_values\": images}, tensor_type=return_tensors)\n\n return encoded_outputs"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.make_list_of_images", "transformers.image_utils.validate_preprocess_arguments", "transformers.image_transforms.convert_to_rgb", "transformers.models.blip.image_processing_blip.BlipImageProcessor.preprocess"], "language": "Python", "toolfunc_count": 4, "func_count": 5, "pytest_info": {"total_num": 20, "base_passed_num": 12}} {"id": ["transformers.src.transformers.image_utils.infer_channel_dimension_format", "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.pad", "transformers.src.transformers.models.bridgetower.image_processing_bridgetower.BridgeTowerImageProcessor::_pad_image", "transformers.src.transformers.models.bridgetower.image_processing_bridgetower.BridgeTowerImageProcessor::pad"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/bridgetower/image_processing_bridgetower.py", "transformers/models/bridgetower/image_processing_bridgetower.py"], "test_list": ["tests/models/bridgetower/test_image_processing_bridgetower.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 220, "func_end_lineno": 254, "func_code": "def infer_channel_dimension_format(\n image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None\n) -> ChannelDimension:\n \"\"\"\n Infers the channel dimension format of `image`.\n\n Args:\n image (`np.ndarray`):\n The image to infer the channel dimension of.\n num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):\n The number of channels of the image.\n\n Returns:\n The channel dimension of the image.\n \"\"\"\n num_channels = num_channels if num_channels is not None else (1, 3)\n num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels\n\n if image.ndim == 3:\n first_dim, last_dim = 0, 2\n elif image.ndim == 4:\n first_dim, last_dim = 1, 3\n else:\n raise ValueError(f\"Unsupported number of image dimensions: {image.ndim}\")\n\n if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:\n logger.warning(\n f\"The channel dimension is ambiguous. Got image shape {image.shape}. 
Assuming channels are the first dimension.\"\n )\n return ChannelDimension.FIRST\n elif image.shape[first_dim] in num_channels:\n return ChannelDimension.FIRST\n elif image.shape[last_dim] in num_channels:\n return ChannelDimension.LAST\n raise ValueError(\"Unable to infer channel dimension format\")"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 667, "func_end_lineno": 750, "func_code": "def pad(\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified (height, width) `padding` and `mode`.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n\n def _expand_for_data_format(values):\n \"\"\"\n Convert values to be in the format expected by np.pad based on the data format.\n \"\"\"\n if isinstance(values, (int, float)):\n values = ((values, values), (values, values))\n elif isinstance(values, tuple) and len(values) == 1:\n values = ((values[0], values[0]), (values[0], values[0]))\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):\n values = (values, values)\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):\n values = values\n else:\n raise ValueError(f\"Unsupported format: {values}\")\n\n # add 0 for channel dimension\n values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0))\n\n # Add additional padding if there's a batch dimension\n values = (0, *values) if image.ndim == 4 else values\n return values\n\n padding = _expand_for_data_format(padding)\n\n if mode == PaddingMode.CONSTANT:\n constant_values = _expand_for_data_format(constant_values)\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n\n image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n return image"}, {"class_start_lineno": 124, "class_end_lineno": 540, "func_start_lineno": 290, "func_end_lineno": 315, "func_code": " def _pad_image(\n self,\n image: np.ndarray,\n output_size: Tuple[int, int],\n constant_values: Union[float, Iterable[float]] = 0,\n data_format: Optional[ChannelDimension] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n \"\"\"\n Pad an image with zeros to the given size.\n \"\"\"\n input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n output_height, output_width = output_size\n\n pad_bottom = output_height - input_height\n pad_right = output_width - input_width\n padding = ((0, pad_bottom), (0, pad_right))\n padded_image = pad(\n image,\n padding,\n mode=PaddingMode.CONSTANT,\n constant_values=constant_values,\n data_format=data_format,\n input_data_format=input_data_format,\n )\n return padded_image"}, {"class_start_lineno": 124, "class_end_lineno": 540, "func_start_lineno": 318, "func_end_lineno": 371, "func_code": " def pad(\n self,\n images: List[np.ndarray],\n constant_values: Union[float, Iterable[float]] = 0,\n return_pixel_mask: bool = True,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: Optional[ChannelDimension] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> BatchFeature:\n \"\"\"\n Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width\n in the batch and optionally returns their corresponding pixel 
mask.\n\n Args:\n image (`np.ndarray`):\n Image to pad.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n return_pixel_mask (`bool`, *optional*, defaults to `True`):\n Whether to return a pixel mask.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred.\n \"\"\"\n pad_size = get_max_height_width(images, input_data_format=input_data_format)\n\n padded_images = [\n self._pad_image(\n image,\n pad_size,\n constant_values=constant_values,\n data_format=data_format,\n input_data_format=input_data_format,\n )\n for image in images\n ]\n data = {\"pixel_values\": padded_images}\n\n if return_pixel_mask:\n masks = [\n make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)\n for image in images\n ]\n data[\"pixel_mask\"] = masks\n\n return BatchFeature(data=data, tensor_type=return_tensors)"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.infer_channel_dimension_format", "transformers.image_utils.get_image_size", "transformers.image_transforms.pad", "transformers.models.bridgetower.image_processing_bridgetower.BridgeTowerImageProcessor._pad_image", "transformers.models.bridgetower.image_processing_bridgetower.BridgeTowerImageProcessor.pad"], "language": "Python", "toolfunc_count": 3, "func_count": 5, "pytest_info": {"total_num": 12, "base_passed_num": 7}} {"id": ["transformers.src.transformers.image_utils.infer_channel_dimension_format", "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.pad", "transformers.src.transformers.models.bridgetower.image_processing_bridgetower.BridgeTowerImageProcessor::_pad_image"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/bridgetower/image_processing_bridgetower.py"], "test_list": ["tests/models/bridgetower/test_image_processing_bridgetower.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 220, "func_end_lineno": 254, "func_code": "def infer_channel_dimension_format(\n image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None\n) -> ChannelDimension:\n \"\"\"\n Infers the channel dimension format of `image`.\n\n Args:\n image (`np.ndarray`):\n The image to infer the channel dimension of.\n num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):\n The number of channels of the image.\n\n Returns:\n The channel dimension of the image.\n \"\"\"\n num_channels = num_channels if num_channels is not None else (1, 3)\n num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels\n\n if image.ndim == 3:\n first_dim, last_dim = 0, 2\n elif image.ndim == 4:\n first_dim, 
last_dim = 1, 3\n else:\n raise ValueError(f\"Unsupported number of image dimensions: {image.ndim}\")\n\n if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:\n logger.warning(\n f\"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension.\"\n )\n return ChannelDimension.FIRST\n elif image.shape[first_dim] in num_channels:\n return ChannelDimension.FIRST\n elif image.shape[last_dim] in num_channels:\n return ChannelDimension.LAST\n raise ValueError(\"Unable to infer channel dimension format\")"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 667, "func_end_lineno": 750, "func_code": "def pad(\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified (height, width) `padding` and `mode`.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n\n def _expand_for_data_format(values):\n \"\"\"\n Convert values to be in the format expected by np.pad based on the data format.\n \"\"\"\n if isinstance(values, (int, float)):\n values = ((values, values), (values, values))\n elif isinstance(values, tuple) and len(values) == 1:\n values = ((values[0], values[0]), (values[0], values[0]))\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):\n values = (values, values)\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):\n values = values\n else:\n raise ValueError(f\"Unsupported format: {values}\")\n\n # add 0 for channel dimension\n values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0))\n\n # Add additional padding if there's a batch dimension\n values = (0, *values) if image.ndim == 4 else values\n return values\n\n padding = _expand_for_data_format(padding)\n\n if mode == PaddingMode.CONSTANT:\n constant_values = _expand_for_data_format(constant_values)\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n\n image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n return image"}, {"class_start_lineno": 124, "class_end_lineno": 540, "func_start_lineno": 290, "func_end_lineno": 315, "func_code": " def _pad_image(\n self,\n image: np.ndarray,\n output_size: Tuple[int, int],\n constant_values: Union[float, Iterable[float]] = 0,\n data_format: Optional[ChannelDimension] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n \"\"\"\n Pad an image with zeros to the given size.\n \"\"\"\n input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n output_height, output_width = output_size\n\n pad_bottom = output_height - input_height\n pad_right = output_width - input_width\n padding = ((0, pad_bottom), (0, pad_right))\n padded_image = pad(\n image,\n padding,\n mode=PaddingMode.CONSTANT,\n constant_values=constant_values,\n data_format=data_format,\n input_data_format=input_data_format,\n )\n return padded_image"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.infer_channel_dimension_format", "transformers.image_utils.get_image_size", "transformers.image_transforms.pad", 
"transformers.models.bridgetower.image_processing_bridgetower.BridgeTowerImageProcessor._pad_image"], "language": "Python", "toolfunc_count": 3, "func_count": 4, "pytest_info": {"total_num": 12, "base_passed_num": 7}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.validate_preprocess_arguments", "transformers.src.transformers.utils.generic.to_numpy", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.bridgetower.image_processing_bridgetower.BridgeTowerImageProcessor::preprocess"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_utils.py", "transformers/models/bridgetower/image_processing_bridgetower.py"], "test_list": ["tests/models/bridgetower/test_image_processing_bridgetower.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 388, "func_end_lineno": 426, "func_code": "def validate_preprocess_arguments(\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n size_divisibility: Optional[int] = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: Optional[\"PILImageResampling\"] = None,\n):\n \"\"\"\n Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.\n Raises `ValueError` if arguments incompatibility is caught.\n Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,\n sometimes `size_divisibility`, and sometimes `size`. 
New models and processors added should follow\n existing arguments when possible.\n\n \"\"\"\n if do_rescale and rescale_factor is None:\n raise ValueError(\"`rescale_factor` must be specified if `do_rescale` is `True`.\")\n\n if do_pad and size_divisibility is None:\n # Here, size_divisor might be passed as the value of size\n raise ValueError(\n \"Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if `do_pad` is `True`.\"\n )\n\n if do_normalize and (image_mean is None or image_std is None):\n raise ValueError(\"`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.\")\n\n if do_center_crop and crop_size is None:\n raise ValueError(\"`crop_size` must be specified if `do_center_crop` is `True`.\")\n\n if do_resize and (size is None or resample is None):\n raise ValueError(\"`size` and `resample` must be specified if `do_resize` is `True`.\")"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 284, "func_end_lineno": 307, "func_code": "def to_numpy(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.\n \"\"\"\n\n framework_to_numpy = {\n \"pt\": lambda obj: obj.detach().cpu().numpy(),\n \"tf\": lambda obj: obj.numpy(),\n \"jax\": lambda obj: np.asarray(obj),\n \"np\": lambda obj: obj,\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_numpy(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return np.array(obj)\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_numpy[framework](obj)\n\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" np.ndarray:\n if not is_valid_image(img):\n raise ValueError(f\"Invalid image type: {type(img)}\")\n\n if is_vision_available() and isinstance(img, PIL.Image.Image):\n return np.array(img)\n return to_numpy(img)"}, {"class_start_lineno": 124, "class_end_lineno": 540, "func_start_lineno": 374, "func_end_lineno": 540, "func_code": " def preprocess(\n self,\n images: ImageInput,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n size_divisor: Optional[int] = None,\n resample: PILImageResampling = None,\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Dict[str, int] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: ChannelDimension = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> PIL.Image.Image:\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. 
If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Controls the size of the image after `resize`. The shortest edge of the image is resized to\n `size[\"shortest_edge\"]` whilst preserving the aspect ratio. If the longest edge of this resized image\n is > `int(size[\"shortest_edge\"] * (1333 / 800))`, then the image is resized again to make the longest\n edge equal to `int(size[\"shortest_edge\"] * (1333 / 800))`.\n size_divisor (`int`, *optional*, defaults to `self.size_divisor`):\n The image is resized to a size that is a multiple of this value.\n resample (`PILImageResampling`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image values between [0 - 1].\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to normalize the image by if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to normalize the image by if `do_normalize` is set to `True`.\n do_pad (`bool`, *optional*, defaults to `self.do_pad`):\n Whether to pad the image to the (max_height, max_width) in the batch. If `True`, a pixel mask is also\n created and returned.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the\n image is padded with 0's and then center cropped.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n Size of the image after center crop. If one edge the image is smaller than `crop_size`, it will be\n padded with zeros and then cropped\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n size_divisor = size_divisor if size_divisor is not None else self.size_divisor\n resample = resample if resample is not None else self.resample\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n do_pad = do_pad if do_pad is not None else self.do_pad\n do_center_crop if do_center_crop is not None else self.do_center_crop\n # For backwards compatibility. Initial version of this processor was cropping to the \"size\" argument, which\n # it should default to if crop_size is undefined.\n crop_size = (\n crop_size if crop_size is not None else (self.crop_size if self.crop_size is not None else self.size)\n )\n\n size = size if size is not None else self.size\n size = get_size_dict(size, default_to_square=False)\n\n if not is_batched(images):\n images = [images]\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n # Here, crop_size is used only if it is set, else size will be used.\n validate_preprocess_arguments(\n do_rescale=do_rescale,\n rescale_factor=rescale_factor,\n do_normalize=do_normalize,\n image_mean=image_mean,\n image_std=image_std,\n do_pad=do_pad,\n size_divisibility=size_divisor,\n do_center_crop=do_center_crop,\n crop_size=crop_size,\n do_resize=do_resize,\n size=size,\n resample=resample,\n )\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if do_resize:\n images = [\n self.resize(\n image=image,\n size=size,\n size_divisor=size_divisor,\n resample=resample,\n input_data_format=input_data_format,\n )\n for image in images\n ]\n\n if do_center_crop:\n images = [\n self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images\n ]\n\n if do_rescale:\n images = [\n self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_normalize:\n images = [\n self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n for image in images\n ]\n\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images\n ]\n\n if do_pad:\n encoded_outputs = self.pad(\n images, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=data_format\n )\n else:\n encoded_outputs = BatchFeature(data={\"pixel_values\": images}, tensor_type=return_tensors)\n\n return encoded_outputs"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.validate_preprocess_arguments", "transformers.utils.generic.to_numpy", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_utils.to_numpy_array", "transformers.models.bridgetower.image_processing_bridgetower.BridgeTowerImageProcessor.preprocess"], "language": "Python", "toolfunc_count": 5, "func_count": 6, "pytest_info": {"total_num": 12, "base_passed_num": 6}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.models.chameleon.image_processing_chameleon.make_batched_images", "transformers.src.transformers.image_utils.validate_preprocess_arguments", "transformers.src.transformers.models.chameleon.image_processing_chameleon.ChameleonImageProcessor::blend_rgba", "transformers.src.transformers.utils.generic.to_numpy", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.chameleon.image_processing_chameleon.ChameleonImageProcessor::preprocess"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/models/chameleon/image_processing_chameleon.py", "transformers/image_utils.py", "transformers/models/chameleon/image_processing_chameleon.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_utils.py", "transformers/models/chameleon/image_processing_chameleon.py"], "test_list": ["tests/models/chameleon/test_image_processing_chameleon.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. 
This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 364, "func_start_lineno": 47, "func_end_lineno": 67, "func_code": "def make_batched_images(images) -> List[List[ImageInput]]:\n \"\"\"\n Accepts images in list or nested list format, and makes a list of images for preprocessing.\n\n Args:\n images (`Union[List[List[ImageInput]], List[ImageInput], ImageInput]`):\n The input image.\n\n Returns:\n list: A list of images.\n \"\"\"\n if isinstance(images, (list, tuple)) and isinstance(images[0], (list, tuple)) and is_valid_image(images[0][0]):\n return [img for img_list in images for img in img_list]\n\n elif isinstance(images, (list, tuple)) and is_valid_image(images[0]):\n return images\n\n elif is_valid_image(images):\n return [images]\n\n raise ValueError(f\"Could not make batched video from {images}\")"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 388, "func_end_lineno": 426, "func_code": "def validate_preprocess_arguments(\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n size_divisibility: Optional[int] = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: Optional[\"PILImageResampling\"] = None,\n):\n \"\"\"\n Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.\n Raises `ValueError` if arguments incompatibility is caught.\n Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,\n sometimes `size_divisibility`, and sometimes `size`. 
New models and processors added should follow\n existing arguments when possible.\n\n \"\"\"\n if do_rescale and rescale_factor is None:\n raise ValueError(\"`rescale_factor` must be specified if `do_rescale` is `True`.\")\n\n if do_pad and size_divisibility is None:\n # Here, size_divisor might be passed as the value of size\n raise ValueError(\n \"Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if `do_pad` is `True`.\"\n )\n\n if do_normalize and (image_mean is None or image_std is None):\n raise ValueError(\"`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.\")\n\n if do_center_crop and crop_size is None:\n raise ValueError(\"`crop_size` must be specified if `do_center_crop` is `True`.\")\n\n if do_resize and (size is None or resample is None):\n raise ValueError(\"`size` and `resample` must be specified if `do_resize` is `True`.\")"}, {"class_start_lineno": 70, "class_end_lineno": 364, "func_start_lineno": 339, "func_end_lineno": 364, "func_code": " def blend_rgba(self, image: ImageInput) -> ImageInput:\n \"\"\"\n Convert image to RGB by blending the transparency layer if it's in RGBA format.\n If image is not `PIL.Image`, it si simply returned without modifications.\n\n Args:\n image (`ImageInput`):\n Image to convert.\n \"\"\"\n\n if not isinstance(image, PIL.Image.Image):\n return image\n elif image.mode == \"RGB\":\n return image\n\n img_rgba = np.array(image.convert(\"RGBA\"))\n\n # If there is no transparency layer, simple convert and return.\n if not (img_rgba[:, :, 3] < 255).any():\n return image.convert(\"RGB\")\n\n # There is a transparency layer, blend it with a white background.\n # Calculate the alpha proportion for blending.\n alpha = img_rgba[:, :, 3] / 255.0\n img_rgb = (1 - alpha[:, :, np.newaxis]) * 255 + alpha[:, :, np.newaxis] * img_rgba[:, :, :3]\n return PIL.Image.fromarray(img_rgb.astype(\"uint8\"), \"RGB\")"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 284, "func_end_lineno": 307, "func_code": "def to_numpy(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.\n \"\"\"\n\n framework_to_numpy = {\n \"pt\": lambda obj: obj.detach().cpu().numpy(),\n \"tf\": lambda obj: obj.numpy(),\n \"jax\": lambda obj: np.asarray(obj),\n \"np\": lambda obj: obj,\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_numpy(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return np.array(obj)\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_numpy[framework](obj)\n\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" np.ndarray:\n if not is_valid_image(img):\n raise ValueError(f\"Invalid image type: {type(img)}\")\n\n if is_vision_available() and isinstance(img, PIL.Image.Image):\n return np.array(img)\n return to_numpy(img)"}, {"class_start_lineno": 70, "class_end_lineno": 364, "func_start_lineno": 195, 
"func_end_lineno": 337, "func_code": " def preprocess(\n self,\n images: ImageInput,\n do_resize: bool = None,\n size: Dict[str, int] = None,\n resample: PILImageResampling = None,\n do_center_crop: bool = None,\n crop_size: int = None,\n do_rescale: bool = None,\n rescale_factor: float = None,\n do_normalize: bool = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_convert_rgb: bool = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> PIL.Image.Image:\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after resizing. Shortest edge of the image is resized to size[\"shortest_edge\"], with\n the longest edge resized to keep the input aspect ratio.\n resample (`int`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only\n has an effect if `do_resize` is set to `True`.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether to center crop the image.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image.\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to\n `True`.\n do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):\n Whether to convert the image to RGB.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n size = get_size_dict(size, param_name=\"size\", default_to_square=False)\n resample = resample if resample is not None else self.resample\n do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop\n crop_size = crop_size if crop_size is not None else self.crop_size\n crop_size = get_size_dict(crop_size, param_name=\"crop_size\", default_to_square=True)\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb\n\n images = make_batched_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n\n validate_preprocess_arguments(\n do_rescale=do_rescale,\n rescale_factor=rescale_factor,\n do_normalize=do_normalize,\n image_mean=image_mean,\n image_std=image_std,\n do_center_crop=do_center_crop,\n crop_size=crop_size,\n do_resize=do_resize,\n size=size,\n resample=resample,\n )\n\n if do_convert_rgb:\n images = [self.blend_rgba(image) for image in images]\n\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n all_images = []\n for image in images:\n if do_resize:\n image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n\n if do_center_crop:\n image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)\n\n if do_rescale:\n image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n\n if do_normalize:\n image = self.normalize(\n image=image, mean=image_mean, std=image_std, input_data_format=input_data_format\n )\n\n all_images.append(image)\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n for image in all_images\n ]\n\n data = {\"pixel_values\": images}\n return BatchFeature(data=data, tensor_type=return_tensors)"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.models.chameleon.image_processing_chameleon.make_batched_images", "transformers.image_utils.validate_preprocess_arguments", "transformers.models.chameleon.image_processing_chameleon.ChameleonImageProcessor.blend_rgba", "transformers.utils.generic.to_numpy", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_utils.to_numpy_array", "transformers.models.chameleon.image_processing_chameleon.ChameleonImageProcessor.preprocess"], "language": "Python", "toolfunc_count": 7, "func_count": 8, "pytest_info": {"total_num": 14, "base_passed_num": 6}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_transforms.get_resize_output_image_size", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.chinese_clip.image_processing_chinese_clip.ChineseCLIPImageProcessor::resize"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_transforms.py", "transformers/image_transforms.py", "transformers/models/chinese_clip/image_processing_chinese_clip.py"], "test_list": ["tests/models/chinese_clip/test_image_processing_chinese_clip.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 214, "func_end_lineno": 278, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square\n (`size`,`size`). If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image"}, {"class_start_lineno": 51, "class_end_lineno": 306, "func_start_lineno": 125, "func_end_lineno": 162, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BICUBIC,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\n resized to keep the input aspect ratio.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred from the input\n image.\n \"\"\"\n size = get_size_dict(size, default_to_square=False)\n output_size = get_resize_output_image_size(\n image, size=(size[\"height\"], size[\"width\"]), default_to_square=False, input_data_format=input_data_format\n )\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_transforms.get_resize_output_image_size", "transformers.image_transforms.resize", "transformers.models.chinese_clip.image_processing_chinese_clip.ChineseCLIPImageProcessor.resize"], "language": "Python", "toolfunc_count": 3, "func_count": 4, "pytest_info": {"total_num": 21, "base_passed_num": 12}} {"id": ["transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.get_resize_output_image_size", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.clip.image_processing_clip.CLIPImageProcessor::resize"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/image_transforms.py", "transformers/image_transforms.py", "transformers/models/clip/image_processing_clip.py"], "test_list": ["tests/models/clip/test_image_processing_clip.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 214, "func_end_lineno": 278, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. 
If set to `True`, the `size` will be converted to a square\n (`size`,`size`). If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image"}, {"class_start_lineno": 52, "class_end_lineno": 345, "func_start_lineno": 151, "func_end_lineno": 198, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BICUBIC,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\n resized to keep the input aspect ratio.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred.\n \"\"\"\n default_to_square = True\n if \"shortest_edge\" in size:\n size = size[\"shortest_edge\"]\n default_to_square = False\n elif \"height\" in size and \"width\" in size:\n size = (size[\"height\"], size[\"width\"])\n else:\n raise ValueError(\"Size must contain either 'shortest_edge' or 'height' and 'width'.\")\n\n output_size = get_resize_output_image_size(\n image,\n size=size,\n default_to_square=default_to_square,\n input_data_format=input_data_format,\n )\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.get_image_size", "transformers.image_transforms.get_resize_output_image_size", "transformers.image_transforms.resize", "transformers.models.clip.image_processing_clip.CLIPImageProcessor.resize"], "language": "Python", "toolfunc_count": 3, "func_count": 4, "pytest_info": {"total_num": 13, "base_passed_num": 9}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.get_resize_output_image_size", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.convnext.image_processing_convnext.ConvNextImageProcessor::resize"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/image_transforms.py", "transformers/models/convnext/image_processing_convnext.py"], "test_list": ["tests/models/convnext/test_image_processing_convnext.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 214, "func_end_lineno": 278, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square\n (`size`,`size`). 
If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image"}, {"class_start_lineno": 51, "class_end_lineno": 320, "func_start_lineno": 117, "func_end_lineno": 184, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n crop_pct: float,\n resample: PILImageResampling = PILImageResampling.BICUBIC,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Dictionary of the form `{\"shortest_edge\": int}`, specifying the size of the output image. If\n `size[\"shortest_edge\"]` >= 384 image is resized to `(size[\"shortest_edge\"], size[\"shortest_edge\"])`.\n Otherwise, the smaller edge of the image will be matched to `int(size[\"shortest_edge\"] / crop_pct)`,\n after which the image is cropped to `(size[\"shortest_edge\"], size[\"shortest_edge\"])`.\n crop_pct (`float`):\n Percentage of the image to crop. Only has an effect if size < 384.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred from the input\n image.\n \"\"\"\n size = get_size_dict(size, default_to_square=False)\n if \"shortest_edge\" not in size:\n raise ValueError(f\"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}\")\n shortest_edge = size[\"shortest_edge\"]\n\n if shortest_edge < 384:\n # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct\n resize_shortest_edge = int(shortest_edge / crop_pct)\n resize_size = get_resize_output_image_size(\n image, size=resize_shortest_edge, default_to_square=False, input_data_format=input_data_format\n )\n image = resize(\n image=image,\n size=resize_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )\n # then crop to (shortest_edge, shortest_edge)\n return center_crop(\n image=image,\n size=(shortest_edge, shortest_edge),\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )\n else:\n # warping (no cropping) when evaluated at 384 or larger\n return resize(\n image,\n size=(shortest_edge, shortest_edge),\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.get_image_size", "transformers.image_transforms.get_resize_output_image_size", "transformers.image_transforms.resize", "transformers.models.convnext.image_processing_convnext.ConvNextImageProcessor.resize"], "language": "Python", "toolfunc_count": 4, "func_count": 5, "pytest_info": {"total_num": 13, "base_passed_num": 6}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.make_list_of_images", "transformers.src.transformers.image_utils.validate_preprocess_arguments", "transformers.src.transformers.utils.generic.to_numpy", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.deit.image_processing_deit.DeiTImageProcessor::preprocess"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_utils.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_utils.py", "transformers/models/deit/image_processing_deit.py"], "test_list": ["tests/models/deit/test_image_processing_deit.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. 
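The ConvNext record above resizes with a crop_pct: below a 384-pixel shortest edge, the image is first resized so its short side is shortest_edge / crop_pct and then center-cropped to a square; at 384 or above it is warped straight to a square. A hedged sketch of just that size arithmetic, with example values (crop_pct = 224/256 is only an illustration, not taken from the record):

shortest_edge = 224
crop_pct = 224 / 256  # example value

if shortest_edge < 384:
    resize_short = int(shortest_edge / crop_pct)   # 256: short side before the crop
    final_size = (shortest_edge, shortest_edge)    # (224, 224) after the center crop
else:
    resize_short = shortest_edge                   # warped directly, no crop
    final_size = (shortest_edge, shortest_edge)

print(resize_short, final_size)  # 256 (224, 224)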
This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 172, "func_end_lineno": 208, "func_code": "def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:\n \"\"\"\n Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.\n If the input is a batch of images, it is converted to a list of images.\n\n Args:\n images (`ImageInput`):\n Image of images to turn into a list of images.\n expected_ndims (`int`, *optional*, defaults to 3):\n Expected number of dimensions for a single input image. If the input image has a different number of\n dimensions, an error is raised.\n \"\"\"\n if is_batched(images):\n return images\n\n # Either the input is a single image, in which case we create a list of length 1\n if isinstance(images, PIL.Image.Image):\n # PIL images are never batched\n return [images]\n\n if is_valid_image(images):\n if images.ndim == expected_ndims + 1:\n # Batch of images\n images = list(images)\n elif images.ndim == expected_ndims:\n # Single image\n images = [images]\n else:\n raise ValueError(\n f\"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got\"\n f\" {images.ndim} dimensions.\"\n )\n return images\n raise ValueError(\n \"Invalid image type. 
Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or \"\n f\"jax.ndarray, but got {type(images)}.\"\n )"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 388, "func_end_lineno": 426, "func_code": "def validate_preprocess_arguments(\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n size_divisibility: Optional[int] = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: Optional[\"PILImageResampling\"] = None,\n):\n \"\"\"\n Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.\n Raises `ValueError` if arguments incompatibility is caught.\n Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,\n sometimes `size_divisibility`, and sometimes `size`. New models and processors added should follow\n existing arguments when possible.\n\n \"\"\"\n if do_rescale and rescale_factor is None:\n raise ValueError(\"`rescale_factor` must be specified if `do_rescale` is `True`.\")\n\n if do_pad and size_divisibility is None:\n # Here, size_divisor might be passed as the value of size\n raise ValueError(\n \"Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if `do_pad` is `True`.\"\n )\n\n if do_normalize and (image_mean is None or image_std is None):\n raise ValueError(\"`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.\")\n\n if do_center_crop and crop_size is None:\n raise ValueError(\"`crop_size` must be specified if `do_center_crop` is `True`.\")\n\n if do_resize and (size is None or resample is None):\n raise ValueError(\"`size` and `resample` must be specified if `do_resize` is `True`.\")"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 284, "func_end_lineno": 307, "func_code": "def to_numpy(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.\n \"\"\"\n\n framework_to_numpy = {\n \"pt\": lambda obj: obj.detach().cpu().numpy(),\n \"tf\": lambda obj: obj.numpy(),\n \"jax\": lambda obj: np.asarray(obj),\n \"np\": lambda obj: obj,\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_numpy(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return np.array(obj)\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_numpy[framework](obj)\n\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" np.ndarray:\n if not is_valid_image(img):\n raise ValueError(f\"Invalid image type: {type(img)}\")\n\n if is_vision_available() and isinstance(img, PIL.Image.Image):\n return np.array(img)\n return to_numpy(img)"}, 
{"class_start_lineno": 46, "class_end_lineno": 296, "func_start_lineno": 163, "func_end_lineno": 296, "func_code": " def preprocess(\n self,\n images: ImageInput,\n do_resize: bool = None,\n size: Dict[str, int] = None,\n resample=None,\n do_center_crop: bool = None,\n crop_size: Dict[str, int] = None,\n do_rescale: bool = None,\n rescale_factor: float = None,\n do_normalize: bool = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: ChannelDimension = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> PIL.Image.Image:\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after `resize`.\n resample (`PILImageResampling`, *optional*, defaults to `self.resample`):\n PILImageResampling filter to use if resizing the image Only has an effect if `do_resize` is set to\n `True`.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether to center crop the image.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n Size of the image after center crop. If one edge the image is smaller than `crop_size`, it will be\n padded with zeros and then cropped\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image values between [0 - 1].\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - `None`: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n resample = resample if resample is not None else self.resample\n do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n\n size = size if size is not None else self.size\n size = get_size_dict(size)\n crop_size = crop_size if crop_size is not None else self.crop_size\n crop_size = get_size_dict(crop_size, param_name=\"crop_size\")\n\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n validate_preprocess_arguments(\n do_rescale=do_rescale,\n rescale_factor=rescale_factor,\n do_normalize=do_normalize,\n image_mean=image_mean,\n image_std=image_std,\n do_center_crop=do_center_crop,\n crop_size=crop_size,\n do_resize=do_resize,\n size=size,\n resample=resample,\n )\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n all_images = []\n for image in images:\n if do_resize:\n image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n\n if do_center_crop:\n image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)\n\n if do_rescale:\n image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n\n if do_normalize:\n image = self.normalize(\n image=image, mean=image_mean, std=image_std, input_data_format=input_data_format\n )\n\n all_images.append(image)\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n for image in all_images\n ]\n\n data = {\"pixel_values\": images}\n return BatchFeature(data=data, tensor_type=return_tensors)"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.make_list_of_images", "transformers.image_utils.validate_preprocess_arguments", "transformers.utils.generic.to_numpy", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_utils.to_numpy_array", "transformers.models.deit.image_processing_deit.DeiTImageProcessor.preprocess"], "language": "Python", "toolfunc_count": 6, "func_count": 7, "pytest_info": {"total_num": 13, "base_passed_num": 5}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.get_resize_output_image_size", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.donut.image_processing_donut.DonutImageProcessor::resize"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/image_transforms.py", "transformers/models/donut/image_processing_donut.py"], "test_list": ["tests/models/donut/test_image_processing_donut.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 214, "func_end_lineno": 278, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square\n (`size`,`size`). 
If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image"}, {"class_start_lineno": 53, "class_end_lineno": 459, "func_start_lineno": 259, "func_end_lineno": 296, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BICUBIC,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred.\n \"\"\"\n size = get_size_dict(size)\n shortest_edge = min(size[\"height\"], size[\"width\"])\n output_size = get_resize_output_image_size(\n image, size=shortest_edge, default_to_square=False, input_data_format=input_data_format\n )\n resized_image = resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )\n return resized_image"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.get_image_size", "transformers.image_transforms.get_resize_output_image_size", "transformers.image_transforms.resize", "transformers.models.donut.image_processing_donut.DonutImageProcessor.resize"], "language": "Python", "toolfunc_count": 4, "func_count": 5, "pytest_info": {"total_num": 13, "base_passed_num": 6}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.make_list_of_images", "transformers.src.transformers.image_utils.validate_preprocess_arguments", "transformers.src.transformers.utils.generic.to_numpy", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.donut.image_processing_donut.DonutImageProcessor::preprocess"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_utils.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_utils.py", "transformers/models/donut/image_processing_donut.py"], "test_list": ["tests/models/donut/test_image_processing_donut.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
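Several of the records above start from get_size_dict, which folds int and tuple sizes into an explicit dict so downstream code can rely on named keys. A hedged re-implementation sketch of that normalisation (the name normalize_size is hypothetical; this is not the library function):

def normalize_size(size, default_to_square=True, height_width_order=True):
    if isinstance(size, dict):
        return size
    if isinstance(size, int):
        return {"height": size, "width": size} if default_to_square else {"shortest_edge": size}
    if isinstance(size, (tuple, list)) and len(size) == 2:
        h, w = size if height_width_order else size[::-1]
        return {"height": h, "width": w}
    raise ValueError(f"Unsupported size: {size!r}")

print(normalize_size(224))                           # {'height': 224, 'width': 224}
print(normalize_size(224, default_to_square=False))  # {'shortest_edge': 224}
print(normalize_size((480, 640)))                    # {'height': 480, 'width': 640}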
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 172, "func_end_lineno": 208, "func_code": "def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:\n \"\"\"\n Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.\n If the input is a batch of images, it is converted to a list of images.\n\n Args:\n images (`ImageInput`):\n Image of images to turn into a list of images.\n expected_ndims (`int`, *optional*, defaults to 3):\n Expected number of dimensions for a single input image. If the input image has a different number of\n dimensions, an error is raised.\n \"\"\"\n if is_batched(images):\n return images\n\n # Either the input is a single image, in which case we create a list of length 1\n if isinstance(images, PIL.Image.Image):\n # PIL images are never batched\n return [images]\n\n if is_valid_image(images):\n if images.ndim == expected_ndims + 1:\n # Batch of images\n images = list(images)\n elif images.ndim == expected_ndims:\n # Single image\n images = [images]\n else:\n raise ValueError(\n f\"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got\"\n f\" {images.ndim} dimensions.\"\n )\n return images\n raise ValueError(\n \"Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or \"\n f\"jax.ndarray, but got {type(images)}.\"\n )"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 388, "func_end_lineno": 426, "func_code": "def validate_preprocess_arguments(\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n size_divisibility: Optional[int] = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: Optional[\"PILImageResampling\"] = None,\n):\n \"\"\"\n Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.\n Raises `ValueError` if arguments incompatibility is caught.\n Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,\n sometimes `size_divisibility`, and sometimes `size`. 
New models and processors added should follow\n existing arguments when possible.\n\n \"\"\"\n if do_rescale and rescale_factor is None:\n raise ValueError(\"`rescale_factor` must be specified if `do_rescale` is `True`.\")\n\n if do_pad and size_divisibility is None:\n # Here, size_divisor might be passed as the value of size\n raise ValueError(\n \"Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if `do_pad` is `True`.\"\n )\n\n if do_normalize and (image_mean is None or image_std is None):\n raise ValueError(\"`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.\")\n\n if do_center_crop and crop_size is None:\n raise ValueError(\"`crop_size` must be specified if `do_center_crop` is `True`.\")\n\n if do_resize and (size is None or resample is None):\n raise ValueError(\"`size` and `resample` must be specified if `do_resize` is `True`.\")"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 284, "func_end_lineno": 307, "func_code": "def to_numpy(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.\n \"\"\"\n\n framework_to_numpy = {\n \"pt\": lambda obj: obj.detach().cpu().numpy(),\n \"tf\": lambda obj: obj.numpy(),\n \"jax\": lambda obj: np.asarray(obj),\n \"np\": lambda obj: obj,\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_numpy(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return np.array(obj)\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_numpy[framework](obj)\n\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" np.ndarray:\n if not is_valid_image(img):\n raise ValueError(f\"Invalid image type: {type(img)}\")\n\n if is_vision_available() and isinstance(img, PIL.Image.Image):\n return np.array(img)\n return to_numpy(img)"}, {"class_start_lineno": 53, "class_end_lineno": 459, "func_start_lineno": 299, "func_end_lineno": 459, "func_code": " def preprocess(\n self,\n images: ImageInput,\n do_resize: bool = None,\n size: Dict[str, int] = None,\n resample: PILImageResampling = None,\n do_thumbnail: bool = None,\n do_align_long_axis: bool = None,\n do_pad: bool = None,\n random_padding: bool = False,\n do_rescale: bool = None,\n rescale_factor: float = None,\n do_normalize: bool = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> PIL.Image.Image:\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. 
If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after resizing. Shortest edge of the image is resized to min(size[\"height\"],\n size[\"width\"]) with the longest edge resized to keep the input aspect ratio.\n resample (`int`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only\n has an effect if `do_resize` is set to `True`.\n do_thumbnail (`bool`, *optional*, defaults to `self.do_thumbnail`):\n Whether to resize the image using thumbnail method.\n do_align_long_axis (`bool`, *optional*, defaults to `self.do_align_long_axis`):\n Whether to align the long axis of the image with the long axis of `size` by rotating by 90 degrees.\n do_pad (`bool`, *optional*, defaults to `self.do_pad`):\n Whether to pad the image. If `random_padding` is set to `True`, each image is padded with a random\n amont of padding on each size, up to the largest image size in the batch. Otherwise, all images are\n padded to the largest image size in the batch.\n random_padding (`bool`, *optional*, defaults to `self.random_padding`):\n Whether to use random padding when padding the image. If `True`, each image in the batch with be padded\n with a random amount of padding on each side up to the size of the largest image in the batch.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image pixel values.\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to use for normalization.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to use for normalization.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: defaults to the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n if isinstance(size, (tuple, list)):\n # Previous feature extractor had size in (width, height) format\n size = size[::-1]\n size = get_size_dict(size)\n resample = resample if resample is not None else self.resample\n do_thumbnail = do_thumbnail if do_thumbnail is not None else self.do_thumbnail\n do_align_long_axis = do_align_long_axis if do_align_long_axis is not None else self.do_align_long_axis\n do_pad = do_pad if do_pad is not None else self.do_pad\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n validate_preprocess_arguments(\n do_rescale=do_rescale,\n rescale_factor=rescale_factor,\n do_normalize=do_normalize,\n image_mean=image_mean,\n image_std=image_std,\n do_pad=do_pad,\n size_divisibility=size, # There is no pad divisibility in this processor, but pad requires the size arg.\n do_resize=do_resize,\n size=size,\n resample=resample,\n )\n\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n if do_align_long_axis:\n images = [self.align_long_axis(image, size=size, input_data_format=input_data_format) for image in images]\n\n if do_resize:\n images = [\n self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_thumbnail:\n images = [self.thumbnail(image=image, size=size, input_data_format=input_data_format) for image in images]\n\n if do_pad:\n images = [\n self.pad_image(\n image=image, size=size, random_padding=random_padding, input_data_format=input_data_format\n )\n for image in images\n ]\n\n if do_rescale:\n images = [\n self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_normalize:\n images = [\n self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n for image in images\n ]\n\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images\n ]\n\n data = {\"pixel_values\": images}\n return BatchFeature(data=data, tensor_type=return_tensors)"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.make_list_of_images", "transformers.image_utils.validate_preprocess_arguments", "transformers.utils.generic.to_numpy", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_utils.to_numpy_array", "transformers.models.donut.image_processing_donut.DonutImageProcessor.preprocess"], "language": "Python", "toolfunc_count": 6, "func_count": 7, "pytest_info": {"total_num": 13, "base_passed_num": 6}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.dpt.image_processing_dpt.DPTImageProcessor::resize"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/models/dpt/image_processing_dpt.py", "transformers/image_transforms.py", "transformers/models/dpt/image_processing_dpt.py"], "test_list": ["tests/models/dpt/test_image_processing_dpt.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. 
This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. 
If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 463, "func_start_lineno": 53, "func_end_lineno": 92, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n output_size: Union[int, Iterable[int]],\n keep_aspect_ratio: bool,\n multiple: int,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> Tuple[int, int]:\n def constrain_to_multiple_of(val, multiple, min_val=0, max_val=None):\n x = round(val / multiple) * multiple\n\n if max_val is not None and x > max_val:\n x = math.floor(val / multiple) * multiple\n\n if x < min_val:\n x = math.ceil(val / multiple) * multiple\n\n return x\n\n output_size = (output_size, output_size) if isinstance(output_size, int) else output_size\n\n input_height, input_width = get_image_size(input_image, input_data_format)\n output_height, output_width = output_size\n\n # determine new height and width\n scale_height = output_height / input_height\n scale_width = output_width / input_width\n\n if keep_aspect_ratio:\n # scale as little as possible\n if abs(1 - scale_width) < abs(1 - scale_height):\n # fit width\n scale_height = scale_width\n else:\n # fit height\n scale_width = scale_height\n\n new_height = constrain_to_multiple_of(scale_height * input_height, multiple=multiple)\n new_width = constrain_to_multiple_of(scale_width * input_width, multiple=multiple)\n\n return (new_height, new_width)"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image"}, {"class_start_lineno": 95, "class_end_lineno": 463, "func_start_lineno": 168, "func_end_lineno": 221, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n keep_aspect_ratio: bool = False,\n ensure_multiple_of: int = 1,\n resample: PILImageResampling = PILImageResampling.BICUBIC,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image to target size `(size[\"height\"], size[\"width\"])`. If `keep_aspect_ratio` is `True`, the image\n is resized to the largest possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is\n set, the image is resized to a size that is a multiple of this value.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Target size of the output image.\n keep_aspect_ratio (`bool`, *optional*, defaults to `False`):\n If `True`, the image is resized to the largest possible size such that the aspect ratio is preserved.\n ensure_multiple_of (`int`, *optional*, defaults to 1):\n The image is resized to a size that is a multiple of this value.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Defines the resampling filter to use if resizing the image. 
Otherwise, the image is resized to size\n specified in `size`.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred.\n \"\"\"\n size = get_size_dict(size)\n if \"height\" not in size or \"width\" not in size:\n raise ValueError(f\"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}\")\n\n output_size = get_resize_output_image_size(\n image,\n output_size=(size[\"height\"], size[\"width\"]),\n keep_aspect_ratio=keep_aspect_ratio,\n multiple=ensure_multiple_of,\n input_data_format=input_data_format,\n )\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.get_image_size", "transformers.models.dpt.image_processing_dpt.get_resize_output_image_size", "transformers.image_transforms.resize", "transformers.models.dpt.image_processing_dpt.DPTImageProcessor.resize"], "language": "Python", "toolfunc_count": 3, "func_count": 4, "pytest_info": {"total_num": 15, "base_passed_num": 6}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.make_list_of_images", "transformers.src.transformers.image_utils.validate_preprocess_arguments", "transformers.src.transformers.utils.generic.to_numpy", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.dpt.image_processing_dpt.DPTImageProcessor::preprocess"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_utils.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_utils.py", "transformers/models/dpt/image_processing_dpt.py"], "test_list": ["tests/models/dpt/test_image_processing_dpt.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 172, "func_end_lineno": 208, "func_code": "def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:\n \"\"\"\n Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.\n If the input is a batch of images, it is converted to a list of images.\n\n Args:\n images (`ImageInput`):\n Image of images to turn into a list of images.\n expected_ndims (`int`, *optional*, defaults to 3):\n Expected number of dimensions for a single input image. If the input image has a different number of\n dimensions, an error is raised.\n \"\"\"\n if is_batched(images):\n return images\n\n # Either the input is a single image, in which case we create a list of length 1\n if isinstance(images, PIL.Image.Image):\n # PIL images are never batched\n return [images]\n\n if is_valid_image(images):\n if images.ndim == expected_ndims + 1:\n # Batch of images\n images = list(images)\n elif images.ndim == expected_ndims:\n # Single image\n images = [images]\n else:\n raise ValueError(\n f\"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got\"\n f\" {images.ndim} dimensions.\"\n )\n return images\n raise ValueError(\n \"Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or \"\n f\"jax.ndarray, but got {type(images)}.\"\n )"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 388, "func_end_lineno": 426, "func_code": "def validate_preprocess_arguments(\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n size_divisibility: Optional[int] = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: Optional[\"PILImageResampling\"] = None,\n):\n \"\"\"\n Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.\n Raises `ValueError` if arguments incompatibility is caught.\n Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,\n sometimes `size_divisibility`, and sometimes `size`. 
New models and processors added should follow\n existing arguments when possible.\n\n \"\"\"\n if do_rescale and rescale_factor is None:\n raise ValueError(\"`rescale_factor` must be specified if `do_rescale` is `True`.\")\n\n if do_pad and size_divisibility is None:\n # Here, size_divisor might be passed as the value of size\n raise ValueError(\n \"Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if `do_pad` is `True`.\"\n )\n\n if do_normalize and (image_mean is None or image_std is None):\n raise ValueError(\"`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.\")\n\n if do_center_crop and crop_size is None:\n raise ValueError(\"`crop_size` must be specified if `do_center_crop` is `True`.\")\n\n if do_resize and (size is None or resample is None):\n raise ValueError(\"`size` and `resample` must be specified if `do_resize` is `True`.\")"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 284, "func_end_lineno": 307, "func_code": "def to_numpy(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.\n \"\"\"\n\n framework_to_numpy = {\n \"pt\": lambda obj: obj.detach().cpu().numpy(),\n \"tf\": lambda obj: obj.numpy(),\n \"jax\": lambda obj: np.asarray(obj),\n \"np\": lambda obj: obj,\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_numpy(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return np.array(obj)\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_numpy[framework](obj)\n\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" np.ndarray:\n if not is_valid_image(img):\n raise ValueError(f\"Invalid image type: {type(img)}\")\n\n if is_vision_available() and isinstance(img, PIL.Image.Image):\n return np.array(img)\n return to_numpy(img)"}, {"class_start_lineno": 95, "class_end_lineno": 463, "func_start_lineno": 269, "func_end_lineno": 419, "func_code": " def preprocess(\n self,\n images: ImageInput,\n do_resize: bool = None,\n size: int = None,\n keep_aspect_ratio: bool = None,\n ensure_multiple_of: int = None,\n resample: PILImageResampling = None,\n do_rescale: bool = None,\n rescale_factor: float = None,\n do_normalize: bool = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: bool = None,\n size_divisor: int = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: ChannelDimension = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> PIL.Image.Image:\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. 
If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after reszing. If `keep_aspect_ratio` is `True`, the image is resized to the largest\n possible size such that the aspect ratio is preserved. If `ensure_multiple_of` is set, the image is\n resized to a size that is a multiple of this value.\n keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`):\n Whether to keep the aspect ratio of the image. If False, the image will be resized to (size, size). If\n True, the image will be resized to keep the aspect ratio and the size will be the maximum possible.\n ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`):\n Ensure that the image size is a multiple of this value.\n resample (`int`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only\n has an effect if `do_resize` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image values between [0 - 1].\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n size = get_size_dict(size)\n keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio\n ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of\n resample = resample if resample is not None else self.resample\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n do_pad = do_pad if do_pad is not None else self.do_pad\n size_divisor = size_divisor if size_divisor is not None else self.size_divisor\n\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n validate_preprocess_arguments(\n do_rescale=do_rescale,\n rescale_factor=rescale_factor,\n do_normalize=do_normalize,\n image_mean=image_mean,\n image_std=image_std,\n do_pad=do_pad,\n size_divisibility=size_divisor,\n do_resize=do_resize,\n size=size,\n resample=resample,\n )\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n if do_resize:\n images = [\n self.resize(\n image=image,\n size=size,\n resample=resample,\n keep_aspect_ratio=keep_aspect_ratio,\n ensure_multiple_of=ensure_multiple_of,\n input_data_format=input_data_format,\n )\n for image in images\n ]\n\n if do_rescale:\n images = [\n self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_normalize:\n images = [\n self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_pad:\n images = [\n self.pad_image(image=image, size_divisor=size_divisor, input_data_format=input_data_format)\n for image in images\n ]\n\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images\n ]\n\n data = {\"pixel_values\": images}\n return BatchFeature(data=data, tensor_type=return_tensors)"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.make_list_of_images", "transformers.image_utils.validate_preprocess_arguments", "transformers.utils.generic.to_numpy", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_utils.to_numpy_array", "transformers.models.dpt.image_processing_dpt.DPTImageProcessor.preprocess"], "language": "Python", "toolfunc_count": 6, "func_count": 7, "pytest_info": {"total_num": 15, "base_passed_num": 6}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.flava.image_processing_flava.FlavaImageProcessor::resize"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_transforms.py", "transformers/models/flava/image_processing_flava.py"], "test_list": ["tests/models/flava/test_image_processing_flava.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image"}, {"class_start_lineno": 136, "class_end_lineno": 700, "func_start_lineno": 338, "func_end_lineno": 384, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BICUBIC,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image to `(size[\"height\"], size[\"width\"])`.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Dictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.\n data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n size = get_size_dict(size)\n if \"height\" not in size or \"width\" not in size:\n raise ValueError(f\"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}\")\n output_size = (size[\"height\"], size[\"width\"])\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_transforms.resize", "transformers.models.flava.image_processing_flava.FlavaImageProcessor.resize"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 15, "base_passed_num": 6}} {"id": ["transformers.src.transformers.image_utils.infer_channel_dimension_format", "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.pad", "transformers.src.transformers.models.fuyu.image_processing_fuyu.FuyuImageProcessor::pad_image"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/fuyu/image_processing_fuyu.py"], "test_list": ["tests/models/fuyu/test_image_processing_fuyu.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 220, "func_end_lineno": 254, "func_code": "def infer_channel_dimension_format(\n image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None\n) -> ChannelDimension:\n \"\"\"\n Infers the channel dimension format of `image`.\n\n Args:\n image (`np.ndarray`):\n The image to infer the channel dimension of.\n num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):\n The number of channels of the image.\n\n Returns:\n The channel dimension of the image.\n \"\"\"\n num_channels = num_channels if num_channels is not None else (1, 3)\n num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels\n\n if image.ndim == 3:\n first_dim, last_dim = 0, 2\n elif image.ndim == 4:\n first_dim, last_dim = 1, 3\n else:\n raise ValueError(f\"Unsupported number of image dimensions: {image.ndim}\")\n\n if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:\n logger.warning(\n f\"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension.\"\n )\n return ChannelDimension.FIRST\n elif image.shape[first_dim] in num_channels:\n return ChannelDimension.FIRST\n elif image.shape[last_dim] in num_channels:\n return ChannelDimension.LAST\n raise ValueError(\"Unable to infer channel dimension format\")"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. 
If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 667, "func_end_lineno": 750, "func_code": "def pad(\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified (height, width) `padding` and `mode`.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n\n def _expand_for_data_format(values):\n \"\"\"\n Convert values to be in the format expected by np.pad based on the data format.\n \"\"\"\n if isinstance(values, (int, float)):\n values = ((values, values), (values, values))\n elif isinstance(values, tuple) and len(values) == 1:\n values = ((values[0], values[0]), (values[0], values[0]))\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):\n values = (values, values)\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):\n values = values\n else:\n raise ValueError(f\"Unsupported format: {values}\")\n\n # add 0 for channel dimension\n values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0))\n\n # Add additional padding if there's a batch dimension\n values = (0, *values) if image.ndim == 4 else values\n return values\n\n padding = _expand_for_data_format(padding)\n\n if mode == PaddingMode.CONSTANT:\n constant_values = _expand_for_data_format(constant_values)\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n\n image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n return image"}, {"class_start_lineno": 182, "class_end_lineno": 720, "func_start_lineno": 324, "func_end_lineno": 360, "func_code": " def pad_image(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n mode: str = \"constant\",\n constant_values: float = 1.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n \"\"\"\n Pad an image to `(size[\"height\"], size[\"width\"])`.\n\n Args:\n image (`np.ndarray`):\n Image to pad.\n size (`Dict[str, int]`):\n Dictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the output image.\n data_format (`ChannelDimension` or `str`, *optional*):\n The data format of the output image. If unset, the same format as the input image is used.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred.\n \"\"\"\n image_height, image_width = get_image_size(image, input_data_format)\n target_height, target_width = size[\"height\"], size[\"width\"]\n padding_top = 0\n padding_left = 0\n padding_bottom = target_height - image_height\n padding_right = target_width - image_width\n padded_image = pad(\n image,\n padding=((padding_top, padding_bottom), (padding_left, padding_right)),\n mode=mode,\n constant_values=constant_values,\n data_format=data_format,\n input_data_format=input_data_format,\n )\n return padded_image"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.infer_channel_dimension_format", "transformers.image_utils.get_image_size", "transformers.image_transforms.pad", "transformers.models.fuyu.image_processing_fuyu.FuyuImageProcessor.pad_image"], "language": "Python", "toolfunc_count": 3, "func_count": 4, "pytest_info": {"total_num": 4, "base_passed_num": 1}} {"id": ["transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.pad", "transformers.src.transformers.models.idefics2.image_processing_idefics2.Idefics2ImageProcessor::_pad_image"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/idefics2/image_processing_idefics2.py"], "test_list": ["tests/models/idefics2/test_image_processing_idefics2.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 667, "func_end_lineno": 750, "func_code": "def pad(\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified (height, width) `padding` and `mode`.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. 
Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n\n def _expand_for_data_format(values):\n \"\"\"\n Convert values to be in the format expected by np.pad based on the data format.\n \"\"\"\n if isinstance(values, (int, float)):\n values = ((values, values), (values, values))\n elif isinstance(values, tuple) and len(values) == 1:\n values = ((values[0], values[0]), (values[0], values[0]))\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):\n values = (values, values)\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):\n values = values\n else:\n raise ValueError(f\"Unsupported format: {values}\")\n\n # add 0 for channel dimension\n values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0))\n\n # Add additional padding if there's a batch dimension\n values = (0, *values) if image.ndim == 4 else values\n return values\n\n padding = _expand_for_data_format(padding)\n\n if mode == PaddingMode.CONSTANT:\n constant_values = _expand_for_data_format(constant_values)\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n\n image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n return image"}, {"class_start_lineno": 181, "class_end_lineno": 596, "func_start_lineno": 287, "func_end_lineno": 312, "func_code": " def _pad_image(\n self,\n image: np.ndarray,\n output_size: Tuple[int, int],\n constant_values: Union[float, Iterable[float]] = 0,\n data_format: Optional[ChannelDimension] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n \"\"\"\n Pad an image with zeros to the given size.\n \"\"\"\n input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n 
output_height, output_width = output_size\n\n pad_bottom = output_height - input_height\n pad_right = output_width - input_width\n padding = ((0, pad_bottom), (0, pad_right))\n padded_image = pad(\n image,\n padding,\n mode=PaddingMode.CONSTANT,\n constant_values=constant_values,\n data_format=data_format,\n input_data_format=input_data_format,\n )\n return padded_image"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.get_image_size", "transformers.image_transforms.pad", "transformers.models.idefics2.image_processing_idefics2.Idefics2ImageProcessor._pad_image"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 11, "base_passed_num": 7}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.make_list_of_images", "transformers.src.transformers.image_utils.validate_preprocess_arguments", "transformers.src.transformers.utils.generic.to_numpy", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.levit.image_processing_levit.LevitImageProcessor::preprocess"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_utils.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_utils.py", "transformers/models/levit/image_processing_levit.py"], "test_list": ["tests/models/levit/test_image_processing_levit.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 172, "func_end_lineno": 208, "func_code": "def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:\n \"\"\"\n Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.\n If the input is a batch of images, it is converted to a list of images.\n\n Args:\n images (`ImageInput`):\n Image of images to turn into a list of images.\n expected_ndims (`int`, *optional*, defaults to 3):\n Expected number of dimensions for a single input image. If the input image has a different number of\n dimensions, an error is raised.\n \"\"\"\n if is_batched(images):\n return images\n\n # Either the input is a single image, in which case we create a list of length 1\n if isinstance(images, PIL.Image.Image):\n # PIL images are never batched\n return [images]\n\n if is_valid_image(images):\n if images.ndim == expected_ndims + 1:\n # Batch of images\n images = list(images)\n elif images.ndim == expected_ndims:\n # Single image\n images = [images]\n else:\n raise ValueError(\n f\"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got\"\n f\" {images.ndim} dimensions.\"\n )\n return images\n raise ValueError(\n \"Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or \"\n f\"jax.ndarray, but got {type(images)}.\"\n )"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 388, "func_end_lineno": 426, "func_code": "def validate_preprocess_arguments(\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n size_divisibility: Optional[int] = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: Optional[\"PILImageResampling\"] = None,\n):\n \"\"\"\n Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.\n Raises `ValueError` if arguments incompatibility is caught.\n Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,\n sometimes `size_divisibility`, and sometimes `size`. 
New models and processors added should follow\n existing arguments when possible.\n\n \"\"\"\n if do_rescale and rescale_factor is None:\n raise ValueError(\"`rescale_factor` must be specified if `do_rescale` is `True`.\")\n\n if do_pad and size_divisibility is None:\n # Here, size_divisor might be passed as the value of size\n raise ValueError(\n \"Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if `do_pad` is `True`.\"\n )\n\n if do_normalize and (image_mean is None or image_std is None):\n raise ValueError(\"`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.\")\n\n if do_center_crop and crop_size is None:\n raise ValueError(\"`crop_size` must be specified if `do_center_crop` is `True`.\")\n\n if do_resize and (size is None or resample is None):\n raise ValueError(\"`size` and `resample` must be specified if `do_resize` is `True`.\")"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 284, "func_end_lineno": 307, "func_code": "def to_numpy(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.\n \"\"\"\n\n framework_to_numpy = {\n \"pt\": lambda obj: obj.detach().cpu().numpy(),\n \"tf\": lambda obj: obj.numpy(),\n \"jax\": lambda obj: np.asarray(obj),\n \"np\": lambda obj: obj,\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_numpy(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return np.array(obj)\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_numpy[framework](obj)\n\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" np.ndarray:\n if not is_valid_image(img):\n raise ValueError(f\"Invalid image type: {type(img)}\")\n\n if is_vision_available() and isinstance(img, PIL.Image.Image):\n return np.array(img)\n return to_numpy(img)"}, {"class_start_lineno": 46, "class_end_lineno": 306, "func_start_lineno": 175, "func_end_lineno": 306, "func_code": " def preprocess(\n self,\n images: ImageInput,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: PILImageResampling = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, Iterable[float]]] = None,\n image_std: Optional[Union[float, Iterable[float]]] = None,\n return_tensors: Optional[TensorType] = None,\n data_format: ChannelDimension = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> BatchFeature:\n \"\"\"\n Preprocess an image or batch of images to be used as input to a LeViT model.\n\n Args:\n images (`ImageInput`):\n Image or batch of images to preprocess. Expects a single or batch of images with pixel values ranging\n from 0 to 255. 
If passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the output image after resizing. If size is a dict with keys \"width\" and \"height\", the image\n will be resized to (height, width). If size is a dict with key \"shortest_edge\", the shortest edge value\n `c` is rescaled to int(`c` * (256/224)). The smaller edge of the image will be matched to this value\n i.e, if height > width, then image will be rescaled to (size * height / width, size).\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resiizing the image.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether to center crop the image.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n Size of the output image after center cropping. Crops images to (crop_size[\"height\"],\n crop_size[\"width\"]).\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image pixel values by `rescaling_factor` - typical to values between 0 and 1.\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Factor to rescale the image pixel values by.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image pixel values by `image_mean` and `image_std`.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Mean to normalize the image pixel values by.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Standard deviation to normalize the image pixel values by.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`str` or `ChannelDimension`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n resample = resample if resample is not None else self.resample\n do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n\n size = size if size is not None else self.size\n size = get_size_dict(size, default_to_square=False)\n crop_size = crop_size if crop_size is not None else self.crop_size\n crop_size = get_size_dict(crop_size, param_name=\"crop_size\")\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n validate_preprocess_arguments(\n do_rescale=do_rescale,\n rescale_factor=rescale_factor,\n do_normalize=do_normalize,\n image_mean=image_mean,\n image_std=image_std,\n do_center_crop=do_center_crop,\n crop_size=crop_size,\n do_resize=do_resize,\n size=size,\n resample=resample,\n )\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n if do_resize:\n images = [self.resize(image, size, resample, input_data_format=input_data_format) for image in images]\n\n if do_center_crop:\n images = [self.center_crop(image, crop_size, input_data_format=input_data_format) for image in images]\n\n if do_rescale:\n images = [self.rescale(image, rescale_factor, input_data_format=input_data_format) for image in images]\n\n if do_normalize:\n images = [\n self.normalize(image, image_mean, image_std, input_data_format=input_data_format) for image in images\n ]\n\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images\n ]\n\n data = {\"pixel_values\": images}\n return BatchFeature(data=data, tensor_type=return_tensors)"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.make_list_of_images", "transformers.image_utils.validate_preprocess_arguments", "transformers.utils.generic.to_numpy", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_utils.to_numpy_array", "transformers.models.levit.image_processing_levit.LevitImageProcessor.preprocess"], "language": "Python", "toolfunc_count": 6, "func_count": 7, "pytest_info": {"total_num": 13, "base_passed_num": 6}} {"id": ["transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_processing_utils.select_best_resolution", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor::_resize_for_patching", "transformers.src.transformers.image_transforms._rescale_for_pil_conversion", "transformers.src.transformers.image_transforms.to_pil_image", "transformers.src.transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor::get_image_patches"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/image_processing_utils.py", "transformers/models/llava_next/image_processing_llava_next.py", "transformers/image_transforms.py", "transformers/models/llava_next/image_processing_llava_next.py", "transformers/image_transforms.py", "transformers/image_transforms.py", "transformers/models/llava_next/image_processing_llava_next.py"], "test_list": ["tests/models/llava_next/test_image_processing_llava_next.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. 
If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 252, "func_end_lineno": 287, "func_code": "def select_best_resolution(original_size: tuple, possible_resolutions: list) -> tuple:\n \"\"\"\n Selects the best resolution from a list of possible resolutions based on the original size.\n\n This is done by calculating the effective and wasted resolution for each possible resolution.\n\n The best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution.\n\n Args:\n original_size (tuple):\n The original size of the image in the format (height, width).\n possible_resolutions (list):\n A list of possible resolutions in the format [(height1, width1), (height2, width2), ...].\n\n Returns:\n tuple: The best fit resolution in the format (height, width).\n \"\"\"\n original_height, original_width = original_size\n best_fit = None\n max_effective_resolution = 0\n min_wasted_resolution = float(\"inf\")\n\n for height, width in possible_resolutions:\n scale = min(width / original_width, height / original_height)\n downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)\n effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)\n wasted_resolution = (width * height) - effective_resolution\n\n if effective_resolution > max_effective_resolution or (\n effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution\n ):\n max_effective_resolution = effective_resolution\n min_wasted_resolution = wasted_resolution\n best_fit = (height, width)\n\n return best_fit"}, {"class_start_lineno": 1, "class_end_lineno": 749, "func_start_lineno": 125, "func_end_lineno": 139, "func_code": "def _get_patch_output_size(image, target_resolution, input_data_format):\n original_height, original_width = get_image_size(image, channel_dim=input_data_format)\n target_height, target_width = target_resolution\n\n scale_w = target_width / original_width\n scale_h = target_height / original_height\n\n if scale_w < scale_h:\n new_width = target_width\n new_height = min(math.ceil(original_height * scale_w), target_height)\n else:\n new_height = target_height\n new_width = min(math.ceil(original_width * scale_h), target_width)\n\n return new_height, new_width"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap 
(`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image"}, {"class_start_lineno": 142, "class_end_lineno": 749, "func_start_lineno": 436, "func_end_lineno": 460, "func_code": " def _resize_for_patching(\n self, image: np.array, target_resolution: tuple, resample, input_data_format: ChannelDimension\n ) -> np.array:\n \"\"\"\n Resizes an image to a target resolution while maintaining aspect ratio.\n\n Args:\n image (np.array):\n The input image.\n target_resolution (tuple):\n The target resolution (height, width) of the image.\n resample (`PILImageResampling`):\n Resampling filter to use if resizing the image.\n input_data_format (`ChannelDimension` or `str`):\n The channel dimension format of the input image.\n\n Returns:\n np.array: The resized and padded image.\n \"\"\"\n new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format)\n\n # Resize the image\n resized_image = resize(image, (new_height, 
new_width), resample=resample, input_data_format=input_data_format)\n\n return resized_image"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 135, "func_end_lineno": 159, "func_code": "def _rescale_for_pil_conversion(image):\n \"\"\"\n Detects whether or not the image needs to be rescaled before being converted to a PIL image.\n\n The assumption is that if the image is of type `np.float` and all values are between 0 and 1, it needs to be\n rescaled.\n \"\"\"\n if image.dtype == np.uint8:\n do_rescale = False\n elif np.allclose(image, image.astype(int)):\n if np.all(0 <= image) and np.all(image <= 255):\n do_rescale = False\n else:\n raise ValueError(\n \"The image to be converted to a PIL image contains values outside the range [0, 255], \"\n f\"got [{image.min()}, {image.max()}] which cannot be converted to uint8.\"\n )\n elif np.all(0 <= image) and np.all(image <= 1):\n do_rescale = True\n else:\n raise ValueError(\n \"The image to be converted to a PIL image contains values outside the range [0, 1], \"\n f\"got [{image.min()}, {image.max()}] which cannot be converted to uint8.\"\n )\n return do_rescale"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 162, "func_end_lineno": 210, "func_code": "def to_pil_image(\n image: Union[np.ndarray, \"PIL.Image.Image\", \"torch.Tensor\", \"tf.Tensor\", \"jnp.ndarray\"],\n do_rescale: Optional[bool] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> \"PIL.Image.Image\":\n \"\"\"\n Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if\n needed.\n\n Args:\n image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor` or `tf.Tensor`):\n The image to convert to the `PIL.Image` format.\n do_rescale (`bool`, *optional*):\n Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default\n to `True` if the image type is a floating type and casting to `int` would result in a loss of precision,\n and `False` otherwise.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `PIL.Image.Image`: The converted image.\n \"\"\"\n requires_backends(to_pil_image, [\"vision\"])\n\n if isinstance(image, PIL.Image.Image):\n return image\n\n # Convert all tensors to numpy arrays before converting to PIL image\n if is_torch_tensor(image) or is_tf_tensor(image):\n image = image.numpy()\n elif is_jax_tensor(image):\n image = np.array(image)\n elif not isinstance(image, np.ndarray):\n raise ValueError(\"Input image type not supported: {}\".format(type(image)))\n\n # If the channel has been moved to first dim, we put it back at the end.\n image = to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format)\n\n # If there is a single channel, we squeeze it, as otherwise PIL can't handle it.\n image = np.squeeze(image, axis=-1) if image.shape[-1] == 1 else image\n\n # PIL.Image can only store uint8 values so we rescale the image to be between 0 and 255 if needed.\n do_rescale = _rescale_for_pil_conversion(image) if do_rescale is None else do_rescale\n\n if do_rescale:\n image = rescale(image, 255)\n\n image = image.astype(np.uint8)\n return PIL.Image.fromarray(image)"}, {"class_start_lineno": 142, "class_end_lineno": 749, "func_start_lineno": 478, "func_end_lineno": 540, "func_code": " def get_image_patches(\n self,\n image: np.array,\n grid_pinpoints,\n size: tuple,\n patch_size: int,\n resample: PILImageResampling,\n data_format: ChannelDimension,\n input_data_format: ChannelDimension,\n ) -> List[np.array]:\n \"\"\"\n Process an image with variable resolutions by dividing it into patches.\n\n Args:\n image (np.array):\n The input image to be processed.\n grid_pinpoints (List):\n A string representation of a list of possible resolutions.\n size (`tuple`):\n Size to resize the original image to.\n patch_size (`int`):\n Size of the patches to divide the image into.\n resample (`PILImageResampling`):\n Resampling filter to use if resizing the image.\n data_format (`ChannelDimension` or `str`):\n The channel dimension format for the output image.\n input_data_format (`ChannelDimension` or `str`):\n The channel dimension format of the input image.\n\n Returns:\n List[np.array]: A list of NumPy arrays containing the processed image patches.\n \"\"\"\n if not isinstance(grid_pinpoints, list):\n raise TypeError(\"grid_pinpoints must be a list of possible resolutions.\")\n\n possible_resolutions = grid_pinpoints\n\n image_size = get_image_size(image, channel_dim=input_data_format)\n best_resolution = select_best_resolution(image_size, possible_resolutions)\n resized_image = self._resize_for_patching(\n image, best_resolution, resample=resample, input_data_format=input_data_format\n )\n padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format)\n\n patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format)\n\n # make sure that all patches are in the input data format\n patches = [\n to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format)\n for patch in patches\n ]\n\n resized_original_image = resize(\n image,\n size=size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n )\n\n image_patches = [resized_original_image] + patches\n\n return image_patches"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.get_image_size", "transformers.image_processing_utils.select_best_resolution", 
"transformers.models.llava_next.image_processing_llava_next._get_patch_output_size", "transformers.image_transforms.resize", "transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor._resize_for_patching", "transformers.image_transforms._rescale_for_pil_conversion", "transformers.image_transforms.to_pil_image", "transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor.get_image_patches"], "language": "Python", "toolfunc_count": 6, "func_count": 7, "pytest_info": {"total_num": 13, "base_passed_num": 8}} {"id": ["transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_utils.infer_channel_dimension_format", "transformers.src.transformers.image_transforms.pad", "transformers.src.transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor::pad", "transformers.src.transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor::_pad_for_patching"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/models/llava_next/image_processing_llava_next.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/llava_next/image_processing_llava_next.py", "transformers/models/llava_next/image_processing_llava_next.py"], "test_list": ["tests/models/llava_next/test_image_processing_llava_next.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. 
If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 749, "func_start_lineno": 125, "func_end_lineno": 139, "func_code": "def _get_patch_output_size(image, target_resolution, input_data_format):\n original_height, original_width = get_image_size(image, channel_dim=input_data_format)\n target_height, target_width = target_resolution\n\n scale_w = target_width / original_width\n scale_h = target_height / original_height\n\n if scale_w < scale_h:\n new_width = target_width\n new_height = min(math.ceil(original_height * scale_w), target_height)\n else:\n new_height = target_height\n new_width = min(math.ceil(original_width * scale_h), target_width)\n\n return new_height, new_width"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 220, "func_end_lineno": 254, "func_code": "def infer_channel_dimension_format(\n image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None\n) -> ChannelDimension:\n \"\"\"\n Infers the channel dimension format of `image`.\n\n Args:\n image (`np.ndarray`):\n The image to infer the channel dimension of.\n num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):\n The number of channels of the image.\n\n Returns:\n The channel dimension of the image.\n \"\"\"\n num_channels = num_channels if num_channels is not None else (1, 3)\n num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels\n\n if image.ndim == 3:\n first_dim, last_dim = 0, 2\n elif image.ndim == 4:\n first_dim, last_dim = 1, 3\n else:\n raise ValueError(f\"Unsupported number of image dimensions: {image.ndim}\")\n\n if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:\n logger.warning(\n f\"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension.\"\n )\n return ChannelDimension.FIRST\n elif image.shape[first_dim] in num_channels:\n return ChannelDimension.FIRST\n elif image.shape[last_dim] in num_channels:\n return ChannelDimension.LAST\n raise ValueError(\"Unable to infer channel dimension format\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 667, "func_end_lineno": 750, "func_code": "def pad(\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified (height, width) `padding` and `mode`.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. 
Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n\n def _expand_for_data_format(values):\n \"\"\"\n Convert values to be in the format expected by np.pad based on the data format.\n \"\"\"\n if isinstance(values, (int, float)):\n values = ((values, values), (values, values))\n elif isinstance(values, tuple) and len(values) == 1:\n values = ((values[0], values[0]), (values[0], values[0]))\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):\n values = (values, values)\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):\n values = values\n else:\n raise ValueError(f\"Unsupported format: {values}\")\n\n # add 0 for channel dimension\n values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0))\n\n # Add additional padding if there's a batch dimension\n values = (0, *values) if image.ndim == 4 else values\n return values\n\n padding = _expand_for_data_format(padding)\n\n if mode == PaddingMode.CONSTANT:\n constant_values = _expand_for_data_format(constant_values)\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n\n image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n return image"}, {"class_start_lineno": 142, "class_end_lineno": 749, "func_start_lineno": 284, "func_end_lineno": 350, "func_code": " def pad(\n self,\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], 
Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`)\n dimension of in the (`num_patches`) dimension. In the second case an iterable if tuples is expected\n as input.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n\n # call the general `pad` if padding on `height/width`, otherwise it's the `num_patched` dim\n if isinstance(padding, int) or len(padding) != 4:\n return pad(image, padding, mode, constant_values, data_format, input_data_format)\n\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n if mode == PaddingMode.CONSTANT:\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n image = (\n to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n )\n return image"}, {"class_start_lineno": 142, "class_end_lineno": 749, "func_start_lineno": 462, "func_end_lineno": 476, "func_code": " def _pad_for_patching(\n self, image: np.array, target_resolution: tuple, input_data_format: ChannelDimension\n ) -> np.array:\n \"\"\"\n Pad an image to a target resolution while maintaining aspect ratio.\n \"\"\"\n target_height, target_width = target_resolution\n new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format)\n\n paste_x = (target_width - new_width) // 2\n paste_y = (target_height - new_height) // 2\n\n padded_image = self.pad(image, padding=((paste_y, paste_y), (paste_x, paste_x)))\n\n return padded_image"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.get_image_size", "transformers.models.llava_next.image_processing_llava_next._get_patch_output_size", "transformers.image_utils.infer_channel_dimension_format", "transformers.image_transforms.pad", "transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor.pad", "transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor._pad_for_patching"], "language": "Python", "toolfunc_count": 3, "func_count": 5, "pytest_info": {"total_num": 13, "base_passed_num": 9}} {"id": ["transformers.src.transformers.image_utils.infer_channel_dimension_format", "transformers.src.transformers.image_transforms.pad", "transformers.src.transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor::pad"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/llava_next/image_processing_llava_next.py"], "test_list": ["tests/models/llava_next/test_image_processing_llava_next.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 220, "func_end_lineno": 254, "func_code": "def infer_channel_dimension_format(\n image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None\n) -> ChannelDimension:\n \"\"\"\n Infers the channel dimension format of `image`.\n\n Args:\n image (`np.ndarray`):\n The image to infer the channel dimension of.\n num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):\n The number of channels of the image.\n\n 
Returns:\n The channel dimension of the image.\n \"\"\"\n num_channels = num_channels if num_channels is not None else (1, 3)\n num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels\n\n if image.ndim == 3:\n first_dim, last_dim = 0, 2\n elif image.ndim == 4:\n first_dim, last_dim = 1, 3\n else:\n raise ValueError(f\"Unsupported number of image dimensions: {image.ndim}\")\n\n if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:\n logger.warning(\n f\"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension.\"\n )\n return ChannelDimension.FIRST\n elif image.shape[first_dim] in num_channels:\n return ChannelDimension.FIRST\n elif image.shape[last_dim] in num_channels:\n return ChannelDimension.LAST\n raise ValueError(\"Unable to infer channel dimension format\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 667, "func_end_lineno": 750, "func_code": "def pad(\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified (height, width) `padding` and `mode`.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n\n def _expand_for_data_format(values):\n \"\"\"\n Convert values to be in the format expected by np.pad based on the data format.\n \"\"\"\n if isinstance(values, (int, float)):\n values = ((values, values), (values, values))\n elif isinstance(values, tuple) and len(values) == 1:\n values = ((values[0], values[0]), (values[0], values[0]))\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):\n values = (values, values)\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):\n values = values\n else:\n raise ValueError(f\"Unsupported format: {values}\")\n\n # add 0 for channel dimension\n values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0))\n\n # Add additional padding if there's a batch dimension\n values = (0, *values) if image.ndim == 4 else values\n return values\n\n padding = _expand_for_data_format(padding)\n\n if mode == PaddingMode.CONSTANT:\n constant_values = _expand_for_data_format(constant_values)\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n\n image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n return image"}, {"class_start_lineno": 142, "class_end_lineno": 749, "func_start_lineno": 284, "func_end_lineno": 350, "func_code": " def pad(\n self,\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`)\n dimension of in the (`num_patches`) dimension. In the second case an iterable if tuples is expected\n as input.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. 
Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n\n # call the general `pad` if padding on `height/width`, otherwise it's the `num_patched` dim\n if isinstance(padding, int) or len(padding) != 4:\n return pad(image, padding, mode, constant_values, data_format, input_data_format)\n\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n if mode == PaddingMode.CONSTANT:\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n image = (\n to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n )\n return image"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.infer_channel_dimension_format", "transformers.image_transforms.pad", "transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor.pad"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 13, "base_passed_num": 9}} {"id": ["transformers.src.transformers.image_utils.make_list_of_images", "transformers.src.transformers.utils.generic.to_numpy", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.llava_next_video.image_processing_llava_next_video.LlavaNextVideoImageProcessor::_preprocess"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_utils.py", "transformers/models/llava_next_video/image_processing_llava_next_video.py"], "test_list": ["tests/models/llava_next_video/test_image_processing_llava_next_video.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 172, "func_end_lineno": 208, "func_code": "def 
make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:\n \"\"\"\n Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.\n If the input is a batch of images, it is converted to a list of images.\n\n Args:\n images (`ImageInput`):\n Image of images to turn into a list of images.\n expected_ndims (`int`, *optional*, defaults to 3):\n Expected number of dimensions for a single input image. If the input image has a different number of\n dimensions, an error is raised.\n \"\"\"\n if is_batched(images):\n return images\n\n # Either the input is a single image, in which case we create a list of length 1\n if isinstance(images, PIL.Image.Image):\n # PIL images are never batched\n return [images]\n\n if is_valid_image(images):\n if images.ndim == expected_ndims + 1:\n # Batch of images\n images = list(images)\n elif images.ndim == expected_ndims:\n # Single image\n images = [images]\n else:\n raise ValueError(\n f\"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got\"\n f\" {images.ndim} dimensions.\"\n )\n return images\n raise ValueError(\n \"Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or \"\n f\"jax.ndarray, but got {type(images)}.\"\n )"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 284, "func_end_lineno": 307, "func_code": "def to_numpy(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.\n \"\"\"\n\n framework_to_numpy = {\n \"pt\": lambda obj: obj.detach().cpu().numpy(),\n \"tf\": lambda obj: obj.numpy(),\n \"jax\": lambda obj: np.asarray(obj),\n \"np\": lambda obj: obj,\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_numpy(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return np.array(obj)\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_numpy[framework](obj)\n\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" np.ndarray:\n if not is_valid_image(img):\n raise ValueError(f\"Invalid image type: {type(img)}\")\n\n if is_vision_available() and isinstance(img, PIL.Image.Image):\n return np.array(img)\n return to_numpy(img)"}, {"class_start_lineno": 68, "class_end_lineno": 416, "func_start_lineno": 199, "func_end_lineno": 297, "func_code": " def _preprocess(\n self,\n images: ImageInput,\n do_resize: bool = None,\n size: Dict[str, int] = None,\n resample: PILImageResampling = None,\n do_center_crop: bool = None,\n crop_size: int = None,\n do_rescale: bool = None,\n rescale_factor: float = None,\n do_normalize: bool = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_convert_rgb: bool = None,\n data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> Image.Image:\n \"\"\"\n 
Preprocess an image or batch of images. Copy of the `preprocess` method from `CLIPImageProcessor`.\n\n Args:\n images (`ImageInput`):\n Batch of frames (one video) to preprocess. Expects a batch of frames with pixel values ranging from 0 to 255. If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after resizing. Shortest edge of the image is resized to size[\"shortest_edge\"], with\n the longest edge resized to keep the input aspect ratio.\n resample (`int`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only\n has an effect if `do_resize` is set to `True`.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether to center crop the image.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image.\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to\n `True`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n images = make_list_of_images(images)\n\n if do_convert_rgb:\n images = [convert_to_rgb(image) for image in images]\n\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n all_images = []\n for image in images:\n if do_resize:\n image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n\n if do_center_crop:\n image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)\n\n if do_rescale:\n image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n\n if do_normalize:\n image = self.normalize(\n image=image, mean=image_mean, std=image_std, input_data_format=input_data_format\n )\n\n all_images.append(image)\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n for image in all_images\n ]\n\n return images"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.make_list_of_images", "transformers.utils.generic.to_numpy", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_utils.to_numpy_array", "transformers.models.llava_next_video.image_processing_llava_next_video.LlavaNextVideoImageProcessor._preprocess"], "language": "Python", "toolfunc_count": 4, "func_count": 5, "pytest_info": {"total_num": 11, "base_passed_num": 8}} {"id": ["transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_processing_utils.select_best_resolution", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.llava_onevision.image_processing_llava_onevision.LlavaOnevisionImageProcessor::_resize_for_patching", "transformers.src.transformers.image_transforms._rescale_for_pil_conversion", "transformers.src.transformers.image_transforms.to_pil_image", "transformers.src.transformers.models.llava_onevision.image_processing_llava_onevision.LlavaOnevisionImageProcessor::get_image_patches"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/image_processing_utils.py", "transformers/models/llava_onevision/image_processing_llava_onevision.py", "transformers/image_transforms.py", "transformers/models/llava_onevision/image_processing_llava_onevision.py", "transformers/image_transforms.py", "transformers/image_transforms.py", "transformers/models/llava_onevision/image_processing_llava_onevision.py"], "test_list": ["tests/models/llava_onevision/test_image_processing_llava_onevision.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. 
If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 252, "func_end_lineno": 287, "func_code": "def select_best_resolution(original_size: tuple, possible_resolutions: list) -> tuple:\n \"\"\"\n Selects the best resolution from a list of possible resolutions based on the original size.\n\n This is done by calculating the effective and wasted resolution for each possible resolution.\n\n The best fit resolution is the one that maximizes the effective resolution and minimizes the wasted resolution.\n\n Args:\n original_size (tuple):\n The original size of the image in the format (height, width).\n possible_resolutions (list):\n A list of possible resolutions in the format [(height1, width1), (height2, width2), ...].\n\n Returns:\n tuple: The best fit resolution in the format (height, width).\n \"\"\"\n original_height, original_width = original_size\n best_fit = None\n max_effective_resolution = 0\n min_wasted_resolution = float(\"inf\")\n\n for height, width in possible_resolutions:\n scale = min(width / original_width, height / original_height)\n downscaled_width, downscaled_height = int(original_width * scale), int(original_height * scale)\n effective_resolution = min(downscaled_width * downscaled_height, original_width * original_height)\n wasted_resolution = (width * height) - effective_resolution\n\n if effective_resolution > max_effective_resolution or (\n effective_resolution == max_effective_resolution and wasted_resolution < min_wasted_resolution\n ):\n max_effective_resolution = effective_resolution\n min_wasted_resolution = wasted_resolution\n best_fit = (height, width)\n\n return best_fit"}, {"class_start_lineno": 1, "class_end_lineno": 711, "func_start_lineno": 127, "func_end_lineno": 141, "func_code": "def _get_patch_output_size(image, target_resolution, input_data_format):\n original_height, original_width = get_image_size(image, channel_dim=input_data_format)\n target_height, target_width = target_resolution\n\n scale_w = target_width / original_width\n scale_h = target_height / original_height\n\n if scale_w < scale_h:\n new_width = target_width\n new_height = min(math.ceil(original_height * scale_w), target_height)\n else:\n new_height = target_height\n new_width = min(math.ceil(original_width * scale_h), target_width)\n\n return new_height, new_width"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap 
(`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image"}, {"class_start_lineno": 144, "class_end_lineno": 711, "func_start_lineno": 329, "func_end_lineno": 353, "func_code": " def _resize_for_patching(\n self, image: np.array, target_resolution: tuple, resample, input_data_format: ChannelDimension\n ) -> np.array:\n \"\"\"\n Resizes an image to a target resolution while maintaining aspect ratio.\n\n Args:\n image (np.array):\n The input image.\n target_resolution (tuple):\n The target resolution (height, width) of the image.\n resample (`PILImageResampling`):\n Resampling filter to use if resizing the image.\n input_data_format (`ChannelDimension` or `str`):\n The channel dimension format of the input image.\n\n Returns:\n np.array: The resized and padded image.\n \"\"\"\n new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format)\n\n # Resize the image\n resized_image = resize(image, (new_height, 
new_width), resample=resample, input_data_format=input_data_format)\n\n return resized_image"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 135, "func_end_lineno": 159, "func_code": "def _rescale_for_pil_conversion(image):\n \"\"\"\n Detects whether or not the image needs to be rescaled before being converted to a PIL image.\n\n The assumption is that if the image is of type `np.float` and all values are between 0 and 1, it needs to be\n rescaled.\n \"\"\"\n if image.dtype == np.uint8:\n do_rescale = False\n elif np.allclose(image, image.astype(int)):\n if np.all(0 <= image) and np.all(image <= 255):\n do_rescale = False\n else:\n raise ValueError(\n \"The image to be converted to a PIL image contains values outside the range [0, 255], \"\n f\"got [{image.min()}, {image.max()}] which cannot be converted to uint8.\"\n )\n elif np.all(0 <= image) and np.all(image <= 1):\n do_rescale = True\n else:\n raise ValueError(\n \"The image to be converted to a PIL image contains values outside the range [0, 1], \"\n f\"got [{image.min()}, {image.max()}] which cannot be converted to uint8.\"\n )\n return do_rescale"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 162, "func_end_lineno": 210, "func_code": "def to_pil_image(\n image: Union[np.ndarray, \"PIL.Image.Image\", \"torch.Tensor\", \"tf.Tensor\", \"jnp.ndarray\"],\n do_rescale: Optional[bool] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> \"PIL.Image.Image\":\n \"\"\"\n Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if\n needed.\n\n Args:\n image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor` or `tf.Tensor`):\n The image to convert to the `PIL.Image` format.\n do_rescale (`bool`, *optional*):\n Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will default\n to `True` if the image type is a floating type and casting to `int` would result in a loss of precision,\n and `False` otherwise.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `PIL.Image.Image`: The converted image.\n \"\"\"\n requires_backends(to_pil_image, [\"vision\"])\n\n if isinstance(image, PIL.Image.Image):\n return image\n\n # Convert all tensors to numpy arrays before converting to PIL image\n if is_torch_tensor(image) or is_tf_tensor(image):\n image = image.numpy()\n elif is_jax_tensor(image):\n image = np.array(image)\n elif not isinstance(image, np.ndarray):\n raise ValueError(\"Input image type not supported: {}\".format(type(image)))\n\n # If the channel has been moved to first dim, we put it back at the end.\n image = to_channel_dimension_format(image, ChannelDimension.LAST, input_data_format)\n\n # If there is a single channel, we squeeze it, as otherwise PIL can't handle it.\n image = np.squeeze(image, axis=-1) if image.shape[-1] == 1 else image\n\n # PIL.Image can only store uint8 values so we rescale the image to be between 0 and 255 if needed.\n do_rescale = _rescale_for_pil_conversion(image) if do_rescale is None else do_rescale\n\n if do_rescale:\n image = rescale(image, 255)\n\n image = image.astype(np.uint8)\n return PIL.Image.fromarray(image)"}, {"class_start_lineno": 144, "class_end_lineno": 711, "func_start_lineno": 373, "func_end_lineno": 435, "func_code": " def get_image_patches(\n self,\n image: np.array,\n grid_pinpoints,\n size: tuple,\n patch_size: int,\n resample: PILImageResampling,\n data_format: ChannelDimension,\n input_data_format: ChannelDimension,\n ) -> List[np.array]:\n \"\"\"\n Process an image with variable resolutions by dividing it into patches.\n\n Args:\n image (np.array):\n The input image to be processed.\n grid_pinpoints (List):\n A string representation of a list of possible resolutions.\n size (`tuple`):\n Size to resize the original image to.\n patch_size (`int`):\n Size of the patches to divide the image into.\n resample (`PILImageResampling`):\n Resampling filter to use if resizing the image.\n data_format (`ChannelDimension` or `str`):\n The channel dimension format for the output image.\n input_data_format (`ChannelDimension` or `str`):\n The channel dimension format of the input image.\n\n Returns:\n List[np.array]: A list of NumPy arrays containing the processed image patches.\n \"\"\"\n if not isinstance(grid_pinpoints, list):\n raise TypeError(\"grid_pinpoints must be a list of possible resolutions.\")\n\n possible_resolutions = grid_pinpoints\n\n image_size = get_image_size(image, channel_dim=input_data_format)\n best_resolution = select_best_resolution(image_size, possible_resolutions)\n resized_image = self._resize_for_patching(\n image, best_resolution, resample=resample, input_data_format=input_data_format\n )\n padded_image = self._pad_for_patching(resized_image, best_resolution, input_data_format=input_data_format)\n\n patches = divide_to_patches(padded_image, patch_size=patch_size, input_data_format=input_data_format)\n\n # make sure that all patches are in the input data format\n patches = [\n to_channel_dimension_format(patch, channel_dim=data_format, input_channel_dim=input_data_format)\n for patch in patches\n ]\n\n resized_original_image = resize(\n image,\n size=size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n )\n\n image_patches = [resized_original_image] + patches\n\n return image_patches"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.get_image_size", "transformers.image_processing_utils.select_best_resolution", 
"transformers.models.llava_onevision.image_processing_llava_onevision._get_patch_output_size", "transformers.image_transforms.resize", "transformers.models.llava_onevision.image_processing_llava_onevision.LlavaOnevisionImageProcessor._resize_for_patching", "transformers.image_transforms._rescale_for_pil_conversion", "transformers.image_transforms.to_pil_image", "transformers.models.llava_onevision.image_processing_llava_onevision.LlavaOnevisionImageProcessor.get_image_patches"], "language": "Python", "toolfunc_count": 6, "func_count": 7, "pytest_info": {"total_num": 16, "base_passed_num": 9}} {"id": ["transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_utils.infer_channel_dimension_format", "transformers.src.transformers.image_transforms.pad", "transformers.src.transformers.models.llava_onevision.image_processing_llava_onevision.LlavaOnevisionImageProcessor::pad"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/models/llava_onevision/image_processing_llava_onevision.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/llava_onevision/image_processing_llava_onevision.py", "transformers/models/llava_onevision/image_processing_llava_onevision.py"], "test_list": ["tests/models/llava_onevision/test_image_processing_llava_onevision.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. 
If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 711, "func_start_lineno": 127, "func_end_lineno": 141, "func_code": "def _get_patch_output_size(image, target_resolution, input_data_format):\n original_height, original_width = get_image_size(image, channel_dim=input_data_format)\n target_height, target_width = target_resolution\n\n scale_w = target_width / original_width\n scale_h = target_height / original_height\n\n if scale_w < scale_h:\n new_width = target_width\n new_height = min(math.ceil(original_height * scale_w), target_height)\n else:\n new_height = target_height\n new_width = min(math.ceil(original_width * scale_h), target_width)\n\n return new_height, new_width"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 220, "func_end_lineno": 254, "func_code": "def infer_channel_dimension_format(\n image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None\n) -> ChannelDimension:\n \"\"\"\n Infers the channel dimension format of `image`.\n\n Args:\n image (`np.ndarray`):\n The image to infer the channel dimension of.\n num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):\n The number of channels of the image.\n\n Returns:\n The channel dimension of the image.\n \"\"\"\n num_channels = num_channels if num_channels is not None else (1, 3)\n num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels\n\n if image.ndim == 3:\n first_dim, last_dim = 0, 2\n elif image.ndim == 4:\n first_dim, last_dim = 1, 3\n else:\n raise ValueError(f\"Unsupported number of image dimensions: {image.ndim}\")\n\n if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:\n logger.warning(\n f\"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension.\"\n )\n return ChannelDimension.FIRST\n elif image.shape[first_dim] in num_channels:\n return ChannelDimension.FIRST\n elif image.shape[last_dim] in num_channels:\n return ChannelDimension.LAST\n raise ValueError(\"Unable to infer channel dimension format\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 667, "func_end_lineno": 750, "func_code": "def pad(\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified (height, width) `padding` and `mode`.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. 
Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n\n def _expand_for_data_format(values):\n \"\"\"\n Convert values to be in the format expected by np.pad based on the data format.\n \"\"\"\n if isinstance(values, (int, float)):\n values = ((values, values), (values, values))\n elif isinstance(values, tuple) and len(values) == 1:\n values = ((values[0], values[0]), (values[0], values[0]))\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):\n values = (values, values)\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):\n values = values\n else:\n raise ValueError(f\"Unsupported format: {values}\")\n\n # add 0 for channel dimension\n values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0))\n\n # Add additional padding if there's a batch dimension\n values = (0, *values) if image.ndim == 4 else values\n return values\n\n padding = _expand_for_data_format(padding)\n\n if mode == PaddingMode.CONSTANT:\n constant_values = _expand_for_data_format(constant_values)\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n\n image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n return image"}, {"class_start_lineno": 144, "class_end_lineno": 711, "func_start_lineno": 260, "func_end_lineno": 326, "func_code": " def pad(\n self,\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], 
Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`)\n dimension of in the (`num_patches`) dimension. In the second case an iterable if tuples is expected\n as input.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n\n # call the general `pad` if padding on `height/width`, otherwise it's the `num_patched` dim\n if isinstance(padding, int) or len(padding) != 4:\n return pad(image, padding, mode, constant_values, data_format, input_data_format)\n\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n if mode == PaddingMode.CONSTANT:\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n image = (\n to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n )\n return image"}, {"class_start_lineno": 144, "class_end_lineno": 711, "func_start_lineno": 356, "func_end_lineno": 370, "func_code": " def _pad_for_patching(\n self, image: np.array, target_resolution: tuple, input_data_format: ChannelDimension\n ) -> np.array:\n \"\"\"\n Pad an image to a target resolution while maintaining aspect ratio.\n \"\"\"\n target_height, target_width = target_resolution\n new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format)\n\n paste_x = (target_width - new_width) // 2\n paste_y = (target_height - new_height) // 2\n\n padded_image = self.pad(image, padding=((paste_y, paste_y), (paste_x, paste_x)))\n\n return padded_image"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.get_image_size", "transformers.models.llava_onevision.image_processing_llava_onevision._get_patch_output_size", "transformers.image_utils.infer_channel_dimension_format", "transformers.image_transforms.pad", "transformers.models.llava_onevision.image_processing_llava_onevision.LlavaOnevisionImageProcessor.pad", "transformers.models.llava_onevision.image_processing_llava_onevision.LlavaOnevisionImageProcessor._pad_for_patching"], "language": "Python", "toolfunc_count": 3, "func_count": 4, "pytest_info": {"total_num": 16, "base_passed_num": 9}} {"id": ["transformers.src.transformers.image_utils.infer_channel_dimension_format", "transformers.src.transformers.image_transforms.pad", "transformers.src.transformers.models.llava_onevision.image_processing_llava_onevision.LlavaOnevisionImageProcessor::pad"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/llava_onevision/image_processing_llava_onevision.py"], "test_list": ["tests/models/llava_onevision/test_image_processing_llava_onevision.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 220, "func_end_lineno": 254, "func_code": "def infer_channel_dimension_format(\n image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None\n) -> ChannelDimension:\n \"\"\"\n Infers the channel dimension format of `image`.\n\n Args:\n image (`np.ndarray`):\n The image to infer the channel dimension of.\n num_channels (`int` or `Tuple[int, ...]`, 
*optional*, defaults to `(1, 3)`):\n The number of channels of the image.\n\n Returns:\n The channel dimension of the image.\n \"\"\"\n num_channels = num_channels if num_channels is not None else (1, 3)\n num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels\n\n if image.ndim == 3:\n first_dim, last_dim = 0, 2\n elif image.ndim == 4:\n first_dim, last_dim = 1, 3\n else:\n raise ValueError(f\"Unsupported number of image dimensions: {image.ndim}\")\n\n if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:\n logger.warning(\n f\"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension.\"\n )\n return ChannelDimension.FIRST\n elif image.shape[first_dim] in num_channels:\n return ChannelDimension.FIRST\n elif image.shape[last_dim] in num_channels:\n return ChannelDimension.LAST\n raise ValueError(\"Unable to infer channel dimension format\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 667, "func_end_lineno": 750, "func_code": "def pad(\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified (height, width) `padding` and `mode`.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n\n def _expand_for_data_format(values):\n \"\"\"\n Convert values to be in the format expected by np.pad based on the data format.\n \"\"\"\n if isinstance(values, (int, float)):\n values = ((values, values), (values, values))\n elif isinstance(values, tuple) and len(values) == 1:\n values = ((values[0], values[0]), (values[0], values[0]))\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):\n values = (values, values)\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):\n values = values\n else:\n raise ValueError(f\"Unsupported format: {values}\")\n\n # add 0 for channel dimension\n values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0))\n\n # Add additional padding if there's a batch dimension\n values = (0, *values) if image.ndim == 4 else values\n return values\n\n padding = _expand_for_data_format(padding)\n\n if mode == PaddingMode.CONSTANT:\n constant_values = _expand_for_data_format(constant_values)\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n\n image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n return image"}, {"class_start_lineno": 144, "class_end_lineno": 711, "func_start_lineno": 260, "func_end_lineno": 326, "func_code": " def pad(\n self,\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`)\n dimension of in the (`num_patches`) dimension. In the second case an iterable if tuples is expected\n as input.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. 
Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n\n # call the general `pad` if padding on `height/width`, otherwise it's the `num_patched` dim\n if isinstance(padding, int) or len(padding) != 4:\n return pad(image, padding, mode, constant_values, data_format, input_data_format)\n\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n if mode == PaddingMode.CONSTANT:\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n image = (\n to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n )\n return image"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.infer_channel_dimension_format", "transformers.image_transforms.pad", "transformers.models.llava_onevision.image_processing_llava_onevision.LlavaOnevisionImageProcessor.pad"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 16, "base_passed_num": 9}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.make_list_of_images", "transformers.src.transformers.image_utils.validate_preprocess_arguments", "transformers.src.transformers.utils.generic.to_numpy", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.mobilenet_v1.image_processing_mobilenet_v1.MobileNetV1ImageProcessor::preprocess"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_utils.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_utils.py", "transformers/models/mobilenet_v1/image_processing_mobilenet_v1.py"], "test_list": 
["tests/models/mobilenet_v1/test_image_processing_mobilenet_v1.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 172, "func_end_lineno": 208, "func_code": "def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:\n \"\"\"\n Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.\n If the input is a batch of images, it is converted to a list of images.\n\n Args:\n images (`ImageInput`):\n Image of images to turn into a list of images.\n expected_ndims (`int`, *optional*, defaults to 3):\n Expected number of dimensions for a single input image. If the input image has a different number of\n dimensions, an error is raised.\n \"\"\"\n if is_batched(images):\n return images\n\n # Either the input is a single image, in which case we create a list of length 1\n if isinstance(images, PIL.Image.Image):\n # PIL images are never batched\n return [images]\n\n if is_valid_image(images):\n if images.ndim == expected_ndims + 1:\n # Batch of images\n images = list(images)\n elif images.ndim == expected_ndims:\n # Single image\n images = [images]\n else:\n raise ValueError(\n f\"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got\"\n f\" {images.ndim} dimensions.\"\n )\n return images\n raise ValueError(\n \"Invalid image type. 
Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or \"\n f\"jax.ndarray, but got {type(images)}.\"\n )"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 388, "func_end_lineno": 426, "func_code": "def validate_preprocess_arguments(\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n size_divisibility: Optional[int] = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: Optional[\"PILImageResampling\"] = None,\n):\n \"\"\"\n Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.\n Raises `ValueError` if arguments incompatibility is caught.\n Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,\n sometimes `size_divisibility`, and sometimes `size`. New models and processors added should follow\n existing arguments when possible.\n\n \"\"\"\n if do_rescale and rescale_factor is None:\n raise ValueError(\"`rescale_factor` must be specified if `do_rescale` is `True`.\")\n\n if do_pad and size_divisibility is None:\n # Here, size_divisor might be passed as the value of size\n raise ValueError(\n \"Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if `do_pad` is `True`.\"\n )\n\n if do_normalize and (image_mean is None or image_std is None):\n raise ValueError(\"`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.\")\n\n if do_center_crop and crop_size is None:\n raise ValueError(\"`crop_size` must be specified if `do_center_crop` is `True`.\")\n\n if do_resize and (size is None or resample is None):\n raise ValueError(\"`size` and `resample` must be specified if `do_resize` is `True`.\")"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 284, "func_end_lineno": 307, "func_code": "def to_numpy(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.\n \"\"\"\n\n framework_to_numpy = {\n \"pt\": lambda obj: obj.detach().cpu().numpy(),\n \"tf\": lambda obj: obj.numpy(),\n \"jax\": lambda obj: np.asarray(obj),\n \"np\": lambda obj: obj,\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_numpy(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return np.array(obj)\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_numpy[framework](obj)\n\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" np.ndarray:\n if not is_valid_image(img):\n raise ValueError(f\"Invalid image type: {type(img)}\")\n\n if is_vision_available() and isinstance(img, PIL.Image.Image):\n return np.array(img)\n return to_numpy(img)"}, 
{"class_start_lineno": 46, "class_end_lineno": 302, "func_start_lineno": 168, "func_end_lineno": 302, "func_code": " def preprocess(\n self,\n images: ImageInput,\n do_resize: Optional[bool] = None,\n size: Dict[str, int] = None,\n resample: PILImageResampling = None,\n do_center_crop: bool = None,\n crop_size: Dict[str, int] = None,\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ):\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after resizing. Shortest edge of the image is resized to size[\"shortest_edge\"], with\n the longest edge resized to keep the input aspect ratio.\n resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):\n `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has\n an effect if `do_resize` is set to `True`.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether to center crop the image.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image values between [0 - 1].\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to use if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to use if `do_normalize` is set to `True`.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. 
If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n size = get_size_dict(size, default_to_square=False)\n resample = resample if resample is not None else self.resample\n do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop\n crop_size = crop_size if crop_size is not None else self.crop_size\n crop_size = get_size_dict(crop_size)\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n validate_preprocess_arguments(\n do_rescale=do_rescale,\n rescale_factor=rescale_factor,\n do_normalize=do_normalize,\n image_mean=image_mean,\n image_std=image_std,\n do_center_crop=do_center_crop,\n crop_size=crop_size,\n do_resize=do_resize,\n size=size,\n resample=resample,\n )\n\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n all_images = []\n for image in images:\n if do_resize:\n image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n\n if do_center_crop:\n image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)\n\n if do_rescale:\n image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n\n if do_normalize:\n image = self.normalize(\n image=image, mean=image_mean, std=image_std, input_data_format=input_data_format\n )\n\n all_images.append(image)\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n for image in all_images\n ]\n\n data = {\"pixel_values\": images}\n return BatchFeature(data=data, tensor_type=return_tensors)"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.make_list_of_images", "transformers.image_utils.validate_preprocess_arguments", "transformers.utils.generic.to_numpy", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_utils.to_numpy_array", "transformers.models.mobilenet_v1.image_processing_mobilenet_v1.MobileNetV1ImageProcessor.preprocess"], "language": "Python", "toolfunc_count": 6, "func_count": 7, "pytest_info": {"total_num": 13, "base_passed_num": 6}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.make_list_of_images", "transformers.src.transformers.image_utils.validate_preprocess_arguments", "transformers.src.transformers.utils.generic.to_numpy", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.mobilenet_v2.image_processing_mobilenet_v2.MobileNetV2ImageProcessor::preprocess"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_utils.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_utils.py", "transformers/models/mobilenet_v2/image_processing_mobilenet_v2.py"], "test_list": ["tests/models/mobilenet_v2/test_image_processing_mobilenet_v2.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. 
This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 172, "func_end_lineno": 208, "func_code": "def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:\n \"\"\"\n Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.\n If the input is a batch of images, it is converted to a list of images.\n\n Args:\n images (`ImageInput`):\n Image of images to turn into a list of images.\n expected_ndims (`int`, *optional*, defaults to 3):\n Expected number of dimensions for a single input image. If the input image has a different number of\n dimensions, an error is raised.\n \"\"\"\n if is_batched(images):\n return images\n\n # Either the input is a single image, in which case we create a list of length 1\n if isinstance(images, PIL.Image.Image):\n # PIL images are never batched\n return [images]\n\n if is_valid_image(images):\n if images.ndim == expected_ndims + 1:\n # Batch of images\n images = list(images)\n elif images.ndim == expected_ndims:\n # Single image\n images = [images]\n else:\n raise ValueError(\n f\"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got\"\n f\" {images.ndim} dimensions.\"\n )\n return images\n raise ValueError(\n \"Invalid image type. 
Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or \"\n f\"jax.ndarray, but got {type(images)}.\"\n )"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 388, "func_end_lineno": 426, "func_code": "def validate_preprocess_arguments(\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n size_divisibility: Optional[int] = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: Optional[\"PILImageResampling\"] = None,\n):\n \"\"\"\n Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.\n Raises `ValueError` if arguments incompatibility is caught.\n Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,\n sometimes `size_divisibility`, and sometimes `size`. New models and processors added should follow\n existing arguments when possible.\n\n \"\"\"\n if do_rescale and rescale_factor is None:\n raise ValueError(\"`rescale_factor` must be specified if `do_rescale` is `True`.\")\n\n if do_pad and size_divisibility is None:\n # Here, size_divisor might be passed as the value of size\n raise ValueError(\n \"Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if `do_pad` is `True`.\"\n )\n\n if do_normalize and (image_mean is None or image_std is None):\n raise ValueError(\"`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.\")\n\n if do_center_crop and crop_size is None:\n raise ValueError(\"`crop_size` must be specified if `do_center_crop` is `True`.\")\n\n if do_resize and (size is None or resample is None):\n raise ValueError(\"`size` and `resample` must be specified if `do_resize` is `True`.\")"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 284, "func_end_lineno": 307, "func_code": "def to_numpy(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.\n \"\"\"\n\n framework_to_numpy = {\n \"pt\": lambda obj: obj.detach().cpu().numpy(),\n \"tf\": lambda obj: obj.numpy(),\n \"jax\": lambda obj: np.asarray(obj),\n \"np\": lambda obj: obj,\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_numpy(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return np.array(obj)\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_numpy[framework](obj)\n\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" np.ndarray:\n if not is_valid_image(img):\n raise ValueError(f\"Invalid image type: {type(img)}\")\n\n if is_vision_available() and isinstance(img, PIL.Image.Image):\n return np.array(img)\n return to_numpy(img)"}, 
{"class_start_lineno": 50, "class_end_lineno": 349, "func_start_lineno": 172, "func_end_lineno": 305, "func_code": " def preprocess(\n self,\n images: ImageInput,\n do_resize: Optional[bool] = None,\n size: Dict[str, int] = None,\n resample: PILImageResampling = None,\n do_center_crop: bool = None,\n crop_size: Dict[str, int] = None,\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ):\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after resizing. Shortest edge of the image is resized to size[\"shortest_edge\"], with\n the longest edge resized to keep the input aspect ratio.\n resample (`PILImageResampling` filter, *optional*, defaults to `self.resample`):\n `PILImageResampling` filter to use if resizing the image e.g. `PILImageResampling.BILINEAR`. Only has\n an effect if `do_resize` is set to `True`.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether to center crop the image.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image values between [0 - 1].\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to use if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to use if `do_normalize` is set to `True`.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. 
If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n size = get_size_dict(size, default_to_square=False)\n resample = resample if resample is not None else self.resample\n do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop\n crop_size = crop_size if crop_size is not None else self.crop_size\n crop_size = get_size_dict(crop_size, param_name=\"crop_size\")\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n validate_preprocess_arguments(\n do_rescale=do_rescale,\n rescale_factor=rescale_factor,\n do_normalize=do_normalize,\n image_mean=image_mean,\n image_std=image_std,\n do_center_crop=do_center_crop,\n crop_size=crop_size,\n do_resize=do_resize,\n size=size,\n resample=resample,\n )\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n all_images = []\n for image in images:\n if do_resize:\n image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n\n if do_center_crop:\n image = self.center_crop(image=image, size=crop_size, input_data_format=input_data_format)\n\n if do_rescale:\n image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n\n if do_normalize:\n image = self.normalize(\n image=image, mean=image_mean, std=image_std, input_data_format=input_data_format\n )\n\n all_images.append(image)\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)\n for image in all_images\n ]\n\n data = {\"pixel_values\": images}\n return BatchFeature(data=data, tensor_type=return_tensors)"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.make_list_of_images", "transformers.image_utils.validate_preprocess_arguments", "transformers.utils.generic.to_numpy", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_utils.to_numpy_array", "transformers.models.mobilenet_v2.image_processing_mobilenet_v2.MobileNetV2ImageProcessor.preprocess"], "language": "Python", "toolfunc_count": 6, "func_count": 7, "pytest_info": {"total_num": 13, "base_passed_num": 6}} {"id": ["transformers.src.transformers.image_utils.make_list_of_images", "transformers.src.transformers.image_utils.validate_preprocess_arguments", "transformers.src.transformers.utils.generic.to_numpy", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor::preprocess"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/image_utils.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_utils.py", "transformers/models/owlvit/image_processing_owlvit.py"], "test_list": ["tests/models/owlvit/test_image_processing_owlvit.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 172, "func_end_lineno": 208, "func_code": "def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:\n \"\"\"\n Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.\n If the input is a batch of images, it is converted to a list of images.\n\n Args:\n images (`ImageInput`):\n Image of images to turn into a list of images.\n expected_ndims (`int`, *optional*, defaults to 3):\n Expected number of dimensions for a single input image. 
If the input image has a different number of\n dimensions, an error is raised.\n \"\"\"\n if is_batched(images):\n return images\n\n # Either the input is a single image, in which case we create a list of length 1\n if isinstance(images, PIL.Image.Image):\n # PIL images are never batched\n return [images]\n\n if is_valid_image(images):\n if images.ndim == expected_ndims + 1:\n # Batch of images\n images = list(images)\n elif images.ndim == expected_ndims:\n # Single image\n images = [images]\n else:\n raise ValueError(\n f\"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got\"\n f\" {images.ndim} dimensions.\"\n )\n return images\n raise ValueError(\n \"Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or \"\n f\"jax.ndarray, but got {type(images)}.\"\n )"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 388, "func_end_lineno": 426, "func_code": "def validate_preprocess_arguments(\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n size_divisibility: Optional[int] = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: Optional[\"PILImageResampling\"] = None,\n):\n \"\"\"\n Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.\n Raises `ValueError` if arguments incompatibility is caught.\n Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,\n sometimes `size_divisibility`, and sometimes `size`. 
New models and processors added should follow\n existing arguments when possible.\n\n \"\"\"\n if do_rescale and rescale_factor is None:\n raise ValueError(\"`rescale_factor` must be specified if `do_rescale` is `True`.\")\n\n if do_pad and size_divisibility is None:\n # Here, size_divisor might be passed as the value of size\n raise ValueError(\n \"Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if `do_pad` is `True`.\"\n )\n\n if do_normalize and (image_mean is None or image_std is None):\n raise ValueError(\"`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.\")\n\n if do_center_crop and crop_size is None:\n raise ValueError(\"`crop_size` must be specified if `do_center_crop` is `True`.\")\n\n if do_resize and (size is None or resample is None):\n raise ValueError(\"`size` and `resample` must be specified if `do_resize` is `True`.\")"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 284, "func_end_lineno": 307, "func_code": "def to_numpy(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.\n \"\"\"\n\n framework_to_numpy = {\n \"pt\": lambda obj: obj.detach().cpu().numpy(),\n \"tf\": lambda obj: obj.numpy(),\n \"jax\": lambda obj: np.asarray(obj),\n \"np\": lambda obj: obj,\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_numpy(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return np.array(obj)\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_numpy[framework](obj)\n\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" np.ndarray:\n if not is_valid_image(img):\n raise ValueError(f\"Invalid image type: {type(img)}\")\n\n if is_vision_available() and isinstance(img, PIL.Image.Image):\n return np.array(img)\n return to_numpy(img)"}, {"class_start_lineno": 92, "class_end_lineno": 598, "func_start_lineno": 272, "func_end_lineno": 413, "func_code": " def preprocess(\n self,\n images: ImageInput,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: PILImageResampling = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n return_tensors: Optional[Union[TensorType, str]] = None,\n data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> BatchFeature:\n \"\"\"\n Prepares an image or batch of images for the model.\n\n Args:\n images (`ImageInput`):\n The image or batch of images to be prepared. Expects a single or batch of images with pixel values\n ranging from 0 to 255. 
If passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether or not to resize the input. If `True`, will resize the input to the size specified by `size`.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n The size to resize the input to. Only has an effect if `do_resize` is set to `True`.\n resample (`PILImageResampling`, *optional*, defaults to `self.resample`):\n The resampling filter to use when resizing the input. Only has an effect if `do_resize` is set to\n `True`.\n do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):\n Whether or not to center crop the input. If `True`, will center crop the input to the size specified by\n `crop_size`.\n crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):\n The size to center crop the input to. Only has an effect if `do_center_crop` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether or not to rescale the input. If `True`, will rescale the input by dividing it by\n `rescale_factor`.\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n The factor to rescale the input by. Only has an effect if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether or not to normalize the input. If `True`, will normalize the input by subtracting `image_mean`\n and dividing by `image_std`.\n image_mean (`Union[float, List[float]]`, *optional*, defaults to `self.image_mean`):\n The mean to subtract from the input when normalizing. Only has an effect if `do_normalize` is set to\n `True`.\n image_std (`Union[float, List[float]]`, *optional*, defaults to `self.image_std`):\n The standard deviation to divide the input by when normalizing. Only has an effect if `do_normalize` is\n set to `True`.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: defaults to the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n resample = resample if resample is not None else self.resample\n do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop\n crop_size = crop_size if crop_size is not None else self.crop_size\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n\n validate_preprocess_arguments(\n do_rescale=do_rescale,\n rescale_factor=rescale_factor,\n do_normalize=do_normalize,\n image_mean=image_mean,\n image_std=image_std,\n do_center_crop=do_center_crop,\n crop_size=crop_size,\n do_resize=do_resize,\n size=size,\n resample=resample,\n )\n\n # All transformations expect numpy arrays\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n if do_resize:\n images = [\n self.resize(image, size=size, resample=resample, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_center_crop:\n images = [\n self.center_crop(image, crop_size=crop_size, input_data_format=input_data_format) for image in images\n ]\n\n if do_rescale:\n images = [\n self.rescale(image, rescale_factor=rescale_factor, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_normalize:\n images = [\n self.normalize(image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n for image in images\n ]\n\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images\n ]\n encoded_inputs = BatchFeature(data={\"pixel_values\": images}, tensor_type=return_tensors)\n return encoded_inputs"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.make_list_of_images", "transformers.image_utils.validate_preprocess_arguments", "transformers.utils.generic.to_numpy", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_utils.to_numpy_array", "transformers.models.owlvit.image_processing_owlvit.OwlViTImageProcessor.preprocess"], "language": "Python", "toolfunc_count": 5, "func_count": 6, "pytest_info": {"total_num": 13, "base_passed_num": 8}} {"id": ["transformers.src.transformers.image_utils.get_image_size", 
"transformers.src.transformers.image_transforms.pad", "transformers.src.transformers.models.vilt.image_processing_vilt.ViltImageProcessor::_pad_image", "transformers.src.transformers.models.vilt.image_processing_vilt.ViltImageProcessor::pad"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/vilt/image_processing_vilt.py", "transformers/models/vilt/image_processing_vilt.py"], "test_list": ["tests/models/vilt/test_image_processing_vilt.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 667, "func_end_lineno": 750, "func_code": "def pad(\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified (height, width) `padding` and `mode`.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n\n def _expand_for_data_format(values):\n \"\"\"\n Convert values to be in the format expected by np.pad based on the data format.\n \"\"\"\n if isinstance(values, (int, float)):\n values = ((values, values), (values, values))\n elif isinstance(values, tuple) and len(values) == 1:\n values = ((values[0], values[0]), (values[0], values[0]))\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):\n values = (values, values)\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):\n values = values\n else:\n raise ValueError(f\"Unsupported format: {values}\")\n\n # add 0 for channel dimension\n values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0))\n\n # Add additional padding if there's a batch dimension\n values = (0, *values) if image.ndim == 4 else values\n return values\n\n padding = _expand_for_data_format(padding)\n\n if mode == PaddingMode.CONSTANT:\n constant_values = _expand_for_data_format(constant_values)\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n\n image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n return image"}, {"class_start_lineno": 121, "class_end_lineno": 486, "func_start_lineno": 255, "func_end_lineno": 280, "func_code": " def _pad_image(\n self,\n image: np.ndarray,\n output_size: Tuple[int, int],\n constant_values: Union[float, Iterable[float]] = 0,\n data_format: Optional[ChannelDimension] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n \"\"\"\n Pad an image with zeros to the given size.\n \"\"\"\n input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n output_height, output_width = output_size\n\n pad_bottom = output_height - input_height\n pad_right = output_width - input_width\n padding = ((0, pad_bottom), (0, pad_right))\n padded_image = pad(\n image,\n padding,\n mode=PaddingMode.CONSTANT,\n constant_values=constant_values,\n data_format=data_format,\n input_data_format=input_data_format,\n )\n return padded_image"}, {"class_start_lineno": 121, "class_end_lineno": 486, "func_start_lineno": 282, "func_end_lineno": 335, "func_code": " def pad(\n self,\n images: List[np.ndarray],\n constant_values: Union[float, Iterable[float]] = 0,\n return_pixel_mask: bool = True,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: Optional[ChannelDimension] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> BatchFeature:\n \"\"\"\n Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width\n in the batch and optionally returns their corresponding pixel 
mask.\n\n Args:\n image (`np.ndarray`):\n Image to pad.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n return_pixel_mask (`bool`, *optional*, defaults to `True`):\n Whether to return a pixel mask.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred.\n \"\"\"\n pad_size = get_max_height_width(images, input_data_format=input_data_format)\n\n padded_images = [\n self._pad_image(\n image,\n pad_size,\n constant_values=constant_values,\n data_format=data_format,\n input_data_format=input_data_format,\n )\n for image in images\n ]\n data = {\"pixel_values\": padded_images}\n\n if return_pixel_mask:\n masks = [\n make_pixel_mask(image=image, output_size=pad_size, input_data_format=input_data_format)\n for image in images\n ]\n data[\"pixel_mask\"] = masks\n\n return BatchFeature(data=data, tensor_type=return_tensors)"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.get_image_size", "transformers.image_transforms.pad", "transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image", "transformers.models.vilt.image_processing_vilt.ViltImageProcessor.pad"], "language": "Python", "toolfunc_count": 2, "func_count": 4, "pytest_info": {"total_num": 13, "base_passed_num": 8}} {"id": ["transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.pad", "transformers.src.transformers.models.vilt.image_processing_vilt.ViltImageProcessor::_pad_image"], "project": "transformers", "origin_file": ["transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/vilt/image_processing_vilt.py"], "test_list": ["tests/models/vilt/test_image_processing_vilt.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. 
If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 667, "func_end_lineno": 750, "func_code": "def pad(\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified (height, width) `padding` and `mode`.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n\n def _expand_for_data_format(values):\n \"\"\"\n Convert values to be in the format expected by np.pad based on the data format.\n \"\"\"\n if isinstance(values, (int, float)):\n values = ((values, values), (values, values))\n elif isinstance(values, tuple) and len(values) == 1:\n values = ((values[0], values[0]), (values[0], values[0]))\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], int):\n values = (values, values)\n elif isinstance(values, tuple) and len(values) == 2 and isinstance(values[0], tuple):\n values = values\n else:\n raise ValueError(f\"Unsupported format: {values}\")\n\n # add 0 for channel dimension\n values = ((0, 0), *values) if input_data_format == ChannelDimension.FIRST else (*values, (0, 0))\n\n # Add additional padding if there's a batch dimension\n values = (0, *values) if image.ndim == 4 else values\n return values\n\n padding = _expand_for_data_format(padding)\n\n if mode == PaddingMode.CONSTANT:\n constant_values = _expand_for_data_format(constant_values)\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n\n image = to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n return image"}, {"class_start_lineno": 121, "class_end_lineno": 486, "func_start_lineno": 255, "func_end_lineno": 280, "func_code": " def _pad_image(\n self,\n image: np.ndarray,\n output_size: Tuple[int, int],\n constant_values: Union[float, Iterable[float]] = 0,\n data_format: Optional[ChannelDimension] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n \"\"\"\n Pad an image with zeros to the given size.\n \"\"\"\n input_height, input_width = get_image_size(image, channel_dim=input_data_format)\n output_height, output_width = output_size\n\n pad_bottom = output_height - input_height\n pad_right = output_width - input_width\n padding = ((0, pad_bottom), (0, pad_right))\n padded_image = pad(\n image,\n padding,\n mode=PaddingMode.CONSTANT,\n constant_values=constant_values,\n data_format=data_format,\n input_data_format=input_data_format,\n )\n return padded_image"}], "type": ["function_empty", "Development"], "node": ["transformers.image_utils.get_image_size", "transformers.image_transforms.pad", "transformers.models.vilt.image_processing_vilt.ViltImageProcessor._pad_image"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 13, "base_passed_num": 8}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.make_list_of_images", "transformers.src.transformers.image_utils.validate_preprocess_arguments", "transformers.src.transformers.utils.generic.to_numpy", 
"transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.vilt.image_processing_vilt.ViltImageProcessor::preprocess"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_utils.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_utils.py", "transformers/models/vilt/image_processing_vilt.py"], "test_list": ["tests/models/vilt/test_image_processing_vilt.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 172, "func_end_lineno": 208, "func_code": "def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:\n \"\"\"\n Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.\n If the input is a batch of images, it is converted to a list of images.\n\n Args:\n images (`ImageInput`):\n Image of images to turn into a list of images.\n expected_ndims (`int`, *optional*, defaults to 3):\n Expected number of dimensions for a single input image. 
If the input image has a different number of\n dimensions, an error is raised.\n \"\"\"\n if is_batched(images):\n return images\n\n # Either the input is a single image, in which case we create a list of length 1\n if isinstance(images, PIL.Image.Image):\n # PIL images are never batched\n return [images]\n\n if is_valid_image(images):\n if images.ndim == expected_ndims + 1:\n # Batch of images\n images = list(images)\n elif images.ndim == expected_ndims:\n # Single image\n images = [images]\n else:\n raise ValueError(\n f\"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got\"\n f\" {images.ndim} dimensions.\"\n )\n return images\n raise ValueError(\n \"Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or \"\n f\"jax.ndarray, but got {type(images)}.\"\n )"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 388, "func_end_lineno": 426, "func_code": "def validate_preprocess_arguments(\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n size_divisibility: Optional[int] = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: Optional[\"PILImageResampling\"] = None,\n):\n \"\"\"\n Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.\n Raises `ValueError` if arguments incompatibility is caught.\n Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,\n sometimes `size_divisibility`, and sometimes `size`. 
New models and processors added should follow\n existing arguments when possible.\n\n \"\"\"\n if do_rescale and rescale_factor is None:\n raise ValueError(\"`rescale_factor` must be specified if `do_rescale` is `True`.\")\n\n if do_pad and size_divisibility is None:\n # Here, size_divisor might be passed as the value of size\n raise ValueError(\n \"Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if `do_pad` is `True`.\"\n )\n\n if do_normalize and (image_mean is None or image_std is None):\n raise ValueError(\"`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.\")\n\n if do_center_crop and crop_size is None:\n raise ValueError(\"`crop_size` must be specified if `do_center_crop` is `True`.\")\n\n if do_resize and (size is None or resample is None):\n raise ValueError(\"`size` and `resample` must be specified if `do_resize` is `True`.\")"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 284, "func_end_lineno": 307, "func_code": "def to_numpy(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.\n \"\"\"\n\n framework_to_numpy = {\n \"pt\": lambda obj: obj.detach().cpu().numpy(),\n \"tf\": lambda obj: obj.numpy(),\n \"jax\": lambda obj: np.asarray(obj),\n \"np\": lambda obj: obj,\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_numpy(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return np.array(obj)\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_numpy[framework](obj)\n\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" np.ndarray:\n if not is_valid_image(img):\n raise ValueError(f\"Invalid image type: {type(img)}\")\n\n if is_vision_available() and isinstance(img, PIL.Image.Image):\n return np.array(img)\n return to_numpy(img)"}, {"class_start_lineno": 121, "class_end_lineno": 486, "func_start_lineno": 338, "func_end_lineno": 486, "func_code": " def preprocess(\n self,\n images: ImageInput,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n size_divisor: Optional[int] = None,\n resample: PILImageResampling = None,\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: ChannelDimension = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> PIL.Image.Image:\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. 
If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Controls the size of the image after `resize`. The shortest edge of the image is resized to\n `size[\"shortest_edge\"]` whilst preserving the aspect ratio. If the longest edge of this resized image\n is > `int(size[\"shortest_edge\"] * (1333 / 800))`, then the image is resized again to make the longest\n edge equal to `int(size[\"shortest_edge\"] * (1333 / 800))`.\n size_divisor (`int`, *optional*, defaults to `self.size_divisor`):\n The image is resized to a size that is a multiple of this value.\n resample (`PILImageResampling`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image values between [0 - 1].\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to normalize the image by if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to normalize the image by if `do_normalize` is set to `True`.\n do_pad (`bool`, *optional*, defaults to `self.do_pad`):\n Whether to pad the image to the (max_height, max_width) in the batch. If `True`, a pixel mask is also\n created and returned.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n size_divisor = size_divisor if size_divisor is not None else self.size_divisor\n resample = resample if resample is not None else self.resample\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n do_pad = do_pad if do_pad is not None else self.do_pad\n\n size = size if size is not None else self.size\n size = get_size_dict(size, default_to_square=False)\n\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n\n # Here the pad() method does not require any additional argument as it takes the maximum of (height, width).\n # Hence, it does not need to be passed to a validate_preprocess_arguments() method.\n validate_preprocess_arguments(\n do_rescale=do_rescale,\n rescale_factor=rescale_factor,\n do_normalize=do_normalize,\n image_mean=image_mean,\n image_std=image_std,\n do_resize=do_resize,\n size=size,\n resample=resample,\n )\n\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n if do_resize:\n images = [\n self.resize(\n image=image,\n size=size,\n size_divisor=size_divisor,\n resample=resample,\n input_data_format=input_data_format,\n )\n for image in images\n ]\n\n if do_rescale:\n images = [\n self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_normalize:\n images = [\n self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n for image in images\n ]\n\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images\n ]\n\n if do_pad:\n encoded_outputs = self.pad(\n images, return_pixel_mask=True, return_tensors=return_tensors, input_data_format=data_format\n )\n else:\n encoded_outputs = BatchFeature(data={\"pixel_values\": images}, tensor_type=return_tensors)\n\n return encoded_outputs"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.make_list_of_images", "transformers.image_utils.validate_preprocess_arguments", "transformers.utils.generic.to_numpy", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_utils.to_numpy_array", "transformers.models.vilt.image_processing_vilt.ViltImageProcessor.preprocess"], "language": "Python", "toolfunc_count": 6, "func_count": 7, "pytest_info": {"total_num": 13, "base_passed_num": 6}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.get_resize_output_image_size", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.vivit.image_processing_vivit.VivitImageProcessor::resize"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/image_transforms.py", "transformers/models/vivit/image_processing_vivit.py"], "test_list": ["tests/models/vivit/test_image_processing_vivit.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 214, "func_end_lineno": 278, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square\n (`size`,`size`). 
If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)"}, {"class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image"}, {"class_start_lineno": 66, "class_end_lineno": 404, "func_start_lineno": 142, "func_end_lineno": 184, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BILINEAR,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image. If `size` is of the form `{\"height\": h, \"width\": w}`, the output image will\n have the size `(h, w)`. If `size` is of the form `{\"shortest_edge\": s}`, the output image will have its\n shortest edge of length `s` while keeping the aspect ratio of the original image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred.\n \"\"\"\n size = get_size_dict(size, default_to_square=False)\n if \"shortest_edge\" in size:\n output_size = get_resize_output_image_size(\n image, size[\"shortest_edge\"], default_to_square=False, input_data_format=input_data_format\n )\n elif \"height\" in size and \"width\" in size:\n output_size = (size[\"height\"], size[\"width\"])\n else:\n raise ValueError(f\"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}\")\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.get_image_size", "transformers.image_transforms.get_resize_output_image_size", "transformers.image_transforms.resize", "transformers.models.vivit.image_processing_vivit.VivitImageProcessor.resize"], "language": "Python", "toolfunc_count": 4, "func_count": 5, "pytest_info": {"total_num": 14, "base_passed_num": 7}} {"id": ["transformers.src.transformers.utils.generic.to_py_obj", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer::decode"], "project": "transformers", "origin_file": ["transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/models/wav2vec2/tokenization_wav2vec2.py"], "test_list": ["tests/models/wav2vec2/test_tokenization_wav2vec2.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 254, "func_end_lineno": 281, "func_code": "def to_py_obj(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.\n \"\"\"\n\n framework_to_py_obj = {\n \"pt\": lambda obj: obj.detach().cpu().tolist(),\n \"tf\": lambda obj: obj.numpy().tolist(),\n \"jax\": lambda obj: np.asarray(obj).tolist(),\n \"np\": lambda obj: obj.tolist(),\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_py_obj(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return [to_py_obj(o) for o in obj]\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_py_obj[framework](obj)\n\n # tolist also works on 0d np arrays\n if isinstance(obj, np.number):\n return obj.tolist()\n else:\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" str:\n \"\"\"\n Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special\n tokens and clean up tokenization spaces.\n\n Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.\n\n Args:\n token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):\n List of tokenized input ids. 
Can be obtained using the `__call__` method.\n skip_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not to remove special tokens in the decoding.\n clean_up_tokenization_spaces (`bool`, *optional*):\n Whether or not to clean up the tokenization spaces.\n output_char_offsets (`bool`, *optional*, defaults to `False`):\n Whether or not to output character offsets. Character offsets can be used in combination with the\n sampling rate and model downsampling rate to compute the time-stamps of transcribed characters.\n\n \n\n Please take a look at the example below to better understand how to make use of `output_char_offsets`.\n\n \n\n output_word_offsets (`bool`, *optional*, defaults to `False`):\n Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate\n and model downsampling rate to compute the time-stamps of transcribed words.\n\n \n\n Please take a look at the example below to better understand how to make use of `output_word_offsets`.\n\n \n\n kwargs (additional keyword arguments, *optional*):\n Will be passed to the underlying model specific decode method.\n\n Returns:\n `str` or [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`]: The list of decoded\n sentences. Will be a [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`] when\n `output_char_offsets == True` or `output_word_offsets == True`.\n\n Example:\n\n ```python\n >>> # Let's see how to retrieve time steps for a model\n >>> from transformers import AutoTokenizer, AutoFeatureExtractor, AutoModelForCTC\n >>> from datasets import load_dataset\n >>> import datasets\n >>> import torch\n\n >>> # import model, feature extractor, tokenizer\n >>> model = AutoModelForCTC.from_pretrained(\"facebook/wav2vec2-base-960h\")\n >>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/wav2vec2-base-960h\")\n >>> feature_extractor = AutoFeatureExtractor.from_pretrained(\"facebook/wav2vec2-base-960h\")\n\n >>> # load first sample of English common_voice\n >>> dataset = load_dataset(\"mozilla-foundation/common_voice_11_0\", \"en\", split=\"train\", streaming=True, trust_remote_code=True)\n >>> dataset = dataset.cast_column(\"audio\", datasets.Audio(sampling_rate=16_000))\n >>> dataset_iter = iter(dataset)\n >>> sample = next(dataset_iter)\n\n >>> # forward sample through model to get greedily predicted transcription ids\n >>> input_values = feature_extractor(sample[\"audio\"][\"array\"], return_tensors=\"pt\").input_values\n >>> logits = model(input_values).logits[0]\n >>> pred_ids = torch.argmax(logits, axis=-1)\n\n >>> # retrieve word stamps (analogous commands for `output_char_offsets`)\n >>> outputs = tokenizer.decode(pred_ids, output_word_offsets=True)\n >>> # compute `time_offset` in seconds as product of downsampling ratio and sampling_rate\n >>> time_offset = model.config.inputs_to_logits_ratio / feature_extractor.sampling_rate\n\n >>> word_offsets = [\n ... {\n ... \"word\": d[\"word\"],\n ... \"start_time\": round(d[\"start_offset\"] * time_offset, 2),\n ... \"end_time\": round(d[\"end_offset\"] * time_offset, 2),\n ... }\n ... for d in outputs.word_offsets\n ... 
]\n >>> # compare word offsets with audio `en_train_0/common_voice_en_19121553.mp3` online on the dataset viewer:\n >>> # https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/viewer/en\n >>> word_offsets[:3]\n [{'word': 'THE', 'start_time': 0.7, 'end_time': 0.78}, {'word': 'TRICK', 'start_time': 0.88, 'end_time': 1.08}, {'word': 'APPEARS', 'start_time': 1.2, 'end_time': 1.64}]\n ```\"\"\"\n # Convert inputs to python lists\n token_ids = to_py_obj(token_ids)\n\n return self._decode(\n token_ids=token_ids,\n skip_special_tokens=skip_special_tokens,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n output_char_offsets=output_char_offsets,\n output_word_offsets=output_word_offsets,\n **kwargs,\n )"}], "type": ["function_empty", "Development"], "node": ["transformers.utils.generic.to_py_obj", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer.decode"], "language": "Python", "toolfunc_count": 3, "func_count": 4, "pytest_info": {"total_num": 102, "base_passed_num": 80}} {"id": ["transformers.src.transformers.utils.generic.to_py_obj", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer::decode", "transformers.src.transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer::batch_decode"], "project": "transformers", "origin_file": ["transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/models/wav2vec2/tokenization_wav2vec2.py", "transformers/models/wav2vec2/tokenization_wav2vec2.py"], "test_list": ["tests/models/wav2vec2/test_tokenization_wav2vec2.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 254, "func_end_lineno": 281, "func_code": "def to_py_obj(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.\n \"\"\"\n\n framework_to_py_obj = {\n \"pt\": lambda obj: obj.detach().cpu().tolist(),\n \"tf\": lambda obj: obj.numpy().tolist(),\n \"jax\": lambda obj: np.asarray(obj).tolist(),\n \"np\": lambda obj: obj.tolist(),\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_py_obj(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return [to_py_obj(o) for o in obj]\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_py_obj[framework](obj)\n\n # tolist also works on 0d np arrays\n if isinstance(obj, np.number):\n return obj.tolist()\n else:\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" str:\n \"\"\"\n Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special\n tokens and clean up tokenization spaces.\n\n Similar to doing 
`self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.\n\n Args:\n token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):\n List of tokenized input ids. Can be obtained using the `__call__` method.\n skip_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not to remove special tokens in the decoding.\n clean_up_tokenization_spaces (`bool`, *optional*):\n Whether or not to clean up the tokenization spaces.\n output_char_offsets (`bool`, *optional*, defaults to `False`):\n Whether or not to output character offsets. Character offsets can be used in combination with the\n sampling rate and model downsampling rate to compute the time-stamps of transcribed characters.\n\n \n\n Please take a look at the example below to better understand how to make use of `output_char_offsets`.\n\n \n\n output_word_offsets (`bool`, *optional*, defaults to `False`):\n Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate\n and model downsampling rate to compute the time-stamps of transcribed words.\n\n \n\n Please take a look at the example below to better understand how to make use of `output_word_offsets`.\n\n \n\n kwargs (additional keyword arguments, *optional*):\n Will be passed to the underlying model specific decode method.\n\n Returns:\n `str` or [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`]: The list of decoded\n sentences. Will be a [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`] when\n `output_char_offsets == True` or `output_word_offsets == True`.\n\n Example:\n\n ```python\n >>> # Let's see how to retrieve time steps for a model\n >>> from transformers import AutoTokenizer, AutoFeatureExtractor, AutoModelForCTC\n >>> from datasets import load_dataset\n >>> import datasets\n >>> import torch\n\n >>> # import model, feature extractor, tokenizer\n >>> model = AutoModelForCTC.from_pretrained(\"facebook/wav2vec2-base-960h\")\n >>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/wav2vec2-base-960h\")\n >>> feature_extractor = AutoFeatureExtractor.from_pretrained(\"facebook/wav2vec2-base-960h\")\n\n >>> # load first sample of English common_voice\n >>> dataset = load_dataset(\"mozilla-foundation/common_voice_11_0\", \"en\", split=\"train\", streaming=True, trust_remote_code=True)\n >>> dataset = dataset.cast_column(\"audio\", datasets.Audio(sampling_rate=16_000))\n >>> dataset_iter = iter(dataset)\n >>> sample = next(dataset_iter)\n\n >>> # forward sample through model to get greedily predicted transcription ids\n >>> input_values = feature_extractor(sample[\"audio\"][\"array\"], return_tensors=\"pt\").input_values\n >>> logits = model(input_values).logits[0]\n >>> pred_ids = torch.argmax(logits, axis=-1)\n\n >>> # retrieve word stamps (analogous commands for `output_char_offsets`)\n >>> outputs = tokenizer.decode(pred_ids, output_word_offsets=True)\n >>> # compute `time_offset` in seconds as product of downsampling ratio and sampling_rate\n >>> time_offset = model.config.inputs_to_logits_ratio / feature_extractor.sampling_rate\n\n >>> word_offsets = [\n ... {\n ... \"word\": d[\"word\"],\n ... \"start_time\": round(d[\"start_offset\"] * time_offset, 2),\n ... \"end_time\": round(d[\"end_offset\"] * time_offset, 2),\n ... }\n ... for d in outputs.word_offsets\n ... 
]\n >>> # compare word offsets with audio `en_train_0/common_voice_en_19121553.mp3` online on the dataset viewer:\n >>> # https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/viewer/en\n >>> word_offsets[:3]\n [{'word': 'THE', 'start_time': 0.7, 'end_time': 0.78}, {'word': 'TRICK', 'start_time': 0.88, 'end_time': 1.08}, {'word': 'APPEARS', 'start_time': 1.2, 'end_time': 1.64}]\n ```\"\"\"\n # Convert inputs to python lists\n token_ids = to_py_obj(token_ids)\n\n return self._decode(\n token_ids=token_ids,\n skip_special_tokens=skip_special_tokens,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n output_char_offsets=output_char_offsets,\n output_word_offsets=output_word_offsets,\n **kwargs,\n )"}, {"class_start_lineno": 115, "class_end_lineno": 644, "func_start_lineno": 458, "func_end_lineno": 524, "func_code": " def batch_decode(\n self,\n sequences: Union[List[int], List[List[int]], \"np.ndarray\", \"torch.Tensor\", \"tf.Tensor\"],\n skip_special_tokens: bool = False,\n clean_up_tokenization_spaces: bool = None,\n output_char_offsets: bool = False,\n output_word_offsets: bool = False,\n **kwargs,\n ) -> List[str]:\n \"\"\"\n Convert a list of lists of token ids into a list of strings by calling decode.\n\n Args:\n sequences (`Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]`):\n List of tokenized input ids. Can be obtained using the `__call__` method.\n skip_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not to remove special tokens in the decoding.\n clean_up_tokenization_spaces (`bool`, *optional*):\n Whether or not to clean up the tokenization spaces.\n output_char_offsets (`bool`, *optional*, defaults to `False`):\n Whether or not to output character offsets. Character offsets can be used in combination with the\n sampling rate and model downsampling rate to compute the time-stamps of transcribed characters.\n\n \n\n Please take a look at the Example of [`~Wav2Vec2CTCTokenizer.decode`] to better understand how to make\n use of `output_char_offsets`. [`~Wav2Vec2CTCTokenizer.batch_decode`] works the same way with batched\n output.\n\n \n\n output_word_offsets (`bool`, *optional*, defaults to `False`):\n Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate\n and model downsampling rate to compute the time-stamps of transcribed words.\n\n \n\n Please take a look at the Example of [`~Wav2Vec2CTCTokenizer.decode`] to better understand how to make\n use of `output_word_offsets`. [`~Wav2Vec2CTCTokenizer.batch_decode`] works the same way with batched\n output.\n\n \n\n kwargs (additional keyword arguments, *optional*):\n Will be passed to the underlying model specific decode method.\n\n Returns:\n `List[str]` or [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`]: The list of decoded\n sentences. 
Will be a [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`] when\n `output_char_offsets == True` or `output_word_offsets == True`.\n \"\"\"\n batch_decoded = [\n self.decode(\n seq,\n skip_special_tokens=skip_special_tokens,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n output_char_offsets=output_char_offsets,\n output_word_offsets=output_word_offsets,\n **kwargs,\n )\n for seq in sequences\n ]\n if output_char_offsets or output_word_offsets:\n # transform list of dicts to dict of lists\n return Wav2Vec2CTCTokenizerOutput({k: [d[k] for d in batch_decoded] for k in batch_decoded[0]})\n\n return batch_decoded"}], "type": ["function_empty", "Development"], "node": ["transformers.utils.generic.to_py_obj", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer.decode", "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer.batch_decode"], "language": "Python", "toolfunc_count": 4, "func_count": 5, "pytest_info": {"total_num": 102, "base_passed_num": 80}} {"id": ["transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.make_list_of_images", "transformers.src.transformers.image_utils.validate_preprocess_arguments", "transformers.src.transformers.utils.generic.to_numpy", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.zoedepth.image_processing_zoedepth.ZoeDepthImageProcessor::preprocess"], "project": "transformers", "origin_file": ["transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_utils.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/image_utils.py", "transformers/models/zoedepth/image_processing_zoedepth.py"], "test_list": ["tests/models/zoedepth/test_image_processing_zoedepth.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. 
If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 172, "func_end_lineno": 208, "func_code": "def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:\n \"\"\"\n Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.\n If the input is a batch of images, it is converted to a list of images.\n\n Args:\n images (`ImageInput`):\n Image of images to turn into a list of images.\n expected_ndims (`int`, *optional*, defaults to 3):\n Expected number of dimensions for a single input image. If the input image has a different number of\n dimensions, an error is raised.\n \"\"\"\n if is_batched(images):\n return images\n\n # Either the input is a single image, in which case we create a list of length 1\n if isinstance(images, PIL.Image.Image):\n # PIL images are never batched\n return [images]\n\n if is_valid_image(images):\n if images.ndim == expected_ndims + 1:\n # Batch of images\n images = list(images)\n elif images.ndim == expected_ndims:\n # Single image\n images = [images]\n else:\n raise ValueError(\n f\"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got\"\n f\" {images.ndim} dimensions.\"\n )\n return images\n raise ValueError(\n \"Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or \"\n f\"jax.ndarray, but got {type(images)}.\"\n )"}, {"class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 388, "func_end_lineno": 426, "func_code": "def validate_preprocess_arguments(\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_pad: Optional[bool] = None,\n size_divisibility: Optional[int] = None,\n do_center_crop: Optional[bool] = None,\n crop_size: Optional[Dict[str, int]] = None,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: Optional[\"PILImageResampling\"] = None,\n):\n \"\"\"\n Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.\n Raises `ValueError` if arguments incompatibility is caught.\n Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,\n sometimes `size_divisibility`, and sometimes `size`. 
New models and processors added should follow\n existing arguments when possible.\n\n \"\"\"\n if do_rescale and rescale_factor is None:\n raise ValueError(\"`rescale_factor` must be specified if `do_rescale` is `True`.\")\n\n if do_pad and size_divisibility is None:\n # Here, size_divisor might be passed as the value of size\n raise ValueError(\n \"Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if `do_pad` is `True`.\"\n )\n\n if do_normalize and (image_mean is None or image_std is None):\n raise ValueError(\"`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.\")\n\n if do_center_crop and crop_size is None:\n raise ValueError(\"`crop_size` must be specified if `do_center_crop` is `True`.\")\n\n if do_resize and (size is None or resample is None):\n raise ValueError(\"`size` and `resample` must be specified if `do_resize` is `True`.\")"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 284, "func_end_lineno": 307, "func_code": "def to_numpy(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a Numpy array.\n \"\"\"\n\n framework_to_numpy = {\n \"pt\": lambda obj: obj.detach().cpu().numpy(),\n \"tf\": lambda obj: obj.numpy(),\n \"jax\": lambda obj: np.asarray(obj),\n \"np\": lambda obj: obj,\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_numpy(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return np.array(obj)\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_numpy[framework](obj)\n\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" np.ndarray:\n if not is_valid_image(img):\n raise ValueError(f\"Invalid image type: {type(img)}\")\n\n if is_vision_available() and isinstance(img, PIL.Image.Image):\n return np.array(img)\n return to_numpy(img)"}, {"class_start_lineno": 98, "class_end_lineno": 444, "func_start_lineno": 294, "func_end_lineno": 444, "func_code": " def preprocess(\n self,\n images: ImageInput,\n do_pad: bool = None,\n do_rescale: bool = None,\n rescale_factor: float = None,\n do_normalize: bool = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n do_resize: bool = None,\n size: int = None,\n keep_aspect_ratio: bool = None,\n ensure_multiple_of: int = None,\n resample: PILImageResampling = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n data_format: ChannelDimension = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> PIL.Image.Image:\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. 
If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_pad (`bool`, *optional*, defaults to `self.do_pad`):\n Whether to pad the input image.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image values between [0 - 1].\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Size of the image after resizing. If `keep_aspect_ratio` is `True`, he image is resized by choosing the smaller of\n the height and width scaling factors and using it for both dimensions. If `ensure_multiple_of` is also set,\n the image is further resized to a size that is a multiple of this value.\n keep_aspect_ratio (`bool`, *optional*, defaults to `self.keep_aspect_ratio`):\n If `True` and `do_resize=True`, the image is resized by choosing the smaller of the height and width scaling factors and using it for\n both dimensions. This ensures that the image is scaled down as little as possible while still fitting within the\n desired output size. In case `ensure_multiple_of` is also set, the image is further resized to a size that is a\n multiple of this value by flooring the height and width to the nearest multiple of this value.\n ensure_multiple_of (`int`, *optional*, defaults to `self.ensure_multiple_of`):\n If `do_resize` is `True`, the image is resized to a size that is a multiple of this value. Works by flooring\n the height and width to the nearest multiple of this value.\n\n Works both with and without `keep_aspect_ratio` being set to `True`. Can be overidden by `ensure_multiple_of` in `preprocess`.\n resample (`int`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`, Only\n has an effect if `do_resize` is set to `True`.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n size = size if size is not None else self.size\n size = get_size_dict(size)\n keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio\n ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of\n resample = resample if resample is not None else self.resample\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n do_pad = do_pad if do_pad is not None else self.do_pad\n\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n validate_preprocess_arguments(\n do_rescale=do_rescale,\n rescale_factor=rescale_factor,\n do_normalize=do_normalize,\n image_mean=image_mean,\n image_std=image_std,\n do_resize=do_resize,\n size=size,\n resample=resample,\n )\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n if do_rescale:\n images = [\n self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_pad:\n images = [self.pad_image(image=image, input_data_format=input_data_format) for image in images]\n\n if do_resize:\n images = [\n self.resize(\n image=image,\n size=size,\n resample=resample,\n keep_aspect_ratio=keep_aspect_ratio,\n ensure_multiple_of=ensure_multiple_of,\n input_data_format=input_data_format,\n )\n for image in images\n ]\n\n if do_normalize:\n images = [\n self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n for image in images\n ]\n\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images\n ]\n\n data = {\"pixel_values\": images}\n return BatchFeature(data=data, tensor_type=return_tensors)"}], "type": ["function_empty", "Development"], "node": ["transformers.image_processing_utils.get_size_dict", "transformers.image_utils.make_list_of_images", "transformers.image_utils.validate_preprocess_arguments", "transformers.utils.generic.to_numpy", "transformers.utils.generic.infer_framework_from_repr", "transformers.utils.generic._get_frameworks_and_test_func", "transformers.image_utils.to_numpy_array", "transformers.models.zoedepth.image_processing_zoedepth.ZoeDepthImageProcessor.preprocess"], "language": "Python", "toolfunc_count": 6, "func_count": 7, "pytest_info": {"total_num": 15, "base_passed_num": 6}} {"id": ["transformers.src.transformers.utils.generic.to_py_obj", "transformers.src.transformers.utils.generic.infer_framework_from_repr", "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.data.data_collator.DataCollatorForSeq2Seq::__call__"], "project": "transformers", "origin_file": ["transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/utils/generic.py", "transformers/data/data_collator.py"], "test_list": ["tests/trainer/test_data_collator.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 254, "func_end_lineno": 281, "func_code": "def to_py_obj(obj):\n \"\"\"\n Convert a TensorFlow tensor, PyTorch tensor, Numpy array or python list to a python list.\n \"\"\"\n\n framework_to_py_obj = {\n \"pt\": lambda obj: obj.detach().cpu().tolist(),\n \"tf\": lambda obj: obj.numpy().tolist(),\n \"jax\": lambda obj: np.asarray(obj).tolist(),\n \"np\": lambda obj: obj.tolist(),\n }\n\n if isinstance(obj, (dict, UserDict)):\n return {k: to_py_obj(v) for k, v in obj.items()}\n elif isinstance(obj, (list, tuple)):\n return [to_py_obj(o) for o in obj]\n\n # This gives us a smart order to test the frameworks with the corresponding tests.\n framework_to_test_func = _get_frameworks_and_test_func(obj)\n for framework, test_func in framework_to_test_func.items():\n if test_func(obj):\n return framework_to_py_obj[framework](obj)\n\n # tolist also works on 0d np arrays\n if isinstance(obj, np.number):\n return obj.tolist()\n else:\n return obj"}, {"class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 80, "func_end_lineno": 95, "func_code": "def infer_framework_from_repr(x):\n \"\"\"\n Tries to guess the 
framework of an object `x` from its repr (brittle but will help in `is_tensor` to try the\n frameworks in a smart order, without the need to import the frameworks).\n \"\"\"\n representation = str(type(x))\n if representation.startswith(\" None:\n \"\"\"\n Set the field named `name` to `value`.\n The length of `value` must be the number of instances,\n and must agree with other existing fields in this object.\n \"\"\"\n data_len = len(value)\n if len(self._fields):\n assert (\n len(self) == data_len\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\n self._fields[name] = value"}, {"class_start_lineno": 7, "class_end_lineno": 192, "func_start_lineno": 57, "func_end_lineno": 61, "func_code": " def __setattr__(self, name: str, val: Any) -> None:\n if name.startswith(\"_\"):\n super().__setattr__(name, val)\n else:\n self.set(name, val)"}, {"class_start_lineno": 17, "class_end_lineno": 260, "func_start_lineno": 152, "func_end_lineno": 173, "func_code": " def _initialize_extra_fields(self, instances: Instances) -> Instances:\n \"\"\"\n If input instances don't have ID, ID_period, lost_frame_count fields,\n this method is used to initialize these fields.\n\n Args:\n instances: D2 Instances, for predictions of the current frame\n Return:\n D2 Instances with extra fields added\n \"\"\"\n if not instances.has(\"ID\"):\n instances.set(\"ID\", [None] * len(instances))\n if not instances.has(\"ID_period\"):\n instances.set(\"ID_period\", [None] * len(instances))\n if not instances.has(\"lost_frame_count\"):\n instances.set(\"lost_frame_count\", [None] * len(instances))\n if self._prev_instances is None:\n instances.ID = list(range(len(instances)))\n self._id_count += len(instances)\n instances.ID_period = [1] * len(instances)\n instances.lost_frame_count = [0] * len(instances)\n return instances"}, {"class_start_lineno": 1, "class_end_lineno": 423, "func_start_lineno": 310, "func_end_lineno": 329, "func_code": "def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M,\n compute the intersection area between __all__ N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax)\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.\n\n Returns:\n Tensor: intersection, sized [N,M].\n \"\"\"\n boxes1, boxes2 = boxes1.tensor, boxes2.tensor\n width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(\n boxes1[:, None, :2], boxes2[:, :2]\n ) # [N,M,2]\n\n width_height.clamp_(min=0) # [N,M,2]\n intersection = width_height.prod(dim=2) # [N,M]\n return intersection"}, {"class_start_lineno": 1, "class_end_lineno": 423, "func_start_lineno": 334, "func_end_lineno": 356, "func_code": "def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M, compute the IoU\n (intersection over union) between **all** N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax).\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. 
Contains N & M boxes, respectively.\n\n Returns:\n Tensor: IoU, sized [N,M].\n \"\"\"\n area1 = boxes1.area() # [N]\n area2 = boxes2.area() # [M]\n inter = pairwise_intersection(boxes1, boxes2)\n\n # handle empty boxes\n iou = torch.where(\n inter > 0,\n inter / (area1[:, None] + area2 - inter),\n torch.zeros(1, dtype=inter.dtype, device=inter.device),\n )\n return iou"}, {"class_start_lineno": 17, "class_end_lineno": 260, "func_start_lineno": 123, "func_end_lineno": 150, "func_code": " def _create_prediction_pairs(\n self, instances: Instances, iou_all: np.ndarray\n ) -> List:\n \"\"\"\n For all instances in previous and current frames, create pairs. For each\n pair, store index of the instance in current frame predcitions, index in\n previous predictions, ID in previous predictions, IoU of the bboxes in this\n pair, period in previous predictions.\n\n Args:\n instances: D2 Instances, for predictions of the current frame\n iou_all: IoU for all bboxes pairs\n Return:\n A list of IoU for all pairs\n \"\"\"\n bbox_pairs = []\n for i in range(len(instances)):\n for j in range(len(self._prev_instances)):\n bbox_pairs.append(\n {\n \"idx\": i,\n \"prev_idx\": j,\n \"prev_id\": self._prev_instances.ID[j],\n \"IoU\": iou_all[i, j],\n \"prev_period\": self._prev_instances.ID_period[j],\n }\n )\n return bbox_pairs"}, {"class_start_lineno": 17, "class_end_lineno": 260, "func_start_lineno": 183, "func_end_lineno": 198, "func_code": " def _assign_new_id(self, instances: Instances) -> Instances:\n \"\"\"\n For each untracked instance, assign a new id\n\n Args:\n instances: D2 Instances, for predictions of the current frame\n Return:\n D2 Instances with new ID assigned\n \"\"\"\n untracked_idx = set(range(len(instances))).difference(self._matched_idx)\n for idx in untracked_idx:\n instances.ID[idx] = self._id_count\n self._id_count += 1\n instances.ID_period[idx] = 1\n instances.lost_frame_count[idx] = 0\n return instances"}, {"class_start_lineno": 7, "class_end_lineno": 192, "func_start_lineno": 38, "func_end_lineno": 47, "func_code": " def __init__(self, image_size: Tuple[int, int], **kwargs: Any):\n \"\"\"\n Args:\n image_size (height, width): the spatial size of the image.\n kwargs: fields to add to this `Instances`.\n \"\"\"\n self._image_size = image_size\n self._fields: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.set(k, v)"}, {"class_start_lineno": 17, "class_end_lineno": 260, "func_start_lineno": 200, "func_end_lineno": 260, "func_code": " def _merge_untracked_instances(self, instances: Instances) -> Instances:\n \"\"\"\n For untracked previous instances, under certain condition, still keep them\n in tracking and merge with the current instances.\n\n Args:\n instances: D2 Instances, for predictions of the current frame\n Return:\n D2 Instances merging current instances and instances from previous\n frame decided to keep tracking\n \"\"\"\n untracked_instances = Instances(\n image_size=instances.image_size,\n pred_boxes=[],\n pred_masks=[],\n pred_classes=[],\n scores=[],\n ID=[],\n ID_period=[],\n lost_frame_count=[],\n )\n prev_bboxes = list(self._prev_instances.pred_boxes)\n prev_classes = list(self._prev_instances.pred_classes)\n prev_scores = list(self._prev_instances.scores)\n prev_ID_period = self._prev_instances.ID_period\n if instances.has(\"pred_masks\"):\n prev_masks = list(self._prev_instances.pred_masks)\n for idx in self._untracked_prev_idx:\n x_left, y_top, x_right, y_bot = prev_bboxes[idx]\n if (\n (1.0 * (x_right - x_left) / self._video_width < 
self._min_box_rel_dim)\n or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim)\n or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count\n or prev_ID_period[idx] <= self._min_instance_period\n ):\n continue\n untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy()))\n untracked_instances.pred_classes.append(int(prev_classes[idx]))\n untracked_instances.scores.append(float(prev_scores[idx]))\n untracked_instances.ID.append(self._prev_instances.ID[idx])\n untracked_instances.ID_period.append(self._prev_instances.ID_period[idx])\n untracked_instances.lost_frame_count.append(\n self._prev_instances.lost_frame_count[idx] + 1\n )\n if instances.has(\"pred_masks\"):\n untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8))\n\n untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes))\n untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes)\n untracked_instances.scores = torch.FloatTensor(untracked_instances.scores)\n if instances.has(\"pred_masks\"):\n untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks)\n else:\n untracked_instances.remove(\"pred_masks\")\n\n return Instances.cat(\n [\n instances,\n untracked_instances,\n ]\n )"}, {"class_start_lineno": 17, "class_end_lineno": 260, "func_start_lineno": 88, "func_end_lineno": 121, "func_code": " def update(self, instances: Instances) -> Instances:\n \"\"\"\n See BaseTracker description\n \"\"\"\n if instances.has(\"pred_keypoints\"):\n raise NotImplementedError(\"Need to add support for keypoints\")\n instances = self._initialize_extra_fields(instances)\n if self._prev_instances is not None:\n # calculate IoU of all bbox pairs\n iou_all = pairwise_iou(\n boxes1=instances.pred_boxes,\n boxes2=self._prev_instances.pred_boxes,\n )\n # sort IoU in descending order\n bbox_pairs = self._create_prediction_pairs(instances, iou_all)\n # assign previous ID to current bbox if IoU > track_iou_threshold\n self._reset_fields()\n for bbox_pair in bbox_pairs:\n idx = bbox_pair[\"idx\"]\n prev_id = bbox_pair[\"prev_id\"]\n if idx in self._matched_idx \\\n or prev_id in self._matched_ID \\\n or bbox_pair[\"IoU\"] < self._track_iou_threshold:\n continue\n instances.ID[idx] = prev_id\n instances.ID_period[idx] = bbox_pair[\"prev_period\"] + 1\n instances.lost_frame_count[idx] = 0\n self._matched_idx.add(idx)\n self._matched_ID.add(prev_id)\n self._untracked_prev_idx.remove(bbox_pair[\"prev_idx\"])\n instances = self._assign_new_id(instances)\n instances = self._merge_untracked_instances(instances)\n self._prev_instances = copy.deepcopy(instances)\n return instances"}], "type": ["function_empty", "Development"], "node": ["detectron2.structures.instances.Instances.set", "detectron2.structures.instances.Instances.__setattr__", "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker._initialize_extra_fields", "detectron2.structures.boxes.pairwise_intersection", "detectron2.structures.boxes.pairwise_iou", "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker._create_prediction_pairs", "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker._assign_new_id", "detectron2.structures.instances.Instances.__init__", "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker._merge_untracked_instances", "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker.update"], "language": "Python", "toolfunc_count": 7, "func_count": 8, "pytest_info": {"total_num": 5, "base_passed_num": 2}} {"id": 
["langchain_core.libs.core.langchain_core.runnables.graph.is_uuid", "langchain_core.libs.core.langchain_core.runnables.graph.node_data_str", "langchain_core.libs.core.langchain_core.runnables.graph.Graph::add_node"], "project": "langchain_core", "origin_file": ["langchain_core/runnables/graph.py", "langchain_core/runnables/graph.py", "langchain_core/runnables/graph.py"], "test_list": ["libs/core/tests/unit_tests/runnables/test_graph.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 664, "func_start_lineno": 42, "func_end_lineno": 55, "func_code": "def is_uuid(value: str) -> bool:\n \"\"\"Check if a string is a valid UUID.\n\n Args:\n value: The string to check.\n\n Returns:\n True if the string is a valid UUID, False otherwise.\n \"\"\"\n try:\n UUID(value)\n except ValueError:\n return False\n return True"}, {"class_start_lineno": 1, "class_end_lineno": 664, "func_start_lineno": 178, "func_end_lineno": 196, "func_code": "def node_data_str(id: str, data: Union[type[BaseModel], RunnableType]) -> str:\n \"\"\"Convert the data of a node to a string.\n\n Args:\n id: The node id.\n data: The node data.\n\n Returns:\n A string representation of the data.\n \"\"\"\n from langchain_core.runnables.base import Runnable\n\n if not is_uuid(id):\n return id\n elif isinstance(data, Runnable):\n data_str = data.get_name()\n else:\n data_str = data.__name__\n return data_str if not data_str.startswith(\"Runnable\") else data_str[8:]"}, {"class_start_lineno": 256, "class_end_lineno": 636, "func_start_lineno": 313, "func_end_lineno": 339, "func_code": " def add_node(\n self,\n data: Union[type[BaseModel], RunnableType],\n id: Optional[str] = None,\n *,\n metadata: Optional[dict[str, Any]] = None,\n ) -> Node:\n \"\"\"Add a node to the graph and return it.\n\n Args:\n data: The data of the node.\n id: The id of the node. Defaults to None.\n metadata: Optional metadata for the node. 
Defaults to None.\n\n Returns:\n The node that was added to the graph.\n\n Raises:\n ValueError: If a node with the same id already exists.\n \"\"\"\n if id is not None and id in self.nodes:\n msg = f\"Node with id {id} already exists\"\n raise ValueError(msg)\n id = id or self.next_id()\n node = Node(id=id, data=data, metadata=metadata, name=node_data_str(id, data))\n self.nodes[node.id] = node\n return node"}], "type": ["function_empty", "Development"], "node": ["langchain_core.runnables.graph.is_uuid", "langchain_core.runnables.graph.node_data_str", "langchain_core.runnables.graph.Graph.add_node"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 11, "base_passed_num": 3}} {"id": ["finam.src.finam.data.tools.mask.mask_specified", "finam.src.finam.sdk.output.Output::push_info", "finam.src.finam.sdk.component.IOList::add", "finam.src.finam.components.callback.CallbackComponent::_initialize"], "project": "finam", "origin_file": ["finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/info.py", "finam/sdk/output.py", "finam/sdk/output.py", "finam/sdk/component.py", "finam/components/callback.py"], "test_list": ["tests/components/test_callback.py", "tests/components/test_debug.py", "tests/core/test_schedule.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 364, "func_end_lineno": 378, "func_code": "def mask_specified(mask):\n \"\"\"\n Determine whether given mask selection indicates a masked array.\n\n Parameters\n ----------\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to check\n\n Returns\n -------\n bool\n False if mask is Mask.FLEX or Mask.NONE, True otherwise\n \"\"\"\n return not any(mask is val for val in list(Mask))"}, {"class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 87, "func_end_lineno": 89, "func_code": " def mask(self):\n \"\"\"Mask or ndarray: data mask.\"\"\"\n return self._mask"}, {"class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 226, "func_end_lineno": 231, "func_code": " def __setattr__(self, name, value):\n # first check if attribute present or meta not yet present (e.g. 
grid)\n if name in self.__dir__() or \"meta\" not in self.__dict__:\n super().__setattr__(name, value)\n else:\n self.__dict__[\"meta\"][name] = value"}, {"class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 204, "func_end_lineno": 216, "func_code": " def push_info(self, info):\n \"\"\"Push data info into the output.\n\n Parameters\n ----------\n info : :class:`.Info`\n Delivered data info\n \"\"\"\n self.logger.trace(\"push info\")\n if not isinstance(info, Info):\n with ErrorLogger(self.logger):\n raise FinamMetaDataError(\"Metadata must be of type Info\")\n self._output_info = info"}, {"class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 28, "func_end_lineno": 53, "func_code": " def __init__(self, name=None, info=None, static=False, **info_kwargs):\n Loggable.__init__(self)\n self._targets = []\n self.data = []\n self._output_info = None\n self.base_logger_name = None\n if name is None:\n raise ValueError(\"Output: needs a name.\")\n self._name = name\n self._static = static\n\n if info_kwargs:\n if info is not None:\n raise ValueError(\"Output: can't use **kwargs in combination with info\")\n info = Info(**info_kwargs)\n if info is not None:\n self.push_info(info)\n\n self._connected_inputs = {}\n self._out_infos_exchanged = 0\n\n self._time = None\n self._mem_limit = None\n self._mem_location = None\n self._total_mem = 0\n self._mem_counter = 0"}, {"class_start_lineno": 572, "class_end_lineno": 711, "func_start_lineno": 602, "func_end_lineno": 635, "func_code": " def add(self, io=None, *, name=None, info=None, static=False, **info_kwargs):\n \"\"\"\n Add a new IO object either directly ob by attributes.\n\n Parameters\n ----------\n io : :class:`.IInput` or :class:`.IOutput`, optional\n IO object to add, by default None\n name : str, optional\n Name of the new IO object to add, by default None\n info : :class:`.Info`, optional\n Info of the new IO object to add, by default None\n static : bool, optional\n Whether the new IO object in static, by default False\n **info_kwargs\n Optional keyword arguments to instantiate an Info object\n\n Raises\n ------\n ValueError\n If io is not of the correct type.\n \"\"\"\n if self.frozen:\n raise ValueError(\"IO.add: list is frozen.\")\n io = (\n self.cls(name=name, info=info, static=static, **info_kwargs)\n if io is None\n else io\n )\n if not isinstance(io, self.icls):\n raise ValueError(f\"IO.add: {self.name} is not of type {self.iname}\")\n if io.name in self._dict:\n raise ValueError(f\"IO.add: {self.name} '{io.name}' already exists.\")\n self._dict[io.name] = io"}, {"class_start_lineno": 12, "class_end_lineno": 129, "func_start_lineno": 90, "func_end_lineno": 101, "func_code": " def _initialize(self):\n for name, info in self._input_infos.items():\n info.time = self.time\n self.inputs.add(name=name, info=info)\n\n for name, info in self._output_infos.items():\n info.time = self.time\n self.outputs.add(name=name, info=info)\n\n pull_data = list(self._input_infos) if self._initial_pull else {}\n\n self.create_connector(pull_data=pull_data)"}], "type": ["function_empty", "Development"], "node": ["finam.data.tools.mask.mask_specified", "finam.data.tools.info.Info.mask", "finam.data.tools.info.Info.__setattr__", "finam.sdk.output.Output.push_info", "finam.sdk.output.Output.__init__", "finam.sdk.component.IOList.add", "finam.components.callback.CallbackComponent._initialize"], "language": "Python", "toolfunc_count": 3, "func_count": 4, "pytest_info": {"total_num": 34, "base_passed_num": 6}} {"id": 
["finam.src.finam.sdk.component.IOList::__getitem__", "finam.src.finam.sdk.input.Input::pull_data", "finam.src.finam.sdk.output.Output::get_data", "finam.src.finam.components.callback.CallbackComponent::_update"], "project": "finam", "origin_file": ["finam/sdk/component.py", "finam/sdk/input.py", "finam/sdk/output.py", "finam/components/callback.py"], "test_list": ["tests/components/test_callback.py"], "prob_info": [{"class_start_lineno": 572, "class_end_lineno": 711, "func_start_lineno": 678, "func_end_lineno": 692, "func_code": " def __getitem__(self, key):\n \"\"\"Access an item by name.\"\"\"\n if key in self._dict:\n return self._dict[key]\n\n if self.owner is None:\n raise KeyError(f\"No {self.cls.__name__} `{key}` in unknown component.\")\n\n msg = f\"No {self.cls.__name__} `{key}` in component `{self.owner.name}`.\"\n if self.owner.status == ComponentStatus.CREATED:\n msg += \" The component is not initialized. Did you miss to add it to the composition?\"\n raise KeyError(msg)\n\n with ErrorLogger(self.owner.logger):\n raise KeyError(msg)"}, {"class_start_lineno": 14, "class_end_lineno": 234, "func_start_lineno": 101, "func_end_lineno": 136, "func_code": " def pull_data(self, time, target=None):\n \"\"\"Retrieve the data from the input's source.\n\n Parameters\n ----------\n time : :class:`datetime `\n Simulation time to get the data for.\n target : :class:`.IInput` or None\n Requesting end point of this pull.\n Should be ``None`` for normal input pulls in components.\n Simple adapters should forward the source in :meth:`.Adapter._get_data`.\n Push-based adapters should use ``self`` in :meth:`.Adapter._source_updated`.\n\n Returns\n -------\n :class:`pint.Quantity`\n Data set for the given simulation time.\n \"\"\"\n self.logger.trace(\"pull data\")\n\n if time is not None and not isinstance(time, datetime):\n with ErrorLogger(self.logger):\n raise ValueError(\"Time must be of type datetime\")\n\n if self.is_static:\n if self._cached_data is None:\n data = self._source.get_data(time, target or self)\n with ErrorLogger(self.logger):\n self._cached_data = self._convert_and_check(data)\n data = self._cached_data\n else:\n data = self._source.get_data(time, target or self)\n with ErrorLogger(self.logger):\n data = self._convert_and_check(data)\n\n return data"}, {"class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 234, "func_end_lineno": 282, "func_code": " def get_data(self, time, target):\n \"\"\"Get the output's data-set for the given time.\n\n Parameters\n ----------\n time : :class:`datetime `\n simulation time to get the data for.\n target : :class:`.IInput` or None\n Requesting end point of this pull.\n\n Returns\n -------\n :class:`pint.Quantity`\n data-set for the requested time.\n\n Raises\n ------\n FinamNoDataError\n Raises the error if no data is available\n \"\"\"\n self.logger.trace(\"get data\")\n\n with ErrorLogger(self.logger):\n _check_time(time, self.is_static)\n\n if self._output_info is None:\n raise FinamNoDataError(f\"No data info available in {self.name}\")\n if self._out_infos_exchanged < len(self._connected_inputs):\n raise FinamNoDataError(f\"Data info was not yet exchanged in {self.name}\")\n if len(self.data) == 0:\n raise FinamNoDataError(f\"No data available in {self.name}\")\n\n with ErrorLogger(self.logger):\n data = (\n self._unpack(self.data[0][1])\n if self.is_static\n else self._interpolate(time)\n )\n\n if not self.is_static:\n data_count = len(self.data)\n self._clear_data(time, target)\n\n if len(self.data) < data_count:\n 
self.logger.trace(\n \"reduced data cache: %d -> %d\", data_count, len(self.data)\n )\n\n return data"}, {"class_start_lineno": 12, "class_end_lineno": 129, "func_start_lineno": 119, "func_end_lineno": 126, "func_code": " def _update(self):\n self._time += self._step\n\n inp = {n: self.inputs[n].pull_data(self.time) for n in self._input_infos.keys()}\n outp = self._callback(inp, self.time)\n for name, val in outp.items():\n if val is not None:\n self.outputs[name].push_data(val, self.time)"}], "type": ["function_empty", "Development"], "node": ["finam.sdk.component.IOList.__getitem__", "finam.sdk.input.Input.pull_data", "finam.sdk.output.Output.get_data", "finam.components.callback.CallbackComponent._update"], "language": "Python", "toolfunc_count": 3, "func_count": 4, "pytest_info": {"total_num": 1, "base_passed_num": 0}} {"id": ["finam.src.finam.sdk.component.IOList::__getitem__", "finam.src.finam.sdk.input.Input::pull_data", "finam.src.finam.components.callback.CallbackComponent::_update"], "project": "finam", "origin_file": ["finam/sdk/component.py", "finam/sdk/input.py", "finam/components/callback.py"], "test_list": ["tests/components/test_debug.py", "tests/core/test_schedule.py"], "prob_info": [{"class_start_lineno": 572, "class_end_lineno": 711, "func_start_lineno": 678, "func_end_lineno": 692, "func_code": " def __getitem__(self, key):\n \"\"\"Access an item by name.\"\"\"\n if key in self._dict:\n return self._dict[key]\n\n if self.owner is None:\n raise KeyError(f\"No {self.cls.__name__} `{key}` in unknown component.\")\n\n msg = f\"No {self.cls.__name__} `{key}` in component `{self.owner.name}`.\"\n if self.owner.status == ComponentStatus.CREATED:\n msg += \" The component is not initialized. Did you miss to add it to the composition?\"\n raise KeyError(msg)\n\n with ErrorLogger(self.owner.logger):\n raise KeyError(msg)"}, {"class_start_lineno": 14, "class_end_lineno": 234, "func_start_lineno": 101, "func_end_lineno": 136, "func_code": " def pull_data(self, time, target=None):\n \"\"\"Retrieve the data from the input's source.\n\n Parameters\n ----------\n time : :class:`datetime `\n Simulation time to get the data for.\n target : :class:`.IInput` or None\n Requesting end point of this pull.\n Should be ``None`` for normal input pulls in components.\n Simple adapters should forward the source in :meth:`.Adapter._get_data`.\n Push-based adapters should use ``self`` in :meth:`.Adapter._source_updated`.\n\n Returns\n -------\n :class:`pint.Quantity`\n Data set for the given simulation time.\n \"\"\"\n self.logger.trace(\"pull data\")\n\n if time is not None and not isinstance(time, datetime):\n with ErrorLogger(self.logger):\n raise ValueError(\"Time must be of type datetime\")\n\n if self.is_static:\n if self._cached_data is None:\n data = self._source.get_data(time, target or self)\n with ErrorLogger(self.logger):\n self._cached_data = self._convert_and_check(data)\n data = self._cached_data\n else:\n data = self._source.get_data(time, target or self)\n with ErrorLogger(self.logger):\n data = self._convert_and_check(data)\n\n return data"}, {"class_start_lineno": 12, "class_end_lineno": 129, "func_start_lineno": 119, "func_end_lineno": 126, "func_code": " def _update(self):\n self._time += self._step\n\n inp = {n: self.inputs[n].pull_data(self.time) for n in self._input_infos.keys()}\n outp = self._callback(inp, self.time)\n for name, val in outp.items():\n if val is not None:\n self.outputs[name].push_data(val, self.time)"}], "type": ["function_empty", "Development"], "node": 
["finam.sdk.component.IOList.__getitem__", "finam.sdk.input.Input.pull_data", "finam.components.callback.CallbackComponent._update"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 33, "base_passed_num": 5}} {"id": ["skfolio.src.skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.src.skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.src.skfolio.distribution.copula._gumbel._base_partial_derivative", "skfolio.src.skfolio.distribution.copula._gumbel._base_inverse_partial_derivative", "skfolio.src.skfolio.distribution.copula._utils._apply_rotation_partial_derivatives"], "project": "skfolio", "origin_file": ["skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_gumbel.py", "skfolio/distribution/copula/_gumbel.py", "skfolio/distribution/copula/_utils.py"], "test_list": ["tests/test_distribution/test_copula/test_gumbel.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 341, "func_end_lineno": 380, "func_code": "def _apply_copula_rotation(X: npt.ArrayLike, rotation: CopulaRotation) -> np.ndarray:\n r\"\"\"Apply a bivariate copula rotation using the standard (clockwise) convention.\n\n The transformations are defined as follows:\n\n - `CopulaRotation.R0` (0°): :math:`(u, v) \\mapsto (u, v)`\n - `CopulaRotation.R90` (90°): :math:`(u, v) \\mapsto (v,\\, 1 - u)`\n - `CopulaRotation.R180` (180°): :math:`(u, v) \\mapsto (1 - u,\\, 1 - v)`\n - `CopulaRotation.R270` (270°): :math:`(u, v) \\mapsto (1 - v,\\, u)`\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation.\n\n rotation : CopulaRotation\n The rotation to apply to the copula (default is no rotation).\n\n Returns\n -------\n rotated_X: ndarray of shape (n_observations, 2)\n The rotated data array.\n \"\"\"\n match rotation:\n case CopulaRotation.R0:\n # No rotation\n pass\n case CopulaRotation.R90:\n # (u, v) -> (v, 1 - u)\n X = np.column_stack([X[:, 1], 1.0 - X[:, 0]])\n case CopulaRotation.R180:\n # (u, v) -> (1 - u, 1 - v)\n X = 1.0 - X\n case CopulaRotation.R270:\n # (u, v) -> (1 - v, u)\n X = np.column_stack([1.0 - X[:, 1], X[:, 0]])\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return X"}, {"class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 383, "func_end_lineno": 406, "func_code": "def _apply_margin_swap(X: np.ndarray, first_margin: bool) -> np.ndarray:\n \"\"\"\n Swap the columns of X if first_margin is False.\n\n If first_margin is True, X is returned unchanged; otherwise, the columns\n of X are swapped.\n\n Parameters\n ----------\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs (u, v).\n first_margin : bool\n If True, no swap is performed; if False, the columns of X are swapped.\n\n Returns\n -------\n X_swapped : ndarray of shape (n_observations, 2)\n The data array with columns swapped if first_margin is False.\n \"\"\"\n assert X.ndim == 2\n assert X.shape[1] == 2\n if first_margin:\n return X[:, [1, 0]]\n return X"}, {"class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 464, "func_end_lineno": 507, "func_code": "def _base_partial_derivative(\n X: np.ndarray, first_margin: bool, theta: float\n) -> np.ndarray:\n r\"\"\"\n Compute the partial derivative (h-function) for the unrotated Gumbel copula.\n\n For Gumbel, the copula is defined as:\n\n .. 
math::\n C(u,v)=\\exp\\Bigl(-\\Bigl[(-\\ln u)^{\\theta}+(-\\ln v)^{\\theta}\\Bigr]^{1/\\theta}\\Bigr).\n\n The partial derivative with respect to v is:\n\n .. math::\n \\frac{\\partial C(u,v)}{\\partial v}\n = C(u,v)\\,\\Bigl[(-\\ln u)^{\\theta}+(-\\ln v)^{\\theta}\\Bigr]^{\\frac{1}{\\theta}-1}\n \\,(-\\ln v)^{\\theta-1}\\,\\frac{1}{v}.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` with values in [0, 1].\n\n first_margin : bool, default=False\n If True, compute with respect to u (by swapping margins); otherwise,\n compute with respect to v.\n\n theta : float\n The dependence parameter (must be > 1).\n\n Returns\n -------\n p : ndarray of shape (n_observations,)\n The computed h-function values.\n \"\"\"\n X = _apply_margin_swap(X, first_margin=first_margin)\n _, v = X.T\n x, y = -np.log(X).T\n p = (\n np.exp(-np.power(np.power(x, theta) + np.power(y, theta), 1.0 / theta))\n * np.power(np.power(x / y, theta) + 1.0, 1.0 / theta - 1.0)\n / v\n )\n return p"}, {"class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 510, "func_end_lineno": 560, "func_code": "def _base_inverse_partial_derivative(\n X: np.ndarray, first_margin: bool, theta: float\n) -> np.ndarray:\n r\"\"\"\n Compute the inverse partial derivative for the unrotated Gumbel copula,\n i.e. solve for u in h(u|v)=p.\n\n In other words, given\n - p, the value of the h-function, and\n - v, the conditioning variable,\n solve:\n\n .. math::\n p = C(u,v)\\,\\Bigl[(-\\ln u)^{\\theta}+(-\\ln v)^{\\theta}\\Bigr]^{\\frac{1}{\\theta}-1}\\,\n (-\\ln v)^{\\theta-1}\\,\\frac{1}{v},\n\n for u ∈ [0,1]. Since no closed-form solution exists, we use a numerical method.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array with first column p (h-function values) and second column v\n (conditioning variable).\n\n first_margin : bool, default=False\n If True, treat the first margin as the conditioning variable.\n\n theta : float\n The dependence parameter (must be > 1).\n\n Returns\n -------\n u : ndarray of shape (n_observations,)\n A 1D-array where each element is the solution u ∈ [0,1] such that h(u|v)=p.\n \"\"\"\n X = _apply_margin_swap(X, first_margin=first_margin)\n p, v = -np.log(X).T\n s = v + p + np.log(v) * (theta - 1.0)\n # Initial guess\n x = v.copy()\n max_iters = 50\n tol = 1e-8\n for _ in range(max_iters):\n x_new = x * (s - (theta - 1.0) * (np.log(x) - 1.0)) / (theta + x - 1.0)\n x_new = np.clip(x_new, x, None)\n diff = np.max(np.abs(x_new - x))\n x = x_new\n if diff < tol:\n break\n u = np.exp(-np.power(np.power(x, theta) - np.power(v, theta), 1.0 / theta))\n return u"}, {"class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 452, "func_end_lineno": 509, "func_code": "def _apply_rotation_partial_derivatives(\n func: Callable,\n X: np.ndarray,\n rotation: CopulaRotation,\n first_margin: bool,\n **kwargs,\n) -> np.ndarray:\n \"\"\"\n Apply a copula rotation to X and compute the corresponding partial derivatives.\n\n This function rotates the data X using the specified rotation and then computes\n the partial derivative (h-function) using the provided function. 
The result is then\n adjusted according to the rotation and the margin of interest.\n\n Parameters\n ----------\n func : Callable\n A function that computes the partial derivative (h-function) given X, the\n margin, and any additional keyword arguments.\n\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs.\n\n rotation : CopulaRotation\n The rotation to apply.\n\n first_margin : bool\n If True, compute the partial derivative with respect to the first margin;\n otherwise, compute it with respect to the second margin.\n\n **kwargs\n Additional keyword arguments to pass to the partial derivative function.\n\n Returns\n -------\n z : ndarray of shape (n_observations,)\n The transformed partial derivative values after applying the rotation.\n \"\"\"\n rotated_X = _apply_copula_rotation(X, rotation=rotation)\n\n match rotation:\n case CopulaRotation.R0:\n z = func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R90:\n if first_margin:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case CopulaRotation.R180:\n z = 1 - func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R270:\n if first_margin:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return z"}], "type": ["function_empty", "Development"], "node": ["skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.distribution.copula._gumbel._base_partial_derivative", "skfolio.distribution.copula._gumbel._base_inverse_partial_derivative", "skfolio.distribution.copula._utils._apply_rotation_partial_derivatives"], "language": "Python", "toolfunc_count": 3, "func_count": 5, "pytest_info": {"total_num": 69, "base_passed_num": 5}} {"id": ["skfolio.src.skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.src.skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.src.skfolio.distribution.copula._joe._base_partial_derivative", "skfolio.src.skfolio.distribution.copula._joe._base_inverse_partial_derivative", "skfolio.src.skfolio.distribution.copula._utils._apply_rotation_partial_derivatives"], "project": "skfolio", "origin_file": ["skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_joe.py", "skfolio/distribution/copula/_joe.py", "skfolio/distribution/copula/_utils.py"], "test_list": ["tests/test_distribution/test_copula/test_joe.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 341, "func_end_lineno": 380, "func_code": "def _apply_copula_rotation(X: npt.ArrayLike, rotation: CopulaRotation) -> np.ndarray:\n r\"\"\"Apply a bivariate copula rotation using the standard (clockwise) convention.\n\n The transformations are defined as follows:\n\n - `CopulaRotation.R0` (0°): :math:`(u, v) \\mapsto (u, v)`\n - `CopulaRotation.R90` (90°): :math:`(u, v) \\mapsto (v,\\, 1 - u)`\n - `CopulaRotation.R180` (180°): :math:`(u, v) \\mapsto (1 - u,\\, 1 - v)`\n - `CopulaRotation.R270` (270°): :math:`(u, v) \\mapsto (1 - v,\\, u)`\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation.\n\n rotation : CopulaRotation\n The rotation to apply to the 
copula (default is no rotation).\n\n Returns\n -------\n rotated_X: ndarray of shape (n_observations, 2)\n The rotated data array.\n \"\"\"\n match rotation:\n case CopulaRotation.R0:\n # No rotation\n pass\n case CopulaRotation.R90:\n # (u, v) -> (v, 1 - u)\n X = np.column_stack([X[:, 1], 1.0 - X[:, 0]])\n case CopulaRotation.R180:\n # (u, v) -> (1 - u, 1 - v)\n X = 1.0 - X\n case CopulaRotation.R270:\n # (u, v) -> (1 - v, u)\n X = np.column_stack([1.0 - X[:, 1], X[:, 0]])\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return X"}, {"class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 383, "func_end_lineno": 406, "func_code": "def _apply_margin_swap(X: np.ndarray, first_margin: bool) -> np.ndarray:\n \"\"\"\n Swap the columns of X if first_margin is False.\n\n If first_margin is True, X is returned unchanged; otherwise, the columns\n of X are swapped.\n\n Parameters\n ----------\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs (u, v).\n first_margin : bool\n If True, no swap is performed; if False, the columns of X are swapped.\n\n Returns\n -------\n X_swapped : ndarray of shape (n_observations, 2)\n The data array with columns swapped if first_margin is False.\n \"\"\"\n assert X.ndim == 2\n assert X.shape[1] == 2\n if first_margin:\n return X[:, [1, 0]]\n return X"}, {"class_start_lineno": 1, "class_end_lineno": 609, "func_start_lineno": 517, "func_end_lineno": 546, "func_code": "def _base_partial_derivative(\n X: np.ndarray, first_margin: bool, theta: float\n) -> np.ndarray:\n r\"\"\"Compute the h-function (partial derivative) for the bivariate unrotated\n Joe copula with respect to a specified margin.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. 
Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n first_margin : bool, default=False\n If True, compute the partial derivative with respect to the first\n margin `u`; otherwise, compute the partial derivative with respect to the\n second margin `v`.\n\n theta : float\n The dependence parameter (must be greater than 1).\n\n Returns\n -------\n : ndarray of shape (n_observations,)\n h-function values :math:`h(u \\mid v) \\;=\\; p` for each observation in X.\n \"\"\"\n X = _apply_margin_swap(X, first_margin=first_margin)\n x, y = np.power(1 - X, theta).T\n p = np.power(1 + x / y - x, 1 / theta - 1) * (1.0 - x)\n return p"}, {"class_start_lineno": 1, "class_end_lineno": 609, "func_start_lineno": 549, "func_end_lineno": 609, "func_code": "def _base_inverse_partial_derivative(\n X: np.ndarray, first_margin: bool, theta: float\n) -> np.ndarray:\n r\"\"\"Compute the inverse of the bivariate copula's partial derivative, commonly\n known as the inverse h-function.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(p, v)`, each in the interval `[0, 1]`.\n - The first column `p` corresponds to the value of the h-function.\n - The second column `v` is the conditioning variable.\n\n first_margin : bool, default=False\n If True, compute the inverse partial derivative with respect to the first\n margin `u`; otherwise, compute the inverse partial derivative with respect to\n the second margin `v`.\n\n theta : float\n The dependence parameter (must be greater than 1).\n\n Returns\n -------\n u : ndarray of shape (n_observations,)\n A 1D-array of length `n_observations`, where each element is the computed\n :math:`u = h^{-1}(p \\mid v)` for the corresponding pair in `X`.\n \"\"\"\n X = _apply_margin_swap(X, first_margin=first_margin)\n\n p, v = X.T\n\n y = np.power(1 - v, theta)\n\n # No known closed-form solution, hence we use Newton method\n # with an early-stopping criterion\n\n # Initial guess\n x = np.power(\n (1 - v) * (np.power(1.0 - p, 1.0 / theta - 1) - 1.0) / y + 1.0,\n theta / (1.0 - theta),\n )\n\n max_iters = 50\n tol = 1e-8\n for _ in range(max_iters):\n k = (x - 1.0) * y\n w = np.power((1.0 / y - 1.0) * x + 1.0, 1.0 / theta)\n x_new = (\n x\n - (theta * (k - x) * (p * (-k + x) + k * w))\n / ((y - 1.0) * k - theta * y)\n / w\n )\n x_new = np.clip(x_new, 0.0, 1.0)\n diff = np.max(np.abs(x_new - x))\n x = x_new\n if diff < tol:\n break\n\n u = 1.0 - np.power(x, 1.0 / theta)\n return u"}, {"class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 452, "func_end_lineno": 509, "func_code": "def _apply_rotation_partial_derivatives(\n func: Callable,\n X: np.ndarray,\n rotation: CopulaRotation,\n first_margin: bool,\n **kwargs,\n) -> np.ndarray:\n \"\"\"\n Apply a copula rotation to X and compute the corresponding partial derivatives.\n\n This function rotates the data X using the specified rotation and then computes\n the partial derivative (h-function) using the provided function. 
The result is then\n adjusted according to the rotation and the margin of interest.\n\n Parameters\n ----------\n func : Callable\n A function that computes the partial derivative (h-function) given X, the\n margin, and any additional keyword arguments.\n\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs.\n\n rotation : CopulaRotation\n The rotation to apply.\n\n first_margin : bool\n If True, compute the partial derivative with respect to the first margin;\n otherwise, compute it with respect to the second margin.\n\n **kwargs\n Additional keyword arguments to pass to the partial derivative function.\n\n Returns\n -------\n z : ndarray of shape (n_observations,)\n The transformed partial derivative values after applying the rotation.\n \"\"\"\n rotated_X = _apply_copula_rotation(X, rotation=rotation)\n\n match rotation:\n case CopulaRotation.R0:\n z = func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R90:\n if first_margin:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case CopulaRotation.R180:\n z = 1 - func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R270:\n if first_margin:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return z"}], "type": ["function_empty", "Development"], "node": ["skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.distribution.copula._joe._base_partial_derivative", "skfolio.distribution.copula._joe._base_inverse_partial_derivative", "skfolio.distribution.copula._utils._apply_rotation_partial_derivatives"], "language": "Python", "toolfunc_count": 4, "func_count": 5, "pytest_info": {"total_num": 69, "base_passed_num": 5}} {"id": ["skfolio.src.skfolio.utils.stats.assert_is_square", "skfolio.src.skfolio.utils.stats.assert_is_symmetric", "skfolio.src.skfolio.utils.stats.cov_nearest"], "project": "skfolio", "origin_file": ["skfolio/utils/stats.py", "skfolio/utils/stats.py", "skfolio/utils/stats.py"], "test_list": ["tests/test_optimization/test_ensemble/test_stacking.py", "tests/test_prior/test_factor_model.py", "tests/test_uncertainty_set/test_empirical.py", "tests/test_utils/test_stats.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 208, "func_end_lineno": 221, "func_code": "def assert_is_square(x: np.ndarray) -> None:\n \"\"\"Raises an error if the matrix is not square.\n\n Parameters\n ----------\n x : ndarray of shape (n, n)\n The matrix.\n\n Raises\n ------\n ValueError: if the matrix is not square.\n \"\"\"\n if x.ndim != 2 or x.shape[0] != x.shape[1]:\n raise ValueError(\"The matrix must be square\")"}, {"class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 224, "func_end_lineno": 238, "func_code": "def assert_is_symmetric(x: np.ndarray) -> None:\n \"\"\"Raises an error if the matrix is not symmetric.\n\n Parameters\n ----------\n x : ndarray of shape (n, m)\n The matrix.\n\n Raises\n ------\n ValueError: if the matrix is not symmetric.\n \"\"\"\n assert_is_square(x)\n if not np.allclose(x, x.T):\n raise ValueError(\"The matrix must be symmetric\")"}, {"class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 308, "func_end_lineno": 400, "func_code": "def cov_nearest(\n cov: 
np.ndarray,\n higham: bool = False,\n higham_max_iteration: int = 100,\n warn: bool = False,\n):\n \"\"\"Compute the nearest covariance matrix that is positive definite and with a\n cholesky decomposition than can be computed. The variance is left unchanged.\n A covariance matrix that is not positive definite often occurs in high\n dimensional problems. It can be due to multicollinearity, floating-point\n inaccuracies, or when the number of observations is smaller than the number of\n assets.\n\n First, it converts the covariance matrix to a correlation matrix.\n Then, it finds the nearest correlation matrix and converts it back to a covariance\n matrix using the initial standard deviation.\n\n Cholesky decomposition can fail for symmetric positive definite (SPD) matrix due\n to floating point error and inversely, Cholesky decomposition can success for\n non-SPD matrix. Therefore, we need to test for both. We always start by testing\n for Cholesky decomposition which is significantly faster than checking for positive\n eigenvalues.\n\n Parameters\n ----------\n cov : ndarray of shape (n, n)\n Covariance matrix.\n\n higham : bool, default=False\n If this is set to True, the Higham & Nick (2002) algorithm [1]_ is used,\n otherwise the eigenvalues are clipped to threshold above zeros (1e-13).\n The default (`False`) is to use the clipping method as the Higham & Nick\n algorithm can be slow for large datasets.\n\n higham_max_iteration : int, default=100\n Maximum number of iteration of the Higham & Nick (2002) algorithm.\n The default value is `100`.\n\n warn : bool, default=False\n If this is set to True, a user warning is emitted when the covariance matrix\n is not positive definite and replaced by the nearest. The default is False.\n\n Returns\n -------\n cov : ndarray\n The nearest covariance matrix.\n\n References\n ----------\n .. [1] \"Computing the nearest correlation matrix - a problem from finance\"\n IMA Journal of Numerical Analysis\n Higham & Nick (2002)\n \"\"\"\n assert_is_square(cov)\n assert_is_symmetric(cov)\n\n # Around 100 times faster than checking eigenvalues with np.linalg.eigh\n if is_cholesky_dec(cov) and is_positive_definite(cov):\n return cov\n\n if warn:\n warnings.warn(\n \"The covariance matrix is not positive definite. 
\"\n f\"The {'Higham' if higham else 'Clipping'} algorithm will be used to find \"\n \"the nearest positive definite covariance.\",\n stacklevel=2,\n )\n corr, std = cov_to_corr(cov)\n\n if higham:\n eps = np.finfo(np.float64).eps * 5\n diff = np.zeros(corr.shape)\n x = corr.copy()\n for _ in range(higham_max_iteration):\n x_adj = x - diff\n eig_vals, eig_vecs = np.linalg.eigh(x_adj)\n x = eig_vecs * np.maximum(eig_vals, eps) @ eig_vecs.T\n diff = x - x_adj\n np.fill_diagonal(x, 1)\n cov = corr_to_cov(x, std)\n if is_cholesky_dec(cov) and is_positive_definite(cov):\n break\n else:\n raise ValueError(\"Unable to find the nearest positive definite matrix\")\n else:\n eig_vals, eig_vecs = np.linalg.eigh(corr)\n # Clipping the eigenvalues with a value smaller than 1e-13 can cause scipy to\n # consider the matrix non-psd is some corner cases (see test/test_stats.py)\n x = eig_vecs * np.maximum(eig_vals, _CLIPPING_VALUE) @ eig_vecs.T\n x, _ = cov_to_corr(x)\n cov = corr_to_cov(x, std)\n\n return cov"}], "type": ["function_empty", "Development"], "node": ["skfolio.utils.stats.assert_is_square", "skfolio.utils.stats.assert_is_symmetric", "skfolio.utils.stats.cov_nearest"], "language": "Python", "toolfunc_count": 2, "func_count": 3, "pytest_info": {"total_num": 50, "base_passed_num": 37}} {"id": ["skfolio.src.skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.src.skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.src.skfolio.distribution.copula._gumbel._base_partial_derivative", "skfolio.src.skfolio.distribution.copula._gumbel._base_inverse_partial_derivative", "skfolio.src.skfolio.distribution.copula._clayton._base_inverse_partial_derivative", "skfolio.src.skfolio.distribution.copula._utils._apply_rotation_partial_derivatives"], "project": "skfolio", "origin_file": ["skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_gumbel.py", "skfolio/distribution/copula/_gumbel.py", "skfolio/distribution/copula/_clayton.py", "skfolio/distribution/copula/_utils.py"], "test_list": ["tests/test_prior/test_synthetic_data.py"], "prob_info": [{"class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 341, "func_end_lineno": 380, "func_code": "def _apply_copula_rotation(X: npt.ArrayLike, rotation: CopulaRotation) -> np.ndarray:\n r\"\"\"Apply a bivariate copula rotation using the standard (clockwise) convention.\n\n The transformations are defined as follows:\n\n - `CopulaRotation.R0` (0°): :math:`(u, v) \\mapsto (u, v)`\n - `CopulaRotation.R90` (90°): :math:`(u, v) \\mapsto (v,\\, 1 - u)`\n - `CopulaRotation.R180` (180°): :math:`(u, v) \\mapsto (1 - u,\\, 1 - v)`\n - `CopulaRotation.R270` (270°): :math:`(u, v) \\mapsto (1 - v,\\, u)`\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation.\n\n rotation : CopulaRotation\n The rotation to apply to the copula (default is no rotation).\n\n Returns\n -------\n rotated_X: ndarray of shape (n_observations, 2)\n The rotated data array.\n \"\"\"\n match rotation:\n case CopulaRotation.R0:\n # No rotation\n pass\n case CopulaRotation.R90:\n # (u, v) -> (v, 1 - u)\n X = np.column_stack([X[:, 1], 1.0 - X[:, 0]])\n case CopulaRotation.R180:\n # (u, v) -> (1 - u, 1 - v)\n X = 1.0 - X\n case CopulaRotation.R270:\n # (u, v) -> (1 - v, u)\n X = np.column_stack([1.0 - X[:, 1], X[:, 0]])\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return X"}, 
{"class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 383, "func_end_lineno": 406, "func_code": "def _apply_margin_swap(X: np.ndarray, first_margin: bool) -> np.ndarray:\n \"\"\"\n Swap the columns of X if first_margin is False.\n\n If first_margin is True, X is returned unchanged; otherwise, the columns\n of X are swapped.\n\n Parameters\n ----------\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs (u, v).\n first_margin : bool\n If True, no swap is performed; if False, the columns of X are swapped.\n\n Returns\n -------\n X_swapped : ndarray of shape (n_observations, 2)\n The data array with columns swapped if first_margin is False.\n \"\"\"\n assert X.ndim == 2\n assert X.shape[1] == 2\n if first_margin:\n return X[:, [1, 0]]\n return X"}, {"class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 464, "func_end_lineno": 507, "func_code": "def _base_partial_derivative(\n X: np.ndarray, first_margin: bool, theta: float\n) -> np.ndarray:\n r\"\"\"\n Compute the partial derivative (h-function) for the unrotated Gumbel copula.\n\n For Gumbel, the copula is defined as:\n\n .. math::\n C(u,v)=\\exp\\Bigl(-\\Bigl[(-\\ln u)^{\\theta}+(-\\ln v)^{\\theta}\\Bigr]^{1/\\theta}\\Bigr).\n\n The partial derivative with respect to v is:\n\n .. math::\n \\frac{\\partial C(u,v)}{\\partial v}\n = C(u,v)\\,\\Bigl[(-\\ln u)^{\\theta}+(-\\ln v)^{\\theta}\\Bigr]^{\\frac{1}{\\theta}-1}\n \\,(-\\ln v)^{\\theta-1}\\,\\frac{1}{v}.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` with values in [0, 1].\n\n first_margin : bool, default=False\n If True, compute with respect to u (by swapping margins); otherwise,\n compute with respect to v.\n\n theta : float\n The dependence parameter (must be > 1).\n\n Returns\n -------\n p : ndarray of shape (n_observations,)\n The computed h-function values.\n \"\"\"\n X = _apply_margin_swap(X, first_margin=first_margin)\n _, v = X.T\n x, y = -np.log(X).T\n p = (\n np.exp(-np.power(np.power(x, theta) + np.power(y, theta), 1.0 / theta))\n * np.power(np.power(x / y, theta) + 1.0, 1.0 / theta - 1.0)\n / v\n )\n return p"}, {"class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 510, "func_end_lineno": 560, "func_code": "def _base_inverse_partial_derivative(\n X: np.ndarray, first_margin: bool, theta: float\n) -> np.ndarray:\n r\"\"\"\n Compute the inverse partial derivative for the unrotated Gumbel copula,\n i.e. solve for u in h(u|v)=p.\n\n In other words, given\n - p, the value of the h-function, and\n - v, the conditioning variable,\n solve:\n\n .. math::\n p = C(u,v)\\,\\Bigl[(-\\ln u)^{\\theta}+(-\\ln v)^{\\theta}\\Bigr]^{\\frac{1}{\\theta}-1}\\,\n (-\\ln v)^{\\theta-1}\\,\\frac{1}{v},\n\n for u ∈ [0,1]. 
Since no closed-form solution exists, we use a numerical method.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array with first column p (h-function values) and second column v\n (conditioning variable).\n\n first_margin : bool, default=False\n If True, treat the first margin as the conditioning variable.\n\n theta : float\n The dependence parameter (must be > 1).\n\n Returns\n -------\n u : ndarray of shape (n_observations,)\n A 1D-array where each element is the solution u ∈ [0,1] such that h(u|v)=p.\n \"\"\"\n X = _apply_margin_swap(X, first_margin=first_margin)\n p, v = -np.log(X).T\n s = v + p + np.log(v) * (theta - 1.0)\n # Initial guess\n x = v.copy()\n max_iters = 50\n tol = 1e-8\n for _ in range(max_iters):\n x_new = x * (s - (theta - 1.0) * (np.log(x) - 1.0)) / (theta + x - 1.0)\n x_new = np.clip(x_new, x, None)\n diff = np.max(np.abs(x_new - x))\n x = x_new\n if diff < tol:\n break\n u = np.exp(-np.power(np.power(x, theta) - np.power(v, theta), 1.0 / theta))\n return u"}, {"class_start_lineno": 1, "class_end_lineno": 539, "func_start_lineno": 501, "func_end_lineno": 539, "func_code": "def _base_inverse_partial_derivative(\n X: np.ndarray, first_margin: bool, theta: float\n) -> np.ndarray:\n r\"\"\"\n Compute the inverse partial derivative for the unrotated Clayton copula,\n i.e. solve for u in h(u|v)=p.\n\n In other words, given\n - p, the value of the h-function, and\n - v, the conditioning variable,\n solve:\n\n .. math::\n p = \\Bigl(u^{-\\theta}+v^{-\\theta}-1\\Bigr)^{-1/\\theta-1}\\,v^{-\\theta-1},\n\n for u ∈ [0,1]. Since no closed-form solution exists, we use a Newton method.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array with first column p (h-function values) and second column v\n (conditioning variable).\n\n first_margin : bool, default=False\n If True, treat the first margin as the conditioning variable.\n\n theta : float\n The dependence parameter (must be > 0).\n\n Returns\n -------\n u : ndarray of shape (n_observations,)\n A 1D-array where each element is the solution u ∈ [0,1] such that h(u|v)=p.\n \"\"\"\n X = _apply_margin_swap(X, first_margin=first_margin)\n x = np.power(X[:, 0], -theta / (theta + 1.0))\n y = np.power(X[:, 1], -theta)\n u = np.power(1.0 + y * (x - 1.0), -1.0 / theta)\n return u"}, {"class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 452, "func_end_lineno": 509, "func_code": "def _apply_rotation_partial_derivatives(\n func: Callable,\n X: np.ndarray,\n rotation: CopulaRotation,\n first_margin: bool,\n **kwargs,\n) -> np.ndarray:\n \"\"\"\n Apply a copula rotation to X and compute the corresponding partial derivatives.\n\n This function rotates the data X using the specified rotation and then computes\n the partial derivative (h-function) using the provided function. 
The result is then\n adjusted according to the rotation and the margin of interest.\n\n Parameters\n ----------\n func : Callable\n A function that computes the partial derivative (h-function) given X, the\n margin, and any additional keyword arguments.\n\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs.\n\n rotation : CopulaRotation\n The rotation to apply.\n\n first_margin : bool\n If True, compute the partial derivative with respect to the first margin;\n otherwise, compute it with respect to the second margin.\n\n **kwargs\n Additional keyword arguments to pass to the partial derivative function.\n\n Returns\n -------\n z : ndarray of shape (n_observations,)\n The transformed partial derivative values after applying the rotation.\n \"\"\"\n rotated_X = _apply_copula_rotation(X, rotation=rotation)\n\n match rotation:\n case CopulaRotation.R0:\n z = func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R90:\n if first_margin:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case CopulaRotation.R180:\n z = 1 - func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R270:\n if first_margin:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return z"}], "type": ["function_empty", "Development"], "node": ["skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.distribution.copula._gumbel._base_partial_derivative", "skfolio.distribution.copula._gumbel._base_inverse_partial_derivative", "skfolio.distribution.copula._clayton._base_inverse_partial_derivative", "skfolio.distribution.copula._utils._apply_rotation_partial_derivatives"], "language": "Python", "toolfunc_count": 4, "func_count": 6, "pytest_info": {"total_num": 4, "base_passed_num": 0}}