id: list
project: string
origin_file: list
test_list: list
prob_info: list
type: list
node: list
language: string
toolfunc_count: int64
func_count: int64
pytest_info: dict
[ "transformers.src.transformers.audio_utils.hertz_to_mel", "transformers.src.transformers.audio_utils.mel_filter_bank" ]
transformers
[ "transformers/audio_utils.py", "transformers/audio_utils.py", "transformers/models/clap/feature_extraction_clap.py" ]
[ "tests/models/audio_spectrogram_transformer/test_feature_extraction_audio_spectrogram_transformer.py", "tests/models/clap/test_feature_extraction_clap.py", "tests/models/univnet/test_feature_extraction_univnet.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 1127, "func_start_lineno": 26, "func_end_lineno": 59, "func_code": "def hertz_to_mel(freq: Union[float, np.ndarray], mel_scale: str = \"htk\") -> Union[float, np.ndarray]:\n \"\"\"\n Convert frequency from hertz to mels.\n\n Args:\n freq (`float` or `np.ndarray`):\n The frequency, or multiple frequencies, in hertz (Hz).\n mel_scale (`str`, *optional*, defaults to `\"htk\"`):\n The mel frequency scale to use, `\"htk\"`, `\"kaldi\"` or `\"slaney\"`.\n\n Returns:\n `float` or `np.ndarray`: The frequencies on the mel scale.\n \"\"\"\n\n if mel_scale not in [\"slaney\", \"htk\", \"kaldi\"]:\n raise ValueError('mel_scale should be one of \"htk\", \"slaney\" or \"kaldi\".')\n\n if mel_scale == \"htk\":\n return 2595.0 * np.log10(1.0 + (freq / 700.0))\n elif mel_scale == \"kaldi\":\n return 1127.0 * np.log(1.0 + (freq / 700.0))\n\n min_log_hertz = 1000.0\n min_log_mel = 15.0\n logstep = 27.0 / np.log(6.4)\n mels = 3.0 * freq / 200.0\n\n if isinstance(freq, np.ndarray):\n log_region = freq >= min_log_hertz\n mels[log_region] = min_log_mel + np.log(freq[log_region] / min_log_hertz) * logstep\n elif freq >= min_log_hertz:\n mels = min_log_mel + np.log(freq / min_log_hertz) * logstep\n\n return mels" }, { "class_start_lineno": 1, "class_end_lineno": 1127, "func_start_lineno": 218, "func_end_lineno": 303, "func_code": "def mel_filter_bank(\n num_frequency_bins: int,\n num_mel_filters: int,\n min_frequency: float,\n max_frequency: float,\n sampling_rate: int,\n norm: Optional[str] = None,\n mel_scale: str = \"htk\",\n triangularize_in_mel_space: bool = False,\n) -> np.ndarray:\n \"\"\"\n Creates a frequency bin conversion matrix used to obtain a mel spectrogram. This is called a *mel filter bank*, and\n various implementation exist, which differ in the number of filters, the shape of the filters, the way the filters\n are spaced, the bandwidth of the filters, and the manner in which the spectrum is warped. The goal of these\n features is to approximate the non-linear human perception of the variation in pitch with respect to the frequency.\n\n Different banks of mel filters were introduced in the literature. The following variations are supported:\n\n - MFCC FB-20: introduced in 1980 by Davis and Mermelstein, it assumes a sampling frequency of 10 kHz and a speech\n bandwidth of `[0, 4600]` Hz.\n - MFCC FB-24 HTK: from the Cambridge HMM Toolkit (HTK) (1995) uses a filter bank of 24 filters for a speech\n bandwidth of `[0, 8000]` Hz. This assumes sampling rate ≥ 16 kHz.\n - MFCC FB-40: from the Auditory Toolbox for MATLAB written by Slaney in 1998, assumes a sampling rate of 16 kHz and\n speech bandwidth of `[133, 6854]` Hz. This version also includes area normalization.\n - HFCC-E FB-29 (Human Factor Cepstral Coefficients) of Skowronski and Harris (2004), assumes a sampling rate of\n 12.5 kHz and speech bandwidth of `[0, 6250]` Hz.\n\n This code is adapted from *torchaudio* and *librosa*. Note that the default parameters of torchaudio's\n `melscale_fbanks` implement the `\"htk\"` filters while librosa uses the `\"slaney\"` implementation.\n\n Args:\n num_frequency_bins (`int`):\n Number of frequencies used to compute the spectrogram (should be the same as in `stft`).\n num_mel_filters (`int`):\n Number of mel filters to generate.\n min_frequency (`float`):\n Lowest frequency of interest in Hz.\n max_frequency (`float`):\n Highest frequency of interest in Hz. 
This should not exceed `sampling_rate / 2`.\n sampling_rate (`int`):\n Sample rate of the audio waveform.\n norm (`str`, *optional*):\n If `\"slaney\"`, divide the triangular mel weights by the width of the mel band (area normalization).\n mel_scale (`str`, *optional*, defaults to `\"htk\"`):\n The mel frequency scale to use, `\"htk\"`, `\"kaldi\"` or `\"slaney\"`.\n triangularize_in_mel_space (`bool`, *optional*, defaults to `False`):\n If this option is enabled, the triangular filter is applied in mel space rather than frequency space. This\n should be set to `true` in order to get the same results as `torchaudio` when computing mel filters.\n\n Returns:\n `np.ndarray` of shape (`num_frequency_bins`, `num_mel_filters`): Triangular filter bank matrix. This is a\n projection matrix to go from a spectrogram to a mel spectrogram.\n \"\"\"\n if norm is not None and norm != \"slaney\":\n raise ValueError('norm must be one of None or \"slaney\"')\n\n # center points of the triangular mel filters\n mel_min = hertz_to_mel(min_frequency, mel_scale=mel_scale)\n mel_max = hertz_to_mel(max_frequency, mel_scale=mel_scale)\n mel_freqs = np.linspace(mel_min, mel_max, num_mel_filters + 2)\n filter_freqs = mel_to_hertz(mel_freqs, mel_scale=mel_scale)\n\n if triangularize_in_mel_space:\n # frequencies of FFT bins in Hz, but filters triangularized in mel space\n fft_bin_width = sampling_rate / (num_frequency_bins * 2)\n fft_freqs = hertz_to_mel(fft_bin_width * np.arange(num_frequency_bins), mel_scale=mel_scale)\n filter_freqs = mel_freqs\n else:\n # frequencies of FFT bins in Hz\n fft_freqs = np.linspace(0, sampling_rate // 2, num_frequency_bins)\n\n mel_filters = _create_triangular_filter_bank(fft_freqs, filter_freqs)\n\n if norm is not None and norm == \"slaney\":\n # Slaney-style mel is scaled to be approx constant energy per channel\n enorm = 2.0 / (filter_freqs[2 : num_mel_filters + 2] - filter_freqs[:num_mel_filters])\n mel_filters *= np.expand_dims(enorm, 0)\n\n if (mel_filters.max(axis=0) == 0.0).any():\n warnings.warn(\n \"At least one mel filter has all zero values. \"\n f\"The value for `num_mel_filters` ({num_mel_filters}) may be set too high. 
\"\n f\"Or, the value for `num_frequency_bins` ({num_frequency_bins}) may be set too low.\"\n )\n\n return mel_filters" }, { "class_start_lineno": 32, "class_end_lineno": 362, "func_start_lineno": 84, "func_end_lineno": 135, "func_code": " def __init__(\n self,\n feature_size=64,\n sampling_rate=48_000,\n hop_length=480,\n max_length_s=10,\n fft_window_size=1024,\n padding_value=0.0,\n return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask\n frequency_min: float = 0,\n frequency_max: float = 14_000,\n top_db: int = None,\n truncation: str = \"fusion\",\n padding: str = \"repeatpad\",\n **kwargs,\n ):\n super().__init__(\n feature_size=feature_size,\n sampling_rate=sampling_rate,\n padding_value=padding_value,\n return_attention_mask=return_attention_mask,\n **kwargs,\n )\n self.top_db = top_db\n self.truncation = truncation\n self.padding = padding\n self.fft_window_size = fft_window_size\n self.nb_frequency_bins = (fft_window_size >> 1) + 1\n self.hop_length = hop_length\n self.max_length_s = max_length_s\n self.nb_max_samples = max_length_s * sampling_rate\n self.sampling_rate = sampling_rate\n self.frequency_min = frequency_min\n self.frequency_max = frequency_max\n self.mel_filters = mel_filter_bank(\n num_frequency_bins=self.nb_frequency_bins,\n num_mel_filters=feature_size,\n min_frequency=frequency_min,\n max_frequency=frequency_max,\n sampling_rate=sampling_rate,\n norm=None,\n mel_scale=\"htk\",\n )\n self.mel_filters_slaney = mel_filter_bank(\n num_frequency_bins=self.nb_frequency_bins,\n num_mel_filters=feature_size,\n min_frequency=frequency_min,\n max_frequency=frequency_max,\n sampling_rate=sampling_rate,\n norm=\"slaney\",\n mel_scale=\"slaney\",\n )" } ]
[ "function_empty" ]
[ "transformers.audio_utils.hertz_to_mel", "transformers.audio_utils.mel_filter_bank", "transformers.models.clap.feature_extraction_clap.ClapFeatureExtractor.__init__" ]
Python
2
2
{ "total_num": 77, "base_passed_num": 47 }
[ "transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.blip.image_processing_blip.BlipImageProcessor::resize", "transformers.src.transformers.models.blip.image_processing_blip.BlipImageProcessor::preprocess" ]
transformers
[ "transformers/image_processing_utils.py", "transformers/image_transforms.py", "transformers/models/blip/image_processing_blip.py", "transformers/models/blip/image_processing_blip.py" ]
[ "tests/models/blip/test_image_processing_blip.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image" }, { "class_start_lineno": 46, "class_end_lineno": 294, "func_start_lineno": 111, "func_end_lineno": 157, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BICUBIC,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image to `(size[\"height\"], size[\"width\"])`.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Dictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BICUBIC`.\n data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n size = get_size_dict(size)\n if \"height\" not in size or \"width\" not in size:\n raise ValueError(f\"The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}\")\n output_size = (size[\"height\"], size[\"width\"])\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )" }, { "class_start_lineno": 46, "class_end_lineno": 294, "func_start_lineno": 160, "func_end_lineno": 294, "func_code": " def preprocess(\n self,\n images: ImageInput,\n do_resize: Optional[bool] = None,\n size: Optional[Dict[str, int]] = None,\n resample: PILImageResampling = None,\n do_rescale: Optional[bool] = None,\n rescale_factor: Optional[float] = None,\n do_normalize: Optional[bool] = None,\n image_mean: Optional[Union[float, List[float]]] = None,\n image_std: Optional[Union[float, List[float]]] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n do_convert_rgb: bool = None,\n data_format: ChannelDimension = ChannelDimension.FIRST,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> PIL.Image.Image:\n \"\"\"\n Preprocess an image or batch of images.\n\n Args:\n images (`ImageInput`):\n Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If\n passing in images with pixel values between 0 and 1, set `do_rescale=False`.\n do_resize (`bool`, *optional*, defaults to `self.do_resize`):\n Whether to resize the image.\n size (`Dict[str, int]`, *optional*, defaults to `self.size`):\n Controls the size of the image after `resize`. The shortest edge of the image is resized to\n `size[\"shortest_edge\"]` whilst preserving the aspect ratio. If the longest edge of this resized image\n is > `int(size[\"shortest_edge\"] * (1333 / 800))`, then the image is resized again to make the longest\n edge equal to `int(size[\"shortest_edge\"] * (1333 / 800))`.\n resample (`PILImageResampling`, *optional*, defaults to `self.resample`):\n Resampling filter to use if resizing the image. Only has an effect if `do_resize` is set to `True`.\n do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):\n Whether to rescale the image values between [0 - 1].\n rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):\n Rescale factor to rescale the image by if `do_rescale` is set to `True`.\n do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):\n Whether to normalize the image.\n image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):\n Image mean to normalize the image by if `do_normalize` is set to `True`.\n image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):\n Image standard deviation to normalize the image by if `do_normalize` is set to `True`.\n do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):\n Whether to convert the image to RGB.\n return_tensors (`str` or `TensorType`, *optional*):\n The type of tensors to return. 
Can be one of:\n - Unset: Return a list of `np.ndarray`.\n - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.\n - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.\n - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.\n - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.\n data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - Unset: Use the channel dimension format of the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n \"\"\"\n do_resize = do_resize if do_resize is not None else self.do_resize\n resample = resample if resample is not None else self.resample\n do_rescale = do_rescale if do_rescale is not None else self.do_rescale\n rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor\n do_normalize = do_normalize if do_normalize is not None else self.do_normalize\n image_mean = image_mean if image_mean is not None else self.image_mean\n image_std = image_std if image_std is not None else self.image_std\n do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb\n\n size = size if size is not None else self.size\n size = get_size_dict(size, default_to_square=False)\n\n images = make_list_of_images(images)\n\n if not valid_images(images):\n raise ValueError(\n \"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, \"\n \"torch.Tensor, tf.Tensor or jax.ndarray.\"\n )\n\n validate_preprocess_arguments(\n do_rescale=do_rescale,\n rescale_factor=rescale_factor,\n do_normalize=do_normalize,\n image_mean=image_mean,\n image_std=image_std,\n do_resize=do_resize,\n size=size,\n resample=resample,\n )\n # PIL RGBA images are converted to RGB\n if do_convert_rgb:\n images = [convert_to_rgb(image) for image in images]\n\n # All transformations expect numpy arrays.\n images = [to_numpy_array(image) for image in images]\n\n if is_scaled_image(images[0]) and do_rescale:\n logger.warning_once(\n \"It looks like you are trying to rescale already rescaled images. 
If the input\"\n \" images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again.\"\n )\n\n if input_data_format is None:\n # We assume that all images have the same channel dimension format.\n input_data_format = infer_channel_dimension_format(images[0])\n\n if do_resize:\n images = [\n self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_rescale:\n images = [\n self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)\n for image in images\n ]\n\n if do_normalize:\n images = [\n self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)\n for image in images\n ]\n\n images = [\n to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images\n ]\n\n encoded_outputs = BatchFeature(data={\"pixel_values\": images}, tensor_type=return_tensors)\n\n return encoded_outputs" } ]
[ "function_empty", "Development" ]
[ "transformers.image_processing_utils.get_size_dict", "transformers.image_transforms.resize", "transformers.models.blip.image_processing_blip.BlipImageProcessor.resize", "transformers.models.blip.image_processing_blip.BlipImageProcessor.preprocess" ]
Python
3
4
{ "total_num": 20, "base_passed_num": 12 }
[ "transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_transforms.get_resize_output_image_size", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.chinese_clip.image_processing_chinese_clip.ChineseCLIPImageProcessor::resize" ]
transformers
[ "transformers/image_processing_utils.py", "transformers/image_transforms.py", "transformers/image_transforms.py", "transformers/models/chinese_clip/image_processing_chinese_clip.py" ]
[ "tests/models/chinese_clip/test_image_processing_chinese_clip.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 214, "func_end_lineno": 278, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square\n (`size`,`size`). 
If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image" }, { "class_start_lineno": 51, "class_end_lineno": 306, "func_start_lineno": 125, "func_end_lineno": 162, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BICUBIC,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\n resized to keep the input aspect ratio.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. If not provided, it will be inferred from the input\n image.\n \"\"\"\n size = get_size_dict(size, default_to_square=False)\n output_size = get_resize_output_image_size(\n image, size=(size[\"height\"], size[\"width\"]), default_to_square=False, input_data_format=input_data_format\n )\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )" } ]
[ "function_empty", "Development" ]
[ "transformers.image_processing_utils.get_size_dict", "transformers.image_transforms.get_resize_output_image_size", "transformers.image_transforms.resize", "transformers.models.chinese_clip.image_processing_chinese_clip.ChineseCLIPImageProcessor.resize" ]
Python
3
4
{ "total_num": 21, "base_passed_num": 12 }
[ "transformers.src.transformers.image_utils.infer_channel_dimension_format", "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.fuyu.image_processing_fuyu.FuyuImageProcessor::resize" ]
transformers
[ "transformers/image_utils.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/models/fuyu/image_processing_fuyu.py" ]
[ "tests/models/fuyu/test_image_processing_fuyu.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 220, "func_end_lineno": 254, "func_code": "def infer_channel_dimension_format(\n image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None\n) -> ChannelDimension:\n \"\"\"\n Infers the channel dimension format of `image`.\n\n Args:\n image (`np.ndarray`):\n The image to infer the channel dimension of.\n num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):\n The number of channels of the image.\n\n Returns:\n The channel dimension of the image.\n \"\"\"\n num_channels = num_channels if num_channels is not None else (1, 3)\n num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels\n\n if image.ndim == 3:\n first_dim, last_dim = 0, 2\n elif image.ndim == 4:\n first_dim, last_dim = 1, 3\n else:\n raise ValueError(f\"Unsupported number of image dimensions: {image.ndim}\")\n\n if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:\n logger.warning(\n f\"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension.\"\n )\n return ChannelDimension.FIRST\n elif image.shape[first_dim] in num_channels:\n return ChannelDimension.FIRST\n elif image.shape[last_dim] in num_channels:\n return ChannelDimension.LAST\n raise ValueError(\"Unable to infer channel dimension format\")" }, { "class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. 
If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image" }, { "class_start_lineno": 182, "class_end_lineno": 720, "func_start_lineno": 266, "func_end_lineno": 322, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BILINEAR,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image to `(size[\"height\"], size[\"width\"])`.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Dictionary in the format `{\"height\": int, \"width\": int}` specifying the size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.\n data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the output image. If unset, the channel dimension format of the input\n image is used. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format for the input image. If unset, the channel dimension format is inferred\n from the input image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n - `\"none\"` or `ChannelDimension.NONE`: image in (height, width) format.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n image_height, image_width = get_image_size(image, input_data_format)\n target_height, target_width = size[\"height\"], size[\"width\"]\n\n if image_width <= target_width and image_height <= target_height:\n return image\n\n height_scale_factor = target_height / image_height\n width_scale_factor = target_width / image_width\n optimal_scale_factor = min(height_scale_factor, width_scale_factor)\n\n new_height = int(image_height * optimal_scale_factor)\n new_width = int(image_width * optimal_scale_factor)\n\n scaled_image = resize(\n image=image,\n size=(new_height, new_width),\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )\n return scaled_image" } ]
[ "function_empty" ]
[ "transformers.image_utils.infer_channel_dimension_format", "transformers.image_utils.get_image_size", "transformers.image_transforms.resize", "transformers.models.fuyu.image_processing_fuyu.FuyuImageProcessor.resize" ]
Python
4
4
{ "total_num": 4, "base_passed_num": 1 }
[ "transformers.src.transformers.models.musicgen_melody.feature_extraction_musicgen_melody.MusicgenMelodyFeatureExtractor::_extract_stem_indices", "transformers.src.transformers.models.musicgen_melody.feature_extraction_musicgen_melody.MusicgenMelodyFeatureExtractor::__call__" ]
transformers
[ "transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py", "transformers/models/musicgen_melody/feature_extraction_musicgen_melody.py" ]
[ "tests/models/musicgen_melody/test_feature_extraction_musicgen_melody.py" ]
[ { "class_start_lineno": 39, "class_end_lineno": 331, "func_start_lineno": 146, "func_end_lineno": 179, "func_code": " def _extract_stem_indices(self, audio, sampling_rate=None):\n \"\"\"\n Extracts stems from the output of the [Demucs](https://github.com/adefossez/demucs/tree/main) audio separation model,\n then converts to mono-channel and resample to the feature extractor sampling rate.\n\n Args:\n audio (`torch.Tensor` of shape `(batch_size, num_stems, channel_size, audio_length)`):\n The output of the Demucs model to be processed.\n sampling_rate (`int`, *optional*):\n Demucs sampling rate. If not specified, defaults to `44000`.\n \"\"\"\n sampling_rate = 44000 if sampling_rate is None else sampling_rate\n\n # extract \"vocals\" and \"others\" sources from audio encoder (demucs) output\n # [batch_size, num_stems, channel_size, audio_length]\n wav = audio[:, torch.tensor(self.stem_indices)]\n\n # merge extracted stems to single waveform\n wav = wav.sum(1)\n\n # convert to mono-channel waveform\n wav = wav.mean(dim=1, keepdim=True)\n\n # resample to model sampling rate\n # not equivalent to julius.resample\n if sampling_rate != self.sampling_rate:\n wav = torchaudio.functional.resample(\n wav, sampling_rate, self.sampling_rate, rolloff=0.945, lowpass_filter_width=24\n )\n\n # [batch_size, 1, audio_length] -> [batch_size, audio_length]\n wav = wav.squeeze(1)\n\n return wav" }, { "class_start_lineno": 39, "class_end_lineno": 331, "func_start_lineno": 181, "func_end_lineno": 314, "func_code": " def __call__(\n self,\n audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],\n truncation: bool = True,\n pad_to_multiple_of: Optional[int] = None,\n return_tensors: Optional[Union[str, TensorType]] = None,\n return_attention_mask: Optional[bool] = None,\n padding: Optional[str] = True,\n max_length: Optional[int] = None,\n sampling_rate: Optional[int] = None,\n **kwargs,\n ) -> BatchFeature:\n \"\"\"\n Main method to featurize and prepare for the model one or several sequence(s).\n\n Args:\n audio (`torch.Tensor`, `np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[torch.Tensor]`, `List[List[float]]`):\n The sequence or batch of sequences to be padded. Each sequence can be a torch tensor, a numpy array, a list of float\n values, a list of numpy arrays, a list of torch tensors, or a list of list of float values.\n If `audio` is the output of Demucs, it has to be a torch tensor of shape `(batch_size, num_stems, channel_size, audio_length)`.\n Otherwise, it must be mono or stereo channel audio.\n truncation (`bool`, *optional*, default to `True`):\n Activates truncation to cut input sequences longer than *max_length* to *max_length*.\n pad_to_multiple_of (`int`, *optional*, defaults to None):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability\n `>= 7.5` (Volta), or on TPUs which benefit from having sequence lengths be a multiple of 128.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether to return the attention mask. 
If left to the default, will return the attention mask according\n to the specific feature_extractor's default.\n\n [What are attention masks?](../glossary#attention-mask)\n\n <Tip>\n For Musicgen Melody models, audio `attention_mask` is not necessary.\n </Tip>\n\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding\n index) among:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n sequence if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n max_length (`int`, *optional*):\n Maximum length of the returned list and optionally padding length (see above).\n sampling_rate (`int`, *optional*):\n The sampling rate at which the `audio` input was sampled. It is strongly recommended to pass\n `sampling_rate` at the forward call to prevent silent errors.\n Note that if `audio` is the output of Demucs, `sampling_rate` must be the sampling rate at which Demucs operates.\n \"\"\"\n\n if sampling_rate is None:\n logger.warning_once(\n \"It is strongly recommended to pass the `sampling_rate` argument to this function. \"\n \"Failing to do so can result in silent errors that might be hard to debug.\"\n )\n\n if isinstance(audio, torch.Tensor) and len(audio.shape) == 4:\n logger.warning_once(\n \"`audio` is a 4-dimensional torch tensor and has thus been recognized as the output of `Demucs`. \"\n \"If this is not the case, make sure to read Musicgen Melody docstrings and \"\n \"to correct `audio` to get the right behaviour.\"\n \"Link to the docstrings: https://huggingface.co/docs/transformers/main/en/model_doc/musicgen_melody\"\n )\n audio = self._extract_stem_indices(audio, sampling_rate=sampling_rate)\n elif sampling_rate is not None and sampling_rate != self.sampling_rate:\n audio = torchaudio.functional.resample(\n audio, sampling_rate, self.sampling_rate, rolloff=0.945, lowpass_filter_width=24\n )\n\n is_batched = isinstance(audio, (np.ndarray, torch.Tensor)) and len(audio.shape) > 1\n is_batched = is_batched or (\n isinstance(audio, (list, tuple)) and (isinstance(audio[0], (torch.Tensor, np.ndarray, tuple, list)))\n )\n\n if is_batched and not isinstance(audio[0], torch.Tensor):\n audio = [torch.tensor(speech, dtype=torch.float32).unsqueeze(-1) for speech in audio]\n elif is_batched:\n audio = [speech.unsqueeze(-1) for speech in audio]\n elif not is_batched and not isinstance(audio, torch.Tensor):\n audio = torch.tensor(audio, dtype=torch.float32).unsqueeze(-1)\n\n if isinstance(audio[0], torch.Tensor) and audio[0].dtype is torch.float64:\n audio = [speech.to(torch.float32) for speech in audio]\n\n # always return batch\n if not is_batched:\n audio = [audio]\n\n if len(audio[0].shape) == 3:\n logger.warning_once(\n \"`audio` has been detected as a batch of stereo signals. Will be convert to mono signals. 
\"\n \"If this is an undesired behaviour, make sure to read Musicgen Melody docstrings and \"\n \"to correct `audio` to get the right behaviour.\"\n \"Link to the docstrings: https://huggingface.co/docs/transformers/main/en/model_doc/musicgen_melody\"\n )\n # convert to mono-channel waveform\n audio = [stereo.mean(dim=0) for stereo in audio]\n\n batched_speech = BatchFeature({\"input_features\": audio})\n\n padded_inputs = self.pad(\n batched_speech,\n padding=padding,\n max_length=max_length if max_length else self.n_samples,\n truncation=truncation,\n pad_to_multiple_of=pad_to_multiple_of,\n return_attention_mask=return_attention_mask,\n return_tensors=\"pt\",\n )\n\n input_features = self._torch_extract_fbank_features(padded_inputs[\"input_features\"].squeeze(-1))\n\n padded_inputs[\"input_features\"] = input_features\n\n if return_attention_mask:\n # rescale from raw audio length to spectrogram length\n padded_inputs[\"attention_mask\"] = padded_inputs[\"attention_mask\"][:, :: self.hop_length]\n\n if return_tensors is not None:\n padded_inputs = padded_inputs.convert_to_tensors(return_tensors)\n\n return padded_inputs" } ]
[ "function_empty" ]
[ "transformers.models.musicgen_melody.feature_extraction_musicgen_melody.MusicgenMelodyFeatureExtractor._extract_stem_indices", "transformers.models.musicgen_melody.feature_extraction_musicgen_melody.MusicgenMelodyFeatureExtractor.__call__" ]
Python
2
2
{ "total_num": 18, "base_passed_num": 15 }
[ "transformers.src.transformers.utils.backbone_utils._align_output_features_output_indices", "transformers.src.transformers.utils.backbone_utils.get_aligned_output_features_output_indices" ]
transformers
[ "transformers/utils/backbone_utils.py", "transformers/utils/backbone_utils.py", "transformers/models/rt_detr/configuration_rt_detr_resnet.py" ]
[ "tests/models/rt_detr/test_modeling_rt_detr_resnet.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 377, "func_start_lineno": 77, "func_end_lineno": 105, "func_code": "def _align_output_features_output_indices(\n out_features: Optional[List[str]],\n out_indices: Optional[Union[List[int], Tuple[int]]],\n stage_names: List[str],\n):\n \"\"\"\n Finds the corresponding `out_features` and `out_indices` for the given `stage_names`.\n\n The logic is as follows:\n - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the\n `out_indices`.\n - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the\n `out_features`.\n - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.\n - `out_indices` and `out_features` set: input `out_indices` and `out_features` are returned.\n\n Args:\n out_features (`List[str]`): The names of the features for the backbone to output.\n out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.\n stage_names (`List[str]`): The names of the stages of the backbone.\n \"\"\"\n if out_indices is None and out_features is None:\n out_indices = [len(stage_names) - 1]\n out_features = [stage_names[-1]]\n elif out_indices is None and out_features is not None:\n out_indices = [stage_names.index(layer) for layer in out_features]\n elif out_features is None and out_indices is not None:\n out_features = [stage_names[idx] for idx in out_indices]\n return out_features, out_indices" }, { "class_start_lineno": 1, "class_end_lineno": 377, "func_start_lineno": 108, "func_end_lineno": 137, "func_code": "def get_aligned_output_features_output_indices(\n out_features: Optional[List[str]],\n out_indices: Optional[Union[List[int], Tuple[int]]],\n stage_names: List[str],\n) -> Tuple[List[str], List[int]]:\n \"\"\"\n Get the `out_features` and `out_indices` so that they are aligned.\n\n The logic is as follows:\n - `out_features` not set, `out_indices` set: `out_features` is set to the `out_features` corresponding to the\n `out_indices`.\n - `out_indices` not set, `out_features` set: `out_indices` is set to the `out_indices` corresponding to the\n `out_features`.\n - `out_indices` and `out_features` not set: `out_indices` and `out_features` are set to the last stage.\n - `out_indices` and `out_features` set: they are verified to be aligned.\n\n Args:\n out_features (`List[str]`): The names of the features for the backbone to output.\n out_indices (`List[int]` or `Tuple[int]`): The indices of the features for the backbone to output.\n stage_names (`List[str]`): The names of the stages of the backbone.\n \"\"\"\n out_indices = list(out_indices) if out_indices is not None else None\n # First verify that the out_features and out_indices are valid\n verify_out_features_out_indices(out_features=out_features, out_indices=out_indices, stage_names=stage_names)\n output_features, output_indices = _align_output_features_output_indices(\n out_features=out_features, out_indices=out_indices, stage_names=stage_names\n )\n # Verify that the aligned out_features and out_indices are valid\n verify_out_features_out_indices(out_features=output_features, out_indices=output_indices, stage_names=stage_names)\n return output_features, output_indices" }, { "class_start_lineno": 25, "class_end_lineno": 111, "func_start_lineno": 83, "func_end_lineno": 111, "func_code": " def __init__(\n self,\n num_channels=3,\n embedding_size=64,\n hidden_sizes=[256, 512, 1024, 2048],\n depths=[3, 4, 6, 
3],\n layer_type=\"bottleneck\",\n hidden_act=\"relu\",\n downsample_in_first_stage=False,\n downsample_in_bottleneck=False,\n out_features=None,\n out_indices=None,\n **kwargs,\n ):\n super().__init__(**kwargs)\n if layer_type not in self.layer_types:\n raise ValueError(f\"layer_type={layer_type} is not one of {','.join(self.layer_types)}\")\n self.num_channels = num_channels\n self.embedding_size = embedding_size\n self.hidden_sizes = hidden_sizes\n self.depths = depths\n self.layer_type = layer_type\n self.hidden_act = hidden_act\n self.downsample_in_first_stage = downsample_in_first_stage\n self.downsample_in_bottleneck = downsample_in_bottleneck\n self.stage_names = [\"stem\"] + [f\"stage{idx}\" for idx in range(1, len(depths) + 1)]\n self._out_features, self._out_indices = get_aligned_output_features_output_indices(\n out_features=out_features, out_indices=out_indices, stage_names=self.stage_names\n )" } ]
[ "function_empty" ]
[ "transformers.utils.backbone_utils._align_output_features_output_indices", "transformers.utils.backbone_utils.get_aligned_output_features_output_indices", "transformers.models.rt_detr.configuration_rt_detr_resnet.RTDetrResNetConfig.__init__" ]
Python
2
2
{ "total_num": 8, "base_passed_num": 0 }
[ "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.get_resize_output_image_size", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.video_llava.image_processing_video_llava.VideoLlavaImageProcessor::resize" ]
transformers
[ "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/image_transforms.py", "transformers/models/video_llava/image_processing_video_llava.py" ]
[ "tests/models/video_llava/test_image_processing_video_llava.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 214, "func_end_lineno": 278, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square\n (`size`,`size`). If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image" }, { "class_start_lineno": 69, "class_end_lineno": 404, "func_start_lineno": 143, "func_end_lineno": 190, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BICUBIC,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image. The shortest edge of the image is resized to size[\"shortest_edge\"], with the longest edge\n resized to keep the input aspect ratio.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`ChannelDimension` or `str`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred.\n \"\"\"\n default_to_square = True\n if \"shortest_edge\" in size:\n size = size[\"shortest_edge\"]\n default_to_square = False\n elif \"height\" in size and \"width\" in size:\n size = (size[\"height\"], size[\"width\"])\n else:\n raise ValueError(\"Size must contain either 'shortest_edge' or 'height' and 'width'.\")\n\n output_size = get_resize_output_image_size(\n image,\n size=size,\n default_to_square=default_to_square,\n input_data_format=input_data_format,\n )\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )" } ]
[ "function_empty" ]
[ "transformers.image_utils.get_image_size", "transformers.image_transforms.get_resize_output_image_size", "transformers.image_transforms.resize", "transformers.models.video_llava.image_processing_video_llava.VideoLlavaImageProcessor.resize" ]
Python
4
4
{ "total_num": 18, "base_passed_num": 6 }
[ "transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.get_resize_output_image_size", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.videomae.image_processing_videomae.VideoMAEImageProcessor::resize" ]
transformers
[ "transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/image_transforms.py", "transformers/models/videomae/image_processing_videomae.py" ]
[ "tests/models/videomae/test_image_processing_videomae.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict" }, { "class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. 
If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 214, "func_end_lineno": 278, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square\n (`size`,`size`). If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image" }, { "class_start_lineno": 63, "class_end_lineno": 345, "func_start_lineno": 134, "func_end_lineno": 176, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BILINEAR,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image. If `size` is of the form `{\"height\": h, \"width\": w}`, the output image will\n have the size `(h, w)`. If `size` is of the form `{\"shortest_edge\": s}`, the output image will have its\n shortest edge of length `s` while keeping the aspect ratio of the original image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred.\n \"\"\"\n size = get_size_dict(size, default_to_square=False)\n if \"shortest_edge\" in size:\n output_size = get_resize_output_image_size(\n image, size[\"shortest_edge\"], default_to_square=False, input_data_format=input_data_format\n )\n elif \"height\" in size and \"width\" in size:\n output_size = (size[\"height\"], size[\"width\"])\n else:\n raise ValueError(f\"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}\")\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )" } ]
[ "function_empty" ]
[ "transformers.image_processing_utils.get_size_dict", "transformers.image_utils.get_image_size", "transformers.image_transforms.get_resize_output_image_size", "transformers.image_transforms.resize", "transformers.models.videomae.image_processing_videomae.VideoMAEImageProcessor.resize" ]
Python
5
5
{ "total_num": 13, "base_passed_num": 6 }
[ "transformers.src.transformers.image_processing_utils.get_size_dict", "transformers.src.transformers.image_utils.get_image_size", "transformers.src.transformers.image_transforms.get_resize_output_image_size", "transformers.src.transformers.image_transforms.resize", "transformers.src.transformers.models.vivit.image_processing_vivit.VivitImageProcessor::resize" ]
transformers
[ "transformers/image_processing_utils.py", "transformers/image_utils.py", "transformers/image_transforms.py", "transformers/image_transforms.py", "transformers/models/vivit/image_processing_vivit.py" ]
[ "tests/models/vivit/test_image_processing_vivit.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 287, "func_start_lineno": 208, "func_end_lineno": 249, "func_code": "def get_size_dict(\n size: Union[int, Iterable[int], Dict[str, int]] = None,\n max_size: Optional[int] = None,\n height_width_order: bool = True,\n default_to_square: bool = True,\n param_name=\"size\",\n) -> dict:\n \"\"\"\n Converts the old size parameter in the config into the new dict expected in the config. This is to ensure backwards\n compatibility with the old image processor configs and removes ambiguity over whether the tuple is in (height,\n width) or (width, height) format.\n\n - If `size` is tuple, it is converted to `{\"height\": size[0], \"width\": size[1]}` or `{\"height\": size[1], \"width\":\n size[0]}` if `height_width_order` is `False`.\n - If `size` is an int, and `default_to_square` is `True`, it is converted to `{\"height\": size, \"width\": size}`.\n - If `size` is an int and `default_to_square` is False, it is converted to `{\"shortest_edge\": size}`. If `max_size`\n is set, it is added to the dict as `{\"longest_edge\": max_size}`.\n\n Args:\n size (`Union[int, Iterable[int], Dict[str, int]]`, *optional*):\n The `size` parameter to be cast into a size dictionary.\n max_size (`Optional[int]`, *optional*):\n The `max_size` parameter to be cast into a size dictionary.\n height_width_order (`bool`, *optional*, defaults to `True`):\n If `size` is a tuple, whether it's in (height, width) or (width, height) order.\n default_to_square (`bool`, *optional*, defaults to `True`):\n If `size` is an int, whether to default to a square image or not.\n \"\"\"\n if not isinstance(size, dict):\n size_dict = convert_to_size_dict(size, max_size, default_to_square, height_width_order)\n logger.info(\n f\"{param_name} should be a dictionary on of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size}.\"\n f\" Converted to {size_dict}.\",\n )\n else:\n size_dict = size\n\n if not is_valid_size_dict(size_dict):\n raise ValueError(\n f\"{param_name} must have one of the following set of keys: {VALID_SIZE_DICT_KEYS}, got {size_dict.keys()}\"\n )\n return size_dict" }, { "class_start_lineno": 1, "class_end_lineno": 811, "func_start_lineno": 281, "func_end_lineno": 302, "func_code": "def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:\n \"\"\"\n Returns the (height, width) dimensions of the image.\n\n Args:\n image (`np.ndarray`):\n The image to get the dimensions of.\n channel_dim (`ChannelDimension`, *optional*):\n Which dimension the channel dimension is in. 
If `None`, will infer the channel dimension from the image.\n\n Returns:\n A tuple of the image's height and width.\n \"\"\"\n if channel_dim is None:\n channel_dim = infer_channel_dimension_format(image)\n\n if channel_dim == ChannelDimension.FIRST:\n return image.shape[-2], image.shape[-1]\n elif channel_dim == ChannelDimension.LAST:\n return image.shape[-3], image.shape[-2]\n else:\n raise ValueError(f\"Unsupported data format: {channel_dim}\")" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 214, "func_end_lineno": 278, "func_code": "def get_resize_output_image_size(\n input_image: np.ndarray,\n size: Union[int, Tuple[int, int], List[int], Tuple[int]],\n default_to_square: bool = True,\n max_size: Optional[int] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> tuple:\n \"\"\"\n Find the target (height, width) dimension of the output image after resizing given the input image and the desired\n size.\n\n Args:\n input_image (`np.ndarray`):\n The image to resize.\n size (`int` or `Tuple[int, int]` or List[int] or `Tuple[int]`):\n The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be matched to\n this.\n\n If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If\n `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to this\n number. i.e, if height > width, then image will be rescaled to (size * height / width, size).\n default_to_square (`bool`, *optional*, defaults to `True`):\n How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a square\n (`size`,`size`). If set to `False`, will replicate\n [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)\n with support for resizing only the smallest edge and providing an optional `max_size`.\n max_size (`int`, *optional*):\n The maximum allowed for the longer edge of the resized image: if the longer edge of the image is greater\n than `max_size` after being resized according to `size`, then the image is resized again so that the longer\n edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller edge may be shorter\n than `size`. Only used if `default_to_square` is `False`.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `tuple`: The target (height, width) dimension of the output image after resizing.\n \"\"\"\n if isinstance(size, (tuple, list)):\n if len(size) == 2:\n return tuple(size)\n elif len(size) == 1:\n # Perform same logic as if size was an int\n size = size[0]\n else:\n raise ValueError(\"size must have 1 or 2 elements if it is a list or tuple\")\n\n if default_to_square:\n return (size, size)\n\n height, width = get_image_size(input_image, input_data_format)\n short, long = (width, height) if width <= height else (height, width)\n requested_new_short = size\n\n new_short, new_long = requested_new_short, int(requested_new_short * long / short)\n\n if max_size is not None:\n if max_size <= requested_new_short:\n raise ValueError(\n f\"max_size = {max_size} must be strictly greater than the requested \"\n f\"size for the smaller edge size = {size}\"\n )\n if new_long > max_size:\n new_short, new_long = int(max_size * new_short / new_long), max_size\n\n return (new_long, new_short) if width <= height else (new_short, new_long)" }, { "class_start_lineno": 1, "class_end_lineno": 854, "func_start_lineno": 281, "func_end_lineno": 349, "func_code": "def resize(\n image: np.ndarray,\n size: Tuple[int, int],\n resample: \"PILImageResampling\" = None,\n reducing_gap: Optional[int] = None,\n data_format: Optional[ChannelDimension] = None,\n return_numpy: bool = True,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n) -> np.ndarray:\n \"\"\"\n Resizes `image` to `(height, width)` specified by `size` using the PIL library.\n\n Args:\n image (`np.ndarray`):\n The image to resize.\n size (`Tuple[int, int]`):\n The size to use for resizing the image.\n resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n The filter to user for resampling.\n reducing_gap (`int`, *optional*):\n Apply optimization by resizing the image in two steps. The bigger `reducing_gap`, the closer the result to\n the fair resampling. See corresponding Pillow documentation for more details.\n data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the output image. If unset, will use the inferred format from the input.\n return_numpy (`bool`, *optional*, defaults to `True`):\n Whether or not to return the resized image as a numpy array. If False a `PIL.Image.Image` object is\n returned.\n input_data_format (`ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If unset, will use the inferred format from the input.\n\n Returns:\n `np.ndarray`: The resized image.\n \"\"\"\n requires_backends(resize, [\"vision\"])\n\n resample = resample if resample is not None else PILImageResampling.BILINEAR\n\n if not len(size) == 2:\n raise ValueError(\"size must have 2 elements\")\n\n # For all transformations, we want to keep the same data format as the input image unless otherwise specified.\n # The resized image from PIL will always have channels last, so find the input format first.\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n data_format = input_data_format if data_format is None else data_format\n\n # To maintain backwards compatibility with the resizing done in previous image feature extractors, we use\n # the pillow library to resize the image and then convert back to numpy\n do_rescale = False\n if not isinstance(image, PIL.Image.Image):\n do_rescale = _rescale_for_pil_conversion(image)\n image = to_pil_image(image, do_rescale=do_rescale, input_data_format=input_data_format)\n height, width = size\n # PIL images are in the format (width, height)\n resized_image = image.resize((width, height), resample=resample, reducing_gap=reducing_gap)\n\n if return_numpy:\n resized_image = np.array(resized_image)\n # If the input image channel dimension was of size 1, then it is dropped when converting to a PIL image\n # so we need to add it back if necessary.\n resized_image = np.expand_dims(resized_image, axis=-1) if resized_image.ndim == 2 else resized_image\n # The image is always in channels last format after converting from a PIL image\n resized_image = to_channel_dimension_format(\n resized_image, data_format, input_channel_dim=ChannelDimension.LAST\n )\n # If an image was rescaled to be in the range [0, 255] before converting to a PIL image, then we need to\n # rescale it back to the original range.\n resized_image = rescale(resized_image, 1 / 255) if do_rescale else resized_image\n return resized_image" }, { "class_start_lineno": 66, "class_end_lineno": 404, "func_start_lineno": 142, "func_end_lineno": 184, "func_code": " def resize(\n self,\n image: np.ndarray,\n size: Dict[str, int],\n resample: PILImageResampling = PILImageResampling.BILINEAR,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n **kwargs,\n ) -> np.ndarray:\n \"\"\"\n Resize an image.\n\n Args:\n image (`np.ndarray`):\n Image to resize.\n size (`Dict[str, int]`):\n Size of the output image. If `size` is of the form `{\"height\": h, \"width\": w}`, the output image will\n have the size `(h, w)`. If `size` is of the form `{\"shortest_edge\": s}`, the output image will have its\n shortest edge of length `s` while keeping the aspect ratio of the original image.\n resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`):\n Resampling filter to use when resiizing the image.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the image. If not provided, it will be the same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format of the input image. 
If not provided, it will be inferred.\n \"\"\"\n size = get_size_dict(size, default_to_square=False)\n if \"shortest_edge\" in size:\n output_size = get_resize_output_image_size(\n image, size[\"shortest_edge\"], default_to_square=False, input_data_format=input_data_format\n )\n elif \"height\" in size and \"width\" in size:\n output_size = (size[\"height\"], size[\"width\"])\n else:\n raise ValueError(f\"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}\")\n return resize(\n image,\n size=output_size,\n resample=resample,\n data_format=data_format,\n input_data_format=input_data_format,\n **kwargs,\n )" } ]
[ "function_empty", "Development" ]
[ "transformers.image_processing_utils.get_size_dict", "transformers.image_utils.get_image_size", "transformers.image_transforms.get_resize_output_image_size", "transformers.image_transforms.resize", "transformers.models.vivit.image_processing_vivit.VivitImageProcessor.resize" ]
Python
4
5
{ "total_num": 14, "base_passed_num": 7 }
[ "transformers.src.transformers.utils.generic._get_frameworks_and_test_func", "transformers.src.transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer::decode", "transformers.src.transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer::convert_tokens_to_string", "transformers.src.transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer::_decode" ]
transformers
[ "transformers/utils/generic.py", "transformers/models/wav2vec2/tokenization_wav2vec2.py", "transformers/models/wav2vec2/tokenization_wav2vec2.py", "transformers/models/wav2vec2/tokenization_wav2vec2.py" ]
[ "tests/models/wav2vec2/test_tokenization_wav2vec2.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 856, "func_start_lineno": 98, "func_end_lineno": 116, "func_code": "def _get_frameworks_and_test_func(x):\n \"\"\"\n Returns an (ordered since we are in Python 3.7+) dictionary framework to test function, which places the framework\n we can guess from the repr first, then Numpy, then the others.\n \"\"\"\n framework_to_test = {\n \"pt\": is_torch_tensor,\n \"tf\": is_tf_tensor,\n \"jax\": is_jax_tensor,\n \"np\": is_numpy_array,\n \"mlx\": is_mlx_array,\n }\n preferred_framework = infer_framework_from_repr(x)\n # We will test this one first, then numpy, then the others.\n frameworks = [] if preferred_framework is None else [preferred_framework]\n if preferred_framework != \"np\":\n frameworks.append(\"np\")\n frameworks.extend([f for f in framework_to_test if f not in [preferred_framework, \"np\"]])\n return {f: framework_to_test[f] for f in frameworks}" }, { "class_start_lineno": 115, "class_end_lineno": 644, "func_start_lineno": 528, "func_end_lineno": 631, "func_code": " def decode(\n self,\n token_ids: Union[int, List[int], \"np.ndarray\", \"torch.Tensor\", \"tf.Tensor\"],\n skip_special_tokens: bool = False,\n clean_up_tokenization_spaces: bool = None,\n output_char_offsets: bool = False,\n output_word_offsets: bool = False,\n **kwargs,\n ) -> str:\n \"\"\"\n Converts a sequence of ids in a string, using the tokenizer and vocabulary with options to remove special\n tokens and clean up tokenization spaces.\n\n Similar to doing `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))`.\n\n Args:\n token_ids (`Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]`):\n List of tokenized input ids. Can be obtained using the `__call__` method.\n skip_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not to remove special tokens in the decoding.\n clean_up_tokenization_spaces (`bool`, *optional*):\n Whether or not to clean up the tokenization spaces.\n output_char_offsets (`bool`, *optional*, defaults to `False`):\n Whether or not to output character offsets. Character offsets can be used in combination with the\n sampling rate and model downsampling rate to compute the time-stamps of transcribed characters.\n\n <Tip>\n\n Please take a look at the example below to better understand how to make use of `output_char_offsets`.\n\n </Tip>\n\n output_word_offsets (`bool`, *optional*, defaults to `False`):\n Whether or not to output word offsets. Word offsets can be used in combination with the sampling rate\n and model downsampling rate to compute the time-stamps of transcribed words.\n\n <Tip>\n\n Please take a look at the example below to better understand how to make use of `output_word_offsets`.\n\n </Tip>\n\n kwargs (additional keyword arguments, *optional*):\n Will be passed to the underlying model specific decode method.\n\n Returns:\n `str` or [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`]: The list of decoded\n sentences. 
Will be a [`~models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizerOutput`] when\n `output_char_offsets == True` or `output_word_offsets == True`.\n\n Example:\n\n ```python\n >>> # Let's see how to retrieve time steps for a model\n >>> from transformers import AutoTokenizer, AutoFeatureExtractor, AutoModelForCTC\n >>> from datasets import load_dataset\n >>> import datasets\n >>> import torch\n\n >>> # import model, feature extractor, tokenizer\n >>> model = AutoModelForCTC.from_pretrained(\"facebook/wav2vec2-base-960h\")\n >>> tokenizer = AutoTokenizer.from_pretrained(\"facebook/wav2vec2-base-960h\")\n >>> feature_extractor = AutoFeatureExtractor.from_pretrained(\"facebook/wav2vec2-base-960h\")\n\n >>> # load first sample of English common_voice\n >>> dataset = load_dataset(\"mozilla-foundation/common_voice_11_0\", \"en\", split=\"train\", streaming=True, trust_remote_code=True)\n >>> dataset = dataset.cast_column(\"audio\", datasets.Audio(sampling_rate=16_000))\n >>> dataset_iter = iter(dataset)\n >>> sample = next(dataset_iter)\n\n >>> # forward sample through model to get greedily predicted transcription ids\n >>> input_values = feature_extractor(sample[\"audio\"][\"array\"], return_tensors=\"pt\").input_values\n >>> logits = model(input_values).logits[0]\n >>> pred_ids = torch.argmax(logits, axis=-1)\n\n >>> # retrieve word stamps (analogous commands for `output_char_offsets`)\n >>> outputs = tokenizer.decode(pred_ids, output_word_offsets=True)\n >>> # compute `time_offset` in seconds as product of downsampling ratio and sampling_rate\n >>> time_offset = model.config.inputs_to_logits_ratio / feature_extractor.sampling_rate\n\n >>> word_offsets = [\n ... {\n ... \"word\": d[\"word\"],\n ... \"start_time\": round(d[\"start_offset\"] * time_offset, 2),\n ... \"end_time\": round(d[\"end_offset\"] * time_offset, 2),\n ... }\n ... for d in outputs.word_offsets\n ... 
]\n >>> # compare word offsets with audio `en_train_0/common_voice_en_19121553.mp3` online on the dataset viewer:\n >>> # https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/viewer/en\n >>> word_offsets[:3]\n [{'word': 'THE', 'start_time': 0.7, 'end_time': 0.78}, {'word': 'TRICK', 'start_time': 0.88, 'end_time': 1.08}, {'word': 'APPEARS', 'start_time': 1.2, 'end_time': 1.64}]\n ```\"\"\"\n # Convert inputs to python lists\n token_ids = to_py_obj(token_ids)\n\n return self._decode(\n token_ids=token_ids,\n skip_special_tokens=skip_special_tokens,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n output_char_offsets=output_char_offsets,\n output_word_offsets=output_word_offsets,\n **kwargs,\n )" }, { "class_start_lineno": 115, "class_end_lineno": 644, "func_start_lineno": 285, "func_end_lineno": 346, "func_code": " def convert_tokens_to_string(\n self,\n tokens: List[str],\n group_tokens: bool = True,\n spaces_between_special_tokens: bool = False,\n output_char_offsets: bool = False,\n output_word_offsets: bool = False,\n ) -> Dict[str, Union[str, float]]:\n \"\"\"\n Converts a connectionist-temporal-classification (CTC) output tokens into a single string.\n \"\"\"\n if len(tokens) == 0:\n return {\"text\": \"\", \"char_offsets\": [], \"word_offsets\": []}\n # group same tokens into non-repeating tokens in CTC style decoding\n if group_tokens:\n chars, char_repetitions = zip(*((token, len(list(group_iter))) for token, group_iter in groupby(tokens)))\n else:\n chars = tokens\n char_repetitions = len(tokens) * [1]\n\n # filter self.pad_token which is used as CTC-blank token\n processed_chars = list(filter(lambda char: char != self.pad_token, chars))\n\n # replace delimiter token\n processed_chars = [\n self.replace_word_delimiter_char if char == self.word_delimiter_token else char for char in processed_chars\n ]\n\n # retrieve offsets\n char_offsets = word_offsets = None\n if output_char_offsets or output_word_offsets:\n char_offsets = self._compute_offsets(char_repetitions, chars, self.pad_token)\n\n if len(char_offsets) != len(processed_chars):\n raise ValueError(\n f\"`char_offsets`: {char_offsets} and `processed_tokens`: {processed_chars}\"\n \" have to be of the same length, but are: \"\n f\"`len(offsets)`: {len(char_offsets)} and `len(processed_tokens)`:\"\n f\" {len(processed_chars)}\"\n )\n\n # set tokens to correct processed token\n for i, char in enumerate(processed_chars):\n char_offsets[i][\"char\"] = char\n\n # retrieve word offsets from character offsets\n word_offsets = None\n if output_word_offsets:\n word_offsets = self._get_word_offsets(char_offsets, self.replace_word_delimiter_char)\n\n # don't output chars if not set to True\n if not output_char_offsets:\n char_offsets = None\n\n # join to string\n join_char = \" \" if spaces_between_special_tokens else \"\"\n string = join_char.join(processed_chars).strip()\n\n if self.do_lower_case:\n string = string.lower()\n\n return {\"text\": string, \"char_offsets\": char_offsets, \"word_offsets\": word_offsets}" }, { "class_start_lineno": 115, "class_end_lineno": 644, "func_start_lineno": 403, "func_end_lineno": 453, "func_code": " def _decode(\n self,\n token_ids: List[int],\n skip_special_tokens: bool = False,\n clean_up_tokenization_spaces: bool = None,\n group_tokens: bool = True,\n spaces_between_special_tokens: bool = False,\n output_word_offsets: Optional[bool] = False,\n output_char_offsets: Optional[bool] = False,\n ) -> str:\n \"\"\"\n special _decode function is needed for Wav2Vec2Tokenizer 
because added tokens should be treated exactly the\n same as tokens of the base vocabulary and therefore the function `convert_tokens_to_string` has to be called on\n the whole token list and not individually on added tokens\n \"\"\"\n filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)\n\n result = []\n for token in filtered_tokens:\n if skip_special_tokens and (\n token in self.all_special_ids or (token != self.pad_token and token in self.all_special_tokens)\n ):\n continue\n result.append(token)\n\n string_output = self.convert_tokens_to_string(\n result,\n group_tokens=group_tokens,\n spaces_between_special_tokens=spaces_between_special_tokens,\n output_word_offsets=output_word_offsets,\n output_char_offsets=output_char_offsets,\n )\n\n text = string_output[\"text\"]\n\n clean_up_tokenization_spaces = (\n clean_up_tokenization_spaces\n if clean_up_tokenization_spaces is not None\n else self.clean_up_tokenization_spaces\n )\n if clean_up_tokenization_spaces:\n text = self.clean_up_tokenization(text)\n\n if output_word_offsets or output_char_offsets:\n return Wav2Vec2CTCTokenizerOutput(\n text=text,\n char_offsets=string_output[\"char_offsets\"],\n word_offsets=string_output[\"word_offsets\"],\n )\n else:\n return text" } ]
[ "function_empty", "Development" ]
[ "transformers.utils.generic._get_frameworks_and_test_func", "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer.decode", "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer.convert_tokens_to_string", "transformers.models.wav2vec2.tokenization_wav2vec2.Wav2Vec2CTCTokenizer._decode" ]
Python
1
4
{ "total_num": 102, "base_passed_num": 79 }
[ "transformers.src.transformers.audio_utils.hertz_to_mel", "transformers.src.transformers.audio_utils.mel_filter_bank", "transformers.src.transformers.audio_utils.window_function", "transformers.src.transformers.models.whisper.feature_extraction_whisper.WhisperFeatureExtractor::_np_extract_fbank_features" ]
transformers
[ "transformers/audio_utils.py", "transformers/audio_utils.py", "transformers/models/whisper/feature_extraction_whisper.py", "transformers/audio_utils.py", "transformers/models/whisper/feature_extraction_whisper.py" ]
[ "tests/models/whisper/test_feature_extraction_whisper.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 1127, "func_start_lineno": 26, "func_end_lineno": 59, "func_code": "def hertz_to_mel(freq: Union[float, np.ndarray], mel_scale: str = \"htk\") -> Union[float, np.ndarray]:\n \"\"\"\n Convert frequency from hertz to mels.\n\n Args:\n freq (`float` or `np.ndarray`):\n The frequency, or multiple frequencies, in hertz (Hz).\n mel_scale (`str`, *optional*, defaults to `\"htk\"`):\n The mel frequency scale to use, `\"htk\"`, `\"kaldi\"` or `\"slaney\"`.\n\n Returns:\n `float` or `np.ndarray`: The frequencies on the mel scale.\n \"\"\"\n\n if mel_scale not in [\"slaney\", \"htk\", \"kaldi\"]:\n raise ValueError('mel_scale should be one of \"htk\", \"slaney\" or \"kaldi\".')\n\n if mel_scale == \"htk\":\n return 2595.0 * np.log10(1.0 + (freq / 700.0))\n elif mel_scale == \"kaldi\":\n return 1127.0 * np.log(1.0 + (freq / 700.0))\n\n min_log_hertz = 1000.0\n min_log_mel = 15.0\n logstep = 27.0 / np.log(6.4)\n mels = 3.0 * freq / 200.0\n\n if isinstance(freq, np.ndarray):\n log_region = freq >= min_log_hertz\n mels[log_region] = min_log_mel + np.log(freq[log_region] / min_log_hertz) * logstep\n elif freq >= min_log_hertz:\n mels = min_log_mel + np.log(freq / min_log_hertz) * logstep\n\n return mels" }, { "class_start_lineno": 1, "class_end_lineno": 1127, "func_start_lineno": 218, "func_end_lineno": 303, "func_code": "def mel_filter_bank(\n num_frequency_bins: int,\n num_mel_filters: int,\n min_frequency: float,\n max_frequency: float,\n sampling_rate: int,\n norm: Optional[str] = None,\n mel_scale: str = \"htk\",\n triangularize_in_mel_space: bool = False,\n) -> np.ndarray:\n \"\"\"\n Creates a frequency bin conversion matrix used to obtain a mel spectrogram. This is called a *mel filter bank*, and\n various implementation exist, which differ in the number of filters, the shape of the filters, the way the filters\n are spaced, the bandwidth of the filters, and the manner in which the spectrum is warped. The goal of these\n features is to approximate the non-linear human perception of the variation in pitch with respect to the frequency.\n\n Different banks of mel filters were introduced in the literature. The following variations are supported:\n\n - MFCC FB-20: introduced in 1980 by Davis and Mermelstein, it assumes a sampling frequency of 10 kHz and a speech\n bandwidth of `[0, 4600]` Hz.\n - MFCC FB-24 HTK: from the Cambridge HMM Toolkit (HTK) (1995) uses a filter bank of 24 filters for a speech\n bandwidth of `[0, 8000]` Hz. This assumes sampling rate ≥ 16 kHz.\n - MFCC FB-40: from the Auditory Toolbox for MATLAB written by Slaney in 1998, assumes a sampling rate of 16 kHz and\n speech bandwidth of `[133, 6854]` Hz. This version also includes area normalization.\n - HFCC-E FB-29 (Human Factor Cepstral Coefficients) of Skowronski and Harris (2004), assumes a sampling rate of\n 12.5 kHz and speech bandwidth of `[0, 6250]` Hz.\n\n This code is adapted from *torchaudio* and *librosa*. Note that the default parameters of torchaudio's\n `melscale_fbanks` implement the `\"htk\"` filters while librosa uses the `\"slaney\"` implementation.\n\n Args:\n num_frequency_bins (`int`):\n Number of frequencies used to compute the spectrogram (should be the same as in `stft`).\n num_mel_filters (`int`):\n Number of mel filters to generate.\n min_frequency (`float`):\n Lowest frequency of interest in Hz.\n max_frequency (`float`):\n Highest frequency of interest in Hz. 
This should not exceed `sampling_rate / 2`.\n sampling_rate (`int`):\n Sample rate of the audio waveform.\n norm (`str`, *optional*):\n If `\"slaney\"`, divide the triangular mel weights by the width of the mel band (area normalization).\n mel_scale (`str`, *optional*, defaults to `\"htk\"`):\n The mel frequency scale to use, `\"htk\"`, `\"kaldi\"` or `\"slaney\"`.\n triangularize_in_mel_space (`bool`, *optional*, defaults to `False`):\n If this option is enabled, the triangular filter is applied in mel space rather than frequency space. This\n should be set to `true` in order to get the same results as `torchaudio` when computing mel filters.\n\n Returns:\n `np.ndarray` of shape (`num_frequency_bins`, `num_mel_filters`): Triangular filter bank matrix. This is a\n projection matrix to go from a spectrogram to a mel spectrogram.\n \"\"\"\n if norm is not None and norm != \"slaney\":\n raise ValueError('norm must be one of None or \"slaney\"')\n\n # center points of the triangular mel filters\n mel_min = hertz_to_mel(min_frequency, mel_scale=mel_scale)\n mel_max = hertz_to_mel(max_frequency, mel_scale=mel_scale)\n mel_freqs = np.linspace(mel_min, mel_max, num_mel_filters + 2)\n filter_freqs = mel_to_hertz(mel_freqs, mel_scale=mel_scale)\n\n if triangularize_in_mel_space:\n # frequencies of FFT bins in Hz, but filters triangularized in mel space\n fft_bin_width = sampling_rate / (num_frequency_bins * 2)\n fft_freqs = hertz_to_mel(fft_bin_width * np.arange(num_frequency_bins), mel_scale=mel_scale)\n filter_freqs = mel_freqs\n else:\n # frequencies of FFT bins in Hz\n fft_freqs = np.linspace(0, sampling_rate // 2, num_frequency_bins)\n\n mel_filters = _create_triangular_filter_bank(fft_freqs, filter_freqs)\n\n if norm is not None and norm == \"slaney\":\n # Slaney-style mel is scaled to be approx constant energy per channel\n enorm = 2.0 / (filter_freqs[2 : num_mel_filters + 2] - filter_freqs[:num_mel_filters])\n mel_filters *= np.expand_dims(enorm, 0)\n\n if (mel_filters.max(axis=0) == 0.0).any():\n warnings.warn(\n \"At least one mel filter has all zero values. \"\n f\"The value for `num_mel_filters` ({num_mel_filters}) may be set too high. 
\"\n f\"Or, the value for `num_frequency_bins` ({num_frequency_bins}) may be set too low.\"\n )\n\n return mel_filters" }, { "class_start_lineno": 36, "class_end_lineno": 324, "func_start_lineno": 64, "func_end_lineno": 96, "func_code": " def __init__(\n self,\n feature_size=80,\n sampling_rate=16000,\n hop_length=160,\n chunk_length=30,\n n_fft=400,\n padding_value=0.0,\n return_attention_mask=False, # pad inputs to max length with silence token (zero) and no attention mask\n **kwargs,\n ):\n super().__init__(\n feature_size=feature_size,\n sampling_rate=sampling_rate,\n padding_value=padding_value,\n return_attention_mask=return_attention_mask,\n **kwargs,\n )\n self.n_fft = n_fft\n self.hop_length = hop_length\n self.chunk_length = chunk_length\n self.n_samples = chunk_length * sampling_rate\n self.nb_max_frames = self.n_samples // hop_length\n self.sampling_rate = sampling_rate\n self.mel_filters = mel_filter_bank(\n num_frequency_bins=1 + n_fft // 2,\n num_mel_filters=feature_size,\n min_frequency=0.0,\n max_frequency=8000.0,\n sampling_rate=sampling_rate,\n norm=\"slaney\",\n mel_scale=\"slaney\",\n )" }, { "class_start_lineno": 1, "class_end_lineno": 1127, "func_start_lineno": 319, "func_end_lineno": 379, "func_code": "def window_function(\n window_length: int,\n name: str = \"hann\",\n periodic: bool = True,\n frame_length: Optional[int] = None,\n center: bool = True,\n) -> np.ndarray:\n \"\"\"\n Returns an array containing the specified window. This window is intended to be used with `stft`.\n\n The following window types are supported:\n\n - `\"boxcar\"`: a rectangular window\n - `\"hamming\"`: the Hamming window\n - `\"hann\"`: the Hann window\n - `\"povey\"`: the Povey window\n\n Args:\n window_length (`int`):\n The length of the window in samples.\n name (`str`, *optional*, defaults to `\"hann\"`):\n The name of the window function.\n periodic (`bool`, *optional*, defaults to `True`):\n Whether the window is periodic or symmetric.\n frame_length (`int`, *optional*):\n The length of the analysis frames in samples. Provide a value for `frame_length` if the window is smaller\n than the frame length, so that it will be zero-padded.\n center (`bool`, *optional*, defaults to `True`):\n Whether to center the window inside the FFT buffer. 
Only used when `frame_length` is provided.\n\n Returns:\n `np.ndarray` of shape `(window_length,)` or `(frame_length,)` containing the window.\n \"\"\"\n length = window_length + 1 if periodic else window_length\n\n if name == \"boxcar\":\n window = np.ones(length)\n elif name in [\"hamming\", \"hamming_window\"]:\n window = np.hamming(length)\n elif name in [\"hann\", \"hann_window\"]:\n window = np.hanning(length)\n elif name in [\"povey\"]:\n window = np.power(np.hanning(length), 0.85)\n else:\n raise ValueError(f\"Unknown window function '{name}'\")\n\n if periodic:\n window = window[:-1]\n\n if frame_length is None:\n return window\n\n if window_length > frame_length:\n raise ValueError(\n f\"Length of the window ({window_length}) may not be larger than frame_length ({frame_length})\"\n )\n\n padded_window = np.zeros(frame_length)\n offset = (frame_length - window_length) // 2 if center else 0\n padded_window[offset : offset + window_length] = window\n return padded_window" }, { "class_start_lineno": 36, "class_end_lineno": 324, "func_start_lineno": 98, "func_end_lineno": 125, "func_code": " def _np_extract_fbank_features(self, waveform_batch: np.array, device: str) -> np.ndarray:\n \"\"\"\n Compute the log-mel spectrogram of the provided audio, gives similar results to Whisper's original torch\n implementation with 1e-5 tolerance.\n \"\"\"\n if device != \"cpu\":\n raise ValueError(\n f\"Got device `{device}` for feature extraction, but feature extraction on CUDA accelerator \"\n \"devices requires torch, which is not installed. Either set `device='cpu'`, or \"\n \"install torch according to the official instructions: https://pytorch.org/get-started/locally/\"\n )\n log_spec_batch = []\n for waveform in waveform_batch:\n log_spec = spectrogram(\n waveform,\n window_function(self.n_fft, \"hann\"),\n frame_length=self.n_fft,\n hop_length=self.hop_length,\n power=2.0,\n mel_filters=self.mel_filters,\n log_mel=\"log10\",\n )\n log_spec = log_spec[:, :-1]\n log_spec = np.maximum(log_spec, log_spec.max() - 8.0)\n log_spec = (log_spec + 4.0) / 4.0\n log_spec_batch.append(log_spec)\n log_spec_batch = np.array(log_spec_batch)\n return log_spec_batch" } ]
[ "function_empty", "Development" ]
[ "transformers.audio_utils.hertz_to_mel", "transformers.audio_utils.mel_filter_bank", "transformers.models.whisper.feature_extraction_whisper.WhisperFeatureExtractor.__init__", "transformers.audio_utils.window_function", "transformers.models.whisper.feature_extraction_whisper.WhisperFeatureExtractor._np_extract_fbank_features" ]
Python
3
4
{ "total_num": 19, "base_passed_num": 8 }
[ "transformers.src.transformers.utils.import_utils.create_import_structure_from_path", "transformers.src.transformers.utils.import_utils.define_import_structure" ]
transformers
[ "transformers/utils/import_utils.py", "transformers/utils/import_utils.py" ]
[ "tests/utils/test_dynamic_module_utils.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 2158, "func_start_lineno": 1846, "func_end_lineno": 2037, "func_code": "def create_import_structure_from_path(module_path):\n \"\"\"\n This method takes the path to a file/a folder and returns the import structure.\n If a file is given, it will return the import structure of the parent folder.\n\n Import structures are designed to be digestible by `_LazyModule` objects. They are\n created from the __all__ definitions in each files as well as the `@export` decorators\n above methods and objects.\n\n The import structure allows explicit display of the required backends for a given object.\n These backends are specified in two ways:\n\n 1. Through their `@export`, if they are exported with that decorator. This `@export` decorator\n accepts a `backend` tuple kwarg mentioning which backends are required to run this object.\n\n 2. If an object is defined in a file with \"default\" backends, it will have, at a minimum, this\n backend specified. The default backends are defined according to the filename:\n\n - If a file is named like `modeling_*.py`, it will have a `torch` backend\n - If a file is named like `modeling_tf_*.py`, it will have a `tf` backend\n - If a file is named like `modeling_flax_*.py`, it will have a `flax` backend\n - If a file is named like `tokenization_*_fast.py`, it will have a `tokenizers` backend\n\n Backends serve the purpose of displaying a clear error message to the user in case the backends are not installed.\n Should an object be imported without its required backends being in the environment, any attempt to use the\n object will raise an error mentioning which backend(s) should be added to the environment in order to use\n that object.\n\n Here's an example of an input import structure at the src.transformers.models level:\n\n {\n 'albert': {\n frozenset(): {\n 'configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'}\n },\n frozenset({'tokenizers'}): {\n 'tokenization_albert_fast': {'AlbertTokenizerFast'}\n },\n },\n 'align': {\n frozenset(): {\n 'configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},\n 'processing_align': {'AlignProcessor'}\n },\n },\n 'altclip': {\n frozenset(): {\n 'configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},\n 'processing_altclip': {'AltCLIPProcessor'},\n }\n }\n }\n \"\"\"\n import_structure = {}\n if os.path.isdir(module_path):\n directory = module_path\n adjacent_modules = []\n\n for f in os.listdir(module_path):\n if f != \"__pycache__\" and os.path.isdir(os.path.join(module_path, f)):\n import_structure[f] = create_import_structure_from_path(os.path.join(module_path, f))\n\n elif not os.path.isdir(os.path.join(directory, f)):\n adjacent_modules.append(f)\n\n else:\n directory = os.path.dirname(module_path)\n adjacent_modules = [f for f in os.listdir(directory) if not os.path.isdir(os.path.join(directory, f))]\n\n # We're only taking a look at files different from __init__.py\n # We could theoretically export things directly from the __init__.py\n # files, but this is not supported at this time.\n if \"__init__.py\" in adjacent_modules:\n adjacent_modules.remove(\"__init__.py\")\n\n module_requirements = {}\n for module_name in adjacent_modules:\n # Only modules ending in `.py` are accepted here.\n if not module_name.endswith(\".py\"):\n continue\n\n with open(os.path.join(directory, module_name)) as f:\n file_content = f.read()\n\n # Remove the .py suffix\n module_name = module_name[:-3]\n\n previous_line = \"\"\n 
previous_index = 0\n\n # Some files have some requirements by default.\n # For example, any file named `modeling_tf_xxx.py`\n # should have TensorFlow as a required backend.\n base_requirements = ()\n for string_check, requirements in BASE_FILE_REQUIREMENTS.items():\n if string_check(module_name):\n base_requirements = requirements\n break\n\n # Objects that have a `@export` assigned to them will get exported\n # with the backends specified in the decorator as well as the file backends.\n exported_objects = set()\n if \"@export\" in file_content:\n lines = file_content.split(\"\\n\")\n for index, line in enumerate(lines):\n # This allows exporting items with other decorators. We'll take a look\n # at the line that follows at the same indentation level.\n if line.startswith((\" \", \"\\t\", \"@\", \")\")) and not line.startswith(\"@export\"):\n continue\n\n # Skipping line enables putting whatever we want between the\n # export() call and the actual class/method definition.\n # This is what enables having # Copied from statements, docs, etc.\n skip_line = False\n\n if \"@export\" in previous_line:\n skip_line = False\n\n # Backends are defined on the same line as export\n if \"backends\" in previous_line:\n backends_string = previous_line.split(\"backends=\")[1].split(\"(\")[1].split(\")\")[0]\n backends = tuple(sorted([b.strip(\"'\\\",\") for b in backends_string.split(\", \") if b]))\n\n # Backends are defined in the lines following export, for example such as:\n # @export(\n # backends=(\n # \"sentencepiece\",\n # \"torch\",\n # \"tf\",\n # )\n # )\n #\n # or\n #\n # @export(\n # backends=(\n # \"sentencepiece\", \"tf\"\n # )\n # )\n elif \"backends\" in lines[previous_index + 1]:\n backends = []\n for backend_line in lines[previous_index:index]:\n if \"backends\" in backend_line:\n backend_line = backend_line.split(\"=\")[1]\n if '\"' in backend_line or \"'\" in backend_line:\n if \", \" in backend_line:\n backends.extend(backend.strip(\"()\\\"', \") for backend in backend_line.split(\", \"))\n else:\n backends.append(backend_line.strip(\"()\\\"', \"))\n\n # If the line is only a ')', then we reached the end of the backends and we break.\n if backend_line.strip() == \")\":\n break\n backends = tuple(backends)\n\n # No backends are registered for export\n else:\n backends = ()\n\n backends = frozenset(backends + base_requirements)\n if backends not in module_requirements:\n module_requirements[backends] = {}\n if module_name not in module_requirements[backends]:\n module_requirements[backends][module_name] = set()\n\n if not line.startswith(\"class\") and not line.startswith(\"def\"):\n skip_line = True\n else:\n start_index = 6 if line.startswith(\"class\") else 4\n object_name = line[start_index:].split(\"(\")[0].strip(\":\")\n module_requirements[backends][module_name].add(object_name)\n exported_objects.add(object_name)\n\n if not skip_line:\n previous_line = line\n previous_index = index\n\n # All objects that are in __all__ should be exported by default.\n # These objects are exported with the file backends.\n if \"__all__\" in file_content:\n for _all_object in fetch__all__(file_content):\n if _all_object not in exported_objects:\n backends = frozenset(base_requirements)\n if backends not in module_requirements:\n module_requirements[backends] = {}\n if module_name not in module_requirements[backends]:\n module_requirements[backends][module_name] = set()\n\n module_requirements[backends][module_name].add(_all_object)\n\n import_structure = {**module_requirements, 
**import_structure}\n return import_structure" }, { "class_start_lineno": 1, "class_end_lineno": 2158, "func_start_lineno": 2136, "func_end_lineno": 2158, "func_code": "def define_import_structure(module_path: str) -> IMPORT_STRUCTURE_T:\n \"\"\"\n This method takes a module_path as input and creates an import structure digestible by a _LazyModule.\n\n Here's an example of an output import structure at the src.transformers.models level:\n\n {\n frozenset({'tokenizers'}): {\n 'albert.tokenization_albert_fast': {'AlbertTokenizerFast'}\n },\n frozenset(): {\n 'albert.configuration_albert': {'AlbertConfig', 'AlbertOnnxConfig'},\n 'align.processing_align': {'AlignProcessor'},\n 'align.configuration_align': {'AlignConfig', 'AlignTextConfig', 'AlignVisionConfig'},\n 'altclip.configuration_altclip': {'AltCLIPConfig', 'AltCLIPTextConfig', 'AltCLIPVisionConfig'},\n 'altclip.processing_altclip': {'AltCLIPProcessor'}\n }\n }\n\n The import structure is a dict defined with frozensets as keys, and dicts of strings to sets of objects.\n \"\"\"\n import_structure = create_import_structure_from_path(module_path)\n return spread_import_structure(import_structure)" } ]
[ "function_empty" ]
[ "transformers.utils.import_utils.create_import_structure_from_path", "transformers.utils.import_utils.define_import_structure" ]
Python
2
2
{ "total_num": 10, "base_passed_num": 0 }
[ "transformers.src.transformers.modeling_rope_utils._compute_default_rope_parameters", "transformers.src.transformers.modeling_rope_utils._compute_linear_scaling_rope_parameters", "transformers.src.transformers.modeling_rope_utils._compute_llama3_parameters" ]
transformers
[ "transformers/modeling_rope_utils.py", "transformers/modeling_rope_utils.py", "transformers/modeling_rope_utils.py" ]
[ "tests/utils/test_modeling_rope_utils.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 29, "func_end_lineno": 68, "func_code": "def _compute_default_rope_parameters(\n config: Optional[PretrainedConfig] = None,\n device: Optional[\"torch.device\"] = None,\n seq_len: Optional[int] = None,\n **rope_kwargs,\n) -> Tuple[\"torch.Tensor\", float]:\n \"\"\"\n Computes the inverse frequencies according to the original RoPE implementation\n Args:\n config ([`~transformers.PretrainedConfig`]):\n The model configuration.\n device (`torch.device`):\n The device to use for initialization of the inverse frequencies.\n seq_len (`int`, *optional*):\n The current sequence length. Unused for this type of RoPE.\n rope_kwargs (`Dict`, *optional*):\n BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.\n Returns:\n Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the\n post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).\n \"\"\"\n if config is not None and len(rope_kwargs) > 0:\n raise ValueError(\n \"Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in \"\n f\"`_compute_default_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}\"\n )\n if len(rope_kwargs) > 0:\n base = rope_kwargs[\"base\"]\n dim = rope_kwargs[\"dim\"]\n elif config is not None:\n base = config.rope_theta\n partial_rotary_factor = config.partial_rotary_factor if hasattr(config, \"partial_rotary_factor\") else 1.0\n head_dim = getattr(config, \"head_dim\", config.hidden_size // config.num_attention_heads)\n dim = int(head_dim * partial_rotary_factor)\n\n attention_factor = 1.0 # Unused in this type of RoPE\n\n # Compute the inverse frequencies\n inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2, dtype=torch.int64).float().to(device) / dim))\n return inv_freq, attention_factor" }, { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 71, "func_end_lineno": 109, "func_code": "def _compute_linear_scaling_rope_parameters(\n config: Optional[PretrainedConfig] = None,\n device: Optional[\"torch.device\"] = None,\n seq_len: Optional[int] = None,\n **rope_kwargs,\n) -> Tuple[\"torch.Tensor\", float]:\n \"\"\"\n Computes the inverse frequencies with linear scaling. Credits to the Reddit user /u/kaiokendev\n Args:\n config ([`~transformers.PretrainedConfig`]):\n The model configuration.\n device (`torch.device`):\n The device to use for initialization of the inverse frequencies.\n seq_len (`int`, *optional*):\n The current sequence length. 
Unused for this type of RoPE.\n rope_kwargs (`Dict`, *optional*):\n BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.\n Returns:\n Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the\n post-processing scaling factor applied to the computed cos/sin (unused in this type of RoPE).\n \"\"\"\n if config is not None and len(rope_kwargs) > 0:\n raise ValueError(\n \"Unexpected arguments: `**rope_kwargs` and `config` are mutually exclusive in \"\n f\"`_compute_linear_scaling_rope_parameters`, got `rope_kwargs`={rope_kwargs} and `config`={config}\"\n )\n if len(rope_kwargs) > 0:\n factor = rope_kwargs[\"factor\"]\n elif config is not None:\n factor = config.rope_scaling[\"factor\"]\n\n # Gets the default RoPE parameters\n inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs)\n\n # Then applies linear scaling to the frequencies.\n # NOTE: originally, scaling was applied to the position_ids. However, we get `embs = inv_freq @ position_ids`, so\n # applying scaling to the inverse frequencies is equivalent.\n inv_freq /= factor\n return inv_freq, attention_factor" }, { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 307, "func_end_lineno": 347, "func_code": "def _compute_llama3_parameters(\n config: PretrainedConfig, device: \"torch.device\", seq_len: Optional[int] = None, **rope_kwargs\n) -> Tuple[\"torch.Tensor\", float]:\n \"\"\"\n Computes the inverse frequencies for llama 3.1.\n\n Args:\n config ([`~transformers.PretrainedConfig`]):\n The model configuration.\n device (`torch.device`):\n The device to use for initialization of the inverse frequencies.\n seq_len (`int`, *optional*):\n The current sequence length. Unused for this type of RoPE.\n rope_kwargs (`Dict`, *optional*):\n BC compatibility with the previous RoPE class instantiation, will be removed in v4.45.\n Returns:\n Tuple of (`torch.Tensor`, `float`), containing the inverse frequencies for the RoPE embeddings and the\n post-processing scaling factor applied to the computed cos/sin.\n \"\"\"\n # Gets the default RoPE parameters\n inv_freq, attention_factor = _compute_default_rope_parameters(config, device, seq_len, **rope_kwargs)\n\n factor = config.rope_scaling[\"factor\"] # `8` in the original implementation\n low_freq_factor = config.rope_scaling[\"low_freq_factor\"] # `1` in the original implementation\n high_freq_factor = config.rope_scaling[\"high_freq_factor\"] # `4` in the original implementation\n old_context_len = config.rope_scaling[\"original_max_position_embeddings\"] # `8192` in the original implementation\n\n low_freq_wavelen = old_context_len / low_freq_factor\n high_freq_wavelen = old_context_len / high_freq_factor\n\n wavelen = 2 * math.pi / inv_freq\n # wavelen < high_freq_wavelen: do nothing\n # wavelen > low_freq_wavelen: divide by factor\n inv_freq_llama = torch.where(wavelen > low_freq_wavelen, inv_freq / factor, inv_freq)\n # otherwise: interpolate between the two, using a smooth factor\n smooth_factor = (old_context_len / wavelen - low_freq_factor) / (high_freq_factor - low_freq_factor)\n smoothed_inv_freq = (1 - smooth_factor) * inv_freq_llama / factor + smooth_factor * inv_freq_llama\n is_medium_freq = ~(wavelen < high_freq_wavelen) * ~(wavelen > low_freq_wavelen)\n inv_freq_llama = torch.where(is_medium_freq, smoothed_inv_freq, inv_freq_llama)\n\n return inv_freq_llama, attention_factor" } ]
[ "function_empty" ]
[ "transformers.modeling_rope_utils._compute_default_rope_parameters", "transformers.modeling_rope_utils._compute_linear_scaling_rope_parameters", "transformers.modeling_rope_utils._compute_llama3_parameters" ]
Python
3
3
{ "total_num": 10, "base_passed_num": 2 }
[ "UniRef.detectron2.structures.boxes.Boxes::__getitem__", "UniRef.detectron2.structures.instances.Instances::set", "UniRef.detectron2.structures.instances.Instances::__getitem__" ]
UniRef
[ "detectron2/structures/boxes.py", "detectron2/structures/instances.py", "detectron2/structures/instances.py" ]
[ "tests/structures/test_instances.py" ]
[ { "class_start_lineno": 130, "class_end_lineno": 307, "func_start_lineno": 213, "func_end_lineno": 235, "func_code": " def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a BoolTensor\n\n Returns:\n Boxes: Create a new :class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)" }, { "class_start_lineno": 7, "class_end_lineno": 192, "func_start_lineno": 68, "func_end_lineno": 79, "func_code": " def set(self, name: str, value: Any) -> None:\n \"\"\"\n Set the field named `name` to `value`.\n The length of `value` must be the number of instances,\n and must agree with other existing fields in this object.\n \"\"\"\n data_len = len(value)\n if len(self._fields):\n assert (\n len(self) == data_len\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\n self._fields[name] = value" }, { "class_start_lineno": 7, "class_end_lineno": 192, "func_start_lineno": 122, "func_end_lineno": 140, "func_code": " def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Instances\":\n \"\"\"\n Args:\n item: an index-like object and will be used to index all the fields.\n\n Returns:\n If `item` is a string, return the data in the corresponding field.\n Otherwise, returns an `Instances` where all fields are indexed by `item`.\n \"\"\"\n if type(item) == int:\n if item >= len(self) or item < -len(self):\n raise IndexError(\"Instances index out of range!\")\n else:\n item = slice(item, None, len(self))\n\n ret = Instances(self._image_size)\n for k, v in self._fields.items():\n ret.set(k, v[item])\n return ret" } ]
[ "function_empty" ]
[ "detectron2.structures.boxes.Boxes.__getitem__", "detectron2.structures.instances.Instances.set", "detectron2.structures.instances.Instances.__getitem__" ]
Python
3
3
{ "total_num": 10, "base_passed_num": 0 }
[ "UniRef.detectron2.modeling.roi_heads.fast_rcnn._log_classification_stats", "UniRef.detectron2.modeling.box_regression.Box2BoxTransform::get_deltas", "UniRef.detectron2.modeling.box_regression.Box2BoxTransformRotated::get_deltas", "UniRef.detectron2.modeling.box_regression._dense_box_regression_loss", "UniRef.detectron2.modeling.roi_heads.fast_rcnn.FastRCNNOutputLayers::losses" ]
UniRef
[ "detectron2/modeling/roi_heads/fast_rcnn.py", "detectron2/modeling/box_regression.py", "detectron2/modeling/box_regression.py", "detectron2/modeling/box_regression.py", "detectron2/modeling/roi_heads/fast_rcnn.py" ]
[ "tests/modeling/test_fast_rcnn.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 462, "func_start_lineno": 87, "func_end_lineno": 114, "func_code": "def _log_classification_stats(pred_logits, gt_classes, prefix=\"fast_rcnn\"):\n \"\"\"\n Log the classification metrics to EventStorage.\n\n Args:\n pred_logits: Rx(K+1) logits. The last column is for background class.\n gt_classes: R labels\n \"\"\"\n num_instances = gt_classes.numel()\n if num_instances == 0:\n return\n pred_classes = pred_logits.argmax(dim=1)\n bg_class_ind = pred_logits.shape[1] - 1\n\n fg_inds = (gt_classes >= 0) & (gt_classes < bg_class_ind)\n num_fg = fg_inds.nonzero().numel()\n fg_gt_classes = gt_classes[fg_inds]\n fg_pred_classes = pred_classes[fg_inds]\n\n num_false_negative = (fg_pred_classes == bg_class_ind).nonzero().numel()\n num_accurate = (pred_classes == gt_classes).nonzero().numel()\n fg_num_accurate = (fg_pred_classes == fg_gt_classes).nonzero().numel()\n\n storage = get_event_storage()\n storage.put_scalar(f\"{prefix}/cls_accuracy\", num_accurate / num_instances)\n if num_fg > 0:\n storage.put_scalar(f\"{prefix}/fg_cls_accuracy\", fg_num_accurate / num_fg)\n storage.put_scalar(f\"{prefix}/false_negative\", num_false_negative / num_fg)" }, { "class_start_lineno": 21, "class_end_lineno": 116, "func_start_lineno": 43, "func_end_lineno": 76, "func_code": " def get_deltas(self, src_boxes, target_boxes):\n \"\"\"\n Get box regression transformation deltas (dx, dy, dw, dh) that can be used\n to transform the `src_boxes` into the `target_boxes`. That is, the relation\n ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless\n any delta is too large and is clamped).\n\n Args:\n src_boxes (Tensor): source boxes, e.g., object proposals\n target_boxes (Tensor): target of the transformation, e.g., ground-truth\n boxes.\n \"\"\"\n assert isinstance(src_boxes, torch.Tensor), type(src_boxes)\n assert isinstance(target_boxes, torch.Tensor), type(target_boxes)\n\n src_widths = src_boxes[:, 2] - src_boxes[:, 0]\n src_heights = src_boxes[:, 3] - src_boxes[:, 1]\n src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths\n src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights\n\n target_widths = target_boxes[:, 2] - target_boxes[:, 0]\n target_heights = target_boxes[:, 3] - target_boxes[:, 1]\n target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths\n target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights\n\n wx, wy, ww, wh = self.weights\n dx = wx * (target_ctr_x - src_ctr_x) / src_widths\n dy = wy * (target_ctr_y - src_ctr_y) / src_heights\n dw = ww * torch.log(target_widths / src_widths)\n dh = wh * torch.log(target_heights / src_heights)\n\n deltas = torch.stack((dx, dy, dw, dh), dim=1)\n assert (src_widths > 0).all().item(), \"Input boxes to Box2BoxTransform are not valid!\"\n return deltas" }, { "class_start_lineno": 120, "class_end_lineno": 227, "func_start_lineno": 145, "func_end_lineno": 181, "func_code": " def get_deltas(self, src_boxes, target_boxes):\n \"\"\"\n Get box regression transformation deltas (dx, dy, dw, dh, da) that can be used\n to transform the `src_boxes` into the `target_boxes`. 
That is, the relation\n ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless\n any delta is too large and is clamped).\n\n Args:\n src_boxes (Tensor): Nx5 source boxes, e.g., object proposals\n target_boxes (Tensor): Nx5 target of the transformation, e.g., ground-truth\n boxes.\n \"\"\"\n assert isinstance(src_boxes, torch.Tensor), type(src_boxes)\n assert isinstance(target_boxes, torch.Tensor), type(target_boxes)\n\n src_ctr_x, src_ctr_y, src_widths, src_heights, src_angles = torch.unbind(src_boxes, dim=1)\n\n target_ctr_x, target_ctr_y, target_widths, target_heights, target_angles = torch.unbind(\n target_boxes, dim=1\n )\n\n wx, wy, ww, wh, wa = self.weights\n dx = wx * (target_ctr_x - src_ctr_x) / src_widths\n dy = wy * (target_ctr_y - src_ctr_y) / src_heights\n dw = ww * torch.log(target_widths / src_widths)\n dh = wh * torch.log(target_heights / src_heights)\n # Angles of deltas are in radians while angles of boxes are in degrees.\n # the conversion to radians serve as a way to normalize the values\n da = target_angles - src_angles\n da = (da + 180.0) % 360.0 - 180.0 # make it in [-180, 180)\n da *= wa * math.pi / 180.0\n\n deltas = torch.stack((dx, dy, dw, dh, da), dim=1)\n assert (\n (src_widths > 0).all().item()\n ), \"Input boxes to Box2BoxTransformRotated are not valid!\"\n return deltas" }, { "class_start_lineno": 1, "class_end_lineno": 369, "func_start_lineno": 310, "func_end_lineno": 369, "func_code": "def _dense_box_regression_loss(\n anchors: List[Union[Boxes, torch.Tensor]],\n box2box_transform: Box2BoxTransform,\n pred_anchor_deltas: List[torch.Tensor],\n gt_boxes: List[torch.Tensor],\n fg_mask: torch.Tensor,\n box_reg_loss_type=\"smooth_l1\",\n smooth_l1_beta=0.0,\n):\n \"\"\"\n Compute loss for dense multi-level box regression.\n Loss is accumulated over ``fg_mask``.\n\n Args:\n anchors: #lvl anchor boxes, each is (HixWixA, 4)\n pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4)\n gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A))\n fg_mask: the foreground boolean mask of shape (N, R) to compute loss on\n box_reg_loss_type (str): Loss type to use. Supported losses: \"smooth_l1\", \"giou\",\n \"diou\", \"ciou\".\n smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to\n use L1 loss. 
Only used when `box_reg_loss_type` is \"smooth_l1\"\n \"\"\"\n if isinstance(anchors[0], Boxes):\n anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)\n else:\n anchors = cat(anchors)\n if box_reg_loss_type == \"smooth_l1\":\n gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes]\n gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)\n loss_box_reg = smooth_l1_loss(\n cat(pred_anchor_deltas, dim=1)[fg_mask],\n gt_anchor_deltas[fg_mask],\n beta=smooth_l1_beta,\n reduction=\"sum\",\n )\n elif box_reg_loss_type == \"giou\":\n pred_boxes = [\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\n ]\n loss_box_reg = giou_loss(\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\n )\n elif box_reg_loss_type == \"diou\":\n pred_boxes = [\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\n ]\n loss_box_reg = diou_loss(\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\n )\n elif box_reg_loss_type == \"ciou\":\n pred_boxes = [\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\n ]\n loss_box_reg = ciou_loss(\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\n )\n else:\n raise ValueError(f\"Invalid dense box regression loss type '{box_reg_loss_type}'\")\n return loss_box_reg" }, { "class_start_lineno": 173, "class_end_lineno": 462, "func_start_lineno": 278, "func_end_lineno": 318, "func_code": " def losses(self, predictions, proposals):\n \"\"\"\n Args:\n predictions: return values of :meth:`forward()`.\n proposals (list[Instances]): proposals that match the features that were used\n to compute predictions. The fields ``proposal_boxes``, ``gt_boxes``,\n ``gt_classes`` are expected.\n\n Returns:\n Dict[str, Tensor]: dict of losses\n \"\"\"\n scores, proposal_deltas = predictions\n\n # parse classification outputs\n gt_classes = (\n cat([p.gt_classes for p in proposals], dim=0) if len(proposals) else torch.empty(0)\n )\n _log_classification_stats(scores, gt_classes)\n\n # parse box regression outputs\n if len(proposals):\n proposal_boxes = cat([p.proposal_boxes.tensor for p in proposals], dim=0) # Nx4\n assert not proposal_boxes.requires_grad, \"Proposals should not require gradients!\"\n # If \"gt_boxes\" does not exist, the proposals must be all negative and\n # should not be included in regression loss computation.\n # Here we just use proposal_boxes as an arbitrary placeholder because its\n # value won't be used in self.box_reg_loss().\n gt_boxes = cat(\n [(p.gt_boxes if p.has(\"gt_boxes\") else p.proposal_boxes).tensor for p in proposals],\n dim=0,\n )\n else:\n proposal_boxes = gt_boxes = torch.empty((0, 4), device=proposal_deltas.device)\n\n losses = {\n \"loss_cls\": cross_entropy(scores, gt_classes, reduction=\"mean\"),\n \"loss_box_reg\": self.box_reg_loss(\n proposal_boxes, gt_boxes, proposal_deltas, gt_classes\n ),\n }\n return {k: v * self.loss_weight.get(k, 1.0) for k, v in losses.items()}" } ]
[ "function_empty" ]
[ "detectron2.modeling.roi_heads.fast_rcnn._log_classification_stats", "detectron2.modeling.box_regression.Box2BoxTransform.get_deltas", "detectron2.modeling.box_regression.Box2BoxTransformRotated.get_deltas", "detectron2.modeling.box_regression._dense_box_regression_loss", "detectron2.modeling.roi_heads.fast_rcnn.FastRCNNOutputLayers.losses" ]
Python
5
5
{ "total_num": 5, "base_passed_num": 2 }
[ "UniRef.detectron2.data.detection_utils.check_metadata_consistency", "UniRef.detectron2.data.detection_utils.create_keypoint_hflip_indices", "UniRef.detectron2.structures.instances.Instances::set", "UniRef.detectron2.data.detection_utils.annotations_to_instances" ]
UniRef
[ "detectron2/data/detection_utils.py", "detectron2/data/detection_utils.py", "detectron2/structures/instances.py", "detectron2/structures/instances.py", "detectron2/data/detection_utils.py" ]
[ "tests/data/test_detection_utils.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 623, "func_start_lineno": 564, "func_end_lineno": 590, "func_code": "def check_metadata_consistency(key, dataset_names):\n \"\"\"\n Check that the datasets have consistent metadata.\n\n Args:\n key (str): a metadata key\n dataset_names (list[str]): a list of dataset names\n\n Raises:\n AttributeError: if the key does not exist in the metadata\n ValueError: if the given datasets do not have the same metadata values defined by key\n \"\"\"\n if len(dataset_names) == 0:\n return\n logger = logging.getLogger(__name__)\n entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]\n for idx, entry in enumerate(entries_per_dataset):\n if entry != entries_per_dataset[0]:\n logger.error(\n \"Metadata '{}' for dataset '{}' is '{}'\".format(key, dataset_names[idx], str(entry))\n )\n logger.error(\n \"Metadata '{}' for dataset '{}' is '{}'\".format(\n key, dataset_names[0], str(entries_per_dataset[0])\n )\n )\n raise ValueError(\"Datasets have different metadata '{}'!\".format(key))" }, { "class_start_lineno": 1, "class_end_lineno": 623, "func_start_lineno": 509, "func_end_lineno": 531, "func_code": "def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]:\n \"\"\"\n Args:\n dataset_names: list of dataset names\n\n Returns:\n list[int]: a list of size=#keypoints, storing the\n horizontally-flipped keypoint indices.\n \"\"\"\n if isinstance(dataset_names, str):\n dataset_names = [dataset_names]\n\n check_metadata_consistency(\"keypoint_names\", dataset_names)\n check_metadata_consistency(\"keypoint_flip_map\", dataset_names)\n\n meta = MetadataCatalog.get(dataset_names[0])\n names = meta.keypoint_names\n # TODO flip -> hflip\n flip_map = dict(meta.keypoint_flip_map)\n flip_map.update({v: k for k, v in flip_map.items()})\n flipped_names = [i if i not in flip_map else flip_map[i] for i in names]\n flip_indices = [names.index(i) for i in flipped_names]\n return flip_indices" }, { "class_start_lineno": 7, "class_end_lineno": 192, "func_start_lineno": 68, "func_end_lineno": 79, "func_code": " def set(self, name: str, value: Any) -> None:\n \"\"\"\n Set the field named `name` to `value`.\n The length of `value` must be the number of instances,\n and must agree with other existing fields in this object.\n \"\"\"\n data_len = len(value)\n if len(self._fields):\n assert (\n len(self) == data_len\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\n self._fields[name] = value" }, { "class_start_lineno": 7, "class_end_lineno": 192, "func_start_lineno": 57, "func_end_lineno": 61, "func_code": " def __setattr__(self, name: str, val: Any) -> None:\n if name.startswith(\"_\"):\n super().__setattr__(name, val)\n else:\n self.set(name, val)" }, { "class_start_lineno": 1, "class_end_lineno": 623, "func_start_lineno": 369, "func_end_lineno": 441, "func_code": "def annotations_to_instances(annos, image_size, mask_format=\"polygon\"):\n \"\"\"\n Create an :class:`Instances` object used by the models,\n from instance annotations in the dataset dict.\n\n Args:\n annos (list[dict]): a list of instance annotations in one image, each\n element for one instance.\n image_size (tuple): height, width\n\n Returns:\n Instances:\n It will contain fields \"gt_boxes\", \"gt_classes\",\n \"gt_masks\", \"gt_keypoints\", if they can be obtained from `annos`.\n This is the format that builtin models expect.\n \"\"\"\n boxes = (\n np.stack(\n [BoxMode.convert(obj[\"bbox\"], obj[\"bbox_mode\"], 
BoxMode.XYXY_ABS) for obj in annos]\n )\n if len(annos)\n else np.zeros((0, 4))\n )\n target = Instances(image_size)\n target.gt_boxes = Boxes(boxes)\n\n classes = [int(obj[\"category_id\"]) for obj in annos]\n classes = torch.tensor(classes, dtype=torch.int64)\n target.gt_classes = classes\n\n if len(annos) and \"segmentation\" in annos[0]:\n segms = [obj[\"segmentation\"] for obj in annos]\n if mask_format == \"polygon\":\n try:\n masks = PolygonMasks(segms)\n except ValueError as e:\n raise ValueError(\n \"Failed to use mask_format=='polygon' from the given annotations!\"\n ) from e\n else:\n assert mask_format == \"bitmask\", mask_format\n masks = []\n for segm in segms:\n if isinstance(segm, list):\n # polygon\n masks.append(polygons_to_bitmask(segm, *image_size))\n elif isinstance(segm, dict):\n # COCO RLE\n masks.append(mask_util.decode(segm))\n elif isinstance(segm, np.ndarray):\n assert segm.ndim == 2, \"Expect segmentation of 2 dimensions, got {}.\".format(\n segm.ndim\n )\n # mask array\n masks.append(segm)\n else:\n raise ValueError(\n \"Cannot convert segmentation of type '{}' to BitMasks!\"\n \"Supported types are: polygons as list[list[float] or ndarray],\"\n \" COCO-style RLE as a dict, or a binary segmentation mask \"\n \" in a 2D numpy array of shape HxW.\".format(type(segm))\n )\n # torch.from_numpy does not support array with negative stride.\n masks = BitMasks(\n torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])\n )\n target.gt_masks = masks\n\n if len(annos) and \"keypoints\" in annos[0]:\n kpts = [obj.get(\"keypoints\", []) for obj in annos]\n target.gt_keypoints = Keypoints(kpts)\n\n return target" } ]
[ "function_empty" ]
[ "detectron2.data.detection_utils.check_metadata_consistency", "detectron2.data.detection_utils.create_keypoint_hflip_indices", "detectron2.structures.instances.Instances.set", "detectron2.structures.instances.Instances.__setattr__", "detectron2.data.detection_utils.annotations_to_instances" ]
Python
4
4
{ "total_num": 10, "base_passed_num": 7 }
[ "UniRef.detectron2.structures.instances.Instances::set", "UniRef.detectron2.tracking.hungarian_tracker.BaseHungarianTracker::_initialize_extra_fields" ]
UniRef
[ "detectron2/structures/instances.py", "detectron2/structures/instances.py", "detectron2/tracking/hungarian_tracker.py" ]
[ "tests/tracking/test_hungarian_tracker.py" ]
[ { "class_start_lineno": 7, "class_end_lineno": 192, "func_start_lineno": 68, "func_end_lineno": 79, "func_code": " def set(self, name: str, value: Any) -> None:\n \"\"\"\n Set the field named `name` to `value`.\n The length of `value` must be the number of instances,\n and must agree with other existing fields in this object.\n \"\"\"\n data_len = len(value)\n if len(self._fields):\n assert (\n len(self) == data_len\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\n self._fields[name] = value" }, { "class_start_lineno": 7, "class_end_lineno": 192, "func_start_lineno": 57, "func_end_lineno": 61, "func_code": " def __setattr__(self, name: str, val: Any) -> None:\n if name.startswith(\"_\"):\n super().__setattr__(name, val)\n else:\n self.set(name, val)" }, { "class_start_lineno": 16, "class_end_lineno": 176, "func_start_lineno": 74, "func_end_lineno": 95, "func_code": " def _initialize_extra_fields(self, instances: Instances) -> Instances:\n \"\"\"\n If input instances don't have ID, ID_period, lost_frame_count fields,\n this method is used to initialize these fields.\n\n Args:\n instances: D2 Instances, for predictions of the current frame\n Return:\n D2 Instances with extra fields added\n \"\"\"\n if not instances.has(\"ID\"):\n instances.set(\"ID\", [None] * len(instances))\n if not instances.has(\"ID_period\"):\n instances.set(\"ID_period\", [None] * len(instances))\n if not instances.has(\"lost_frame_count\"):\n instances.set(\"lost_frame_count\", [None] * len(instances))\n if self._prev_instances is None:\n instances.ID = list(range(len(instances)))\n self._id_count += len(instances)\n instances.ID_period = [1] * len(instances)\n instances.lost_frame_count = [0] * len(instances)\n return instances" } ]
[ "function_empty" ]
[ "detectron2.structures.instances.Instances.set", "detectron2.structures.instances.Instances.__setattr__", "detectron2.tracking.hungarian_tracker.BaseHungarianTracker._initialize_extra_fields" ]
Python
2
2
{ "total_num": 2, "base_passed_num": 1 }
[ "UniRef.detectron2.data.catalog._DatasetCatalog::get", "UniRef.detectron2.data.datasets.coco.convert_to_coco_dict" ]
UniRef
[ "detectron2/data/catalog.py", "detectron2/data/datasets/coco.py" ]
[ "tests/data/test_coco.py" ]
[ { "class_start_lineno": 13, "class_end_lineno": 78, "func_start_lineno": 40, "func_end_lineno": 58, "func_code": " def get(self, name):\n \"\"\"\n Call the registered function and return its results.\n\n Args:\n name (str): the name that identifies a dataset, e.g. \"coco_2014_train\".\n\n Returns:\n list[dict]: dataset annotations.\n \"\"\"\n try:\n f = self[name]\n except KeyError as e:\n raise KeyError(\n \"Dataset '{}' is not registered! Available datasets are: {}\".format(\n name, \", \".join(list(self.keys()))\n )\n ) from e\n return f()" }, { "class_start_lineno": 1, "class_end_lineno": 541, "func_start_lineno": 308, "func_end_lineno": 444, "func_code": "def convert_to_coco_dict(dataset_name):\n \"\"\"\n Convert an instance detection/segmentation or keypoint detection dataset\n in detectron2's standard format into COCO json format.\n\n Generic dataset description can be found here:\n https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset\n\n COCO data format description can be found here:\n http://cocodataset.org/#format-data\n\n Args:\n dataset_name (str):\n name of the source dataset\n Must be registered in DatastCatalog and in detectron2's standard format.\n Must have corresponding metadata \"thing_classes\"\n Returns:\n coco_dict: serializable dict in COCO json format\n \"\"\"\n\n dataset_dicts = DatasetCatalog.get(dataset_name)\n metadata = MetadataCatalog.get(dataset_name)\n\n # unmap the category mapping ids for COCO\n if hasattr(metadata, \"thing_dataset_id_to_contiguous_id\"):\n reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()}\n reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa\n else:\n reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa\n\n categories = [\n {\"id\": reverse_id_mapper(id), \"name\": name}\n for id, name in enumerate(metadata.thing_classes)\n ]\n\n logger.info(\"Converting dataset dicts into COCO format\")\n coco_images = []\n coco_annotations = []\n\n for image_id, image_dict in enumerate(dataset_dicts):\n coco_image = {\n \"id\": image_dict.get(\"image_id\", image_id),\n \"width\": int(image_dict[\"width\"]),\n \"height\": int(image_dict[\"height\"]),\n \"file_name\": str(image_dict[\"file_name\"]),\n }\n coco_images.append(coco_image)\n\n anns_per_image = image_dict.get(\"annotations\", [])\n for annotation in anns_per_image:\n # create a new dict with only COCO fields\n coco_annotation = {}\n\n # COCO requirement: XYWH box format for axis-align and XYWHA for rotated\n bbox = annotation[\"bbox\"]\n if isinstance(bbox, np.ndarray):\n if bbox.ndim != 1:\n raise ValueError(f\"bbox has to be 1-dimensional. Got shape={bbox.shape}.\")\n bbox = bbox.tolist()\n if len(bbox) not in [4, 5]:\n raise ValueError(f\"bbox has to has length 4 or 5. 
Got {bbox}.\")\n from_bbox_mode = annotation[\"bbox_mode\"]\n to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS\n bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode)\n\n # COCO requirement: instance area\n if \"segmentation\" in annotation:\n # Computing areas for instances by counting the pixels\n segmentation = annotation[\"segmentation\"]\n # TODO: check segmentation type: RLE, BinaryMask or Polygon\n if isinstance(segmentation, list):\n polygons = PolygonMasks([segmentation])\n area = polygons.area()[0].item()\n elif isinstance(segmentation, dict): # RLE\n area = mask_util.area(segmentation).item()\n else:\n raise TypeError(f\"Unknown segmentation type {type(segmentation)}!\")\n else:\n # Computing areas using bounding boxes\n if to_bbox_mode == BoxMode.XYWH_ABS:\n bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS)\n area = Boxes([bbox_xy]).area()[0].item()\n else:\n area = RotatedBoxes([bbox]).area()[0].item()\n\n if \"keypoints\" in annotation:\n keypoints = annotation[\"keypoints\"] # list[int]\n for idx, v in enumerate(keypoints):\n if idx % 3 != 2:\n # COCO's segmentation coordinates are floating points in [0, H or W],\n # but keypoint coordinates are integers in [0, H-1 or W-1]\n # For COCO format consistency we substract 0.5\n # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163\n keypoints[idx] = v - 0.5\n if \"num_keypoints\" in annotation:\n num_keypoints = annotation[\"num_keypoints\"]\n else:\n num_keypoints = sum(kp > 0 for kp in keypoints[2::3])\n\n # COCO requirement:\n # linking annotations to images\n # \"id\" field must start with 1\n coco_annotation[\"id\"] = len(coco_annotations) + 1\n coco_annotation[\"image_id\"] = coco_image[\"id\"]\n coco_annotation[\"bbox\"] = [round(float(x), 3) for x in bbox]\n coco_annotation[\"area\"] = float(area)\n coco_annotation[\"iscrowd\"] = int(annotation.get(\"iscrowd\", 0))\n coco_annotation[\"category_id\"] = int(reverse_id_mapper(annotation[\"category_id\"]))\n\n # Add optional fields\n if \"keypoints\" in annotation:\n coco_annotation[\"keypoints\"] = keypoints\n coco_annotation[\"num_keypoints\"] = num_keypoints\n\n if \"segmentation\" in annotation:\n seg = coco_annotation[\"segmentation\"] = annotation[\"segmentation\"]\n if isinstance(seg, dict): # RLE\n counts = seg[\"counts\"]\n if not isinstance(counts, str):\n # make it json-serializable\n seg[\"counts\"] = counts.decode(\"ascii\")\n\n coco_annotations.append(coco_annotation)\n\n logger.info(\n \"Conversion finished, \"\n f\"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}\"\n )\n\n info = {\n \"date_created\": str(datetime.datetime.now()),\n \"description\": \"Automatically generated COCO json file for Detectron2.\",\n }\n coco_dict = {\"info\": info, \"images\": coco_images, \"categories\": categories, \"licenses\": None}\n if len(coco_annotations) > 0:\n coco_dict[\"annotations\"] = coco_annotations\n return coco_dict" } ]
[ "function_empty", "Development" ]
[ "detectron2.data.catalog._DatasetCatalog.get", "detectron2.data.datasets.coco.convert_to_coco_dict" ]
Python
1
2
{ "total_num": 3, "base_passed_num": 1 }
[ "UniRef.detectron2.utils.registry.locate", "UniRef.detectron2.utils.registry._convert_target_to_string" ]
UniRef
[ "detectron2/utils/registry.py", "detectron2/utils/registry.py" ]
[ "tests/test_registry.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 60, "func_start_lineno": 40, "func_end_lineno": 60, "func_code": "def locate(name: str) -> Any:\n \"\"\"\n Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``,\n such as \"module.submodule.class_name\".\n\n Raise Exception if it cannot be found.\n \"\"\"\n obj = pydoc.locate(name)\n\n # Some cases (e.g. torch.optim.sgd.SGD) not handled correctly\n # by pydoc.locate. Try a private function from hydra.\n if obj is None:\n try:\n # from hydra.utils import get_method - will print many errors\n from hydra.utils import _locate\n except ImportError as e:\n raise ImportError(f\"Cannot dynamically locate object {name}!\") from e\n else:\n obj = _locate(name) # it raises if fails\n\n return obj" }, { "class_start_lineno": 1, "class_end_lineno": 60, "func_start_lineno": 15, "func_end_lineno": 37, "func_code": "def _convert_target_to_string(t: Any) -> str:\n \"\"\"\n Inverse of ``locate()``.\n\n Args:\n t: any object with ``__module__`` and ``__qualname__``\n \"\"\"\n module, qualname = t.__module__, t.__qualname__\n\n # Compress the path to this object, e.g. ``module.submodule._impl.class``\n # may become ``module.submodule.class``, if the later also resolves to the same\n # object. This simplifies the string, and also is less affected by moving the\n # class implementation.\n module_parts = module.split(\".\")\n for k in range(1, len(module_parts)):\n prefix = \".\".join(module_parts[:k])\n candidate = f\"{prefix}.{qualname}\"\n try:\n if locate(candidate) is t:\n return candidate\n except ImportError:\n pass\n return f\"{module}.{qualname}\"" } ]
[ "Development" ]
[ "detectron2.utils.registry.locate", "detectron2.utils.registry._convert_target_to_string" ]
Python
0
2
{ "total_num": 6, "base_passed_num": 0 }
[ "UniRef.detectron2.utils.visualizer._create_text_labels", "UniRef.detectron2.utils.visualizer.Visualizer::draw_instance_predictions", "UniRef.detectron2.utils.visualizer.Visualizer::_convert_masks", "UniRef.detectron2.utils.colormap.random_color", "UniRef.detectron2.utils.visualizer.Visualizer::overlay_instances" ]
UniRef
[ "detectron2/utils/visualizer.py", "detectron2/utils/visualizer.py", "detectron2/utils/visualizer.py", "detectron2/utils/colormap.py", "detectron2/utils/visualizer.py" ]
[ "tests/test_visualizer.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 1274, "func_start_lineno": 237, "func_end_lineno": 261, "func_code": "def _create_text_labels(classes, scores, class_names, is_crowd=None):\n \"\"\"\n Args:\n classes (list[int] or None):\n scores (list[float] or None):\n class_names (list[str] or None):\n is_crowd (list[bool] or None):\n\n Returns:\n list[str] or None\n \"\"\"\n labels = None\n if classes is not None:\n if class_names is not None and len(class_names) > 0:\n labels = [class_names[i] for i in classes]\n else:\n labels = [str(i) for i in classes]\n if scores is not None:\n if labels is None:\n labels = [\"{:.0f}%\".format(s * 100) for s in scores]\n else:\n labels = [\"{} {:.0f}%\".format(l, s * 100) for l, s in zip(labels, scores)]\n if labels is not None and is_crowd is not None:\n labels = [l + (\"|crowd\" if crowd else \"\") for l, crowd in zip(labels, is_crowd)]\n return labels" }, { "class_start_lineno": 338, "class_end_lineno": 1274, "func_start_lineno": 390, "func_end_lineno": 441, "func_code": " def draw_instance_predictions(self, predictions):\n \"\"\"\n Draw instance-level prediction results on an image.\n\n Args:\n predictions (Instances): the output of an instance detection/segmentation\n model. Following fields will be used to draw:\n \"pred_boxes\", \"pred_classes\", \"scores\", \"pred_masks\" (or \"pred_masks_rle\").\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n boxes = predictions.pred_boxes if predictions.has(\"pred_boxes\") else None\n scores = predictions.scores if predictions.has(\"scores\") else None\n classes = predictions.pred_classes.tolist() if predictions.has(\"pred_classes\") else None\n labels = _create_text_labels(classes, scores, self.metadata.get(\"thing_classes\", None))\n keypoints = predictions.pred_keypoints if predictions.has(\"pred_keypoints\") else None\n\n if predictions.has(\"pred_masks\"):\n masks = np.asarray(predictions.pred_masks)\n masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]\n else:\n masks = None\n\n if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(\"thing_colors\"):\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes\n ]\n alpha = 0.8\n else:\n colors = None\n alpha = 0.5\n\n if self._instance_mode == ColorMode.IMAGE_BW:\n self.output.reset_image(\n self._create_grayscale_image(\n (predictions.pred_masks.any(dim=0) > 0).numpy()\n if predictions.has(\"pred_masks\")\n else None\n )\n )\n alpha = 0.3\n\n self.overlay_instances(\n masks=masks,\n boxes=boxes,\n labels=labels,\n keypoints=keypoints,\n assigned_colors=colors,\n alpha=alpha,\n )\n return self.output" }, { "class_start_lineno": 338, "class_end_lineno": 1274, "func_start_lineno": 1221, "func_end_lineno": 1242, "func_code": " def _convert_masks(self, masks_or_polygons):\n \"\"\"\n Convert different format of masks or polygons to a tuple of masks and polygons.\n\n Returns:\n list[GenericMask]:\n \"\"\"\n\n m = masks_or_polygons\n if isinstance(m, PolygonMasks):\n m = m.polygons\n if isinstance(m, BitMasks):\n m = m.tensor.numpy()\n if isinstance(m, torch.Tensor):\n m = m.numpy()\n ret = []\n for x in m:\n if isinstance(x, GenericMask):\n ret.append(x)\n else:\n ret.append(GenericMask(x, self.output.height, self.output.width))\n return ret" }, { "class_start_lineno": 1, "class_end_lineno": 158, "func_start_lineno": 112, "func_end_lineno": 125, "func_code": "def random_color(rgb=False, maximum=255):\n \"\"\"\n Args:\n rgb (bool): whether to 
return RGB colors or BGR colors.\n maximum (int): either 255 or 1\n\n Returns:\n ndarray: a vector of 3 numbers\n \"\"\"\n idx = np.random.randint(0, len(_COLORS))\n ret = _COLORS[idx] * maximum\n if not rgb:\n ret = ret[::-1]\n return ret" }, { "class_start_lineno": 338, "class_end_lineno": 1274, "func_start_lineno": 614, "func_end_lineno": 754, "func_code": " def overlay_instances(\n self,\n *,\n boxes=None,\n labels=None,\n masks=None,\n keypoints=None,\n assigned_colors=None,\n alpha=0.5,\n ):\n \"\"\"\n Args:\n boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,\n or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,\n or a :class:`RotatedBoxes`,\n or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format\n for the N objects in a single image,\n labels (list[str]): the text to be displayed for each instance.\n masks (masks-like object): Supported types are:\n\n * :class:`detectron2.structures.PolygonMasks`,\n :class:`detectron2.structures.BitMasks`.\n * list[list[ndarray]]: contains the segmentation masks for all objects in one image.\n The first level of the list corresponds to individual instances. The second\n level to all the polygon that compose the instance, and the third level\n to the polygon coordinates. The third level should have the format of\n [x0, y0, x1, y1, ..., xn, yn] (n >= 3).\n * list[ndarray]: each ndarray is a binary mask of shape (H, W).\n * list[dict]: each dict is a COCO-style RLE.\n keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),\n where the N is the number of instances and K is the number of keypoints.\n The last dimension corresponds to (x, y, visibility or score).\n assigned_colors (list[matplotlib.colors]): a list of colors, where each color\n corresponds to each mask or box in the image. 
Refer to 'matplotlib.colors'\n for full list of formats that the colors are accepted in.\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n num_instances = 0\n if boxes is not None:\n boxes = self._convert_boxes(boxes)\n num_instances = len(boxes)\n if masks is not None:\n masks = self._convert_masks(masks)\n if num_instances:\n assert len(masks) == num_instances\n else:\n num_instances = len(masks)\n if keypoints is not None:\n if num_instances:\n assert len(keypoints) == num_instances\n else:\n num_instances = len(keypoints)\n keypoints = self._convert_keypoints(keypoints)\n if labels is not None:\n assert len(labels) == num_instances\n if assigned_colors is None:\n assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]\n if num_instances == 0:\n return self.output\n if boxes is not None and boxes.shape[1] == 5:\n return self.overlay_rotated_instances(\n boxes=boxes, labels=labels, assigned_colors=assigned_colors\n )\n\n # Display in largest to smallest order to reduce occlusion.\n areas = None\n if boxes is not None:\n areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)\n elif masks is not None:\n areas = np.asarray([x.area() for x in masks])\n\n if areas is not None:\n sorted_idxs = np.argsort(-areas).tolist()\n # Re-order overlapped instances in descending order.\n boxes = boxes[sorted_idxs] if boxes is not None else None\n labels = [labels[k] for k in sorted_idxs] if labels is not None else None\n masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None\n assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]\n keypoints = keypoints[sorted_idxs] if keypoints is not None else None\n\n for i in range(num_instances):\n color = assigned_colors[i]\n if boxes is not None:\n self.draw_box(boxes[i], edge_color=color)\n\n if masks is not None:\n for segment in masks[i].polygons:\n self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)\n\n if labels is not None:\n # first get a box\n if boxes is not None:\n x0, y0, x1, y1 = boxes[i]\n text_pos = (x0, y0) # if drawing boxes, put text on the box corner.\n horiz_align = \"left\"\n elif masks is not None:\n # skip small mask without polygon\n if len(masks[i].polygons) == 0:\n continue\n\n x0, y0, x1, y1 = masks[i].bbox()\n\n # draw text in the center (defined by median) when box is not drawn\n # median is less sensitive to outliers.\n text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]\n horiz_align = \"center\"\n else:\n continue # drawing the box confidence for keypoints isn't very useful.\n # for small objects, draw text at the side to avoid occlusion\n instance_area = (y1 - y0) * (x1 - x0)\n if (\n instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale\n or y1 - y0 < 40 * self.output.scale\n ):\n if y1 >= self.output.height - 5:\n text_pos = (x1, y0)\n else:\n text_pos = (x0, y1)\n\n height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n font_size = (\n np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)\n * 0.5\n * self._default_font_size\n )\n self.draw_text(\n labels[i],\n text_pos,\n color=lighter_color,\n horizontal_alignment=horiz_align,\n font_size=font_size,\n )\n\n # draw keypoints\n if keypoints is not None:\n for keypoints_per_instance in keypoints:\n self.draw_and_connect_keypoints(keypoints_per_instance)\n\n return self.output" } ]
[ "function_empty", "Development" ]
[ "detectron2.utils.visualizer._create_text_labels", "detectron2.utils.visualizer.Visualizer.draw_instance_predictions", "detectron2.utils.visualizer.Visualizer._convert_masks", "detectron2.utils.colormap.random_color", "detectron2.utils.visualizer.Visualizer.overlay_instances" ]
Python
4
5
{ "total_num": 14, "base_passed_num": 10 }
[ "UniRef.detectron2.structures.instances.Instances::set", "UniRef.detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker::_initialize_extra_fields", "UniRef.detectron2.structures.boxes.pairwise_intersection", "UniRef.detectron2.structures.boxes.pairwise_iou", "UniRef.detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker::update" ]
UniRef
[ "detectron2/structures/instances.py", "detectron2/structures/instances.py", "detectron2/tracking/bbox_iou_tracker.py", "detectron2/structures/boxes.py", "detectron2/structures/boxes.py", "detectron2/tracking/bbox_iou_tracker.py" ]
[ "tests/tracking/test_bbox_iou_tracker.py" ]
[ { "class_start_lineno": 7, "class_end_lineno": 192, "func_start_lineno": 68, "func_end_lineno": 79, "func_code": " def set(self, name: str, value: Any) -> None:\n \"\"\"\n Set the field named `name` to `value`.\n The length of `value` must be the number of instances,\n and must agree with other existing fields in this object.\n \"\"\"\n data_len = len(value)\n if len(self._fields):\n assert (\n len(self) == data_len\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\n self._fields[name] = value" }, { "class_start_lineno": 7, "class_end_lineno": 192, "func_start_lineno": 57, "func_end_lineno": 61, "func_code": " def __setattr__(self, name: str, val: Any) -> None:\n if name.startswith(\"_\"):\n super().__setattr__(name, val)\n else:\n self.set(name, val)" }, { "class_start_lineno": 17, "class_end_lineno": 260, "func_start_lineno": 152, "func_end_lineno": 173, "func_code": " def _initialize_extra_fields(self, instances: Instances) -> Instances:\n \"\"\"\n If input instances don't have ID, ID_period, lost_frame_count fields,\n this method is used to initialize these fields.\n\n Args:\n instances: D2 Instances, for predictions of the current frame\n Return:\n D2 Instances with extra fields added\n \"\"\"\n if not instances.has(\"ID\"):\n instances.set(\"ID\", [None] * len(instances))\n if not instances.has(\"ID_period\"):\n instances.set(\"ID_period\", [None] * len(instances))\n if not instances.has(\"lost_frame_count\"):\n instances.set(\"lost_frame_count\", [None] * len(instances))\n if self._prev_instances is None:\n instances.ID = list(range(len(instances)))\n self._id_count += len(instances)\n instances.ID_period = [1] * len(instances)\n instances.lost_frame_count = [0] * len(instances)\n return instances" }, { "class_start_lineno": 1, "class_end_lineno": 423, "func_start_lineno": 310, "func_end_lineno": 329, "func_code": "def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M,\n compute the intersection area between __all__ N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax)\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.\n\n Returns:\n Tensor: intersection, sized [N,M].\n \"\"\"\n boxes1, boxes2 = boxes1.tensor, boxes2.tensor\n width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(\n boxes1[:, None, :2], boxes2[:, :2]\n ) # [N,M,2]\n\n width_height.clamp_(min=0) # [N,M,2]\n intersection = width_height.prod(dim=2) # [N,M]\n return intersection" }, { "class_start_lineno": 1, "class_end_lineno": 423, "func_start_lineno": 334, "func_end_lineno": 356, "func_code": "def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M, compute the IoU\n (intersection over union) between **all** N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax).\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. 
Contains N & M boxes, respectively.\n\n Returns:\n Tensor: IoU, sized [N,M].\n \"\"\"\n area1 = boxes1.area() # [N]\n area2 = boxes2.area() # [M]\n inter = pairwise_intersection(boxes1, boxes2)\n\n # handle empty boxes\n iou = torch.where(\n inter > 0,\n inter / (area1[:, None] + area2 - inter),\n torch.zeros(1, dtype=inter.dtype, device=inter.device),\n )\n return iou" }, { "class_start_lineno": 17, "class_end_lineno": 260, "func_start_lineno": 88, "func_end_lineno": 121, "func_code": " def update(self, instances: Instances) -> Instances:\n \"\"\"\n See BaseTracker description\n \"\"\"\n if instances.has(\"pred_keypoints\"):\n raise NotImplementedError(\"Need to add support for keypoints\")\n instances = self._initialize_extra_fields(instances)\n if self._prev_instances is not None:\n # calculate IoU of all bbox pairs\n iou_all = pairwise_iou(\n boxes1=instances.pred_boxes,\n boxes2=self._prev_instances.pred_boxes,\n )\n # sort IoU in descending order\n bbox_pairs = self._create_prediction_pairs(instances, iou_all)\n # assign previous ID to current bbox if IoU > track_iou_threshold\n self._reset_fields()\n for bbox_pair in bbox_pairs:\n idx = bbox_pair[\"idx\"]\n prev_id = bbox_pair[\"prev_id\"]\n if idx in self._matched_idx \\\n or prev_id in self._matched_ID \\\n or bbox_pair[\"IoU\"] < self._track_iou_threshold:\n continue\n instances.ID[idx] = prev_id\n instances.ID_period[idx] = bbox_pair[\"prev_period\"] + 1\n instances.lost_frame_count[idx] = 0\n self._matched_idx.add(idx)\n self._matched_ID.add(prev_id)\n self._untracked_prev_idx.remove(bbox_pair[\"prev_idx\"])\n instances = self._assign_new_id(instances)\n instances = self._merge_untracked_instances(instances)\n self._prev_instances = copy.deepcopy(instances)\n return instances" } ]
[ "function_empty", "Development" ]
[ "detectron2.structures.instances.Instances.set", "detectron2.structures.instances.Instances.__setattr__", "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker._initialize_extra_fields", "detectron2.structures.boxes.pairwise_intersection", "detectron2.structures.boxes.pairwise_iou", "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker.update" ]
Python
4
5
{ "total_num": 5, "base_passed_num": 2 }
[ "UniRef.detectron2.structures.boxes.pairwise_intersection", "UniRef.detectron2.structures.boxes.pairwise_ioa", "UniRef.detectron2.structures.boxes.pairwise_iou" ]
UniRef
[ "detectron2/structures/boxes.py", "detectron2/structures/boxes.py", "detectron2/structures/boxes.py" ]
[ "tests/structures/test_boxes.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 423, "func_start_lineno": 310, "func_end_lineno": 329, "func_code": "def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M,\n compute the intersection area between __all__ N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax)\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.\n\n Returns:\n Tensor: intersection, sized [N,M].\n \"\"\"\n boxes1, boxes2 = boxes1.tensor, boxes2.tensor\n width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max(\n boxes1[:, None, :2], boxes2[:, :2]\n ) # [N,M,2]\n\n width_height.clamp_(min=0) # [N,M,2]\n intersection = width_height.prod(dim=2) # [N,M]\n return intersection" }, { "class_start_lineno": 1, "class_end_lineno": 423, "func_start_lineno": 359, "func_end_lineno": 376, "func_code": "def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Similar to :func:`pariwise_iou` but compute the IoA (intersection over boxes2 area).\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.\n\n Returns:\n Tensor: IoA, sized [N,M].\n \"\"\"\n area2 = boxes2.area() # [M]\n inter = pairwise_intersection(boxes1, boxes2)\n\n # handle empty boxes\n ioa = torch.where(\n inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device)\n )\n return ioa" }, { "class_start_lineno": 1, "class_end_lineno": 423, "func_start_lineno": 334, "func_end_lineno": 356, "func_code": "def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\n \"\"\"\n Given two lists of boxes of size N and M, compute the IoU\n (intersection over union) between **all** N x M pairs of boxes.\n The box order must be (xmin, ymin, xmax, ymax).\n\n Args:\n boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.\n\n Returns:\n Tensor: IoU, sized [N,M].\n \"\"\"\n area1 = boxes1.area() # [N]\n area2 = boxes2.area() # [M]\n inter = pairwise_intersection(boxes1, boxes2)\n\n # handle empty boxes\n iou = torch.where(\n inter > 0,\n inter / (area1[:, None] + area2 - inter),\n torch.zeros(1, dtype=inter.dtype, device=inter.device),\n )\n return iou" } ]
[ "function_empty" ]
[ "detectron2.structures.boxes.pairwise_intersection", "detectron2.structures.boxes.pairwise_ioa", "detectron2.structures.boxes.pairwise_iou" ]
Python
3
3
{ "total_num": 17, "base_passed_num": 15 }
[ "UniRef.detectron2.data.transforms.transform.RotationTransform::create_rotation_matrix", "UniRef.detectron2.data.transforms.transform.RotationTransform::inverse" ]
UniRef
[ "detectron2/data/transforms/transform.py", "detectron2/data/transforms/transform.py" ]
[ "tests/data/test_rotation_transform.py" ]
[ { "class_start_lineno": 162, "class_end_lineno": 247, "func_start_lineno": 223, "func_end_lineno": 233, "func_code": " def create_rotation_matrix(self, offset=0):\n center = (self.center[0] + offset, self.center[1] + offset)\n rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1)\n if self.expand:\n # Find the coordinates of the center of rotation in the new image\n # The only point for which we know the future coordinates is the center of the image\n rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :]\n new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center\n # shift the rotation center to the new coordinates\n rm[:, 2] += new_center\n return rm" }, { "class_start_lineno": 162, "class_end_lineno": 247, "func_start_lineno": 235, "func_end_lineno": 247, "func_code": " def inverse(self):\n \"\"\"\n The inverse is to rotate it back with expand, and crop to get the original shape.\n \"\"\"\n if not self.expand: # Not possible to inverse if a part of the image is lost\n raise NotImplementedError()\n rotation = RotationTransform(\n self.bound_h, self.bound_w, -self.angle, True, None, self.interp\n )\n crop = CropTransform(\n (rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h\n )\n return TransformList([rotation, crop])" } ]
[ "Development" ]
[ "detectron2.data.transforms.transform.RotationTransform.create_rotation_matrix", "detectron2.data.transforms.transform.RotationTransform.inverse" ]
Python
0
2
{ "total_num": 6, "base_passed_num": 1 }
[ "langchain_core.libs.core.langchain_core.load.dump.dumps", "langchain_core.libs.core.langchain_core.load.dump.default", "langchain_core.libs.core.langchain_core.load.dump.dumpd" ]
langchain_core
[ "langchain_core/load/dump.py", "langchain_core/load/dump.py", "langchain_core/load/dump.py" ]
[ "libs/core/tests/unit_tests/load/test_serializable.py", "libs/core/tests/unit_tests/messages/test_ai.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 70, "func_start_lineno": 23, "func_end_lineno": 53, "func_code": "def dumps(obj: Any, *, pretty: bool = False, **kwargs: Any) -> str:\n \"\"\"Return a json string representation of an object.\n\n Args:\n obj: The object to dump.\n pretty: Whether to pretty print the json. If true, the json will be\n indented with 2 spaces (if no indent is provided as part of kwargs).\n Default is False.\n kwargs: Additional arguments to pass to json.dumps\n\n Returns:\n A json string representation of the object.\n\n Raises:\n ValueError: If `default` is passed as a kwarg.\n \"\"\"\n if \"default\" in kwargs:\n msg = \"`default` should not be passed to dumps\"\n raise ValueError(msg)\n try:\n if pretty:\n indent = kwargs.pop(\"indent\", 2)\n return json.dumps(obj, default=default, indent=indent, **kwargs)\n else:\n return json.dumps(obj, default=default, **kwargs)\n except TypeError:\n if pretty:\n indent = kwargs.pop(\"indent\", 2)\n return json.dumps(to_json_not_implemented(obj), indent=indent, **kwargs)\n else:\n return json.dumps(to_json_not_implemented(obj), **kwargs)" }, { "class_start_lineno": 1, "class_end_lineno": 70, "func_start_lineno": 7, "func_end_lineno": 20, "func_code": "def default(obj: Any) -> Any:\n \"\"\"Return a default value for a Serializable object or\n a SerializedNotImplemented object.\n\n Args:\n obj: The object to serialize to json if it is a Serializable object.\n\n Returns:\n A json serializable object or a SerializedNotImplemented object.\n \"\"\"\n if isinstance(obj, Serializable):\n return obj.to_json()\n else:\n return to_json_not_implemented(obj)" }, { "class_start_lineno": 1, "class_end_lineno": 70, "func_start_lineno": 56, "func_end_lineno": 70, "func_code": "def dumpd(obj: Any) -> Any:\n \"\"\"Return a dict representation of an object.\n\n Note:\n Unfortunately this function is not as efficient as it could be\n because it first dumps the object to a json string and then loads it\n back into a dictionary.\n\n Args:\n obj: The object to dump.\n\n Returns:\n dictionary that can be serialized to json using json.dumps\n \"\"\"\n return json.loads(dumps(obj))" } ]
[ "function_empty" ]
[ "langchain_core.load.dump.dumps", "langchain_core.load.dump.default", "langchain_core.load.dump.dumpd" ]
Python
3
3
{ "total_num": 19, "base_passed_num": 12 }
[ "langchain_core.libs.core.langchain_core.runnables.config.ensure_config", "langchain_core.libs.core.langchain_core.runnables.config.patch_config", "langchain_core.libs.core.langchain_core.utils.json.parse_json_markdown", "langchain_core.libs.core.langchain_core.output_parsers.json.JsonOutputParser::parse_result" ]
langchain_core
[ "langchain_core/runnables/config.py", "langchain_core/runnables/config.py", "langchain_core/utils/json.py", "langchain_core/output_parsers/json.py" ]
[ "libs/core/tests/unit_tests/output_parsers/test_json.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 149, "func_end_lineno": 199, "func_code": "def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:\n \"\"\"Ensure that a config is a dict with all keys present.\n\n Args:\n config (Optional[RunnableConfig], optional): The config to ensure.\n Defaults to None.\n\n Returns:\n RunnableConfig: The ensured config.\n \"\"\"\n empty = RunnableConfig(\n tags=[],\n metadata={},\n callbacks=None,\n recursion_limit=DEFAULT_RECURSION_LIMIT,\n configurable={},\n )\n if var_config := var_child_runnable_config.get():\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in var_config.items()\n if v is not None\n },\n )\n )\n if config is not None:\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in config.items()\n if v is not None and k in CONFIG_KEYS\n },\n )\n )\n if config is not None:\n for k, v in config.items():\n if k not in CONFIG_KEYS and v is not None:\n empty[\"configurable\"][k] = v\n for key, value in empty.get(\"configurable\", {}).items():\n if (\n not key.startswith(\"__\")\n and isinstance(value, (str, int, float, bool))\n and key not in empty[\"metadata\"]\n ):\n empty[\"metadata\"][key] = value\n return empty" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 249, "func_end_lineno": 292, "func_code": "def patch_config(\n config: Optional[RunnableConfig],\n *,\n callbacks: Optional[BaseCallbackManager] = None,\n recursion_limit: Optional[int] = None,\n max_concurrency: Optional[int] = None,\n run_name: Optional[str] = None,\n configurable: Optional[dict[str, Any]] = None,\n) -> RunnableConfig:\n \"\"\"Patch a config with new values.\n\n Args:\n config (Optional[RunnableConfig]): The config to patch.\n callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.\n Defaults to None.\n recursion_limit (Optional[int], optional): The recursion limit to set.\n Defaults to None.\n max_concurrency (Optional[int], optional): The max concurrency to set.\n Defaults to None.\n run_name (Optional[str], optional): The run name to set. 
Defaults to None.\n configurable (Optional[Dict[str, Any]], optional): The configurable to set.\n Defaults to None.\n\n Returns:\n RunnableConfig: The patched config.\n \"\"\"\n config = ensure_config(config)\n if callbacks is not None:\n # If we're replacing callbacks, we need to unset run_name\n # As that should apply only to the same run as the original callbacks\n config[\"callbacks\"] = callbacks\n if \"run_name\" in config:\n del config[\"run_name\"]\n if \"run_id\" in config:\n del config[\"run_id\"]\n if recursion_limit is not None:\n config[\"recursion_limit\"] = recursion_limit\n if max_concurrency is not None:\n config[\"max_concurrency\"] = max_concurrency\n if run_name is not None:\n config[\"run_name\"] = run_name\n if configurable is not None:\n config[\"configurable\"] = {**config.get(\"configurable\", {}), **configurable}\n return config" }, { "class_start_lineno": 1, "class_end_lineno": 191, "func_start_lineno": 125, "func_end_lineno": 145, "func_code": "def parse_json_markdown(\n json_string: str, *, parser: Callable[[str], Any] = parse_partial_json\n) -> dict:\n \"\"\"Parse a JSON string from a Markdown string.\n\n Args:\n json_string: The Markdown string.\n\n Returns:\n The parsed JSON object as a Python dictionary.\n \"\"\"\n try:\n return _parse_json(json_string, parser=parser)\n except json.JSONDecodeError:\n # Try to find JSON string within triple backticks\n match = _json_markdown_re.search(json_string)\n\n # If no match found, assume the entire string is a JSON string\n # Else, use the content within the backticks\n json_str = json_string if match is None else match.group(2)\n return _parse_json(json_str, parser=parser)" }, { "class_start_lineno": 34, "class_end_lineno": 123, "func_start_lineno": 57, "func_end_lineno": 86, "func_code": " def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:\n \"\"\"Parse the result of an LLM call to a JSON object.\n\n Args:\n result: The result of the LLM call.\n partial: Whether to parse partial JSON objects.\n If True, the output will be a JSON object containing\n all the keys that have been returned so far.\n If False, the output will be the full JSON object.\n Default is False.\n\n Returns:\n The parsed JSON object.\n\n Raises:\n OutputParserException: If the output is not valid JSON.\n \"\"\"\n text = result[0].text\n text = text.strip()\n if partial:\n try:\n return parse_json_markdown(text)\n except JSONDecodeError:\n return None\n else:\n try:\n return parse_json_markdown(text)\n except JSONDecodeError as e:\n msg = f\"Invalid json output: {text}\"\n raise OutputParserException(msg, llm_output=text) from e" } ]
[ "function_empty" ]
[ "langchain_core.runnables.config.ensure_config", "langchain_core.runnables.config.patch_config", "langchain_core.utils.json.parse_json_markdown", "langchain_core.output_parsers.json.JsonOutputParser.parse_result" ]
Python
4
4
{ "total_num": 36, "base_passed_num": 11 }
[ "langchain_core.libs.core.langchain_core.utils.json.parse_partial_json", "langchain_core.libs.core.langchain_core.messages.ai.AIMessageChunk::init_tool_calls", "langchain_core.libs.core.langchain_core.utils._merge.merge_lists", "langchain_core.libs.core.langchain_core.utils._merge.merge_dicts" ]
langchain_core
[ "langchain_core/utils/json.py", "langchain_core/messages/ai.py", "langchain_core/utils/_merge.py", "langchain_core/utils/_merge.py", "langchain_core/runnables/base.py" ]
[ "libs/core/tests/unit_tests/output_parsers/test_openai_tools.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 191, "func_start_lineno": 43, "func_end_lineno": 119, "func_code": "def parse_partial_json(s: str, *, strict: bool = False) -> Any:\n \"\"\"Parse a JSON string that may be missing closing braces.\n\n Args:\n s: The JSON string to parse.\n strict: Whether to use strict parsing. Defaults to False.\n\n Returns:\n The parsed JSON object as a Python dictionary.\n \"\"\"\n # Attempt to parse the string as-is.\n try:\n return json.loads(s, strict=strict)\n except json.JSONDecodeError:\n pass\n\n # Initialize variables.\n new_chars = []\n stack = []\n is_inside_string = False\n escaped = False\n\n # Process each character in the string one at a time.\n for char in s:\n if is_inside_string:\n if char == '\"' and not escaped:\n is_inside_string = False\n elif char == \"\\n\" and not escaped:\n char = \"\\\\n\" # Replace the newline character with the escape sequence.\n elif char == \"\\\\\":\n escaped = not escaped\n else:\n escaped = False\n else:\n if char == '\"':\n is_inside_string = True\n escaped = False\n elif char == \"{\":\n stack.append(\"}\")\n elif char == \"[\":\n stack.append(\"]\")\n elif char == \"}\" or char == \"]\":\n if stack and stack[-1] == char:\n stack.pop()\n else:\n # Mismatched closing character; the input is malformed.\n return None\n\n # Append the processed character to the new string.\n new_chars.append(char)\n\n # If we're still inside a string at the end of processing,\n # we need to close the string.\n if is_inside_string:\n if escaped: # Remoe unterminated escape character\n new_chars.pop()\n new_chars.append('\"')\n\n # Reverse the stack to get the closing characters.\n stack.reverse()\n\n # Try to parse mods of string until we succeed or run out of characters.\n while new_chars:\n # Close any remaining open structures in the reverse\n # order that they were opened.\n # Attempt to parse the modified string as JSON.\n try:\n return json.loads(\"\".join(new_chars + stack), strict=strict)\n except json.JSONDecodeError:\n # If we still can't parse the string as JSON,\n # try removing the last character\n new_chars.pop()\n\n # If we got here, we ran out of characters to remove\n # and still couldn't parse the string as JSON, so return the parse error\n # for the original string.\n return json.loads(s, strict=strict)" }, { "class_start_lineno": 296, "class_end_lineno": 403, "func_start_lineno": 328, "func_end_lineno": 394, "func_code": " def init_tool_calls(self) -> Self:\n \"\"\"Initialize tool calls from tool call chunks.\n\n Args:\n values: The values to validate.\n\n Returns:\n The values with tool calls initialized.\n\n Raises:\n ValueError: If the tool call chunks are malformed.\n \"\"\"\n if not self.tool_call_chunks:\n if self.tool_calls:\n self.tool_call_chunks = [\n create_tool_call_chunk(\n name=tc[\"name\"],\n args=json.dumps(tc[\"args\"]),\n id=tc[\"id\"],\n index=None,\n )\n for tc in self.tool_calls\n ]\n if self.invalid_tool_calls:\n tool_call_chunks = self.tool_call_chunks\n tool_call_chunks.extend(\n [\n create_tool_call_chunk(\n name=tc[\"name\"], args=tc[\"args\"], id=tc[\"id\"], index=None\n )\n for tc in self.invalid_tool_calls\n ]\n )\n self.tool_call_chunks = tool_call_chunks\n\n return self\n tool_calls = []\n invalid_tool_calls = []\n\n def add_chunk_to_invalid_tool_calls(chunk: ToolCallChunk) -> None:\n invalid_tool_calls.append(\n create_invalid_tool_call(\n name=chunk[\"name\"],\n args=chunk[\"args\"],\n id=chunk[\"id\"],\n error=None,\n )\n )\n\n for chunk in self.tool_call_chunks:\n try:\n 
args_ = parse_partial_json(chunk[\"args\"]) if chunk[\"args\"] != \"\" else {} # type: ignore[arg-type]\n if isinstance(args_, dict):\n tool_calls.append(\n create_tool_call(\n name=chunk[\"name\"] or \"\",\n args=args_,\n id=chunk[\"id\"],\n )\n )\n else:\n add_chunk_to_invalid_tool_calls(chunk)\n except Exception:\n add_chunk_to_invalid_tool_calls(chunk)\n self.tool_calls = tool_calls\n self.invalid_tool_calls = invalid_tool_calls\n return self" }, { "class_start_lineno": 1, "class_end_lineno": 148, "func_start_lineno": 72, "func_end_lineno": 106, "func_code": "def merge_lists(left: Optional[list], *others: Optional[list]) -> Optional[list]:\n \"\"\"Add many lists, handling None.\n\n Args:\n left: The first list to merge.\n others: The other lists to merge.\n\n Returns:\n The merged list.\n \"\"\"\n merged = left.copy() if left is not None else None\n for other in others:\n if other is None:\n continue\n elif merged is None:\n merged = other.copy()\n else:\n for e in other:\n if isinstance(e, dict) and \"index\" in e and isinstance(e[\"index\"], int):\n to_merge = [\n i\n for i, e_left in enumerate(merged)\n if e_left[\"index\"] == e[\"index\"]\n ]\n if to_merge:\n # TODO: Remove this once merge_dict is updated with special\n # handling for 'type'.\n if \"type\" in e:\n e = {k: v for k, v in e.items() if k != \"type\"}\n merged[to_merge[0]] = merge_dicts(merged[to_merge[0]], e)\n else:\n merged.append(e)\n else:\n merged.append(e)\n return merged" }, { "class_start_lineno": 1, "class_end_lineno": 148, "func_start_lineno": 6, "func_end_lineno": 69, "func_code": "def merge_dicts(left: dict[str, Any], *others: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Merge many dicts, handling specific scenarios where a key exists in both\n dictionaries but has a value of None in 'left'. In such cases, the method uses the\n value from 'right' for that key in the merged dictionary.\n\n Args:\n left: The first dictionary to merge.\n others: The other dictionaries to merge.\n\n Returns:\n The merged dictionary.\n\n Raises:\n TypeError: If the key exists in both dictionaries but has a different type.\n TypeError: If the value has an unsupported type.\n\n Example:\n If left = {\"function_call\": {\"arguments\": None}} and\n right = {\"function_call\": {\"arguments\": \"{\\n\"}}\n then, after merging, for the key \"function_call\",\n the value from 'right' is used,\n resulting in merged = {\"function_call\": {\"arguments\": \"{\\n\"}}.\n \"\"\"\n merged = left.copy()\n for right in others:\n for right_k, right_v in right.items():\n if right_k not in merged or right_v is not None and merged[right_k] is None:\n merged[right_k] = right_v\n elif right_v is None:\n continue\n elif type(merged[right_k]) is not type(right_v):\n msg = (\n f'additional_kwargs[\"{right_k}\"] already exists in this message,'\n \" but with a different type.\"\n )\n raise TypeError(msg)\n elif isinstance(merged[right_k], str):\n # TODO: Add below special handling for 'type' key in 0.3 and remove\n # merge_lists 'type' logic.\n #\n # if right_k == \"type\":\n # if merged[right_k] == right_v:\n # continue\n # else:\n # raise ValueError(\n # \"Unable to merge. Two different values seen for special \"\n # f\"key 'type': {merged[right_k]} and {right_v}. 
'type' \"\n # \"should either occur once or have the same value across \"\n # \"all dicts.\"\n # )\n merged[right_k] += right_v\n elif isinstance(merged[right_k], dict):\n merged[right_k] = merge_dicts(merged[right_k], right_v)\n elif isinstance(merged[right_k], list):\n merged[right_k] = merge_lists(merged[right_k], right_v)\n elif merged[right_k] == right_v:\n continue\n else:\n msg = (\n f\"Additional kwargs key {right_k} already exists in left dict and \"\n f\"value has unsupported type {type(merged[right_k])}.\"\n )\n raise TypeError(msg)\n return merged" }, { "class_start_lineno": 2659, "class_end_lineno": 3435, "func_start_lineno": 3403, "func_end_lineno": 3409, "func_code": " def stream(\n self,\n input: Input,\n config: Optional[RunnableConfig] = None,\n **kwargs: Optional[Any],\n ) -> Iterator[Output]:\n yield from self.transform(iter([input]), config, **kwargs)" } ]
[ "function_empty" ]
[ "langchain_core.utils.json.parse_partial_json", "langchain_core.messages.ai.AIMessageChunk.init_tool_calls", "langchain_core.utils._merge.merge_lists", "langchain_core.utils._merge.merge_dicts", "langchain_core.runnables.base.RunnableSequence.stream" ]
Python
4
4
{ "total_num": 11, "base_passed_num": 2 }
[ "langchain_core.libs.core.langchain_core.utils.formatting.StrictFormatter::validate_input_variables", "langchain_core.libs.core.langchain_core.prompts.string.check_valid_template" ]
langchain_core
[ "langchain_core/utils/formatting.py", "langchain_core/prompts/string.py", "langchain_core/prompts/few_shot.py" ]
[ "libs/core/tests/unit_tests/prompts/test_few_shot.py" ]
[ { "class_start_lineno": 8, "class_end_lineno": 48, "func_start_lineno": 35, "func_end_lineno": 48, "func_code": " def validate_input_variables(\n self, format_string: str, input_variables: list[str]\n ) -> None:\n \"\"\"Check that all input variables are used in the format string.\n\n Args:\n format_string: The format string.\n input_variables: The input variables.\n\n Raises:\n ValueError: If any input variables are not used in the format string.\n \"\"\"\n dummy_inputs = dict.fromkeys(input_variables, \"foo\")\n super().format(format_string, **dummy_inputs)" }, { "class_start_lineno": 1, "class_end_lineno": 319, "func_start_lineno": 207, "func_end_lineno": 236, "func_code": "def check_valid_template(\n template: str, template_format: str, input_variables: list[str]\n) -> None:\n \"\"\"Check that template string is valid.\n\n Args:\n template: The template string.\n template_format: The template format. Should be one of \"f-string\" or \"jinja2\".\n input_variables: The input variables.\n\n Raises:\n ValueError: If the template format is not supported.\n ValueError: If the prompt schema is invalid.\n \"\"\"\n try:\n validator_func = DEFAULT_VALIDATOR_MAPPING[template_format]\n except KeyError as exc:\n msg = (\n f\"Invalid template format {template_format!r}, should be one of\"\n f\" {list(DEFAULT_FORMATTER_MAPPING)}.\"\n )\n raise ValueError(msg) from exc\n try:\n validator_func(template, input_variables)\n except (KeyError, IndexError) as exc:\n msg = (\n \"Invalid prompt schema; check for mismatched or missing input parameters\"\n f\" from {input_variables}.\"\n )\n raise ValueError(msg) from exc" }, { "class_start_lineno": 115, "class_end_lineno": 244, "func_start_lineno": 148, "func_end_lineno": 164, "func_code": " def template_is_valid(self) -> Self:\n \"\"\"Check that prefix, suffix, and input variables are consistent.\"\"\"\n if self.validate_template:\n check_valid_template(\n self.prefix + self.suffix,\n self.template_format,\n self.input_variables + list(self.partial_variables),\n )\n elif self.template_format or None:\n self.input_variables = [\n var\n for var in get_template_variables(\n self.prefix + self.suffix, self.template_format\n )\n if var not in self.partial_variables\n ]\n return self" } ]
[ "function_empty" ]
[ "langchain_core.utils.formatting.StrictFormatter.validate_input_variables", "langchain_core.prompts.string.check_valid_template", "langchain_core.prompts.few_shot.FewShotPromptTemplate.template_is_valid" ]
Python
2
2
{ "total_num": 16, "base_passed_num": 13 }
[ "langchain_core.libs.core.langchain_core.prompts.loading.load_prompt_from_config", "langchain_core.libs.core.langchain_core.prompts.loading.load_prompt" ]
langchain_core
[ "langchain_core/prompts/loading.py", "langchain_core/prompts/loading.py", "langchain_core/prompts/loading.py" ]
[ "libs/core/tests/unit_tests/prompts/test_loading.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 203, "func_start_lineno": 20, "func_end_lineno": 41, "func_code": "def load_prompt_from_config(config: dict) -> BasePromptTemplate:\n \"\"\"Load prompt from Config Dict.\n\n Args:\n config: Dict containing the prompt configuration.\n\n Returns:\n A PromptTemplate object.\n\n Raises:\n ValueError: If the prompt type is not supported.\n \"\"\"\n if \"_type\" not in config:\n logger.warning(\"No `_type` key found, defaulting to `prompt`.\")\n config_type = config.pop(\"_type\", \"prompt\")\n\n if config_type not in type_to_loader_dict:\n msg = f\"Loading {config_type} prompt not supported\"\n raise ValueError(msg)\n\n prompt_loader = type_to_loader_dict[config_type]\n return prompt_loader(config)" }, { "class_start_lineno": 1, "class_end_lineno": 203, "func_start_lineno": 141, "func_end_lineno": 163, "func_code": "def load_prompt(\n path: Union[str, Path], encoding: Optional[str] = None\n) -> BasePromptTemplate:\n \"\"\"Unified method for loading a prompt from LangChainHub or local fs.\n\n Args:\n path: Path to the prompt file.\n encoding: Encoding of the file. Defaults to None.\n\n Returns:\n A PromptTemplate object.\n\n Raises:\n RuntimeError: If the path is a Lang Chain Hub path.\n \"\"\"\n if isinstance(path, str) and path.startswith(\"lc://\"):\n msg = (\n \"Loading from the deprecated github-based Hub is no longer supported. \"\n \"Please use the new LangChain Hub at https://smith.langchain.com/hub \"\n \"instead.\"\n )\n raise RuntimeError(msg)\n return _load_prompt_from_file(path, encoding)" }, { "class_start_lineno": 1, "class_end_lineno": 203, "func_start_lineno": 99, "func_end_lineno": 118, "func_code": "def _load_few_shot_prompt(config: dict) -> FewShotPromptTemplate:\n \"\"\"Load the \"few shot\" prompt from the config.\"\"\"\n # Load the suffix and prefix templates.\n config = _load_template(\"suffix\", config)\n config = _load_template(\"prefix\", config)\n # Load the example prompt.\n if \"example_prompt_path\" in config:\n if \"example_prompt\" in config:\n msg = (\n \"Only one of example_prompt and example_prompt_path should \"\n \"be specified.\"\n )\n raise ValueError(msg)\n config[\"example_prompt\"] = load_prompt(config.pop(\"example_prompt_path\"))\n else:\n config[\"example_prompt\"] = load_prompt_from_config(config[\"example_prompt\"])\n # Load the examples.\n config = _load_examples(config)\n config = _load_output_parser(config)\n return FewShotPromptTemplate(**config)" } ]
[ "function_empty" ]
[ "langchain_core.prompts.loading.load_prompt_from_config", "langchain_core.prompts.loading.load_prompt", "langchain_core.prompts.loading._load_few_shot_prompt" ]
Python
2
2
{ "total_num": 10, "base_passed_num": 0 }
[ "langchain_core.libs.core.langchain_core.runnables.base.RunnableLambda::deps", "langchain_core.libs.core.langchain_core.beta.runnables.context.config_with_context", "langchain_core.libs.core.langchain_core.runnables.config.ensure_config", "langchain_core.libs.core.langchain_core.runnables.config.patch_config" ]
langchain_core
[ "langchain_core/runnables/base.py", "langchain_core/runnables/base.py", "langchain_core/beta/runnables/context.py", "langchain_core/runnables/config.py", "langchain_core/runnables/config.py" ]
[ "libs/core/tests/unit_tests/prompts/test_structured.py" ]
[ { "class_start_lineno": 4230, "class_end_lineno": 4967, "func_start_lineno": 4471, "func_end_lineno": 4491, "func_code": " def deps(self) -> list[Runnable]:\n \"\"\"The dependencies of this Runnable.\n\n Returns:\n The dependencies of this Runnable. If the function has nonlocal\n variables that are Runnables, they are considered dependencies.\n \"\"\"\n if hasattr(self, \"func\"):\n objects = get_function_nonlocals(self.func)\n elif hasattr(self, \"afunc\"):\n objects = get_function_nonlocals(self.afunc)\n else:\n objects = []\n\n deps: list[Runnable] = []\n for obj in objects:\n if isinstance(obj, Runnable):\n deps.append(obj)\n elif isinstance(getattr(obj, \"__self__\", None), Runnable):\n deps.append(obj.__self__)\n return deps" }, { "class_start_lineno": 4230, "class_end_lineno": 4967, "func_start_lineno": 4494, "func_end_lineno": 4497, "func_code": " def config_specs(self) -> list[ConfigurableFieldSpec]:\n return get_unique_config_specs(\n spec for dep in self.deps for spec in dep.config_specs\n )" }, { "class_start_lineno": 1, "class_end_lineno": 401, "func_start_lineno": 140, "func_end_lineno": 153, "func_code": "def config_with_context(\n config: RunnableConfig,\n steps: list[Runnable],\n) -> RunnableConfig:\n \"\"\"Patch a runnable config with context getters and setters.\n\n Args:\n config: The runnable config.\n steps: The runnable steps.\n\n Returns:\n The patched runnable config.\n \"\"\"\n return _config_with_context(config, steps, _setter, _getter, threading.Event)" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 149, "func_end_lineno": 199, "func_code": "def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:\n \"\"\"Ensure that a config is a dict with all keys present.\n\n Args:\n config (Optional[RunnableConfig], optional): The config to ensure.\n Defaults to None.\n\n Returns:\n RunnableConfig: The ensured config.\n \"\"\"\n empty = RunnableConfig(\n tags=[],\n metadata={},\n callbacks=None,\n recursion_limit=DEFAULT_RECURSION_LIMIT,\n configurable={},\n )\n if var_config := var_child_runnable_config.get():\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in var_config.items()\n if v is not None\n },\n )\n )\n if config is not None:\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in config.items()\n if v is not None and k in CONFIG_KEYS\n },\n )\n )\n if config is not None:\n for k, v in config.items():\n if k not in CONFIG_KEYS and v is not None:\n empty[\"configurable\"][k] = v\n for key, value in empty.get(\"configurable\", {}).items():\n if (\n not key.startswith(\"__\")\n and isinstance(value, (str, int, float, bool))\n and key not in empty[\"metadata\"]\n ):\n empty[\"metadata\"][key] = value\n return empty" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 249, "func_end_lineno": 292, "func_code": "def patch_config(\n config: Optional[RunnableConfig],\n *,\n callbacks: Optional[BaseCallbackManager] = None,\n recursion_limit: Optional[int] = None,\n max_concurrency: Optional[int] = None,\n run_name: Optional[str] = None,\n configurable: Optional[dict[str, Any]] = None,\n) -> RunnableConfig:\n \"\"\"Patch a config with new values.\n\n Args:\n config (Optional[RunnableConfig]): The config to patch.\n callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.\n Defaults to None.\n recursion_limit (Optional[int], 
optional): The recursion limit to set.\n Defaults to None.\n max_concurrency (Optional[int], optional): The max concurrency to set.\n Defaults to None.\n run_name (Optional[str], optional): The run name to set. Defaults to None.\n configurable (Optional[Dict[str, Any]], optional): The configurable to set.\n Defaults to None.\n\n Returns:\n RunnableConfig: The patched config.\n \"\"\"\n config = ensure_config(config)\n if callbacks is not None:\n # If we're replacing callbacks, we need to unset run_name\n # As that should apply only to the same run as the original callbacks\n config[\"callbacks\"] = callbacks\n if \"run_name\" in config:\n del config[\"run_name\"]\n if \"run_id\" in config:\n del config[\"run_id\"]\n if recursion_limit is not None:\n config[\"recursion_limit\"] = recursion_limit\n if max_concurrency is not None:\n config[\"max_concurrency\"] = max_concurrency\n if run_name is not None:\n config[\"run_name\"] = run_name\n if configurable is not None:\n config[\"configurable\"] = {**config.get(\"configurable\", {}), **configurable}\n return config" } ]
[ "function_empty" ]
[ "langchain_core.runnables.base.RunnableLambda.deps", "langchain_core.runnables.base.RunnableLambda.config_specs", "langchain_core.beta.runnables.context.config_with_context", "langchain_core.runnables.config.ensure_config", "langchain_core.runnables.config.patch_config" ]
Python
4
4
{ "total_num": 4, "base_passed_num": 0 }
[ "langchain_core.libs.core.langchain_core.runnables.config.ensure_config", "langchain_core.libs.core.langchain_core.runnables.config.patch_config", "langchain_core.libs.core.langchain_core.callbacks.manager.handle_event", "langchain_core.libs.core.langchain_core.callbacks.manager.CallbackManagerForChainRun::on_chain_end" ]
langchain_core
[ "langchain_core/runnables/config.py", "langchain_core/runnables/config.py", "langchain_core/callbacks/manager.py", "langchain_core/callbacks/manager.py" ]
[ "libs/core/tests/unit_tests/runnables/test_context.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 149, "func_end_lineno": 199, "func_code": "def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:\n \"\"\"Ensure that a config is a dict with all keys present.\n\n Args:\n config (Optional[RunnableConfig], optional): The config to ensure.\n Defaults to None.\n\n Returns:\n RunnableConfig: The ensured config.\n \"\"\"\n empty = RunnableConfig(\n tags=[],\n metadata={},\n callbacks=None,\n recursion_limit=DEFAULT_RECURSION_LIMIT,\n configurable={},\n )\n if var_config := var_child_runnable_config.get():\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in var_config.items()\n if v is not None\n },\n )\n )\n if config is not None:\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in config.items()\n if v is not None and k in CONFIG_KEYS\n },\n )\n )\n if config is not None:\n for k, v in config.items():\n if k not in CONFIG_KEYS and v is not None:\n empty[\"configurable\"][k] = v\n for key, value in empty.get(\"configurable\", {}).items():\n if (\n not key.startswith(\"__\")\n and isinstance(value, (str, int, float, bool))\n and key not in empty[\"metadata\"]\n ):\n empty[\"metadata\"][key] = value\n return empty" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 249, "func_end_lineno": 292, "func_code": "def patch_config(\n config: Optional[RunnableConfig],\n *,\n callbacks: Optional[BaseCallbackManager] = None,\n recursion_limit: Optional[int] = None,\n max_concurrency: Optional[int] = None,\n run_name: Optional[str] = None,\n configurable: Optional[dict[str, Any]] = None,\n) -> RunnableConfig:\n \"\"\"Patch a config with new values.\n\n Args:\n config (Optional[RunnableConfig]): The config to patch.\n callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.\n Defaults to None.\n recursion_limit (Optional[int], optional): The recursion limit to set.\n Defaults to None.\n max_concurrency (Optional[int], optional): The max concurrency to set.\n Defaults to None.\n run_name (Optional[str], optional): The run name to set. 
Defaults to None.\n configurable (Optional[Dict[str, Any]], optional): The configurable to set.\n Defaults to None.\n\n Returns:\n RunnableConfig: The patched config.\n \"\"\"\n config = ensure_config(config)\n if callbacks is not None:\n # If we're replacing callbacks, we need to unset run_name\n # As that should apply only to the same run as the original callbacks\n config[\"callbacks\"] = callbacks\n if \"run_name\" in config:\n del config[\"run_name\"]\n if \"run_id\" in config:\n del config[\"run_id\"]\n if recursion_limit is not None:\n config[\"recursion_limit\"] = recursion_limit\n if max_concurrency is not None:\n config[\"max_concurrency\"] = max_concurrency\n if run_name is not None:\n config[\"run_name\"] = run_name\n if configurable is not None:\n config[\"configurable\"] = {**config.get(\"configurable\", {}), **configurable}\n return config" }, { "class_start_lineno": 1, "class_end_lineno": 2606, "func_start_lineno": 236, "func_end_lineno": 312, "func_code": "def handle_event(\n handlers: list[BaseCallbackHandler],\n event_name: str,\n ignore_condition_name: Optional[str],\n *args: Any,\n **kwargs: Any,\n) -> None:\n \"\"\"Generic event handler for CallbackManager.\n\n Note: This function is used by LangServe to handle events.\n\n Args:\n handlers: The list of handlers that will handle the event.\n event_name: The name of the event (e.g., \"on_llm_start\").\n ignore_condition_name: Name of the attribute defined on handler\n that if True will cause the handler to be skipped for the given event.\n *args: The arguments to pass to the event handler.\n **kwargs: The keyword arguments to pass to the event handler\n \"\"\"\n coros: list[Coroutine[Any, Any, Any]] = []\n\n try:\n message_strings: Optional[list[str]] = None\n for handler in handlers:\n try:\n if ignore_condition_name is None or not getattr(\n handler, ignore_condition_name\n ):\n event = getattr(handler, event_name)(*args, **kwargs)\n if asyncio.iscoroutine(event):\n coros.append(event)\n except NotImplementedError as e:\n if event_name == \"on_chat_model_start\":\n if message_strings is None:\n message_strings = [get_buffer_string(m) for m in args[1]]\n handle_event(\n [handler],\n \"on_llm_start\",\n \"ignore_llm\",\n args[0],\n message_strings,\n *args[2:],\n **kwargs,\n )\n else:\n handler_name = handler.__class__.__name__\n logger.warning(\n f\"NotImplementedError in {handler_name}.{event_name}\"\n f\" callback: {repr(e)}\"\n )\n except Exception as e:\n logger.warning(\n f\"Error in {handler.__class__.__name__}.{event_name} callback:\"\n f\" {repr(e)}\"\n )\n if handler.raise_error:\n raise\n finally:\n if coros:\n try:\n # Raises RuntimeError if there is no current event loop.\n asyncio.get_running_loop()\n loop_running = True\n except RuntimeError:\n loop_running = False\n\n if loop_running:\n # If we try to submit this coroutine to the running loop\n # we end up in a deadlock, as we'd have gotten here from a\n # running coroutine, which we cannot interrupt to run this one.\n # The solution is to create a new loop in a new thread.\n with ThreadPoolExecutor(1) as executor:\n executor.submit(\n cast(Callable, copy_context().run), _run_coros, coros\n ).result()\n else:\n _run_coros(coros)" }, { "class_start_lineno": 817, "class_end_lineno": 900, "func_start_lineno": 820, "func_end_lineno": 836, "func_code": " def on_chain_end(self, outputs: Union[dict[str, Any], Any], **kwargs: Any) -> None:\n \"\"\"Run when chain ends running.\n\n Args:\n outputs (Union[Dict[str, Any], Any]): The outputs of the chain.\n **kwargs 
(Any): Additional keyword arguments.\n \"\"\"\n handle_event(\n self.handlers,\n \"on_chain_end\",\n \"ignore_chain\",\n outputs,\n run_id=self.run_id,\n parent_run_id=self.parent_run_id,\n tags=self.tags,\n **kwargs,\n )" } ]
[ "function_empty" ]
[ "langchain_core.runnables.config.ensure_config", "langchain_core.runnables.config.patch_config", "langchain_core.callbacks.manager.handle_event", "langchain_core.callbacks.manager.CallbackManagerForChainRun.on_chain_end" ]
Python
4
4
{ "total_num": 27, "base_passed_num": 0 }
[ "langchain_core.libs.core.langchain_core.runnables.config.ensure_config", "langchain_core.libs.core.langchain_core.runnables.config.patch_config", "langchain_core.libs.core.langchain_core.globals.get_llm_cache", "langchain_core.libs.core.langchain_core.language_models.llms.get_prompts" ]
langchain_core
[ "langchain_core/runnables/config.py", "langchain_core/runnables/config.py", "langchain_core/globals.py", "langchain_core/language_models/llms.py" ]
[ "libs/core/tests/unit_tests/runnables/test_fallbacks.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 149, "func_end_lineno": 199, "func_code": "def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:\n \"\"\"Ensure that a config is a dict with all keys present.\n\n Args:\n config (Optional[RunnableConfig], optional): The config to ensure.\n Defaults to None.\n\n Returns:\n RunnableConfig: The ensured config.\n \"\"\"\n empty = RunnableConfig(\n tags=[],\n metadata={},\n callbacks=None,\n recursion_limit=DEFAULT_RECURSION_LIMIT,\n configurable={},\n )\n if var_config := var_child_runnable_config.get():\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in var_config.items()\n if v is not None\n },\n )\n )\n if config is not None:\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in config.items()\n if v is not None and k in CONFIG_KEYS\n },\n )\n )\n if config is not None:\n for k, v in config.items():\n if k not in CONFIG_KEYS and v is not None:\n empty[\"configurable\"][k] = v\n for key, value in empty.get(\"configurable\", {}).items():\n if (\n not key.startswith(\"__\")\n and isinstance(value, (str, int, float, bool))\n and key not in empty[\"metadata\"]\n ):\n empty[\"metadata\"][key] = value\n return empty" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 249, "func_end_lineno": 292, "func_code": "def patch_config(\n config: Optional[RunnableConfig],\n *,\n callbacks: Optional[BaseCallbackManager] = None,\n recursion_limit: Optional[int] = None,\n max_concurrency: Optional[int] = None,\n run_name: Optional[str] = None,\n configurable: Optional[dict[str, Any]] = None,\n) -> RunnableConfig:\n \"\"\"Patch a config with new values.\n\n Args:\n config (Optional[RunnableConfig]): The config to patch.\n callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.\n Defaults to None.\n recursion_limit (Optional[int], optional): The recursion limit to set.\n Defaults to None.\n max_concurrency (Optional[int], optional): The max concurrency to set.\n Defaults to None.\n run_name (Optional[str], optional): The run name to set. 
Defaults to None.\n configurable (Optional[Dict[str, Any]], optional): The configurable to set.\n Defaults to None.\n\n Returns:\n RunnableConfig: The patched config.\n \"\"\"\n config = ensure_config(config)\n if callbacks is not None:\n # If we're replacing callbacks, we need to unset run_name\n # As that should apply only to the same run as the original callbacks\n config[\"callbacks\"] = callbacks\n if \"run_name\" in config:\n del config[\"run_name\"]\n if \"run_id\" in config:\n del config[\"run_id\"]\n if recursion_limit is not None:\n config[\"recursion_limit\"] = recursion_limit\n if max_concurrency is not None:\n config[\"max_concurrency\"] = max_concurrency\n if run_name is not None:\n config[\"run_name\"] = run_name\n if configurable is not None:\n config[\"configurable\"] = {**config.get(\"configurable\", {}), **configurable}\n return config" }, { "class_start_lineno": 1, "class_end_lineno": 222, "func_start_lineno": 186, "func_end_lineno": 222, "func_code": "def get_llm_cache() -> \"BaseCache\":\n \"\"\"Get the value of the `llm_cache` global setting.\n\n Returns:\n The value of the `llm_cache` global setting.\n \"\"\"\n try:\n import langchain # type: ignore[import]\n\n # We're about to run some deprecated code, don't report warnings from it.\n # The user called the correct (non-deprecated) code path and shouldn't get warnings.\n with warnings.catch_warnings():\n warnings.filterwarnings(\n \"ignore\",\n message=(\n \"Importing llm_cache from langchain root module is no longer supported\"\n ),\n )\n # N.B.: This is a workaround for an unfortunate quirk of Python's\n # module-level `__getattr__()` implementation:\n # https://github.com/langchain-ai/langchain/pull/11311#issuecomment-1743780004\n #\n # Remove it once `langchain.llm_cache` is no longer supported, and\n # once all users have migrated to using `set_llm_cache()` here.\n #\n # In the meantime, the `llm_cache` setting returns whichever of\n # its two backing sources is truthy (not `None` and non-empty),\n # or the old value if both are falsy. This accommodates users\n # who haven't migrated to using `set_llm_cache()` yet.\n # Those users are getting deprecation warnings directing them\n # to use `set_llm_cache()` when they import `langchain.llm_cache`.\n old_llm_cache = langchain.llm_cache\n except ImportError:\n old_llm_cache = None\n\n global _llm_cache\n return _llm_cache or old_llm_cache" }, { "class_start_lineno": 1, "class_end_lineno": 1547, "func_start_lineno": 151, "func_end_lineno": 184, "func_code": "def get_prompts(\n params: dict[str, Any],\n prompts: list[str],\n cache: Optional[Union[BaseCache, bool, None]] = None,\n) -> tuple[dict[int, list], str, list[int], list[str]]:\n \"\"\"Get prompts that are already cached.\n\n Args:\n params: Dictionary of parameters.\n prompts: List of prompts.\n cache: Cache object. Default is None.\n\n Returns:\n A tuple of existing prompts, llm_string, missing prompt indexes,\n and missing prompts.\n\n Raises:\n ValueError: If the cache is not set and cache is True.\n \"\"\"\n llm_string = str(sorted(params.items()))\n missing_prompts = []\n missing_prompt_idxs = []\n existing_prompts = {}\n\n llm_cache = _resolve_cache(cache)\n for i, prompt in enumerate(prompts):\n if llm_cache:\n cache_val = llm_cache.lookup(prompt, llm_string)\n if isinstance(cache_val, list):\n existing_prompts[i] = cache_val\n else:\n missing_prompts.append(prompt)\n missing_prompt_idxs.append(i)\n return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts" } ]
[ "function_empty" ]
[ "langchain_core.runnables.config.ensure_config", "langchain_core.runnables.config.patch_config", "langchain_core.globals.get_llm_cache", "langchain_core.language_models.llms.get_prompts" ]
Python
4
4
{ "total_num": 16, "base_passed_num": 2 }
[ "langchain_core.libs.core.langchain_core.runnables.graph.is_uuid", "langchain_core.libs.core.langchain_core.runnables.graph.node_data_str", "langchain_core.libs.core.langchain_core.runnables.graph.Graph::add_node", "langchain_core.libs.core.langchain_core.runnables.graph_ascii.AsciiCanvas::point", "langchain_core.libs.core.langchain_core.runnables.graph_ascii.AsciiCanvas::line" ]
langchain_core
[ "langchain_core/runnables/graph.py", "langchain_core/runnables/graph.py", "langchain_core/runnables/graph.py", "langchain_core/runnables/graph_ascii.py", "langchain_core/runnables/graph_ascii.py" ]
[ "libs/core/tests/unit_tests/runnables/test_graph.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 664, "func_start_lineno": 42, "func_end_lineno": 55, "func_code": "def is_uuid(value: str) -> bool:\n \"\"\"Check if a string is a valid UUID.\n\n Args:\n value: The string to check.\n\n Returns:\n True if the string is a valid UUID, False otherwise.\n \"\"\"\n try:\n UUID(value)\n except ValueError:\n return False\n return True" }, { "class_start_lineno": 1, "class_end_lineno": 664, "func_start_lineno": 178, "func_end_lineno": 196, "func_code": "def node_data_str(id: str, data: Union[type[BaseModel], RunnableType]) -> str:\n \"\"\"Convert the data of a node to a string.\n\n Args:\n id: The node id.\n data: The node data.\n\n Returns:\n A string representation of the data.\n \"\"\"\n from langchain_core.runnables.base import Runnable\n\n if not is_uuid(id):\n return id\n elif isinstance(data, Runnable):\n data_str = data.get_name()\n else:\n data_str = data.__name__\n return data_str if not data_str.startswith(\"Runnable\") else data_str[8:]" }, { "class_start_lineno": 256, "class_end_lineno": 636, "func_start_lineno": 313, "func_end_lineno": 339, "func_code": " def add_node(\n self,\n data: Union[type[BaseModel], RunnableType],\n id: Optional[str] = None,\n *,\n metadata: Optional[dict[str, Any]] = None,\n ) -> Node:\n \"\"\"Add a node to the graph and return it.\n\n Args:\n data: The data of the node.\n id: The id of the node. Defaults to None.\n metadata: Optional metadata for the node. Defaults to None.\n\n Returns:\n The node that was added to the graph.\n\n Raises:\n ValueError: If a node with the same id already exists.\n \"\"\"\n if id is not None and id in self.nodes:\n msg = f\"Node with id {id} already exists\"\n raise ValueError(msg)\n id = id or self.next_id()\n node = Node(id=id, data=data, metadata=metadata, name=node_data_str(id, data))\n self.nodes[node.id] = node\n return node" }, { "class_start_lineno": 39, "class_end_lineno": 157, "func_start_lineno": 64, "func_end_lineno": 85, "func_code": " def point(self, x: int, y: int, char: str) -> None:\n \"\"\"Create a point on ASCII canvas.\n\n Args:\n x (int): x coordinate. Should be >= 0 and < number of columns in\n the canvas.\n y (int): y coordinate. 
Should be >= 0 an < number of lines in the\n canvas.\n char (str): character to place in the specified point on the\n canvas.\n \"\"\"\n if len(char) != 1:\n msg = \"char should be a single character\"\n raise ValueError(msg)\n if x >= self.cols or x < 0:\n msg = \"x should be >= 0 and < number of columns\"\n raise ValueError(msg)\n if y >= self.lines or y < 0:\n msg = \"y should be >= 0 and < number of lines\"\n raise ValueError(msg)\n\n self.canvas[y][x] = char" }, { "class_start_lineno": 39, "class_end_lineno": 157, "func_start_lineno": 87, "func_end_lineno": 117, "func_code": " def line(self, x0: int, y0: int, x1: int, y1: int, char: str) -> None:\n \"\"\"Create a line on ASCII canvas.\n\n Args:\n x0 (int): x coordinate where the line should start.\n y0 (int): y coordinate where the line should start.\n x1 (int): x coordinate where the line should end.\n y1 (int): y coordinate where the line should end.\n char (str): character to draw the line with.\n \"\"\"\n if x0 > x1:\n x1, x0 = x0, x1\n y1, y0 = y0, y1\n\n dx = x1 - x0\n dy = y1 - y0\n\n if dx == 0 and dy == 0:\n self.point(x0, y0, char)\n elif abs(dx) >= abs(dy):\n for x in range(x0, x1 + 1):\n y = y0 if dx == 0 else y0 + int(round((x - x0) * dy / float(dx)))\n self.point(x, y, char)\n elif y0 < y1:\n for y in range(y0, y1 + 1):\n x = x0 if dy == 0 else x0 + int(round((y - y0) * dx / float(dy)))\n self.point(x, y, char)\n else:\n for y in range(y1, y0 + 1):\n x = x0 if dy == 0 else x1 + int(round((y - y1) * dx / float(dy)))\n self.point(x, y, char)" } ]
[ "function_empty", "Development" ]
[ "langchain_core.runnables.graph.is_uuid", "langchain_core.runnables.graph.node_data_str", "langchain_core.runnables.graph.Graph.add_node", "langchain_core.runnables.graph_ascii.AsciiCanvas.point", "langchain_core.runnables.graph_ascii.AsciiCanvas.line" ]
Python
4
5
{ "total_num": 11, "base_passed_num": 3 }
[ "langchain_core.libs.core.langchain_core.runnables.config.ensure_config", "langchain_core.libs.core.langchain_core.runnables.config.merge_configs", "langchain_core.libs.core.langchain_core.runnables.config.patch_config", "langchain_core.libs.core.langchain_core.runnables.base.RunnableLambda::invoke" ]
langchain_core
[ "langchain_core/runnables/config.py", "langchain_core/runnables/config.py", "langchain_core/runnables/config.py", "langchain_core/runnables/base.py", "langchain_core/runnables/base.py" ]
[ "libs/core/tests/unit_tests/runnables/test_history.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 149, "func_end_lineno": 199, "func_code": "def ensure_config(config: Optional[RunnableConfig] = None) -> RunnableConfig:\n \"\"\"Ensure that a config is a dict with all keys present.\n\n Args:\n config (Optional[RunnableConfig], optional): The config to ensure.\n Defaults to None.\n\n Returns:\n RunnableConfig: The ensured config.\n \"\"\"\n empty = RunnableConfig(\n tags=[],\n metadata={},\n callbacks=None,\n recursion_limit=DEFAULT_RECURSION_LIMIT,\n configurable={},\n )\n if var_config := var_child_runnable_config.get():\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in var_config.items()\n if v is not None\n },\n )\n )\n if config is not None:\n empty.update(\n cast(\n RunnableConfig,\n {\n k: v.copy() if k in COPIABLE_KEYS else v # type: ignore[attr-defined]\n for k, v in config.items()\n if v is not None and k in CONFIG_KEYS\n },\n )\n )\n if config is not None:\n for k, v in config.items():\n if k not in CONFIG_KEYS and v is not None:\n empty[\"configurable\"][k] = v\n for key, value in empty.get(\"configurable\", {}).items():\n if (\n not key.startswith(\"__\")\n and isinstance(value, (str, int, float, bool))\n and key not in empty[\"metadata\"]\n ):\n empty[\"metadata\"][key] = value\n return empty" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 295, "func_end_lineno": 358, "func_code": "def merge_configs(*configs: Optional[RunnableConfig]) -> RunnableConfig:\n \"\"\"Merge multiple configs into one.\n\n Args:\n *configs (Optional[RunnableConfig]): The configs to merge.\n\n Returns:\n RunnableConfig: The merged config.\n \"\"\"\n base: RunnableConfig = {}\n # Even though the keys aren't literals, this is correct\n # because both dicts are the same type\n for config in (ensure_config(c) for c in configs if c is not None):\n for key in config:\n if key == \"metadata\":\n base[key] = { # type: ignore\n **base.get(key, {}), # type: ignore\n **(config.get(key) or {}), # type: ignore\n }\n elif key == \"tags\":\n base[key] = sorted( # type: ignore\n set(base.get(key, []) + (config.get(key) or [])), # type: ignore\n )\n elif key == \"configurable\":\n base[key] = { # type: ignore\n **base.get(key, {}), # type: ignore\n **(config.get(key) or {}), # type: ignore\n }\n elif key == \"callbacks\":\n base_callbacks = base.get(\"callbacks\")\n these_callbacks = config[\"callbacks\"]\n # callbacks can be either None, list[handler] or manager\n # so merging two callbacks values has 6 cases\n if isinstance(these_callbacks, list):\n if base_callbacks is None:\n base[\"callbacks\"] = these_callbacks.copy()\n elif isinstance(base_callbacks, list):\n base[\"callbacks\"] = base_callbacks + these_callbacks\n else:\n # base_callbacks is a manager\n mngr = base_callbacks.copy()\n for callback in these_callbacks:\n mngr.add_handler(callback, inherit=True)\n base[\"callbacks\"] = mngr\n elif these_callbacks is not None:\n # these_callbacks is a manager\n if base_callbacks is None:\n base[\"callbacks\"] = these_callbacks.copy()\n elif isinstance(base_callbacks, list):\n mngr = these_callbacks.copy()\n for callback in base_callbacks:\n mngr.add_handler(callback, inherit=True)\n base[\"callbacks\"] = mngr\n else:\n # base_callbacks is also a manager\n base[\"callbacks\"] = base_callbacks.merge(these_callbacks)\n elif key == \"recursion_limit\":\n if config[\"recursion_limit\"] != DEFAULT_RECURSION_LIMIT:\n 
base[\"recursion_limit\"] = config[\"recursion_limit\"]\n elif key in COPIABLE_KEYS and config[key] is not None: # type: ignore[literal-required]\n base[key] = config[key].copy() # type: ignore[literal-required]\n else:\n base[key] = config[key] or base.get(key) # type: ignore\n return base" }, { "class_start_lineno": 1, "class_end_lineno": 593, "func_start_lineno": 249, "func_end_lineno": 292, "func_code": "def patch_config(\n config: Optional[RunnableConfig],\n *,\n callbacks: Optional[BaseCallbackManager] = None,\n recursion_limit: Optional[int] = None,\n max_concurrency: Optional[int] = None,\n run_name: Optional[str] = None,\n configurable: Optional[dict[str, Any]] = None,\n) -> RunnableConfig:\n \"\"\"Patch a config with new values.\n\n Args:\n config (Optional[RunnableConfig]): The config to patch.\n callbacks (Optional[BaseCallbackManager], optional): The callbacks to set.\n Defaults to None.\n recursion_limit (Optional[int], optional): The recursion limit to set.\n Defaults to None.\n max_concurrency (Optional[int], optional): The max concurrency to set.\n Defaults to None.\n run_name (Optional[str], optional): The run name to set. Defaults to None.\n configurable (Optional[Dict[str, Any]], optional): The configurable to set.\n Defaults to None.\n\n Returns:\n RunnableConfig: The patched config.\n \"\"\"\n config = ensure_config(config)\n if callbacks is not None:\n # If we're replacing callbacks, we need to unset run_name\n # As that should apply only to the same run as the original callbacks\n config[\"callbacks\"] = callbacks\n if \"run_name\" in config:\n del config[\"run_name\"]\n if \"run_id\" in config:\n del config[\"run_id\"]\n if recursion_limit is not None:\n config[\"recursion_limit\"] = recursion_limit\n if max_concurrency is not None:\n config[\"max_concurrency\"] = max_concurrency\n if run_name is not None:\n config[\"run_name\"] = run_name\n if configurable is not None:\n config[\"configurable\"] = {**config.get(\"configurable\", {}), **configurable}\n return config" }, { "class_start_lineno": 4230, "class_end_lineno": 4967, "func_start_lineno": 4696, "func_end_lineno": 4699, "func_code": " def _config(\n self, config: Optional[RunnableConfig], callable: Callable[..., Any]\n ) -> RunnableConfig:\n return ensure_config(config)" }, { "class_start_lineno": 4230, "class_end_lineno": 4967, "func_start_lineno": 4701, "func_end_lineno": 4732, "func_code": " def invoke(\n self,\n input: Input,\n config: Optional[RunnableConfig] = None,\n **kwargs: Optional[Any],\n ) -> Output:\n \"\"\"Invoke this Runnable synchronously.\n\n Args:\n input: The input to this Runnable.\n config: The config to use. Defaults to None.\n kwargs: Additional keyword arguments.\n\n Returns:\n The output of this Runnable.\n\n Raises:\n TypeError: If the Runnable is a coroutine function.\n \"\"\"\n if hasattr(self, \"func\"):\n return self._call_with_config(\n self._invoke,\n input,\n self._config(config, self.func),\n **kwargs,\n )\n else:\n msg = (\n \"Cannot invoke a coroutine function synchronously.\"\n \"Use `ainvoke` instead.\"\n )\n raise TypeError(msg)" } ]
[ "function_empty" ]
[ "langchain_core.runnables.config.ensure_config", "langchain_core.runnables.config.merge_configs", "langchain_core.runnables.config.patch_config", "langchain_core.runnables.base.RunnableLambda._config", "langchain_core.runnables.base.RunnableLambda.invoke" ]
Python
4
4
{ "total_num": 23, "base_passed_num": 4 }
[ "langchain_core.libs.core.langchain_core.utils.env.get_from_env", "langchain_core.libs.core.langchain_core.utils.env.get_from_dict_or_env" ]
langchain_core
[ "langchain_core/utils/env.py", "langchain_core/utils/env.py" ]
[ "libs/core/tests/unit_tests/utils/test_env.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 81, "func_start_lineno": 54, "func_end_lineno": 81, "func_code": "def get_from_env(key: str, env_key: str, default: Optional[str] = None) -> str:\n \"\"\"Get a value from a dictionary or an environment variable.\n\n Args:\n key: The key to look up in the dictionary.\n env_key: The environment variable to look up if the key is not\n in the dictionary.\n default: The default value to return if the key is not in the dictionary\n or the environment. Defaults to None.\n\n Returns:\n str: The value of the key.\n\n Raises:\n ValueError: If the key is not in the dictionary and no default value is\n provided or if the environment variable is not set.\n \"\"\"\n if env_key in os.environ and os.environ[env_key]:\n return os.environ[env_key]\n elif default is not None:\n return default\n else:\n msg = (\n f\"Did not find {key}, please add an environment variable\"\n f\" `{env_key}` which contains it, or pass\"\n f\" `{key}` as a named parameter.\"\n )\n raise ValueError(msg)" }, { "class_start_lineno": 1, "class_end_lineno": 81, "func_start_lineno": 24, "func_end_lineno": 51, "func_code": "def get_from_dict_or_env(\n data: dict[str, Any],\n key: Union[str, list[str]],\n env_key: str,\n default: Optional[str] = None,\n) -> str:\n \"\"\"Get a value from a dictionary or an environment variable.\n\n Args:\n data: The dictionary to look up the key in.\n key: The key to look up in the dictionary. This can be a list of keys to try\n in order.\n env_key: The environment variable to look up if the key is not\n in the dictionary.\n default: The default value to return if the key is not in the dictionary\n or the environment. Defaults to None.\n \"\"\"\n if isinstance(key, (list, tuple)):\n for k in key:\n if k in data and data[k]:\n return data[k]\n\n if isinstance(key, str) and key in data and data[key]:\n return data[key]\n\n key_for_err = key[0] if isinstance(key, (list, tuple)) else key\n\n return get_from_env(key_for_err, env_key, default=default)" } ]
[ "function_empty" ]
[ "langchain_core.utils.env.get_from_env", "langchain_core.utils.env.get_from_dict_or_env" ]
Python
2
2
{ "total_num": 1, "base_passed_num": 0 }
[ "langchain_core.libs.core.langchain_core.utils._merge.merge_lists", "langchain_core.libs.core.langchain_core.utils._merge.merge_dicts" ]
langchain_core
[ "langchain_core/utils/_merge.py", "langchain_core/utils/_merge.py" ]
[ "libs/core/tests/unit_tests/utils/test_utils.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 148, "func_start_lineno": 72, "func_end_lineno": 106, "func_code": "def merge_lists(left: Optional[list], *others: Optional[list]) -> Optional[list]:\n \"\"\"Add many lists, handling None.\n\n Args:\n left: The first list to merge.\n others: The other lists to merge.\n\n Returns:\n The merged list.\n \"\"\"\n merged = left.copy() if left is not None else None\n for other in others:\n if other is None:\n continue\n elif merged is None:\n merged = other.copy()\n else:\n for e in other:\n if isinstance(e, dict) and \"index\" in e and isinstance(e[\"index\"], int):\n to_merge = [\n i\n for i, e_left in enumerate(merged)\n if e_left[\"index\"] == e[\"index\"]\n ]\n if to_merge:\n # TODO: Remove this once merge_dict is updated with special\n # handling for 'type'.\n if \"type\" in e:\n e = {k: v for k, v in e.items() if k != \"type\"}\n merged[to_merge[0]] = merge_dicts(merged[to_merge[0]], e)\n else:\n merged.append(e)\n else:\n merged.append(e)\n return merged" }, { "class_start_lineno": 1, "class_end_lineno": 148, "func_start_lineno": 6, "func_end_lineno": 69, "func_code": "def merge_dicts(left: dict[str, Any], *others: dict[str, Any]) -> dict[str, Any]:\n \"\"\"Merge many dicts, handling specific scenarios where a key exists in both\n dictionaries but has a value of None in 'left'. In such cases, the method uses the\n value from 'right' for that key in the merged dictionary.\n\n Args:\n left: The first dictionary to merge.\n others: The other dictionaries to merge.\n\n Returns:\n The merged dictionary.\n\n Raises:\n TypeError: If the key exists in both dictionaries but has a different type.\n TypeError: If the value has an unsupported type.\n\n Example:\n If left = {\"function_call\": {\"arguments\": None}} and\n right = {\"function_call\": {\"arguments\": \"{\\n\"}}\n then, after merging, for the key \"function_call\",\n the value from 'right' is used,\n resulting in merged = {\"function_call\": {\"arguments\": \"{\\n\"}}.\n \"\"\"\n merged = left.copy()\n for right in others:\n for right_k, right_v in right.items():\n if right_k not in merged or right_v is not None and merged[right_k] is None:\n merged[right_k] = right_v\n elif right_v is None:\n continue\n elif type(merged[right_k]) is not type(right_v):\n msg = (\n f'additional_kwargs[\"{right_k}\"] already exists in this message,'\n \" but with a different type.\"\n )\n raise TypeError(msg)\n elif isinstance(merged[right_k], str):\n # TODO: Add below special handling for 'type' key in 0.3 and remove\n # merge_lists 'type' logic.\n #\n # if right_k == \"type\":\n # if merged[right_k] == right_v:\n # continue\n # else:\n # raise ValueError(\n # \"Unable to merge. Two different values seen for special \"\n # f\"key 'type': {merged[right_k]} and {right_v}. 'type' \"\n # \"should either occur once or have the same value across \"\n # \"all dicts.\"\n # )\n merged[right_k] += right_v\n elif isinstance(merged[right_k], dict):\n merged[right_k] = merge_dicts(merged[right_k], right_v)\n elif isinstance(merged[right_k], list):\n merged[right_k] = merge_lists(merged[right_k], right_v)\n elif merged[right_k] == right_v:\n continue\n else:\n msg = (\n f\"Additional kwargs key {right_k} already exists in left dict and \"\n f\"value has unsupported type {type(merged[right_k])}.\"\n )\n raise TypeError(msg)\n return merged" } ]
[ "function_empty" ]
[ "langchain_core.utils._merge.merge_lists", "langchain_core.utils._merge.merge_dicts" ]
Python
2
2
{ "total_num": 47, "base_passed_num": 26 }
[ "finam.src.finam.data.grid_spec.NoGrid::compatible_with", "finam.src.finam.data.tools.mask.mask_specified", "finam.src.finam.data.tools.mask.masks_compatible", "finam.src.finam.data.tools.info.Info::accepts" ]
finam
[ "finam/data/grid_spec.py", "finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/mask.py", "finam/data/tools/info.py" ]
[ "tests/adapters/test_probe.py", "tests/adapters/test_time.py", "tests/components/test_debug.py", "tests/core/test_pull_based_component.py" ]
[ { "class_start_lineno": 30, "class_end_lineno": 90, "func_start_lineno": 71, "func_end_lineno": 87, "func_code": " def compatible_with(self, other, check_location=True):\n \"\"\"\n Check for compatibility with other Grid.\n\n Parameters\n ----------\n other : instance of Grid\n Other grid to compatibility with.\n check_location : bool, optional\n Whether to check location for equality, by default True\n\n Returns\n -------\n bool\n compatibility\n \"\"\"\n return isinstance(other, NoGrid) and self.data_shape == other.data_shape" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 364, "func_end_lineno": 378, "func_code": "def mask_specified(mask):\n \"\"\"\n Determine whether given mask selection indicates a masked array.\n\n Parameters\n ----------\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to check\n\n Returns\n -------\n bool\n False if mask is Mask.FLEX or Mask.NONE, True otherwise\n \"\"\"\n return not any(mask is val for val in list(Mask))" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 87, "func_end_lineno": 89, "func_code": " def mask(self):\n \"\"\"Mask or ndarray: data mask.\"\"\"\n return self._mask" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 243, "func_end_lineno": 285, "func_code": "def masks_compatible(\n this, incoming, incoming_donwstream, this_grid=None, incoming_grid=None\n):\n \"\"\"\n Check if an incoming mask is compatible with a given mask.\n\n Parameters\n ----------\n this : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n mask specification to check against\n incoming : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n incoming mask to check for compatibility\n incoming_donwstream : bool\n Whether the incoming mask is from downstream data\n this_grid : Grid or NoGrid or None, optional\n grid for first mask (to check shape and value equality)\n incoming_grid : Grid or NoGrid or None, optional\n grid for second mask (to check shape and value equality)\n\n Returns\n -------\n bool\n mask compatibility\n \"\"\"\n if incoming_donwstream:\n upstream, downstream = this, incoming\n up_grid, down_grid = this_grid, incoming_grid\n else:\n upstream, downstream = incoming, this\n up_grid, down_grid = incoming_grid, this_grid\n # None is incompatible\n if upstream is None:\n return False\n # Mask.FLEX accepts anything, Mask.NONE only Mask.NONE\n if not mask_specified(downstream):\n if not mask_specified(upstream):\n return downstream == Mask.FLEX or upstream == Mask.NONE\n return downstream == Mask.FLEX\n # if mask is specified, upstream mask must also be specified\n if not mask_specified(upstream):\n return False\n # if both mask given, compare them\n return masks_equal(downstream, upstream, down_grid, up_grid)" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 157, "func_end_lineno": 201, "func_code": " def accepts(self, incoming, fail_info, incoming_donwstream=False):\n \"\"\"\n Tests whether this info can accept/is compatible with an incoming info.\n\n Tested attributes are: \"grid\", \"mask\" and \"units\"\n\n Parameters\n ----------\n incoming : Info\n Incoming/source info to check. This is the info from upstream.\n fail_info : dict\n Dictionary that will be filled with failed properties; name: (source, target).\n incoming_donwstream : bool, optional\n Whether the incoming info is from downstream data. 
Default: False\n\n Returns\n -------\n bool\n Whether the incoming info is accepted\n \"\"\"\n if not isinstance(incoming, Info):\n fail_info[\"type\"] = (incoming.__class__, self.__class__)\n return False\n\n success = True\n if self.grid is not None and not self.grid.compatible_with(incoming.grid):\n if not (incoming_donwstream and incoming.grid is None):\n fail_info[\"grid\"] = (incoming.grid, self.grid)\n success = False\n\n if self.mask is not None and not masks_compatible(\n self.mask, incoming.mask, incoming_donwstream, self.grid, incoming.grid\n ):\n if not (incoming_donwstream and incoming.mask is None):\n fail_info[\"mask\"] = (incoming.mask, self.mask)\n success = False\n\n u1_none = (u1 := self.units) is None\n u2_none = (u2 := incoming.units) is None\n if not u1_none and (u2_none or not compatible_units(u1, u2)):\n if not (incoming_donwstream and u2_none):\n fail_info[\"units\"] = (u2, u1)\n success = False\n\n return success" } ]
[ "function_empty" ]
[ "finam.data.grid_spec.NoGrid.compatible_with", "finam.data.tools.mask.mask_specified", "finam.data.tools.info.Info.mask", "finam.data.tools.mask.masks_compatible", "finam.data.tools.info.Info.accepts" ]
Python
4
4
{ "total_num": 15, "base_passed_num": 2 }
[ "finam.src.finam.data.tools.mask.mask_specified", "finam.src.finam.data.tools.mask.from_compressed", "finam.src.finam.data.tools.mask.masks_compatible", "finam.src.finam.data.tools.info.Info::accepts" ]
finam
[ "finam/data/tools/mask.py", "finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/mask.py", "finam/data/tools/info.py" ]
[ "tests/adapters/test_regrid_mask.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 364, "func_end_lineno": 378, "func_code": "def mask_specified(mask):\n \"\"\"\n Determine whether given mask selection indicates a masked array.\n\n Parameters\n ----------\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to check\n\n Returns\n -------\n bool\n False if mask is Mask.FLEX or Mask.NONE, True otherwise\n \"\"\"\n return not any(mask is val for val in list(Mask))" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 151, "func_end_lineno": 205, "func_code": "def from_compressed(xdata, shape, order=\"C\", mask=None, **kwargs):\n \"\"\"\n Fill a (masked) array following a given mask or shape with the provided data.\n\n This will only create a masked array if kwargs are given (especially a mask).\n Otherwise this is simply reshaping the given data.\n Filling is performed in the given array order.\n\n Parameters\n ----------\n data : :class:`pint.Quantity` or :class:`numpy.ndarray` or :class:`numpy.ma.MaskedArray`\n The reference object input.\n shape : str\n shape argument for :any:`numpy.reshape`\n order : str\n order argument for :any:`numpy.reshape`\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to use\n **kwargs\n keyword arguments forwarded to :any:`numpy.ma.array`\n\n Returns\n -------\n :class:`pint.Quantity` or :class:`numpy.ndarray` or :class:`numpy.ma.MaskedArray`\n New object with the desired shape and same type as input.\n Units will be taken from the input if present.\n Will only be a masked array if kwargs are given.\n\n See also\n --------\n to_compressed:\n Inverse operation.\n :any:`numpy.ma.array`:\n Routine consuming kwargs to create a masked array.\n :any:`numpy.reshape`:\n Equivalent routine if no mask is provided.\n\n Notes\n -----\n If both `mask` and `shape` are given, they need to match in size.\n \"\"\"\n if mask is None or mask is np.ma.nomask or not mask_specified(mask):\n if kwargs and mask is Mask.NONE:\n msg = \"from_compressed: Can't create masked array with mask=Mask.NONE\"\n raise FinamDataError(msg)\n data = np.reshape(xdata, shape, order=order)\n return to_masked(data, **kwargs) if kwargs or mask is np.ma.nomask else data\n if is_quantified(xdata):\n # pylint: disable-next=unexpected-keyword-arg\n data = quantify(np.empty_like(xdata, shape=np.prod(shape)), xdata.units)\n else:\n # pylint: disable-next=unexpected-keyword-arg\n data = np.empty_like(xdata, shape=np.prod(shape))\n data[np.logical_not(np.ravel(mask, order=order))] = xdata\n return to_masked(np.reshape(data, shape, order=order), mask=mask, **kwargs)" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 87, "func_end_lineno": 89, "func_code": " def mask(self):\n \"\"\"Mask or ndarray: data mask.\"\"\"\n return self._mask" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 243, "func_end_lineno": 285, "func_code": "def masks_compatible(\n this, incoming, incoming_donwstream, this_grid=None, incoming_grid=None\n):\n \"\"\"\n Check if an incoming mask is compatible with a given mask.\n\n Parameters\n ----------\n this : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n mask specification to check against\n incoming : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n incoming mask to check for compatibility\n incoming_donwstream : bool\n Whether the incoming mask is from downstream data\n this_grid : Grid or NoGrid or None, optional\n 
grid for first mask (to check shape and value equality)\n incoming_grid : Grid or NoGrid or None, optional\n grid for second mask (to check shape and value equality)\n\n Returns\n -------\n bool\n mask compatibility\n \"\"\"\n if incoming_donwstream:\n upstream, downstream = this, incoming\n up_grid, down_grid = this_grid, incoming_grid\n else:\n upstream, downstream = incoming, this\n up_grid, down_grid = incoming_grid, this_grid\n # None is incompatible\n if upstream is None:\n return False\n # Mask.FLEX accepts anything, Mask.NONE only Mask.NONE\n if not mask_specified(downstream):\n if not mask_specified(upstream):\n return downstream == Mask.FLEX or upstream == Mask.NONE\n return downstream == Mask.FLEX\n # if mask is specified, upstream mask must also be specified\n if not mask_specified(upstream):\n return False\n # if both mask given, compare them\n return masks_equal(downstream, upstream, down_grid, up_grid)" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 157, "func_end_lineno": 201, "func_code": " def accepts(self, incoming, fail_info, incoming_donwstream=False):\n \"\"\"\n Tests whether this info can accept/is compatible with an incoming info.\n\n Tested attributes are: \"grid\", \"mask\" and \"units\"\n\n Parameters\n ----------\n incoming : Info\n Incoming/source info to check. This is the info from upstream.\n fail_info : dict\n Dictionary that will be filled with failed properties; name: (source, target).\n incoming_donwstream : bool, optional\n Whether the incoming info is from downstream data. Default: False\n\n Returns\n -------\n bool\n Whether the incoming info is accepted\n \"\"\"\n if not isinstance(incoming, Info):\n fail_info[\"type\"] = (incoming.__class__, self.__class__)\n return False\n\n success = True\n if self.grid is not None and not self.grid.compatible_with(incoming.grid):\n if not (incoming_donwstream and incoming.grid is None):\n fail_info[\"grid\"] = (incoming.grid, self.grid)\n success = False\n\n if self.mask is not None and not masks_compatible(\n self.mask, incoming.mask, incoming_donwstream, self.grid, incoming.grid\n ):\n if not (incoming_donwstream and incoming.mask is None):\n fail_info[\"mask\"] = (incoming.mask, self.mask)\n success = False\n\n u1_none = (u1 := self.units) is None\n u2_none = (u2 := incoming.units) is None\n if not u1_none and (u2_none or not compatible_units(u1, u2)):\n if not (incoming_donwstream and u2_none):\n fail_info[\"units\"] = (u2, u1)\n success = False\n\n return success" } ]
[ "function_empty" ]
[ "finam.data.tools.mask.mask_specified", "finam.data.tools.mask.from_compressed", "finam.data.tools.info.Info.mask", "finam.data.tools.mask.masks_compatible", "finam.data.tools.info.Info.accepts" ]
Python
4
4
{ "total_num": 6, "base_passed_num": 0 }
[ "finam.src.finam.data.tools.mask.mask_specified", "finam.src.finam.data.tools.mask.masks_compatible", "finam.src.finam.data.tools.info.Info::accepts" ]
finam
[ "finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/mask.py", "finam/data/tools/info.py" ]
[ "tests/adapters/test_stats.py", "tests/components/test_simplex_noise.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 364, "func_end_lineno": 378, "func_code": "def mask_specified(mask):\n \"\"\"\n Determine whether given mask selection indicates a masked array.\n\n Parameters\n ----------\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to check\n\n Returns\n -------\n bool\n False if mask is Mask.FLEX or Mask.NONE, True otherwise\n \"\"\"\n return not any(mask is val for val in list(Mask))" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 87, "func_end_lineno": 89, "func_code": " def mask(self):\n \"\"\"Mask or ndarray: data mask.\"\"\"\n return self._mask" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 243, "func_end_lineno": 285, "func_code": "def masks_compatible(\n this, incoming, incoming_donwstream, this_grid=None, incoming_grid=None\n):\n \"\"\"\n Check if an incoming mask is compatible with a given mask.\n\n Parameters\n ----------\n this : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n mask specification to check against\n incoming : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n incoming mask to check for compatibility\n incoming_donwstream : bool\n Whether the incoming mask is from downstream data\n this_grid : Grid or NoGrid or None, optional\n grid for first mask (to check shape and value equality)\n incoming_grid : Grid or NoGrid or None, optional\n grid for second mask (to check shape and value equality)\n\n Returns\n -------\n bool\n mask compatibility\n \"\"\"\n if incoming_donwstream:\n upstream, downstream = this, incoming\n up_grid, down_grid = this_grid, incoming_grid\n else:\n upstream, downstream = incoming, this\n up_grid, down_grid = incoming_grid, this_grid\n # None is incompatible\n if upstream is None:\n return False\n # Mask.FLEX accepts anything, Mask.NONE only Mask.NONE\n if not mask_specified(downstream):\n if not mask_specified(upstream):\n return downstream == Mask.FLEX or upstream == Mask.NONE\n return downstream == Mask.FLEX\n # if mask is specified, upstream mask must also be specified\n if not mask_specified(upstream):\n return False\n # if both mask given, compare them\n return masks_equal(downstream, upstream, down_grid, up_grid)" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 157, "func_end_lineno": 201, "func_code": " def accepts(self, incoming, fail_info, incoming_donwstream=False):\n \"\"\"\n Tests whether this info can accept/is compatible with an incoming info.\n\n Tested attributes are: \"grid\", \"mask\" and \"units\"\n\n Parameters\n ----------\n incoming : Info\n Incoming/source info to check. This is the info from upstream.\n fail_info : dict\n Dictionary that will be filled with failed properties; name: (source, target).\n incoming_donwstream : bool, optional\n Whether the incoming info is from downstream data. 
Default: False\n\n Returns\n -------\n bool\n Whether the incoming info is accepted\n \"\"\"\n if not isinstance(incoming, Info):\n fail_info[\"type\"] = (incoming.__class__, self.__class__)\n return False\n\n success = True\n if self.grid is not None and not self.grid.compatible_with(incoming.grid):\n if not (incoming_donwstream and incoming.grid is None):\n fail_info[\"grid\"] = (incoming.grid, self.grid)\n success = False\n\n if self.mask is not None and not masks_compatible(\n self.mask, incoming.mask, incoming_donwstream, self.grid, incoming.grid\n ):\n if not (incoming_donwstream and incoming.mask is None):\n fail_info[\"mask\"] = (incoming.mask, self.mask)\n success = False\n\n u1_none = (u1 := self.units) is None\n u2_none = (u2 := incoming.units) is None\n if not u1_none and (u2_none or not compatible_units(u1, u2)):\n if not (incoming_donwstream and u2_none):\n fail_info[\"units\"] = (u2, u1)\n success = False\n\n return success" } ]
[ "function_empty" ]
[ "finam.data.tools.mask.mask_specified", "finam.data.tools.info.Info.mask", "finam.data.tools.mask.masks_compatible", "finam.data.tools.info.Info.accepts" ]
Python
3
3
{ "total_num": 3, "base_passed_num": 0 }
[ "finam.src.finam.sdk.output.Output::push_info", "finam.src.finam.sdk.component.IOList::add", "finam.src.finam.data.grid_spec.NoGrid::compatible_with", "finam.src.finam.data.tools.mask.mask_specified", "finam.src.finam.data.tools.info.Info::accepts" ]
finam
[ "finam/sdk/output.py", "finam/sdk/output.py", "finam/sdk/component.py", "finam/data/grid_spec.py", "finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/info.py" ]
[ "tests/adapters/test_time_integration.py", "tests/components/test_noise.py", "tests/core/test_propagate_info.py", "tests/core/test_schedule.py" ]
[ { "class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 204, "func_end_lineno": 216, "func_code": " def push_info(self, info):\n \"\"\"Push data info into the output.\n\n Parameters\n ----------\n info : :class:`.Info`\n Delivered data info\n \"\"\"\n self.logger.trace(\"push info\")\n if not isinstance(info, Info):\n with ErrorLogger(self.logger):\n raise FinamMetaDataError(\"Metadata must be of type Info\")\n self._output_info = info" }, { "class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 28, "func_end_lineno": 53, "func_code": " def __init__(self, name=None, info=None, static=False, **info_kwargs):\n Loggable.__init__(self)\n self._targets = []\n self.data = []\n self._output_info = None\n self.base_logger_name = None\n if name is None:\n raise ValueError(\"Output: needs a name.\")\n self._name = name\n self._static = static\n\n if info_kwargs:\n if info is not None:\n raise ValueError(\"Output: can't use **kwargs in combination with info\")\n info = Info(**info_kwargs)\n if info is not None:\n self.push_info(info)\n\n self._connected_inputs = {}\n self._out_infos_exchanged = 0\n\n self._time = None\n self._mem_limit = None\n self._mem_location = None\n self._total_mem = 0\n self._mem_counter = 0" }, { "class_start_lineno": 572, "class_end_lineno": 711, "func_start_lineno": 602, "func_end_lineno": 635, "func_code": " def add(self, io=None, *, name=None, info=None, static=False, **info_kwargs):\n \"\"\"\n Add a new IO object either directly ob by attributes.\n\n Parameters\n ----------\n io : :class:`.IInput` or :class:`.IOutput`, optional\n IO object to add, by default None\n name : str, optional\n Name of the new IO object to add, by default None\n info : :class:`.Info`, optional\n Info of the new IO object to add, by default None\n static : bool, optional\n Whether the new IO object in static, by default False\n **info_kwargs\n Optional keyword arguments to instantiate an Info object\n\n Raises\n ------\n ValueError\n If io is not of the correct type.\n \"\"\"\n if self.frozen:\n raise ValueError(\"IO.add: list is frozen.\")\n io = (\n self.cls(name=name, info=info, static=static, **info_kwargs)\n if io is None\n else io\n )\n if not isinstance(io, self.icls):\n raise ValueError(f\"IO.add: {self.name} is not of type {self.iname}\")\n if io.name in self._dict:\n raise ValueError(f\"IO.add: {self.name} '{io.name}' already exists.\")\n self._dict[io.name] = io" }, { "class_start_lineno": 30, "class_end_lineno": 90, "func_start_lineno": 71, "func_end_lineno": 87, "func_code": " def compatible_with(self, other, check_location=True):\n \"\"\"\n Check for compatibility with other Grid.\n\n Parameters\n ----------\n other : instance of Grid\n Other grid to compatibility with.\n check_location : bool, optional\n Whether to check location for equality, by default True\n\n Returns\n -------\n bool\n compatibility\n \"\"\"\n return isinstance(other, NoGrid) and self.data_shape == other.data_shape" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 364, "func_end_lineno": 378, "func_code": "def mask_specified(mask):\n \"\"\"\n Determine whether given mask selection indicates a masked array.\n\n Parameters\n ----------\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to check\n\n Returns\n -------\n bool\n False if mask is Mask.FLEX or Mask.NONE, True otherwise\n \"\"\"\n return not any(mask is val for val in list(Mask))" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 87, 
"func_end_lineno": 89, "func_code": " def mask(self):\n \"\"\"Mask or ndarray: data mask.\"\"\"\n return self._mask" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 157, "func_end_lineno": 201, "func_code": " def accepts(self, incoming, fail_info, incoming_donwstream=False):\n \"\"\"\n Tests whether this info can accept/is compatible with an incoming info.\n\n Tested attributes are: \"grid\", \"mask\" and \"units\"\n\n Parameters\n ----------\n incoming : Info\n Incoming/source info to check. This is the info from upstream.\n fail_info : dict\n Dictionary that will be filled with failed properties; name: (source, target).\n incoming_donwstream : bool, optional\n Whether the incoming info is from downstream data. Default: False\n\n Returns\n -------\n bool\n Whether the incoming info is accepted\n \"\"\"\n if not isinstance(incoming, Info):\n fail_info[\"type\"] = (incoming.__class__, self.__class__)\n return False\n\n success = True\n if self.grid is not None and not self.grid.compatible_with(incoming.grid):\n if not (incoming_donwstream and incoming.grid is None):\n fail_info[\"grid\"] = (incoming.grid, self.grid)\n success = False\n\n if self.mask is not None and not masks_compatible(\n self.mask, incoming.mask, incoming_donwstream, self.grid, incoming.grid\n ):\n if not (incoming_donwstream and incoming.mask is None):\n fail_info[\"mask\"] = (incoming.mask, self.mask)\n success = False\n\n u1_none = (u1 := self.units) is None\n u2_none = (u2 := incoming.units) is None\n if not u1_none and (u2_none or not compatible_units(u1, u2)):\n if not (incoming_donwstream and u2_none):\n fail_info[\"units\"] = (u2, u1)\n success = False\n\n return success" } ]
[ "function_empty" ]
[ "finam.sdk.output.Output.push_info", "finam.sdk.output.Output.__init__", "finam.sdk.component.IOList.add", "finam.data.grid_spec.NoGrid.compatible_with", "finam.data.tools.mask.mask_specified", "finam.data.tools.info.Info.mask", "finam.data.tools.info.Info.accepts" ]
Python
5
5
{ "total_num": 50, "base_passed_num": 7 }
[ "finam.src.finam.sdk.component.IOList::add", "finam.src.finam.components.callback.CallbackComponent::_initialize", "finam.src.finam.sdk.output.Output::push_info", "finam.src.finam.data.grid_spec.NoGrid::compatible_with", "finam.src.finam.data.tools.info.Info::accepts" ]
finam
[ "finam/sdk/component.py", "finam/components/callback.py", "finam/sdk/output.py", "finam/sdk/output.py", "finam/data/grid_spec.py", "finam/data/tools/info.py" ]
[ "tests/components/test_callback.py" ]
[ { "class_start_lineno": 572, "class_end_lineno": 711, "func_start_lineno": 602, "func_end_lineno": 635, "func_code": " def add(self, io=None, *, name=None, info=None, static=False, **info_kwargs):\n \"\"\"\n Add a new IO object either directly ob by attributes.\n\n Parameters\n ----------\n io : :class:`.IInput` or :class:`.IOutput`, optional\n IO object to add, by default None\n name : str, optional\n Name of the new IO object to add, by default None\n info : :class:`.Info`, optional\n Info of the new IO object to add, by default None\n static : bool, optional\n Whether the new IO object in static, by default False\n **info_kwargs\n Optional keyword arguments to instantiate an Info object\n\n Raises\n ------\n ValueError\n If io is not of the correct type.\n \"\"\"\n if self.frozen:\n raise ValueError(\"IO.add: list is frozen.\")\n io = (\n self.cls(name=name, info=info, static=static, **info_kwargs)\n if io is None\n else io\n )\n if not isinstance(io, self.icls):\n raise ValueError(f\"IO.add: {self.name} is not of type {self.iname}\")\n if io.name in self._dict:\n raise ValueError(f\"IO.add: {self.name} '{io.name}' already exists.\")\n self._dict[io.name] = io" }, { "class_start_lineno": 12, "class_end_lineno": 129, "func_start_lineno": 90, "func_end_lineno": 101, "func_code": " def _initialize(self):\n for name, info in self._input_infos.items():\n info.time = self.time\n self.inputs.add(name=name, info=info)\n\n for name, info in self._output_infos.items():\n info.time = self.time\n self.outputs.add(name=name, info=info)\n\n pull_data = list(self._input_infos) if self._initial_pull else {}\n\n self.create_connector(pull_data=pull_data)" }, { "class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 204, "func_end_lineno": 216, "func_code": " def push_info(self, info):\n \"\"\"Push data info into the output.\n\n Parameters\n ----------\n info : :class:`.Info`\n Delivered data info\n \"\"\"\n self.logger.trace(\"push info\")\n if not isinstance(info, Info):\n with ErrorLogger(self.logger):\n raise FinamMetaDataError(\"Metadata must be of type Info\")\n self._output_info = info" }, { "class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 28, "func_end_lineno": 53, "func_code": " def __init__(self, name=None, info=None, static=False, **info_kwargs):\n Loggable.__init__(self)\n self._targets = []\n self.data = []\n self._output_info = None\n self.base_logger_name = None\n if name is None:\n raise ValueError(\"Output: needs a name.\")\n self._name = name\n self._static = static\n\n if info_kwargs:\n if info is not None:\n raise ValueError(\"Output: can't use **kwargs in combination with info\")\n info = Info(**info_kwargs)\n if info is not None:\n self.push_info(info)\n\n self._connected_inputs = {}\n self._out_infos_exchanged = 0\n\n self._time = None\n self._mem_limit = None\n self._mem_location = None\n self._total_mem = 0\n self._mem_counter = 0" }, { "class_start_lineno": 30, "class_end_lineno": 90, "func_start_lineno": 71, "func_end_lineno": 87, "func_code": " def compatible_with(self, other, check_location=True):\n \"\"\"\n Check for compatibility with other Grid.\n\n Parameters\n ----------\n other : instance of Grid\n Other grid to compatibility with.\n check_location : bool, optional\n Whether to check location for equality, by default True\n\n Returns\n -------\n bool\n compatibility\n \"\"\"\n return isinstance(other, NoGrid) and self.data_shape == other.data_shape" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 157, 
"func_end_lineno": 201, "func_code": " def accepts(self, incoming, fail_info, incoming_donwstream=False):\n \"\"\"\n Tests whether this info can accept/is compatible with an incoming info.\n\n Tested attributes are: \"grid\", \"mask\" and \"units\"\n\n Parameters\n ----------\n incoming : Info\n Incoming/source info to check. This is the info from upstream.\n fail_info : dict\n Dictionary that will be filled with failed properties; name: (source, target).\n incoming_donwstream : bool, optional\n Whether the incoming info is from downstream data. Default: False\n\n Returns\n -------\n bool\n Whether the incoming info is accepted\n \"\"\"\n if not isinstance(incoming, Info):\n fail_info[\"type\"] = (incoming.__class__, self.__class__)\n return False\n\n success = True\n if self.grid is not None and not self.grid.compatible_with(incoming.grid):\n if not (incoming_donwstream and incoming.grid is None):\n fail_info[\"grid\"] = (incoming.grid, self.grid)\n success = False\n\n if self.mask is not None and not masks_compatible(\n self.mask, incoming.mask, incoming_donwstream, self.grid, incoming.grid\n ):\n if not (incoming_donwstream and incoming.mask is None):\n fail_info[\"mask\"] = (incoming.mask, self.mask)\n success = False\n\n u1_none = (u1 := self.units) is None\n u2_none = (u2 := incoming.units) is None\n if not u1_none and (u2_none or not compatible_units(u1, u2)):\n if not (incoming_donwstream and u2_none):\n fail_info[\"units\"] = (u2, u1)\n success = False\n\n return success" } ]
[ "function_empty", "Development" ]
[ "finam.sdk.component.IOList.add", "finam.components.callback.CallbackComponent._initialize", "finam.sdk.output.Output.push_info", "finam.sdk.output.Output.__init__", "finam.data.grid_spec.NoGrid.compatible_with", "finam.data.tools.info.Info.accepts" ]
Python
4
5
{ "total_num": 1, "base_passed_num": 0 }
[ "finam.src.finam.sdk.output.Output::push_info", "finam.src.finam.sdk.component.IOList::add", "finam.src.finam.data.tools.mask.mask_specified", "finam.src.finam.data.tools.mask.masks_compatible", "finam.src.finam.data.tools.info.Info::accepts" ]
finam
[ "finam/sdk/output.py", "finam/sdk/output.py", "finam/sdk/component.py", "finam/data/tools/mask.py", "finam/data/tools/info.py", "finam/data/tools/mask.py", "finam/data/tools/info.py" ]
[ "tests/components/test_control.py", "tests/components/test_parametric.py", "tests/core/test_units.py" ]
[ { "class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 204, "func_end_lineno": 216, "func_code": " def push_info(self, info):\n \"\"\"Push data info into the output.\n\n Parameters\n ----------\n info : :class:`.Info`\n Delivered data info\n \"\"\"\n self.logger.trace(\"push info\")\n if not isinstance(info, Info):\n with ErrorLogger(self.logger):\n raise FinamMetaDataError(\"Metadata must be of type Info\")\n self._output_info = info" }, { "class_start_lineno": 25, "class_end_lineno": 461, "func_start_lineno": 28, "func_end_lineno": 53, "func_code": " def __init__(self, name=None, info=None, static=False, **info_kwargs):\n Loggable.__init__(self)\n self._targets = []\n self.data = []\n self._output_info = None\n self.base_logger_name = None\n if name is None:\n raise ValueError(\"Output: needs a name.\")\n self._name = name\n self._static = static\n\n if info_kwargs:\n if info is not None:\n raise ValueError(\"Output: can't use **kwargs in combination with info\")\n info = Info(**info_kwargs)\n if info is not None:\n self.push_info(info)\n\n self._connected_inputs = {}\n self._out_infos_exchanged = 0\n\n self._time = None\n self._mem_limit = None\n self._mem_location = None\n self._total_mem = 0\n self._mem_counter = 0" }, { "class_start_lineno": 572, "class_end_lineno": 711, "func_start_lineno": 602, "func_end_lineno": 635, "func_code": " def add(self, io=None, *, name=None, info=None, static=False, **info_kwargs):\n \"\"\"\n Add a new IO object either directly ob by attributes.\n\n Parameters\n ----------\n io : :class:`.IInput` or :class:`.IOutput`, optional\n IO object to add, by default None\n name : str, optional\n Name of the new IO object to add, by default None\n info : :class:`.Info`, optional\n Info of the new IO object to add, by default None\n static : bool, optional\n Whether the new IO object in static, by default False\n **info_kwargs\n Optional keyword arguments to instantiate an Info object\n\n Raises\n ------\n ValueError\n If io is not of the correct type.\n \"\"\"\n if self.frozen:\n raise ValueError(\"IO.add: list is frozen.\")\n io = (\n self.cls(name=name, info=info, static=static, **info_kwargs)\n if io is None\n else io\n )\n if not isinstance(io, self.icls):\n raise ValueError(f\"IO.add: {self.name} is not of type {self.iname}\")\n if io.name in self._dict:\n raise ValueError(f\"IO.add: {self.name} '{io.name}' already exists.\")\n self._dict[io.name] = io" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 364, "func_end_lineno": 378, "func_code": "def mask_specified(mask):\n \"\"\"\n Determine whether given mask selection indicates a masked array.\n\n Parameters\n ----------\n mask : :any:`Mask` value or valid boolean mask for :any:`MaskedArray`\n mask to check\n\n Returns\n -------\n bool\n False if mask is Mask.FLEX or Mask.NONE, True otherwise\n \"\"\"\n return not any(mask is val for val in list(Mask))" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 87, "func_end_lineno": 89, "func_code": " def mask(self):\n \"\"\"Mask or ndarray: data mask.\"\"\"\n return self._mask" }, { "class_start_lineno": 1, "class_end_lineno": 378, "func_start_lineno": 243, "func_end_lineno": 285, "func_code": "def masks_compatible(\n this, incoming, incoming_donwstream, this_grid=None, incoming_grid=None\n):\n \"\"\"\n Check if an incoming mask is compatible with a given mask.\n\n Parameters\n ----------\n this : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n mask specification to 
check against\n incoming : :any:`Mask` value or valid boolean mask for :any:`MaskedArray` or None\n incoming mask to check for compatibility\n incoming_donwstream : bool\n Whether the incoming mask is from downstream data\n this_grid : Grid or NoGrid or None, optional\n grid for first mask (to check shape and value equality)\n incoming_grid : Grid or NoGrid or None, optional\n grid for second mask (to check shape and value equality)\n\n Returns\n -------\n bool\n mask compatibility\n \"\"\"\n if incoming_donwstream:\n upstream, downstream = this, incoming\n up_grid, down_grid = this_grid, incoming_grid\n else:\n upstream, downstream = incoming, this\n up_grid, down_grid = incoming_grid, this_grid\n # None is incompatible\n if upstream is None:\n return False\n # Mask.FLEX accepts anything, Mask.NONE only Mask.NONE\n if not mask_specified(downstream):\n if not mask_specified(upstream):\n return downstream == Mask.FLEX or upstream == Mask.NONE\n return downstream == Mask.FLEX\n # if mask is specified, upstream mask must also be specified\n if not mask_specified(upstream):\n return False\n # if both mask given, compare them\n return masks_equal(downstream, upstream, down_grid, up_grid)" }, { "class_start_lineno": 22, "class_end_lineno": 248, "func_start_lineno": 157, "func_end_lineno": 201, "func_code": " def accepts(self, incoming, fail_info, incoming_donwstream=False):\n \"\"\"\n Tests whether this info can accept/is compatible with an incoming info.\n\n Tested attributes are: \"grid\", \"mask\" and \"units\"\n\n Parameters\n ----------\n incoming : Info\n Incoming/source info to check. This is the info from upstream.\n fail_info : dict\n Dictionary that will be filled with failed properties; name: (source, target).\n incoming_donwstream : bool, optional\n Whether the incoming info is from downstream data. Default: False\n\n Returns\n -------\n bool\n Whether the incoming info is accepted\n \"\"\"\n if not isinstance(incoming, Info):\n fail_info[\"type\"] = (incoming.__class__, self.__class__)\n return False\n\n success = True\n if self.grid is not None and not self.grid.compatible_with(incoming.grid):\n if not (incoming_donwstream and incoming.grid is None):\n fail_info[\"grid\"] = (incoming.grid, self.grid)\n success = False\n\n if self.mask is not None and not masks_compatible(\n self.mask, incoming.mask, incoming_donwstream, self.grid, incoming.grid\n ):\n if not (incoming_donwstream and incoming.mask is None):\n fail_info[\"mask\"] = (incoming.mask, self.mask)\n success = False\n\n u1_none = (u1 := self.units) is None\n u2_none = (u2 := incoming.units) is None\n if not u1_none and (u2_none or not compatible_units(u1, u2)):\n if not (incoming_donwstream and u2_none):\n fail_info[\"units\"] = (u2, u1)\n success = False\n\n return success" } ]
[ "function_empty" ]
[ "finam.sdk.output.Output.push_info", "finam.sdk.output.Output.__init__", "finam.sdk.component.IOList.add", "finam.data.tools.mask.mask_specified", "finam.data.tools.info.Info.mask", "finam.data.tools.mask.masks_compatible", "finam.data.tools.info.Info.accepts" ]
Python
5
5
{ "total_num": 16, "base_passed_num": 1 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.utils.stats.assert_is_square", "skfolio.src.skfolio.utils.stats.assert_is_symmetric", "skfolio.src.skfolio.utils.stats.cov_nearest" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/utils/stats.py", "skfolio/utils/stats.py", "skfolio/utils/stats.py" ]
[ "tests/test_distance/test_distance.py", "tests/test_metrics/test_scorer.py", "tests/test_moment/test_expected_returns/test_expected_returns.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 208, "func_end_lineno": 221, "func_code": "def assert_is_square(x: np.ndarray) -> None:\n \"\"\"Raises an error if the matrix is not square.\n\n Parameters\n ----------\n x : ndarray of shape (n, n)\n The matrix.\n\n Raises\n ------\n ValueError: if the matrix is not square.\n \"\"\"\n if x.ndim != 2 or x.shape[0] != x.shape[1]:\n raise ValueError(\"The matrix must be square\")" }, { "class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 224, "func_end_lineno": 238, "func_code": "def assert_is_symmetric(x: np.ndarray) -> None:\n \"\"\"Raises an error if the matrix is not symmetric.\n\n Parameters\n ----------\n x : ndarray of shape (n, m)\n The matrix.\n\n Raises\n ------\n ValueError: if the matrix is not symmetric.\n \"\"\"\n assert_is_square(x)\n if not np.allclose(x, x.T):\n raise ValueError(\"The matrix must be symmetric\")" }, { "class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 308, "func_end_lineno": 400, "func_code": "def cov_nearest(\n cov: np.ndarray,\n higham: bool = False,\n higham_max_iteration: int = 100,\n warn: bool = False,\n):\n \"\"\"Compute the nearest covariance matrix that is positive definite and with a\n cholesky decomposition than can be computed. The variance is left unchanged.\n A covariance matrix that is not positive definite often occurs in high\n dimensional problems. It can be due to multicollinearity, floating-point\n inaccuracies, or when the number of observations is smaller than the number of\n assets.\n\n First, it converts the covariance matrix to a correlation matrix.\n Then, it finds the nearest correlation matrix and converts it back to a covariance\n matrix using the initial standard deviation.\n\n Cholesky decomposition can fail for symmetric positive definite (SPD) matrix due\n to floating point error and inversely, Cholesky decomposition can success for\n non-SPD matrix. Therefore, we need to test for both. We always start by testing\n for Cholesky decomposition which is significantly faster than checking for positive\n eigenvalues.\n\n Parameters\n ----------\n cov : ndarray of shape (n, n)\n Covariance matrix.\n\n higham : bool, default=False\n If this is set to True, the Higham & Nick (2002) algorithm [1]_ is used,\n otherwise the eigenvalues are clipped to threshold above zeros (1e-13).\n The default (`False`) is to use the clipping method as the Higham & Nick\n algorithm can be slow for large datasets.\n\n higham_max_iteration : int, default=100\n Maximum number of iteration of the Higham & Nick (2002) algorithm.\n The default value is `100`.\n\n warn : bool, default=False\n If this is set to True, a user warning is emitted when the covariance matrix\n is not positive definite and replaced by the nearest. The default is False.\n\n Returns\n -------\n cov : ndarray\n The nearest covariance matrix.\n\n References\n ----------\n .. [1] \"Computing the nearest correlation matrix - a problem from finance\"\n IMA Journal of Numerical Analysis\n Higham & Nick (2002)\n \"\"\"\n assert_is_square(cov)\n assert_is_symmetric(cov)\n\n # Around 100 times faster than checking eigenvalues with np.linalg.eigh\n if is_cholesky_dec(cov) and is_positive_definite(cov):\n return cov\n\n if warn:\n warnings.warn(\n \"The covariance matrix is not positive definite. 
\"\n f\"The {'Higham' if higham else 'Clipping'} algorithm will be used to find \"\n \"the nearest positive definite covariance.\",\n stacklevel=2,\n )\n corr, std = cov_to_corr(cov)\n\n if higham:\n eps = np.finfo(np.float64).eps * 5\n diff = np.zeros(corr.shape)\n x = corr.copy()\n for _ in range(higham_max_iteration):\n x_adj = x - diff\n eig_vals, eig_vecs = np.linalg.eigh(x_adj)\n x = eig_vecs * np.maximum(eig_vals, eps) @ eig_vecs.T\n diff = x - x_adj\n np.fill_diagonal(x, 1)\n cov = corr_to_cov(x, std)\n if is_cholesky_dec(cov) and is_positive_definite(cov):\n break\n else:\n raise ValueError(\"Unable to find the nearest positive definite matrix\")\n else:\n eig_vals, eig_vecs = np.linalg.eigh(corr)\n # Clipping the eigenvalues with a value smaller than 1e-13 can cause scipy to\n # consider the matrix non-psd is some corner cases (see test/test_stats.py)\n x = eig_vecs * np.maximum(eig_vals, _CLIPPING_VALUE) @ eig_vecs.T\n x, _ = cov_to_corr(x)\n cov = corr_to_cov(x, std)\n\n return cov" } ]
[ "function_empty", "Development" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.utils.stats.assert_is_square", "skfolio.utils.stats.assert_is_symmetric", "skfolio.utils.stats.cov_nearest" ]
Python
4
5
{ "total_num": 26, "base_passed_num": 6 }
[ "skfolio.src.skfolio.distribution.copula._clayton._base_sample_scores", "skfolio.src.skfolio.distribution.copula._clayton._neg_log_likelihood", "skfolio.src.skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.src.skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
skfolio
[ "skfolio/distribution/copula/_clayton.py", "skfolio/distribution/copula/_clayton.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_utils.py" ]
[ "tests/test_distribution/test_copula/test_clayton.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 539, "func_start_lineno": 416, "func_end_lineno": 448, "func_code": "def _base_sample_scores(X: np.ndarray, theta: float) -> np.ndarray:\n r\"\"\"Compute the log-likelihood of each sample (log-pdf) under the bivariate Clayton\n copula.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n Bivariate samples `(u, v)`, with each component in [0,1].\n\n theta : float\n The dependence parameter (must be greater than 0).\n\n Returns\n -------\n logpdf : ndarray of shape (n_observations,)\n Log-likelihood values for each observation.\n\n Raises\n ------\n ValueError\n If theta is not greater than 0.\n \"\"\"\n if theta <= 0:\n raise ValueError(\"Theta must be greater than 1 for the Clayton copula.\")\n\n x, y = np.log(X).T\n\n log_density = (\n np.log1p(theta)\n - (2.0 + 1.0 / theta) * np.log1p(np.expm1(-theta * x) + np.expm1(-theta * y))\n - (1.0 + theta) * (x + y)\n )\n return log_density" }, { "class_start_lineno": 1, "class_end_lineno": 539, "func_start_lineno": 395, "func_end_lineno": 413, "func_code": "def _neg_log_likelihood(theta: float, X: np.ndarray) -> float:\n \"\"\"Negative log-likelihood function for the Clayton copula.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n theta : float\n The dependence parameter (must be greater than 0).\n\n Returns\n -------\n value : float\n The negative log-likelihood value.\n \"\"\"\n return -np.sum(_base_sample_scores(X=X, theta=theta))" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 341, "func_end_lineno": 380, "func_code": "def _apply_copula_rotation(X: npt.ArrayLike, rotation: CopulaRotation) -> np.ndarray:\n r\"\"\"Apply a bivariate copula rotation using the standard (clockwise) convention.\n\n The transformations are defined as follows:\n\n - `CopulaRotation.R0` (0°): :math:`(u, v) \\mapsto (u, v)`\n - `CopulaRotation.R90` (90°): :math:`(u, v) \\mapsto (v,\\, 1 - u)`\n - `CopulaRotation.R180` (180°): :math:`(u, v) \\mapsto (1 - u,\\, 1 - v)`\n - `CopulaRotation.R270` (270°): :math:`(u, v) \\mapsto (1 - v,\\, u)`\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation.\n\n rotation : CopulaRotation\n The rotation to apply to the copula (default is no rotation).\n\n Returns\n -------\n rotated_X: ndarray of shape (n_observations, 2)\n The rotated data array.\n \"\"\"\n match rotation:\n case CopulaRotation.R0:\n # No rotation\n pass\n case CopulaRotation.R90:\n # (u, v) -> (v, 1 - u)\n X = np.column_stack([X[:, 1], 1.0 - X[:, 0]])\n case CopulaRotation.R180:\n # (u, v) -> (1 - u, 1 - v)\n X = 1.0 - X\n case CopulaRotation.R270:\n # (u, v) -> (1 - v, u)\n X = np.column_stack([1.0 - X[:, 1], X[:, 0]])\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return X" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 452, "func_end_lineno": 509, "func_code": "def _apply_rotation_partial_derivatives(\n func: Callable,\n X: np.ndarray,\n rotation: CopulaRotation,\n first_margin: bool,\n **kwargs,\n) -> np.ndarray:\n \"\"\"\n Apply a copula rotation to X and compute the corresponding partial derivatives.\n\n This function rotates the data X using the specified rotation and then computes\n 
the partial derivative (h-function) using the provided function. The result is then\n adjusted according to the rotation and the margin of interest.\n\n Parameters\n ----------\n func : Callable\n A function that computes the partial derivative (h-function) given X, the\n margin, and any additional keyword arguments.\n\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs.\n\n rotation : CopulaRotation\n The rotation to apply.\n\n first_margin : bool\n If True, compute the partial derivative with respect to the first margin;\n otherwise, compute it with respect to the second margin.\n\n **kwargs\n Additional keyword arguments to pass to the partial derivative function.\n\n Returns\n -------\n z : ndarray of shape (n_observations,)\n The transformed partial derivative values after applying the rotation.\n \"\"\"\n rotated_X = _apply_copula_rotation(X, rotation=rotation)\n\n match rotation:\n case CopulaRotation.R0:\n z = func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R90:\n if first_margin:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case CopulaRotation.R180:\n z = 1 - func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R270:\n if first_margin:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return z" } ]
[ "function_empty" ]
[ "skfolio.distribution.copula._clayton._base_sample_scores", "skfolio.distribution.copula._clayton._neg_log_likelihood", "skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
Python
4
4
{ "total_num": 69, "base_passed_num": 4 }
[ "skfolio.src.skfolio.distribution.copula._gaussian._base_sample_scores", "skfolio.src.skfolio.distribution.copula._gaussian._neg_log_likelihood" ]
skfolio
[ "skfolio/distribution/copula/_gaussian.py", "skfolio/distribution/copula/_gaussian.py" ]
[ "tests/test_distribution/test_copula/test_gaussian.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 407, "func_start_lineno": 373, "func_end_lineno": 407, "func_code": "def _base_sample_scores(X: np.ndarray, rho: float) -> np.ndarray:\n \"\"\"Compute the log-likelihood of each sample (log-pdf) under the bivariate\n Gaussian copula model.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n rho : float\n Gaussian copula parameter.\n\n Returns\n -------\n density : ndarray of shape (n_observations,)\n The log-likelihood of each sample under the fitted copula.\n\n Raises\n ------\n ValueError\n If rho is not in (-1, 1)\n \"\"\"\n if not (-1.0 <= rho <= 1.0):\n raise ValueError(\"rho must be between -1 and 1.\")\n\n # Inverse CDF (ppf) using stdtrit for better performance\n u_inv, v_inv = sp.ndtri(X).T\n\n # Using np.log1p to avoid loss of precision\n log_density = -0.5 * np.log1p(-(rho**2)) - rho * (\n 0.5 * rho * (u_inv**2 + v_inv**2) - u_inv * v_inv\n ) / (1 - rho**2)\n return log_density" }, { "class_start_lineno": 1, "class_end_lineno": 407, "func_start_lineno": 352, "func_end_lineno": 370, "func_code": "def _neg_log_likelihood(rho: float, X: np.ndarray) -> float:\n \"\"\"Negative log-likelihood function for optimization.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n rho : float\n Correlation copula parameter.\n\n Returns\n -------\n value : float\n The negative log-likelihood value.\n \"\"\"\n return -np.sum(_base_sample_scores(X=X, rho=rho))" } ]
[ "function_empty", "Development" ]
[ "skfolio.distribution.copula._gaussian._base_sample_scores", "skfolio.distribution.copula._gaussian._neg_log_likelihood" ]
Python
1
2
{ "total_num": 38, "base_passed_num": 26 }
[ "skfolio.src.skfolio.distribution.copula._gumbel._base_sample_scores", "skfolio.src.skfolio.distribution.copula._gumbel._neg_log_likelihood", "skfolio.src.skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.src.skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.src.skfolio.distribution.copula._gumbel._base_partial_derivative", "skfolio.src.skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
skfolio
[ "skfolio/distribution/copula/_gumbel.py", "skfolio/distribution/copula/_gumbel.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_gumbel.py", "skfolio/distribution/copula/_utils.py" ]
[ "tests/test_distribution/test_copula/test_gumbel.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 422, "func_end_lineno": 451, "func_code": "def _base_sample_scores(X: np.ndarray, theta: float) -> np.ndarray:\n r\"\"\"Compute the log-likelihood of each sample (log-pdf) under the bivariate Gumbel\n copula.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n Bivariate samples `(u, v)`, with each component in [0,1].\n\n theta : float\n The dependence parameter (must be greater than 1).\n\n Returns\n -------\n logpdf : ndarray of shape (n_observations,)\n Log-likelihood values for each observation.\n \"\"\"\n if theta <= 1:\n raise ValueError(\"Theta must be greater than 1 for the Gumbel copula.\")\n Z = -np.log(X)\n s = np.power(np.power(Z, theta).sum(axis=1), 1 / theta)\n s = np.clip(s, a_min=1e-10, a_max=None)\n log_density = (\n -s\n + np.log(s + theta - 1)\n + (1 - 2 * theta) * np.log(s)\n + (theta - 1) * np.log(Z.prod(axis=1))\n + Z.sum(axis=1)\n )\n return log_density" }, { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 401, "func_end_lineno": 419, "func_code": "def _neg_log_likelihood(theta: float, X: np.ndarray) -> float:\n \"\"\"Negative log-likelihood function for the Gumbel copula.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval [0, 1],\n having been transformed to uniform marginals.\n\n theta : float\n The dependence parameter (must be greater than 1).\n\n Returns\n -------\n value : float\n The negative log-likelihood value.\n \"\"\"\n return -np.sum(_base_sample_scores(X=X, theta=theta))" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 341, "func_end_lineno": 380, "func_code": "def _apply_copula_rotation(X: npt.ArrayLike, rotation: CopulaRotation) -> np.ndarray:\n r\"\"\"Apply a bivariate copula rotation using the standard (clockwise) convention.\n\n The transformations are defined as follows:\n\n - `CopulaRotation.R0` (0°): :math:`(u, v) \\mapsto (u, v)`\n - `CopulaRotation.R90` (90°): :math:`(u, v) \\mapsto (v,\\, 1 - u)`\n - `CopulaRotation.R180` (180°): :math:`(u, v) \\mapsto (1 - u,\\, 1 - v)`\n - `CopulaRotation.R270` (270°): :math:`(u, v) \\mapsto (1 - v,\\, u)`\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation.\n\n rotation : CopulaRotation\n The rotation to apply to the copula (default is no rotation).\n\n Returns\n -------\n rotated_X: ndarray of shape (n_observations, 2)\n The rotated data array.\n \"\"\"\n match rotation:\n case CopulaRotation.R0:\n # No rotation\n pass\n case CopulaRotation.R90:\n # (u, v) -> (v, 1 - u)\n X = np.column_stack([X[:, 1], 1.0 - X[:, 0]])\n case CopulaRotation.R180:\n # (u, v) -> (1 - u, 1 - v)\n X = 1.0 - X\n case CopulaRotation.R270:\n # (u, v) -> (1 - v, u)\n X = np.column_stack([1.0 - X[:, 1], X[:, 0]])\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return X" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 383, "func_end_lineno": 406, "func_code": "def _apply_margin_swap(X: np.ndarray, first_margin: bool) -> np.ndarray:\n \"\"\"\n Swap the columns of X if first_margin is False.\n\n If first_margin is True, X is returned unchanged; otherwise, the columns\n of X are swapped.\n\n Parameters\n ----------\n X : ndarray of shape (n_observations, 2)\n A 2D array of 
bivariate inputs (u, v).\n first_margin : bool\n If True, no swap is performed; if False, the columns of X are swapped.\n\n Returns\n -------\n X_swapped : ndarray of shape (n_observations, 2)\n The data array with columns swapped if first_margin is False.\n \"\"\"\n assert X.ndim == 2\n assert X.shape[1] == 2\n if first_margin:\n return X[:, [1, 0]]\n return X" }, { "class_start_lineno": 1, "class_end_lineno": 560, "func_start_lineno": 464, "func_end_lineno": 507, "func_code": "def _base_partial_derivative(\n X: np.ndarray, first_margin: bool, theta: float\n) -> np.ndarray:\n r\"\"\"\n Compute the partial derivative (h-function) for the unrotated Gumbel copula.\n\n For Gumbel, the copula is defined as:\n\n .. math::\n C(u,v)=\\exp\\Bigl(-\\Bigl[(-\\ln u)^{\\theta}+(-\\ln v)^{\\theta}\\Bigr]^{1/\\theta}\\Bigr).\n\n The partial derivative with respect to v is:\n\n .. math::\n \\frac{\\partial C(u,v)}{\\partial v}\n = C(u,v)\\,\\Bigl[(-\\ln u)^{\\theta}+(-\\ln v)^{\\theta}\\Bigr]^{\\frac{1}{\\theta}-1}\n \\,(-\\ln v)^{\\theta-1}\\,\\frac{1}{v}.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` with values in [0, 1].\n\n first_margin : bool, default=False\n If True, compute with respect to u (by swapping margins); otherwise,\n compute with respect to v.\n\n theta : float\n The dependence parameter (must be > 1).\n\n Returns\n -------\n p : ndarray of shape (n_observations,)\n The computed h-function values.\n \"\"\"\n X = _apply_margin_swap(X, first_margin=first_margin)\n _, v = X.T\n x, y = -np.log(X).T\n p = (\n np.exp(-np.power(np.power(x, theta) + np.power(y, theta), 1.0 / theta))\n * np.power(np.power(x / y, theta) + 1.0, 1.0 / theta - 1.0)\n / v\n )\n return p" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 452, "func_end_lineno": 509, "func_code": "def _apply_rotation_partial_derivatives(\n func: Callable,\n X: np.ndarray,\n rotation: CopulaRotation,\n first_margin: bool,\n **kwargs,\n) -> np.ndarray:\n \"\"\"\n Apply a copula rotation to X and compute the corresponding partial derivatives.\n\n This function rotates the data X using the specified rotation and then computes\n the partial derivative (h-function) using the provided function. 
The result is then\n adjusted according to the rotation and the margin of interest.\n\n Parameters\n ----------\n func : Callable\n A function that computes the partial derivative (h-function) given X, the\n margin, and any additional keyword arguments.\n\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs.\n\n rotation : CopulaRotation\n The rotation to apply.\n\n first_margin : bool\n If True, compute the partial derivative with respect to the first margin;\n otherwise, compute it with respect to the second margin.\n\n **kwargs\n Additional keyword arguments to pass to the partial derivative function.\n\n Returns\n -------\n z : ndarray of shape (n_observations,)\n The transformed partial derivative values after applying the rotation.\n \"\"\"\n rotated_X = _apply_copula_rotation(X, rotation=rotation)\n\n match rotation:\n case CopulaRotation.R0:\n z = func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R90:\n if first_margin:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case CopulaRotation.R180:\n z = 1 - func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R270:\n if first_margin:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return z" } ]
[ "function_empty", "Development" ]
[ "skfolio.distribution.copula._gumbel._base_sample_scores", "skfolio.distribution.copula._gumbel._neg_log_likelihood", "skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.distribution.copula._gumbel._base_partial_derivative", "skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
Python
4
6
{ "total_num": 69, "base_passed_num": 5 }
[ "skfolio.src.skfolio.distribution.copula._joe._base_sample_scores", "skfolio.src.skfolio.distribution.copula._joe._neg_log_likelihood", "skfolio.src.skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.src.skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.src.skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
skfolio
[ "skfolio/distribution/copula/_joe.py", "skfolio/distribution/copula/_joe.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_joe.py", "skfolio/distribution/copula/_utils.py" ]
[ "tests/test_distribution/test_copula/test_joe.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 609, "func_start_lineno": 439, "func_end_lineno": 473, "func_code": "def _base_sample_scores(X: np.ndarray, theta: float) -> np.ndarray:\n \"\"\"Compute the log-likelihood of each sample (log-pdf) under the bivariate\n Joe copula model.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n theta : float\n The dependence parameter (must be greater than 1).\n\n Returns\n -------\n density : ndarray of shape (n_observations,)\n The log-likelihood of each sample under the fitted copula.\n\n Raises\n ------\n ValueError\n If rho is not in (-1, 1) or dof is not positive.\n \"\"\"\n if theta <= 1.0:\n raise ValueError(\"Theta must be greater than 1 for the Joe copula.\")\n\n # log-space transformation to improve stability near 0 or 1\n x, y = np.log1p(-X).T\n x_y = x + y\n d = np.exp(x * theta) + np.exp(y * theta) - np.exp(x_y * theta)\n log_density = (\n (1.0 / theta - 2.0) * np.log(d) + x_y * (theta - 1.0) + np.log(theta - 1.0 + d)\n )\n return log_density" }, { "class_start_lineno": 1, "class_end_lineno": 609, "func_start_lineno": 418, "func_end_lineno": 436, "func_code": "def _neg_log_likelihood(theta: float, X: np.ndarray) -> float:\n \"\"\"Negative log-likelihood function for optimization.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n theta : float\n The dependence parameter (must be greater than 1).\n\n Returns\n -------\n value : float\n The negative log-likelihood value.\n \"\"\"\n return -np.sum(_base_sample_scores(X=X, theta=theta))" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 341, "func_end_lineno": 380, "func_code": "def _apply_copula_rotation(X: npt.ArrayLike, rotation: CopulaRotation) -> np.ndarray:\n r\"\"\"Apply a bivariate copula rotation using the standard (clockwise) convention.\n\n The transformations are defined as follows:\n\n - `CopulaRotation.R0` (0°): :math:`(u, v) \\mapsto (u, v)`\n - `CopulaRotation.R90` (90°): :math:`(u, v) \\mapsto (v,\\, 1 - u)`\n - `CopulaRotation.R180` (180°): :math:`(u, v) \\mapsto (1 - u,\\, 1 - v)`\n - `CopulaRotation.R270` (270°): :math:`(u, v) \\mapsto (1 - v,\\, u)`\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation.\n\n rotation : CopulaRotation\n The rotation to apply to the copula (default is no rotation).\n\n Returns\n -------\n rotated_X: ndarray of shape (n_observations, 2)\n The rotated data array.\n \"\"\"\n match rotation:\n case CopulaRotation.R0:\n # No rotation\n pass\n case CopulaRotation.R90:\n # (u, v) -> (v, 1 - u)\n X = np.column_stack([X[:, 1], 1.0 - X[:, 0]])\n case CopulaRotation.R180:\n # (u, v) -> (1 - u, 1 - v)\n X = 1.0 - X\n case CopulaRotation.R270:\n # (u, v) -> (1 - v, u)\n X = np.column_stack([1.0 - X[:, 1], X[:, 0]])\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return X" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 383, "func_end_lineno": 406, "func_code": "def _apply_margin_swap(X: np.ndarray, first_margin: bool) -> np.ndarray:\n 
\"\"\"\n Swap the columns of X if first_margin is False.\n\n If first_margin is True, X is returned unchanged; otherwise, the columns\n of X are swapped.\n\n Parameters\n ----------\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs (u, v).\n first_margin : bool\n If True, no swap is performed; if False, the columns of X are swapped.\n\n Returns\n -------\n X_swapped : ndarray of shape (n_observations, 2)\n The data array with columns swapped if first_margin is False.\n \"\"\"\n assert X.ndim == 2\n assert X.shape[1] == 2\n if first_margin:\n return X[:, [1, 0]]\n return X" }, { "class_start_lineno": 1, "class_end_lineno": 609, "func_start_lineno": 517, "func_end_lineno": 546, "func_code": "def _base_partial_derivative(\n X: np.ndarray, first_margin: bool, theta: float\n) -> np.ndarray:\n r\"\"\"Compute the h-function (partial derivative) for the bivariate unrotated\n Joe copula with respect to a specified margin.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n first_margin : bool, default=False\n If True, compute the partial derivative with respect to the first\n margin `u`; otherwise, compute the partial derivative with respect to the\n second margin `v`.\n\n theta : float\n The dependence parameter (must be greater than 1).\n\n Returns\n -------\n : ndarray of shape (n_observations,)\n h-function values :math:`h(u \\mid v) \\;=\\; p` for each observation in X.\n \"\"\"\n X = _apply_margin_swap(X, first_margin=first_margin)\n x, y = np.power(1 - X, theta).T\n p = np.power(1 + x / y - x, 1 / theta - 1) * (1.0 - x)\n return p" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 452, "func_end_lineno": 509, "func_code": "def _apply_rotation_partial_derivatives(\n func: Callable,\n X: np.ndarray,\n rotation: CopulaRotation,\n first_margin: bool,\n **kwargs,\n) -> np.ndarray:\n \"\"\"\n Apply a copula rotation to X and compute the corresponding partial derivatives.\n\n This function rotates the data X using the specified rotation and then computes\n the partial derivative (h-function) using the provided function. 
The result is then\n adjusted according to the rotation and the margin of interest.\n\n Parameters\n ----------\n func : Callable\n A function that computes the partial derivative (h-function) given X, the\n margin, and any additional keyword arguments.\n\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs.\n\n rotation : CopulaRotation\n The rotation to apply.\n\n first_margin : bool\n If True, compute the partial derivative with respect to the first margin;\n otherwise, compute it with respect to the second margin.\n\n **kwargs\n Additional keyword arguments to pass to the partial derivative function.\n\n Returns\n -------\n z : ndarray of shape (n_observations,)\n The transformed partial derivative values after applying the rotation.\n \"\"\"\n rotated_X = _apply_copula_rotation(X, rotation=rotation)\n\n match rotation:\n case CopulaRotation.R0:\n z = func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R90:\n if first_margin:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case CopulaRotation.R180:\n z = 1 - func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R270:\n if first_margin:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return z" } ]
[ "function_empty" ]
[ "skfolio.distribution.copula._joe._base_sample_scores", "skfolio.distribution.copula._joe._neg_log_likelihood", "skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.distribution.copula._utils._apply_margin_swap", "skfolio.distribution.copula._joe._base_partial_derivative", "skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
Python
5
5
{ "total_num": 69, "base_passed_num": 4 }
[ "skfolio.src.skfolio.distribution.copula._clayton._base_sample_scores", "skfolio.src.skfolio.distribution.copula._clayton._neg_log_likelihood" ]
skfolio
[ "skfolio/distribution/copula/_clayton.py", "skfolio/distribution/copula/_clayton.py" ]
[ "tests/test_distribution/test_copula/test_selection.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 539, "func_start_lineno": 416, "func_end_lineno": 448, "func_code": "def _base_sample_scores(X: np.ndarray, theta: float) -> np.ndarray:\n r\"\"\"Compute the log-likelihood of each sample (log-pdf) under the bivariate Clayton\n copula.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n Bivariate samples `(u, v)`, with each component in [0,1].\n\n theta : float\n The dependence parameter (must be greater than 0).\n\n Returns\n -------\n logpdf : ndarray of shape (n_observations,)\n Log-likelihood values for each observation.\n\n Raises\n ------\n ValueError\n If theta is not greater than 0.\n \"\"\"\n if theta <= 0:\n raise ValueError(\"Theta must be greater than 1 for the Clayton copula.\")\n\n x, y = np.log(X).T\n\n log_density = (\n np.log1p(theta)\n - (2.0 + 1.0 / theta) * np.log1p(np.expm1(-theta * x) + np.expm1(-theta * y))\n - (1.0 + theta) * (x + y)\n )\n return log_density" }, { "class_start_lineno": 1, "class_end_lineno": 539, "func_start_lineno": 395, "func_end_lineno": 413, "func_code": "def _neg_log_likelihood(theta: float, X: np.ndarray) -> float:\n \"\"\"Negative log-likelihood function for the Clayton copula.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n theta : float\n The dependence parameter (must be greater than 0).\n\n Returns\n -------\n value : float\n The negative log-likelihood value.\n \"\"\"\n return -np.sum(_base_sample_scores(X=X, theta=theta))" } ]
[ "function_empty" ]
[ "skfolio.distribution.copula._clayton._base_sample_scores", "skfolio.distribution.copula._clayton._neg_log_likelihood" ]
Python
2
2
{ "total_num": 4, "base_passed_num": 3 }
[ "skfolio.src.skfolio.distribution.copula._student_t._sample_scores", "skfolio.src.skfolio.distribution.copula._student_t._neg_log_likelihood" ]
skfolio
[ "skfolio/distribution/copula/_student_t.py", "skfolio/distribution/copula/_student_t.py" ]
[ "tests/test_distribution/test_copula/test_student_t.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 486, "func_start_lineno": 445, "func_end_lineno": 486, "func_code": "def _sample_scores(X: np.ndarray, rho: float, dof: float) -> np.ndarray:\n \"\"\"Compute the log-likelihood of each sample (log-pdf) under the bivariate\n Gaussian copula model.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n rho : float\n Gaussian copula parameter.\n\n Returns\n -------\n density : ndarray of shape (n_observations,)\n The log-likelihood of each sample under the fitted copula.\n\n Raises\n ------\n ValueError\n If rho is not in (-1, 1) or dof is not positive.\n \"\"\"\n if not (-1.0 <= rho <= 1.0):\n raise ValueError(\"rho must be between -1 and 1.\")\n if not 1.0 <= dof <= 50:\n raise ValueError(\"Degrees of freedom `dof` must be between 1 and 50.\")\n\n # Inverse CDF (ppf) using stdtrit for better performance\n x, y = sp.stdtrit(dof, X).T\n\n a = 1.0 - rho**2\n log_density = (\n sp.gammaln((dof + 2.0) / 2.0)\n + sp.gammaln(dof / 2.0)\n - 2.0 * sp.gammaln((dof + 1.0) / 2.0)\n - np.log(a) / 2\n + (dof + 1.0) / 2.0 * (np.log1p(x**2 / dof) + np.log1p(y**2 / dof))\n - (dof + 2.0) / 2.0 * np.log1p((x**2 - 2 * rho * x * y + y**2) / a / dof)\n )\n return log_density" }, { "class_start_lineno": 1, "class_end_lineno": 486, "func_start_lineno": 421, "func_end_lineno": 442, "func_code": "def _neg_log_likelihood(dof: float, rho: float, X: np.ndarray) -> float:\n \"\"\"Negative log-likelihood function for optimization.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation. Both `u` and `v` must be in the interval `[0, 1]`,\n having been transformed to uniform marginals.\n\n rho : float\n Correlation copula parameter.\n\n dof : float\n Degree of freedom copula parameter.\n\n Returns\n -------\n value : float\n The negative log-likelihood value.\n \"\"\"\n return -np.sum(_sample_scores(X=X, rho=rho, dof=dof))" } ]
[ "function_empty", "Development" ]
[ "skfolio.distribution.copula._student_t._sample_scores", "skfolio.distribution.copula._student_t._neg_log_likelihood" ]
Python
1
2
{ "total_num": 40, "base_passed_num": 17 }
[ "skfolio.src.skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.src.skfolio.distribution.copula._utils._apply_rotation_cdf", "skfolio.src.skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
skfolio
[ "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_utils.py", "skfolio/distribution/copula/_utils.py" ]
[ "tests/test_distribution/test_copula/test_utils.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 341, "func_end_lineno": 380, "func_code": "def _apply_copula_rotation(X: npt.ArrayLike, rotation: CopulaRotation) -> np.ndarray:\n r\"\"\"Apply a bivariate copula rotation using the standard (clockwise) convention.\n\n The transformations are defined as follows:\n\n - `CopulaRotation.R0` (0°): :math:`(u, v) \\mapsto (u, v)`\n - `CopulaRotation.R90` (90°): :math:`(u, v) \\mapsto (v,\\, 1 - u)`\n - `CopulaRotation.R180` (180°): :math:`(u, v) \\mapsto (1 - u,\\, 1 - v)`\n - `CopulaRotation.R270` (270°): :math:`(u, v) \\mapsto (1 - v,\\, u)`\n\n Parameters\n ----------\n X : array-like of shape (n_observations, 2)\n An array of bivariate inputs `(u, v)` where each row represents a\n bivariate observation.\n\n rotation : CopulaRotation\n The rotation to apply to the copula (default is no rotation).\n\n Returns\n -------\n rotated_X: ndarray of shape (n_observations, 2)\n The rotated data array.\n \"\"\"\n match rotation:\n case CopulaRotation.R0:\n # No rotation\n pass\n case CopulaRotation.R90:\n # (u, v) -> (v, 1 - u)\n X = np.column_stack([X[:, 1], 1.0 - X[:, 0]])\n case CopulaRotation.R180:\n # (u, v) -> (1 - u, 1 - v)\n X = 1.0 - X\n case CopulaRotation.R270:\n # (u, v) -> (1 - v, u)\n X = np.column_stack([1.0 - X[:, 1], X[:, 0]])\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return X" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 409, "func_end_lineno": 449, "func_code": "def _apply_rotation_cdf(\n func: Callable, X: np.ndarray, rotation: CopulaRotation, **kwargs\n) -> np.ndarray:\n \"\"\"\n Apply a copula rotation to X and compute the corresponding CDF values.\n\n Parameters\n ----------\n func : Callable\n A function that computes the CDF given data X and additional keyword arguments.\n\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs.\n\n rotation : CopulaRotation\n The rotation to apply.\n\n **kwargs\n Additional keyword arguments to pass to the CDF function.\n\n Returns\n -------\n rotated_cdf : ndarray of shape (n_observations,)\n The transformed CDF values after applying the rotation.\n \"\"\"\n rotated_X = _apply_copula_rotation(X, rotation=rotation)\n cdf = func(X=rotated_X, **kwargs)\n\n match rotation:\n case CopulaRotation.R0:\n pass\n case CopulaRotation.R90:\n cdf = X[:, 1] - cdf\n case CopulaRotation.R180:\n cdf = np.sum(X, axis=1) - 1 + cdf\n case CopulaRotation.R270:\n cdf = X[:, 0] - cdf\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n\n return cdf" }, { "class_start_lineno": 1, "class_end_lineno": 509, "func_start_lineno": 452, "func_end_lineno": 509, "func_code": "def _apply_rotation_partial_derivatives(\n func: Callable,\n X: np.ndarray,\n rotation: CopulaRotation,\n first_margin: bool,\n **kwargs,\n) -> np.ndarray:\n \"\"\"\n Apply a copula rotation to X and compute the corresponding partial derivatives.\n\n This function rotates the data X using the specified rotation and then computes\n the partial derivative (h-function) using the provided function. 
The result is then\n adjusted according to the rotation and the margin of interest.\n\n Parameters\n ----------\n func : Callable\n A function that computes the partial derivative (h-function) given X, the\n margin, and any additional keyword arguments.\n\n X : ndarray of shape (n_observations, 2)\n A 2D array of bivariate inputs.\n\n rotation : CopulaRotation\n The rotation to apply.\n\n first_margin : bool\n If True, compute the partial derivative with respect to the first margin;\n otherwise, compute it with respect to the second margin.\n\n **kwargs\n Additional keyword arguments to pass to the partial derivative function.\n\n Returns\n -------\n z : ndarray of shape (n_observations,)\n The transformed partial derivative values after applying the rotation.\n \"\"\"\n rotated_X = _apply_copula_rotation(X, rotation=rotation)\n\n match rotation:\n case CopulaRotation.R0:\n z = func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R90:\n if first_margin:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case CopulaRotation.R180:\n z = 1 - func(X=rotated_X, first_margin=first_margin, **kwargs)\n case CopulaRotation.R270:\n if first_margin:\n z = 1 - func(X=rotated_X, first_margin=not first_margin, **kwargs)\n else:\n z = func(X=rotated_X, first_margin=not first_margin, **kwargs)\n case _:\n raise ValueError(f\"Unsupported rotation: {rotation}\")\n return z" } ]
[ "function_empty" ]
[ "skfolio.distribution.copula._utils._apply_copula_rotation", "skfolio.distribution.copula._utils._apply_rotation_cdf", "skfolio.distribution.copula._utils._apply_rotation_partial_derivatives" ]
Python
3
3
{ "total_num": 10, "base_passed_num": 6 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py" ]
[ "tests/test_distribution/test_multivariate/test_utils.py", "tests/test_model_selection/test_walk_forward.py", "tests/test_utils/test_bootstrap.py", "tests/test_utils/test_validation.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset" ]
Python
2
2
{ "total_num": 24, "base_passed_num": 5 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.measures._measures.get_cumulative_returns", "skfolio.src.skfolio.measures._measures.get_drawdowns" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/measures/_measures.py", "skfolio/measures/_measures.py" ]
[ "tests/test_measures/test_measures.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 633, "func_start_lineno": 405, "func_end_lineno": 428, "func_code": "def get_cumulative_returns(returns: np.ndarray, compounded: bool = False) -> np.ndarray:\n \"\"\"Compute the cumulative returns from the returns.\n Non-compounded cumulative returns start at 0.\n Compounded cumulative returns are rescaled to start at 1000.\n\n Parameters\n ----------\n returns : ndarray of shape (n_observations,)\n Vector of returns.\n\n compounded : bool, default=False\n If this is set to True, the cumulative returns are compounded otherwise they\n are uncompounded.\n\n Returns\n -------\n values: ndarray of shape (n_observations,)\n Cumulative returns.\n \"\"\"\n if compounded:\n cumulative_returns = 1000 * np.cumprod(1 + returns) # Rescaled to start at 1000\n else:\n cumulative_returns = np.cumsum(returns)\n return cumulative_returns" }, { "class_start_lineno": 1, "class_end_lineno": 633, "func_start_lineno": 431, "func_end_lineno": 453, "func_code": "def get_drawdowns(returns: np.ndarray, compounded: bool = False) -> np.ndarray:\n \"\"\"Compute the drawdowns' series from the returns.\n\n Parameters\n ----------\n returns : ndarray of shape (n_observations,)\n Vector of returns.\n\n compounded : bool, default=False\n If this is set to True, the cumulative returns are compounded otherwise they\n are uncompounded.\n\n Returns\n -------\n values: ndarray of shape (n_observations,)\n Drawdowns.\n \"\"\"\n cumulative_returns = get_cumulative_returns(returns=returns, compounded=compounded)\n if compounded:\n drawdowns = cumulative_returns / np.maximum.accumulate(cumulative_returns) - 1\n else:\n drawdowns = cumulative_returns - np.maximum.accumulate(cumulative_returns)\n return drawdowns" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.measures._measures.get_cumulative_returns", "skfolio.measures._measures.get_drawdowns" ]
Python
4
4
{ "total_num": 17, "base_passed_num": 0 }
[ "skfolio.src.skfolio.model_selection._combinatorial._n_splits", "skfolio.src.skfolio.model_selection._combinatorial._n_test_paths" ]
skfolio
[ "skfolio/model_selection/_combinatorial.py", "skfolio/model_selection/_combinatorial.py", "skfolio/model_selection/_combinatorial.py" ]
[ "tests/test_model_selection/test_combinatorial.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 564, "func_start_lineno": 415, "func_end_lineno": 431, "func_code": "def _n_splits(n_folds: int, n_test_folds: int) -> int:\n \"\"\"Number of splits.\n\n Parameters\n ----------\n n_folds : int\n Number of folds.\n\n n_test_folds : int\n Number of test folds.\n\n Returns\n -------\n n_splits : int\n Number of splits\n \"\"\"\n return math.comb(n_folds, n_test_folds)" }, { "class_start_lineno": 1, "class_end_lineno": 564, "func_start_lineno": 434, "func_end_lineno": 453, "func_code": "def _n_test_paths(n_folds: int, n_test_folds: int) -> int:\n \"\"\"Number of test paths that can be reconstructed from the train/test\n combinations.\n\n Parameters\n ----------\n n_folds : int\n Number of folds.\n\n n_test_folds : int\n Number of test folds.\n\n Returns\n -------\n n_splits : int\n Number of test paths.\n \"\"\"\n return (\n _n_splits(n_folds=n_folds, n_test_folds=n_test_folds) * n_test_folds // n_folds\n )" }, { "class_start_lineno": 46, "class_end_lineno": 412, "func_start_lineno": 203, "func_end_lineno": 207, "func_code": " def n_test_paths(self) -> int:\n \"\"\"Number of test paths that can be reconstructed from the train/test\n combinations.\n \"\"\"\n return _n_test_paths(n_folds=self.n_folds, n_test_folds=self.n_test_folds)" } ]
[ "function_empty" ]
[ "skfolio.model_selection._combinatorial._n_splits", "skfolio.model_selection._combinatorial._n_test_paths", "skfolio.model_selection._combinatorial.CombinatorialPurgedCV.n_test_paths" ]
Python
2
2
{ "total_num": 8, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.utils.tools.safe_indexing", "skfolio.src.skfolio.utils.tools.safe_split", "skfolio.src.skfolio.model_selection._validation.cross_val_predict" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/utils/tools.py", "skfolio/utils/tools.py", "skfolio/model_selection/_validation.py" ]
[ "tests/test_model_selection/test_validation.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 190, "func_end_lineno": 219, "func_code": "def safe_indexing(\n X: npt.ArrayLike | pd.DataFrame, indices: npt.ArrayLike | None, axis: int = 0\n):\n \"\"\"Return rows, items or columns of X using indices.\n\n Parameters\n ----------\n X : array-like\n Data from which to sample rows.\n\n indices : array-like, optional\n Indices of rows or columns.\n The default (`None`) is to select the entire data.\n\n axis : int, default=0\n The axis along which `X` will be sub-sampled. `axis=0` will select\n rows while `axis=1` will select columns.\n\n Returns\n -------\n subset :\n Subset of X on axis 0.\n \"\"\"\n if indices is None:\n return X\n if hasattr(X, \"iloc\"):\n return X.take(indices, axis=axis)\n if axis == 0:\n return X[indices]\n return X[:, indices]" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 222, "func_end_lineno": 261, "func_code": "def safe_split(\n X: npt.ArrayLike,\n y: npt.ArrayLike | None = None,\n indices: np.ndarray | None = None,\n axis: int = 0,\n):\n \"\"\"Create subset of dataset.\n\n Slice X, y according to indices for cross-validation.\n\n Parameters\n ----------\n X : array-like\n Data to be indexed.\n\n y : array-like\n Data to be indexed.\n\n indices : ndarray of int, optional\n Rows or columns to select from X and y.\n The default (`None`) is to select the entire data.\n\n axis : int, default=0\n The axis along which `X` will be sub-sampled. `axis=0` will select\n rows while `axis=1` will select columns.\n\n Returns\n -------\n X_subset : array-like\n Indexed data.\n\n y_subset : array-like\n Indexed targets.\n \"\"\"\n X_subset = safe_indexing(X, indices=indices, axis=axis)\n if y is not None:\n y_subset = safe_indexing(y, indices=indices, axis=axis)\n else:\n y_subset = None\n return X_subset, y_subset" }, { "class_start_lineno": 1, "class_end_lineno": 254, "func_start_lineno": 38, "func_end_lineno": 254, "func_code": "def cross_val_predict(\n estimator: skb.BaseEstimator,\n X: npt.ArrayLike,\n y: npt.ArrayLike = None,\n cv: sks.BaseCrossValidator | BaseCombinatorialCV | int | None = None,\n n_jobs: int | None = None,\n method: str = \"predict\",\n verbose: int = 0,\n params: dict | None = None,\n pre_dispatch: str = \"2*n_jobs\",\n column_indices: np.ndarray | None = None,\n portfolio_params: dict | None = None,\n) -> MultiPeriodPortfolio | Population:\n \"\"\"Generate cross-validated `Portfolios` estimates.\n\n The data is split according to the `cv` parameter.\n The optimization estimator is fitted on the training set and portfolios are\n predicted on the corresponding test set.\n\n For non-combinatorial cross-validation like `Kfold`, the output is the predicted\n :class:`~skfolio.portfolio.MultiPeriodPortfolio` where\n each :class:`~skfolio.portfolio.Portfolio` corresponds to the prediction on each\n train/test pair (`k` portfolios for `Kfold`).\n\n For combinatorial cross-validation\n like :class:`~skfolio.model_selection.CombinatorialPurgedCV`, the output is the\n predicted :class:`~skfolio.population.Population` of multiple\n :class:`~skfolio.portfolio.MultiPeriodPortfolio` (each test outputs are a\n collection of multiple paths instead of one single path).\n\n Parameters\n ----------\n estimator : BaseOptimization\n :ref:`Optimization estimators <optimization>` use to fit the data.\n\n X : array-like of shape 
(n_observations, n_assets)\n Price returns of the assets.\n\n y : array-like of shape (n_observations, n_targets), optional\n Target data (optional).\n For example, the price returns of the factors.\n\n cv : int | cross-validation generator, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n * None, to use the default 5-fold cross validation,\n * int, to specify the number of folds in a `(Stratified)KFold`,\n * `CV splitter`,\n * An iterable that generates (train, test) splits as arrays of indices.\n\n n_jobs : int, optional\n The number of jobs to run in parallel for `fit` of all `estimators`.\n `None` means 1 unless in a `joblib.parallel_backend` context. -1 means\n using all processors.\n\n method : str\n Invokes the passed method name of the passed estimator.\n\n verbose : int, default=0\n The verbosity level.\n\n params : dict, optional\n Parameters to pass to the underlying estimator's ``fit`` and the CV splitter.\n\n pre_dispatch : int or str, default='2*n_jobs'\n Controls the number of jobs that get dispatched during parallel\n execution. Reducing this number can be useful to avoid an\n explosion of memory consumption when more jobs get dispatched\n than CPUs can process. This parameter can be:\n\n * None, in which case all the jobs are immediately\n created and spawned. Use this for lightweight and\n fast-running jobs, to avoid delays due to on-demand\n spawning of the jobs\n\n * An int, giving the exact number of total jobs that are\n spawned\n\n * A str, giving an expression as a function of n_jobs,\n as in '2*n_jobs'\n\n column_indices : ndarray, optional\n Indices of the `X` columns to cross-validate on.\n\n portfolio_params : dict, optional\n Additional portfolio parameters passed to `MultiPeriodPortfolio`.\n\n Returns\n -------\n predictions : MultiPeriodPortfolio | Population\n This is the result of calling `predict`\n \"\"\"\n params = {} if params is None else params\n\n X, y = safe_split(X, y, indices=column_indices, axis=1)\n X, y = sku.indexable(X, y)\n\n if _routing_enabled():\n # For estimators, a MetadataRouter is created in get_metadata_routing\n # methods. For these router methods, we create the router to use\n # `process_routing` on it.\n # noinspection PyTypeChecker\n router = (\n skm.MetadataRouter(owner=\"cross_validate\")\n .add(\n splitter=cv,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"split\"),\n )\n .add(\n estimator=estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n )\n try:\n routed_params = skm.process_routing(router, \"fit\", **params)\n except ske.UnsetMetadataPassedError as e:\n # The default exception would mention `fit` since in the above\n # `process_routing` code, we pass `fit` as the caller. However,\n # the user is not calling `fit` directly, so we change the message\n # to make it more suitable for this case.\n unrequested_params = sorted(e.unrequested_params)\n raise ske.UnsetMetadataPassedError(\n message=(\n f\"{unrequested_params} are passed to `cross_val_predict` but are\"\n \" not explicitly set as requested or not requested for\"\n f\" cross_validate's estimator: {estimator.__class__.__name__} Call\"\n \" `.set_fit_request({{metadata}}=True)` on the estimator for\"\n f\" each metadata in {unrequested_params} that you want to use and\"\n \" `metadata=False` for not using it. 
See the Metadata Routing User\"\n \" guide <https://scikit-learn.org/stable/metadata_routing.html>\"\n \" for more information.\"\n ),\n unrequested_params=e.unrequested_params,\n routed_params=e.routed_params,\n ) from None\n else:\n routed_params = sku.Bunch()\n routed_params.splitter = sku.Bunch(split={})\n routed_params.estimator = sku.Bunch(fit=params)\n\n cv = sks.check_cv(cv, y)\n splits = list(cv.split(X, y, **routed_params.splitter.split))\n\n portfolio_params = {} if portfolio_params is None else portfolio_params.copy()\n\n # We ensure that the folds are not shuffled\n if not isinstance(cv, BaseCombinatorialCV):\n try:\n if cv.shuffle:\n raise ValueError(\n \"`cross_val_predict` only works with cross-validation setting\"\n \" `shuffle=False`\"\n )\n except AttributeError:\n # If we cannot find the attribute shuffle, we check if the first folds\n # are shuffled\n for fold in splits[0]:\n if not np.all(np.diff(fold) > 0):\n raise ValueError(\n \"`cross_val_predict` only works with un-shuffled folds\"\n ) from None\n\n # We clone the estimator to make sure that all the folds are independent\n # and that it is pickle-able.\n parallel = skp.Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)\n # TODO remove when https://github.com/joblib/joblib/issues/1071 is fixed\n # noinspection PyCallingNonCallable\n predictions = parallel(\n skp.delayed(fit_and_predict)(\n sk.clone(estimator),\n X,\n y,\n train=train,\n test=test,\n fit_params=routed_params.estimator.fit,\n method=method,\n )\n for train, test in splits\n )\n\n if isinstance(cv, BaseCombinatorialCV):\n path_ids = cv.get_path_ids()\n path_nb = np.max(path_ids) + 1\n portfolios = [[] for _ in range(path_nb)]\n for i, prediction in enumerate(predictions):\n for j, p in enumerate(prediction):\n path_id = path_ids[i, j]\n portfolios[path_id].append(p)\n name = portfolio_params.pop(\"name\", \"path\")\n pred = Population(\n [\n MultiPeriodPortfolio(\n name=f\"{name}_{i}\", portfolios=portfolios[i], **portfolio_params\n )\n for i in range(path_nb)\n ]\n )\n else:\n # We need to re-order the test folds in case they were un-ordered by the\n # CV generator.\n # Because the tests folds are not shuffled, we use the first index of each\n # fold to order them.\n test_indices = np.concatenate([test for _, test in splits])\n if np.unique(test_indices, axis=0).shape[0] != test_indices.shape[0]:\n raise ValueError(\n \"`cross_val_predict` only works with non-duplicated test indices\"\n )\n test_indices = [test for _, test in splits]\n sorted_fold_id = np.argsort([x[0] for x in test_indices])\n pred = MultiPeriodPortfolio(\n portfolios=[predictions[fold_id] for fold_id in sorted_fold_id],\n check_observations_order=False,\n **portfolio_params,\n )\n\n return pred" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.utils.tools.safe_indexing", "skfolio.utils.tools.safe_split", "skfolio.model_selection._validation.cross_val_predict" ]
Python
5
5
{ "total_num": 3, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.datasets._base.get_data_home", "skfolio.src.skfolio.datasets._base.download_dataset", "skfolio.src.skfolio.datasets._base.load_sp500_implied_vol_dataset" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py" ]
[ "tests/test_moment/test_covariance/test_implied_covariance.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 25, "func_end_lineno": 55, "func_code": "def get_data_home(data_home: str | Path | None = None) -> str:\n \"\"\"Return the path of the skfolio data directory.\n\n This folder is used by some large dataset loaders to avoid downloading the\n data several times.\n\n By default, the data directory is set to a folder named 'skfolio_data' in the\n user home folder.\n\n Alternatively, it can be set by the 'SKFOLIO_DATA' environment\n variable or programmatically by giving an explicit folder path. The '~'\n symbol is expanded to the user home folder.\n\n If the folder does not already exist, it is automatically created.\n\n Parameters\n ----------\n data_home : str, optional\n The path to skfolio data directory. If `None`, the default path\n is `~/skfolio_data`.\n\n Returns\n -------\n data_home: str or path-like, optional\n The path to skfolio data directory.\n \"\"\"\n if data_home is None:\n data_home = os.environ.get(\"SKFOLIO_DATA\", os.path.join(\"~\", \"skfolio_data\"))\n data_home = os.path.expanduser(data_home)\n os.makedirs(data_home, exist_ok=True)\n return data_home" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 116, "func_end_lineno": 165, "func_code": "def download_dataset(\n data_filename: str,\n data_home: str | Path | None = None,\n download_if_missing: bool = True,\n) -> pd.DataFrame:\n \"\"\"Download and save locally a dataset from the remote GitHub dataset folder.\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from a remote\n GitHub dataset folder.\n\n data_home : str or path-like, optional\n Specify another download and cache folder for the datasets. 
By default,\n all skfolio data is stored in `~/skfolio_data` sub-folders.\n\n download_if_missing : bool, default=True\n If False, raise an OSError if the data is not locally available\n instead of trying to download the data from the source site.\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n # Use a CORS proxy when triggering requests from the browser\n url_prefix = \"https://corsproxy.io/?\" if sys.platform == \"emscripten\" else \"\"\n url = url_prefix + (\n f\"https://github.com/skfolio/skfolio-datasets/raw/main/\"\n f\"datasets/{data_filename}.csv.gz\"\n )\n\n data_home = get_data_home(data_home=data_home)\n filepath = os.path.join(data_home, f\"{data_filename}.pkz\")\n\n if os.path.exists(filepath):\n return joblib.load(filepath)\n\n if not download_if_missing:\n raise OSError(\"Data not found and `download_if_missing` is False\")\n\n archive_path = os.path.join(data_home, os.path.basename(url))\n ur.urlretrieve(url, archive_path)\n df = load_gzip_compressed_csv_data(archive_path)\n joblib.dump(df, filepath, compress=6)\n os.remove(archive_path)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 400, "func_end_lineno": 448, "func_code": "def load_sp500_implied_vol_dataset(\n data_home=None, download_if_missing=True\n) -> pd.DataFrame:\n \"\"\"Load the 3 months ATM implied volatility of the 20 assets from the\n SP500 dataset.\n\n This dataset is composed of the 3 months ATM implied volatility of 20 assets\n from the S&P 500 composition starting from 2010-01-04 up to 2022-12-28.\n\n The data comes from the Yahoo public API option chains.\n\n ============== ==================\n Observations 3270\n Assets 20\n ============== ==================\n\n Parameters\n ----------\n data_home : str, optional\n Specify another download and cache folder for the datasets.\n By default, all skfolio data is stored in `~/skfolio_data` subfolders.\n\n download_if_missing : bool, default=True\n If False, raise an OSError if the data is not locally available\n instead of trying to download the data from the source site.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Implied volatility DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_implied_vol_dataset\n >>> implied_vol = load_sp500_implied_vol_dataset()\n >>> implied_vol.head()\n AAPL AMD BAC ... UNH WMT XOM\n Date ...\n 2010-01-04 0.364353 0.572056 0.382926 ... 0.362751 0.171737 0.201485\n 2010-01-05 0.371865 0.568791 0.374699 ... 0.368504 0.174764 0.203852\n 2010-01-06 0.356746 0.558054 0.349220 ... 0.368514 0.171892 0.197475\n 2010-01-07 0.361084 0.560475 0.354942 ... 0.355792 0.169083 0.200046\n 2010-01-08 0.348085 0.543932 0.360345 ... 0.351130 0.170897 0.204832\n \"\"\"\n data_filename = \"sp500_implied_vol_dataset\"\n df = download_dataset(\n data_filename, data_home=data_home, download_if_missing=download_if_missing\n )\n return df" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.datasets._base.get_data_home", "skfolio.datasets._base.download_dataset", "skfolio.datasets._base.load_sp500_implied_vol_dataset" ]
Python
5
5
{ "total_num": 25, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.utils.stats.assert_is_square", "skfolio.src.skfolio.utils.stats.assert_is_symmetric", "skfolio.src.skfolio.utils.stats.assert_is_distance" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/utils/stats.py", "skfolio/utils/stats.py", "skfolio/utils/stats.py" ]
[ "tests/test_optimization/test_cluster/test_nco.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 208, "func_end_lineno": 221, "func_code": "def assert_is_square(x: np.ndarray) -> None:\n \"\"\"Raises an error if the matrix is not square.\n\n Parameters\n ----------\n x : ndarray of shape (n, n)\n The matrix.\n\n Raises\n ------\n ValueError: if the matrix is not square.\n \"\"\"\n if x.ndim != 2 or x.shape[0] != x.shape[1]:\n raise ValueError(\"The matrix must be square\")" }, { "class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 224, "func_end_lineno": 238, "func_code": "def assert_is_symmetric(x: np.ndarray) -> None:\n \"\"\"Raises an error if the matrix is not symmetric.\n\n Parameters\n ----------\n x : ndarray of shape (n, m)\n The matrix.\n\n Raises\n ------\n ValueError: if the matrix is not symmetric.\n \"\"\"\n assert_is_square(x)\n if not np.allclose(x, x.T):\n raise ValueError(\"The matrix must be symmetric\")" }, { "class_start_lineno": 1, "class_end_lineno": 577, "func_start_lineno": 241, "func_end_lineno": 257, "func_code": "def assert_is_distance(x: np.ndarray) -> None:\n \"\"\"Raises an error if the matrix is not a distance matrix.\n\n Parameters\n ----------\n x : ndarray of shape (n, n)\n The matrix.\n\n Raises\n ------\n ValueError: if the matrix is a distance matrix.\n \"\"\"\n assert_is_symmetric(x)\n if not np.allclose(np.diag(x), np.zeros(x.shape[0]), atol=1e-5):\n raise ValueError(\n \"The distance matrix must have diagonal elements close to zeros\"\n )" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.utils.stats.assert_is_square", "skfolio.utils.stats.assert_is_symmetric", "skfolio.utils.stats.assert_is_distance" ]
Python
5
5
{ "total_num": 15, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.prior._empirical.EmpiricalPrior::get_metadata_routing", "skfolio.src.skfolio.prior._empirical.EmpiricalPrior::fit" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/prior/_empirical.py", "skfolio/prior/_empirical.py" ]
[ "tests/test_optimization/test_cluster/test_hierarchical/test_herc.py", "tests/test_optimization/test_cluster/test_hierarchical/test_hrp.py", "tests/test_optimization/test_convex/test_maximum_diversification.py", "tests/test_optimization/test_convex/test_risk_budgeting.py", "tests/test_prior/test_empirical.py", "tests/test_uncertainty_set/test_bootstrap.py", "tests/test_uncertainty_set/test_empirical.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 17, "class_end_lineno": 202, "func_start_lineno": 93, "func_end_lineno": 106, "func_code": " def get_metadata_routing(self):\n # noinspection PyTypeChecker\n router = (\n skm.MetadataRouter(owner=self.__class__.__name__)\n .add(\n mu_estimator=self.mu_estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n .add(\n covariance_estimator=self.covariance_estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n )\n return router" }, { "class_start_lineno": 17, "class_end_lineno": 202, "func_start_lineno": 108, "func_end_lineno": 202, "func_code": " def fit(self, X: npt.ArrayLike, y=None, **fit_params) -> \"EmpiricalPrior\":\n \"\"\"Fit the Empirical Prior estimator.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, n_assets)\n Price returns of the assets.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n **fit_params : dict\n Parameters to pass to the underlying estimators.\n Only available if `enable_metadata_routing=True`, which can be\n set by using ``sklearn.set_config(enable_metadata_routing=True)``.\n See :ref:`Metadata Routing User Guide <metadata_routing>` for\n more details.\n\n Returns\n -------\n self : EmpiricalPrior\n Fitted estimator.\n \"\"\"\n routed_params = skm.process_routing(self, \"fit\", **fit_params)\n\n self.mu_estimator_ = check_estimator(\n self.mu_estimator,\n default=EmpiricalMu(),\n check_type=BaseMu,\n )\n self.covariance_estimator_ = check_estimator(\n self.covariance_estimator,\n default=EmpiricalCovariance(),\n check_type=BaseCovariance,\n )\n # fitting estimators\n if not self.is_log_normal:\n if self.investment_horizon is not None:\n raise ValueError(\n \"`investment_horizon` must be `None` when \"\n \"`is_log_normal` is `False`\"\n )\n # Expected returns\n # noinspection PyArgumentList\n self.mu_estimator_.fit(X, y, **routed_params.mu_estimator.fit)\n mu = self.mu_estimator_.mu_\n\n # Covariance\n # noinspection PyArgumentList\n self.covariance_estimator_.fit(\n X, y, **routed_params.covariance_estimator.fit\n )\n covariance = self.covariance_estimator_.covariance_\n else:\n if self.investment_horizon is None:\n raise ValueError(\n \"`investment_horizon` must be provided when \"\n \"`is_log_normal` is `True`\"\n )\n # Convert linear returns to log returns\n X_log = np.log(1 + X)\n y_log = np.log(1 + y) if y is not None else None\n\n # Estimates the moments on the log returns\n # Expected returns\n # noinspection PyArgumentList\n self.mu_estimator_.fit(X_log, y_log, **routed_params.mu_estimator.fit)\n mu = self.mu_estimator_.mu_\n\n # Covariance\n # noinspection PyArgumentList\n self.covariance_estimator_.fit(\n X_log, y_log, **routed_params.covariance_estimator.fit\n )\n covariance = self.covariance_estimator_.covariance_\n\n # Using the property of aggregation across time we scale this distribution\n # to the investment horizon by the “square-root rule”.\n mu *= self.investment_horizon\n covariance *= self.investment_horizon\n\n # We convert it into a distribution of linear returns over the investment\n # horizon\n mu = np.exp(mu + 0.5 * np.diag(covariance))\n covariance = np.outer(mu, mu) * (np.exp(covariance) - 1)\n\n # we validate and convert to numpy after all models have been fitted to keep\n # features names information.\n X = skv.validate_data(self, X)\n self.prior_model_ 
= PriorModel(\n mu=mu,\n covariance=covariance,\n returns=X,\n )\n return self" } ]
[ "function_empty", "Development" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.prior._empirical.EmpiricalPrior.get_metadata_routing", "skfolio.prior._empirical.EmpiricalPrior.fit" ]
Python
2
4
{ "total_num": 398, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.optimization.ensemble._stacking.StackingOptimization::get_metadata_routing", "skfolio.src.skfolio.optimization.ensemble._stacking.StackingOptimization::fit" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/optimization/ensemble/_stacking.py", "skfolio/optimization/ensemble/_stacking.py" ]
[ "tests/test_optimization/test_ensemble/test_stacking.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 30, "class_end_lineno": 355, "func_start_lineno": 233, "func_end_lineno": 241, "func_code": " def get_metadata_routing(self):\n # noinspection PyTypeChecker\n router = skm.MetadataRouter(owner=self.__class__.__name__)\n for name, estimator in self.estimators:\n router.add(\n **{name: estimator},\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n return router" }, { "class_start_lineno": 30, "class_end_lineno": 355, "func_start_lineno": 243, "func_end_lineno": 355, "func_code": " def fit(\n self, X: npt.ArrayLike, y: npt.ArrayLike | None = None, **fit_params\n ) -> \"StackingOptimization\":\n \"\"\"Fit the Stacking Optimization estimator.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, n_assets)\n Price returns of the assets.\n\n y : array-like of shape (n_observations, n_targets), optional\n Price returns of factors or a target benchmark.\n The default is `None`.\n\n **fit_params : dict\n Parameters to pass to the underlying estimators.\n Only available if `enable_metadata_routing=True`, which can be\n set by using ``sklearn.set_config(enable_metadata_routing=True)``.\n See :ref:`Metadata Routing User Guide <metadata_routing>` for\n more details.\n\n Returns\n -------\n self : StackingOptimization\n Fitted estimator.\n \"\"\"\n routed_params = skm.process_routing(self, \"fit\", **fit_params)\n\n names, all_estimators = self._validate_estimators()\n self.final_estimator_ = check_estimator(\n self.final_estimator,\n default=MeanRisk(),\n check_type=BaseOptimization,\n )\n\n if self.cv == \"prefit\":\n self.estimators_ = []\n for estimator in all_estimators:\n skv.check_is_fitted(estimator)\n self.estimators_.append(estimator)\n else:\n # Fit the base estimators on the whole training data. 
Those\n # base estimators will be used to retrieve the inner weights.\n # They are exposed publicly.\n # noinspection PyCallingNonCallable\n self.estimators_ = skp.Parallel(n_jobs=self.n_jobs)(\n skp.delayed(fit_single_estimator)(\n sk.clone(est), X, y, routed_params[name][\"fit\"]\n )\n for name, est in zip(names, all_estimators, strict=True)\n )\n\n self.named_estimators_ = {\n name: estimator\n for name, estimator in zip(names, self.estimators_, strict=True)\n }\n\n inner_weights = np.array([estimator.weights_ for estimator in self.estimators_])\n\n # To train the final-estimator using the most data as possible, we use\n # a cross-validation to obtain the output of the stacked estimators.\n # To ensure that the data provided to each estimator are the same,\n # we need to set the random state of the cv if there is one and we\n # need to take a copy.\n if self.cv in [\"prefit\", \"ignore\"]:\n X_pred = np.array(\n [estimator.predict(X) for estimator in self.estimators_]\n ).T\n else:\n cv = sks.check_cv(self.cv)\n if hasattr(cv, \"random_state\") and cv.random_state is None:\n cv.random_state = np.random.RandomState()\n # noinspection PyCallingNonCallable\n cv_predictions = skp.Parallel(n_jobs=self.n_jobs)(\n skp.delayed(cross_val_predict)(\n sk.clone(est),\n X,\n y,\n cv=deepcopy(cv),\n method=\"predict\",\n n_jobs=self.n_jobs,\n params=routed_params[name][\"fit\"],\n verbose=self.verbose,\n )\n for name, est in zip(names, all_estimators, strict=True)\n )\n\n # We validate and convert to numpy array only after base-estimator fitting\n # to keep the assets names in case they are used in the estimator.\n if y is not None:\n _, y = skv.validate_data(self, X, y, multi_output=True)\n else:\n _ = skv.validate_data(self, X)\n\n if isinstance(self.cv, BaseCombinatorialCV):\n X_pred = np.array(\n [\n pred.quantile(measure=self.quantile_measure, q=self.quantile)\n for pred in cv_predictions\n ]\n ).T\n else:\n X_pred = np.array(cv_predictions).T\n if y is not None:\n test_indices = np.sort(\n np.concatenate([test for _, test in cv.split(X, y)])\n )\n y = y[test_indices]\n\n fit_single_estimator(self.final_estimator_, X_pred, y, {})\n outer_weights = self.final_estimator_.weights_\n self.weights_ = outer_weights @ inner_weights\n return self" } ]
[ "function_empty", "Development" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.optimization.ensemble._stacking.StackingOptimization.get_metadata_routing", "skfolio.optimization.ensemble._stacking.StackingOptimization.fit" ]
Python
2
4
{ "total_num": 5, "base_passed_num": 1 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.datasets._base.load_factors_dataset", "skfolio.src.skfolio.prior._empirical.EmpiricalPrior::get_metadata_routing", "skfolio.src.skfolio.prior._empirical.EmpiricalPrior::fit" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/prior/_empirical.py", "skfolio/prior/_empirical.py" ]
[ "tests/test_optimization/test_naive/test_naive.py", "tests/test_prior/test_factor_model.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 247, "func_end_lineno": 292, "func_code": "def load_factors_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 5 factor ETFs.\n\n This dataset is composed of the daily prices of 5 ETF representing common factors\n starting from 2014-01-02 up to 2022-12-28.\n\n The factors are:\n\n * \"MTUM\": Momentum\n * \"QUAL\": Quality\n * \"SIZE\": Size\n * \"VLUE\": Value\n * \"USMV\": low volatility\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 2264\n Assets 5\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_factors_dataset\n >>> prices = load_factors_dataset()\n >>> prices.head()\n MTUM QUAL SIZE USMV VLUE\n Date\n 2014-01-02 52.704 48.351 48.986 29.338 47.054\n 2014-01-03 52.792 48.256 48.722 29.330 46.999\n 2014-01-06 52.677 48.067 48.722 29.263 46.991\n 2014-01-07 53.112 48.455 48.731 29.430 47.253\n 2014-01-08 53.502 48.437 48.731 29.422 47.253\n \"\"\"\n data_filename = \"factors_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 17, "class_end_lineno": 202, "func_start_lineno": 93, "func_end_lineno": 106, "func_code": " def get_metadata_routing(self):\n # noinspection PyTypeChecker\n router = (\n skm.MetadataRouter(owner=self.__class__.__name__)\n .add(\n mu_estimator=self.mu_estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n .add(\n covariance_estimator=self.covariance_estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n )\n return router" }, { "class_start_lineno": 17, "class_end_lineno": 202, "func_start_lineno": 108, "func_end_lineno": 202, "func_code": " def fit(self, X: npt.ArrayLike, y=None, **fit_params) -> \"EmpiricalPrior\":\n \"\"\"Fit the Empirical Prior estimator.\n\n Parameters\n ----------\n X : array-like of shape (n_observations, n_assets)\n Price returns of the assets.\n\n y : Ignored\n Not used, present for API consistency by convention.\n\n **fit_params : dict\n Parameters to pass to the underlying estimators.\n Only available if `enable_metadata_routing=True`, which can be\n set by using ``sklearn.set_config(enable_metadata_routing=True)``.\n See :ref:`Metadata Routing User Guide <metadata_routing>` for\n more details.\n\n Returns\n -------\n self : EmpiricalPrior\n Fitted estimator.\n \"\"\"\n routed_params = skm.process_routing(self, \"fit\", **fit_params)\n\n self.mu_estimator_ = check_estimator(\n self.mu_estimator,\n default=EmpiricalMu(),\n check_type=BaseMu,\n )\n self.covariance_estimator_ = check_estimator(\n self.covariance_estimator,\n default=EmpiricalCovariance(),\n check_type=BaseCovariance,\n )\n # fitting estimators\n if not self.is_log_normal:\n if self.investment_horizon is not None:\n raise ValueError(\n \"`investment_horizon` must be `None` when \"\n \"`is_log_normal` is `False`\"\n )\n # Expected returns\n # noinspection 
PyArgumentList\n self.mu_estimator_.fit(X, y, **routed_params.mu_estimator.fit)\n mu = self.mu_estimator_.mu_\n\n # Covariance\n # noinspection PyArgumentList\n self.covariance_estimator_.fit(\n X, y, **routed_params.covariance_estimator.fit\n )\n covariance = self.covariance_estimator_.covariance_\n else:\n if self.investment_horizon is None:\n raise ValueError(\n \"`investment_horizon` must be provided when \"\n \"`is_log_normal` is `True`\"\n )\n # Convert linear returns to log returns\n X_log = np.log(1 + X)\n y_log = np.log(1 + y) if y is not None else None\n\n # Estimates the moments on the log returns\n # Expected returns\n # noinspection PyArgumentList\n self.mu_estimator_.fit(X_log, y_log, **routed_params.mu_estimator.fit)\n mu = self.mu_estimator_.mu_\n\n # Covariance\n # noinspection PyArgumentList\n self.covariance_estimator_.fit(\n X_log, y_log, **routed_params.covariance_estimator.fit\n )\n covariance = self.covariance_estimator_.covariance_\n\n # Using the property of aggregation across time we scale this distribution\n # to the investment horizon by the “square-root rule”.\n mu *= self.investment_horizon\n covariance *= self.investment_horizon\n\n # We convert it into a distribution of linear returns over the investment\n # horizon\n mu = np.exp(mu + 0.5 * np.diag(covariance))\n covariance = np.outer(mu, mu) * (np.exp(covariance) - 1)\n\n # we validate and convert to numpy after all models have been fitted to keep\n # features names information.\n X = skv.validate_data(self, X)\n self.prior_model_ = PriorModel(\n mu=mu,\n covariance=covariance,\n returns=X,\n )\n return self" } ]
[ "function_empty", "Development" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.datasets._base.load_factors_dataset", "skfolio.prior._empirical.EmpiricalPrior.get_metadata_routing", "skfolio.prior._empirical.EmpiricalPrior.fit" ]
Python
3
5
{ "total_num": 8, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.measures._measures.semi_variance", "skfolio.src.skfolio.measures._measures.semi_deviation" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/measures/_measures.py", "skfolio/measures/_measures.py" ]
[ "tests/test_population/test_population.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 633, "func_start_lineno": 98, "func_end_lineno": 124, "func_code": "def semi_variance(\n returns: np.ndarray, min_acceptable_return: float | None = None\n) -> float:\n \"\"\"Compute the semi-variance (second lower partial moment).\n\n The semi-variance is the variance of the returns below a minimum acceptable return.\n\n Parameters\n ----------\n returns : ndarray of shape (n_observations,)\n Vector of returns\n\n min_acceptable_return : float, optional\n Minimum acceptable return. It is the return target to distinguish \"downside\" and\n \"upside\" returns.\n The default (`None`) is to use the mean.\n\n Returns\n -------\n value : float\n Semi-variance.\n \"\"\"\n if min_acceptable_return is None:\n min_acceptable_return = np.mean(returns, axis=0)\n return np.sum(np.power(np.minimum(0, returns - min_acceptable_return), 2)) / (\n len(returns) - 1\n )" }, { "class_start_lineno": 1, "class_end_lineno": 633, "func_start_lineno": 143, "func_end_lineno": 166, "func_code": "def semi_deviation(\n returns: np.ndarray, min_acceptable_return: float | None = None\n) -> float:\n \"\"\"Compute the semi standard-deviation (semi-deviation) (square root of the second lower\n partial moment).\n\n Parameters\n ----------\n returns : ndarray of shape (n_observations,)\n Vector of returns.\n\n min_acceptable_return : float, optional\n Minimum acceptable return. It is the return target to distinguish \"downside\" and\n \"upside\" returns.\n The default (`None`) is to use the returns mean.\n\n Returns\n -------\n value : float\n Semi-standard-deviation.\n \"\"\"\n return np.sqrt(\n semi_variance(returns=returns, min_acceptable_return=min_acceptable_return)\n )" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.measures._measures.semi_variance", "skfolio.measures._measures.semi_deviation" ]
Python
4
4
{ "total_num": 10, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.measures._measures.variance", "skfolio.src.skfolio.measures._measures.standard_deviation" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/measures/_measures.py", "skfolio/measures/_measures.py" ]
[ "tests/test_portfolio/test_multi_period_portfolio.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 633, "func_start_lineno": 82, "func_end_lineno": 95, "func_code": "def variance(returns: np.ndarray) -> float:\n \"\"\"Compute the variance (second moment).\n\n Parameters\n ----------\n returns : ndarray of shape (n_observations,)\n Vector of returns.\n\n Returns\n -------\n value : float\n Variance.\n \"\"\"\n return returns.var(ddof=1)" }, { "class_start_lineno": 1, "class_end_lineno": 633, "func_start_lineno": 127, "func_end_lineno": 140, "func_code": "def standard_deviation(returns: np.ndarray) -> float:\n \"\"\"Compute the standard-deviation (square root of the second moment).\n\n Parameters\n ----------\n returns : ndarray of shape (n_observations,)\n Vector of returns.\n\n Returns\n -------\n value : float\n Standard-deviation.\n \"\"\"\n return np.sqrt(variance(returns=returns))" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.measures._measures.variance", "skfolio.measures._measures.standard_deviation" ]
Python
4
4
{ "total_num": 106, "base_passed_num": 0 }
[ "skfolio.src.skfolio.utils.tools.safe_indexing", "skfolio.src.skfolio.utils.tools.safe_split", "skfolio.src.skfolio.model_selection._validation.cross_val_predict" ]
skfolio
[ "skfolio/utils/tools.py", "skfolio/utils/tools.py", "skfolio/model_selection/_validation.py" ]
[ "tests/test_pre_selection/test_select_non_expiring.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 190, "func_end_lineno": 219, "func_code": "def safe_indexing(\n X: npt.ArrayLike | pd.DataFrame, indices: npt.ArrayLike | None, axis: int = 0\n):\n \"\"\"Return rows, items or columns of X using indices.\n\n Parameters\n ----------\n X : array-like\n Data from which to sample rows.\n\n indices : array-like, optional\n Indices of rows or columns.\n The default (`None`) is to select the entire data.\n\n axis : int, default=0\n The axis along which `X` will be sub-sampled. `axis=0` will select\n rows while `axis=1` will select columns.\n\n Returns\n -------\n subset :\n Subset of X on axis 0.\n \"\"\"\n if indices is None:\n return X\n if hasattr(X, \"iloc\"):\n return X.take(indices, axis=axis)\n if axis == 0:\n return X[indices]\n return X[:, indices]" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 222, "func_end_lineno": 261, "func_code": "def safe_split(\n X: npt.ArrayLike,\n y: npt.ArrayLike | None = None,\n indices: np.ndarray | None = None,\n axis: int = 0,\n):\n \"\"\"Create subset of dataset.\n\n Slice X, y according to indices for cross-validation.\n\n Parameters\n ----------\n X : array-like\n Data to be indexed.\n\n y : array-like\n Data to be indexed.\n\n indices : ndarray of int, optional\n Rows or columns to select from X and y.\n The default (`None`) is to select the entire data.\n\n axis : int, default=0\n The axis along which `X` will be sub-sampled. `axis=0` will select\n rows while `axis=1` will select columns.\n\n Returns\n -------\n X_subset : array-like\n Indexed data.\n\n y_subset : array-like\n Indexed targets.\n \"\"\"\n X_subset = safe_indexing(X, indices=indices, axis=axis)\n if y is not None:\n y_subset = safe_indexing(y, indices=indices, axis=axis)\n else:\n y_subset = None\n return X_subset, y_subset" }, { "class_start_lineno": 1, "class_end_lineno": 254, "func_start_lineno": 38, "func_end_lineno": 254, "func_code": "def cross_val_predict(\n estimator: skb.BaseEstimator,\n X: npt.ArrayLike,\n y: npt.ArrayLike = None,\n cv: sks.BaseCrossValidator | BaseCombinatorialCV | int | None = None,\n n_jobs: int | None = None,\n method: str = \"predict\",\n verbose: int = 0,\n params: dict | None = None,\n pre_dispatch: str = \"2*n_jobs\",\n column_indices: np.ndarray | None = None,\n portfolio_params: dict | None = None,\n) -> MultiPeriodPortfolio | Population:\n \"\"\"Generate cross-validated `Portfolios` estimates.\n\n The data is split according to the `cv` parameter.\n The optimization estimator is fitted on the training set and portfolios are\n predicted on the corresponding test set.\n\n For non-combinatorial cross-validation like `Kfold`, the output is the predicted\n :class:`~skfolio.portfolio.MultiPeriodPortfolio` where\n each :class:`~skfolio.portfolio.Portfolio` corresponds to the prediction on each\n train/test pair (`k` portfolios for `Kfold`).\n\n For combinatorial cross-validation\n like :class:`~skfolio.model_selection.CombinatorialPurgedCV`, the output is the\n predicted :class:`~skfolio.population.Population` of multiple\n :class:`~skfolio.portfolio.MultiPeriodPortfolio` (each test outputs are a\n collection of multiple paths instead of one single path).\n\n Parameters\n ----------\n estimator : BaseOptimization\n :ref:`Optimization estimators <optimization>` use to fit the data.\n\n X : array-like of shape (n_observations, n_assets)\n Price returns of the assets.\n\n y : array-like of shape (n_observations, n_targets), optional\n Target data 
(optional).\n For example, the price returns of the factors.\n\n cv : int | cross-validation generator, optional\n Determines the cross-validation splitting strategy.\n Possible inputs for cv are:\n\n * None, to use the default 5-fold cross validation,\n * int, to specify the number of folds in a `(Stratified)KFold`,\n * `CV splitter`,\n * An iterable that generates (train, test) splits as arrays of indices.\n\n n_jobs : int, optional\n The number of jobs to run in parallel for `fit` of all `estimators`.\n `None` means 1 unless in a `joblib.parallel_backend` context. -1 means\n using all processors.\n\n method : str\n Invokes the passed method name of the passed estimator.\n\n verbose : int, default=0\n The verbosity level.\n\n params : dict, optional\n Parameters to pass to the underlying estimator's ``fit`` and the CV splitter.\n\n pre_dispatch : int or str, default='2*n_jobs'\n Controls the number of jobs that get dispatched during parallel\n execution. Reducing this number can be useful to avoid an\n explosion of memory consumption when more jobs get dispatched\n than CPUs can process. This parameter can be:\n\n * None, in which case all the jobs are immediately\n created and spawned. Use this for lightweight and\n fast-running jobs, to avoid delays due to on-demand\n spawning of the jobs\n\n * An int, giving the exact number of total jobs that are\n spawned\n\n * A str, giving an expression as a function of n_jobs,\n as in '2*n_jobs'\n\n column_indices : ndarray, optional\n Indices of the `X` columns to cross-validate on.\n\n portfolio_params : dict, optional\n Additional portfolio parameters passed to `MultiPeriodPortfolio`.\n\n Returns\n -------\n predictions : MultiPeriodPortfolio | Population\n This is the result of calling `predict`\n \"\"\"\n params = {} if params is None else params\n\n X, y = safe_split(X, y, indices=column_indices, axis=1)\n X, y = sku.indexable(X, y)\n\n if _routing_enabled():\n # For estimators, a MetadataRouter is created in get_metadata_routing\n # methods. For these router methods, we create the router to use\n # `process_routing` on it.\n # noinspection PyTypeChecker\n router = (\n skm.MetadataRouter(owner=\"cross_validate\")\n .add(\n splitter=cv,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"split\"),\n )\n .add(\n estimator=estimator,\n method_mapping=skm.MethodMapping().add(caller=\"fit\", callee=\"fit\"),\n )\n )\n try:\n routed_params = skm.process_routing(router, \"fit\", **params)\n except ske.UnsetMetadataPassedError as e:\n # The default exception would mention `fit` since in the above\n # `process_routing` code, we pass `fit` as the caller. However,\n # the user is not calling `fit` directly, so we change the message\n # to make it more suitable for this case.\n unrequested_params = sorted(e.unrequested_params)\n raise ske.UnsetMetadataPassedError(\n message=(\n f\"{unrequested_params} are passed to `cross_val_predict` but are\"\n \" not explicitly set as requested or not requested for\"\n f\" cross_validate's estimator: {estimator.__class__.__name__} Call\"\n \" `.set_fit_request({{metadata}}=True)` on the estimator for\"\n f\" each metadata in {unrequested_params} that you want to use and\"\n \" `metadata=False` for not using it. 
See the Metadata Routing User\"\n \" guide <https://scikit-learn.org/stable/metadata_routing.html>\"\n \" for more information.\"\n ),\n unrequested_params=e.unrequested_params,\n routed_params=e.routed_params,\n ) from None\n else:\n routed_params = sku.Bunch()\n routed_params.splitter = sku.Bunch(split={})\n routed_params.estimator = sku.Bunch(fit=params)\n\n cv = sks.check_cv(cv, y)\n splits = list(cv.split(X, y, **routed_params.splitter.split))\n\n portfolio_params = {} if portfolio_params is None else portfolio_params.copy()\n\n # We ensure that the folds are not shuffled\n if not isinstance(cv, BaseCombinatorialCV):\n try:\n if cv.shuffle:\n raise ValueError(\n \"`cross_val_predict` only works with cross-validation setting\"\n \" `shuffle=False`\"\n )\n except AttributeError:\n # If we cannot find the attribute shuffle, we check if the first folds\n # are shuffled\n for fold in splits[0]:\n if not np.all(np.diff(fold) > 0):\n raise ValueError(\n \"`cross_val_predict` only works with un-shuffled folds\"\n ) from None\n\n # We clone the estimator to make sure that all the folds are independent\n # and that it is pickle-able.\n parallel = skp.Parallel(n_jobs=n_jobs, verbose=verbose, pre_dispatch=pre_dispatch)\n # TODO remove when https://github.com/joblib/joblib/issues/1071 is fixed\n # noinspection PyCallingNonCallable\n predictions = parallel(\n skp.delayed(fit_and_predict)(\n sk.clone(estimator),\n X,\n y,\n train=train,\n test=test,\n fit_params=routed_params.estimator.fit,\n method=method,\n )\n for train, test in splits\n )\n\n if isinstance(cv, BaseCombinatorialCV):\n path_ids = cv.get_path_ids()\n path_nb = np.max(path_ids) + 1\n portfolios = [[] for _ in range(path_nb)]\n for i, prediction in enumerate(predictions):\n for j, p in enumerate(prediction):\n path_id = path_ids[i, j]\n portfolios[path_id].append(p)\n name = portfolio_params.pop(\"name\", \"path\")\n pred = Population(\n [\n MultiPeriodPortfolio(\n name=f\"{name}_{i}\", portfolios=portfolios[i], **portfolio_params\n )\n for i in range(path_nb)\n ]\n )\n else:\n # We need to re-order the test folds in case they were un-ordered by the\n # CV generator.\n # Because the tests folds are not shuffled, we use the first index of each\n # fold to order them.\n test_indices = np.concatenate([test for _, test in splits])\n if np.unique(test_indices, axis=0).shape[0] != test_indices.shape[0]:\n raise ValueError(\n \"`cross_val_predict` only works with non-duplicated test indices\"\n )\n test_indices = [test for _, test in splits]\n sorted_fold_id = np.argsort([x[0] for x in test_indices])\n pred = MultiPeriodPortfolio(\n portfolios=[predictions[fold_id] for fold_id in sorted_fold_id],\n check_observations_order=False,\n **portfolio_params,\n )\n\n return pred" } ]
[ "function_empty" ]
[ "skfolio.utils.tools.safe_indexing", "skfolio.utils.tools.safe_split", "skfolio.model_selection._validation.cross_val_predict" ]
Python
3
3
{ "total_num": 2, "base_passed_num": 1 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_factors_dataset", "skfolio.src.skfolio.datasets._base.load_sp500_dataset" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py" ]
[ "tests/test_preprocessing/test_returns.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 247, "func_end_lineno": 292, "func_code": "def load_factors_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 5 factor ETFs.\n\n This dataset is composed of the daily prices of 5 ETF representing common factors\n starting from 2014-01-02 up to 2022-12-28.\n\n The factors are:\n\n * \"MTUM\": Momentum\n * \"QUAL\": Quality\n * \"SIZE\": Size\n * \"VLUE\": Value\n * \"USMV\": low volatility\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 2264\n Assets 5\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_factors_dataset\n >>> prices = load_factors_dataset()\n >>> prices.head()\n MTUM QUAL SIZE USMV VLUE\n Date\n 2014-01-02 52.704 48.351 48.986 29.338 47.054\n 2014-01-03 52.792 48.256 48.722 29.330 46.999\n 2014-01-06 52.677 48.067 48.722 29.263 46.991\n 2014-01-07 53.112 48.455 48.731 29.430 47.253\n 2014-01-08 53.502 48.437 48.731 29.422 47.253\n \"\"\"\n data_filename = \"factors_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 
2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_factors_dataset", "skfolio.datasets._base.load_sp500_dataset" ]
Python
3
3
{ "total_num": 2, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.utils.equations._validate_groups", "skfolio.src.skfolio.utils.equations.equations_to_matrix" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/utils/equations.py", "skfolio/utils/equations.py" ]
[ "tests/test_prior/test_black_litterman.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 499, "func_start_lineno": 195, "func_end_lineno": 226, "func_code": "def _validate_groups(groups: npt.ArrayLike, name: str = \"groups\") -> np.ndarray:\n \"\"\"Validate groups by checking its dim and if group names don't appear in multiple\n levels and convert to numpy array.\n\n Parameters\n ----------\n groups : array-like of shape (n_groups, n_assets)\n 2D-array of strings.\n\n Returns\n -------\n groups : ndarray of shape (n_groups, n_assets)\n 2D-array of strings.\n \"\"\"\n groups = np.asarray(groups)\n if groups.ndim != 2:\n raise ValueError(\n f\"`{name} must be a 2D array, got {groups.ndim}D array instead.\"\n )\n n = len(groups)\n group_sets = [set(groups[i]) for i in range(n)]\n for i in range(n - 1):\n for e in group_sets[i]:\n for j in range(i + 1, n):\n if e in group_sets[j]:\n raise DuplicateGroupsError(\n f\"'{e}' appear in two levels: {list(groups[i])} \"\n f\"and {list(groups[i])}. \"\n f\"{name} must be in only one level.\"\n )\n\n return groups" }, { "class_start_lineno": 1, "class_end_lineno": 499, "func_start_lineno": 32, "func_end_lineno": 134, "func_code": "def equations_to_matrix(\n groups: npt.ArrayLike,\n equations: npt.ArrayLike,\n sum_to_one: bool = False,\n raise_if_group_missing: bool = False,\n names: tuple[str, str] = (\"groups\", \"equations\"),\n) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Convert a list of linear equations into the left and right matrices of the\n inequality A <= B and equality A == B.\n\n Parameters\n ----------\n groups : array-like of shape (n_groups, n_assets)\n 2D array of assets groups.\n\n For example:\n\n groups = np.array(\n [\n [\"SPX\", \"SX5E\", \"NKY\", \"TLT\"],\n [\"Equity\", \"Equity\", \"Equity\", \"Bond\"],\n [\"US\", \"Europe\", \"Japan\", \"US\"],\n ]\n )\n\n equations : array-like of shape (n_equations,)\n 1D array of equations.\n\n Example of valid equation patterns:\n * \"number_1 * group_1 + number_3 <= number_4 * group_3 + number_5\"\n * \"group_1 == number * group_2\"\n * \"group_1 <= number\"\n * \"group_1 == number\"\n\n \"group_1\" and \"group_2\" are the group names defined in `groups`.\n The second expression means that the sum of all assets in \"group_1\" should be\n less or equal to \"number\" times the sum of all assets in \"group_2\".\n\n For example:\n\n equations = [\n \"Equity <= 3 * Bond\",\n \"US >= 1.5\",\n \"Europe >= 0.5 * Japan\",\n \"Japan == 1\",\n \"3*SPX + 5*SX5E == 2*TLT + 3\",\n ]\n\n sum_to_one : bool\n If this is set to True, all elements in a group sum to one (used in the `views`\n of the Black-Litterman model).\n\n raise_if_group_missing : bool, default=False\n If this is set to True, an error is raised when a group is not found in the\n groups, otherwise only a warning is shown.\n The default is False.\n\n names : tuple[str, str], default=('groups', 'equations')\n The group and equation names used in error messages.\n The default is `('groups', 'equations')`.\n\n Returns\n -------\n left_equality: ndarray of shape (n_equations_equality, n_assets)\n right_equality: ndarray of shape (n_equations_equality,)\n The left and right matrices of the inequality A <= B.\n\n left_inequality: ndarray of shape (n_equations_inequality, n_assets)\n right_inequality: ndarray of shape (n_equations_inequality,)\n The left and right matrices of the equality A == B.\n \"\"\"\n groups = 
_validate_groups(groups, name=names[0])\n equations = _validate_equations(equations, name=names[1])\n\n a_equality = []\n b_equality = []\n\n a_inequality = []\n b_inequality = []\n\n for string in equations:\n try:\n left, right, is_inequality = _string_to_equation(\n groups=groups,\n string=string,\n sum_to_one=sum_to_one,\n )\n if is_inequality:\n a_inequality.append(left)\n b_inequality.append(right)\n else:\n a_equality.append(left)\n b_equality.append(right)\n except GroupNotFoundError as e:\n if raise_if_group_missing:\n raise\n warnings.warn(str(e), stacklevel=2)\n return (\n np.array(a_equality),\n np.array(b_equality),\n np.array(a_inequality),\n np.array(b_inequality),\n )" } ]
[ "function_empty", "Development" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.utils.equations._validate_groups", "skfolio.utils.equations.equations_to_matrix" ]
Python
3
4
{ "total_num": 4, "base_passed_num": 0 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.distribution.multivariate._utils.ChildNode::central", "skfolio.src.skfolio.distribution.multivariate._utils.Tree::set_edges_from_mst" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/distribution/multivariate/_utils.py", "skfolio/distribution/multivariate/_utils.py", "skfolio/distribution/multivariate/_utils.py" ]
[ "tests/test_prior/test_synthetic_data.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 160, "class_end_lineno": 324, "func_start_lineno": 191, "func_end_lineno": 202, "func_code": " def central(self) -> bool:\n \"\"\"Determine whether this node is considered central.\n It is inherited from the associated edge's centrality.\n\n Returns\n -------\n central: bool\n True if the node is central; otherwise, False.\n \"\"\"\n if self._central is None:\n self._central = self.ref.strongly_central\n return self._central" }, { "class_start_lineno": 327, "class_end_lineno": 479, "func_start_lineno": 365, "func_end_lineno": 369, "func_code": " def weakly_central(self) -> bool:\n \"\"\"Determine if the edge is weakly central.\n An edge is weakly central if at least one of its two nodes is central.\n \"\"\"\n return self.node1.central or self.node2.central" }, { "class_start_lineno": 482, "class_end_lineno": 591, "func_start_lineno": 520, "func_end_lineno": 582, "func_code": " def set_edges_from_mst(self, dependence_method: DependenceMethod) -> None:\n \"\"\"Construct the Maximum Spanning Tree (MST) from the current nodes using\n the specified dependence method.\n\n The MST is built based on pairwise dependence measures computed between nodes.\n If any edge is (weakly) central, a central factor is added to the dependence\n measure to favor edges connected to central nodes.\n\n Parameters\n ----------\n dependence_method : DependenceMethod\n The method used to compute the dependence measure between nodes (e.g.,\n Kendall's tau).\n\n Returns\n -------\n None\n \"\"\"\n n = len(self.nodes)\n dependence_matrix = np.zeros((n, n))\n eligible_edges = {}\n central = False\n for i, j in combinations(range(n), 2):\n node1 = self.nodes[i]\n node2 = self.nodes[j]\n if self.level == 0 or node1.ref.share_one_node(node2.ref):\n edge = Edge(\n node1=node1, node2=node2, dependence_method=dependence_method\n )\n if not central and edge.weakly_central:\n central = True\n # Negate the matrix to use minimum_spanning_tree for maximum spanning\n # Add a cst to ensure that even if dep is 0, we still build a valid MST\n dep = abs(edge.dependence) + 1e-5\n dependence_matrix[i, j] = dep\n eligible_edges[(i, j)] = edge\n\n if np.any(np.isnan(dependence_matrix)):\n raise RuntimeError(\"dependence_matrix contains NaNs\")\n\n if central:\n max_dep = np.max(dependence_matrix)\n for (i, j), edge in eligible_edges.items():\n if edge.weakly_central:\n if edge.strongly_central:\n central_factor = 3 * max_dep\n else:\n central_factor = 2 * max_dep\n dep = dependence_matrix[i, j] + central_factor\n dependence_matrix[i, j] = dep\n\n # Compute the minimum spanning tree\n mst = ssc.minimum_spanning_tree(-dependence_matrix, overwrite=True)\n\n edges = []\n # Extract the indices of the non-zero entries (edges)\n for i, j in zip(*mst.nonzero(), strict=True):\n edge = eligible_edges[(i, j)]\n # connect Nodes to Edges\n edge.ref_to_nodes()\n edges.append(edge)\n\n self.edges = edges" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.distribution.multivariate._utils.ChildNode.central", "skfolio.distribution.multivariate._utils.Edge.weakly_central", "skfolio.distribution.multivariate._utils.Tree.set_edges_from_mst" ]
Python
4
4
{ "total_num": 4, "base_passed_num": 0 }
[ "skfolio.src.skfolio.utils.equations._split_equation_string", "skfolio.src.skfolio.utils.equations._string_to_equation", "skfolio.src.skfolio.utils.equations._validate_groups", "skfolio.src.skfolio.utils.equations.equations_to_matrix" ]
skfolio
[ "skfolio/utils/equations.py", "skfolio/utils/equations.py", "skfolio/utils/equations.py", "skfolio/utils/equations.py" ]
[ "tests/test_utils/test_equations.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 499, "func_start_lineno": 347, "func_end_lineno": 371, "func_code": "def _split_equation_string(string: str) -> list[str]:\n \"\"\"Split an equation strings by operators.\"\"\"\n comp_pattern = \"(?=\" + \"|\".join([\".+\\\\\" + e for e in _COMPARISON_OPERATORS]) + \")\"\n if not bool(re.match(comp_pattern, string)):\n raise EquationToMatrixError(\n f\"The string must contains a comparison operator: \"\n f\"{list(_COMPARISON_OPERATORS)}\"\n )\n\n # Regex to match only '>' and '<' but not '<=' or '>='\n invalid_pattern = r\"(?<!<)(?<!<=)>(?!=)|(?<!>)<(?!=)\"\n invalid_matches = re.findall(invalid_pattern, string)\n\n if len(invalid_matches) > 0:\n raise EquationToMatrixError(\n f\"{invalid_matches[0]} is an invalid comparison operator. \"\n f\"Valid comparison operators are: {list(_COMPARISON_OPERATORS)}\"\n )\n\n # '==' needs to be before '='\n operators = sorted(_OPERATORS, reverse=True)\n pattern = \"((?:\" + \"|\".join([\"\\\\\" + e for e in operators]) + \"))\"\n res = [x.strip() for x in re.split(pattern, string)]\n res = [x for x in res if x != \"\"]\n return res" }, { "class_start_lineno": 1, "class_end_lineno": 499, "func_start_lineno": 374, "func_end_lineno": 499, "func_code": "def _string_to_equation(\n groups: np.ndarray,\n string: str,\n sum_to_one: bool,\n) -> tuple[np.ndarray, float, bool]:\n \"\"\"Convert a string to a left 1D-array and right float of the form:\n `groups @ left <= right` or `groups @ left == right` and return whether it's an\n equality or inequality.\n\n Parameters\n ----------\n groups : ndarray of shape (n_groups, n_assets)\n Groups 2D-array\n\n string : str\n String to convert\n\n sum_to_one : bool\n If this is set to True, the 1D-array is scaled to have a sum of one.\n\n Returns\n -------\n left : 1D-array of shape (n_assets,)\n right : float\n is_inequality : bool\n \"\"\"\n n = groups.shape[1]\n err_msg = f\"Wrong pattern encountered while converting the string '{string}'\"\n\n iterator = iter(_split_equation_string(string))\n group_names = set(groups.flatten())\n\n def is_group(name: str) -> bool:\n return name in group_names\n\n left = np.zeros(n)\n right = 0\n main_sign = 1\n comparison_sign = None\n is_inequality = None\n e = next(iterator, None)\n i = 0\n while True:\n i += 1\n if i > 1e6:\n raise RecursionError(err_msg)\n if e is None:\n break\n sign = 1\n if e in _COMPARISON_OPERATORS:\n if e in _INEQUALITY_OPERATORS:\n is_inequality = True\n else:\n is_inequality = False\n main_sign = -1\n comparison_sign = _comparison_operator_sign(e)\n e = next(iterator, None)\n if e in _SUB_ADD_OPERATORS:\n sign *= _sub_add_operator_sign(e)\n e = next(iterator, None)\n elif e in _SUB_ADD_OPERATORS:\n sign *= _sub_add_operator_sign(e)\n e = next(iterator, None)\n elif e in _MUL_OPERATORS:\n raise EquationToMatrixError(\n f\"{err_msg}: the character '{e}' is wrongly positioned\"\n )\n sign *= main_sign\n # next can only be a number or a group\n if e is None or e in _OPERATORS:\n raise EquationToMatrixError(\n f\"{err_msg}: the character '{e}' is wrongly positioned\"\n )\n if is_group(e):\n arr = _matching_array(values=groups, key=e, sum_to_one=sum_to_one)\n # next can only be a '*' or an ['-', '+', '>=', '<=', '==', '='] or None\n e = next(iterator, None)\n if e is None or e in _NON_MUL_OPERATORS:\n left += sign * arr\n elif e in _MUL_OPERATORS:\n # next can only a number\n e = next(iterator, None)\n try:\n number = float(e)\n except ValueError:\n raise GroupNotFoundError(\n f\"{err_msg}: the group '{e}' is 
missing from the groups\"\n f\" {groups}\"\n ) from None\n\n left += number * sign * arr\n e = next(iterator, None)\n else:\n raise EquationToMatrixError(\n f\"{err_msg}: the character '{e}' is wrongly positioned\"\n )\n else:\n try:\n number = float(e)\n except ValueError:\n raise GroupNotFoundError(\n f\"{err_msg}: the group '{e}' is missing from the groups {groups}\"\n ) from None\n # next can only be a '*' or an operator or None\n e = next(iterator, None)\n if e in _MUL_OPERATORS:\n # next can only a group\n e = next(iterator, None)\n if not is_group(e):\n raise EquationToMatrixError(\n f\"{err_msg}: the character '{e}' is wrongly positioned\"\n )\n arr = _matching_array(values=groups, key=e, sum_to_one=sum_to_one)\n left += number * sign * arr\n e = next(iterator, None)\n elif e is None or e in _NON_MUL_OPERATORS:\n right += number * sign\n else:\n raise EquationToMatrixError(\n f\"{err_msg}: the character '{e}' is wrongly positioned\"\n )\n\n left *= comparison_sign\n right *= -comparison_sign\n\n return left, right, is_inequality" }, { "class_start_lineno": 1, "class_end_lineno": 499, "func_start_lineno": 195, "func_end_lineno": 226, "func_code": "def _validate_groups(groups: npt.ArrayLike, name: str = \"groups\") -> np.ndarray:\n \"\"\"Validate groups by checking its dim and if group names don't appear in multiple\n levels and convert to numpy array.\n\n Parameters\n ----------\n groups : array-like of shape (n_groups, n_assets)\n 2D-array of strings.\n\n Returns\n -------\n groups : ndarray of shape (n_groups, n_assets)\n 2D-array of strings.\n \"\"\"\n groups = np.asarray(groups)\n if groups.ndim != 2:\n raise ValueError(\n f\"`{name} must be a 2D array, got {groups.ndim}D array instead.\"\n )\n n = len(groups)\n group_sets = [set(groups[i]) for i in range(n)]\n for i in range(n - 1):\n for e in group_sets[i]:\n for j in range(i + 1, n):\n if e in group_sets[j]:\n raise DuplicateGroupsError(\n f\"'{e}' appear in two levels: {list(groups[i])} \"\n f\"and {list(groups[i])}. 
\"\n f\"{name} must be in only one level.\"\n )\n\n return groups" }, { "class_start_lineno": 1, "class_end_lineno": 499, "func_start_lineno": 32, "func_end_lineno": 134, "func_code": "def equations_to_matrix(\n groups: npt.ArrayLike,\n equations: npt.ArrayLike,\n sum_to_one: bool = False,\n raise_if_group_missing: bool = False,\n names: tuple[str, str] = (\"groups\", \"equations\"),\n) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:\n \"\"\"Convert a list of linear equations into the left and right matrices of the\n inequality A <= B and equality A == B.\n\n Parameters\n ----------\n groups : array-like of shape (n_groups, n_assets)\n 2D array of assets groups.\n\n For example:\n\n groups = np.array(\n [\n [\"SPX\", \"SX5E\", \"NKY\", \"TLT\"],\n [\"Equity\", \"Equity\", \"Equity\", \"Bond\"],\n [\"US\", \"Europe\", \"Japan\", \"US\"],\n ]\n )\n\n equations : array-like of shape (n_equations,)\n 1D array of equations.\n\n Example of valid equation patterns:\n * \"number_1 * group_1 + number_3 <= number_4 * group_3 + number_5\"\n * \"group_1 == number * group_2\"\n * \"group_1 <= number\"\n * \"group_1 == number\"\n\n \"group_1\" and \"group_2\" are the group names defined in `groups`.\n The second expression means that the sum of all assets in \"group_1\" should be\n less or equal to \"number\" times the sum of all assets in \"group_2\".\n\n For example:\n\n equations = [\n \"Equity <= 3 * Bond\",\n \"US >= 1.5\",\n \"Europe >= 0.5 * Japan\",\n \"Japan == 1\",\n \"3*SPX + 5*SX5E == 2*TLT + 3\",\n ]\n\n sum_to_one : bool\n If this is set to True, all elements in a group sum to one (used in the `views`\n of the Black-Litterman model).\n\n raise_if_group_missing : bool, default=False\n If this is set to True, an error is raised when a group is not found in the\n groups, otherwise only a warning is shown.\n The default is False.\n\n names : tuple[str, str], default=('groups', 'equations')\n The group and equation names used in error messages.\n The default is `('groups', 'equations')`.\n\n Returns\n -------\n left_equality: ndarray of shape (n_equations_equality, n_assets)\n right_equality: ndarray of shape (n_equations_equality,)\n The left and right matrices of the inequality A <= B.\n\n left_inequality: ndarray of shape (n_equations_inequality, n_assets)\n right_inequality: ndarray of shape (n_equations_inequality,)\n The left and right matrices of the equality A == B.\n \"\"\"\n groups = _validate_groups(groups, name=names[0])\n equations = _validate_equations(equations, name=names[1])\n\n a_equality = []\n b_equality = []\n\n a_inequality = []\n b_inequality = []\n\n for string in equations:\n try:\n left, right, is_inequality = _string_to_equation(\n groups=groups,\n string=string,\n sum_to_one=sum_to_one,\n )\n if is_inequality:\n a_inequality.append(left)\n b_inequality.append(right)\n else:\n a_equality.append(left)\n b_equality.append(right)\n except GroupNotFoundError as e:\n if raise_if_group_missing:\n raise\n warnings.warn(str(e), stacklevel=2)\n return (\n np.array(a_equality),\n np.array(b_equality),\n np.array(a_inequality),\n np.array(b_inequality),\n )" } ]
[ "function_empty", "Development" ]
[ "skfolio.utils.equations._split_equation_string", "skfolio.utils.equations._string_to_equation", "skfolio.utils.equations._validate_groups", "skfolio.utils.equations.equations_to_matrix" ]
Python
2
4
{ "total_num": 12, "base_passed_num": 2 }
[ "skfolio.src.skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.src.skfolio.datasets._base.load_sp500_dataset", "skfolio.src.skfolio.datasets._base.get_data_home", "skfolio.src.skfolio.datasets._base.download_dataset", "skfolio.src.skfolio.datasets._base.load_nasdaq_dataset" ]
skfolio
[ "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py", "skfolio/datasets/_base.py" ]
[ "tests/test_utils/test_stats.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 71, "func_end_lineno": 113, "func_code": "def load_gzip_compressed_csv_data(\n data_filename: str,\n data_module: str = DATA_MODULE,\n encoding=\"utf-8\",\n datetime_index: bool = True,\n) -> pd.DataFrame:\n \"\"\"Load gzip-compressed csv files with `importlib.resources`.\n\n 1) Open resource file with `importlib.resources.open_binary`\n 2) Decompress csv file with `gzip.open`\n 3) Load decompressed data with `pd.read_csv`\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from\n `data_module/data_file_name`. For example `'SPX500.csv.gz'`.\n\n data_module : str or module, default='skfolio.datasets.data'\n Module where data lives. The default is `'skfolio.datasets.data'`.\n\n encoding : str, default=\"utf-8\"\n Name of the encoding that the gzip-decompressed file will be\n decoded with. The default is 'utf-8'.\n\n datetime_index: bool, default=True\n If this is set to True, the DataFrame index is converted to datetime with\n format=\"%Y-%m-%d\".\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n path = resources.files(data_module).joinpath(data_filename)\n with path.open(\"rb\") as compressed_file:\n compressed_file = gzip.open(compressed_file, mode=\"rt\", encoding=encoding)\n df = pd.read_csv(compressed_file, sep=\",\", index_col=0)\n if datetime_index:\n df.index = pd.to_datetime(df.index, format=\"%Y-%m-%d\")\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 168, "func_end_lineno": 204, "func_code": "def load_sp500_dataset() -> pd.DataFrame:\n \"\"\"Load the prices of 20 assets from the S&P 500 Index composition.\n\n This dataset is composed of the daily prices of 20 assets from the S&P 500\n composition starting from 1990-01-02 up to 2022-12-28.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 8313\n Assets 20\n ============== ==================\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_sp500_dataset\n >>> prices = load_sp500_dataset()\n >>> prices.head()\n AAPL AMD BAC ... UNH WMT XOM\n 1990-01-02 0.332589 4.1250 11.65625 ... 0.382813 5.890625 12.5000\n 1990-01-03 0.334821 4.0000 11.75000 ... 0.375000 5.890625 12.3750\n 1990-01-04 0.335938 3.9375 11.50000 ... 0.371094 5.859375 12.2500\n 1990-01-05 0.337054 3.8125 11.25000 ... 0.355469 5.796875 12.1875\n 1990-01-08 0.339286 3.8125 11.31250 ... 
0.347656 5.875000 12.3750\n \"\"\"\n data_filename = \"sp500_dataset.csv.gz\"\n df = load_gzip_compressed_csv_data(data_filename)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 25, "func_end_lineno": 55, "func_code": "def get_data_home(data_home: str | Path | None = None) -> str:\n \"\"\"Return the path of the skfolio data directory.\n\n This folder is used by some large dataset loaders to avoid downloading the\n data several times.\n\n By default, the data directory is set to a folder named 'skfolio_data' in the\n user home folder.\n\n Alternatively, it can be set by the 'SKFOLIO_DATA' environment\n variable or programmatically by giving an explicit folder path. The '~'\n symbol is expanded to the user home folder.\n\n If the folder does not already exist, it is automatically created.\n\n Parameters\n ----------\n data_home : str, optional\n The path to skfolio data directory. If `None`, the default path\n is `~/skfolio_data`.\n\n Returns\n -------\n data_home: str or path-like, optional\n The path to skfolio data directory.\n \"\"\"\n if data_home is None:\n data_home = os.environ.get(\"SKFOLIO_DATA\", os.path.join(\"~\", \"skfolio_data\"))\n data_home = os.path.expanduser(data_home)\n os.makedirs(data_home, exist_ok=True)\n return data_home" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 116, "func_end_lineno": 165, "func_code": "def download_dataset(\n data_filename: str,\n data_home: str | Path | None = None,\n download_if_missing: bool = True,\n) -> pd.DataFrame:\n \"\"\"Download and save locally a dataset from the remote GitHub dataset folder.\n\n Parameters\n ----------\n data_filename : str\n Name of gzip-compressed csv file (`'*.csv.gz'`) to be loaded from a remote\n GitHub dataset folder.\n\n data_home : str or path-like, optional\n Specify another download and cache folder for the datasets. 
By default,\n all skfolio data is stored in `~/skfolio_data` sub-folders.\n\n download_if_missing : bool, default=True\n If False, raise an OSError if the data is not locally available\n instead of trying to download the data from the source site.\n The default is `True`.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n DataFrame with each row representing one observation and each column\n representing the asset price of a given observation.\n \"\"\"\n # Use a CORS proxy when triggering requests from the browser\n url_prefix = \"https://corsproxy.io/?\" if sys.platform == \"emscripten\" else \"\"\n url = url_prefix + (\n f\"https://github.com/skfolio/skfolio-datasets/raw/main/\"\n f\"datasets/{data_filename}.csv.gz\"\n )\n\n data_home = get_data_home(data_home=data_home)\n filepath = os.path.join(data_home, f\"{data_filename}.pkz\")\n\n if os.path.exists(filepath):\n return joblib.load(filepath)\n\n if not download_if_missing:\n raise OSError(\"Data not found and `download_if_missing` is False\")\n\n archive_path = os.path.join(data_home, os.path.basename(url))\n ur.urlretrieve(url, archive_path)\n df = load_gzip_compressed_csv_data(archive_path)\n joblib.dump(df, filepath, compress=6)\n os.remove(archive_path)\n return df" }, { "class_start_lineno": 1, "class_end_lineno": 448, "func_start_lineno": 348, "func_end_lineno": 397, "func_code": "def load_nasdaq_dataset(data_home=None, download_if_missing=True) -> pd.DataFrame:\n \"\"\"Load the prices of 1455 assets from the NASDAQ Composite Index.\n\n This dataset is composed of the daily prices of 1455 assets from the NASDAQ\n Composite starting from 2018-01-02 up to 2023-05-31.\n\n The data comes from the Yahoo public API.\n The price is the adjusted close which is the closing price after adjustments for\n all applicable splits and dividend distributions.\n The adjustment uses appropriate split and dividend multipliers, adhering to\n the Center for Research in Security Prices (CRSP) standards.\n\n ============== ==================\n Observations 1362\n Assets 1455\n ============== ==================\n\n Parameters\n ----------\n data_home : str, optional\n Specify another download and cache folder for the datasets.\n By default, all skfolio data is stored in `~/skfolio_data` subfolders.\n\n download_if_missing : bool, default=True\n If False, raise an OSError if the data is not locally available\n instead of trying to download the data from the source site.\n\n Returns\n -------\n df : DataFrame of shape (n_observations, n_assets)\n Prices DataFrame\n\n Examples\n --------\n >>> from skfolio.datasets import load_nasdaq_dataset\n >>> prices = load_nasdaq_dataset()\n >>> prices.head()\n AAL AAOI AAON AAPL ... ZVRA ZYME ZYNE ZYXI\n Date ...\n 2018-01-02 51.648 37.91 35.621 41.310 ... 66.4 7.933 12.995 2.922\n 2018-01-03 51.014 37.89 36.247 41.303 ... 72.8 7.965 13.460 2.913\n 2018-01-04 51.336 38.38 36.103 41.495 ... 78.4 8.430 12.700 2.869\n 2018-01-05 51.316 38.89 36.681 41.967 ... 77.6 8.400 12.495 2.780\n 2018-01-08 50.809 38.37 36.103 41.811 ... 82.4 8.310 12.550 2.825\n \"\"\"\n data_filename = \"nasdaq_dataset\"\n df = download_dataset(\n data_filename, data_home=data_home, download_if_missing=download_if_missing\n )\n return df" } ]
[ "function_empty" ]
[ "skfolio.datasets._base.load_gzip_compressed_csv_data", "skfolio.datasets._base.load_sp500_dataset", "skfolio.datasets._base.get_data_home", "skfolio.datasets._base.download_dataset", "skfolio.datasets._base.load_nasdaq_dataset" ]
Python
5
5
{ "total_num": 37, "base_passed_num": 33 }
[ "skfolio.src.skfolio.utils.tools.optimal_rounding_decimals", "skfolio.src.skfolio.utils.tools.format_measure", "skfolio.src.skfolio.utils.tools.safe_indexing", "skfolio.src.skfolio.utils.tools.safe_split" ]
skfolio
[ "skfolio/utils/tools.py", "skfolio/utils/tools.py", "skfolio/utils/tools.py", "skfolio/utils/tools.py" ]
[ "tests/test_utils/test_tools.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 537, "func_end_lineno": 550, "func_code": "def optimal_rounding_decimals(x: float) -> int:\n \"\"\"Return the optimal rounding decimal number for a user-friendly formatting.\n\n Parameters\n ----------\n x : float\n Number to round.\n\n Returns\n -------\n n : int\n Rounding decimal number.\n \"\"\"\n return min(6, max(int(-np.log10(abs(x))) + 2, 2))" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 506, "func_end_lineno": 534, "func_code": "def format_measure(x: float, percent: bool = False) -> str:\n \"\"\"Format a measure number into a user-friendly string.\n\n Parameters\n ----------\n x : float\n Number to format.\n\n percent : bool, default=False\n If this is set to True, the number is formatted in percentage.\n\n Returns\n -------\n formatted : str\n Formatted string.\n \"\"\"\n if np.isnan(x):\n return str(x)\n if percent:\n xn = x * 100\n f = \"%\"\n else:\n xn = x\n f = \"f\"\n if xn == 0:\n n = 0\n else:\n n = optimal_rounding_decimals(xn)\n return \"{value:{fmt}}\".format(value=x, fmt=f\".{n}{f}\")" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 190, "func_end_lineno": 219, "func_code": "def safe_indexing(\n X: npt.ArrayLike | pd.DataFrame, indices: npt.ArrayLike | None, axis: int = 0\n):\n \"\"\"Return rows, items or columns of X using indices.\n\n Parameters\n ----------\n X : array-like\n Data from which to sample rows.\n\n indices : array-like, optional\n Indices of rows or columns.\n The default (`None`) is to select the entire data.\n\n axis : int, default=0\n The axis along which `X` will be sub-sampled. `axis=0` will select\n rows while `axis=1` will select columns.\n\n Returns\n -------\n subset :\n Subset of X on axis 0.\n \"\"\"\n if indices is None:\n return X\n if hasattr(X, \"iloc\"):\n return X.take(indices, axis=axis)\n if axis == 0:\n return X[indices]\n return X[:, indices]" }, { "class_start_lineno": 1, "class_end_lineno": 787, "func_start_lineno": 222, "func_end_lineno": 261, "func_code": "def safe_split(\n X: npt.ArrayLike,\n y: npt.ArrayLike | None = None,\n indices: np.ndarray | None = None,\n axis: int = 0,\n):\n \"\"\"Create subset of dataset.\n\n Slice X, y according to indices for cross-validation.\n\n Parameters\n ----------\n X : array-like\n Data to be indexed.\n\n y : array-like\n Data to be indexed.\n\n indices : ndarray of int, optional\n Rows or columns to select from X and y.\n The default (`None`) is to select the entire data.\n\n axis : int, default=0\n The axis along which `X` will be sub-sampled. `axis=0` will select\n rows while `axis=1` will select columns.\n\n Returns\n -------\n X_subset : array-like\n Indexed data.\n\n y_subset : array-like\n Indexed targets.\n \"\"\"\n X_subset = safe_indexing(X, indices=indices, axis=axis)\n if y is not None:\n y_subset = safe_indexing(y, indices=indices, axis=axis)\n else:\n y_subset = None\n return X_subset, y_subset" } ]
[ "function_empty", "Development" ]
[ "skfolio.utils.tools.optimal_rounding_decimals", "skfolio.utils.tools.format_measure", "skfolio.utils.tools.safe_indexing", "skfolio.utils.tools.safe_split" ]
Python
3
4
{ "total_num": 21, "base_passed_num": 16 }
[ "d3rlpy.d3rlpy.dataset.transition_pickers.BasicTransitionPicker::__call__", "d3rlpy.d3rlpy.metrics.evaluators.make_batches", "d3rlpy.d3rlpy.models.encoders.DefaultEncoderFactory::create", "d3rlpy.d3rlpy.models.builders.create_discrete_q_function" ]
d3rlpy
[ "d3rlpy/dataset/transition_pickers.py", "d3rlpy/metrics/evaluators.py", "d3rlpy/metrics/evaluators.py", "d3rlpy/models/encoders.py", "d3rlpy/models/builders.py" ]
[ "tests_copy/metrics/test_evaluators.py" ]
[ { "class_start_lineno": 43, "class_end_lineno": 72, "func_start_lineno": 49, "func_end_lineno": 72, "func_code": " def __call__(self, episode: EpisodeBase, index: int) -> Transition:\n _validate_index(episode, index)\n\n observation = retrieve_observation(episode.observations, index)\n is_terminal = episode.terminated and index == episode.size() - 1\n if is_terminal:\n next_observation = create_zero_observation(observation)\n next_action = np.zeros_like(episode.actions[index])\n else:\n next_observation = retrieve_observation(\n episode.observations, index + 1\n )\n next_action = episode.actions[index + 1]\n\n return Transition(\n observation=observation,\n action=episode.actions[index],\n reward=episode.rewards[index],\n next_observation=next_observation,\n next_action=next_action,\n terminal=float(is_terminal),\n interval=1,\n rewards_to_go=episode.rewards[index:],\n )" }, { "class_start_lineno": 1, "class_end_lineno": 548, "func_start_lineno": 52, "func_end_lineno": 68, "func_code": "def make_batches(\n episode: EpisodeBase,\n window_size: int,\n transition_picker: TransitionPickerProtocol,\n) -> Iterator[TransitionMiniBatch]:\n n_batches = len(episode) // window_size\n if len(episode) % window_size != 0:\n n_batches += 1\n for i in range(n_batches):\n head_index = i * window_size\n last_index = min(head_index + window_size, episode.transition_count)\n transitions = [\n transition_picker(episode, index)\n for index in range(head_index, last_index)\n ]\n batch = TransitionMiniBatch.from_transitions(transitions)\n yield batch" }, { "class_start_lineno": 71, "class_end_lineno": 121, "func_start_lineno": 93, "func_end_lineno": 121, "func_code": " def __call__(\n self,\n algo: QLearningAlgoProtocol,\n dataset: ReplayBufferBase,\n ) -> float:\n total_errors = []\n episodes = self._episodes if self._episodes else dataset.episodes\n for episode in episodes:\n for batch in make_batches(\n episode, WINDOW_SIZE, dataset.transition_picker\n ):\n # estimate values for current observations\n values = algo.predict_value(batch.observations, batch.actions)\n\n # estimate values for next observations\n next_actions = algo.predict(batch.next_observations)\n next_values = algo.predict_value(\n batch.next_observations, next_actions\n )\n\n # calculate td errors\n mask = (1.0 - batch.terminals).reshape(-1)\n rewards = np.asarray(batch.rewards).reshape(-1)\n if algo.reward_scaler:\n rewards = algo.reward_scaler.transform_numpy(rewards)\n y = rewards + algo.gamma * next_values * mask\n total_errors += ((values - y) ** 2).tolist()\n\n return float(np.mean(total_errors))" }, { "class_start_lineno": 209, "class_end_lineno": 265, "func_start_lineno": 224, "func_end_lineno": 238, "func_code": " def create(self, observation_shape: Shape) -> Encoder:\n factory: Union[PixelEncoderFactory, VectorEncoderFactory]\n if len(observation_shape) == 3:\n factory = PixelEncoderFactory(\n activation=self.activation,\n use_batch_norm=self.use_batch_norm,\n dropout_rate=self.dropout_rate,\n )\n else:\n factory = VectorEncoderFactory(\n activation=self.activation,\n use_batch_norm=self.use_batch_norm,\n dropout_rate=self.dropout_rate,\n )\n return factory.create(observation_shape)" }, { "class_start_lineno": 1, "class_end_lineno": 403, "func_start_lineno": 47, "func_end_lineno": 82, "func_code": "def create_discrete_q_function(\n observation_shape: Shape,\n action_size: int,\n encoder_factory: EncoderFactory,\n q_func_factory: QFunctionFactory,\n device: str,\n enable_ddp: bool,\n n_ensembles: int = 1,\n) -> tuple[nn.ModuleList, 
DiscreteEnsembleQFunctionForwarder]:\n if q_func_factory.share_encoder:\n encoder = encoder_factory.create(observation_shape)\n hidden_size = compute_output_size([observation_shape], encoder)\n # normalize gradient scale by ensemble size\n for p in cast(nn.Module, encoder).parameters():\n p.register_hook(lambda grad: grad / n_ensembles)\n\n q_funcs = []\n forwarders = []\n for _ in range(n_ensembles):\n if not q_func_factory.share_encoder:\n encoder = encoder_factory.create(observation_shape)\n hidden_size = compute_output_size([observation_shape], encoder)\n q_func, forwarder = q_func_factory.create_discrete(\n encoder, hidden_size, action_size\n )\n q_func.to(device)\n if enable_ddp:\n q_func = wrap_model_by_ddp(q_func)\n forwarder.set_q_func(q_func)\n q_funcs.append(q_func)\n forwarders.append(forwarder)\n q_func_modules = nn.ModuleList(q_funcs)\n ensemble_forwarder = DiscreteEnsembleQFunctionForwarder(\n forwarders, action_size\n )\n return q_func_modules, ensemble_forwarder" } ]
[ "BugFix" ]
[ "d3rlpy.dataset.transition_pickers.BasicTransitionPicker.__call__", "d3rlpy.metrics.evaluators.make_batches", "d3rlpy.metrics.evaluators.TDErrorEvaluator.__call__", "d3rlpy.models.encoders.DefaultEncoderFactory.create", "d3rlpy.models.builders.create_discrete_q_function" ]
Python
0
4
{ "total_num": 19, "base_passed_num": 0 }
[ "d3rlpy.d3rlpy.models.encoders.DefaultEncoderFactory::create", "d3rlpy.d3rlpy.models.builders.create_discrete_q_function", "d3rlpy.d3rlpy.models.encoders.DefaultEncoderFactory::create_with_action", "d3rlpy.d3rlpy.models.builders.create_continuous_q_function" ]
d3rlpy
[ "d3rlpy/models/encoders.py", "d3rlpy/models/builders.py", "d3rlpy/models/encoders.py", "d3rlpy/models/builders.py" ]
[ "tests_copy/models/test_builders.py" ]
[ { "class_start_lineno": 209, "class_end_lineno": 265, "func_start_lineno": 224, "func_end_lineno": 238, "func_code": " def create(self, observation_shape: Shape) -> Encoder:\n factory: Union[PixelEncoderFactory, VectorEncoderFactory]\n if len(observation_shape) == 3:\n factory = PixelEncoderFactory(\n activation=self.activation,\n use_batch_norm=self.use_batch_norm,\n dropout_rate=self.dropout_rate,\n )\n else:\n factory = VectorEncoderFactory(\n activation=self.activation,\n use_batch_norm=self.use_batch_norm,\n dropout_rate=self.dropout_rate,\n )\n return factory.create(observation_shape)" }, { "class_start_lineno": 1, "class_end_lineno": 403, "func_start_lineno": 47, "func_end_lineno": 82, "func_code": "def create_discrete_q_function(\n observation_shape: Shape,\n action_size: int,\n encoder_factory: EncoderFactory,\n q_func_factory: QFunctionFactory,\n device: str,\n enable_ddp: bool,\n n_ensembles: int = 1,\n) -> tuple[nn.ModuleList, DiscreteEnsembleQFunctionForwarder]:\n if q_func_factory.share_encoder:\n encoder = encoder_factory.create(observation_shape)\n hidden_size = compute_output_size([observation_shape], encoder)\n # normalize gradient scale by ensemble size\n for p in cast(nn.Module, encoder).parameters():\n p.register_hook(lambda grad: grad / n_ensembles)\n\n q_funcs = []\n forwarders = []\n for _ in range(n_ensembles):\n if not q_func_factory.share_encoder:\n encoder = encoder_factory.create(observation_shape)\n hidden_size = compute_output_size([observation_shape], encoder)\n q_func, forwarder = q_func_factory.create_discrete(\n encoder, hidden_size, action_size\n )\n q_func.to(device)\n if enable_ddp:\n q_func = wrap_model_by_ddp(q_func)\n forwarder.set_q_func(q_func)\n q_funcs.append(q_func)\n forwarders.append(forwarder)\n q_func_modules = nn.ModuleList(q_funcs)\n ensemble_forwarder = DiscreteEnsembleQFunctionForwarder(\n forwarders, action_size\n )\n return q_func_modules, ensemble_forwarder" }, { "class_start_lineno": 209, "class_end_lineno": 265, "func_start_lineno": 240, "func_end_lineno": 261, "func_code": " def create_with_action(\n self,\n observation_shape: Shape,\n action_size: int,\n discrete_action: bool = False,\n ) -> EncoderWithAction:\n factory: Union[PixelEncoderFactory, VectorEncoderFactory]\n if len(observation_shape) == 3:\n factory = PixelEncoderFactory(\n activation=self.activation,\n use_batch_norm=self.use_batch_norm,\n dropout_rate=self.dropout_rate,\n )\n else:\n factory = VectorEncoderFactory(\n activation=self.activation,\n use_batch_norm=self.use_batch_norm,\n dropout_rate=self.dropout_rate,\n )\n return factory.create_with_action(\n observation_shape, action_size, discrete_action\n )" }, { "class_start_lineno": 1, "class_end_lineno": 403, "func_start_lineno": 85, "func_end_lineno": 128, "func_code": "def create_continuous_q_function(\n observation_shape: Shape,\n action_size: int,\n encoder_factory: EncoderFactory,\n q_func_factory: QFunctionFactory,\n device: str,\n enable_ddp: bool,\n n_ensembles: int = 1,\n) -> tuple[nn.ModuleList, ContinuousEnsembleQFunctionForwarder]:\n if q_func_factory.share_encoder:\n encoder = encoder_factory.create_with_action(\n observation_shape, action_size\n )\n hidden_size = compute_output_size(\n [observation_shape, (action_size,)], encoder\n )\n # normalize gradient scale by ensemble size\n for p in cast(nn.Module, encoder).parameters():\n p.register_hook(lambda grad: grad / n_ensembles)\n\n q_funcs = []\n forwarders = []\n for _ in range(n_ensembles):\n if not q_func_factory.share_encoder:\n encoder = 
encoder_factory.create_with_action(\n observation_shape, action_size\n )\n hidden_size = compute_output_size(\n [observation_shape, (action_size,)], encoder\n )\n q_func, forwarder = q_func_factory.create_continuous(\n encoder, hidden_size\n )\n q_func.to(device)\n if enable_ddp:\n q_func = wrap_model_by_ddp(q_func)\n forwarder.set_q_func(q_func)\n q_funcs.append(q_func)\n forwarders.append(forwarder)\n q_func_modules = nn.ModuleList(q_funcs)\n ensemble_forwarder = ContinuousEnsembleQFunctionForwarder(\n forwarders, action_size\n )\n return q_func_modules, ensemble_forwarder" } ]
[ "BugFix" ]
[ "d3rlpy.models.encoders.DefaultEncoderFactory.create", "d3rlpy.models.builders.create_discrete_q_function", "d3rlpy.models.encoders.DefaultEncoderFactory.create_with_action", "d3rlpy.models.builders.create_continuous_q_function" ]
Python
0
4
{ "total_num": 39, "base_passed_num": 25 }
[ "d3rlpy.d3rlpy.models.torch.q_functions.ensemble_q_function._gather_quantiles_by_indices", "d3rlpy.d3rlpy.models.torch.q_functions.ensemble_q_function._reduce_quantile_ensemble", "d3rlpy.d3rlpy.models.torch.q_functions.mean_q_function.DiscreteMeanQFunctionForwarder::compute_error", "d3rlpy.d3rlpy.models.torch.q_functions.ensemble_q_function.compute_ensemble_q_function_error" ]
d3rlpy
[ "d3rlpy/models/torch/q_functions/ensemble_q_function.py", "d3rlpy/models/torch/q_functions/ensemble_q_function.py", "d3rlpy/models/torch/q_functions/mean_q_function.py", "d3rlpy/models/torch/q_functions/ensemble_q_function.py", "d3rlpy/models/torch/q_functions/ensemble_q_function.py" ]
[ "tests_copy/models/torch/q_functions/test_ensemble_q_function.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 367, "func_start_lineno": 35, "func_end_lineno": 52, "func_code": "def _gather_quantiles_by_indices(\n y: torch.Tensor, indices: torch.Tensor\n) -> torch.Tensor:\n # TODO: implement this in general case\n if y.dim() == 3:\n # (N, batch, n_quantiles) -> (batch, n_quantiles)\n return y.transpose(0, 1)[torch.arange(y.shape[1]), indices]\n elif y.dim() == 4:\n # (N, batch, action, n_quantiles) -> (batch, action, N, n_quantiles)\n transposed_y = y.transpose(0, 1).transpose(1, 2)\n # (batch, action, N, n_quantiles) -> (batch * action, N, n_quantiles)\n flat_y = transposed_y.reshape(-1, y.shape[0], y.shape[3])\n head_indices = torch.arange(y.shape[1] * y.shape[2])\n # (batch * action, N, n_quantiles) -> (batch * action, n_quantiles)\n gathered_y = flat_y[head_indices, indices.view(-1)]\n # (batch * action, n_quantiles) -> (batch, action, n_quantiles)\n return gathered_y.view(y.shape[1], y.shape[2], -1)\n raise ValueError" }, { "class_start_lineno": 1, "class_end_lineno": 367, "func_start_lineno": 55, "func_end_lineno": 74, "func_code": "def _reduce_quantile_ensemble(\n y: torch.Tensor, reduction: str = \"min\", dim: int = 0, lam: float = 0.75\n) -> torch.Tensor:\n # reduction beased on expectation\n mean = y.mean(dim=-1)\n if reduction == \"min\":\n indices = mean.min(dim=dim).indices\n return _gather_quantiles_by_indices(y, indices)\n elif reduction == \"max\":\n indices = mean.max(dim=dim).indices\n return _gather_quantiles_by_indices(y, indices)\n elif reduction == \"none\":\n return y\n elif reduction == \"mix\":\n min_indices = mean.min(dim=dim).indices\n max_indices = mean.max(dim=dim).indices\n min_values = _gather_quantiles_by_indices(y, min_indices)\n max_values = _gather_quantiles_by_indices(y, max_indices)\n return lam * min_values + (1.0 - lam) * max_values\n raise ValueError" }, { "class_start_lineno": 47, "class_end_lineno": 86, "func_start_lineno": 58, "func_end_lineno": 74, "func_code": " def compute_error(\n self,\n observations: TorchObservation,\n actions: torch.Tensor,\n rewards: torch.Tensor,\n target: torch.Tensor,\n terminals: torch.Tensor,\n gamma: Union[float, torch.Tensor] = 0.99,\n reduction: str = \"mean\",\n ) -> torch.Tensor:\n one_hot = F.one_hot(actions.view(-1), num_classes=self._action_size)\n value = (self._q_func(observations).q_value * one_hot.float()).sum(\n dim=1, keepdim=True\n )\n y = rewards + gamma * target * (1 - terminals)\n loss = compute_huber_loss(value, y)\n return compute_reduce(loss, reduction)" }, { "class_start_lineno": 1, "class_end_lineno": 367, "func_start_lineno": 77, "func_end_lineno": 109, "func_code": "def compute_ensemble_q_function_error(\n forwarders: Union[\n Sequence[DiscreteQFunctionForwarder],\n Sequence[ContinuousQFunctionForwarder],\n ],\n observations: TorchObservation,\n actions: torch.Tensor,\n rewards: torch.Tensor,\n target: torch.Tensor,\n terminals: torch.Tensor,\n gamma: Union[float, torch.Tensor] = 0.99,\n masks: Optional[torch.Tensor] = None,\n) -> torch.Tensor:\n assert target.ndim == 2\n td_sum = torch.tensor(\n 0.0,\n dtype=torch.float32,\n device=get_device(observations),\n )\n for forwarder in forwarders:\n loss = forwarder.compute_error(\n observations=observations,\n actions=actions,\n rewards=rewards,\n target=target,\n terminals=terminals,\n gamma=gamma,\n reduction=\"none\",\n )\n if masks is not None:\n loss = loss * masks\n td_sum += loss.mean()\n return td_sum" }, { "class_start_lineno": 150, "class_end_lineno": 218, "func_start_lineno": 179, 
"func_end_lineno": 198, "func_code": " def compute_error(\n self,\n observations: TorchObservation,\n actions: torch.Tensor,\n rewards: torch.Tensor,\n target: torch.Tensor,\n terminals: torch.Tensor,\n gamma: Union[float, torch.Tensor] = 0.99,\n masks: Optional[torch.Tensor] = None,\n ) -> torch.Tensor:\n return compute_ensemble_q_function_error(\n forwarders=self._forwarders,\n observations=observations,\n actions=actions,\n rewards=rewards,\n target=target,\n terminals=terminals,\n gamma=gamma,\n masks=masks,\n )" } ]
[ "BugFix" ]
[ "d3rlpy.models.torch.q_functions.ensemble_q_function._gather_quantiles_by_indices", "d3rlpy.models.torch.q_functions.ensemble_q_function._reduce_quantile_ensemble", "d3rlpy.models.torch.q_functions.mean_q_function.DiscreteMeanQFunctionForwarder.compute_error", "d3rlpy.models.torch.q_functions.ensemble_q_function.compute_ensemble_q_function_error", "d3rlpy.models.torch.q_functions.ensemble_q_function.DiscreteEnsembleQFunctionForwarder.compute_error" ]
Python
0
4
{ "total_num": 30, "base_passed_num": 10 }
[ "d3rlpy.d3rlpy.dataset.transition_pickers.BasicTransitionPicker::__call__", "d3rlpy.d3rlpy.preprocessing.reward_scalers.MinMaxRewardScaler::fit_with_transition_picker" ]
d3rlpy
[ "d3rlpy/dataset/transition_pickers.py", "d3rlpy/preprocessing/reward_scalers.py" ]
[ "tests_copy/preprocessing/test_reward_scalers.py" ]
[ { "class_start_lineno": 43, "class_end_lineno": 72, "func_start_lineno": 49, "func_end_lineno": 72, "func_code": " def __call__(self, episode: EpisodeBase, index: int) -> Transition:\n _validate_index(episode, index)\n\n observation = retrieve_observation(episode.observations, index)\n is_terminal = episode.terminated and index == episode.size() - 1\n if is_terminal:\n next_observation = create_zero_observation(observation)\n next_action = np.zeros_like(episode.actions[index])\n else:\n next_observation = retrieve_observation(\n episode.observations, index + 1\n )\n next_action = episode.actions[index + 1]\n\n return Transition(\n observation=observation,\n action=episode.actions[index],\n reward=episode.rewards[index],\n next_observation=next_observation,\n next_action=next_action,\n terminal=float(is_terminal),\n interval=1,\n rewards_to_go=episode.rewards[index:],\n )" }, { "class_start_lineno": 149, "class_end_lineno": 239, "func_start_lineno": 180, "func_end_lineno": 192, "func_code": " def fit_with_transition_picker(\n self,\n episodes: Sequence[EpisodeBase],\n transition_picker: TransitionPickerProtocol,\n ) -> None:\n assert not self.built\n rewards = []\n for episode in episodes:\n for i in range(episode.transition_count):\n transition = transition_picker(episode, i)\n rewards.append(transition.reward)\n self.minimum = float(np.min(rewards))\n self.maximum = float(np.max(rewards))" } ]
[ "BugFix" ]
[ "d3rlpy.dataset.transition_pickers.BasicTransitionPicker.__call__", "d3rlpy.preprocessing.reward_scalers.MinMaxRewardScaler.fit_with_transition_picker" ]
Python
0
2
{ "total_num": 15, "base_passed_num": 12 }
[ "datachain.src.datachain.lib.file.File::ensure_cached", "datachain.src.datachain.lib.file.File::open", "datachain.src.datachain.lib.file.File::_symlink_to" ]
datachain
[ "datachain/lib/file.py", "datachain/lib/file.py", "datachain/lib/file.py" ]
[ "tests/unit/lib/test_file.py" ]
[ { "class_start_lineno": 125, "class_end_lineno": 468, "func_start_lineno": 331, "func_end_lineno": 337, "func_code": " def ensure_cached(self) -> None:\n if self._catalog is None:\n raise RuntimeError(\n \"cannot download file to cache because catalog is not setup\"\n )\n client = self._catalog.get_client(self.source)\n client.download(self, callback=self._download_cb)" }, { "class_start_lineno": 125, "class_end_lineno": 468, "func_start_lineno": 243, "func_end_lineno": 256, "func_code": " def open(self, mode: Literal[\"rb\", \"r\"] = \"rb\") -> Iterator[Any]:\n \"\"\"Open the file and return a file object.\"\"\"\n if self.location:\n with VFileRegistry.resolve(self, self.location) as f: # type: ignore[arg-type]\n yield f\n\n else:\n if self._caching_enabled:\n self.ensure_cached()\n client: Client = self._catalog.get_client(self.source)\n with client.open_object(\n self, use_cache=self._caching_enabled, cb=self._download_cb\n ) as f:\n yield io.TextIOWrapper(f) if mode == \"r\" else f" }, { "class_start_lineno": 125, "class_end_lineno": 468, "func_start_lineno": 282, "func_end_lineno": 295, "func_code": " def _symlink_to(self, destination: str):\n if self.location:\n raise OSError(errno.ENOTSUP, \"Symlinking virtual file is not supported\")\n\n if self._caching_enabled:\n self.ensure_cached()\n source = self.get_local_path()\n assert source, \"File was not cached\"\n elif self.source.startswith(\"file://\"):\n source = self.get_path()\n else:\n raise OSError(errno.EXDEV, \"can't link across filesystems\")\n\n return os.symlink(source, destination)" } ]
[ "BugFix" ]
[ "datachain.lib.file.File.ensure_cached", "datachain.lib.file.File.open", "datachain.lib.file.File._symlink_to" ]
Python
0
3
{ "total_num": 33, "base_passed_num": 0 }
[ "datachain.src.datachain.lib.signal_schema.SignalSchema::_get_flat_tree", "datachain.src.datachain.lib.signal_schema.SignalSchema::get_column_type", "datachain.src.datachain.lib.signal_schema.SignalSchema::mutate" ]
datachain
[ "datachain/lib/signal_schema.py", "datachain/lib/signal_schema.py", "datachain/lib/signal_schema.py" ]
[ "tests/unit/lib/test_signal_schema.py" ]
[ { "class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 630, "func_end_lineno": 639, "func_code": " def _get_flat_tree(\n self, tree: dict, prefix: list[str], depth: int\n ) -> Iterator[tuple[list[str], DataType, bool, int]]:\n for name, (type_, substree) in tree.items():\n suffix = name.split(\".\")\n new_prefix = prefix + suffix\n has_subtree = substree is not None\n yield new_prefix, type_, has_subtree, depth\n if substree is not None:\n yield from self._get_flat_tree(substree, new_prefix, depth + 1)" }, { "class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 464, "func_end_lineno": 479, "func_code": " def get_column_type(self, col_name: str, with_subtree: bool = False) -> DataType:\n \"\"\"\n Returns column type by column name.\n\n If `with_subtree` is True, then it will return the type of the column\n even if it has a subtree (e.g. model with nested fields), otherwise it will\n return the type of the column (standard type field, not the model).\n\n If column is not found, raises `SignalResolvingError`.\n \"\"\"\n for path, _type, has_subtree, _ in self.get_flat_tree():\n if (with_subtree or not has_subtree) and DEFAULT_DELIMITER.join(\n path\n ) == col_name:\n return _type\n raise SignalResolvingError([col_name], \"is not found\")" }, { "class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 557, "func_end_lineno": 585, "func_code": " def mutate(self, args_map: dict) -> \"SignalSchema\":\n new_values = self.values.copy()\n\n for name, value in args_map.items():\n if isinstance(value, Column) and value.name in self.values:\n # renaming existing signal\n del new_values[value.name]\n new_values[name] = self.values[value.name]\n continue\n if isinstance(value, Column):\n # adding new signal from existing signal field\n try:\n new_values[name] = self.get_column_type(\n value.name, with_subtree=True\n )\n continue\n except SignalResolvingError:\n pass\n if isinstance(value, Func):\n # adding new signal with function\n new_values[name] = value.get_result_type(self)\n continue\n if isinstance(value, ColumnElement):\n # adding new signal\n new_values[name] = sql_to_python(value)\n continue\n new_values[name] = value\n\n return SignalSchema(new_values)" } ]
[ "BugFix" ]
[ "datachain.lib.signal_schema.SignalSchema._get_flat_tree", "datachain.lib.signal_schema.SignalSchema.get_column_type", "datachain.lib.signal_schema.SignalSchema.mutate" ]
Python
0
3
{ "total_num": 58, "base_passed_num": 56 }
[ "datachain.src.datachain.lib.webdataset.Builder::add", "datachain.src.datachain.lib.webdataset.get_tar_groups" ]
datachain
[ "datachain/lib/webdataset.py", "datachain/lib/webdataset.py" ]
[ "tests/unit/lib/test_webdataset.py" ]
[ { "class_start_lineno": 104, "class_end_lineno": 194, "func_start_lineno": 134, "func_end_lineno": 171, "func_code": " def add(self, file: tarfile.TarInfo):\n fstream = File(path=file.name)\n ext = fstream.get_file_ext()\n stem = fstream.get_file_stem()\n\n if self.state.stem is not None and self.state.stem != stem:\n raise StopIteration\n\n if self.state.stem is None:\n self.state.stem = stem\n\n if ext in self._core_extensions:\n if self.state.core_file is not None:\n raise CoreFileDuplicationError(\n self._tar_stream, file.name, self.state.core_file.name\n )\n self.state.core_file = file\n elif ext in self.state.data:\n raise WDSError(\n self._tar_stream,\n f\"file with extension '.{ext}' already exists in the archive\",\n )\n else:\n type_ = self._get_type(ext)\n if type_ is None:\n raise UnknownFileExtensionError(self._tar_stream, fstream.name, ext)\n\n if issubclass(type_, WDSReadableSubclass):\n reader = type_._reader\n else:\n reader = self.DEFAULT_TYPES_READERS.get(type_, None)\n\n if reader is None:\n raise WDSError(\n self._tar_stream,\n f\"unable to find a reader for type {type_}, extension .{ext}\",\n )\n self.state.data[ext] = reader(self, file)" }, { "class_start_lineno": 1, "class_end_lineno": 220, "func_start_lineno": 197, "func_end_lineno": 209, "func_code": "def get_tar_groups(stream, tar, core_extensions, spec, encoding=\"utf-8\"):\n builder = Builder(stream, core_extensions, spec, tar, encoding)\n\n for item in sorted(tar.getmembers(), key=lambda m: Path(m.name).stem):\n if not item.isfile():\n continue\n try:\n builder.add(item)\n except StopIteration:\n yield builder.produce()\n builder.add(item)\n if builder.state.stem is not None:\n yield builder.produce()" } ]
[ "BugFix" ]
[ "datachain.lib.webdataset.Builder.add", "datachain.lib.webdataset.get_tar_groups" ]
Python
0
2
{ "total_num": 7, "base_passed_num": 3 }
[ "haystack.haystack.utils.auth.EnvVarSecret::resolve_value", "haystack.haystack.components.rankers.transformers_similarity.TransformersSimilarityRanker::warm_up" ]
haystack
[ "haystack/utils/auth.py", "haystack/components/rankers/transformers_similarity.py" ]
[ "test/components/rankers/test_transformers_similarity.py" ]
[ { "class_start_lineno": 171, "class_end_lineno": 211, "func_start_lineno": 196, "func_end_lineno": 206, "func_code": " def resolve_value(self) -> Optional[Any]:\n \"\"\"Resolve the secret to an atomic value. The semantics of the value is secret-dependent.\"\"\"\n out = None\n for env_var in self._env_vars:\n value = os.getenv(env_var)\n if value is not None:\n out = value\n break\n if out is None and self._strict:\n raise ValueError(f\"None of the following authentication environment variables are set: {self._env_vars}\")\n return out" }, { "class_start_lineno": 24, "class_end_lineno": 309, "func_start_lineno": 142, "func_end_lineno": 155, "func_code": " def warm_up(self):\n \"\"\"\n Initializes the component.\n \"\"\"\n if self.model is None:\n self.model = AutoModelForSequenceClassification.from_pretrained(\n self.model_name_or_path, token=self.token.resolve_value() if self.token else None, **self.model_kwargs\n )\n self.tokenizer = AutoTokenizer.from_pretrained(\n self.model_name_or_path,\n token=self.token.resolve_value() if self.token else None,\n **self.tokenizer_kwargs,\n )\n self.device = ComponentDevice.from_multiple(device_map=DeviceMap.from_hf(self.model.hf_device_map))" } ]
[ "BugFix" ]
[ "haystack.utils.auth.EnvVarSecret.resolve_value", "haystack.components.rankers.transformers_similarity.TransformersSimilarityRanker.warm_up" ]
Python
0
2
{ "total_num": 26, "base_passed_num": 14 }
[ "transformers.src.transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor::pad", "transformers.src.transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor::_pad_for_patching" ]
transformers
[ "transformers/models/llava_next/image_processing_llava_next.py", "transformers/models/llava_next/image_processing_llava_next.py" ]
[ "tests/models/llava_next/test_image_processing_llava_next.py" ]
[ { "class_start_lineno": 142, "class_end_lineno": 749, "func_start_lineno": 284, "func_end_lineno": 350, "func_code": " def pad(\n self,\n image: np.ndarray,\n padding: Union[int, Tuple[int, int], Iterable[Tuple[int, int]]],\n mode: PaddingMode = PaddingMode.CONSTANT,\n constant_values: Union[float, Iterable[float]] = 0.0,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n \"\"\"\n Pads the `image` with the specified `padding` and `mode`. Padding can be in the (`height`, `width`)\n dimension of in the (`num_patches`) dimension. In the second case an iterable if tuples is expected\n as input.\n\n Args:\n image (`np.ndarray`):\n The image to pad.\n padding (`int` or `Tuple[int, int]` or `Iterable[Tuple[int, int]]`):\n Padding to apply to the edges of the height, width axes. Can be one of three formats:\n - `((before_height, after_height), (before_width, after_width))` unique pad widths for each axis.\n - `((before, after),)` yields same before and after pad for height and width.\n - `(pad,)` or int is a shortcut for before = after = pad width for all axes.\n mode (`PaddingMode`):\n The padding mode to use. Can be one of:\n - `\"constant\"`: pads with a constant value.\n - `\"reflect\"`: pads with the reflection of the vector mirrored on the first and last values of the\n vector along each axis.\n - `\"replicate\"`: pads with the replication of the last value on the edge of the array along each axis.\n - `\"symmetric\"`: pads with the reflection of the vector mirrored along the edge of the array.\n constant_values (`float` or `Iterable[float]`, *optional*):\n The value to use for the padding if `mode` is `\"constant\"`.\n data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the output image. Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use same as the input image.\n input_data_format (`str` or `ChannelDimension`, *optional*):\n The channel dimension format for the input image. 
Can be one of:\n - `\"channels_first\"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.\n - `\"channels_last\"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.\n If unset, will use the inferred format of the input image.\n\n Returns:\n `np.ndarray`: The padded image.\n\n \"\"\"\n\n # call the general `pad` if padding on `height/width`, otherwise it's the `num_patched` dim\n if isinstance(padding, int) or len(padding) != 4:\n return pad(image, padding, mode, constant_values, data_format, input_data_format)\n\n if input_data_format is None:\n input_data_format = infer_channel_dimension_format(image)\n if mode == PaddingMode.CONSTANT:\n image = np.pad(image, padding, mode=\"constant\", constant_values=constant_values)\n elif mode == PaddingMode.REFLECT:\n image = np.pad(image, padding, mode=\"reflect\")\n elif mode == PaddingMode.REPLICATE:\n image = np.pad(image, padding, mode=\"edge\")\n elif mode == PaddingMode.SYMMETRIC:\n image = np.pad(image, padding, mode=\"symmetric\")\n else:\n raise ValueError(f\"Invalid padding mode: {mode}\")\n image = (\n to_channel_dimension_format(image, data_format, input_data_format) if data_format is not None else image\n )\n return image" }, { "class_start_lineno": 142, "class_end_lineno": 749, "func_start_lineno": 462, "func_end_lineno": 476, "func_code": " def _pad_for_patching(\n self, image: np.array, target_resolution: tuple, input_data_format: ChannelDimension\n ) -> np.array:\n \"\"\"\n Pad an image to a target resolution while maintaining aspect ratio.\n \"\"\"\n target_height, target_width = target_resolution\n new_height, new_width = _get_patch_output_size(image, target_resolution, input_data_format)\n\n paste_x = (target_width - new_width) // 2\n paste_y = (target_height - new_height) // 2\n\n padded_image = self.pad(image, padding=((paste_y, paste_y), (paste_x, paste_x)))\n\n return padded_image" } ]
[ "BugFix" ]
[ "transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor.pad", "transformers.models.llava_next.image_processing_llava_next.LlavaNextImageProcessor._pad_for_patching" ]
Python
0
2
{ "total_num": 13, "base_passed_num": 0 }
[ "langchain.libs.langchain.langchain.agents.agent.AgentExecutor::_perform_agent_action", "langchain.libs.langchain.langchain.agents.agent_iterator.AgentExecutorIterator::__iter__" ]
langchain
[ "langchain/agents/agent.py", "langchain/agents/agent.py", "langchain/agents/agent_iterator.py" ]
[ "libs/langchain/tests/unit_tests/agents/test_agent.py" ]
[ { "class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1419, "func_end_lineno": 1456, "func_code": " def _perform_agent_action(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n agent_action: AgentAction,\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> AgentStep:\n if run_manager:\n run_manager.on_agent_action(agent_action, color=\"green\")\n # Otherwise we lookup the tool\n if agent_action.tool in name_to_tool_map:\n tool = name_to_tool_map[agent_action.tool]\n return_direct = tool.return_direct\n color = color_mapping[agent_action.tool]\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n if return_direct:\n tool_run_kwargs[\"llm_prefix\"] = \"\"\n # We then call the tool on the tool input to get an observation\n observation = tool.run(\n agent_action.tool_input,\n verbose=self.verbose,\n color=color,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n else:\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = InvalidTool().run(\n {\n \"requested_tool_name\": agent_action.tool,\n \"available_tool_names\": list(name_to_tool_map.keys()),\n },\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n return AgentStep(action=agent_action, observation=observation)" }, { "class_start_lineno": 1047, "class_end_lineno": 1806, "func_start_lineno": 1342, "func_end_lineno": 1417, "func_code": " def _iter_next_step(\n self,\n name_to_tool_map: Dict[str, BaseTool],\n color_mapping: Dict[str, str],\n inputs: Dict[str, str],\n intermediate_steps: List[Tuple[AgentAction, str]],\n run_manager: Optional[CallbackManagerForChainRun] = None,\n ) -> Iterator[Union[AgentFinish, AgentAction, AgentStep]]:\n \"\"\"Take a single step in the thought-action-observation loop.\n\n Override this to take control of how the agent makes and acts on choices.\n \"\"\"\n try:\n intermediate_steps = self._prepare_intermediate_steps(intermediate_steps)\n\n # Call the LLM to see what to do.\n output = self._action_agent.plan(\n intermediate_steps,\n callbacks=run_manager.get_child() if run_manager else None,\n **inputs,\n )\n except OutputParserException as e:\n if isinstance(self.handle_parsing_errors, bool):\n raise_error = not self.handle_parsing_errors\n else:\n raise_error = False\n if raise_error:\n raise ValueError(\n \"An output parsing error occurred. \"\n \"In order to pass this error back to the agent and have it try \"\n \"again, pass `handle_parsing_errors=True` to the AgentExecutor. 
\"\n f\"This is the error: {str(e)}\"\n )\n text = str(e)\n if isinstance(self.handle_parsing_errors, bool):\n if e.send_to_llm:\n observation = str(e.observation)\n text = str(e.llm_output)\n else:\n observation = \"Invalid or incomplete response\"\n elif isinstance(self.handle_parsing_errors, str):\n observation = self.handle_parsing_errors\n elif callable(self.handle_parsing_errors):\n observation = self.handle_parsing_errors(e)\n else:\n raise ValueError(\"Got unexpected type of `handle_parsing_errors`\")\n output = AgentAction(\"_Exception\", observation, text)\n if run_manager:\n run_manager.on_agent_action(output, color=\"green\")\n tool_run_kwargs = self._action_agent.tool_run_logging_kwargs()\n observation = ExceptionTool().run(\n output.tool_input,\n verbose=self.verbose,\n color=None,\n callbacks=run_manager.get_child() if run_manager else None,\n **tool_run_kwargs,\n )\n yield AgentStep(action=output, observation=observation)\n return\n\n # If the tool chosen is the finishing tool, then we end and return.\n if isinstance(output, AgentFinish):\n yield output\n return\n\n actions: List[AgentAction]\n if isinstance(output, AgentAction):\n actions = [output]\n else:\n actions = output\n for agent_action in actions:\n yield agent_action\n for agent_action in actions:\n yield self._perform_agent_action(\n name_to_tool_map, color_mapping, agent_action, run_manager\n )" }, { "class_start_lineno": 46, "class_end_lineno": 418, "func_start_lineno": 174, "func_end_lineno": 234, "func_code": " def __iter__(self: \"AgentExecutorIterator\") -> Iterator[AddableDict]:\n logger.debug(\"Initialising AgentExecutorIterator\")\n self.reset()\n callback_manager = CallbackManager.configure(\n self.callbacks,\n self.agent_executor.callbacks,\n self.agent_executor.verbose,\n self.tags,\n self.agent_executor.tags,\n self.metadata,\n self.agent_executor.metadata,\n )\n run_manager = callback_manager.on_chain_start(\n dumpd(self.agent_executor),\n self.inputs,\n self.run_id,\n name=self.run_name,\n )\n try:\n while self.agent_executor._should_continue(\n self.iterations, self.time_elapsed\n ):\n # take the next step: this plans next action, executes it,\n # yielding action and observation as they are generated\n next_step_seq: NextStepOutput = []\n for chunk in self.agent_executor._iter_next_step(\n self.name_to_tool_map,\n self.color_mapping,\n self.inputs,\n self.intermediate_steps,\n run_manager,\n ):\n next_step_seq.append(chunk)\n # if we're yielding actions, yield them as they come\n # do not yield AgentFinish, which will be handled below\n if self.yield_actions:\n if isinstance(chunk, AgentAction):\n yield AddableDict(actions=[chunk], messages=chunk.messages)\n elif isinstance(chunk, AgentStep):\n yield AddableDict(steps=[chunk], messages=chunk.messages)\n\n # convert iterator output to format handled by _process_next_step_output\n next_step = self.agent_executor._consume_next_step(next_step_seq)\n # update iterations and time elapsed\n self.update_iterations()\n # decide if this is the final output\n output = self._process_next_step_output(next_step, run_manager)\n is_final = \"intermediate_step\" not in output\n # yield the final output always\n # for backwards compat, yield int. output if not yielding actions\n if not self.yield_actions or is_final:\n yield output\n # if final output reached, stop iteration\n if is_final:\n return\n except BaseException as e:\n run_manager.on_chain_error(e)\n raise\n\n # if we got here means we exhausted iterations or time\n yield self._stop(run_manager)" } ]
[ "BugFix" ]
[ "langchain.agents.agent.AgentExecutor._perform_agent_action", "langchain.agents.agent.AgentExecutor._iter_next_step", "langchain.agents.agent_iterator.AgentExecutorIterator.__iter__" ]
Python
0
2
{ "total_num": 14, "base_passed_num": 13 }
[ "cloudnetpy.cloudnetpy.utils.cumsumr", "cloudnetpy.cloudnetpy.categorize.atmos_utils.calc_adiabatic_lwc" ]
cloudnetpy
[ "cloudnetpy/utils.py", "cloudnetpy/categorize/atmos_utils.py" ]
[ "tests/unit/test_atmos_utils.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 532, "func_end_lineno": 549, "func_code": "def cumsumr(array: np.ndarray, axis: int = 0) -> np.ndarray:\n \"\"\"Finds cumulative sum that resets on 0.\n\n Args:\n array: Input array.\n axis: Axis where the sum is calculated. Default is 0.\n\n Returns:\n Cumulative sum, restarted at 0.\n\n Examples:\n >>> x = np.array([0, 0, 1, 1, 0, 0, 0, 1, 1, 1])\n >>> cumsumr(x)\n [0, 0, 1, 2, 0, 0, 0, 1, 2, 3]\n\n \"\"\"\n cums = array.cumsum(axis=axis)\n return cums - np.maximum.accumulate(cums * (array == 0), axis=axis)" }, { "class_start_lineno": 1, "class_end_lineno": 357, "func_start_lineno": 302, "func_end_lineno": 318, "func_code": "def calc_adiabatic_lwc(lwc_dz: np.ndarray, height: np.ndarray) -> np.ndarray:\n \"\"\"Calculates adiabatic liquid water content (kg m-3).\n\n Args:\n lwc_dz: Liquid water content change rate (kg m-3 m-1) calculated at the\n base of each cloud and filled to that cloud.\n height: Height vector (m).\n\n Returns:\n Liquid water content (kg m-3).\n\n \"\"\"\n is_cloud = lwc_dz != 0\n cloud_indices = utils.cumsumr(is_cloud, axis=1)\n dz = utils.path_lengths_from_ground(height) * np.ones_like(lwc_dz)\n dz[cloud_indices < 1] = 0\n return utils.cumsumr(dz, axis=1) * lwc_dz" } ]
[ "function_empty" ]
[ "cloudnetpy.utils.cumsumr", "cloudnetpy.categorize.atmos_utils.calc_adiabatic_lwc" ]
Python
2
2
{ "total_num": 5, "base_passed_num": 4 }
[ "cloudnetpy.cloudnetpy.output._get_netcdf_dimensions", "cloudnetpy.cloudnetpy.output.save_level1b", "cloudnetpy.cloudnetpy.utils.isscalar", "cloudnetpy.cloudnetpy.output._get_dimensions" ]
cloudnetpy
[ "cloudnetpy/output.py", "cloudnetpy/output.py", "cloudnetpy/utils.py", "cloudnetpy/output.py" ]
[ "tests/unit/test_basta.py", "tests/unit/test_bowtie.py", "tests/unit/test_categorize.py", "tests/unit/test_hatpro.py", "tests/unit/test_mrr.py", "tests/unit/test_plotting.py", "tests/unit/test_radiometrics.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 494, "func_start_lineno": 54, "func_end_lineno": 74, "func_code": "def _get_netcdf_dimensions(obj) -> dict:\n dimensions = {\n key: len(obj.data[key][:]) for key in (\"time\", \"range\") if key in obj.data\n }\n # RPG cloud radar\n if \"chirp_start_indices\" in obj.data:\n dimensions[\"chirp_sequence\"] = len(obj.data[\"chirp_start_indices\"][:])\n # disdrometer\n if hasattr(obj, \"n_diameter\") and hasattr(obj, \"n_velocity\"):\n dimensions[\"diameter\"] = obj.n_diameter\n dimensions[\"velocity\"] = obj.n_velocity\n dimensions[\"nv\"] = 2\n # HATPRO l1c\n if \"tb\" in obj.data:\n dimensions[\"frequency\"] = obj.data[\"tb\"][:].shape[1]\n dimensions[\"receiver_nb\"] = len(obj.data[\"receiver_nb\"][:])\n dimensions[\"band\"] = 2\n dimensions[\"t_amb_nb\"] = 2\n if \"irt\" in obj.data:\n dimensions[\"ir_channel\"] = obj.data[\"irt\"][:].shape[1]\n return dimensions" }, { "class_start_lineno": 1, "class_end_lineno": 494, "func_start_lineno": 21, "func_end_lineno": 51, "func_code": "def save_level1b(\n obj,\n output_file: PathLike | str,\n uuid: UUID | str | None = None,\n) -> str:\n \"\"\"Saves Cloudnet Level 1b file.\"\"\"\n dimensions = _get_netcdf_dimensions(obj)\n with init_file(output_file, dimensions, obj.data, uuid) as nc:\n file_uuid = nc.file_uuid\n fix_attribute_name(nc)\n location = obj.site_meta[\"name\"]\n nc.cloudnet_file_type = obj.instrument.domain\n nc.title = get_l1b_title(obj.instrument, location)\n if isinstance(obj.date, list):\n nc.year, nc.month, nc.day = obj.date\n elif isinstance(obj.date, datetime.date):\n nc.year = str(obj.date.year)\n nc.month = str(obj.date.month).zfill(2)\n nc.day = str(obj.date.day).zfill(2)\n else:\n raise TypeError\n nc.location = location\n nc.history = get_l1b_history(obj.instrument)\n nc.source = get_l1b_source(obj.instrument)\n if hasattr(obj, \"serial_number\") and obj.serial_number is not None:\n nc.serial_number = obj.serial_number\n if hasattr(obj, \"software\"):\n for software, version in obj.software.items():\n nc.setncattr(f\"{software}_version\", version)\n nc.references = get_references()\n return file_uuid" }, { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 655, "func_end_lineno": 672, "func_code": "def isscalar(array: np.ndarray | float | list | netCDF4.Variable) -> bool:\n \"\"\"Tests if input is scalar.\n\n By \"scalar\" we mean that array has a single value.\n\n Examples:\n >>> isscalar(1)\n True\n >>> isscalar([1])\n True\n >>> isscalar(np.array(1))\n True\n >>> isscalar(np.array([1]))\n True\n\n \"\"\"\n arr = ma.array(array)\n return not hasattr(arr, \"__len__\") or arr.shape == () or len(arr) == 1" }, { "class_start_lineno": 1, "class_end_lineno": 494, "func_start_lineno": 417, "func_end_lineno": 427, "func_code": "def _get_dimensions(nc: netCDF4.Dataset, data: np.ndarray) -> tuple:\n \"\"\"Finds correct dimensions for a variable.\"\"\"\n if utils.isscalar(data):\n return ()\n variable_size: list = []\n file_dims = nc.dimensions\n array_dims = data.shape\n for length in array_dims:\n dim = [key for key in file_dims if file_dims[key].size == length][0] # noqa: RUF015\n variable_size = [*variable_size, dim]\n return tuple(variable_size)" } ]
[ "function_empty", "TDD" ]
[ "cloudnetpy.output._get_netcdf_dimensions", "cloudnetpy.output.save_level1b", "cloudnetpy.utils.isscalar", "cloudnetpy.output._get_dimensions" ]
Python
1
4
{ "total_num": 80, "base_passed_num": 23 }
[ "cloudnetpy.cloudnetpy.instruments.ceilo._initialize_ceilo", "cloudnetpy.cloudnetpy.instruments.ceilo.ceilo2nc", "cloudnetpy.cloudnetpy.output._get_netcdf_dimensions", "cloudnetpy.cloudnetpy.output.save_level1b" ]
cloudnetpy
[ "cloudnetpy/instruments/ceilo.py", "cloudnetpy/instruments/ceilo.py", "cloudnetpy/output.py", "cloudnetpy/output.py" ]
[ "tests/unit/test_ceilo.py", "tests/unit/test_vaisala.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 278, "func_start_lineno": 129, "func_end_lineno": 159, "func_code": "def _initialize_ceilo(\n full_path: str,\n site_meta: dict,\n date: str | None = None,\n) -> ClCeilo | Ct25k | LufftCeilo | Cl61d | Cs135:\n if \"model\" in site_meta:\n if site_meta[\"model\"] not in (\n \"cl31\",\n \"cl51\",\n \"cl61d\",\n \"ct25k\",\n \"chm15k\",\n \"cs135\",\n ):\n msg = f\"Invalid ceilometer model: {site_meta['model']}\"\n raise ValueError(msg)\n if site_meta[\"model\"] in (\"cl31\", \"cl51\"):\n model = \"cl31_or_cl51\"\n else:\n model = site_meta[\"model\"]\n else:\n model = _find_ceilo_model(full_path)\n if model == \"cl31_or_cl51\":\n return ClCeilo(full_path, site_meta, date)\n if model == \"ct25k\":\n return Ct25k(full_path, site_meta, date)\n if model == \"cl61d\":\n return Cl61d(full_path, site_meta, date)\n if model == \"cs135\":\n return Cs135(full_path, site_meta, date)\n return LufftCeilo(full_path, site_meta, date)" }, { "class_start_lineno": 1, "class_end_lineno": 278, "func_start_lineno": 15, "func_end_lineno": 113, "func_code": "def ceilo2nc(\n full_path: str,\n output_file: str,\n site_meta: dict,\n uuid: str | None = None,\n date: str | None = None,\n) -> str:\n \"\"\"Converts Vaisala, Lufft and Campbell Scientific ceilometer data into\n Cloudnet Level 1b netCDF file.\n\n This function reads raw Vaisala (CT25k, CL31, CL51, CL61), Lufft\n (CHM 15k, CHM 15k-x) and Campbell Scientific (CS135) ceilometer files and writes\n the data into netCDF file. Three variants of the backscatter are saved:\n\n 1. Raw backscatter, `beta_raw`\n 2. Signal-to-noise screened backscatter, `beta`\n 3. SNR-screened backscatter with smoothed weak background, `beta_smooth`\n\n With CL61 two additional depolarisation parameters are saved:\n\n 1. Signal-to-noise screened depolarisation, `depolarisation`\n 2. SNR-screened depolarisation with smoothed weak background,\n `depolarisation_smooth`\n\n CL61 screened backscatter is screened using beta_smooth mask to improve detection\n of weak aerosol layers and supercooled liquid clouds.\n\n Args:\n full_path: Ceilometer file name.\n output_file: Output file name, e.g. 'ceilo.nc'.\n site_meta: Dictionary containing information about the site and instrument.\n Required key value pairs are `name` and `altitude` (metres above mean\n sea level). Also, 'calibration_factor' is recommended because the default\n value is probably incorrect. If the background noise is *not*\n range-corrected, you must define: {'range_corrected': False}.\n You can also explicitly set the instrument model with\n e.g. 
{'model': 'cl61d'}.\n uuid: Set specific UUID for the file.\n date: Expected date as YYYY-MM-DD of all profiles in the file.\n\n Returns:\n UUID of the generated file.\n\n Raises:\n RuntimeError: Failed to read or process raw ceilometer data.\n\n Examples:\n >>> from cloudnetpy.instruments import ceilo2nc\n >>> site_meta = {'name': 'Mace-Head', 'altitude': 5}\n >>> ceilo2nc('vaisala_raw.txt', 'vaisala.nc', site_meta)\n >>> site_meta = {'name': 'Juelich', 'altitude': 108,\n 'calibration_factor': 2.3e-12}\n >>> ceilo2nc('chm15k_raw.nc', 'chm15k.nc', site_meta)\n\n \"\"\"\n snr_limit = 5\n ceilo_obj = _initialize_ceilo(full_path, site_meta, date)\n calibration_factor = site_meta.get(\"calibration_factor\")\n range_corrected = site_meta.get(\"range_corrected\", True)\n ceilo_obj.read_ceilometer_file(calibration_factor)\n ceilo_obj.check_beta_raw_shape()\n n_negatives = _get_n_negatives(ceilo_obj)\n ceilo_obj.data[\"beta\"] = ceilo_obj.calc_screened_product(\n ceilo_obj.data[\"beta_raw\"],\n snr_limit,\n range_corrected=range_corrected,\n n_negatives=n_negatives,\n )\n ceilo_obj.data[\"beta_smooth\"] = ceilo_obj.calc_beta_smooth(\n ceilo_obj.data[\"beta\"],\n snr_limit,\n range_corrected=range_corrected,\n n_negatives=n_negatives,\n )\n if ceilo_obj.instrument is None or ceilo_obj.instrument.model is None:\n msg = \"Failed to read ceilometer model\"\n raise RuntimeError(msg)\n if (\n any(\n model in ceilo_obj.instrument.model.lower()\n for model in (\"cl61\", \"chm15k\", \"chm15kx\", \"cl51\", \"cl31\")\n )\n and range_corrected\n ):\n mask = ceilo_obj.data[\"beta_smooth\"].mask\n ceilo_obj.data[\"beta\"] = ma.masked_where(mask, ceilo_obj.data[\"beta_raw\"])\n ceilo_obj.data[\"beta\"][ceilo_obj.data[\"beta\"] <= 0] = ma.masked\n if \"depolarisation\" in ceilo_obj.data:\n ceilo_obj.data[\"depolarisation\"].mask = ceilo_obj.data[\"beta\"].mask\n ceilo_obj.screen_depol()\n ceilo_obj.screen_invalid_values()\n ceilo_obj.prepare_data()\n ceilo_obj.data_to_cloudnet_arrays()\n ceilo_obj.add_site_geolocation()\n attributes = output.add_time_attribute(ATTRIBUTES, ceilo_obj.date)\n output.update_attributes(ceilo_obj.data, attributes)\n for key in (\"beta\", \"beta_smooth\"):\n ceilo_obj.add_snr_info(key, snr_limit)\n return output.save_level1b(ceilo_obj, output_file, uuid)" }, { "class_start_lineno": 1, "class_end_lineno": 494, "func_start_lineno": 54, "func_end_lineno": 74, "func_code": "def _get_netcdf_dimensions(obj) -> dict:\n dimensions = {\n key: len(obj.data[key][:]) for key in (\"time\", \"range\") if key in obj.data\n }\n # RPG cloud radar\n if \"chirp_start_indices\" in obj.data:\n dimensions[\"chirp_sequence\"] = len(obj.data[\"chirp_start_indices\"][:])\n # disdrometer\n if hasattr(obj, \"n_diameter\") and hasattr(obj, \"n_velocity\"):\n dimensions[\"diameter\"] = obj.n_diameter\n dimensions[\"velocity\"] = obj.n_velocity\n dimensions[\"nv\"] = 2\n # HATPRO l1c\n if \"tb\" in obj.data:\n dimensions[\"frequency\"] = obj.data[\"tb\"][:].shape[1]\n dimensions[\"receiver_nb\"] = len(obj.data[\"receiver_nb\"][:])\n dimensions[\"band\"] = 2\n dimensions[\"t_amb_nb\"] = 2\n if \"irt\" in obj.data:\n dimensions[\"ir_channel\"] = obj.data[\"irt\"][:].shape[1]\n return dimensions" }, { "class_start_lineno": 1, "class_end_lineno": 494, "func_start_lineno": 21, "func_end_lineno": 51, "func_code": "def save_level1b(\n obj,\n output_file: PathLike | str,\n uuid: UUID | str | None = None,\n) -> str:\n \"\"\"Saves Cloudnet Level 1b file.\"\"\"\n dimensions = _get_netcdf_dimensions(obj)\n with 
init_file(output_file, dimensions, obj.data, uuid) as nc:\n file_uuid = nc.file_uuid\n fix_attribute_name(nc)\n location = obj.site_meta[\"name\"]\n nc.cloudnet_file_type = obj.instrument.domain\n nc.title = get_l1b_title(obj.instrument, location)\n if isinstance(obj.date, list):\n nc.year, nc.month, nc.day = obj.date\n elif isinstance(obj.date, datetime.date):\n nc.year = str(obj.date.year)\n nc.month = str(obj.date.month).zfill(2)\n nc.day = str(obj.date.day).zfill(2)\n else:\n raise TypeError\n nc.location = location\n nc.history = get_l1b_history(obj.instrument)\n nc.source = get_l1b_source(obj.instrument)\n if hasattr(obj, \"serial_number\") and obj.serial_number is not None:\n nc.serial_number = obj.serial_number\n if hasattr(obj, \"software\"):\n for software, version in obj.software.items():\n nc.setncattr(f\"{software}_version\", version)\n nc.references = get_references()\n return file_uuid" } ]
[ "function_empty", "TDD" ]
[ "cloudnetpy.instruments.ceilo._initialize_ceilo", "cloudnetpy.instruments.ceilo.ceilo2nc", "cloudnetpy.output._get_netcdf_dimensions", "cloudnetpy.output.save_level1b" ]
Python
2
4
{ "total_num": 33, "base_passed_num": 5 }
[ "cloudnetpy.cloudnetpy.concat_lib._Concat::__init__", "cloudnetpy.cloudnetpy.concat_lib.concatenate_files", "cloudnetpy.cloudnetpy.concat_lib._Concat::_write_initial_data", "cloudnetpy.cloudnetpy.concat_lib._Concat::concat_data" ]
cloudnetpy
[ "cloudnetpy/concat_lib.py", "cloudnetpy/concat_lib.py", "cloudnetpy/concat_lib.py", "cloudnetpy/concat_lib.py" ]
[ "tests/unit/test_cl61d.py", "tests/unit/test_concat_lib.py", "tests/unit/test_copernicus.py", "tests/unit/test_galileo.py", "tests/unit/test_lufft.py" ]
[ { "class_start_lineno": 122, "class_end_lineno": 253, "func_start_lineno": 125, "func_end_lineno": 136, "func_code": " def __init__(\n self,\n filenames: Iterable[PathLike | str],\n output_file: str,\n concat_dimension: str = \"time\",\n ):\n self.filenames = sorted(map(Path, filenames), key=lambda f: f.name)\n self.concat_dimension = concat_dimension\n self.first_filename = self.filenames[0]\n self.first_file = netCDF4.Dataset(self.first_filename)\n self.concatenated_file = self._init_output_file(output_file)\n self.common_variables = set()" }, { "class_start_lineno": 1, "class_end_lineno": 352, "func_start_lineno": 85, "func_end_lineno": 119, "func_code": "def concatenate_files(\n filenames: Iterable[PathLike | str],\n output_file: str,\n concat_dimension: str = \"time\",\n variables: list | None = None,\n new_attributes: dict | None = None,\n ignore: list | None = None,\n allow_difference: list | None = None,\n) -> list:\n \"\"\"Concatenate netCDF files in one dimension.\n\n Args:\n filenames: List of files to be concatenated.\n output_file: Output file name.\n concat_dimension: Dimension name for concatenation. Default is 'time'.\n variables: List of variables with the 'concat_dimension' to be concatenated.\n Default is None when all variables with 'concat_dimension' will be saved.\n new_attributes: Optional new global attributes as {'attribute_name': value}.\n ignore: List of variables to be ignored.\n allow_difference: Names of scalar variables that can differ from one file to\n another (value from the first file is saved).\n\n Returns:\n List of filenames that were successfully concatenated.\n\n Notes:\n Arrays without 'concat_dimension', scalars, and global attributes will be taken\n from the first file. Groups, possibly present in a NETCDF4 formatted file,\n are ignored.\n\n \"\"\"\n with _Concat(filenames, output_file, concat_dimension) as concat:\n concat.get_common_variables()\n concat.create_global_attributes(new_attributes)\n return concat.concat_data(variables, ignore, allow_difference)" }, { "class_start_lineno": 122, "class_end_lineno": 253, "func_start_lineno": 173, "func_end_lineno": 202, "func_code": " def _write_initial_data(self, variables: list | None, ignore: list | None) -> None:\n for key in self.first_file.variables:\n if (\n variables is not None\n and key not in variables\n and key not in self.common_variables\n and key != self.concat_dimension\n ):\n continue\n if ignore and key in ignore:\n continue\n\n auto_scale = False\n self.first_file[key].set_auto_scale(auto_scale)\n array = self.first_file[key][:]\n dimensions = self.first_file[key].dimensions\n fill_value = getattr(self.first_file[key], \"_FillValue\", None)\n var = self.concatenated_file.createVariable(\n key,\n array.dtype,\n dimensions,\n zlib=True,\n complevel=3,\n shuffle=False,\n fill_value=fill_value,\n )\n auto_scale = False\n var.set_auto_scale(auto_scale)\n var[:] = array\n _copy_attributes(self.first_file[key], var)" }, { "class_start_lineno": 122, "class_end_lineno": 253, "func_start_lineno": 151, "func_end_lineno": 171, "func_code": " def concat_data(\n self,\n variables: list | None,\n ignore: list | None,\n allow_vary: list | None,\n ) -> list:\n \"\"\"Concatenates data arrays.\"\"\"\n self._write_initial_data(variables, ignore)\n output = [self.first_filename]\n if len(self.filenames) > 1:\n for filename in self.filenames[1:]:\n try:\n self._append_data(filename, allow_vary)\n except RuntimeError as e:\n if \"NetCDF: HDF error\" in str(e):\n msg = f\"Caught a NetCDF HDF error. 
Skipping file '{filename}'.\"\n logging.exception(msg)\n continue\n raise\n output.append(filename)\n return output" } ]
[ "function_empty", "TDD" ]
[ "cloudnetpy.concat_lib._Concat.__init__", "cloudnetpy.concat_lib.concatenate_files", "cloudnetpy.concat_lib._Concat._write_initial_data", "cloudnetpy.concat_lib._Concat.concat_data" ]
Python
2
4
{ "total_num": 69, "base_passed_num": 0 }
[ "cloudnetpy.cloudnetpy.utils.binvec", "cloudnetpy.cloudnetpy.utils.rebin_2d", "cloudnetpy.cloudnetpy.cloudnetarray.CloudnetArray::rebin_data" ]
cloudnetpy
[ "cloudnetpy/utils.py", "cloudnetpy/utils.py", "cloudnetpy/cloudnetarray.py" ]
[ "tests/unit/test_cloudnetarray.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 124, "func_end_lineno": 140, "func_code": "def binvec(x: np.ndarray | list) -> np.ndarray:\n \"\"\"Converts 1-D center points to bins with even spacing.\n\n Args:\n x: 1-D array of N real values.\n\n Returns:\n ndarray: N + 1 edge values.\n\n Examples:\n >>> binvec([1, 2, 3])\n [0.5, 1.5, 2.5, 3.5]\n\n \"\"\"\n edge1 = x[0] - (x[1] - x[0]) / 2\n edge2 = x[-1] + (x[-1] - x[-2]) / 2\n return np.linspace(edge1, edge2, len(x) + 1)" }, { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 143, "func_end_lineno": 192, "func_code": "def rebin_2d(\n x_in: np.ndarray,\n array: ma.MaskedArray,\n x_new: np.ndarray,\n statistic: Literal[\"mean\", \"std\"] = \"mean\",\n n_min: int = 1,\n *,\n mask_zeros: bool = True,\n) -> tuple[ma.MaskedArray, list]:\n \"\"\"Rebins 2-D data in one dimension.\n\n Args:\n x_in: 1-D array with shape (n,).\n array: 2-D input data with shape (n, m).\n x_new: 1-D target vector (center points) with shape (N,).\n statistic: Statistic to be calculated. Possible statistics are 'mean', 'std'.\n Default is 'mean'.\n n_min: Minimum number of points to have good statistics in a bin. Default is 1.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n tuple: Rebinned data with shape (N, m) and indices of bins without enough data.\n \"\"\"\n edges = binvec(x_new)\n result = np.zeros((len(x_new), array.shape[1]))\n array_screened = ma.masked_invalid(array, copy=True) # data may contain nan-values\n for ind, values in enumerate(array_screened.T):\n mask = ~values.mask\n if ma.any(values[mask]):\n result[:, ind], _, _ = stats.binned_statistic(\n x_in[mask],\n values[mask],\n statistic=statistic,\n bins=edges,\n )\n result[~np.isfinite(result)] = 0\n if mask_zeros is True:\n masked_result = ma.masked_equal(result, 0)\n else:\n masked_result = ma.array(result)\n\n # Fill bins with not enough profiles\n x_hist, _ = np.histogram(x_in, bins=edges)\n empty_mask = x_hist < n_min\n masked_result[empty_mask, :] = ma.masked\n empty_indices = list(np.nonzero(empty_mask)[0])\n if len(empty_indices) > 0:\n logging.debug(\"No data in %s bins\", len(empty_indices))\n\n return masked_result, empty_indices" }, { "class_start_lineno": 14, "class_end_lineno": 211, "func_start_lineno": 61, "func_end_lineno": 84, "func_code": " def rebin_data(\n self, time: np.ndarray, time_new: np.ndarray, *, mask_zeros: bool = True\n ) -> list:\n \"\"\"Rebins `data` in time.\n\n Args:\n time: 1D time array.\n time_new: 1D new time array.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n Time indices without data.\n\n \"\"\"\n if self.data.ndim == 1:\n self.data = utils.rebin_1d(time, self.data, time_new, mask_zeros=mask_zeros)\n bad_indices = list(np.where(self.data == ma.masked)[0])\n else:\n if not isinstance(self.data, ma.MaskedArray):\n self.data = ma.masked_array(self.data)\n self.data, bad_indices = utils.rebin_2d(\n time, self.data, time_new, mask_zeros=mask_zeros\n )\n return bad_indices" } ]
[ "function_empty", "TDD" ]
[ "cloudnetpy.utils.binvec", "cloudnetpy.utils.rebin_2d", "cloudnetpy.cloudnetarray.CloudnetArray.rebin_data" ]
Python
2
3
{ "total_num": 17, "base_passed_num": 15 }
[ "cloudnetpy.cloudnetpy.instruments.disdrometer.parsivel._read_toa5", "cloudnetpy.cloudnetpy.instruments.disdrometer.parsivel._read_fmi", "cloudnetpy.cloudnetpy.instruments.disdrometer.parsivel.parsivel2nc", "cloudnetpy.cloudnetpy.output._get_netcdf_dimensions", "cloudnetpy.cloudnetpy.output.save_level1b" ]
cloudnetpy
[ "cloudnetpy/instruments/disdrometer/parsivel.py", "cloudnetpy/instruments/disdrometer/parsivel.py", "cloudnetpy/instruments/disdrometer/parsivel.py", "cloudnetpy/output.py", "cloudnetpy/output.py" ]
[ "tests/unit/test_disdrometer.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 713, "func_start_lineno": 450, "func_end_lineno": 519, "func_code": "def _read_toa5(filename: str | PathLike) -> dict[str, list]:\n \"\"\"Read ASCII data from Campbell Scientific datalogger such as CR1000.\n\n References:\n CR1000 Measurement and Control System.\n https://s.campbellsci.com/documents/us/manuals/cr1000.pdf\n \"\"\"\n with open(filename, errors=\"ignore\") as file:\n reader = csv.reader(file)\n _origin_line = next(reader)\n header_line = next(reader)\n headers = [\n TOA5_HEADERS.get(re.sub(r\"\\(.*\", \"\", field)) for field in header_line\n ]\n if unknown_headers := [\n header_line[i] for i in range(len(header_line)) if headers[i] is None\n ]:\n msg = \"Unknown headers: \" + \", \".join(unknown_headers)\n logging.warning(msg)\n _units_line = next(reader)\n _process_line = next(reader)\n data: dict[str, list] = {header: [] for header in headers if header is not None}\n n_rows = 0\n n_invalid_rows = 0\n for data_line in reader:\n n_rows += 1\n scalars: dict[str, datetime.datetime | int | float | str] = {}\n arrays: dict[str, list] = {\n \"number_concentration\": [],\n \"fall_velocity\": [],\n \"spectrum\": [],\n }\n try:\n for header, value in zip(headers, data_line, strict=True):\n if header is None:\n continue\n if header == \"_datetime\":\n scalars[header] = datetime.datetime.strptime(\n value,\n \"%Y-%m-%d %H:%M:%S\",\n )\n elif header in (\"number_concentration\", \"fall_velocity\"):\n arrays[header].append(float(value))\n elif header == \"spectrum\":\n arrays[header].append(int(value))\n elif PARSERS.get(header) == _parse_int:\n scalars[header] = int(value)\n elif PARSERS.get(header) == _parse_float:\n scalars[header] = float(value)\n else:\n scalars[header] = value\n except ValueError:\n n_invalid_rows += 1\n continue\n for header, scalar in scalars.items():\n data[header].append(scalar)\n if \"spectrum\" in headers:\n data[\"spectrum\"].append(\n np.array(arrays[\"spectrum\"], dtype=\"i2\").reshape((32, 32)),\n )\n if \"number_concentration\" in headers:\n data[\"number_concentration\"].append(arrays[\"number_concentration\"])\n if \"fall_velocity\" in headers:\n data[\"fall_velocity\"].append(arrays[\"fall_velocity\"])\n if n_invalid_rows == n_rows:\n msg = \"No valid data in file\"\n raise DisdrometerDataError(msg)\n if n_invalid_rows > 0:\n logging.info(\"Skipped %s invalid rows\", n_invalid_rows)\n return data" }, { "class_start_lineno": 1, "class_end_lineno": 713, "func_start_lineno": 618, "func_end_lineno": 657, "func_code": "def _read_fmi(content: str):\n r\"\"\"Read format used by Finnish Meteorological Institute and University of\n Helsinki.\n\n Format consists of sequence of the following:\n - \"[YYYY-MM-DD HH:MM:SS\\n\"\n - output of \"CS/PA\" command without non-printable characters at the end\n - \"]\\n\"\n \"\"\"\n output: dict[str, list] = {\"_datetime\": []}\n for m in re.finditer(\n r\"\\[(?P<year>\\d+)-(?P<month>\\d+)-(?P<day>\\d+) \"\n r\"(?P<hour>\\d+):(?P<minute>\\d+):(?P<second>\\d+)\"\n r\"(?P<output>[^\\]]*)\\]\",\n content,\n ):\n try:\n record = _read_typ_op4a(m[\"output\"].splitlines())\n except ValueError:\n continue\n\n for key, value in record.items():\n if key not in output:\n output[key] = [None] * len(output[\"_datetime\"])\n output[key].append(value)\n for key in output:\n if key not in record and key != \"_datetime\":\n output[key].append(None)\n\n output[\"_datetime\"].append(\n datetime.datetime(\n int(m[\"year\"]),\n int(m[\"month\"]),\n int(m[\"day\"]),\n int(m[\"hour\"]),\n 
int(m[\"minute\"]),\n int(m[\"second\"]),\n )\n )\n return output" }, { "class_start_lineno": 1, "class_end_lineno": 713, "func_start_lineno": 23, "func_end_lineno": 77, "func_code": "def parsivel2nc(\n disdrometer_file: str | PathLike | Iterable[str | PathLike],\n output_file: str,\n site_meta: dict,\n uuid: str | None = None,\n date: str | datetime.date | None = None,\n telegram: Sequence[int | None] | None = None,\n timestamps: Sequence[datetime.datetime] | None = None,\n) -> str:\n \"\"\"Converts OTT Parsivel-2 disdrometer data into Cloudnet Level 1b netCDF\n file.\n\n Args:\n disdrometer_file: Filename of disdrometer file or list of filenames.\n output_file: Output filename.\n site_meta: Dictionary containing information about the site. Required key\n is `name`.\n uuid: Set specific UUID for the file.\n date: Expected date of the measurements as YYYY-MM-DD.\n telegram: List of measured value numbers as specified in section 11.2 of\n the instrument's operating instructions. Unknown values are indicated\n with None. Telegram is required if the input file doesn't contain a\n header.\n timestamps: Specify list of timestamps if they are missing in the input file.\n\n Returns:\n UUID of the generated file.\n\n Raises:\n DisdrometerDataError: Timestamps do not match the expected date, or unable\n to read the disdrometer file.\n\n Examples:\n >>> from cloudnetpy.instruments import parsivel2nc\n >>> site_meta = {'name': 'Lindenberg', 'altitude': 104, 'latitude': 52.2,\n 'longitude': 14.1}\n >>> uuid = parsivel2nc('parsivel.log', 'parsivel.nc', site_meta)\n\n \"\"\"\n if isinstance(date, str):\n date = datetime.date.fromisoformat(date)\n if isinstance(disdrometer_file, str | PathLike):\n disdrometer_file = [disdrometer_file]\n disdrometer = Parsivel(disdrometer_file, site_meta, telegram, date, timestamps)\n disdrometer.sort_timestamps()\n disdrometer.remove_duplicate_timestamps()\n disdrometer.mask_invalid_values()\n if len(disdrometer.data[\"time\"].data) < 2:\n msg = \"Too few data points\"\n raise DisdrometerDataError(msg)\n disdrometer.convert_units()\n disdrometer.add_meta()\n attributes = output.add_time_attribute(ATTRIBUTES, disdrometer.date)\n output.update_attributes(disdrometer.data, attributes)\n return output.save_level1b(disdrometer, output_file, uuid)" }, { "class_start_lineno": 1, "class_end_lineno": 494, "func_start_lineno": 54, "func_end_lineno": 74, "func_code": "def _get_netcdf_dimensions(obj) -> dict:\n dimensions = {\n key: len(obj.data[key][:]) for key in (\"time\", \"range\") if key in obj.data\n }\n # RPG cloud radar\n if \"chirp_start_indices\" in obj.data:\n dimensions[\"chirp_sequence\"] = len(obj.data[\"chirp_start_indices\"][:])\n # disdrometer\n if hasattr(obj, \"n_diameter\") and hasattr(obj, \"n_velocity\"):\n dimensions[\"diameter\"] = obj.n_diameter\n dimensions[\"velocity\"] = obj.n_velocity\n dimensions[\"nv\"] = 2\n # HATPRO l1c\n if \"tb\" in obj.data:\n dimensions[\"frequency\"] = obj.data[\"tb\"][:].shape[1]\n dimensions[\"receiver_nb\"] = len(obj.data[\"receiver_nb\"][:])\n dimensions[\"band\"] = 2\n dimensions[\"t_amb_nb\"] = 2\n if \"irt\" in obj.data:\n dimensions[\"ir_channel\"] = obj.data[\"irt\"][:].shape[1]\n return dimensions" }, { "class_start_lineno": 1, "class_end_lineno": 494, "func_start_lineno": 21, "func_end_lineno": 51, "func_code": "def save_level1b(\n obj,\n output_file: PathLike | str,\n uuid: UUID | str | None = None,\n) -> str:\n \"\"\"Saves Cloudnet Level 1b file.\"\"\"\n dimensions = _get_netcdf_dimensions(obj)\n with 
init_file(output_file, dimensions, obj.data, uuid) as nc:\n file_uuid = nc.file_uuid\n fix_attribute_name(nc)\n location = obj.site_meta[\"name\"]\n nc.cloudnet_file_type = obj.instrument.domain\n nc.title = get_l1b_title(obj.instrument, location)\n if isinstance(obj.date, list):\n nc.year, nc.month, nc.day = obj.date\n elif isinstance(obj.date, datetime.date):\n nc.year = str(obj.date.year)\n nc.month = str(obj.date.month).zfill(2)\n nc.day = str(obj.date.day).zfill(2)\n else:\n raise TypeError\n nc.location = location\n nc.history = get_l1b_history(obj.instrument)\n nc.source = get_l1b_source(obj.instrument)\n if hasattr(obj, \"serial_number\") and obj.serial_number is not None:\n nc.serial_number = obj.serial_number\n if hasattr(obj, \"software\"):\n for software, version in obj.software.items():\n nc.setncattr(f\"{software}_version\", version)\n nc.references = get_references()\n return file_uuid" } ]
[ "function_empty", "TDD" ]
[ "cloudnetpy.instruments.disdrometer.parsivel._read_toa5", "cloudnetpy.instruments.disdrometer.parsivel._read_fmi", "cloudnetpy.instruments.disdrometer.parsivel.parsivel2nc", "cloudnetpy.output._get_netcdf_dimensions", "cloudnetpy.output.save_level1b" ]
Python
4
5
{ "total_num": 54, "base_passed_num": 0 }
[ "cloudnetpy.cloudnetpy.products.drizzle_error._get_drizzle_indices", "cloudnetpy.cloudnetpy.products.drizzle_error.get_drizzle_error", "cloudnetpy.cloudnetpy.utils.l2norm_weighted", "cloudnetpy.cloudnetpy.products.drizzle_error._calc_error" ]
cloudnetpy
[ "cloudnetpy/products/drizzle_error.py", "cloudnetpy/products/drizzle_error.py", "cloudnetpy/utils.py", "cloudnetpy/products/drizzle_error.py" ]
[ "tests/unit/test_drizzle.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 188, "func_start_lineno": 36, "func_end_lineno": 41, "func_code": "def _get_drizzle_indices(diameter: np.ndarray) -> dict:\n return {\n \"drizzle\": diameter > 0,\n \"small\": np.logical_and(diameter <= 1e-4, diameter > 1e-5),\n \"tiny\": np.logical_and(diameter <= 1e-5, diameter > 0),\n }" }, { "class_start_lineno": 1, "class_end_lineno": 188, "func_start_lineno": 11, "func_end_lineno": 33, "func_code": "def get_drizzle_error(\n categorize: DrizzleSource,\n drizzle_parameters: DrizzleSolver,\n) -> dict:\n \"\"\"Estimates error and bias for drizzle classification.\n\n Args:\n categorize: The :class:`DrizzleSource` instance.\n drizzle_parameters: The :class:`DrizzleSolver` instance.\n\n Returns:\n dict: Dictionary containing information of estimated error and bias for drizzle\n\n \"\"\"\n parameters = drizzle_parameters.params\n drizzle_indices = _get_drizzle_indices(parameters[\"Do\"])\n error_input = _read_input_uncertainty(categorize, \"error\")\n if utils.isscalar(error_input[0]) is True: # Constant Z error\n z_error, bias_error = error_input\n z_error = np.full(categorize.z.shape, z_error)\n error_input = z_error, bias_error\n bias_input = _read_input_uncertainty(categorize, \"bias\")\n return _calc_errors(drizzle_indices, error_input, bias_input)" }, { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 504, "func_end_lineno": 529, "func_code": "def l2norm_weighted(\n values: tuple,\n overall_scale: float,\n term_weights: tuple,\n) -> ma.MaskedArray:\n \"\"\"Calculates scaled and weighted Euclidean distance.\n\n Calculated distance is of form: scale * sqrt((a1*a)**2 + (b1*b)**2 + ...)\n where a, b, ... are terms to be summed and a1, a2, ... are optional weights\n for the terms.\n\n Args:\n values: Tuple containing the values.\n overall_scale: Scale factor for the calculated Euclidean distance.\n term_weights: Weights for the terms. Must be single float or a list of numbers\n (one per term).\n\n Returns:\n Scaled and weighted Euclidean distance.\n\n TODO: Use masked arrays instead of tuples.\n\n \"\"\"\n generic_values = ma.array(values, dtype=object)\n weighted_values = ma.multiply(generic_values, term_weights)\n return overall_scale * l2norm(*weighted_values)" }, { "class_start_lineno": 1, "class_end_lineno": 188, "func_start_lineno": 140, "func_end_lineno": 153, "func_code": "def _calc_error(\n scale: float,\n weights: tuple,\n error_input: tuple,\n *,\n add_mu: bool = False,\n add_mu_small: bool = False,\n) -> ma.MaskedArray:\n error = utils.l2norm_weighted(error_input, scale, weights)\n if add_mu is True:\n error = utils.l2norm(error, MU_ERROR)\n if add_mu_small is True:\n error = utils.l2norm(error, MU_ERROR_SMALL)\n return error" } ]
[ "function_empty", "TDD" ]
[ "cloudnetpy.products.drizzle_error._get_drizzle_indices", "cloudnetpy.products.drizzle_error.get_drizzle_error", "cloudnetpy.utils.l2norm_weighted", "cloudnetpy.products.drizzle_error._calc_error" ]
Python
2
4
{ "total_num": 77, "base_passed_num": 55 }
[ "cloudnetpy.cloudnetpy.utils.l2norm_weighted", "cloudnetpy.cloudnetpy.products.drizzle_error._calc_error" ]
cloudnetpy
[ "cloudnetpy/utils.py", "cloudnetpy/products/drizzle_error.py" ]
[ "tests/unit/test_drizzle_error.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 504, "func_end_lineno": 529, "func_code": "def l2norm_weighted(\n values: tuple,\n overall_scale: float,\n term_weights: tuple,\n) -> ma.MaskedArray:\n \"\"\"Calculates scaled and weighted Euclidean distance.\n\n Calculated distance is of form: scale * sqrt((a1*a)**2 + (b1*b)**2 + ...)\n where a, b, ... are terms to be summed and a1, a2, ... are optional weights\n for the terms.\n\n Args:\n values: Tuple containing the values.\n overall_scale: Scale factor for the calculated Euclidean distance.\n term_weights: Weights for the terms. Must be single float or a list of numbers\n (one per term).\n\n Returns:\n Scaled and weighted Euclidean distance.\n\n TODO: Use masked arrays instead of tuples.\n\n \"\"\"\n generic_values = ma.array(values, dtype=object)\n weighted_values = ma.multiply(generic_values, term_weights)\n return overall_scale * l2norm(*weighted_values)" }, { "class_start_lineno": 1, "class_end_lineno": 188, "func_start_lineno": 140, "func_end_lineno": 153, "func_code": "def _calc_error(\n scale: float,\n weights: tuple,\n error_input: tuple,\n *,\n add_mu: bool = False,\n add_mu_small: bool = False,\n) -> ma.MaskedArray:\n error = utils.l2norm_weighted(error_input, scale, weights)\n if add_mu is True:\n error = utils.l2norm(error, MU_ERROR)\n if add_mu_small is True:\n error = utils.l2norm(error, MU_ERROR_SMALL)\n return error" } ]
[ "function_empty", "TDD" ]
[ "cloudnetpy.utils.l2norm_weighted", "cloudnetpy.products.drizzle_error._calc_error" ]
Python
1
2
{ "total_num": 26, "base_passed_num": 15 }
[ "cloudnetpy.cloudnetpy.categorize.droplet.interpolate_lwp", "cloudnetpy.cloudnetpy.categorize.droplet.find_liquid" ]
cloudnetpy
[ "cloudnetpy/categorize/droplet.py", "cloudnetpy/categorize/droplet.py" ]
[ "tests/unit/test_droplet.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 245, "func_start_lineno": 225, "func_end_lineno": 238, "func_code": "def interpolate_lwp(obs: ClassData) -> np.ndarray:\n \"\"\"Linear interpolation of liquid water path to fill masked values.\n\n Args:\n obs: The :class:`ClassData` instance.\n\n Returns:\n Liquid water path where the masked values are filled by interpolation.\n\n \"\"\"\n if obs.lwp.all() is ma.masked:\n return np.zeros(obs.time.shape)\n ind = ma.where(obs.lwp)\n return np.interp(obs.time, obs.time[ind], obs.lwp[ind])" }, { "class_start_lineno": 1, "class_end_lineno": 245, "func_start_lineno": 52, "func_end_lineno": 121, "func_code": "def find_liquid(\n obs: ClassData,\n peak_amp: float = 1e-6,\n max_width: float = 300,\n min_points: int = 3,\n min_top_der: float = 1e-7,\n min_lwp: float = 0,\n min_alt: float = 100,\n) -> np.ndarray:\n \"\"\"Estimate liquid layers from SNR-screened attenuated backscatter.\n\n Args:\n obs: The :class:`ClassData` instance.\n peak_amp: Minimum value of peak. Default is 1e-6.\n max_width: Maximum width of peak. Default is 300 (m).\n min_points: Minimum number of valid points in peak. Default is 3.\n min_top_der: Minimum derivative above peak, defined as\n (beta_peak-beta_top) / (alt_top-alt_peak). Default is 1e-7.\n min_lwp: Minimum value from linearly interpolated lwp (kg m-2)\n measured by the mwr. Default is 0.\n min_alt: Minimum altitude of the peak from the ground. Default is 100 (m).\n\n Returns:\n 2-D boolean array denoting liquid layers.\n\n References:\n The method is based on Tuononen, M. et.al, 2019,\n https://acp.copernicus.org/articles/19/1985/2019/.\n\n \"\"\"\n\n def _is_proper_peak() -> bool:\n conditions = (\n npoints >= min_points,\n peak_width < max_width,\n top_der > min_top_der,\n is_positive_lwp,\n peak_alt > min_alt,\n )\n return all(conditions)\n\n lwp_int = interpolate_lwp(obs)\n beta = ma.copy(obs.beta)\n height = obs.height\n\n is_liquid = np.zeros(beta.shape, dtype=bool)\n base_below_peak = utils.n_elements(height, 200)\n top_above_peak = utils.n_elements(height, 150)\n difference = ma.array(np.diff(beta, axis=1))\n beta_diff = difference.filled(0)\n beta = beta.filled(0)\n peak_indices = _find_strong_peaks(beta, peak_amp)\n\n for n, peak in zip(*peak_indices, strict=True):\n lprof = beta[n, :]\n dprof = beta_diff[n, :]\n try:\n base = ind_base(dprof, peak, base_below_peak, 4)\n top = ind_top(dprof, peak, height.shape[0], top_above_peak, 4)\n except IndexError:\n continue\n npoints = np.count_nonzero(lprof[base : top + 1])\n peak_width = height[top] - height[base]\n peak_alt = height[peak] - height[0]\n top_der = (lprof[peak] - lprof[top]) / (height[top] - height[peak])\n is_positive_lwp = lwp_int[n] >= min_lwp\n if _is_proper_peak():\n is_liquid[n, base : top + 1] = True\n\n return is_liquid" } ]
[ "function_empty" ]
[ "cloudnetpy.categorize.droplet.interpolate_lwp", "cloudnetpy.categorize.droplet.find_liquid" ]
Python
2
2
{ "total_num": 18, "base_passed_num": 15 }
[ "cloudnetpy.cloudnetpy.categorize.itu._calc_line_shape", "cloudnetpy.cloudnetpy.categorize.itu._calc_oxygen_refractivity", "cloudnetpy.cloudnetpy.categorize.itu.calc_gas_specific_attenuation" ]
cloudnetpy
[ "cloudnetpy/categorize/itu.py", "cloudnetpy/categorize/itu.py", "cloudnetpy/categorize/itu.py" ]
[ "tests/unit/test_itu.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 243, "func_start_lineno": 74, "func_end_lineno": 89, "func_code": "def _calc_line_shape(\n frequency: float | np.floating,\n center: npt.NDArray,\n width: npt.NDArray,\n correction: npt.NDArray | float,\n) -> npt.NDArray:\n return (\n frequency\n / center\n * (\n (width - correction * (center - frequency))\n / ((center - frequency) ** 2 + width**2)\n + (width - correction * (center + frequency))\n / ((center + frequency) ** 2 + width**2)\n )\n )" }, { "class_start_lineno": 1, "class_end_lineno": 243, "func_start_lineno": 92, "func_end_lineno": 116, "func_code": "def _calc_oxygen_refractivity(\n dry_pressure: npt.NDArray,\n vapor_pressure: npt.NDArray,\n frequency: float | np.floating,\n theta: npt.NDArray,\n) -> npt.NDArray:\n f0, a1, a2, a3, a4, a5, a6 = OXYGEN_TABLE[:, :, np.newaxis, np.newaxis]\n strength = a1 * 1e-7 * dry_pressure * theta**3 * np.exp(a2 * (1 - theta))\n width = (\n a3 * 1e-4 * (dry_pressure * theta ** (0.8 - a4) + 1.1 * vapor_pressure * theta)\n )\n width = np.sqrt(width**2 + 2.25e-6)\n correction = (a5 + a6 * theta) * 1e-4 * (dry_pressure + vapor_pressure) * theta**0.8\n shape = _calc_line_shape(frequency, f0, width, correction)\n d = 5.6e-4 * (dry_pressure + vapor_pressure) * theta**0.8\n continuum = (\n frequency\n * dry_pressure\n * theta**2\n * (\n 6.14e-5 / (d * (1 + (frequency / d) ** 2))\n + ((1.4e-12 * dry_pressure * theta**1.5) / (1 + 1.9e-5 * frequency**1.5))\n )\n )\n return np.sum(strength * shape, axis=0) + continuum" }, { "class_start_lineno": 1, "class_end_lineno": 243, "func_start_lineno": 42, "func_end_lineno": 71, "func_code": "def calc_gas_specific_attenuation(\n pressure: npt.NDArray,\n vapor_pressure: npt.NDArray,\n temperature: npt.NDArray,\n frequency: float | np.floating,\n) -> npt.NDArray:\n \"\"\"Calculate specific attenuation due to dry air and water vapor for\n frequency up to 1000 GHz.\n\n Args:\n pressure: Pressure (Pa)\n vapor_pressure: Water vapor partial pressure (Pa)\n temperature: Temperature (K)\n frequency: Frequency (GHz)\n\n References:\n ITU-R P.676-13: Attenuation by atmospheric gases and related effects.\n https://www.itu.int/dms_pubrec/itu-r/rec/p/R-REC-P.676-13-202208-I!!PDF-E.pdf\n \"\"\"\n pressure = pressure * con.PA_TO_HPA\n vapor_pressure = vapor_pressure * con.PA_TO_HPA\n dry_pressure = pressure - vapor_pressure\n theta = 300 / temperature\n oxygen_refractivity = _calc_oxygen_refractivity(\n dry_pressure, vapor_pressure, frequency, theta\n )\n vapor_refractivity = _calc_vapor_refractivity(\n dry_pressure, vapor_pressure, frequency, theta\n )\n return 0.1820 * frequency * (oxygen_refractivity + vapor_refractivity)" } ]
[ "function_empty", "TDD" ]
[ "cloudnetpy.categorize.itu._calc_line_shape", "cloudnetpy.categorize.itu._calc_oxygen_refractivity", "cloudnetpy.categorize.itu.calc_gas_specific_attenuation" ]
Python
1
3
{ "total_num": 2, "base_passed_num": 0 }
[ "cloudnetpy.cloudnetpy.categorize.atmos_utils.fill_clouds_with_lwc_dz", "cloudnetpy.cloudnetpy.products.lwc.Lwc::_init_lwc_adiabatic", "cloudnetpy.cloudnetpy.categorize.atmos_utils.calc_saturation_vapor_pressure", "cloudnetpy.cloudnetpy.categorize.atmos_utils.calc_mixing_ratio", "cloudnetpy.cloudnetpy.categorize.atmos_utils.calc_lwc_change_rate" ]
cloudnetpy
[ "cloudnetpy/categorize/atmos_utils.py", "cloudnetpy/products/lwc.py", "cloudnetpy/products/lwc.py", "cloudnetpy/categorize/atmos_utils.py", "cloudnetpy/categorize/atmos_utils.py", "cloudnetpy/categorize/atmos_utils.py" ]
[ "tests/unit/test_lwc.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 357, "func_start_lineno": 154, "func_end_lineno": 172, "func_code": "def fill_clouds_with_lwc_dz(\n temperature: np.ndarray, pressure: np.ndarray, is_liquid: np.ndarray\n) -> np.ndarray:\n \"\"\"Fills liquid clouds with lwc change rate at the cloud bases.\n\n Args:\n temperature: 2D temperature array (K).\n pressure: 2D pressure array (Pa).\n is_liquid: Boolean array indicating presence of liquid clouds.\n\n Returns:\n Liquid water content change rate (kg m-3 m-1), so that for each cloud the base\n value is filled for the whole cloud.\n\n \"\"\"\n lwc_dz = get_lwc_change_rate_at_bases(temperature, pressure, is_liquid)\n lwc_dz_filled = ma.zeros(lwc_dz.shape)\n lwc_dz_filled[is_liquid] = utils.ffill(lwc_dz[is_liquid])\n return lwc_dz_filled" }, { "class_start_lineno": 120, "class_end_lineno": 167, "func_start_lineno": 146, "func_end_lineno": 152, "func_code": " def _init_lwc_adiabatic(self) -> np.ndarray:\n \"\"\"Returns theoretical adiabatic lwc in liquid clouds (kg/m3).\"\"\"\n lwc_dz = atmos_utils.fill_clouds_with_lwc_dz(\n *self.lwc_source.atmosphere,\n self.is_liquid,\n )\n return atmos_utils.calc_adiabatic_lwc(lwc_dz, self.height)" }, { "class_start_lineno": 120, "class_end_lineno": 167, "func_start_lineno": 134, "func_end_lineno": 140, "func_code": " def __init__(self, lwc_source: LwcSource):\n self.lwc_source = lwc_source\n self.height = lwc_source.getvar(\"height\")\n self.is_liquid = self._get_liquid()\n self.lwc_adiabatic = self._init_lwc_adiabatic()\n self.lwc = self._adiabatic_lwc_to_lwc()\n self._mask_rain()" }, { "class_start_lineno": 1, "class_end_lineno": 357, "func_start_lineno": 245, "func_end_lineno": 266, "func_code": "def calc_saturation_vapor_pressure(temperature: np.ndarray) -> np.ndarray:\n \"\"\"Goff-Gratch formula for saturation vapor pressure over water adopted by WMO.\n\n Args:\n temperature: Temperature (K).\n\n Returns:\n Saturation vapor pressure (Pa).\n\n \"\"\"\n ratio = con.T0 / temperature\n inv_ratio = ratio**-1\n return (\n 10\n ** (\n 10.79574 * (1 - ratio)\n - 5.028 * np.log10(inv_ratio)\n + 1.50475e-4 * (1 - (10 ** (-8.2969 * (inv_ratio - 1))))\n + 0.42873e-3 * (10 ** (4.76955 * (1 - ratio)) - 1)\n + 0.78614\n )\n ) * con.HPA_TO_PA" }, { "class_start_lineno": 1, "class_end_lineno": 357, "func_start_lineno": 269, "func_end_lineno": 280, "func_code": "def calc_mixing_ratio(vapor_pressure: np.ndarray, pressure: np.ndarray) -> np.ndarray:\n \"\"\"Calculates mixing ratio from partial vapor pressure and pressure.\n\n Args:\n vapor_pressure: Partial pressure of water vapor (Pa).\n pressure: Atmospheric pressure (Pa).\n\n Returns:\n Mixing ratio (kg kg-1).\n\n \"\"\"\n return con.MW_RATIO * vapor_pressure / (pressure - vapor_pressure)" }, { "class_start_lineno": 1, "class_end_lineno": 357, "func_start_lineno": 201, "func_end_lineno": 242, "func_code": "def calc_lwc_change_rate(temperature: np.ndarray, pressure: np.ndarray) -> np.ndarray:\n \"\"\"Returns rate of change of condensable water (LWC).\n\n Calculates the theoretical adiabatic rate of increase of LWC\n with height, given the cloud base temperature and pressure.\n\n Args:\n temperature: Temperature of cloud base (K).\n pressure: Pressure of cloud base (Pa).\n\n Returns:\n dlwc/dz (kg m-3 m-1)\n\n References:\n Brenguier, 1991, https://doi.org/10.1175/1520-0469(1991)048<0264:POTCPA>2.0.CO;2\n\n \"\"\"\n svp = calc_saturation_vapor_pressure(temperature)\n svp_mixing_ratio = calc_mixing_ratio(svp, pressure)\n air_density = calc_air_density(pressure, 
temperature, svp_mixing_ratio)\n\n e = 0.622\n Cp = 1004 # J kg-1 K-1\n Lv = 2.45e6 # J kg-1 = Pa m3 kg-1\n qs = svp_mixing_ratio # kg kg-1\n pa = air_density # kg m-3\n es = svp # Pa\n P = pressure # Pa\n T = temperature # K\n\n # See Appendix B in Brenguier (1991) for the derivation of the following equation\n dqs_dp = (\n -(1 - (Cp * T) / (e * Lv))\n * (((Cp * T) / (e * Lv)) + ((Lv * qs * pa) / (P - es))) ** -1\n * (e * es)\n * (P - es) ** -2\n )\n\n # Using hydrostatic equation to convert dqs_dp to dqs_dz\n dqs_dz = dqs_dp * air_density * -scipy.constants.g\n\n return dqs_dz * air_density" } ]
[ "function_empty", "TDD" ]
[ "cloudnetpy.categorize.atmos_utils.fill_clouds_with_lwc_dz", "cloudnetpy.products.lwc.Lwc._init_lwc_adiabatic", "cloudnetpy.products.lwc.Lwc.__init__", "cloudnetpy.categorize.atmos_utils.calc_saturation_vapor_pressure", "cloudnetpy.categorize.atmos_utils.calc_mixing_ratio", "cloudnetpy.categorize.atmos_utils.calc_lwc_change_rate" ]
Python
3
5
{ "total_num": 37, "base_passed_num": 0 }
[ "cloudnetpy.cloudnetpy.utils._parse_global_attribute_numeral", "cloudnetpy.cloudnetpy.utils.add_site_geolocation", "cloudnetpy.cloudnetpy.output._get_netcdf_dimensions", "cloudnetpy.cloudnetpy.output.save_level1b" ]
cloudnetpy
[ "cloudnetpy/utils.py", "cloudnetpy/utils.py", "cloudnetpy/output.py", "cloudnetpy/output.py" ]
[ "tests/unit/test_mira.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 1132, "func_end_lineno": 1140, "func_code": "def _parse_global_attribute_numeral(dataset: netCDF4.Dataset, key: str) -> float | None:\n new_str = \"\"\n attr = getattr(dataset, key)\n if attr == \"Unknown\":\n return None\n for char in attr:\n if char.isdigit() or char == \".\":\n new_str += char\n return float(new_str)" }, { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 1057, "func_end_lineno": 1129, "func_code": "def add_site_geolocation(\n data: dict,\n *,\n gps: bool,\n site_meta: dict | None = None,\n dataset: netCDF4.Dataset | None = None,\n):\n tmp_data = {}\n tmp_source = {}\n\n for key in (\"latitude\", \"longitude\", \"altitude\"):\n value = None\n source = None\n # Prefer accurate GPS coordinates.\n if gps:\n values = None\n if isinstance(dataset, netCDF4.Dataset) and key in dataset.variables:\n values = dataset[key][:]\n elif key in data:\n values = data[key].data\n if (\n values is not None\n and not np.all(ma.getmaskarray(values))\n and np.any(values != 0)\n ):\n value = ma.masked_where(values == 0, values)\n source = \"GPS\"\n # User-supplied site coordinate.\n if value is None and site_meta is not None and key in site_meta:\n value = float(site_meta[key])\n source = \"site coordinates\"\n # From source data (CHM15k, CL61, MRR-PRO, Copernicus, Galileo...).\n # Assume value is manually set, so cannot trust it.\n if (\n value is None\n and isinstance(dataset, netCDF4.Dataset)\n and key in dataset.variables\n and not np.all(ma.getmaskarray(dataset[key][:]))\n ):\n value = dataset[key][:]\n source = \"raw file\"\n # From source global attributes (MIRA).\n # Seems to be manually set, so cannot trust it.\n if (\n value is None\n and isinstance(dataset, netCDF4.Dataset)\n and hasattr(dataset, key.capitalize())\n ):\n value = _parse_global_attribute_numeral(dataset, key.capitalize())\n source = \"raw file\"\n if value is not None:\n tmp_data[key] = value\n tmp_source[key] = source\n\n if \"latitude\" in tmp_data and \"longitude\" in tmp_data:\n lat = np.atleast_1d(tmp_data[\"latitude\"])\n lon = np.atleast_1d(tmp_data[\"longitude\"])\n lon[lon > 180] - 360\n if _are_stationary(lat, lon):\n tmp_data[\"latitude\"] = float(ma.mean(lat))\n tmp_data[\"longitude\"] = float(ma.mean(lon))\n else:\n tmp_data[\"latitude\"] = lat\n tmp_data[\"longitude\"] = lon\n\n if \"altitude\" in tmp_data:\n alt = np.atleast_1d(tmp_data[\"altitude\"])\n if ma.max(alt) - ma.min(alt) < 100:\n tmp_data[\"altitude\"] = float(ma.mean(alt))\n\n for key in (\"latitude\", \"longitude\", \"altitude\"):\n if key in tmp_data:\n data[key] = CloudnetArray(tmp_data[key], key, source=tmp_source[key])" }, { "class_start_lineno": 1, "class_end_lineno": 494, "func_start_lineno": 54, "func_end_lineno": 74, "func_code": "def _get_netcdf_dimensions(obj) -> dict:\n dimensions = {\n key: len(obj.data[key][:]) for key in (\"time\", \"range\") if key in obj.data\n }\n # RPG cloud radar\n if \"chirp_start_indices\" in obj.data:\n dimensions[\"chirp_sequence\"] = len(obj.data[\"chirp_start_indices\"][:])\n # disdrometer\n if hasattr(obj, \"n_diameter\") and hasattr(obj, \"n_velocity\"):\n dimensions[\"diameter\"] = obj.n_diameter\n dimensions[\"velocity\"] = obj.n_velocity\n dimensions[\"nv\"] = 2\n # HATPRO l1c\n if \"tb\" in obj.data:\n dimensions[\"frequency\"] = obj.data[\"tb\"][:].shape[1]\n dimensions[\"receiver_nb\"] = len(obj.data[\"receiver_nb\"][:])\n dimensions[\"band\"] = 2\n dimensions[\"t_amb_nb\"] = 2\n if \"irt\" in 
obj.data:\n dimensions[\"ir_channel\"] = obj.data[\"irt\"][:].shape[1]\n return dimensions" }, { "class_start_lineno": 1, "class_end_lineno": 494, "func_start_lineno": 21, "func_end_lineno": 51, "func_code": "def save_level1b(\n obj,\n output_file: PathLike | str,\n uuid: UUID | str | None = None,\n) -> str:\n \"\"\"Saves Cloudnet Level 1b file.\"\"\"\n dimensions = _get_netcdf_dimensions(obj)\n with init_file(output_file, dimensions, obj.data, uuid) as nc:\n file_uuid = nc.file_uuid\n fix_attribute_name(nc)\n location = obj.site_meta[\"name\"]\n nc.cloudnet_file_type = obj.instrument.domain\n nc.title = get_l1b_title(obj.instrument, location)\n if isinstance(obj.date, list):\n nc.year, nc.month, nc.day = obj.date\n elif isinstance(obj.date, datetime.date):\n nc.year = str(obj.date.year)\n nc.month = str(obj.date.month).zfill(2)\n nc.day = str(obj.date.day).zfill(2)\n else:\n raise TypeError\n nc.location = location\n nc.history = get_l1b_history(obj.instrument)\n nc.source = get_l1b_source(obj.instrument)\n if hasattr(obj, \"serial_number\") and obj.serial_number is not None:\n nc.serial_number = obj.serial_number\n if hasattr(obj, \"software\"):\n for software, version in obj.software.items():\n nc.setncattr(f\"{software}_version\", version)\n nc.references = get_references()\n return file_uuid" } ]
[ "function_empty", "TDD" ]
[ "cloudnetpy.utils._parse_global_attribute_numeral", "cloudnetpy.utils.add_site_geolocation", "cloudnetpy.output._get_netcdf_dimensions", "cloudnetpy.output.save_level1b" ]
Python
1
4
{ "total_num": 31, "base_passed_num": 0 }
[ "cloudnetpy.cloudnetpy.utils.rebin_1d", "cloudnetpy.cloudnetpy.cloudnetarray.CloudnetArray::rebin_data", "cloudnetpy.cloudnetpy.utils.binvec" ]
cloudnetpy
[ "cloudnetpy/utils.py", "cloudnetpy/cloudnetarray.py", "cloudnetpy/categorize/mwr.py", "cloudnetpy/utils.py" ]
[ "tests/unit/test_mwr.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 195, "func_end_lineno": 231, "func_code": "def rebin_1d(\n x_in: np.ndarray,\n array: np.ndarray | ma.MaskedArray,\n x_new: np.ndarray,\n statistic: str = \"mean\",\n *,\n mask_zeros: bool = True,\n) -> ma.MaskedArray:\n \"\"\"Rebins 1D array.\n\n Args:\n x_in: 1-D array with shape (n,).\n array: 1-D input data with shape (m,).\n x_new: 1-D target vector (center points) with shape (N,).\n statistic: Statistic to be calculated. Possible statistics are 'mean', 'std'.\n Default is 'mean'.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n Re-binned data with shape (N,).\n\n \"\"\"\n edges = binvec(x_new)\n result = np.zeros(len(x_new))\n array_screened = ma.masked_invalid(array, copy=True) # data may contain nan-values\n mask = ~array_screened.mask\n if ma.any(array_screened[mask]):\n result, _, _ = stats.binned_statistic(\n x_in[mask],\n array_screened[mask],\n statistic=statistic,\n bins=edges,\n )\n result[~np.isfinite(result)] = 0\n if mask_zeros:\n return ma.masked_equal(result, 0)\n return ma.array(result)" }, { "class_start_lineno": 14, "class_end_lineno": 211, "func_start_lineno": 61, "func_end_lineno": 84, "func_code": " def rebin_data(\n self, time: np.ndarray, time_new: np.ndarray, *, mask_zeros: bool = True\n ) -> list:\n \"\"\"Rebins `data` in time.\n\n Args:\n time: 1D time array.\n time_new: 1D new time array.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n Time indices without data.\n\n \"\"\"\n if self.data.ndim == 1:\n self.data = utils.rebin_1d(time, self.data, time_new, mask_zeros=mask_zeros)\n bad_indices = list(np.where(self.data == ma.masked)[0])\n else:\n if not isinstance(self.data, ma.MaskedArray):\n self.data = ma.masked_array(self.data)\n self.data, bad_indices = utils.rebin_2d(\n time, self.data, time_new, mask_zeros=mask_zeros\n )\n return bad_indices" }, { "class_start_lineno": 11, "class_end_lineno": 50, "func_start_lineno": 24, "func_end_lineno": 32, "func_code": " def rebin_to_grid(self, time_grid: np.ndarray) -> None:\n \"\"\"Approximates lwp and its error in a grid using mean.\n\n Args:\n time_grid: 1D target time grid.\n\n \"\"\"\n for array in self.data.values():\n array.rebin_data(self.time, time_grid)" }, { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 124, "func_end_lineno": 140, "func_code": "def binvec(x: np.ndarray | list) -> np.ndarray:\n \"\"\"Converts 1-D center points to bins with even spacing.\n\n Args:\n x: 1-D array of N real values.\n\n Returns:\n ndarray: N + 1 edge values.\n\n Examples:\n >>> binvec([1, 2, 3])\n [0.5, 1.5, 2.5, 3.5]\n\n \"\"\"\n edge1 = x[0] - (x[1] - x[0]) / 2\n edge2 = x[-1] + (x[-1] - x[-2]) / 2\n return np.linspace(edge1, edge2, len(x) + 1)" } ]
[ "function_empty", "TDD" ]
[ "cloudnetpy.utils.rebin_1d", "cloudnetpy.cloudnetarray.CloudnetArray.rebin_data", "cloudnetpy.categorize.mwr.Mwr.rebin_to_grid", "cloudnetpy.utils.binvec" ]
Python
2
3
{ "total_num": 4, "base_passed_num": 3 }
[ "cloudnetpy.cloudnetpy.utils.get_sorted_filenames", "cloudnetpy.cloudnetpy.instruments.rpg.rpg2nc", "cloudnetpy.cloudnetpy.output._get_netcdf_dimensions", "cloudnetpy.cloudnetpy.output.save_level1b" ]
cloudnetpy
[ "cloudnetpy/utils.py", "cloudnetpy/instruments/rpg.py", "cloudnetpy/output.py", "cloudnetpy/output.py" ]
[ "tests/unit/test_rpg.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 802, "func_end_lineno": 810, "func_code": "def get_sorted_filenames(file_path: str, extension: str) -> list:\n \"\"\"Returns full paths of files with some extension, sorted by filename.\"\"\"\n extension = extension.lower()\n all_files = os.listdir(file_path)\n files = [\n f\"{file_path}/{file}\" for file in all_files if file.lower().endswith(extension)\n ]\n files.sort()\n return files" }, { "class_start_lineno": 1, "class_end_lineno": 524, "func_start_lineno": 25, "func_end_lineno": 85, "func_code": "def rpg2nc(\n path_to_l1_files: str,\n output_file: str,\n site_meta: dict,\n uuid: str | None = None,\n date: str | None = None,\n) -> tuple[str, list]:\n \"\"\"Converts RPG-FMCW-94 cloud radar data into Cloudnet Level 1b netCDF file.\n\n This function reads one day of RPG Level 1 cloud radar binary files,\n concatenates the data and writes a netCDF file.\n\n Args:\n path_to_l1_files: Folder containing one day of RPG LV1 files.\n output_file: Output file name.\n site_meta: Dictionary containing information about the\n site. Required key value pairs are `altitude` (metres above mean\n sea level) and `name`.\n uuid: Set specific UUID for the file.\n date: Expected date in the input files. If not set,\n all files will be used. This might cause unexpected behavior if\n there are files from several days. If date is set as 'YYYY-MM-DD',\n only files that match the date will be used.\n\n Returns:\n 2-element tuple containing\n\n - UUID of the generated file.\n - Files used in the processing.\n\n Raises:\n ValidTimeStampError: No valid timestamps found.\n\n Examples:\n >>> from cloudnetpy.instruments import rpg2nc\n >>> site_meta = {'name': 'Hyytiala', 'altitude': 174}\n >>> rpg2nc('/path/to/files/', 'test.nc', site_meta)\n\n \"\"\"\n l1_files = utils.get_sorted_filenames(path_to_l1_files, \".LV1\")\n fmcw94_objects, valid_files = _get_fmcw94_objects(l1_files, date)\n one_day_of_data = create_one_day_data_record(fmcw94_objects)\n if not valid_files:\n return \"\", []\n print_info(one_day_of_data)\n fmcw = Fmcw(one_day_of_data, site_meta)\n fmcw.convert_time_to_fraction_hour()\n fmcw.mask_invalid_ldr()\n fmcw.mask_invalid_width()\n fmcw.sort_timestamps()\n fmcw.remove_duplicate_timestamps()\n fmcw.linear_to_db((\"Zh\", \"antenna_gain\"))\n fmcw.convert_units()\n fmcw.add_site_geolocation()\n valid_ind = fmcw.add_zenith_angle()\n fmcw.screen_time_indices(valid_ind)\n fmcw.add_height()\n attributes = output.add_time_attribute(RPG_ATTRIBUTES, fmcw.date)\n output.update_attributes(fmcw.data, attributes)\n uuid = output.save_level1b(fmcw, output_file, uuid)\n return uuid, valid_files" }, { "class_start_lineno": 1, "class_end_lineno": 494, "func_start_lineno": 54, "func_end_lineno": 74, "func_code": "def _get_netcdf_dimensions(obj) -> dict:\n dimensions = {\n key: len(obj.data[key][:]) for key in (\"time\", \"range\") if key in obj.data\n }\n # RPG cloud radar\n if \"chirp_start_indices\" in obj.data:\n dimensions[\"chirp_sequence\"] = len(obj.data[\"chirp_start_indices\"][:])\n # disdrometer\n if hasattr(obj, \"n_diameter\") and hasattr(obj, \"n_velocity\"):\n dimensions[\"diameter\"] = obj.n_diameter\n dimensions[\"velocity\"] = obj.n_velocity\n dimensions[\"nv\"] = 2\n # HATPRO l1c\n if \"tb\" in obj.data:\n dimensions[\"frequency\"] = obj.data[\"tb\"][:].shape[1]\n dimensions[\"receiver_nb\"] = len(obj.data[\"receiver_nb\"][:])\n dimensions[\"band\"] = 2\n dimensions[\"t_amb_nb\"] = 2\n if \"irt\" in obj.data:\n 
dimensions[\"ir_channel\"] = obj.data[\"irt\"][:].shape[1]\n return dimensions" }, { "class_start_lineno": 1, "class_end_lineno": 494, "func_start_lineno": 21, "func_end_lineno": 51, "func_code": "def save_level1b(\n obj,\n output_file: PathLike | str,\n uuid: UUID | str | None = None,\n) -> str:\n \"\"\"Saves Cloudnet Level 1b file.\"\"\"\n dimensions = _get_netcdf_dimensions(obj)\n with init_file(output_file, dimensions, obj.data, uuid) as nc:\n file_uuid = nc.file_uuid\n fix_attribute_name(nc)\n location = obj.site_meta[\"name\"]\n nc.cloudnet_file_type = obj.instrument.domain\n nc.title = get_l1b_title(obj.instrument, location)\n if isinstance(obj.date, list):\n nc.year, nc.month, nc.day = obj.date\n elif isinstance(obj.date, datetime.date):\n nc.year = str(obj.date.year)\n nc.month = str(obj.date.month).zfill(2)\n nc.day = str(obj.date.day).zfill(2)\n else:\n raise TypeError\n nc.location = location\n nc.history = get_l1b_history(obj.instrument)\n nc.source = get_l1b_source(obj.instrument)\n if hasattr(obj, \"serial_number\") and obj.serial_number is not None:\n nc.serial_number = obj.serial_number\n if hasattr(obj, \"software\"):\n for software, version in obj.software.items():\n nc.setncattr(f\"{software}_version\", version)\n nc.references = get_references()\n return file_uuid" } ]
[ "function_empty", "TDD" ]
[ "cloudnetpy.utils.get_sorted_filenames", "cloudnetpy.instruments.rpg.rpg2nc", "cloudnetpy.output._get_netcdf_dimensions", "cloudnetpy.output.save_level1b" ]
Python
2
4
{ "total_num": 34, "base_passed_num": 0 }
[ "cloudnetpy.cloudnetpy.utils.binvec", "cloudnetpy.cloudnetpy.utils.rebin_2d", "cloudnetpy.cloudnetpy.utils.rebin_1d" ]
cloudnetpy
[ "cloudnetpy/utils.py", "cloudnetpy/utils.py", "cloudnetpy/utils.py" ]
[ "tests/unit/test_utils.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 124, "func_end_lineno": 140, "func_code": "def binvec(x: np.ndarray | list) -> np.ndarray:\n \"\"\"Converts 1-D center points to bins with even spacing.\n\n Args:\n x: 1-D array of N real values.\n\n Returns:\n ndarray: N + 1 edge values.\n\n Examples:\n >>> binvec([1, 2, 3])\n [0.5, 1.5, 2.5, 3.5]\n\n \"\"\"\n edge1 = x[0] - (x[1] - x[0]) / 2\n edge2 = x[-1] + (x[-1] - x[-2]) / 2\n return np.linspace(edge1, edge2, len(x) + 1)" }, { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 143, "func_end_lineno": 192, "func_code": "def rebin_2d(\n x_in: np.ndarray,\n array: ma.MaskedArray,\n x_new: np.ndarray,\n statistic: Literal[\"mean\", \"std\"] = \"mean\",\n n_min: int = 1,\n *,\n mask_zeros: bool = True,\n) -> tuple[ma.MaskedArray, list]:\n \"\"\"Rebins 2-D data in one dimension.\n\n Args:\n x_in: 1-D array with shape (n,).\n array: 2-D input data with shape (n, m).\n x_new: 1-D target vector (center points) with shape (N,).\n statistic: Statistic to be calculated. Possible statistics are 'mean', 'std'.\n Default is 'mean'.\n n_min: Minimum number of points to have good statistics in a bin. Default is 1.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n tuple: Rebinned data with shape (N, m) and indices of bins without enough data.\n \"\"\"\n edges = binvec(x_new)\n result = np.zeros((len(x_new), array.shape[1]))\n array_screened = ma.masked_invalid(array, copy=True) # data may contain nan-values\n for ind, values in enumerate(array_screened.T):\n mask = ~values.mask\n if ma.any(values[mask]):\n result[:, ind], _, _ = stats.binned_statistic(\n x_in[mask],\n values[mask],\n statistic=statistic,\n bins=edges,\n )\n result[~np.isfinite(result)] = 0\n if mask_zeros is True:\n masked_result = ma.masked_equal(result, 0)\n else:\n masked_result = ma.array(result)\n\n # Fill bins with not enough profiles\n x_hist, _ = np.histogram(x_in, bins=edges)\n empty_mask = x_hist < n_min\n masked_result[empty_mask, :] = ma.masked\n empty_indices = list(np.nonzero(empty_mask)[0])\n if len(empty_indices) > 0:\n logging.debug(\"No data in %s bins\", len(empty_indices))\n\n return masked_result, empty_indices" }, { "class_start_lineno": 1, "class_end_lineno": 1151, "func_start_lineno": 195, "func_end_lineno": 231, "func_code": "def rebin_1d(\n x_in: np.ndarray,\n array: np.ndarray | ma.MaskedArray,\n x_new: np.ndarray,\n statistic: str = \"mean\",\n *,\n mask_zeros: bool = True,\n) -> ma.MaskedArray:\n \"\"\"Rebins 1D array.\n\n Args:\n x_in: 1-D array with shape (n,).\n array: 1-D input data with shape (m,).\n x_new: 1-D target vector (center points) with shape (N,).\n statistic: Statistic to be calculated. Possible statistics are 'mean', 'std'.\n Default is 'mean'.\n mask_zeros: Whether to mask 0 values in the returned array. Default is True.\n\n Returns:\n Re-binned data with shape (N,).\n\n \"\"\"\n edges = binvec(x_new)\n result = np.zeros(len(x_new))\n array_screened = ma.masked_invalid(array, copy=True) # data may contain nan-values\n mask = ~array_screened.mask\n if ma.any(array_screened[mask]):\n result, _, _ = stats.binned_statistic(\n x_in[mask],\n array_screened[mask],\n statistic=statistic,\n bins=edges,\n )\n result[~np.isfinite(result)] = 0\n if mask_zeros:\n return ma.masked_equal(result, 0)\n return ma.array(result)" } ]
[ "function_empty", "TDD" ]
[ "cloudnetpy.utils.binvec", "cloudnetpy.utils.rebin_2d", "cloudnetpy.utils.rebin_1d" ]
Python
1
3
{ "total_num": 160, "base_passed_num": 151 }
[ "datachain.src.datachain.asyn.AsyncMapper::shutdown_producer", "datachain.src.datachain.asyn.AsyncMapper::iterate" ]
datachain
[ "datachain/asyn.py", "datachain/asyn.py" ]
[ "tests/unit/test_asyn.py" ]
[ { "class_start_lineno": 26, "class_end_lineno": 197, "func_start_lineno": 84, "func_end_lineno": 98, "func_code": " def shutdown_producer(self) -> None:\n \"\"\"\n Signal the producer to stop and drain any remaining items from the work_queue.\n\n This method sets an internal event, `_shutdown_producer`, which tells the\n producer that it should stop adding items to the queue. To ensure that the\n producer notices this signal promptly, we also attempt to drain any items\n currently in the queue, clearing it so that the event can be checked without\n delay.\n \"\"\"\n self._shutdown_producer.set()\n q = self.work_queue\n while not q.empty():\n q.get_nowait()\n q.task_done()" }, { "class_start_lineno": 26, "class_end_lineno": 197, "func_start_lineno": 174, "func_end_lineno": 191, "func_code": " def iterate(self, timeout=None) -> Generator[ResultT, None, None]:\n init = asyncio.run_coroutine_threadsafe(self.init(), self.loop)\n init.result(timeout=1)\n async_run = asyncio.run_coroutine_threadsafe(self.run(), self.loop)\n try:\n while True:\n if (result := self.next_result(timeout)) is not None:\n yield result\n else:\n break\n if exc := async_run.exception():\n raise exc\n finally:\n self.shutdown_producer()\n if not async_run.done():\n async_run.cancel()\n wait([async_run])\n self._producer_is_shutdown.wait()" } ]
[ "TDD" ]
[ "datachain.asyn.AsyncMapper.shutdown_producer", "datachain.asyn.AsyncMapper.iterate" ]
Python
0
2
{ "total_num": 19, "base_passed_num": 3 }
[ "datachain.src.datachain.catalog.loader.get_metastore", "datachain.src.datachain.catalog.loader.get_catalog" ]
datachain
[ "datachain/catalog/loader.py", "datachain/catalog/loader.py" ]
[ "tests/unit/test_catalog_loader.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 141, "func_start_lineno": 29, "func_end_lineno": 61, "func_code": "def get_metastore(in_memory: bool = False) -> \"AbstractMetastore\":\n metastore_serialized = os.environ.get(METASTORE_SERIALIZED)\n if metastore_serialized:\n metastore_obj = deserialize(metastore_serialized)\n if not isinstance(metastore_obj, AbstractMetastore):\n raise RuntimeError(\n \"Deserialized Metastore is not an instance of AbstractMetastore: \"\n f\"{metastore_obj}\"\n )\n return metastore_obj\n\n metastore_import_path = os.environ.get(METASTORE_IMPORT_PATH)\n metastore_arg_envs = get_envs_by_prefix(METASTORE_ARG_PREFIX)\n # Convert env variable names to keyword argument names by lowercasing them\n metastore_args: dict[str, Any] = {\n k.lower(): v for k, v in metastore_arg_envs.items()\n }\n\n if not metastore_import_path:\n metastore_args[\"in_memory\"] = in_memory\n return SQLiteMetastore(**metastore_args)\n if in_memory:\n raise RuntimeError(IN_MEMORY_ERROR_MESSAGE)\n # Metastore paths are specified as (for example):\n # datachain.data_storage.SQLiteMetastore\n if \".\" not in metastore_import_path:\n raise RuntimeError(\n f\"Invalid {METASTORE_IMPORT_PATH} import path: {metastore_import_path}\"\n )\n module_name, _, class_name = metastore_import_path.rpartition(\".\")\n metastore = import_module(module_name)\n metastore_class = getattr(metastore, class_name)\n return metastore_class(**metastore_args)" }, { "class_start_lineno": 1, "class_end_lineno": 141, "func_start_lineno": 122, "func_end_lineno": 141, "func_code": "def get_catalog(\n client_config: Optional[dict[str, Any]] = None, in_memory: bool = False\n) -> Catalog:\n \"\"\"\n Function that creates Catalog instance with appropriate metastore\n and warehouse classes. Metastore class can be provided with env variable\n DATACHAIN_METASTORE and if not provided, default one is used. Warehouse class\n can be provided with env variable DATACHAIN_WAREHOUSE and if not provided,\n\n If classes expects some kwargs, they can be provided via env variables\n by adding them with prefix (DATACHAIN_METASTORE_ARG_ and DATACHAIN_WAREHOUSE_ARG_)\n and name of variable after, e.g. if it accepts team_id as kwargs\n we can provide DATACHAIN_METASTORE_ARG_TEAM_ID=12345 env variable.\n \"\"\"\n return Catalog(\n metastore=get_metastore(in_memory=in_memory),\n warehouse=get_warehouse(in_memory=in_memory),\n client_config=client_config,\n in_memory=in_memory,\n )" } ]
[ "function_empty", "TDD" ]
[ "datachain.catalog.loader.get_metastore", "datachain.catalog.loader.get_catalog" ]
Python
1
2
{ "total_num": 6, "base_passed_num": 3 }
[ "datachain.src.datachain.func.func.Func::_db_cols", "datachain.src.datachain.func.func.Func::_db_col_type", "datachain.src.datachain.func.func.Func::get_result_type", "datachain.src.datachain.lib.convert.python_to_sql.python_to_sql", "datachain.src.datachain.func.func.Func::get_column" ]
datachain
[ "datachain/func/func.py", "datachain/func/func.py", "datachain/func/func.py", "datachain/lib/convert/python_to_sql.py", "datachain/func/func.py" ]
[ "tests/unit/test_func.py" ]
[ { "class_start_lineno": 29, "class_end_lineno": 422, "func_start_lineno": 77, "func_end_lineno": 89, "func_code": " def _db_cols(self) -> Sequence[ColT]:\n return (\n [\n col\n if isinstance(col, (Func, BindParameter, Case, Comparator, tuple))\n else ColumnMeta.to_db_name(\n col.name if isinstance(col, ColumnElement) else col\n )\n for col in self.cols\n ]\n if self.cols\n else []\n )" }, { "class_start_lineno": 29, "class_end_lineno": 422, "func_start_lineno": 91, "func_end_lineno": 103, "func_code": " def _db_col_type(self, signals_schema: \"SignalSchema\") -> Optional[\"DataType\"]:\n if not self._db_cols:\n return None\n\n col_type: type = get_db_col_type(signals_schema, self._db_cols[0])\n for col in self._db_cols[1:]:\n if get_db_col_type(signals_schema, col) != col_type:\n raise DataChainColumnError(\n str(self),\n \"Columns must have the same type to infer result type\",\n )\n\n return list[col_type] if self.is_array else col_type # type: ignore[valid-type]" }, { "class_start_lineno": 29, "class_end_lineno": 422, "func_start_lineno": 361, "func_end_lineno": 373, "func_code": " def get_result_type(\n self, signals_schema: Optional[\"SignalSchema\"] = None\n ) -> \"DataType\":\n if self.result_type:\n return self.result_type\n\n if signals_schema and (col_type := self._db_col_type(signals_schema)):\n return col_type\n\n raise DataChainColumnError(\n str(self),\n \"Column name is required to infer result type\",\n )" }, { "class_start_lineno": 1, "class_end_lineno": 117, "func_start_lineno": 37, "func_end_lineno": 82, "func_code": "def python_to_sql(typ): # noqa: PLR0911\n if inspect.isclass(typ):\n if issubclass(typ, SQLType):\n return typ\n if issubclass(typ, Enum):\n return str\n\n res = PYTHON_TO_SQL.get(typ)\n if res:\n return res\n\n orig = get_origin(typ)\n\n if orig in (Literal, LiteralEx):\n return String\n\n args = get_args(typ)\n if inspect.isclass(orig) and (issubclass(list, orig) or issubclass(tuple, orig)):\n if args is None:\n raise TypeError(f\"Cannot resolve type '{typ}' for flattening features\")\n\n args0 = args[0]\n if ModelStore.is_pydantic(args0):\n return Array(JSON())\n\n list_type = list_of_args_to_type(args)\n return Array(list_type)\n\n if orig is Annotated:\n # Ignoring annotations\n return python_to_sql(args[0])\n\n if inspect.isclass(orig) and issubclass(dict, orig):\n return JSON\n\n if orig == Union:\n if len(args) == 2 and (type(None) in args):\n return python_to_sql(args[0])\n\n if _is_union_str_literal(orig, args):\n return String\n\n if _is_json_inside_union(orig, args):\n return JSON\n\n raise TypeError(f\"Cannot recognize type {typ}\")" }, { "class_start_lineno": 29, "class_end_lineno": 422, "func_start_lineno": 375, "func_end_lineno": 422, "func_code": " def get_column(\n self,\n signals_schema: Optional[\"SignalSchema\"] = None,\n label: Optional[str] = None,\n table: Optional[\"TableClause\"] = None,\n ) -> Column:\n col_type = self.get_result_type(signals_schema)\n sql_type = python_to_sql(col_type)\n\n def get_col(col: ColT, string_as_literal=False) -> ColT:\n # string_as_literal is used only for conditionals like `case()` where\n # literals are nested inside ColT as we have tuples of condition - values\n # and if user wants to set some case value as column, explicit `C(\"col\")`\n # syntax must be used to distinguish from literals\n if isinstance(col, tuple):\n return tuple(get_col(x, string_as_literal=True) for x in col)\n if isinstance(col, Func):\n return col.get_column(signals_schema, table=table)\n if isinstance(col, str) and not 
string_as_literal:\n column = Column(col, sql_type)\n column.table = table\n return column\n return col\n\n cols = [get_col(col) for col in self._db_cols]\n kwargs = {k: get_col(v, string_as_literal=True) for k, v in self.kwargs.items()}\n func_col = self.inner(*cols, *self.args, **kwargs)\n\n if self.is_window:\n if not self.window:\n raise DataChainParamsError(\n f\"Window function {self} requires over() clause with a window spec\",\n )\n func_col = func_col.over(\n partition_by=self.window.partition_by,\n order_by=(\n desc(self.window.order_by)\n if self.window.desc\n else self.window.order_by\n ),\n )\n\n func_col.type = sql_type() if inspect.isclass(sql_type) else sql_type\n\n if col_name := self.get_col_name(label):\n func_col = func_col.label(col_name)\n\n return func_col" } ]
[ "TDD" ]
[ "datachain.func.func.Func._db_cols", "datachain.func.func.Func._db_col_type", "datachain.func.func.Func.get_result_type", "datachain.lib.convert.python_to_sql.python_to_sql", "datachain.func.func.Func.get_column" ]
Python
0
5
{ "total_num": 94, "base_passed_num": 0 }
[ "datachain.src.datachain.lib.signal_schema.SignalSchema::_get_flat_tree", "datachain.src.datachain.lib.convert.python_to_sql.python_to_sql", "datachain.src.datachain.lib.signal_schema.SignalSchema::db_signals" ]
datachain
[ "datachain/lib/signal_schema.py", "datachain/lib/signal_schema.py", "datachain/lib/convert/python_to_sql.py", "datachain/lib/signal_schema.py" ]
[ "tests/unit/lib/test_arrow.py" ]
[ { "class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 630, "func_end_lineno": 639, "func_code": " def _get_flat_tree(\n self, tree: dict, prefix: list[str], depth: int\n ) -> Iterator[tuple[list[str], DataType, bool, int]]:\n for name, (type_, substree) in tree.items():\n suffix = name.split(\".\")\n new_prefix = prefix + suffix\n has_subtree = substree is not None\n yield new_prefix, type_, has_subtree, depth\n if substree is not None:\n yield from self._get_flat_tree(substree, new_prefix, depth + 1)" }, { "class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 627, "func_end_lineno": 628, "func_code": " def get_flat_tree(self) -> Iterator[tuple[list[str], DataType, bool, int]]:\n yield from self._get_flat_tree(self.tree, [], 0)" }, { "class_start_lineno": 1, "class_end_lineno": 117, "func_start_lineno": 37, "func_end_lineno": 82, "func_code": "def python_to_sql(typ): # noqa: PLR0911\n if inspect.isclass(typ):\n if issubclass(typ, SQLType):\n return typ\n if issubclass(typ, Enum):\n return str\n\n res = PYTHON_TO_SQL.get(typ)\n if res:\n return res\n\n orig = get_origin(typ)\n\n if orig in (Literal, LiteralEx):\n return String\n\n args = get_args(typ)\n if inspect.isclass(orig) and (issubclass(list, orig) or issubclass(tuple, orig)):\n if args is None:\n raise TypeError(f\"Cannot resolve type '{typ}' for flattening features\")\n\n args0 = args[0]\n if ModelStore.is_pydantic(args0):\n return Array(JSON())\n\n list_type = list_of_args_to_type(args)\n return Array(list_type)\n\n if orig is Annotated:\n # Ignoring annotations\n return python_to_sql(args[0])\n\n if inspect.isclass(orig) and issubclass(dict, orig):\n return JSON\n\n if orig == Union:\n if len(args) == 2 and (type(None) in args):\n return python_to_sql(args[0])\n\n if _is_union_str_literal(orig, args):\n return String\n\n if _is_json_inside_union(orig, args):\n return JSON\n\n raise TypeError(f\"Cannot recognize type {typ}\")" }, { "class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 481, "func_end_lineno": 503, "func_code": " def db_signals(\n self, name: Optional[str] = None, as_columns=False\n ) -> Union[list[str], list[Column]]:\n \"\"\"\n Returns DB columns as strings or Column objects with proper types\n Optionally, it can filter results by specific object, returning only his signals\n \"\"\"\n signals = [\n DEFAULT_DELIMITER.join(path)\n if not as_columns\n else Column(DEFAULT_DELIMITER.join(path), python_to_sql(_type))\n for path, _type, has_subtree, _ in self.get_flat_tree()\n if not has_subtree\n ]\n\n if name:\n signals = [\n s\n for s in signals\n if str(s) == name or str(s).startswith(f\"{name}{DEFAULT_DELIMITER}\")\n ]\n\n return signals # type: ignore[return-value]" } ]
[ "function_empty", "TDD" ]
[ "datachain.lib.signal_schema.SignalSchema._get_flat_tree", "datachain.lib.signal_schema.SignalSchema.get_flat_tree", "datachain.lib.convert.python_to_sql.python_to_sql", "datachain.lib.signal_schema.SignalSchema.db_signals" ]
Python
1
3
{ "total_num": 32, "base_passed_num": 31 }
[ "datachain.src.datachain.lib.image.convert_image", "datachain.src.datachain.lib.image.convert_images" ]
datachain
[ "datachain/lib/image.py", "datachain/lib/image.py" ]
[ "tests/unit/lib/test_clip.py", "tests/unit/lib/test_image.py" ]
[ { "class_start_lineno": 1, "class_end_lineno": 81, "func_start_lineno": 7, "func_end_lineno": 46, "func_code": "def convert_image(\n img: Image.Image,\n mode: str = \"RGB\",\n size: Optional[tuple[int, int]] = None,\n transform: Optional[Callable] = None,\n encoder: Optional[Callable] = None,\n device: Optional[Union[str, torch.device]] = None,\n) -> Union[Image.Image, torch.Tensor]:\n \"\"\"\n Resize, transform, and otherwise convert an image.\n\n Args:\n img (Image): PIL.Image object.\n mode (str): PIL.Image mode.\n size (tuple[int, int]): Size in (width, height) pixels for resizing.\n transform (Callable): Torchvision transform or huggingface processor to apply.\n encoder (Callable): Encode image using model.\n device (str or torch.device): Device to use.\n \"\"\"\n if mode:\n img = img.convert(mode)\n if size:\n img = img.resize(size)\n if transform:\n img = transform(img)\n\n try:\n from transformers.image_processing_utils import BaseImageProcessor\n\n if isinstance(transform, BaseImageProcessor):\n img = torch.as_tensor(img.pixel_values[0]).clone().detach() # type: ignore[assignment,attr-defined]\n except ImportError:\n pass\n if device:\n img = img.to(device) # type: ignore[attr-defined]\n if encoder:\n img = img.unsqueeze(0) # type: ignore[attr-defined]\n if encoder:\n img = encoder(img)\n return img" }, { "class_start_lineno": 1, "class_end_lineno": 81, "func_start_lineno": 49, "func_end_lineno": 81, "func_code": "def convert_images(\n images: Union[Image.Image, list[Image.Image]],\n mode: str = \"RGB\",\n size: Optional[tuple[int, int]] = None,\n transform: Optional[Callable] = None,\n encoder: Optional[Callable] = None,\n device: Optional[Union[str, torch.device]] = None,\n) -> Union[list[Image.Image], torch.Tensor]:\n \"\"\"\n Resize, transform, and otherwise convert one or more images.\n\n Args:\n images (Image, list[Image]): PIL.Image object or list of objects.\n mode (str): PIL.Image mode.\n size (tuple[int, int]): Size in (width, height) pixels for resizing.\n transform (Callable): Torchvision transform or huggingface processor to apply.\n encoder (Callable): Encode image using model.\n device (str or torch.device): Device to use.\n \"\"\"\n if isinstance(images, Image.Image):\n images = [images]\n\n converted = [\n convert_image(img, mode, size, transform, device=device) for img in images\n ]\n\n if isinstance(converted[0], torch.Tensor):\n converted = torch.stack(converted) # type: ignore[assignment,arg-type]\n\n if encoder:\n converted = encoder(converted)\n\n return converted # type: ignore[return-value]" } ]
[ "function_empty", "TDD" ]
[ "datachain.lib.image.convert_image", "datachain.lib.image.convert_images" ]
Python
1
2
{ "total_num": 41, "base_passed_num": 13 }
[ "datachain.src.datachain.lib.file.File::get_destination_path", "datachain.src.datachain.lib.file.File::export" ]
datachain
[ "datachain/lib/file.py", "datachain/lib/file.py" ]
[ "tests/unit/lib/test_file.py" ]
[ { "class_start_lineno": 125, "class_end_lineno": 468, "func_start_lineno": 396, "func_end_lineno": 414, "func_code": " def get_destination_path(self, output: str, placement: ExportPlacement) -> str:\n \"\"\"\n Returns full destination path of a file for exporting to some output\n based on export placement\n \"\"\"\n if placement == \"filename\":\n path = unquote(self.name)\n elif placement == \"etag\":\n path = f\"{self.etag}{self.get_file_suffix()}\"\n elif placement == \"fullpath\":\n path = unquote(self.get_full_name())\n source = urlparse(self.source)\n if source.scheme and source.scheme != \"file\":\n path = posixpath.join(source.netloc, path)\n elif placement == \"checksum\":\n raise NotImplementedError(\"Checksum placement not implemented yet\")\n else:\n raise ValueError(f\"Unsupported file export placement: {placement}\")\n return posixpath.join(output, path) # type: ignore[union-attr]" }, { "class_start_lineno": 125, "class_end_lineno": 468, "func_start_lineno": 297, "func_end_lineno": 319, "func_code": " def export(\n self,\n output: str,\n placement: ExportPlacement = \"fullpath\",\n use_cache: bool = True,\n link_type: Literal[\"copy\", \"symlink\"] = \"copy\",\n ) -> None:\n \"\"\"Export file to new location.\"\"\"\n if use_cache:\n self._caching_enabled = use_cache\n dst = self.get_destination_path(output, placement)\n dst_dir = os.path.dirname(dst)\n client: Client = self._catalog.get_client(dst_dir)\n client.fs.makedirs(dst_dir, exist_ok=True)\n\n if link_type == \"symlink\":\n try:\n return self._symlink_to(dst)\n except OSError as exc:\n if exc.errno not in (errno.ENOTSUP, errno.EXDEV, errno.ENOSYS):\n raise\n\n self.save(dst)" } ]
[ "function_empty" ]
[ "datachain.lib.file.File.get_destination_path", "datachain.lib.file.File.export" ]
Python
2
2
{ "total_num": 33, "base_passed_num": 25 }
[ "datachain.src.datachain.lib.signal_schema.SignalSchema::_get_flat_tree", "datachain.src.datachain.lib.convert.python_to_sql.python_to_sql", "datachain.src.datachain.lib.signal_schema.SignalSchema::to_udf_spec", "datachain.src.datachain.lib.signal_schema.SignalSchema::db_signals" ]
datachain
[ "datachain/lib/signal_schema.py", "datachain/lib/signal_schema.py", "datachain/lib/convert/python_to_sql.py", "datachain/lib/signal_schema.py", "datachain/lib/signal_schema.py" ]
[ "tests/unit/lib/test_signal_schema.py" ]
[ { "class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 630, "func_end_lineno": 639, "func_code": " def _get_flat_tree(\n self, tree: dict, prefix: list[str], depth: int\n ) -> Iterator[tuple[list[str], DataType, bool, int]]:\n for name, (type_, substree) in tree.items():\n suffix = name.split(\".\")\n new_prefix = prefix + suffix\n has_subtree = substree is not None\n yield new_prefix, type_, has_subtree, depth\n if substree is not None:\n yield from self._get_flat_tree(substree, new_prefix, depth + 1)" }, { "class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 627, "func_end_lineno": 628, "func_code": " def get_flat_tree(self) -> Iterator[tuple[list[str], DataType, bool, int]]:\n yield from self._get_flat_tree(self.tree, [], 0)" }, { "class_start_lineno": 1, "class_end_lineno": 117, "func_start_lineno": 37, "func_end_lineno": 82, "func_code": "def python_to_sql(typ): # noqa: PLR0911\n if inspect.isclass(typ):\n if issubclass(typ, SQLType):\n return typ\n if issubclass(typ, Enum):\n return str\n\n res = PYTHON_TO_SQL.get(typ)\n if res:\n return res\n\n orig = get_origin(typ)\n\n if orig in (Literal, LiteralEx):\n return String\n\n args = get_args(typ)\n if inspect.isclass(orig) and (issubclass(list, orig) or issubclass(tuple, orig)):\n if args is None:\n raise TypeError(f\"Cannot resolve type '{typ}' for flattening features\")\n\n args0 = args[0]\n if ModelStore.is_pydantic(args0):\n return Array(JSON())\n\n list_type = list_of_args_to_type(args)\n return Array(list_type)\n\n if orig is Annotated:\n # Ignoring annotations\n return python_to_sql(args[0])\n\n if inspect.isclass(orig) and issubclass(dict, orig):\n return JSON\n\n if orig == Union:\n if len(args) == 2 and (type(None) in args):\n return python_to_sql(args[0])\n\n if _is_union_str_literal(orig, args):\n return String\n\n if _is_json_inside_union(orig, args):\n return JSON\n\n raise TypeError(f\"Cannot recognize type {typ}\")" }, { "class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 387, "func_end_lineno": 395, "func_code": " def to_udf_spec(self) -> dict[str, type]:\n res = {}\n for path, type_, has_subtree, _ in self.get_flat_tree():\n if path[0] in self.setup_func:\n continue\n if not has_subtree:\n db_name = DEFAULT_DELIMITER.join(path)\n res[db_name] = python_to_sql(type_)\n return res" }, { "class_start_lineno": 135, "class_end_lineno": 751, "func_start_lineno": 481, "func_end_lineno": 503, "func_code": " def db_signals(\n self, name: Optional[str] = None, as_columns=False\n ) -> Union[list[str], list[Column]]:\n \"\"\"\n Returns DB columns as strings or Column objects with proper types\n Optionally, it can filter results by specific object, returning only his signals\n \"\"\"\n signals = [\n DEFAULT_DELIMITER.join(path)\n if not as_columns\n else Column(DEFAULT_DELIMITER.join(path), python_to_sql(_type))\n for path, _type, has_subtree, _ in self.get_flat_tree()\n if not has_subtree\n ]\n\n if name:\n signals = [\n s\n for s in signals\n if str(s) == name or str(s).startswith(f\"{name}{DEFAULT_DELIMITER}\")\n ]\n\n return signals # type: ignore[return-value]" } ]
[ "function_empty", "TDD" ]
[ "datachain.lib.signal_schema.SignalSchema._get_flat_tree", "datachain.lib.signal_schema.SignalSchema.get_flat_tree", "datachain.lib.convert.python_to_sql.python_to_sql", "datachain.lib.signal_schema.SignalSchema.to_udf_spec", "datachain.lib.signal_schema.SignalSchema.db_signals" ]
Python
1
4
{ "total_num": 58, "base_passed_num": 47 }
[ "datachain.src.datachain.func.func.Func::get_result_type", "datachain.src.datachain.lib.convert.python_to_sql.python_to_sql", "datachain.src.datachain.func.func.Func::get_column" ]
datachain
[ "datachain/func/func.py", "datachain/lib/convert/python_to_sql.py", "datachain/func/func.py", "datachain/sql/selectable.py" ]
[ "tests/unit/sql/test_array.py" ]
[ { "class_start_lineno": 29, "class_end_lineno": 422, "func_start_lineno": 361, "func_end_lineno": 373, "func_code": " def get_result_type(\n self, signals_schema: Optional[\"SignalSchema\"] = None\n ) -> \"DataType\":\n if self.result_type:\n return self.result_type\n\n if signals_schema and (col_type := self._db_col_type(signals_schema)):\n return col_type\n\n raise DataChainColumnError(\n str(self),\n \"Column name is required to infer result type\",\n )" }, { "class_start_lineno": 1, "class_end_lineno": 117, "func_start_lineno": 37, "func_end_lineno": 82, "func_code": "def python_to_sql(typ): # noqa: PLR0911\n if inspect.isclass(typ):\n if issubclass(typ, SQLType):\n return typ\n if issubclass(typ, Enum):\n return str\n\n res = PYTHON_TO_SQL.get(typ)\n if res:\n return res\n\n orig = get_origin(typ)\n\n if orig in (Literal, LiteralEx):\n return String\n\n args = get_args(typ)\n if inspect.isclass(orig) and (issubclass(list, orig) or issubclass(tuple, orig)):\n if args is None:\n raise TypeError(f\"Cannot resolve type '{typ}' for flattening features\")\n\n args0 = args[0]\n if ModelStore.is_pydantic(args0):\n return Array(JSON())\n\n list_type = list_of_args_to_type(args)\n return Array(list_type)\n\n if orig is Annotated:\n # Ignoring annotations\n return python_to_sql(args[0])\n\n if inspect.isclass(orig) and issubclass(dict, orig):\n return JSON\n\n if orig == Union:\n if len(args) == 2 and (type(None) in args):\n return python_to_sql(args[0])\n\n if _is_union_str_literal(orig, args):\n return String\n\n if _is_json_inside_union(orig, args):\n return JSON\n\n raise TypeError(f\"Cannot recognize type {typ}\")" }, { "class_start_lineno": 29, "class_end_lineno": 422, "func_start_lineno": 375, "func_end_lineno": 422, "func_code": " def get_column(\n self,\n signals_schema: Optional[\"SignalSchema\"] = None,\n label: Optional[str] = None,\n table: Optional[\"TableClause\"] = None,\n ) -> Column:\n col_type = self.get_result_type(signals_schema)\n sql_type = python_to_sql(col_type)\n\n def get_col(col: ColT, string_as_literal=False) -> ColT:\n # string_as_literal is used only for conditionals like `case()` where\n # literals are nested inside ColT as we have tuples of condition - values\n # and if user wants to set some case value as column, explicit `C(\"col\")`\n # syntax must be used to distinguish from literals\n if isinstance(col, tuple):\n return tuple(get_col(x, string_as_literal=True) for x in col)\n if isinstance(col, Func):\n return col.get_column(signals_schema, table=table)\n if isinstance(col, str) and not string_as_literal:\n column = Column(col, sql_type)\n column.table = table\n return column\n return col\n\n cols = [get_col(col) for col in self._db_cols]\n kwargs = {k: get_col(v, string_as_literal=True) for k, v in self.kwargs.items()}\n func_col = self.inner(*cols, *self.args, **kwargs)\n\n if self.is_window:\n if not self.window:\n raise DataChainParamsError(\n f\"Window function {self} requires over() clause with a window spec\",\n )\n func_col = func_col.over(\n partition_by=self.window.partition_by,\n order_by=(\n desc(self.window.order_by)\n if self.window.desc\n else self.window.order_by\n ),\n )\n\n func_col.type = sql_type() if inspect.isclass(sql_type) else sql_type\n\n if col_name := self.get_col_name(label):\n func_col = func_col.label(col_name)\n\n return func_col" }, { "class_start_lineno": 1, "class_end_lineno": 56, "func_start_lineno": 24, "func_end_lineno": 29, "func_code": "def process_column_expression(col):\n if hasattr(col, \"get_column\"):\n return 
col.get_column()\n if isinstance(col, str):\n return expression.column(col)\n return col" } ]
[ "TDD" ]
[ "datachain.func.func.Func.get_result_type", "datachain.lib.convert.python_to_sql.python_to_sql", "datachain.func.func.Func.get_column", "datachain.sql.selectable.process_column_expression" ]
Python
0
3
{ "total_num": 12, "base_passed_num": 0 }