o
    i                  	   @   sN  d dl Z d dlmZ d dlmZ d dlmZmZmZ d dl	m
Z
mZ e
eZeG dd dZeG dd	 d	Zd
eeeee f  dee dee fddZG dd deZG dd deZG dd deZdeeeef  deeee f fddZdd Zded
ee deeeef  fddZdeee  deee  fddZdS )    N)defaultdict)	dataclass)AnyOptionalUnion)logging	yaml_dumpc                   @   s8  e Zd ZU dZeed< eed< eed< eed< eed< dZee ed< dZ	ee ed	< dZ
ee ed
< dZee ed< dZeeeef  ed< dZee ed< dZee ed< dZeeeef  ed< dZee ed< dZee ed< dZee ed< dZee ed< edefddZdd defddZdddZdS )
EvalResultu  
    Flattened representation of individual evaluation results found in model-index of Model Cards.

    For more information on the model-index spec, see https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1.

    Args:
        task_type (`str`):
            The task identifier. Example: "image-classification".
        dataset_type (`str`):
            The dataset identifier. Example: "common_voice". Use dataset id from https://hf.co/datasets.
        dataset_name (`str`):
            A pretty name for the dataset. Example: "Common Voice (French)".
        metric_type (`str`):
            The metric identifier. Example: "wer". Use metric id from https://hf.co/metrics.
        metric_value (`Any`):
            The metric value. Example: 0.9 or "20.0 ± 1.2".
        task_name (`str`, *optional*):
            A pretty name for the task. Example: "Speech Recognition".
        dataset_config (`str`, *optional*):
            The name of the dataset configuration used in `load_dataset()`.
            Example: fr in `load_dataset("common_voice", "fr")`. See the `datasets` docs for more info:
            https://hf.co/docs/datasets/package_reference/loading_methods#datasets.load_dataset.name
        dataset_split (`str`, *optional*):
            The split used in `load_dataset()`. Example: "test".
        dataset_revision (`str`, *optional*):
            The revision (AKA Git Sha) of the dataset used in `load_dataset()`.
            Example: 5503434ddd753f426f4b38109466949a1217c2bb
        dataset_args (`dict[str, Any]`, *optional*):
            The arguments passed during `Metric.compute()`. Example for `bleu`: `{"max_order": 4}`
        metric_name (`str`, *optional*):
            A pretty name for the metric. Example: "Test WER".
        metric_config (`str`, *optional*):
            The name of the metric configuration used in `load_metric()`.
            Example: bleurt-large-512 in `load_metric("bleurt", "bleurt-large-512")`.
            See the `datasets` docs for more info: https://huggingface.co/docs/datasets/v2.1.0/en/loading#load-configurations
        metric_args (`dict[str, Any]`, *optional*):
            The arguments passed during `Metric.compute()`. Example for `bleu`: max_order: 4
        verified (`bool`, *optional*):
            Indicates whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not. Automatically computed by Hugging Face, do not set.
        verify_token (`str`, *optional*):
            A JSON Web Token that is used to verify whether the metrics originate from Hugging Face's [evaluation service](https://huggingface.co/spaces/autoevaluate/model-evaluator) or not.
        source_name (`str`, *optional*):
            The name of the source of the evaluation result. Example: "Open LLM Leaderboard".
        source_url (`str`, *optional*):
            The URL of the source of the evaluation result. Example: "https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard".
    	task_typedataset_typedataset_namemetric_typemetric_valueN	task_namedataset_configdataset_splitdataset_revisiondataset_argsmetric_namemetric_configmetric_argsverifiedverify_tokensource_name
source_urlreturnc                 C   s   | j | j| j| j| jfS )z9Returns a tuple that uniquely identifies this evaluation.)r
   r   r   r   r   self r   c/var/www/html/karishye-ai-python/venv/lib/python3.10/site-packages/huggingface_hub/repocard_data.pyunique_identifier   s   zEvalResult.unique_identifierotherc                 C   sD   | j  D ]\}}|dkrq|dkrt| |t||kr dS qdS )zx
        Return True if `self` and `other` describe exactly the same metric but with a
        different value.
        r   r   FT)__dict__itemsgetattr)r   r!   key_r   r   r   is_equal_except_value   s   z EvalResult.is_equal_except_valuec                 C   s$   | j d ur| jd u rtdd S d S )NzAIf `source_name` is provided, `source_url` must also be provided.)r   r   
ValueErrorr   r   r   r   __post_init__   s   zEvalResult.__post_init__)r   N)__name__
__module____qualname____doc__str__annotations__r   r   r   r   r   r   r   dictr   r   r   r   boolr   r   r   propertytupler    r'   r)   r   r   r   r   r	      s.   
 3
r	   c                   @   s   e Zd ZdZd"defddZdd Zdd	 Zd#dee	e
  de
fddZdd Zdd Zd$de
dedefddZd$de
dedefddZde
defddZde
dedd
fddZde
defddZdefd d!Zd
S )%CardDataa  Structure containing metadata from a RepoCard.

    [`CardData`] is the parent class of [`ModelCardData`] and [`DatasetCardData`].

    Metadata can be exported as a dictionary or YAML. Export can be customized to alter the representation of the data
    (example: flatten evaluation results). `CardData` behaves as a dictionary (can get, pop, set values) but do not
    inherit from `dict` to allow this export step.
    Fignore_metadata_errorsc                 K   s   | j | d S N)r"   update)r   r5   kwargsr   r   r   __init__   s   zCardData.__init__c                 C   s(   t | j}| | dd | D S )zConverts CardData to a dict.

        Returns:
            `dict`: CardData represented as a dictionary ready to be dumped to a YAML
            block for inclusion in a README.md file.
        c                 S   s   i | ]\}}|d ur||qS r6   r   ).0r%   valuer   r   r   
<dictcomp>   s    z$CardData.to_dict.<locals>.<dictcomp>)copydeepcopyr"   _to_dictr#   r   	data_dictr   r   r   to_dict   s   
zCardData.to_dictc                 C   s   dS )zUse this method in child classes to alter the dict representation of the data. Alter the dict in-place.

        Args:
            data_dict (`dict`): The raw dict representation of the card data.
        Nr   r@   r   r   r   r?      s   zCardData._to_dictNoriginal_orderr   c                    sH   |r fdd|t t j t|  D  _t  d|d S )a
  Dumps CardData to a YAML block for inclusion in a README.md file.

        Args:
            line_break (str, *optional*):
                The line break to use when dumping to yaml.

        Returns:
            `str`: CardData represented as a YAML block.
        c                    s"   i | ]}| j v r| j | qS r   r"   )r:   kr   r   r   r<      s
    

z$CardData.to_yaml.<locals>.<dictcomp>F)	sort_keys
line_break)listsetr"   keysr   rB   strip)r   rG   rC   r   r   r   to_yaml   s
   

zCardData.to_yamlc                 C   
   t | jS r6   )reprr"   r   r   r   r   __repr__   s   
zCardData.__repr__c                 C   s   |   S r6   )rL   r   r   r   r   __str__   s   zCardData.__str__r%   defaultc                 C   s   | j |}|du r|S |S )#Get value for a given metadata key.N)r"   get)r   r%   rQ   r;   r   r   r   rS      s   zCardData.getc                 C   s   | j ||S )z#Pop value for a given metadata key.)r"   pop)r   r%   rQ   r   r   r   rT         zCardData.popc                 C   s
   | j | S )rR   rD   r   r%   r   r   r   __getitem__      
zCardData.__getitem__r;   c                 C   s   || j |< dS )z#Set value for a given metadata key.NrD   )r   r%   r;   r   r   r   __setitem__   rU   zCardData.__setitem__c                 C   s
   || j v S )z%Check if a given metadata key is set.rD   rV   r   r   r   __contains__   rX   zCardData.__contains__c                 C   rM   )z'Return the number of metadata keys set.)lenr"   r   r   r   r   __len__   rX   zCardData.__len__)F)NNr6   )r*   r+   r,   r-   r1   r9   rB   r?   r   rH   r.   rL   rO   rP   r   rS   rT   rW   rY   rZ   intr\   r   r   r   r   r4      s    	r4   eval_results
model_namer   c                 C   s`   | d u rg S t | tr| g} t | trtdd | D s&tdt|  d|d u r.td| S )Nc                 s   s    | ]}t |tV  qd S r6   )
isinstancer	   )r:   rr   r   r   	<genexpr>   s    z)_validate_eval_results.<locals>.<genexpr>zM`eval_results` should be of type `EvalResult` or a list of `EvalResult`, got .z7Passing `eval_results` requires `model_name` to be set.)r`   r	   rH   allr(   type)r^   r_   r   r   r   _validate_eval_results   s   
rf   c                       s   e Zd ZdZdddddddddddddddeeeee f  deeeee f  deee  deeeee f  d	ee d
ee dee dee deee  dee dee deee  de	f fddZ
dd Z  ZS )ModelCardDataaQ  Model Card Metadata that is used by Hugging Face Hub when included at the top of your README.md

    Args:
        base_model (`str` or `list[str]`, *optional*):
            The identifier of the base model from which the model derives. This is applicable for example if your model is a
            fine-tune or adapter of an existing model. The value must be the ID of a model on the Hub (or a list of IDs
            if your model derives from multiple models). Defaults to None.
        datasets (`Union[str, list[str]]`, *optional*):
            Dataset or list of datasets that were used to train this model. Should be a dataset ID
            found on https://hf.co/datasets. Defaults to None.
        eval_results (`Union[list[EvalResult], EvalResult]`, *optional*):
            List of `huggingface_hub.EvalResult` that define evaluation results of the model. If provided,
            `model_name` is used to as a name on PapersWithCode's leaderboards. Defaults to `None`.
        language (`Union[str, list[str]]`, *optional*):
            Language of model's training data or metadata. It must be an ISO 639-1, 639-2 or
            639-3 code (two/three letters), or a special value like "code", "multilingual". Defaults to `None`.
        library_name (`str`, *optional*):
            Name of library used by this model. Example: keras or any library from
            https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/model-libraries.ts.
            Defaults to None.
        license (`str`, *optional*):
            License of this model. Example: apache-2.0 or any license from
            https://huggingface.co/docs/hub/repositories-licenses. Defaults to None.
        license_name (`str`, *optional*):
            Name of the license of this model. Defaults to None. To be used in conjunction with `license_link`.
            Common licenses (Apache-2.0, MIT, CC-BY-SA-4.0) do not need a name. In that case, use `license` instead.
        license_link (`str`, *optional*):
            Link to the license of this model. Defaults to None. To be used in conjunction with `license_name`.
            Common licenses (Apache-2.0, MIT, CC-BY-SA-4.0) do not need a link. In that case, use `license` instead.
        metrics (`list[str]`, *optional*):
            List of metrics used to evaluate this model. Should be a metric name that can be found
            at https://hf.co/metrics. Example: 'accuracy'. Defaults to None.
        model_name (`str`, *optional*):
            A name for this model. It is used along with
            `eval_results` to construct the `model-index` within the card's metadata. The name
            you supply here is what will be used on PapersWithCode's leaderboards. If None is provided
            then the repo name is used as a default. Defaults to None.
        pipeline_tag (`str`, *optional*):
            The pipeline tag associated with the model. Example: "text-classification".
        tags (`list[str]`, *optional*):
            List of tags to add to your model that can be used when filtering on the Hugging
            Face Hub. Defaults to None.
        ignore_metadata_errors (`str`):
            If True, errors while parsing the metadata section will be ignored. Some information might be lost during
            the process. Use it at your own risk.
        kwargs (`dict`, *optional*):
            Additional metadata that will be added to the model card. Defaults to None.

    Example:
        ```python
        >>> from huggingface_hub import ModelCardData
        >>> card_data = ModelCardData(
        ...     language="en",
        ...     license="mit",
        ...     library_name="timm",
        ...     tags=['image-classification', 'resnet'],
        ... )
        >>> card_data.to_dict()
        {'language': 'en', 'license': 'mit', 'library_name': 'timm', 'tags': ['image-classification', 'resnet']}

        ```
    NF)
base_modeldatasetsr^   languagelibrary_namelicenselicense_namelicense_linkmetricsr_   pipeline_tagtagsr5   rh   ri   r^   rj   rk   rl   rm   rn   ro   r_   rp   rq   r5   c             
      sL  || _ || _|| _|| _|| _|| _|| _|| _|	| _|
| _	|| _
t|| _|dd }|rezt|\}
}|
| _	|| _W n( ttfyd } z|rNtd ntd|j d| dW Y d }~nd }~ww t jdi | | jrzt| j| j	| _W d S  ty } z|rtd| d ntd| |W Y d }~d S d }~ww d S )	Nmodel-indexz<Invalid model-index. Not loading eval results into CardData.z4Invalid `model_index` in metadata cannot be parsed:  z. Pass `ignore_metadata_errors=True` to ignore this error while loading a Model Card. Warning: some information will be lost. Use it at your own risk.z!Failed to validate eval_results: z). Not loading eval results into CardData.r   )rh   ri   r^   rj   rk   rl   rm   rn   ro   r_   rp   _to_unique_listrq   rT   model_index_to_eval_resultsKeyError	TypeErrorloggerwarningr(   	__class__superr9   rf   	Exception)r   rh   ri   r^   rj   rk   rl   rm   rn   ro   r_   rp   rq   r5   r8   model_indexerrorerz   r   r   r9   I  sN   


zModelCardData.__init__c                 C   s0   | j durt| j| j |d< |d= |d= dS dS )z[Format the internal data dict. In this case, we convert eval results to a valid model indexNrr   r^   r_   )r^   eval_results_to_model_indexr_   r@   r   r   r   r?     s   
zModelCardData._to_dict)r*   r+   r,   r-   r   r   r.   rH   r	   r1   r9   r?   __classcell__r   r   r   r   rg   	  sV    B
	


:rg   c                       s&  e Zd ZdZddddddddddddddddeeeee f  deeeee f  deeeee f  deeeee f  d	eeeee f  d
eeeee f  deee  deeeee f  deeeee f  dee dee dee deeeee f  de	f fddZ
dd Z  ZS )DatasetCardDataa	  Dataset Card Metadata that is used by Hugging Face Hub when included at the top of your README.md

    Args:
        language (`list[str]`, *optional*):
            Language of dataset's data or metadata. It must be an ISO 639-1, 639-2 or
            639-3 code (two/three letters), or a special value like "code", "multilingual".
        license (`Union[str, list[str]]`, *optional*):
            License(s) of this dataset. Example: apache-2.0 or any license from
            https://huggingface.co/docs/hub/repositories-licenses.
        annotations_creators (`Union[str, list[str]]`, *optional*):
            How the annotations for the dataset were created.
            Options are: 'found', 'crowdsourced', 'expert-generated', 'machine-generated', 'no-annotation', 'other'.
        language_creators (`Union[str, list[str]]`, *optional*):
            How the text-based data in the dataset was created.
            Options are: 'found', 'crowdsourced', 'expert-generated', 'machine-generated', 'other'
        multilinguality (`Union[str, list[str]]`, *optional*):
            Whether the dataset is multilingual.
            Options are: 'monolingual', 'multilingual', 'translation', 'other'.
        size_categories (`Union[str, list[str]]`, *optional*):
            The number of examples in the dataset. Options are: 'n<1K', '1K<n<10K', '10K<n<100K',
            '100K<n<1M', '1M<n<10M', '10M<n<100M', '100M<n<1B', '1B<n<10B', '10B<n<100B', '100B<n<1T', 'n>1T', and 'other'.
        source_datasets (`list[str]]`, *optional*):
            Indicates whether the dataset is an original dataset or extended from another existing dataset.
            Options are: 'original' and 'extended'.
        task_categories (`Union[str, list[str]]`, *optional*):
            What categories of task does the dataset support?
        task_ids (`Union[str, list[str]]`, *optional*):
            What specific tasks does the dataset support?
        paperswithcode_id (`str`, *optional*):
            ID of the dataset on PapersWithCode.
        pretty_name (`str`, *optional*):
            A more human-readable name for the dataset. (ex. "Cats vs. Dogs")
        train_eval_index (`dict`, *optional*):
            A dictionary that describes the necessary spec for doing evaluation on the Hub.
            If not provided, it will be gathered from the 'train-eval-index' key of the kwargs.
        config_names (`Union[str, list[str]]`, *optional*):
            A list of the available dataset configs for the dataset.
    NF)rj   rl   annotations_creatorslanguage_creatorsmultilingualitysize_categoriessource_datasetstask_categoriestask_idspaperswithcode_idpretty_nametrain_eval_indexconfig_namesr5   rj   rl   r   r   r   r   r   r   r   r   r   r   r   r5   c                   sp   || _ || _|| _|| _|| _|| _|| _|| _|	| _|
| _	|| _
|| _|p+|dd | _t jdi | d S )Ntrain-eval-indexr   )r   r   rj   rl   r   r   r   r   r   r   r   r   rT   r   r{   r9   )r   rj   rl   r   r   r   r   r   r   r   r   r   r   r   r5   r8   r   r   r   r9     s   zDatasetCardData.__init__c                 C   s   | d|d< d S )Nr   r   )rT   r@   r   r   r   r?     s   zDatasetCardData._to_dict)r*   r+   r,   r-   r   r   r.   rH   r0   r1   r9   r?   r   r   r   r   r   r     s\    *
	
$r   c                       s   e Zd ZdZddddddddddddddee dee dee dee d	ee d
ee dee dee deee  deee  deee  def fddZ	  Z
S )SpaceCardDataa	  Space Card Metadata that is used by Hugging Face Hub when included at the top of your README.md

    To get an exhaustive reference of Spaces configuration, please visit https://huggingface.co/docs/hub/spaces-config-reference#spaces-configuration-reference.

    Args:
        title (`str`, *optional*)
            Title of the Space.
        sdk (`str`, *optional*)
            SDK of the Space (one of `gradio`, `streamlit`, `docker`, or `static`).
        sdk_version (`str`, *optional*)
            Version of the used SDK (if Gradio/Streamlit sdk).
        python_version (`str`, *optional*)
            Python version used in the Space (if Gradio/Streamlit sdk).
        app_file (`str`, *optional*)
            Path to your main application file (which contains either gradio or streamlit Python code, or static html code).
            Path is relative to the root of the repository.
        app_port (`str`, *optional*)
            Port on which your application is running. Used only if sdk is `docker`.
        license (`str`, *optional*)
            License of this model. Example: apache-2.0 or any license from
            https://huggingface.co/docs/hub/repositories-licenses.
        duplicated_from (`str`, *optional*)
            ID of the original Space if this is a duplicated Space.
        models (list[`str`], *optional*)
            List of models related to this Space. Should be a dataset ID found on https://hf.co/models.
        datasets (`list[str]`, *optional*)
            List of datasets related to this Space. Should be a dataset ID found on https://hf.co/datasets.
        tags (`list[str]`, *optional*)
            List of tags to add to your Space that can be used when filtering on the Hub.
        ignore_metadata_errors (`str`):
            If True, errors while parsing the metadata section will be ignored. Some information might be lost during
            the process. Use it at your own risk.
        kwargs (`dict`, *optional*):
            Additional metadata that will be added to the space card.

    Example:
        ```python
        >>> from huggingface_hub import SpaceCardData
        >>> card_data = SpaceCardData(
        ...     title="Dreambooth Training",
        ...     license="mit",
        ...     sdk="gradio",
        ...     duplicated_from="multimodalart/dreambooth-training"
        ... )
        >>> card_data.to_dict()
        {'title': 'Dreambooth Training', 'sdk': 'gradio', 'license': 'mit', 'duplicated_from': 'multimodalart/dreambooth-training'}
        ```
    NF)titlesdksdk_versionpython_versionapp_fileapp_portrl   duplicated_frommodelsri   rq   r5   r   r   r   r   r   r   rl   r   r   ri   rq   r5   c                   s\   || _ || _|| _|| _|| _|| _|| _|| _|	| _|
| _	t
|| _t jdi | d S )Nr   )r   r   r   r   r   r   rl   r   r   ri   rt   rq   r{   r9   )r   r   r   r   r   r   r   rl   r   r   ri   rq   r5   r8   r   r   r   r9     s   
zSpaceCardData.__init__)r*   r+   r,   r-   r   r.   r]   rH   r1   r9   r   r   r   r   r   r     sN    4	



r   r}   c                 C   s  g }| D ]}|d }|d }|D ]}|d d }|d  d}|d d }|d d }	|d  d}
|d  d}|d  d}|d  d	}| d
i  d}| d
i  d}|d D ]`}|d }|d }| d}| d	}| d}| d}| d}td!i d|d|d|	d|d|d|d|
d|d|d|d|d|d|d|d|d|d|}|| q]qq||fS )"a  Takes in a model index and returns the model name and a list of `huggingface_hub.EvalResult` objects.

    A detailed spec of the model index can be found here:
    https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1

    Args:
        model_index (`list[dict[str, Any]]`):
            A model index data structure, likely coming from a README.md file on the
            Hugging Face Hub.

    Returns:
        model_name (`str`):
            The name of the model as found in the model index. This is used as the
            identifier for the model on leaderboards like PapersWithCode.
        eval_results (`list[EvalResult]`):
            A list of `huggingface_hub.EvalResult` objects containing the metrics
            reported in the provided model_index.

    Example:
        ```python
        >>> from huggingface_hub.repocard_data import model_index_to_eval_results
        >>> # Define a minimal model index
        >>> model_index = [
        ...     {
        ...         "name": "my-cool-model",
        ...         "results": [
        ...             {
        ...                 "task": {
        ...                     "type": "image-classification"
        ...                 },
        ...                 "dataset": {
        ...                     "type": "beans",
        ...                     "name": "Beans"
        ...                 },
        ...                 "metrics": [
        ...                     {
        ...                         "type": "accuracy",
        ...                         "value": 0.9
        ...                     }
        ...                 ]
        ...             }
        ...         ]
        ...     }
        ... ]
        >>> model_name, eval_results = model_index_to_eval_results(model_index)
        >>> model_name
        'my-cool-model'
        >>> eval_results[0].task_type
        'image-classification'
        >>> eval_results[0].metric_type
        'accuracy'

        ```
    nameresultstaskre   datasetconfigsplitrevisionargssourceurlro   r;   r   verifyTokenr
   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   Nr   )rS   r	   append)r}   r^   elemr   r   resultr
   r   r   r   r   r   r   r   r   r   metricr   r   r   r   r   r   r   eval_resultr   r   r   ru   +  s|   8




	
)ru   c                 C   sN   t | tttfrt| dd | D S t | tr%t| dd |  D S | S )zk
    Recursively remove `None` values from a dict. Borrowed from: https://stackoverflow.com/a/20558778
    c                 s   s     | ]}|d urt |V  qd S r6   _remove_none)r:   xr   r   r   rb     s    z_remove_none.<locals>.<genexpr>c                 s   s4    | ]\}}|d ur|d urt |t |fV  qd S r6   r   )r:   rE   vr   r   r   rb     s   2 )r`   rH   r3   rI   re   r0   r#   )objr   r   r   r     s
   
r   c           
   	   C   s   t t}|D ]
}||j | qg }| D ]@}|d }|j|jd|j|j|j	|j
|j|jddd |D d}|jdurRd|ji}|jdurN|j|d	< ||d
< || q| |dg}	t|	S )a  Takes in given model name and list of `huggingface_hub.EvalResult` and returns a
    valid model-index that will be compatible with the format expected by the
    Hugging Face Hub.

    Args:
        model_name (`str`):
            Name of the model (ex. "my-cool-model"). This is used as the identifier
            for the model on leaderboards like PapersWithCode.
        eval_results (`list[EvalResult]`):
            List of `huggingface_hub.EvalResult` objects containing the metrics to be
            reported in the model-index.

    Returns:
        model_index (`list[dict[str, Any]]`): The eval_results converted to a model-index.

    Example:
        ```python
        >>> from huggingface_hub.repocard_data import eval_results_to_model_index, EvalResult
        >>> # Define minimal eval_results
        >>> eval_results = [
        ...     EvalResult(
        ...         task_type="image-classification",  # Required
        ...         dataset_type="beans",  # Required
        ...         dataset_name="Beans",  # Required
        ...         metric_type="accuracy",  # Required
        ...         metric_value=0.9,  # Required
        ...     )
        ... ]
        >>> eval_results_to_model_index("my-cool-model", eval_results)
        [{'name': 'my-cool-model', 'results': [{'task': {'type': 'image-classification'}, 'dataset': {'name': 'Beans', 'type': 'beans'}, 'metrics': [{'type': 'accuracy', 'value': 0.9}]}]}]

        ```
    r   )re   r   )r   re   r   r   r   r   c              
   S   s.   g | ]}|j |j|j|j|j|j|jd qS ))re   r;   r   r   r   r   r   )r   r   r   r   r   r   r   )r:   r   r   r   r   
<listcomp>  s    
z/eval_results_to_model_index.<locals>.<listcomp>)r   r   ro   Nr   r   r   )r   r   )r   rH   r    r   valuesr
   r   r   r   r   r   r   r   r   r   r   )
r_   r^   task_and_ds_types_mapr   model_index_datar   sample_resultdatar   r}   r   r   r   r     s@   %



r   rq   c                 C   s0   | d u r| S g }| D ]}||vr| | q
|S r6   )r   )rq   unique_tagstagr   r   r   rt     s   
rt   )r=   collectionsr   dataclassesr   typingr   r   r   huggingface_hub.utilsr   r   
get_loggerr*   rx   r	   r4   rH   r.   rf   rg   r   r   r0   r3   ru   r   r   rt   r   r   r   r   <module>   s4    
 S
 P*Qh&&\