Update with commit ca543f822f73ebc69b00835c74ae927d9730b6f5
Browse files. See: https://github.com/huggingface/transformers/commit/ca543f822f73ebc69b00835c74ae927d9730b6f5
- frameworks.json +1 -0
- pipeline_tags.json +2 -0
frameworks.json
CHANGED
@@ -107,6 +107,7 @@
|
|
107 |
{"model_type":"fastspeech2_conformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
108 |
{"model_type":"flaubert","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
109 |
{"model_type":"flava","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
|
|
110 |
{"model_type":"fnet","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
111 |
{"model_type":"focalnet","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoImageProcessor"}
|
112 |
{"model_type":"fsmt","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
|
|
107 |
{"model_type":"fastspeech2_conformer","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
108 |
{"model_type":"flaubert","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
109 |
{"model_type":"flava","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
110 |
+
{"model_type":"florence2","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
|
111 |
{"model_type":"fnet","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
112 |
{"model_type":"focalnet","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoImageProcessor"}
|
113 |
{"model_type":"fsmt","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
pipeline_tags.json
CHANGED
@@ -432,6 +432,8 @@
|
|
432 |
{"model_class":"FlaxXLMRobertaForSequenceClassification","pipeline_tag":"text-classification","auto_class":"Flax_AutoModelForSequenceClassification"}
|
433 |
{"model_class":"FlaxXLMRobertaForTokenClassification","pipeline_tag":"token-classification","auto_class":"Flax_AutoModelForTokenClassification"}
|
434 |
{"model_class":"FlaxXLMRobertaModel","pipeline_tag":"feature-extraction","auto_class":"Flax_AutoModel"}
|
|
|
|
|
435 |
{"model_class":"FocalNetForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
|
436 |
{"model_class":"FocalNetModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
|
437 |
{"model_class":"FunnelBaseModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
|
|
432 |
{"model_class":"FlaxXLMRobertaForSequenceClassification","pipeline_tag":"text-classification","auto_class":"Flax_AutoModelForSequenceClassification"}
|
433 |
{"model_class":"FlaxXLMRobertaForTokenClassification","pipeline_tag":"token-classification","auto_class":"Flax_AutoModelForTokenClassification"}
|
434 |
{"model_class":"FlaxXLMRobertaModel","pipeline_tag":"feature-extraction","auto_class":"Flax_AutoModel"}
|
435 |
+
{"model_class":"Florence2ForConditionalGeneration","pipeline_tag":"image-text-to-text","auto_class":"AutoModelForImageTextToText"}
|
436 |
+
{"model_class":"Florence2Model","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
437 |
{"model_class":"FocalNetForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
|
438 |
{"model_class":"FocalNetModel","pipeline_tag":"image-feature-extraction","auto_class":"AutoModel"}
|
439 |
{"model_class":"FunnelBaseModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|