How to use 🦙:

import torch
import bitsandbytes as bnb
from peft import PeftModel, PeftConfig, prepare_model_for_int8_training, LoraConfig, get_peft_model
from transformers import LlamaTokenizer, LlamaForCausalLM, GenerationConfig

# PEFT adapter repository with LoRA weights fine-tuned for Arabic instruction following.
peft_model_id = "Yasbok/Alpaca_instruction_fine_tune_Arabic"
# config = PeftConfig.from_pretrained(peft_model_id)

# Load the base LLaMA-7B tokenizer and weights. 8-bit quantization halves-plus
# the GPU memory footprint; device_map="auto" lets accelerate place layers
# across the available devices automatically.
tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
model = LlamaForCausalLM.from_pretrained("decapoda-research/llama-7b-hf",
                                          load_in_8bit=True,
                                          device_map="auto",)
# Wrap the base model with the LoRA adapter weights (the fine-tuned part).
model = PeftModel.from_pretrained(model, peft_model_id)

# Based on the inference code by `tloen/alpaca-lora`
def generate_prompt(instruction, input=None):
    if input:
        return f"""يوجد أدناه تعليمات تصف مهمة ، إلى جانب إدخال يوفر المزيد من السياق. اكتب ردًا يكمل الطلب بشكل مناسب.


### تعليمات:
{instruction}

### مدخل:
{input}

### انتاج:"""
    else:
        return f"""يوجد أدناه إرشادات تصف مهمة. يُرجى كتابة رد يكمل الطلب بشكل مناسب.



### تعليمات:
{instruction}

### انتاج:"""

# Decoding settings shared by every call to `evaluate` below:
# low temperature plus beam search favours focused, less random output.
generation_config = GenerationConfig(
    temperature=0.2,
    top_p=0.75,
    num_beams=4,
)
# Evaluate the model:
def evaluate(instruction, input=None):
    """Run inference for one instruction and print/return the response(s).

    Builds the Arabic Alpaca prompt, generates up to 256 new tokens with the
    module-level `generation_config`, prints each decoded response, and
    returns them as a list of strings (one per generated sequence).
    """
    prompt = generate_prompt(instruction, input)
    inputs = tokenizer(prompt, return_tensors="pt")
    input_ids = inputs["input_ids"].cuda()
    generation_output = model.generate(
        input_ids=input_ids,
        generation_config=generation_config,
        return_dict_in_generate=True,
        output_scores=True,
        max_new_tokens=256
    )
    responses = []
    for seq in generation_output.sequences:
        decoded = tokenizer.decode(seq)
        # str.partition never raises, unlike split("### انتاج:")[1] which
        # would IndexError if the marker is missing from the decoded text.
        _, marker, after = decoded.partition("### انتاج:")
        response = after.strip() if marker else decoded.strip()
        print("انتاج:", response)
        responses.append(response)
    return responses

# Interactive entry point: read an instruction from stdin and run inference.
evaluate(input("تعليمات: "))
Downloads last month

-

Downloads are not tracked for this model. How to track
Inference Providers NEW
This model is not currently available via any of the supported Inference Providers.
The model cannot be deployed to the HF Inference API: The model has no pipeline_tag.

Dataset used to train Yasbok/Alpaca_instruction_fine_tune_Arabic

Space using Yasbok/Alpaca_instruction_fine_tune_Arabic 1