import gradio as gr
import spaces
import torch
from threading import Thread

from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer

MODEL_ID = "nroggendorff/smallama-7b-it"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    dtype=torch.float16,
    device_map="auto",
)


@spaces.GPU
def respond(
    message,
    history: list[dict[str, str]],
    max_tokens,
    temperature,
    top_p,
):
    # Build the full conversation without mutating the history list
    # that Gradio manages internally.
    messages = history + [{"role": "user", "content": message}]

    inputs = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        tokenize=True,
        return_dict=True,
        return_tensors="pt",
    ).to(model.device)

    # Stream decoded text as it is generated, skipping the prompt
    # and special tokens.
    streamer = TextIteratorStreamer(
        tokenizer, skip_prompt=True, skip_special_tokens=True
    )

    generation_kwargs = dict(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
        do_sample=True,
        streamer=streamer,
    )

    # generate() blocks, so run it in a background thread and consume
    # the streamer here.
    thread = Thread(target=model.generate, kwargs=generation_kwargs)
    thread.start()

    # Yield the accumulated response so the chat UI updates incrementally.
    response = ""
    for token in streamer:
        response += token
        yield response


chatbot = gr.ChatInterface(
    respond,
    type="messages",
    additional_inputs=[
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.2, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p (nucleus sampling)",
        ),
    ],
)

with gr.Blocks() as demo:
    chatbot.render()

if __name__ == "__main__":
    demo.launch()