import subprocess
import sys

# Install torch and transformers at runtime. pip.main() was removed in pip 10,
# so invoke pip through "python -m pip" instead. (On Hugging Face Spaces these
# dependencies would normally be listed in requirements.txt.)
subprocess.check_call([sys.executable, "-m", "pip", "install", "torch", "transformers"])

import gradio as gr
import torch
import transformers

# Load the fine-tuned KoBART checkpoint saved during training.
def load_model(model_path):
    saved_data = torch.load(
        model_path,
        map_location="cpu",
    )
    bart_best = saved_data["model"]      # fine-tuned weights (state dict)
    train_config = saved_data["config"]  # training configuration (unused here)
    tokenizer = transformers.PreTrainedTokenizerFast.from_pretrained("gogamza/kobart-base-v1")

    # Load the fine-tuned weights on top of the base KoBART model.
    model = transformers.BartForConditionalGeneration.from_pretrained("gogamza/kobart-base-v1")
    model.load_state_dict(bart_best)

    return model, tokenizer
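
# For reference, a sketch of how a checkpoint with the keys read above could
# be written by the (not shown) training script -- an assumption, not code
# from the original Space:
def save_checkpoint(model, train_config, path="./kobart-model-poem.pth"):
    torch.save({"model": model.state_dict(), "config": train_config}, path)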

# Load the model once at startup instead of on every request.
model, tokenizer = load_model(model_path="./kobart-model-poem.pth")


def inference(prompt):
    input_ids = tokenizer.encode(prompt)
    input_ids = torch.tensor(input_ids)
    input_ids = input_ids.unsqueeze(0)  # add a batch dimension: (1, seq_len)
    output = model.generate(input_ids)
    output = tokenizer.decode(output[0], skip_special_tokens=True)
    return output
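
# A minimal sketch of an alternative decoding setup (parameter values are
# assumptions, not from the original Space): generate() above defaults to
# greedy decoding, while beam search with length and repetition controls
# tends to produce less repetitive poems.
def inference_beam(prompt):
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    output = model.generate(
        input_ids,
        max_length=128,          # cap the generated poem length
        num_beams=5,             # beam search instead of greedy decoding
        no_repeat_ngram_size=2,  # discourage repeated bigrams
        early_stopping=True,     # stop beams once they emit EOS
    )
    return tokenizer.decode(output[0], skip_special_tokens=True)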

demo = gr.Interface(
    fn=inference,
    inputs="text",
    outputs="text",  # the generated poem returned by inference()
    examples=[
| "μμ μ¨μ νλ κΈΈκ°μ λ¨μ΄μ‘μ΄μ\nμλΌμ μλΌμ λͺ¨λλ€ λ°μΌλκΉμ\nμμ μ¨μ νλ λλ°μ λ¨μ΄μ‘μ΄μ\nμ«μ΄μ μ«μ΄μ ν¬κ² μλ μ μμ΄μ\nμμ μ¨μ νλ κ°μλ°μ λ¨μ΄μ‘μ΄μ\nμ μΌμΌ, μνμ μ¨μ μ΄ μκ° μμ΄μ\nμμ μ¨μ νλ μ’μ λ°μ λ¨μ΄μ‘μ΄μ\nμ’μμ μ’μμ μ μλΌμ μ’μ λ무 λκ² μ΄μ" | |
| ] | |
)

# Passing share=True to launch() generates a link that can be accessed from
# outside.
demo.launch()