botsi committed (verified)
Commit 906b1dc · 1 Parent(s): c1b5eae

Update app.py

Files changed (1): app.py (+7 -7)
app.py CHANGED
@@ -29,16 +29,16 @@ As a derivate work of [Llama-2-7b-chat](https://huggingface.co/meta-llama/Llama-
 this demo is governed by the original [license](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/LICENSE.txt) and [acceptable use policy](https://huggingface.co/spaces/huggingface-projects/llama-2-7b-chat/blob/main/USE_POLICY.md).
 """
 
-'''if not torch.cuda.is_available():
+if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
-'''
 
-'''if torch.cuda.is_available():'''
-model_id = "meta-llama/Llama-2-7b-chat-hf"
-model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
-tokenizer = AutoTokenizer.from_pretrained(model_id)
-tokenizer.use_default_system_prompt = False
 
+if torch.cuda.is_available():
+    model_id = "meta-llama/Llama-2-7b-chat-hf"
+    model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    tokenizer.use_default_system_prompt = False
+
 
 @spaces.GPU
 def generate(
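
Note on the change: in the previous revision the '''...''' markers turned the first CUDA check into an inert string literal and left the model-loading lines to run unguarded at import time; this commit restores the guarded structure of the upstream llama-2-7b-chat Space. Below is a minimal sketch of the resulting pattern, assuming the imports app.py already uses (torch, spaces, transformers); the DESCRIPTION value and the generate() body are illustrative placeholders, not the Space's actual code.

import torch
import spaces
from transformers import AutoModelForCausalLM, AutoTokenizer

DESCRIPTION = "Llama-2 7B Chat"  # placeholder; the Space builds a longer Markdown description

if not torch.cuda.is_available():
    # Surface a warning in the UI when no GPU is attached; the demo does not run on CPU.
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"

if torch.cuda.is_available():
    # Download and load the 7B model only when a GPU is available.
    model_id = "meta-llama/Llama-2-7b-chat-hf"
    model = AutoModelForCausalLM.from_pretrained(
        model_id, torch_dtype=torch.float16, device_map="auto"
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.use_default_system_prompt = False


@spaces.GPU  # request a ZeroGPU slice for the duration of each call
def generate(message: str) -> str:
    # Illustrative stub; the real generate() streams chat completions from the model.
    ...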