thliang01 committed
Commit eaa3ded · 1 Parent(s): c8d33df

feat: Enhance README and app functionality; add new images and update requirements


- Enhance app.py for ZeroGPU support (a minimal sketch of the decorator pattern follows this list)
- Enhance README with image licence notes
- Chore: update requirements.txt
- Add cat, hot-dog, llama, and medieval_knight images
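
For context on the "ZeroGPU support" item, here is a minimal sketch of the decorator pattern the new app.py relies on. It is illustrative only, not part of the commit, and assumes the behaviour implied by the diff below: on a ZeroGPU Space, `@spaces.GPU` attaches a GPU for the duration of the decorated call, and elsewhere the function simply runs on whatever device is available.

```python
# Minimal ZeroGPU sketch (illustration only, not part of this commit).
# Assumption: on a ZeroGPU Space, @spaces.GPU allocates a GPU for the duration
# of the decorated call; outside such a Space the call falls back to CPU.
import spaces
import torch


@spaces.GPU
def double(x: torch.Tensor) -> torch.Tensor:
    # Pick whichever device is actually available at call time.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    return (x.to(device) * 2).cpu()


if __name__ == "__main__":
    print(double(torch.ones(3)))  # tensor([2., 2., 2.])
```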

Files changed (4)
  1. README.md +11 -1
  2. app.py +29 -15
  3. cat.avif +0 -0
  4. requirements.txt +5 -1
README.md CHANGED
@@ -10,4 +10,14 @@ pinned: false
 license: mit
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference
+
+contains free-licensed images, downloaded from [unsplash](https://unsplash.com/). Curated and created by:
+
+[cat](https://unsplash.com/photos/black-and-white-cat-lying-on-brown-bamboo-chair-inside-room-gKXKBY-C-Dk)
+
+[hot-dog](https://unsplash.com/photos/hotdog-sandwich-on-white-ceramic-plate-w96PYF0Uwjs)
+
+[medieval_knight](https://huggingface.co/datasets/thliang01/medieval_knight)
+
+[llama](https://huggingface.co/datasets/thliang01/Cute-Llama)
app.py CHANGED
@@ -1,7 +1,6 @@
+import spaces
 import torch
-
-model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet18', pretrained=True).eval()
-
+import gradio as gr
 import requests
 from torchvision import transforms
 
@@ -9,17 +8,32 @@ from torchvision import transforms
 response = requests.get("https://git.io/JJkYN")
 labels = response.text.split("\n")
 
-def predict(inp):
-    inp = transforms.ToTensor()(inp).unsqueeze(0)
-    with torch.no_grad():
-        prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
-        confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
-    return confidences
-
-import gradio as gr
-
-gr.Interface(fn=predict,
-             inputs=gr.Image(type="pil"),
-             outputs=gr.Label(num_top_classes=3),
-             examples=["lion.jpg", "cheetah.jpg"],
-             css=".footer{display:none !important}").launch()
+# Load model initially (will be moved to GPU dynamically)
+model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet34', pretrained=True).eval()
+
+@spaces.GPU
+def predict(inp):
+    """
+    This function will be allocated GPU resources dynamically when called
+    """
+    # Move model to GPU when function is called
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    model.to(device)
+
+    # Process input image
+    inp = transforms.ToTensor()(inp).unsqueeze(0).to(device)
+
+    with torch.no_grad():
+        prediction = torch.nn.functional.softmax(model(inp)[0], dim=0)
+        confidences = {labels[i]: float(prediction[i]) for i in range(1000)}
+
+    return confidences
+
+# Create Gradio interface
+gr.Interface(
+    fn=predict,
+    inputs=gr.Image(type="pil"),
+    outputs=gr.Label(num_top_classes=3),
+    examples=["lion.jpg", "cheetah.jpg", "cat.avif", "hot-dog.avif", "llama.jpg", "medieval_knight.jpg"],
+    css=".footer{display:none !important}"
+).launch()
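
To sanity-check the updated classification path without launching the Gradio UI, the sketch below repeats the preprocessing, ResNet-34 forward pass, and softmax from `predict()` on one of the bundled example images. It is a CPU-only sketch, not part of the commit, and assumes the example file and the label-list URL used above are reachable.

```python
# Local sanity check of the pipeline in app.py (a sketch, not part of the commit).
# Runs on CPU and skips Gradio and the @spaces.GPU decorator entirely.
import requests
import torch
from PIL import Image
from torchvision import transforms

# Same ImageNet label list and torch.hub model as the diff above.
labels = requests.get("https://git.io/JJkYN").text.split("\n")
model = torch.hub.load('pytorch/vision:v0.6.0', 'resnet34', pretrained=True).eval()

# Any bundled example works; note that the .avif examples may additionally
# need the pillow-avif-plugin package before PIL can open them.
img = Image.open("lion.jpg").convert("RGB")
inp = transforms.ToTensor()(img).unsqueeze(0)

with torch.no_grad():
    probs = torch.nn.functional.softmax(model(inp)[0], dim=0)

# Mirror gr.Label(num_top_classes=3): print the three highest-confidence labels.
top3 = torch.topk(probs, k=3)
for score, idx in zip(top3.values.tolist(), top3.indices.tolist()):
    print(f"{labels[idx]}: {score:.3f}")
```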
cat.avif ADDED
requirements.txt CHANGED
@@ -1 +1,5 @@
-torch
+torch
+gradio
+spaces
+numpy
+torchvision
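
Since requirements.txt now pulls in four more unpinned packages, a quick import check like the one below (illustrative only, not part of the commit) can confirm they all resolve in the Space's environment.

```python
# Verify that the packages listed in requirements.txt import and report a version.
import gradio
import numpy
import spaces
import torch
import torchvision

for mod in (gradio, numpy, spaces, torch, torchvision):
    print(mod.__name__, getattr(mod, "__version__", "unknown"))
```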