Spaces: Running on Zero
update app.py, styles.py, manifest, service worker and icon to prepare for mobile
- .DS_Store +0 -0
- ConvNextV2Base_best_model.pth +0 -3
- app.py +48 -35
- assets/icon-192.png +0 -0
- assets/icon-512.png +0 -0
- manifest.json +21 -0
- service-worker.js +23 -0
- styles.py +101 -25
.DS_Store
ADDED
Binary file (6.15 kB).
ConvNextV2Base_best_model.pth
DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:b3d10b865b83d0fcda631e31e1aac7b2b51f43dc139674706611bd5c1b68afd8
-size 413251664
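Note: app.py below still calls torch.load('ConvNextV2Base_best_model.pth', ...), so with the checkpoint removed from the repo the Space presumably obtains the weights some other way at runtime. A minimal sketch of one option, assuming the checkpoint is hosted in a separate Hub repo (the repo id below is hypothetical, not part of this commit):

# Sketch only: fetch the checkpoint at runtime instead of bundling it in the Space.
# The repo_id is hypothetical; substitute wherever the weights actually live.
from huggingface_hub import hf_hub_download
import torch

checkpoint_path = hf_hub_download(
    repo_id="your-username/pawmatch-weights",  # hypothetical repo
    filename="ConvNextV2Base_best_model.pth",
)
checkpoint = torch.load(checkpoint_path, map_location="cpu")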
app.py
CHANGED
@@ -61,7 +61,7 @@ class ModelManager:
         if not ModelManager._initialized:
             self._device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
             ModelManager._initialized = True
-
+
     @property
     def device(self):
         if self._device is None:
@@ -78,10 +78,10 @@ class ModelManager:
     def breed_model(self):
         if self._breed_model is None:
             self._breed_model = BaseModel(
-                num_classes=len(dog_breeds),
+                num_classes=len(dog_breeds),
                 device=self.device
             ).to(self.device)
-
+
             checkpoint = torch.load(
                 'ConvNextV2Base_best_model.pth',
                 map_location=self.device
@@ -110,18 +110,18 @@ def preprocess_image(image):
 def predict_single_dog(image):
     """Predicts dog breed for a single image"""
     image_tensor = preprocess_image(image).to(model_manager.device)
-
+
     with torch.no_grad():
         logits = model_manager.breed_model(image_tensor)[0]
         probs = F.softmax(logits, dim=1)
-
+
         top5_prob, top5_idx = torch.topk(probs, k=5)
         breeds = [dog_breeds[idx.item()] for idx in top5_idx[0]]
         probabilities = [prob.item() for prob in top5_prob[0]]
-
+
         sum_probs = sum(probabilities[:3])
         relative_probs = [f"{(prob/sum_probs * 100):.2f}%" for prob in probabilities[:3]]
-
+
         return probabilities[0], breeds[:3], relative_probs

 def enhanced_preprocess(image, is_standing=False, has_overlap=False):
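For reference, the relative_probs step in the hunk above renormalizes only the top-3 probabilities so they sum to 100%. A standalone sketch of that arithmetic on dummy values (not the app's code):

# Sketch: renormalize the top-3 probabilities to percentages, as in predict_single_dog.
probabilities = [0.50, 0.25, 0.10, 0.08, 0.07]   # dummy top-5 softmax scores
sum_probs = sum(probabilities[:3])               # 0.85
relative_probs = [f"{(p / sum_probs * 100):.2f}%" for p in probabilities[:3]]
print(relative_probs)                            # ['58.82%', '29.41%', '11.76%']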
@@ -131,7 +131,7 @@ def enhanced_preprocess(image, is_standing=False, has_overlap=False):
     """
     target_size = 224
     w, h = image.size
-
+
     if is_standing:
         if h > w * 1.5:
             new_h = target_size
@@ -145,13 +145,13 @@ def enhanced_preprocess(image, is_standing=False, has_overlap=False):
         scale = min(target_size/w, target_size/h)
         new_w = int(w * scale)
         new_h = int(h * scale)
-
+
     resized = image.resize((new_w, new_h), Image.Resampling.LANCZOS)
     final_image = Image.new('RGB', (target_size, target_size), (240, 240, 240))
     paste_x = (target_size - new_w) // 2
     paste_y = (target_size - new_h) // 2
     final_image.paste(resized, (paste_x, paste_y))
-
+
     return final_image

 @spaces.GPU
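enhanced_preprocess above letterboxes each crop onto a 224x224 gray canvas. A quick check of the resize-and-center arithmetic for a hypothetical 300x150 crop (values illustrative only):

# Sketch: the pad-to-square arithmetic used by enhanced_preprocess.
target_size = 224
w, h = 300, 150                                  # hypothetical crop size
scale = min(target_size / w, target_size / h)    # ~0.7467
new_w, new_h = int(w * scale), int(h * scale)    # 224 x 112
paste_x = (target_size - new_w) // 2             # 0
paste_y = (target_size - new_h) // 2             # 56
print(new_w, new_h, paste_x, paste_y)            # 224 112 0 56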
@@ -163,7 +163,7 @@ def detect_multiple_dogs(image, conf_threshold=0.3, iou_threshold=0.3):
     results = model_manager.yolo_model(image, conf=conf_threshold, iou=iou_threshold)[0]
     img_width, img_height = image.size
     detected_boxes = []
-
+
     # Phase 1: Initial detection and processing
     for box in results.boxes:
         if box.cls.item() == 16:  # Dog class
@@ -172,7 +172,7 @@ def detect_multiple_dogs(image, conf_threshold=0.3, iou_threshold=0.3):
             x1, y1, x2, y2 = map(int, xyxy)
             w = x2 - x1
             h = y2 - y1
-
+
             detected_boxes.append({
                 'coords': [x1, y1, x2, y2],
                 'width': w,
@@ -183,55 +183,55 @@ def detect_multiple_dogs(image, conf_threshold=0.3, iou_threshold=0.3):
                 'confidence': confidence,
                 'aspect_ratio': w / h if h != 0 else 1
             })
-
+
     if not detected_boxes:
         return [(image, 1.0, [0, 0, img_width, img_height], False)]
-
+
     # Phase 2: Analysis of detection relationships
     avg_height = sum(box['height'] for box in detected_boxes) / len(detected_boxes)
     avg_width = sum(box['width'] for box in detected_boxes) / len(detected_boxes)
     avg_area = sum(box['area'] for box in detected_boxes) / len(detected_boxes)
-
+
     def calculate_iou(box1, box2):
         x1 = max(box1['coords'][0], box2['coords'][0])
         y1 = max(box1['coords'][1], box2['coords'][1])
         x2 = min(box1['coords'][2], box2['coords'][2])
         y2 = min(box1['coords'][3], box2['coords'][3])
-
+
         if x2 <= x1 or y2 <= y1:
             return 0.0
-
+
         intersection = (x2 - x1) * (y2 - y1)
         area1 = box1['area']
         area2 = box2['area']
         return intersection / (area1 + area2 - intersection)
-
+
     # Phase 3: Processing each detection
     processed_boxes = []
     overlap_threshold = 0.2
-
+
     for i, box_info in enumerate(detected_boxes):
         x1, y1, x2, y2 = box_info['coords']
         w = box_info['width']
         h = box_info['height']
         center_x = box_info['center_x']
         center_y = box_info['center_y']
-
+
         # Check for overlaps
         has_overlap = False
         for j, other_box in enumerate(detected_boxes):
             if i != j and calculate_iou(box_info, other_box) > overlap_threshold:
                 has_overlap = True
                 break
-
+
         # Adjust expansion strategy
         base_expansion = 0.03
         max_expansion = 0.05
-
+
         is_standing = h > 1.5 * w
         is_sitting = 0.8 <= h/w <= 1.2
         is_abnormal_size = (h * w) > (avg_area * 1.5) or (h * w) < (avg_area * 0.5)
-
+
         if has_overlap:
             h_expansion = w_expansion = base_expansion * 0.8
         else:
@@ -242,41 +242,41 @@ def detect_multiple_dogs(image, conf_threshold=0.3, iou_threshold=0.3):
                 h_expansion = w_expansion = base_expansion
             else:
                 h_expansion = w_expansion = base_expansion * 0.9
-
+
         # Position compensation
         if center_x < img_width * 0.2 or center_x > img_width * 0.8:
             w_expansion *= 0.9
-
+
         if is_abnormal_size:
             h_expansion *= 0.8
             w_expansion *= 0.8
-
+
         # Calculate final bounding box
         expansion_w = w * w_expansion
         expansion_h = h * h_expansion
-
+
         new_x1 = max(0, center_x - (w + expansion_w)/2)
         new_y1 = max(0, center_y - (h + expansion_h)/2)
         new_x2 = min(img_width, center_x + (w + expansion_w)/2)
         new_y2 = min(img_height, center_y + (h + expansion_h)/2)
-
+
         # Crop and process image
-        cropped_image = image.crop((int(new_x1), int(new_y1),
+        cropped_image = image.crop((int(new_x1), int(new_y1),
                                     int(new_x2), int(new_y2)))
-
+
         processed_image = enhanced_preprocess(
             cropped_image,
             is_standing=is_standing,
             has_overlap=has_overlap
         )
-
+
         processed_boxes.append((
-            processed_image,
+            processed_image,
             box_info['confidence'],
-            [new_x1, new_y1, new_x2, new_y2],
+            [new_x1, new_y1, new_x2, new_y2],
             True
         ))
-
+
     return processed_boxes

 @spaces.GPU
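As a sanity check on calculate_iou in the hunks above, the same arithmetic applied to two hypothetical boxes (not part of the commit):

# Sketch: calculate_iou's arithmetic on two toy 100x100 boxes offset by 50px.
box1 = {'coords': [0, 0, 100, 100], 'area': 100 * 100}
box2 = {'coords': [50, 50, 150, 150], 'area': 100 * 100}
x1 = max(box1['coords'][0], box2['coords'][0])   # 50
y1 = max(box1['coords'][1], box2['coords'][1])   # 50
x2 = min(box1['coords'][2], box2['coords'][2])   # 100
y2 = min(box1['coords'][3], box2['coords'][3])   # 100
intersection = (x2 - x1) * (y2 - y1)             # 2500
iou = intersection / (box1['area'] + box2['area'] - intersection)
print(round(iou, 3))  # 0.143, below the 0.2 overlap_threshold, so no overlap flag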
@@ -443,6 +443,19 @@ def main():
     # Header HTML

     gr.HTML("""
+        <head>
+            <link rel="manifest" href="manifest.json">
+            <meta name="viewport" content="width=device-width, initial-scale=1.0">
+            <meta name="theme-color" content="#4299e1">
+            <link rel="apple-touch-icon" href="assets/icon-192.png">
+        </head>
+        <script>
+            if ('serviceWorker' in navigator) {
+                window.addEventListener('load', () => {
+                    navigator.serviceWorker.register('/service-worker.js');
+                });
+            }
+        </script>
         <header style='text-align: center; padding: 20px; margin-bottom: 20px;'>
             <h1 style='font-size: 2.5em; margin-bottom: 10px; color: #2D3748;'>
                 🐾 PawMatch AI
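One caveat worth noting (an observation, not part of the commit): markup passed to gr.HTML is rendered inside the page body, so the <link> and <meta> tags above may not actually land in the document <head>. Recent Gradio releases expose a head parameter on gr.Blocks for this; a hedged sketch, assuming a Gradio version that supports it:

# Sketch only: inject the PWA tags via gr.Blocks(head=...) instead of gr.HTML,
# assuming a Gradio version (4.x+) that accepts the `head` argument.
import gradio as gr

pwa_head = """
<link rel="manifest" href="manifest.json">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<meta name="theme-color" content="#4299e1">
<link rel="apple-touch-icon" href="assets/icon-192.png">
"""

with gr.Blocks(head=pwa_head) as iface:
    gr.HTML("<h1>🐾 PawMatch AI</h1>")  # body content as before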
@@ -522,4 +535,4 @@ def main():
 
 if __name__ == "__main__":
     iface = main()
-    iface.launch()
+    iface.launch()
assets/icon-192.png
ADDED
assets/icon-512.png
ADDED
manifest.json
ADDED
@@ -0,0 +1,21 @@
+{
+  "name": "PawMatch AI",
+  "short_name": "PawMatch",
+  "start_url": ".",
+  "display": "standalone",
+  "background_color": "#ffffff",
+  "theme_color": "#4299e1",
+  "description": "Your Smart Dog Breed Guide",
+  "icons": [
+    {
+      "src": "assets/icon-192.png",
+      "sizes": "192x192",
+      "type": "image/png"
+    },
+    {
+      "src": "assets/icon-512.png",
+      "sizes": "512x512",
+      "type": "image/png"
+    }
+  ]
+}
service-worker.js
ADDED
@@ -0,0 +1,23 @@
+const CACHE_NAME = 'pawmatch-v1';
+const urlsToCache = [
+  '/',
+  '/index.html',
+  '/assets/icon-192.png',
+  '/assets/icon-512.png'
+];
+
+self.addEventListener('install', (event) => {
+  event.waitUntil(
+    caches.open(CACHE_NAME).then((cache) => {
+      return cache.addAll(urlsToCache);
+    })
+  );
+});
+
+self.addEventListener('fetch', (event) => {
+  event.respondWith(
+    caches.match(event.request).then((response) => {
+      return response || fetch(event.request);
+    })
+  );
+});
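For the manifest and service worker to be fetchable at the root-relative paths referenced above ('/service-worker.js', 'manifest.json', 'assets/...'), the Space has to actually serve those files, which Gradio does not do by default. One possible wiring, shown as a sketch under that assumption and not necessarily what this Space does, is to mount the Gradio app on FastAPI and expose the files explicitly:

# Sketch only: serve manifest.json, service-worker.js and the icons alongside the Gradio UI.
import gradio as gr
import uvicorn
from fastapi import FastAPI
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles

app = FastAPI()
app.mount("/assets", StaticFiles(directory="assets"), name="assets")

@app.get("/manifest.json")
def manifest():
    return FileResponse("manifest.json", media_type="application/manifest+json")

@app.get("/service-worker.js")
def service_worker():
    # Served from the site root so the service worker's scope covers the whole app.
    return FileResponse("service-worker.js", media_type="application/javascript")

demo = gr.Blocks()  # stand-in for the Blocks interface built in main()
app = gr.mount_gradio_app(app, demo, path="/")

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=7860)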
styles.py
CHANGED
@@ -1,4 +1,3 @@
-
 def get_css_styles():
     return """
     .dog-info-card {
@@ -15,7 +14,7 @@ def get_css_styles():
     .dog-info-card:hover {
         box-shadow: 0 4px 16px rgba(0,0,0,0.12);
     }
-    .dog-info-card:before {
+    .dog-info-card:before {
         content: '';
         position: absolute;
         left: 0;
@@ -840,15 +839,15 @@ def get_css_styles():
         background: #e9ecef;
         border-radius: 2px;
     }
-    .level-indicator.low .bar:nth-child(1) {
-        background: #4CAF50;
+    .level-indicator.low .bar:nth-child(1) {
+        background: #4CAF50;
     }
     .level-indicator.medium .bar:nth-child(1),
-    .level-indicator.medium .bar:nth-child(2) {
-        background: #FFA726;
+    .level-indicator.medium .bar:nth-child(2) {
+        background: #FFA726;
     }
-    .level-indicator.high .bar {
-        background: #EF5350;
+    .level-indicator.high .bar {
+        background: #EF5350;
     }
     .feature-list, .health-list, .screening-list {
         list-style: none;
@@ -1071,24 +1070,52 @@ def get_css_styles():
     }

     @media (max-width: 768px) {
-
-
-
+        .info-cards {
+            grid-template-columns: 1fr !important; /* single column on mobile */
+            gap: 12px !important;
+            padding: 10px !important;
+            width: 100% !important;
+            box-sizing: border-box !important;
+            min-height: auto !important; /* drop the min-height limit on mobile */
+            height: auto !important; /* let the height adapt */
+            padding: 12px !important; /* slightly reduce the padding */
         }
-
-
-
-
+
+        .info-card {
+            width: 100% !important;
+            margin: 0 !important;
+            padding: 12px !important;
+            min-height: auto !important; /* drop the min-height limit */
+            height: auto !important; /* let the height adapt */
+            overflow: visible !important; /* keep content from being cut off */
         }

-
-
-            font-size: 1rem;
+        .info-card .tooltip {
+            flex-wrap: wrap !important; /* allow wrapping on mobile */
         }
-
-
-
-
+        .info-card span {
+            display: block !important; /* make sure text displays fully */
+            overflow: visible !important;
+        }
+
+        .tooltip {
+            width: 100% !important;
+            display: flex !important;
+            align-items: center !important;
+            gap: 8px !important;
+        }
+
+        .tooltip-text {
+            left: auto !important;
+            right: 0 !important;
+            width: 200px !important;
+        }
+
+        /* keep all text visible */
+        .label, .value {
+            overflow: visible !important;
+            white-space: normal !important;
+            word-wrap: break-word !important;
         }
     }

@@ -1153,7 +1180,56 @@ def get_css_styles():
 
     .section-header h3 .tooltip .tooltip-text::after {
         right: calc(100% - 2px); /* shift the arrow to the right */
-
+
+    }
+
+    .analysis-container {
+        padding: 20px;
+        background: white;
+        border-radius: 10px;
+        box-shadow: 0 2px 4px rgba(0,0,0,0.1);
+    }
+
+    .metrics-grid {
+        display: grid;
+        grid-template-columns: repeat(auto-fit, minmax(250px, 1fr));
+        gap: 20px;
+        margin-top: 20px;
     }
-
-
+
+    .metric-card {
+        padding: 20px;
+        background: #f8fafc;
+        border-radius: 8px;
+        text-align: center;
+    }
+
+    .metric-value {
+        font-size: 24px;
+        font-weight: bold;
+        color: #2563eb;
+    }
+
+    .metric-details {
+        padding: 15px;
+        background: #f8fafc;
+        border-radius: 8px;
+        margin: 10px 0;
+    }
+
+    .metric-details h3 {
+        color: #1e40af;
+        margin-bottom: 10px;
+    }
+
+    .metric-details ul {
+        list-style-type: none;
+        padding: 0;
+    }
+
+    .metric-details li {
+        margin: 5px 0;
+        color: #4b5563;
+    }
+
+    """
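For context, get_css_styles() returns one large CSS string; it is presumably attached to the interface when main() builds it, roughly like the sketch below (illustrative only, not the app's exact code):

# Sketch: how a CSS string from styles.py is typically passed to a Gradio app.
import gradio as gr
from styles import get_css_styles

with gr.Blocks(css=get_css_styles()) as iface:
    gr.HTML("<h1>🐾 PawMatch AI</h1>")

if __name__ == "__main__":
    iface.launch()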