adjust status panel

2026-01-21 12:31:30 +01:00
parent 4534857161
commit fe96f86b9c
3 changed files with 28 additions and 4 deletions


@@ -181,15 +181,24 @@ function updateRunningModels(models) {
         return;
     }
 
-    container.innerHTML = models.map(model => `
+    container.innerHTML = models.map(model => {
+        const gpuLayers = model.gpu_layers || 0;
+        const totalLayers = model.total_layers || 0;
+        const cpuLayers = totalLayers > 0 ? totalLayers - gpuLayers : 0;
+
+        return `
         <div class="running-model">
             <div class="running-model-name">${escapeHtml(model.name)}</div>
             <div class="running-model-stats">
                 VRAM: ${model.vram_gb.toFixed(2)} GB
                 ${model.offload_pct > 0 ? ` | CPU: ${model.offload_pct.toFixed(1)}%` : ''}
             </div>
+            <div class="running-model-layers">
+                GPU: ${gpuLayers} layers | CPU: ${cpuLayers} layers
+            </div>
         </div>
-    `).join('');
+        `;
+    }).join('');
 }
 
 // ===== MODEL MANAGEMENT =====
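
Note: the rewritten renderer assumes each entry in the models array now carries gpu_layers and total_layers next to the existing fields. A minimal sketch of that contract, with invented sample values (only the field names come from the diff), including the same CPU-layer derivation the template performs:

    # Hypothetical example of one entry in the running-models payload that
    # updateRunningModels() renders; all values here are invented samples.
    example_model = {
        "name": "llama3:8b",
        "size_gb": 5.6,
        "vram_gb": 4.2,
        "offload_pct": 25.0,
        "expires_at": "2026-01-21T12:36:30+01:00",
        "gpu_layers": 24,      # new field consumed by the template
        "total_layers": 32,    # new field consumed by the template
    }

    # Same guard as the JavaScript: treat a missing or zero total as "unknown".
    total_layers = example_model.get("total_layers", 0)
    gpu_layers = example_model.get("gpu_layers", 0)
    cpu_layers = total_layers - gpu_layers if total_layers > 0 else 0   # -> 8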


@@ -110,8 +110,16 @@ body {
 }
 
 .running-model-stats {
-    font-size: 11px;
+    font-size: 0.85rem;
     color: var(--text-secondary);
+    margin-top: 0.25rem;
+}
+
+.running-model-layers {
+    font-size: 0.85rem;
+    color: var(--accent);
+    margin-top: 0.25rem;
+    font-weight: 500;
 }
 
 .no-models {


@@ -398,12 +398,19 @@ def api_status():
             size_total = model.get('size', 0) / (1024**3) # GB
             offload_pct = ((size_total - size_vram) / size_total * 100) if size_total > 0 else 0
 
+            # Extract layer information from model details
+            details = model.get('details', {})
+            total_layers = details.get('parameter_size', 0)
+            gpu_layers = details.get('quantization_level', 0)
+
             running_models.append({
                 'name': model.get('name', 'Unknown'),
                 'size_gb': size_total,
                 'vram_gb': size_vram,
                 'offload_pct': offload_pct,
-                'expires_at': model.get('expires_at', '')
+                'expires_at': model.get('expires_at', ''),
+                'gpu_layers': gpu_layers,
+                'total_layers': total_layers
             })
     except Exception as e:
         print(f"Error getting running models: {e}")