mmproj upgrade

2026-01-21 12:13:39 +01:00
parent 8149ac8c8b
commit b83f2e6e38
11 changed files with 258 additions and 31 deletions


@@ -105,6 +105,19 @@ def parse_modelfile(modelfile_path):
        caps_str = capabilities_match.group(1).strip()
        capabilities = [cap.strip() for cap in caps_str.split(',') if cap.strip()]

    # Look for optional mmproj (multimodal projection) configuration
    # Format: # mmproj_url: https://huggingface.co/org/repo
    mmproj_url_match = re.search(r'#\s*mmproj_url:\s*(https://huggingface\.co/[^\s]+)', content)
    mmproj_url = mmproj_url_match.group(1) if mmproj_url_match else None

    # Format: # mmproj_quant: BF16 (or F16, F32)
    mmproj_quant_match = re.search(r'#\s*mmproj_quant:\s*([a-zA-Z0-9_]+)', content)
    mmproj_quant = mmproj_quant_match.group(1) if mmproj_quant_match else 'BF16'  # Default to BF16

    # Format: # mmproj_sha256: <hash>
    mmproj_sha256_match = re.search(r'#\s*mmproj_sha256:\s*([a-fA-F0-9]{64})', content)
    mmproj_sha256 = mmproj_sha256_match.group(1) if mmproj_sha256_match else None

    # Check if URL points to a specific GGUF file or just the repo
    if hf_url.endswith('.gguf') or '/blob/' in hf_url or '/resolve/' in hf_url:
        # Specific file provided - use as-is
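For reference, the three optional directives these regexes look for would sit in a Modelfile as comment lines like the following (the repository name and the 64-character hash are placeholders, not a real model):

# mmproj_url: https://huggingface.co/unsloth/Example-Vision-Model-GGUF
# mmproj_quant: BF16
# mmproj_sha256: 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef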
@@ -133,6 +146,47 @@ def parse_modelfile(modelfile_path):
    # Example: Ministral-3-3B-Instruct-2512-Q5_K_M.gguf -> ministral-3:3b-instruct-2512-q5_k_m
    model_base, model_tag, model_name = parse_model_name_from_gguf(gguf_filename)

    # Construct mmproj info if mmproj_url is provided
    mmproj_info = None
    if mmproj_url:
        # Determine mmproj filename based on URL pattern
        if mmproj_url.endswith('.gguf') or '/blob/' in mmproj_url or '/resolve/' in mmproj_url:
            # Specific file provided
            mmproj_resolve_url = mmproj_url.replace('/blob/', '/resolve/')
            mmproj_filename = os.path.basename(urlparse(mmproj_resolve_url).path)
        else:
            # Repository root - construct filename
            # Two common patterns:
            # 1. mmproj-BF16.gguf (unsloth pattern)
            # 2. ModelName-BF16-mmproj.gguf (mistralai pattern)
            # Try to detect which pattern by checking the URL
            url_parts = urlparse(mmproj_url).path.strip('/').split('/')
            if len(url_parts) >= 2:
                repo_org = url_parts[0]
                if repo_org == 'unsloth':
                    # unsloth pattern: mmproj-{QUANT}.gguf
                    mmproj_filename = f"mmproj-{mmproj_quant}.gguf"
                else:
                    # mistralai/others pattern: extract base name from main repo
                    repo_name = url_parts[1]
                    if repo_name.upper().endswith('-GGUF'):
                        repo_name = repo_name[:-5]
                    mmproj_filename = f"{repo_name}-{mmproj_quant}-mmproj.gguf"
                mmproj_resolve_url = f"{mmproj_url.rstrip('/')}/resolve/main/{mmproj_filename}"
            else:
                print(f"✗ Invalid mmproj URL format: {mmproj_url}")
                mmproj_resolve_url = None
                mmproj_filename = None

        if mmproj_resolve_url and mmproj_filename:
            mmproj_info = {
                'url': mmproj_url,
                'resolve_url': mmproj_resolve_url,
                'filename': mmproj_filename,
                'sha256': mmproj_sha256
            }

    return {
        'hf_url': hf_url,
        'resolve_url': resolve_url,
@@ -140,7 +194,8 @@ def parse_modelfile(modelfile_path):
        'model_name': model_name,
        'modelfile_path': modelfile_path,
        'sha256': sha256,
        'capabilities': capabilities
        'capabilities': capabilities,
        'mmproj': mmproj_info
    }
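As a condensed illustration of the filename logic above (not the script's own helper), the repo-root branch resolves the two naming conventions roughly as follows; both repositories in the sketch are hypothetical:

from urllib.parse import urlparse

def sketch_mmproj_name(mmproj_url, mmproj_quant='BF16'):
    # Mirrors the repo-root branch above, for illustration only (assumes org/repo URL form).
    org, repo = urlparse(mmproj_url).path.strip('/').split('/')[:2]
    if org == 'unsloth':
        filename = f"mmproj-{mmproj_quant}.gguf"
    else:
        if repo.upper().endswith('-GGUF'):
            repo = repo[:-5]
        filename = f"{repo}-{mmproj_quant}-mmproj.gguf"
    return filename, f"{mmproj_url.rstrip('/')}/resolve/main/{filename}"

print(sketch_mmproj_name("https://huggingface.co/unsloth/Example-Model-GGUF"))
# -> ('mmproj-BF16.gguf', 'https://huggingface.co/unsloth/Example-Model-GGUF/resolve/main/mmproj-BF16.gguf')
print(sketch_mmproj_name("https://huggingface.co/mistralai/Example-Model-GGUF"))
# -> ('Example-Model-BF16-mmproj.gguf', 'https://huggingface.co/mistralai/Example-Model-GGUF/resolve/main/Example-Model-BF16-mmproj.gguf')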
@@ -302,7 +357,7 @@ def download_file(url, dest_path, filename, should_cancel=None, progress_callbac
        raise

def create_ollama_model(modelfile_path, gguf_path, model_name, capabilities=None):
def create_ollama_model(modelfile_path, gguf_path, model_name, capabilities=None, mmproj_path=None):
    """
    Create an Ollama model from the Modelfile and GGUF file.
@@ -311,12 +366,15 @@ def create_ollama_model(modelfile_path, gguf_path, model_name, capabilities=None
        gguf_path: Path to the downloaded GGUF file
        model_name: Name for the Ollama model
        capabilities: Optional list of capabilities to add (e.g., ['tools', 'vision'])
        mmproj_path: Optional path to the mmproj file for vision models
    """
    print(f"\nCreating Ollama model: {model_name}")

    # Note: Capabilities are detected from the GGUF file metadata by Ollama automatically
    if capabilities:
        print(f" Expected capabilities from GGUF metadata: {', '.join(capabilities)}")
    if mmproj_path:
        print(f" Including mmproj file for vision support")

    # Read the Modelfile and update the FROM path to point to the downloaded GGUF
    with open(modelfile_path, 'r') as f:
@@ -331,12 +389,22 @@ def create_ollama_model(modelfile_path, gguf_path, model_name, capabilities=None
        modelfile_content
    )

    # Add mmproj FROM line if provided
    if mmproj_path:
        # Add the mmproj FROM line after the main model FROM line
        modelfile_content = modelfile_content.replace(
            f'FROM {gguf_path}',
            f'FROM {gguf_path}\nFROM {mmproj_path}'
        )

    # Debug: check if replacement happened
    if original_content == modelfile_content:
        print(f" WARNING: FROM line was not replaced!")
        print(f" Looking for pattern in: {original_content[:200]}")
    else:
        print(f" ✓ Replaced FROM line with local path: {gguf_path}")
        if mmproj_path:
            print(f" ✓ Added mmproj FROM line: {mmproj_path}")

    # Create a temporary Modelfile with the correct path
    with tempfile.NamedTemporaryFile(mode='w', suffix='.Modelfile', delete=False) as tmp_modelfile:
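The net effect is that the temporary Modelfile handed to Ollama carries two FROM lines, the main GGUF followed by the projector, which is the convention this commit relies on for registering a separate multimodal projection file; with illustrative temp-dir paths the rewritten content would begin roughly like this (the rest of the original Modelfile is left untouched):

FROM /tmp/tmpabc123/Example-Model-Q5_K_M.gguf
FROM /tmp/tmpabc123/mmproj-BF16.gguf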
@@ -405,6 +473,10 @@ def install_model(modelfile_path, dry_run=False, skip_existing=False, existing_m
log(f"SHA256: {model_info['sha256'][:16]}...")
if model_info.get('capabilities'):
log(f"Capabilities: {', '.join(model_info['capabilities'])}")
if model_info.get('mmproj'):
log(f"MMProj file: {model_info['mmproj']['filename']}")
if model_info['mmproj']['sha256']:
log(f"MMProj SHA256: {model_info['mmproj']['sha256'][:16]}...")
# Check if model already exists
if skip_existing and existing_models and model_info['model_name'] in existing_models:
@@ -413,9 +485,22 @@ def install_model(modelfile_path, dry_run=False, skip_existing=False, existing_m
    # Get file size and check disk space
    file_size = get_file_size(model_info['resolve_url'])
    mmproj_file_size = None
    if model_info.get('mmproj'):
        mmproj_file_size = get_file_size(model_info['mmproj']['resolve_url'])

    total_size = file_size or 0
    if mmproj_file_size:
        total_size += mmproj_file_size

    if file_size:
        size_gb = file_size / (1024**3)
        log(f"File size: {size_gb:.2f} GB")
        log(f"GGUF file size: {size_gb:.2f} GB")
        if mmproj_file_size:
            mmproj_size_gb = mmproj_file_size / (1024**3)
            log(f"MMProj file size: {mmproj_size_gb:.2f} GB")
            log(f"Total size: {total_size / (1024**3):.2f} GB")

    file_size = total_size

    if not dry_run:
        has_space, available, required = check_disk_space(file_size)
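As a worked example of the accounting above: a hypothetical 4.20 GB GGUF plus a 0.80 GB projector would log both per-file sizes and a combined "Total size: 5.00 GB", and it is that combined figure (file_size = total_size) that check_disk_space validates before any download starts.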
@@ -434,6 +519,7 @@ def install_model(modelfile_path, dry_run=False, skip_existing=False, existing_m
    # Create temporary directory for download
    with tempfile.TemporaryDirectory() as tmp_dir:
        gguf_path = os.path.join(tmp_dir, model_info['gguf_filename'])
        mmproj_path = None

        try:
            # Download the GGUF file
@@ -445,12 +531,30 @@ def install_model(modelfile_path, dry_run=False, skip_existing=False, existing_m
print(f"✗ Checksum verification failed!")
return (False, False, model_info['model_name'])
# Download mmproj file if specified
if model_info.get('mmproj'):
mmproj_path = os.path.join(tmp_dir, model_info['mmproj']['filename'])
download_file(
model_info['mmproj']['resolve_url'],
mmproj_path,
model_info['mmproj']['filename'],
should_cancel,
progress_callback
)
# Verify mmproj checksum if provided
if model_info['mmproj']['sha256']:
if not verify_checksum(mmproj_path, model_info['mmproj']['sha256']):
print(f"✗ MMProj checksum verification failed!")
return (False, False, model_info['model_name'])
# Create the Ollama model
create_ollama_model(
modelfile_path,
gguf_path,
model_info['model_name'],
model_info.get('capabilities')
model_info.get('capabilities'),
mmproj_path
)
print(f"\n✓ Successfully installed model: {model_info['model_name']}")