llama.cpp: import more defensively
lbeurerkellner committed Aug 15, 2023
1 parent 7f23adc · commit f219bd9
Showing 2 changed files with 5 additions and 4 deletions.
src/lmql/models/lmtp/backends/llama_cpp_model.py: 3 changes (2 additions & 1 deletion)
@@ -2,14 +2,15 @@
 import sys
 
 import numpy as np
-from llama_cpp import Llama, LlamaTokenizer
 
 import lmql.utils.nputil as nputil
 from lmql.models.lmtp.backends.lmtp_model import (LMTPModel, LMTPModelResult,
                                                   TokenStreamer)
 
 class LlamaCppModel(LMTPModel):
     def __init__(self, model_identifier, **kwargs):
+        from llama_cpp import Llama
+
         self.model_identifier = model_identifier
         self.kwargs = kwargs
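The first change replaces the module-level "from llama_cpp import Llama, LlamaTokenizer" with a deferred import inside __init__, so "import lmql" succeeds even on machines without llama-cpp-python installed; the dependency is only resolved once a llama.cpp backend is actually constructed. A minimal sketch of the pattern, with an illustrative Llama(model_path=...) call rather than the actual LMQL wiring:

class LlamaCppModel:
    def __init__(self, model_identifier, **kwargs):
        # Deferred import: an ImportError surfaces here, at backend
        # construction, rather than at 'import lmql' time, and only
        # for users who actually select this backend.
        from llama_cpp import Llama

        self.model_identifier = model_identifier
        # Illustrative only; the real backend derives the model path
        # from the identifier and passes more configuration.
        self.llm = Llama(model_path=model_identifier, **kwargs)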
src/lmql/models/lmtp/backends/lmtp_model.py: 6 changes (3 additions & 3 deletions)
@@ -107,9 +107,9 @@ def wrapper(loader):
     import importlib
     if module_dependencies is not None:
         for module in module_dependencies:
-            try:
-                importlib.import_module(module)
-            except ImportError:
+            # check without importing
+            spec = importlib.util.find_spec(module)
+            if spec is None:
                 def error_func(*args, **kwargs):
                     assert False, "To use the {} backend, please install the '{}' package.".format(name, module)
                 LMTPModel.registry[name] = error_func
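The second change checks dependency availability with importlib.util.find_spec instead of attempting the import: find_spec only asks the import machinery's finders whether the module can be located, without executing any of its code, so backend registration stays cheap and side-effect free even when heavy optional dependencies are installed. A self-contained sketch of the check, using a hypothetical register_backend helper in place of the decorator in lmtp_model.py; note the explicit import of importlib.util, since a bare "import importlib" is not guaranteed to bind the util submodule:

import importlib.util  # explicit: 'import importlib' alone may not expose importlib.util

registry = {}

def register_backend(name, module, loader):
    # find_spec probes the finders for module metadata only; unlike
    # importlib.import_module it never runs the target module's code.
    if importlib.util.find_spec(module) is None:
        def error_func(*args, **kwargs):
            assert False, "To use the {} backend, please install the '{}' package.".format(name, module)
        registry[name] = error_func
    else:
        registry[name] = loader

# hypothetical usage mirroring the llama.cpp registration
register_backend("llama.cpp", "llama_cpp", loader=lambda **kwargs: None)  # stand-in loader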
