
Commit

chore: add missing requirements
mdjastrzebski committed Nov 16, 2023
1 parent 772af27 commit 741532d
Showing 3 changed files with 10 additions and 9 deletions.
8 changes: 4 additions & 4 deletions Notebook.ipynb
@@ -85,15 +85,15 @@
 "llm = LlamaCpp(\n",
 "    model_path=MODEL_FILE,\n",
 "    n_ctx=MODEL_CONTEXT_WINDOW,\n",
-"    # Number of tokens to process in parallel. Should be a number between 1 and n_ctx.\n",
-"    n_batch=512,\n",
-"    # Number of layers to be loaded into GPU memory. Default None.\n",
-"    n_gpu_layers=1,\n",
 "    # Maximum length of the model's output, in tokens.\n",
 "    max_tokens=MAX_ANSWER_TOKENS,\n",
 "    # Don't be creative.\n",
 "    temperature=0,\n",
 "    verbose=VERBOSE,\n",
+"\n",
+"    # Remove the next two lines if NOT using macOS & an M1 processor:\n",
+"    n_batch=512,\n",
+"    n_gpu_layers=1,\n",
 ")"
 ]
 },
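For context, the reordered arguments map onto LangChain's LlamaCpp wrapper. A minimal standalone sketch of the post-change configuration follows; the model path and numeric constants are illustrative placeholders, not values from this repo:

```python
# Sketch only: mirrors the notebook's post-change LlamaCpp setup.
# The model path and numeric constants below are hypothetical.
from langchain.llms import LlamaCpp

llm = LlamaCpp(
    model_path="./models/model.gguf",  # hypothetical local GGUF file
    n_ctx=4096,                        # context window, in tokens
    max_tokens=512,                    # cap on the answer length, in tokens
    temperature=0,                     # deterministic output
    verbose=False,
    # Metal/GPU offload; remove the next two lines if NOT on macOS
    # with an Apple Silicon (M1) processor:
    n_batch=512,
    n_gpu_layers=1,
)
```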
3 changes: 2 additions & 1 deletion requirements.txt
@@ -1,3 +1,4 @@
 langchain==0.0.335
 gradio==3.50.2
-llama-cpp-python==0.2.18
+llama-cpp-python==0.2.18
+pypdf==3.17.1
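The newly pinned pypdf is presumably what the project uses for PDF loading. A quick sanity check of the pinned install might look like this; the file path is a placeholder:

```python
# Sketch only: verifies the pypdf pin by extracting text from a PDF.
# "sample.pdf" is a hypothetical path, not a file from this repo.
from pypdf import PdfReader

reader = PdfReader("sample.pdf")
for page in reader.pages:
    print(page.extract_text())
```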
8 changes: 4 additions & 4 deletions web-ui.py
@@ -45,14 +45,14 @@
 llm = LlamaCpp(
     model_path=MODEL_FILE,
     n_ctx=MODEL_CONTEXT_WINDOW,
-    # Number of tokens to process in parallel. Should be a number between 1 and n_ctx.
-    n_batch=512,
-    # Number of layers to be loaded into GPU memory. Default None.
-    n_gpu_layers=1,
     # Don't be creative.
     temperature=0,
     max_tokens=MAX_TOKENS,
     verbose=VERBOSE,
+
+    # Remove the next two lines if NOT using macOS & an M1 processor:
+    n_batch=512,
+    n_gpu_layers=1,
 )
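For completeness, a LangChain LLM of this vintage (0.0.x) is directly callable, so the configured instance can be exercised as below; the prompt is illustrative:

```python
# Sketch only: invoke the LlamaCpp instance configured above.
answer = llm("In one sentence, what does n_gpu_layers control?")
print(answer)
```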
