-
Notifications
You must be signed in to change notification settings - Fork 0
/
Dockerfile
66 lines (49 loc) · 2.2 KB
/
Dockerfile
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
# Parameterized base image — an ARG before FROM is the only ARG visible to the
# FROM line itself. Default is a specific tag (not :latest) for reproducibility;
# override with: docker build --build-arg BASE_IMAGE=<image:tag> .
ARG BASE_IMAGE=ubuntu:20.04
FROM ${BASE_IMAGE}
# Build dependencies for compiling CPython from source (headers for ssl,
# sqlite3, readline, etc.). Notes on the fixes in this layer:
#  - apt-get, not apt: apt's CLI is not script-stable (hadolint DL3027)
#  - DEBIAN_FRONTEND is set inline because the original declared it only
#    *after* this layer, so this install could still prompt interactively
#  - --no-install-recommends keeps the layer minimal (DL3015)
#  - ca-certificates added: the later HTTPS wget of the Python tarball
#    needs it on a bare ubuntu base
#  - package lists are removed in the same layer that created them
RUN DEBIAN_FRONTEND=noninteractive apt-get update && \
    apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        libbz2-dev \
        libffi-dev \
        libgdbm-dev \
        liblzma-dev \
        libncurses5-dev \
        libnss3-dev \
        libreadline-dev \
        libsqlite3-dev \
        libssl-dev \
        wget \
        zlib1g-dev && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
ENV DEBIAN_FRONTEND noninteractive
# Runtime packages: tcl and ffmpeg (presumably needed by the Python app —
# TODO confirm against requirements.txt). Merged the original two layers so a
# single `apt-get update` covers both installs (avoids the stale-apt-cache
# pitfall, DL3009) and the package lists are purged in the same layer,
# which the original never did anywhere.
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
        ffmpeg \
        tcl && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*
# Build CPython from source to get an exact interpreter version that
# Ubuntu 20.04 does not package. Fixes in this layer:
#  - make -j"$(nproc)": the original single-threaded `make` with
#    --enable-optimizations (PGO) is extremely slow
#  - pip --no-cache-dir replaces the fragile trailing
#    `rm -r /root/.cache/pip`, which errors if the cache dir is absent
#  - quiet wget/tar to keep build logs readable
# Sources and build tree are removed in the same layer so they never
# persist in the image.
ARG PYTHON_VERSION=3.11.5
RUN cd /tmp && \
    wget -q https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz && \
    tar -xf Python-${PYTHON_VERSION}.tgz && \
    cd Python-${PYTHON_VERSION} && \
    ./configure --enable-optimizations && \
    make -j"$(nproc)" && \
    make install && \
    cd /tmp && \
    rm -rf Python-${PYTHON_VERSION}.tgz Python-${PYTHON_VERSION} && \
    ln -s /usr/local/bin/python3 /usr/local/bin/python && \
    ln -s /usr/local/bin/pip3 /usr/local/bin/pip && \
    python -m pip install --no-cache-dir --upgrade pip
WORKDIR /workspace

# COPY instead of ADD for plain local files — ADD's extra tar-extraction /
# URL-fetch behavior is not wanted here (hadolint DL3020). The dependency
# manifest is copied before the full source so the pip layer below stays
# cached when only application code changes.
COPY requirements.txt .
COPY llama_cpp_server_config.json .
COPY setup.py .
RUN python -m pip install --no-cache-dir -r requirements.txt
# The original `ADD .. /workspace` cannot escape the build context — Docker
# normalizes the path to the context root — so `COPY .` is the explicit
# equivalent of what actually happened.
COPY . /workspace
# Runtime configuration consumed by the container at start-up: bind address
# and port for the server. Grouped into a single ENV instruction.
ENV HOST=0.0.0.0 \
    PORT=8080
# EXPOSE is documentation only — the port still has to be published with
# `docker run -p` (or --network=host, as the run commands below use).
EXPOSE ${PORT}
# Project-specific setup script baked into the image at build time
# (presumably downloads/prepares model assets — TODO confirm in setup.py).
RUN python setup.py
# Exec-form ENTRYPOINT: the server runs as PID 1 and receives SIGTERM
# directly from `docker stop`. Config path is relative to WORKDIR /workspace.
# NOTE(review): no USER directive anywhere — the server runs as root;
# consider adding a non-root user before this line.
ENTRYPOINT [ "python", "-m", "llama_cpp.server", "--config_file", "llama_cpp_server_config.json" ]
# NOTE(review): the commented variant below would not work as written — exec
# form does no shell processing, so "&&" would be passed to ulimit as a
# literal argument. It would need shell form or a wrapper script ending in
# `exec python -m llama_cpp.server ...`.
#ENTRYPOINT [ "ulimit", "-l", "unlimited", "&&", "python", "-m", "llama_cpp.server", "--config_file", "llama_cpp_server_config.json" ]
# docker build -t llama_cpp_server_image -f ./llama_cpp_server/Dockerfile ./llama_cpp_server
# docker run --name llama_cpp_server_container --network="host" llama_cpp_server_image:latest
# docker build -t ghcr.io/eericheva/llama_cpp_server_image:latest -f Dockerfile ./llama_cpp_server
# docker push ghcr.io/eericheva/llama_cpp_server_image:latest
# docker run --name llama_cpp_server_container --network="host" ghcr.io/eericheva/llama_cpp_server_image:latest
# docker system prune -a
# docker compose up -d
# check if it is ok here : http://<server-ip>
# docker exec -t -i llama_cpp_server_container /bin/bash
# docker logs llama_cpp_server_container
# docker container inspect llama_cpp_server_container