# --- System setup and repository installation ---
sudo apt update && sudo apt upgrade -y
sudo apt install -y git python3 python3-pip python-is-python3

# Clone the repository. The URL must be bare — wrapping it in <> makes the
# shell treat < and > as redirection operators and the command fails.
git clone https://github.com/NicheTensor/NicheImage
cd NicheImage

# Create and activate an isolated virtual environment for the project.
python -m venv main_env
source main_env/bin/activate

# Install the project in editable mode; uvloop is then removed
# (presumably it conflicts with the serving stack — see project docs).
pip install -e .
pip uninstall uvloop -y

# Fetch git submodules, then source the download script so it runs in the
# current shell (it may rely on the activated environment / current cwd).
git submodule update --init --recursive
. generation_models/custom_pipelines/scripts/download_antelopev2.sh
NOTE: Set `--num_gpus 0`, and set `--num_replicas` equal to your `max_concurrent_requests` value (defined in point 7 below), so that the miner can handle multiple requests concurrently.
# --- Launch the model-serving endpoint under pm2 ---
source main_env/bin/activate

# OPENAI_API_KEY is only needed if you serve the DallE model.
# --port 10006 is the default port; change it if needed.
# NB: line continuations must be a single backslash with NOTHING after it —
# a trailing comment after the backslash breaks the command.
OPENAI_API_KEY=<your-openai-api-key> \
RAY_SERVE_QUEUE_LENGTH_RESPONSE_DEADLINE_S=1.0 \
pm2 start python --name "miner_endpoint" -- -m services.miner_endpoint.app \
  --model_name <selected-model-name> \
  --num_replicas X --num_gpus 0 \
  --port 10006
pm2 start python --name "miner" \\
-- \\
-m neurons.miner.miner \\
--netuid 23 \\
--wallet.name <wallet_name> --wallet.hotkey <wallet_hotkey> \\
--subtensor.network <network> \\ # default is finney
--axon.port <your_public_port> \\
--generate_endpoint [<http://127.0.0.1:10006/generate>](<http://127.0.0.1:10006/generate>) \\ # change if you use different port or host
--info_endpoint [<http://127.0.0.1:10006/info>](<http://127.0.0.1:10006/info>) \\ # change if you use different port or host
--miner.total_volume <your-generation-volume> # default is 40. Change based on your model timeout value and GPU capacity
--miner.max_concurrent_requests <your-max-concurrent-requests> # default is 4. Change based on your model timeout value and GPU capacity
`miner.total_volume <your-generation-volume>` refers to the total number of requests your miner can process; `miner.max_concurrent_requests <your-max-concurrent-requests>` refers to the number of requests that can be processed in parallel.