llm_interactive not working with a custom prompt

I'm trying to use llm_interactive for pre-annotation with text classification.
When I set USE_INTERNAL_PROMPT_TEMPLATE=1 in docker-compose.yml, there is no response in the logs.
Likewise, when I enter a prompt in the front-end UI, there is no response in the logs.
It only works when I set USE_INTERNAL_PROMPT_TEMPLATE=0 and define DEFAULT_PROMPT in docker-compose.yml.
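
For reference, this is roughly the variant that does work (the prompt text below is only a placeholder, not the exact one I use):

      - USE_INTERNAL_PROMPT_TEMPLATE=0
      - DEFAULT_PROMPT=Classify the following text into one of the categories a, b, c or d: {text}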

Docker config:

version: "3.8"

services:
  llm_interactive:
    container_name: llm_interactive
    image: heartexlabs/label-studio-ml-backend:llm-master
    build:
      context: .
      args:
        TEST_ENV: ${TEST_ENV}
    environment:
      - MODEL_DIR=/data/models
      # Specify openai model provider: "openai", "azure", or "ollama"
      - OPENAI_PROVIDER=ollama
      # Specify API key for openai or azure
      - OPENAI_API_KEY=
      # Specify model name for openai or azure (by default it uses "gpt-3.5-turbo")
      - OPENAI_MODEL=qwen2.5:7b
      # Internal prompt template for the model is:
      # **Source Text**:\n\n"{text}"\n\n**Task Directive**:\n\n"{prompt}"
      # If you want to specify task data keys in the prompt (i.e. input <TextArea name="$PROMPT_PREFIX..."/>), set this to 0
      - USE_INTERNAL_PROMPT_TEMPLATE=1
      - PROMPT_PREFIX=prompt
      - LOG_LEVEL=DEBUG
      # Number of responses to generate for each request
      - NUM_RESPONSES=1
      # Temperature for the model
      - TEMPERATURE=0.7
      # Azure resource endpoint (in case OPENAI_PROVIDER=azure)
      - AZURE_RESOURCE_ENDPOINT=
      # Azure deployment name (in case OPENAI_PROVIDER=azure)
      - AZURE_DEPLOYMENT_NAME=
      # Azure API version (in case OPENAI_PROVIDER=azure)
      - AZURE_API_VERSION=2023-05-15
      # Ollama Endpoint (in case OPENAI_PROVIDER=ollama, OPENAI_MODEL=<your_ollama_model>)
      # If running Ollama locally OLLAMA_ENDPOINT=http://host.docker.internal:11434/v1/
      - OLLAMA_ENDPOINT=http://192.168.240.192:11434/v1/
      # specify these parameters if you want to use basic auth for the model server
      - BASIC_AUTH_USER=
      - BASIC_AUTH_PASS=
    ports:
      - 9090:9090
    volumes:
      - "./data/server:/data"

And the UI template is:

<View>
    <Style>
        .lsf-main-content.lsf-requesting .prompt::before { content: ' loading...'; color: #808080; }
    </Style>
    <!-- Input data -->
    <Text name="text" value="$text"/>
    <!-- Prompt input -->
    <TextArea name="prompt" toName="text" editable="true" rows="1" maxSubmissions="2" showSubmitButton="true" placeholder="please input promp:"/>
    <!-- LLM response output -->
  	<Text name="resp" value="responseļ¼š"/>
    <TextArea name="response" toName="text" editable="true" rows="1"  maxSubmissions="2" smart="false" />
    <View style="box-shadow: 2px 2px 5px #999;                padding: 20px; margin-top: 2em;                border-radius: 5px;">
        <Choices name="labels" toName="text" choice="single" showInLine="true">
            <Choice value="a"/>
            <Choice value="b"/>
	<Choice value="c"/>
          	<Choice value="d"/>
        </Choices>
    </View>
</View>
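
In case it helps narrow things down, these are the sanity checks I would run first (a sketch; host/port and model name are taken from the compose file above, and the health route is assumed to be the standard label-studio-ml one):

# Is the ML backend container serving requests?
curl http://localhost:9090/health

# Is Ollama reachable via its OpenAI-compatible chat endpoint with the same model name?
curl http://192.168.240.192:11434/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "qwen2.5:7b", "messages": [{"role": "user", "content": "hello"}]}'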