{
    "workloads": [
      {
        "raw_data": "base64_encoded_pdf_content",
        "schemas": ["schema1", "schema2"]
      }
    ],
    "provider_type": "azure",
    "provider_model_name": "gpt-4o",
    "api_key": "sk-..."
    "additional_params": {
        "azure_endpoint": "AZURE_ENDPOINT",
        "azure_deployment": "AZURE_DEPLOYMENT_ID",
        "api_version": "AZURE_API_VERSION"
    }
  }
  {
    "task_id": "b6781f5b-022b-485e-b93c-6a958e51b992",
    "message": "Pipeline processing started"
  }

This explains how to use the POST /pipelines endpoint with custom parameters that will be passed to the model provider. For example, when using Azure OpenAI, you must provide the azure_endpoint, azure_deployment, and api_version parameters in additional_params. This is also where you can define model parameters such as temperature.

  {
    "workloads": [
      {
        "raw_data": "base64_encoded_pdf_content",
        "schemas": ["schema1", "schema2"]
      }
    ],
    "provider_type": "azure",
    "provider_model_name": "gpt-4o",
    "api_key": "sk-..."
    "additional_params": {
        "azure_endpoint": "AZURE_ENDPOINT",
        "azure_deployment": "AZURE_DEPLOYMENT_ID",
        "api_version": "AZURE_API_VERSION"
    }
  }
  {
    "task_id": "b6781f5b-022b-485e-b93c-6a958e51b992",
    "message": "Pipeline processing started"
  }