diff --git a/openapi.yaml b/openapi.yaml
index 72d0e8c..c96265a 100644
--- a/openapi.yaml
+++ b/openapi.yaml
@@ -6117,6 +6117,38 @@ paths:
             "endpoint": "/v1/chat/completions",
             "input_file_id": "file-id"
           }'
+      - lang: Python
+        label: Audio transcriptions (Python)
+        source: |
+          # Audio transcription batches use the /v1/audio/transcriptions endpoint.
+          # Each line in the uploaded JSONL must include `"method": "FILE"` so the
+          # worker dispatches the request as multipart/form-data. Example line:
+          #   {"custom_id": "transcription-1", "method": "FILE",
+          #    "body": {"file": "https://example.com/audio.wav",
+          #             "model": "openai/whisper-large-v3"}}
+          from together import Together
+          import os
+
+          client = Together(
+              api_key=os.environ.get("TOGETHER_API_KEY"),
+          )
+
+          batch = client.batches.create(
+              input_file_id="file-id",
+              endpoint="/v1/audio/transcriptions",
+          )
+
+          print(batch.job)
+      - lang: Shell
+        label: Audio transcriptions (cURL)
+        source: |
+          curl -X POST "https://api.together.ai/v1/batches" \
+            -H "Authorization: Bearer $TOGETHER_API_KEY" \
+            -H "Content-Type: application/json" \
+            -d '{
+              "endpoint": "/v1/audio/transcriptions",
+              "input_file_id": "file-id"
+            }'
       security:
         - bearerAuth: []
       requestBody:
@@ -12296,7 +12328,15 @@ components:
       properties:
         endpoint:
           type: string
-          description: The endpoint to use for batch processing
+          enum:
+            - /v1/chat/completions
+            - /v1/audio/transcriptions
+            - /v1/audio/translations
+          description: |
+            The endpoint to use for batch processing. Each line of the uploaded input file is dispatched against this endpoint.
+            - `/v1/chat/completions` — chat completion batches
+            - `/v1/audio/transcriptions` — audio transcription batches (e.g. `openai/whisper-large-v3`)
+            - `/v1/audio/translations` — audio translation batches
           example: '/v1/chat/completions'
         input_file_id:
           type: string