This proxy keeps full logs of all prompts and AI responses. Prompt logs are anonymous and do not contain IP addresses or timestamps.
You can see the type of data logged here, along with the rest of the code.
If you are uncomfortable with this, don't send prompts to this proxy!
GPT-3.5 Turbo: no wait / GPT-4: no wait / GPT-4 Turbo: no wait / Claude (Sonnet): no wait / Claude (Opus): no wait / AWS Claude (Sonnet): no wait / AWS Claude (Opus): no wait / Azure GPT-4: no wait / DALL-E: no wait
No images yet.
{ "uptime": 272561, "endpoints": { "openai": "http://portia.whatbox.ca:3456/proxy/openai", "openai2": "http://portia.whatbox.ca:3456/proxy/openai/turbo-instruct", "openai-image": "http://portia.whatbox.ca:3456/proxy/openai-image", "anthropic": "http://portia.whatbox.ca:3456/proxy/anthropic", "aws": "http://portia.whatbox.ca:3456/proxy/aws/claude", "azure": "http://portia.whatbox.ca:3456/proxy/azure/openai" }, "proompts": 204, "tookens": "3.66m ($37.49)", "proomptersNow": 0, "openaiKeys": 15, "openaiOrgs": 12, "anthropicKeys": 42, "awsKeys": 1, "azureKeys": 3, "turbo": { "usage": "0 tokens ($0.00)", "activeKeys": 7, "revokedKeys": 3, "overQuotaKeys": 5, "trialKeys": 0, "proomptersInQueue": 0, "estimatedQueueTime": "no wait" }, "dall-e": { "usage": "0 tokens ($0.00)", "activeKeys": 7, "overQuotaKeys": 5, "proomptersInQueue": 0, "estimatedQueueTime": "no wait" }, "gpt4-turbo": { "usage": "3.49m tokens ($34.91)", "activeKeys": 7, "overQuotaKeys": 3, "proomptersInQueue": 0, "estimatedQueueTime": "no wait" }, "gpt4": { "usage": "0 tokens ($0.00)", "activeKeys": 7, "overQuotaKeys": 3, "proomptersInQueue": 0, "estimatedQueueTime": "no wait" }, "claude": { "usage": "3.3k tokens ($0.03)", "activeKeys": 4, "revokedKeys": 1, "overQuotaKeys": 37, "trialKeys": 0, "prefilledKeys": 5, "proomptersInQueue": 0, "estimatedQueueTime": "no wait" }, "claude-opus": { "usage": "163.1k tokens ($2.45)", "activeKeys": 4, "revokedKeys": 1, "overQuotaKeys": 37, "trialKeys": 0, "prefilledKeys": 5, "proomptersInQueue": 0, "estimatedQueueTime": "no wait" }, "aws-claude": { "usage": "0 tokens ($0.00)", "activeKeys": 1, "revokedKeys": 0, "sonnetKeys": 1, "haikuKeys": 1, "proomptersInQueue": 0, "estimatedQueueTime": "no wait" }, "aws-claude-opus": { "usage": "7.2k tokens ($0.11)", "activeKeys": 1, "revokedKeys": 0, "proomptersInQueue": 0, "estimatedQueueTime": "no wait" }, "azure-gpt4": { "usage": "0 tokens ($0.00)", "activeKeys": 1, "revokedKeys": 2, "proomptersInQueue": 0, "estimatedQueueTime": "no wait" }, "config": { "gatekeeper": "user_token", "maxIpsAutoBan": "true", "textModelRateLimit": "40", "imageModelRateLimit": "40", "maxContextTokensOpenAI": "4000000", "maxContextTokensAnthropic": "0", "maxOutputTokensOpenAI": "4000000", "maxOutputTokensAnthropic": "4000000", "allowAwsLogging": "false", "promptLogging": "true", "promptLoggingBackend": "file", "googleSheetsSpreadsheetId": "********", "tokenQuota": { "turbo": "0", "gpt4": "0", "gpt4-32k": "0", "gpt4-turbo": "0", "dall-e": "0", "claude": "0", "claude-opus": "0", "gemini-pro": "0", "mistral-tiny": "0", "mistral-small": "0", "mistral-medium": "0", "mistral-large": "0", "aws-claude": "0", "aws-claude-opus": "0", "azure-turbo": "0", "azure-gpt4": "0", "azure-gpt4-32k": "0", "azure-gpt4-turbo": "0", "azure-dall-e": "0" }, "allowOpenAIToolUsage": "false", "allowImagePrompts": "false" }, "build": "6dabc82 (modified) (main@khanon/oai-reverse-proxy)" }