fix: re-add Virtual Key auth checks on Vertex AI pass-through endpoints
ishaan-jaff committed Sep 22, 2024
1 parent e4f309d commit 8a30d9b
Showing 3 changed files with 49 additions and 3 deletions.
docs/my-website/docs/proxy/configs.md (3 changes: 2 additions & 1 deletion)
@@ -792,7 +792,8 @@ general_settings:
"alerting": [
"string"
],
"alerting_threshold": 0
"alerting_threshold": 0,
"use_client_credentials_pass_through_routes" : "boolean", # use client credentials for all pass through routes like "/vertex-ai", /bedrock/. When this is True Virtual Key auth will not be applied on these endpoints" https://docs.litellm.ai/docs/pass_through/vertex_ai
}
}
```
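For reference, a minimal `config.yaml` sketch for opting into client-credential pass-through. The `use_client_credentials_pass_through_routes` key comes from this commit; the other values are illustrative placeholders:

```yaml
# Hedged sketch of a LiteLLM proxy config; only the last key is from this commit.
general_settings:
  master_key: sk-1234  # illustrative placeholder
  # Premium/Enterprise only: when true, Virtual Key auth is skipped on
  # pass-through provider routes (e.g. /vertex-ai, /bedrock) and the
  # client's own provider credentials are used instead.
  use_client_credentials_pass_through_routes: true
```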
litellm/proxy/auth/auth_utils.py (21 changes: 19 additions & 2 deletions)
@@ -354,9 +354,26 @@ def is_pass_through_provider_route(route: str) -> bool:
 def should_run_auth_on_pass_through_provider_route(route: str) -> bool:
     """
-    Use this to decide if the rest of the LiteLLM Virtual Key auth checks should run on /vertex-ai/{endpoint} routes
+    Use this to decide if the rest of the LiteLLM Virtual Key auth checks should run on provider pass-through routes,
+    e.g. /vertex-ai/{endpoint} routes.
+
+    Run Virtual Key auth if the following is true:
+    - the user is a premium_user
+    - the user has NOT enabled general_settings.use_client_credentials_pass_through_routes
     """
-    # by default we do not run virtual key auth checks on /vertex-ai/{endpoint} routes
-    return False
+    from litellm.proxy.proxy_server import general_settings, premium_user
+
+    if premium_user is not True:
+        return False
+
+    # the premium user has opted into using client credentials instead
+    if (
+        general_settings.get("use_client_credentials_pass_through_routes", False)
+        is True
+    ):
+        return False
+
+    # only enabled for LiteLLM Enterprise
+    return True
 
 
 def _has_user_setup_sso():
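To see how the new gate is meant to compose with `is_pass_through_provider_route`, here is a minimal sketch of a hypothetical call site (the real wiring is in the proxy's auth flow, which is not shown in this diff):

```python
# Hypothetical harness, not LiteLLM's actual call site: Virtual Key checks run
# on pass-through provider routes only for premium users who have NOT opted
# into client-credential pass-through.
from litellm.proxy.auth.auth_utils import (
    is_pass_through_provider_route,
    should_run_auth_on_pass_through_provider_route,
)

route = "/vertex-ai/publishers/google/models/gemini-1.5-flash-001:generateContent"

if is_pass_through_provider_route(route):
    if should_run_auth_on_pass_through_provider_route(route):
        # premium user with the opt-out flag unset -> run Virtual Key auth
        print("run Virtual Key auth for", route)
    else:
        # non-premium user, or use_client_credentials_pass_through_routes=True
        print("skip Virtual Key auth for", route)
```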
litellm/tests/test_key_generate_prisma.py (28 changes: 28 additions & 0 deletions)
@@ -3221,3 +3221,31 @@ async def test_key_list_unsupported_params(prisma_client):
         error_str = str(e.message)
         assert "Unsupported parameter" in error_str
         pass


+@pytest.mark.asyncio
+async def test_auth_vertex_ai_route(prisma_client):
+    """
+    If the user is a premium user and a vertex-ai route is used, assert that the Virtual Key auth checks run.
+    """
+    litellm.set_verbose = True
+    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
+    setattr(litellm.proxy.proxy_server, "premium_user", True)
+    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
+    await litellm.proxy.proxy_server.prisma_client.connect()
+
+    route = "/vertex-ai/publishers/google/models/gemini-1.5-flash-001:generateContent"
+    request = Request(scope={"type": "http"})
+    request._url = URL(url=route)
+    request._headers = {"Authorization": "Bearer sk-12345"}
+    try:
+        await user_api_key_auth(request=request, api_key="Bearer " + "sk-12345")
+        pytest.fail("Expected this call to fail. An invalid virtual key was passed.")
+    except Exception as e:
+        print(vars(e))
+        print("error str=", str(e.message))
+        error_str = str(e.message)
+        assert e.code == "401"
+        assert "Invalid proxy server token passed" in error_str
+
+        pass
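A companion test for the opt-out path might look like the sketch below. This is hypothetical and not part of this commit; it assumes `general_settings` is a mutable dict on the `proxy_server` module and that `user_api_key_auth` returns without raising when the Virtual Key checks are skipped:

```python
@pytest.mark.asyncio
async def test_auth_vertex_ai_route_client_credentials(prisma_client):
    """
    Hypothetical companion test (not in this commit): a premium user who has
    enabled use_client_credentials_pass_through_routes should skip the Virtual
    Key checks, so an arbitrary key on a /vertex-ai route should not raise.
    """
    setattr(litellm.proxy.proxy_server, "prisma_client", prisma_client)
    setattr(litellm.proxy.proxy_server, "premium_user", True)
    setattr(litellm.proxy.proxy_server, "master_key", "sk-1234")
    # assumption: general_settings is a module-level dict we can mutate
    litellm.proxy.proxy_server.general_settings[
        "use_client_credentials_pass_through_routes"
    ] = True
    await litellm.proxy.proxy_server.prisma_client.connect()

    route = "/vertex-ai/publishers/google/models/gemini-1.5-flash-001:generateContent"
    request = Request(scope={"type": "http"})
    request._url = URL(url=route)
    request._headers = {"Authorization": "Bearer sk-12345"}
    # an unknown key: auth should pass through without Virtual Key validation
    result = await user_api_key_auth(request=request, api_key="Bearer sk-12345")
    assert result is not None
```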
