Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Add caching of catalog items in api #2220

Merged
merged 2 commits into from
Jan 22, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion catalog/api/Containerfile
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
FROM registry.access.redhat.com/ubi9/python-311:latest
FROM registry.access.redhat.com/ubi9/python-312:latest

USER 0
COPY . /tmp/src
Expand Down
61 changes: 55 additions & 6 deletions catalog/api/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,9 @@
ratings_api = os.environ.get('RATINGS_API', 'http://babylon-ratings.babylon-ratings.svc.cluster.local:8080')
reporting_api = os.environ.get('SALESFORCE_API', 'http://reporting-api.demo-reporting.svc.cluster.local:8080')
reporting_api_authorization_token = os.environ.get('SALESFORCE_AUTHORIZATION_TOKEN')
response_cache = {}
response_cache_clean_interval = int(os.environ.get('RESPONSE_CACHE_CLEAN_INTERVAL', 60))
response_cache_clean_task = None
session_cache = {}
session_lifetime = int(os.environ.get('SESSION_LIFETIME', 600))

Expand Down Expand Up @@ -83,7 +86,7 @@ async def api_proxy(method, url, headers, data=None, params=None):
)

async def on_startup(app):
global app_api_client, babylon_namespace, console_url, core_v1_api, custom_objects_api, redis_connection
global app_api_client, babylon_namespace, console_url, core_v1_api, custom_objects_api, redis_connection, response_cache_clean_task
if os.path.exists('/run/secrets/kubernetes.io/serviceaccount'):
kubernetes_asyncio.config.load_incluster_config()
if not babylon_namespace:
Expand Down Expand Up @@ -113,7 +116,11 @@ async def on_startup(app):
username = os.environ.get('REDIS_USER', 'default'),
)

response_cache_clean_task = asyncio.create_task(response_cache_clean())

async def on_cleanup(app):
    """Stop background work and close API clients before the app exits.

    Registered as an aiohttp cleanup callback; receives the application
    instance (unused).
    """
    # Guard: on_startup may have failed before creating the task, leaving
    # the module-level default of None in place.
    if response_cache_clean_task is not None:
        response_cache_clean_task.cancel()
        # Safe to await: response_cache_clean catches CancelledError and
        # returns, so this completes normally after cancellation.
        await response_cache_clean_task
    await app_api_client.close()

async def check_admin_access(api_client):
Expand Down Expand Up @@ -327,6 +334,12 @@ async def start_user_session(user, groups):
elif await check_user_support_access(api_client):
session['roles'].append('userSupport')

session['catalogNamespaces'] = await get_catalog_namespaces(api_client)

user_namespace, service_namespaces = await get_service_namespaces(user, api_client)
session['userNamespace'] = user_namespace
session['serviceNamespaces'] = service_namespaces

token = random_string(32)
if redis_connection:
await redis_connection.setex(token, session_lifetime, json.dumps(session, separators=(',',':')))
Expand All @@ -342,20 +355,18 @@ async def get_auth_session(request):
groups = await get_user_groups(user)
api_client, session, token = await start_user_session(user, groups)
try:
catalog_namespaces = await get_catalog_namespaces(api_client)
user_is_admin = session.get('admin', False)
roles = session.get('roles', [])
user_namespace, service_namespaces = await get_service_namespaces(user, api_client)
ret = {
"admin": user_is_admin,
"consoleURL": console_url,
"groups": groups,
"user": user['metadata']['name'],
"token": token,
"catalogNamespaces": catalog_namespaces,
"catalogNamespaces": session['catalogNamespaces'],
"lifetime": session_lifetime,
"serviceNamespaces": service_namespaces,
"userNamespace": user_namespace,
"serviceNamespaces": session['serviceNamespaces'],
"userNamespace": session['userNamespace'],
"roles": roles,
}
if not user_is_admin:
Expand Down Expand Up @@ -796,6 +807,32 @@ async def workshop_post(request):

raise web.HTTPConflict()

@routes.get("/apis/babylon.gpte.redhat.com/v1/namespaces/{namespace}/catalogitems")
@routes.get("/apis/babylon.gpte.redhat.com/v1/namespaces/{namespace}/catalogitems/{name}")
async def openshift_api_proxy_with_cache(request):
    """Proxy catalog item GET requests, serving recent responses from the
    in-memory response cache keyed on path plus query string.

    Raises:
        web.HTTPForbidden: when the requested namespace is not among the
            catalog namespaces recorded in the caller's session.
    """
    namespace = request.match_info.get('namespace')

    user = await get_proxy_user(request)
    session = await get_user_session(request, user)

    # Only allow namespaces listed in the user's session.
    for catalog_namespace in session.get('catalogNamespaces', []):
        if catalog_namespace['name'] == namespace:
            break
    else:
        raise web.HTTPForbidden()

    # NOTE(review): the cache key is path+query only, not per-user; this
    # assumes the upstream response is identical for every user who passes
    # the namespace check above — confirm openshift_api_proxy does not vary
    # its output by requesting user.
    cached = response_cache.get(request.path_qs)
    if cached is not None:
        cached_resp, cached_at = cached
        if time() - cached_at < response_cache_clean_interval:
            # Build a fresh Response from the cached one rather than
            # returning the same object to multiple requests.
            return web.Response(
                body=cached_resp.body,
                headers=cached_resp.headers,
                status=cached_resp.status,
            )

    resp = await openshift_api_proxy(request)
    # Cache only successful responses so a transient upstream error is not
    # replayed to every user for the full cache interval.
    if resp.status == 200:
        response_cache[request.path_qs] = (resp, time())
    return resp

@routes.delete("/{path:apis?/.*}")
@routes.get("/{path:apis?/.*}")
@routes.patch("/{path:apis?/.*}")
Expand Down Expand Up @@ -867,6 +904,18 @@ async def openshift_api_proxy(request):
finally:
await api_client.close()

async def response_cache_clean():
    """Background task: periodically evict stale response cache entries so
    the in-memory cache does not grow without bound."""
    try:
        while True:
            # Snapshot the items so eviction during iteration is safe.
            for path_qs, (_, cached_at) in list(response_cache.items()):
                if time() - cached_at > response_cache_clean_interval:
                    response_cache.pop(path_qs, None)
            await asyncio.sleep(response_cache_clean_interval)
    except asyncio.CancelledError:
        # Normal shutdown path: on_cleanup cancels and awaits this task.
        return


app = web.Application()
app.add_routes(routes)
Expand Down
2 changes: 2 additions & 0 deletions helm/templates/catalog/interfaces/api/deployment.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -49,6 +49,8 @@ spec:
value: babylon-catalog-redis
- name: REDIS_PORT
value: "6379"
- name: RESPONSE_CACHE_CLEAN_INTERVAL
value: "{{ $api.responseCacheCleanInterval }}"
- name: LOGGING_LEVEL
value: "{{ $api.loggingLevel }}"
image: {{ $api.image.repository }}:{{ $api.image.tag }}
Expand Down
1 change: 1 addition & 0 deletions helm/values.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -91,6 +91,7 @@ catalog:
limits:
cpu: "2"
memory: "2Gi"
responseCacheCleanInterval: 60
oauthProxy:
image:
pullPolicy: Always
Expand Down
Loading