feat: add cache flush API and fix cache key consistency
- Add PUT /cache/flush endpoint for selective cache management
- Fix critical cache key mismatch in cleanup_expired_index()
- Update index file detection to use full path instead of filename
- Bump version to 2.0.2

The key changes add a cache flush API that supports selective flushing by cache type (index, files, metrics) and fix a critical bug where cache keys were inconsistent between storage and cleanup operations, preventing proper cache invalidation.
commit d2ecc6b1a0 (parent e4013e6a2a)
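As a quick illustration of the new endpoint, a hedged usage sketch follows; the base URL, the remote name "pypi", and the absence of auth are assumptions, not part of this change:

```python
# Hypothetical client-side usage of PUT /cache/flush; host/port and remote name are assumed.
import requests

BASE_URL = "http://localhost:8000"  # assumed deployment address

# Flush only cached index entries for a single remote.
resp = requests.put(
    f"{BASE_URL}/cache/flush",
    params={"remote": "pypi", "cache_type": "index"},
    timeout=30,
)
resp.raise_for_status()
print(resp.json())  # {"remote": "pypi", "cache_type": "index", "flushed": {...}}

# Flush everything (Redis index/metrics keys and S3 objects) across all remotes.
resp = requests.put(f"{BASE_URL}/cache/flush", params={"cache_type": "all"}, timeout=60)
print(resp.json())
```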
@@ -1,6 +1,6 @@
 [project]
 name = "artifactapi"
-version = "2.0.1"
+version = "2.0.2"
 description = "Generic artifact caching system with support for various package managers"
 
 dependencies = [
@@ -66,9 +66,21 @@ class RedisCache:
             return
 
         try:
-            # Get the S3 key and remove it
-            s3_key = storage.get_object_key_from_path(remote_name, path)
-            if storage.exists(s3_key):
-                storage.client.delete_object(Bucket=storage.bucket, Key=s3_key)
+            # Construct the URL the same way as in the main flow
+            from .config import ConfigManager
+            import os
+            config_path = os.environ.get("CONFIG_PATH")
+            if config_path:
+                config = ConfigManager(config_path)
+                remote_config = config.get_remote_config(remote_name)
+                if remote_config:
+                    base_url = remote_config.get("base_url")
+                    if base_url:
+                        # Construct URL the same way as construct_remote_url
+                        remote_url = f"{base_url.rstrip('/')}/{path}"
+                        # Use URL-based key (same as cache_single_artifact)
+                        s3_key = storage.get_object_key(remote_url)
+                        if storage.exists(s3_key):
+                            storage.client.delete_object(Bucket=storage.bucket, Key=s3_key)
         except Exception:
             pass
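The bug this hunk fixes: the write path derives the S3 key from the full remote URL, while cleanup derived it from (remote_name, path), so the two keys never matched and expired index objects were never actually deleted. A minimal sketch of the mismatch, assuming get_object_key() hashes the remote URL; the hashing scheme and example values here are illustrative, not the project's actual implementation:

```python
import hashlib

def get_object_key(remote_url: str) -> str:
    # Hypothetical URL-based key derivation: what cache_single_artifact is assumed to use.
    return hashlib.sha256(remote_url.encode()).hexdigest()

def get_object_key_from_path(remote_name: str, path: str) -> str:
    # Hypothetical path-based key derivation: what cleanup used before this fix.
    return hashlib.sha256(f"{remote_name}:{path}".encode()).hexdigest()

base_url = "https://pypi.org/simple"  # assumed remote base_url
path = "requests/"                    # assumed artifact path
remote_url = f"{base_url.rstrip('/')}/{path}"

# The two schemes produce different keys for the same artifact, so a cleanup that
# looks up the path-based key never finds (or deletes) the stored object.
print(get_object_key(remote_url) == get_object_key_from_path("pypi", path))  # False
```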
@@ -28,7 +28,7 @@ logging.basicConfig(
 )
 logger = logging.getLogger(__name__)
 
-app = FastAPI(title="Artifact Storage API", version="2.0.1")
+app = FastAPI(title="Artifact Storage API", version="2.0.2")
 
 # Initialize components using config
 config_path = os.environ.get("CONFIG_PATH")
@@ -63,6 +63,81 @@ def health_check():
     return {"status": "healthy"}
 
 
+@app.put("/cache/flush")
+def flush_cache(
+    remote: str = Query(default=None, description="Specific remote to flush (optional)"),
+    cache_type: str = Query(default="all", description="Type to flush: 'all', 'index', 'files', 'metrics'")
+):
+    """Flush cache entries for specified remote or all remotes"""
+    try:
+        result = {
+            "remote": remote,
+            "cache_type": cache_type,
+            "flushed": {
+                "redis_keys": 0,
+                "s3_objects": 0,
+                "operations": []
+            }
+        }
+
+        # Flush Redis entries based on cache_type
+        if cache_type in ["all", "index", "metrics"] and cache.available and cache.client:
+            patterns = []
+
+            if cache_type in ["all", "index"]:
+                if remote:
+                    patterns.append(f"index:{remote}:*")
+                else:
+                    patterns.append("index:*")
+
+            if cache_type in ["all", "metrics"]:
+                if remote:
+                    patterns.append(f"metrics:*:{remote}")
+                else:
+                    patterns.append("metrics:*")
+
+            for pattern in patterns:
+                keys = cache.client.keys(pattern)
+                if keys:
+                    cache.client.delete(*keys)
+                    result["flushed"]["redis_keys"] += len(keys)
+                    logger.info(f"Cache flush: Deleted {len(keys)} Redis keys matching '{pattern}'")
+
+            if result["flushed"]["redis_keys"] > 0:
+                result["flushed"]["operations"].append(f"Deleted {result['flushed']['redis_keys']} Redis keys")
+
+        # Flush S3 objects if requested
+        if cache_type in ["all", "files"]:
+            try:
+                response = storage.client.list_objects_v2(Bucket=storage.bucket)
+                if 'Contents' in response:
+                    objects_to_delete = [obj['Key'] for obj in response['Contents']]
+
+                    for key in objects_to_delete:
+                        try:
+                            storage.client.delete_object(Bucket=storage.bucket, Key=key)
+                            result["flushed"]["s3_objects"] += 1
+                        except Exception as e:
+                            logger.warning(f"Failed to delete S3 object {key}: {e}")
+
+                    if objects_to_delete:
+                        result["flushed"]["operations"].append(f"Deleted {len(objects_to_delete)} S3 objects")
+                        logger.info(f"Cache flush: Deleted {len(objects_to_delete)} S3 objects")
+
+            except Exception as e:
+                result["flushed"]["operations"].append(f"S3 flush failed: {str(e)}")
+                logger.error(f"Cache flush S3 error: {e}")
+
+        if not result["flushed"]["operations"]:
+            result["flushed"]["operations"].append("No cache entries found to flush")
+
+        return result
+
+    except Exception as e:
+        logger.error(f"Cache flush error: {e}")
+        raise HTTPException(status_code=500, detail=f"Cache flush failed: {str(e)}")
+
+
 async def construct_remote_url(remote_name: str, path: str) -> str:
     remote_config = config.get_remote_config(remote_name)
     if not remote_config:
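One operational note on the S3 step: list_objects_v2 returns at most 1,000 keys per call, so the flush above only sees the first page of a large bucket. A hedged sketch of a paginated variant (not part of this commit; it assumes the same boto3 client and bucket name used above):

```python
def flush_all_s3_objects(client, bucket: str) -> int:
    """Delete every object in the bucket, page by page; returns the number deleted."""
    deleted = 0
    paginator = client.get_paginator("list_objects_v2")
    for page in paginator.paginate(Bucket=bucket):
        contents = page.get("Contents", [])
        if not contents:
            continue
        # delete_objects accepts up to 1,000 keys per request, matching the page size.
        client.delete_objects(
            Bucket=bucket,
            Delete={"Objects": [{"Key": obj["Key"]} for obj in contents]},
        )
        deleted += len(contents)
    return deleted
```

A similar caveat applies to the Redis side: KEYS blocks the server while it scans the keyspace, so scan_iter(match=pattern) is generally preferred on large instances.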
@@ -204,7 +279,7 @@ async def get_artifact(remote_name: str, path: str):
 
         # For index files, check Redis TTL validity
         filename = os.path.basename(path)
-        is_index = cache.is_index_file(filename)
+        is_index = cache.is_index_file(path)  # Check full path, not just filename
 
         if cached_key and is_index:
             # Index file exists, but check if it's still valid
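Why the full path matters: index detection that only sees the basename cannot match patterns anchored on a directory component. A hypothetical illustration; the patterns and the is_index_file body below are assumptions for the example, not the project's actual rules:

```python
import fnmatch
import os

# Hypothetical index patterns; some are anchored on a directory, not just a filename.
INDEX_PATTERNS = ["*/repodata/*.xml", "*/dists/*/Release", "*.html"]

def is_index_file(path: str) -> bool:
    return any(fnmatch.fnmatch(path, pattern) for pattern in INDEX_PATTERNS)

path = "centos/8/os/repodata/repomd.xml"
print(is_index_file(path))                    # True: full path matches */repodata/*.xml
print(is_index_file(os.path.basename(path)))  # False: "repomd.xml" alone matches nothing
```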