@@ -7,7 +7,7 @@
 - Feature flag behavior
 - Atomic Lua script prevents race conditions
 
-All tests marked with @pytest.mark.rate_limit to run last and avoid quota interference.
+All tests marked with @pytest.mark.rate_limit to run sequentially for proper isolation.
 """
 
 import asyncio
@@ -18,6 +18,10 @@
 import httpx
 import pytest
 
+# Mark all tests in this module to run sequentially (no parallel execution)
+# This ensures proper Redis isolation between tests
+pytestmark = [pytest.mark.rate_limit]
+
 
 # Test rate limit configuration
 RATE_LIMIT = 1  # requests
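A note on the "Atomic Lua script" line in the docstring: issuing INCR and EXPIRE as two separate commands leaves a window in which two workers both read an under-limit counter and both proceed, whereas a single Lua script runs atomically on the Redis server and closes that window. The sketch below illustrates the pattern only; try_acquire, the key layout, and the window argument are hypothetical, not this repo's implementation.

import redis.asyncio as redis

# Fixed-window check-and-increment in one atomic server-side script:
# no interleaving between the INCR, the expiry, and the limit comparison.
RATE_LIMIT_LUA = """
local current = redis.call('INCR', KEYS[1])
if current == 1 then
    redis.call('PEXPIRE', KEYS[1], ARGV[2])
end
if current > tonumber(ARGV[1]) then
    return 0
end
return 1
"""

async def try_acquire(r: redis.Redis, key: str, limit: int, window_ms: int) -> bool:
    # eval(script, numkeys, key..., arg...) returns 1 when the request is admitted
    allowed = await r.eval(RATE_LIMIT_LUA, 1, key, limit, window_ms)
    return allowed == 1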
@@ -423,9 +427,9 @@ async def test_notion_connection_level_rate_limiting_isolated(
         verify_sync_stats_only_inserts(job1, "connection 1")
         verify_sync_stats_only_inserts(job2, "connection 2")
 
-        # Verify Redis has 2 separate keys (connection-level tracking)
+        # Verify Redis has exactly 2 separate keys (connection-level tracking)
         # Use monitoring data since keys expire quickly (6s TTL)
-        assert len(monitoring_data) == 2, f"Expected 2 connection-level keys during sync, got {len(monitoring_data)}: {list(monitoring_data.keys())}"
+        assert len(monitoring_data) == 2, f"Expected exactly 2 connection-level keys during sync, got {len(monitoring_data)}: {list(monitoring_data.keys())}"
         print(f"✅ Found 2 separate connection-level rate limit keys during sync")
 
         # Verify rate limits were enforced during sync (check monitoring data)
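The assertion above checks monitoring_data rather than querying Redis after the syncs finish, because the counters expire a few seconds after the last request (6s TTL here). A hedged sketch of how such a snapshot could be captured while the syncs run; the helper name, its arguments, and the 100ms sampling interval are assumptions, not the module's actual fixture.

import asyncio
import redis.asyncio as redis

async def monitor_rate_limit_keys(r: redis.Redis, pattern: str, data: dict, stop: asyncio.Event) -> None:
    # Poll matching keys faster than their TTL and record every counter value seen,
    # so the test can later assert on peak usage even after the keys have expired.
    while not stop.is_set():
        async for key in r.scan_iter(match=pattern):
            value = await r.get(key)
            if value is not None:
                data.setdefault(key, []).append(int(value))
        await asyncio.sleep(0.1)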
@@ -545,9 +549,10 @@ async def test_google_drive_org_level_rate_limiting_aggregated(
         if job2["status"] == "completed":
             verify_sync_stats_only_inserts(job2, "connection 2")
 
-        # Verify Redis has only 1 shared key (org-level tracking)
-        redis_keys = await get_redis_keys("source_rate_limit:*:google_drive:org:*")
-        assert len(redis_keys) == 1, f"Expected 1 org-level key, got {len(redis_keys)}: {redis_keys}"
+        # Verify Redis had exactly 1 shared key during sync (org-level tracking)
+        # Use monitoring data since keys expire quickly (3s TTL)
+        assert len(monitoring_data) == 1, f"Expected exactly 1 org-level key during sync, got {len(monitoring_data)}: {list(monitoring_data.keys())}"
+        print(f"✅ Found 1 shared org-level rate limit key during sync")
 
         # Verify rate limits were enforced during sync (check monitoring data)
         print(f"\n📊 Redis monitoring during sync:")
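The difference between the two tests comes down to how the rate-limit key is scoped: the Notion test expects one counter per connection, while the Google Drive test expects a single counter shared by the whole organization (the removed glob source_rate_limit:*:google_drive:org:* shows the org-scoped shape). An illustrative key builder follows; the exact segments, especially the second one, are assumptions.

def rate_limit_key(org_id: str, source: str, scope: str, scope_id: str) -> str:
    # connection scope -> isolated quotas, one counter per connection (the Notion test)
    # org scope        -> one counter shared by every connection in the org (the Google Drive test)
    return f"source_rate_limit:{org_id}:{source}:{scope}:{scope_id}"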
@@ -556,14 +561,10 @@ async def test_google_drive_org_level_rate_limiting_aggregated(
             print(f" {key}: max={max_counter}/{RATE_LIMIT}, samples={counters}")
             assert max_counter <= RATE_LIMIT, f"Rate limit exceeded during sync: {max_counter}/{RATE_LIMIT}"
 
-        # Verify the shared counter respected the limit
-        counter = await get_redis_counter(redis_keys[0])
-        print(f"\n📊 Shared org-level counter: {counter}/{RATE_LIMIT}")
-
         # With aggressive limits and concurrent syncs, expect some contention
         # At least one should complete (demonstrates shared quota)
         completed_count = sum(1 for job in [job1, job2] if job["status"] == "completed")
-        print(f"\n✅ Org-level aggregation verified: 1 shared quota, {completed_count}/2 syncs completed")
+        print(f"\n✅ Org-level aggregation verified: Shared quota enforced, {completed_count}/2 syncs completed")
 
     finally:
         # Cleanup
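A closing usage note tying the last assertion back to the try_acquire sketch earlier: with an org-scoped key and RATE_LIMIT = 1, two workers racing in the same window contend for one counter, so at most one is admitted, which is why the test only requires that at least one of the two syncs completes. Again a sketch with hypothetical names and a hard-coded key, not this test's helpers.

import asyncio
import redis.asyncio as redis

# try_acquire: the hypothetical helper from the Lua sketch earlier in these notes

async def demo_shared_quota() -> None:
    r = redis.Redis()
    key = "source_rate_limit:org-123:google_drive:org:org-123"  # hypothetical org-scoped key
    # Both calls race for the same counter; with limit=1 at most one wins per window.
    results = await asyncio.gather(
        try_acquire(r, key, limit=1, window_ms=3000),
        try_acquire(r, key, limit=1, window_ms=3000),
    )
    print(f"admitted: {sum(results)}/2")  # expected: admitted: 1/2

asyncio.run(demo_shared_quota())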