Coverage for src / taipanstack / core / optimizations.py: 100%
116 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-03-23 14:54 +0000
1"""
2Python Version-Specific Optimization Profiles.
4This module provides optimization strategies tailored to different Python
5versions, enabling performance improvements while maintaining stability.
7Following Stack pillars: Security, Stability, Simplicity, Scalability, Compatibility.
8"""
import gc
import logging
import os
from dataclasses import dataclass, replace

from taipanstack.core.compat import (
    PY312,
    PY313,
    PY314,
    get_features,
    get_optimization_level,
    is_experimental_enabled,
)
# Public API of this module; everything else is an internal helper.
__all__ = [
    "OptimizationProfile",
    "OptimizationResult",
    "apply_optimizations",
    "get_optimization_profile",
]

# Module-level logger; handlers/levels are configured by the application.
logger = logging.getLogger(__name__)
34# =============================================================================
35# Optimization Profile
36# =============================================================================
# Optimization Levels (compared against get_optimization_level() in
# get_optimization_profile()).
OPT_LEVEL_NONE = 0  # No tuning: fall back to the conservative 3.11 baseline.
OPT_LEVEL_SAFE = 1  # Version-appropriate safe profile.
OPT_LEVEL_AGGRESSIVE = 2  # With experimental opt-in, enables experimental features.
@dataclass(frozen=True, slots=True)
class OptimizationProfile:
    """Version-specific optimization settings.

    This profile defines recommended settings based on Python version
    and available features. All settings follow the stability-first
    principle. Instances are immutable (frozen=True) and compact
    (slots=True); they are consumed by apply_optimizations() and the
    should_use_*() / get_recommended_thread_pool_size() helpers.
    """

    # GC tuning (passed to gc.set_threshold() by _apply_gc_tuning)
    gc_threshold_0: int = 700  # Default: 700
    gc_threshold_1: int = 10  # Default: 10
    gc_threshold_2: int = 10  # Default: 10
    gc_freeze_enabled: bool = False  # Freeze objects after init (gc.freeze, 3.12+)

    # Threading (used by get_recommended_thread_pool_size)
    thread_pool_multiplier: float = 1.0  # Multiplier for CPU count
    max_thread_pool_size: int = 32  # Absolute maximum

    # Memory (advisory flags read by should_use_* helpers)
    prefer_slots: bool = True  # Use __slots__ in classes
    use_frozen_dataclasses: bool = True  # Prefer frozen dataclasses

    # Code patterns (advisory; minimum Python version noted)
    prefer_match_statements: bool = False  # 3.10+
    prefer_exception_groups: bool = False  # 3.11+
    prefer_type_params: bool = False  # 3.12+

    # Performance hints
    enable_perf_hints: bool = False  # JIT hints, etc.
    aggressive_inlining: bool = False  # More aggressive optimizations

    # Experimental (requires explicit opt-in; see _apply_experimental)
    enable_experimental: bool = False
80@dataclass(frozen=True, slots=True)
81class OptimizationResult:
82 """Result of applying optimizations."""
84 success: bool
85 applied: tuple[str, ...]
86 skipped: tuple[str, ...]
87 errors: tuple[str, ...]
89 def to_dict(self) -> dict[str, object]:
90 """Convert to dictionary."""
91 return {
92 "success": self.success,
93 "applied": list(self.applied),
94 "skipped": list(self.skipped),
95 "errors": list(self.errors),
96 }
99# =============================================================================
100# Version-Specific Profiles
101# =============================================================================
# Python 3.11 - Stable baseline. Also used as the fallback for
# OPT_LEVEL_NONE in get_optimization_profile().
_PROFILE_311 = OptimizationProfile(
    gc_threshold_0=700,
    gc_threshold_1=10,
    gc_threshold_2=10,
    gc_freeze_enabled=False,
    thread_pool_multiplier=1.0,
    max_thread_pool_size=32,
    prefer_slots=True,
    use_frozen_dataclasses=True,
    prefer_match_statements=True,
    prefer_exception_groups=True,
    prefer_type_params=False,  # Type-param syntax needs 3.12+
    enable_perf_hints=False,
    aggressive_inlining=False,
    enable_experimental=False,
)
# Python 3.12 - Enhanced
_PROFILE_312 = OptimizationProfile(
    gc_threshold_0=800,  # Slightly higher due to better GC
    gc_threshold_1=10,
    gc_threshold_2=10,
    gc_freeze_enabled=True,  # Safe to use
    thread_pool_multiplier=1.0,
    max_thread_pool_size=32,
    prefer_slots=True,
    use_frozen_dataclasses=True,
    prefer_match_statements=True,
    prefer_exception_groups=True,
    prefer_type_params=True,
    enable_perf_hints=False,
    aggressive_inlining=False,
    enable_experimental=False,
)
# Python 3.13 - Modern (with experimental options available)
_PROFILE_313 = OptimizationProfile(
    gc_threshold_0=900,  # Higher with mimalloc
    gc_threshold_1=15,
    gc_threshold_2=15,
    gc_freeze_enabled=True,
    thread_pool_multiplier=1.5,  # Can use more threads with better GIL
    max_thread_pool_size=48,
    prefer_slots=True,
    use_frozen_dataclasses=True,
    prefer_match_statements=True,
    prefer_exception_groups=True,
    prefer_type_params=True,
    enable_perf_hints=True,  # JIT hints available
    aggressive_inlining=False,
    enable_experimental=False,  # Requires explicit opt-in
)
# Python 3.14 - Cutting edge
_PROFILE_314 = OptimizationProfile(
    gc_threshold_0=1000,  # Optimized incremental GC
    gc_threshold_1=20,
    gc_threshold_2=20,
    gc_freeze_enabled=True,
    thread_pool_multiplier=2.0,  # Free-threading ready
    max_thread_pool_size=64,
    prefer_slots=True,
    use_frozen_dataclasses=True,
    prefer_match_statements=True,
    prefer_exception_groups=True,
    prefer_type_params=True,
    enable_perf_hints=True,
    aggressive_inlining=True,  # Tail-call interpreter
    enable_experimental=False,  # Requires explicit opt-in
)
# Module-level cache; populated lazily by get_optimization_profile().
_cached_optimization_profile: OptimizationProfile | None = None


def get_optimization_profile(*, force_refresh: bool = False) -> OptimizationProfile:
    """Get the optimization profile for the current Python version.

    The result is cached at module level; pass ``force_refresh=True`` to
    re-detect features, the experimental flag, and the optimization level.

    Args:
        force_refresh: If True, re-detect instead of using cache.

    Returns:
        OptimizationProfile suitable for the runtime environment.
    """
    global _cached_optimization_profile  # noqa: PLW0603

    if _cached_optimization_profile is not None and not force_refresh:
        return _cached_optimization_profile

    _ = get_features(force_refresh=force_refresh)  # Warm up cache, validate version
    experimental = is_experimental_enabled(force_refresh=force_refresh)
    opt_level = get_optimization_level(force_refresh=force_refresh)

    # Select base profile by version (newest first).
    if PY314:
        profile = _PROFILE_314
    elif PY313:
        profile = _PROFILE_313
    elif PY312:
        profile = _PROFILE_312
    else:
        profile = _PROFILE_311

    # Adjust for optimization level. A plain if/elif chain replaces the
    # former ``match`` whose cases were guard-only equality tests (bare
    # names in ``case`` patterns capture rather than compare, which is why
    # the guards were needed in the first place).
    if opt_level == OPT_LEVEL_NONE:
        # Minimal optimizations - use the conservative 3.11 baseline.
        _cached_optimization_profile = _PROFILE_311
    elif opt_level == OPT_LEVEL_AGGRESSIVE and experimental:
        # Aggressive mode - same base profile with experimental enabled.
        # dataclasses.replace() copies every field automatically, so this
        # stays correct if fields are later added to OptimizationProfile
        # (the previous hand-written 14-field copy would silently drop them).
        _cached_optimization_profile = replace(profile, enable_experimental=True)
    else:
        _cached_optimization_profile = profile

    return _cached_optimization_profile
237# =============================================================================
238# Apply Optimizations
239# =============================================================================
242def _apply_gc_tuning(
243 profile: OptimizationProfile,
244 applied: list[str],
245 errors: list[str],
246) -> None:
247 """Apply Garbage Collector tuning."""
248 try:
249 current = gc.get_threshold()
250 gc.set_threshold(
251 profile.gc_threshold_0,
252 profile.gc_threshold_1,
253 profile.gc_threshold_2,
254 )
255 applied.append(
256 f"gc_threshold: {current} -> "
257 f"({profile.gc_threshold_0}, {profile.gc_threshold_1}, "
258 f"{profile.gc_threshold_2})"
259 )
260 except Exception as e:
261 errors.append(f"gc_threshold: {e}")
def _apply_gc_freeze(
    profile: "OptimizationProfile",
    freeze_after: bool,
    applied: list[str],
    skipped: list[str],
    errors: list[str],
) -> None:
    """Move surviving objects out of GC tracking via gc.freeze() (3.12+).

    Runs only when the profile enables it AND the caller requested it;
    on older interpreters the skip is recorded instead.
    """
    if not profile.gc_freeze_enabled:
        return
    if not PY312:
        skipped.append("gc_freeze: requires Python 3.12+")
        return
    if not freeze_after:
        return
    try:
        gc.freeze()
        applied.append("gc_freeze: enabled")
    except Exception as e:
        errors.append(f"gc_freeze: {e}")
def _apply_experimental(
    profile: "OptimizationProfile",
    applied: list[str],
    skipped: list[str],
) -> None:
    """Record which experimental features are available; warn when opted in."""
    if not profile.enable_experimental:
        skipped.append("experimental: requires STACK_ENABLE_EXPERIMENTAL=1")
        return
    features = get_features()
    if features.has_jit:  # pragma: no branch
        applied.append("jit: available")
    if features.has_free_threading:  # pragma: no branch
        applied.append("free_threading: available")
    # Loud warning: experimental modes trade stability for speed.
    logger.warning(
        "EXPERIMENTAL FEATURES ENABLED: Stability and security may be affected."
    )
def apply_optimizations(
    *,
    profile: OptimizationProfile | None = None,
    apply_gc: bool = True,
    freeze_after: bool = False,
    force_refresh: bool = False,
) -> OptimizationResult:
    """Apply runtime optimizations based on profile.

    Designed to be called once at application startup; the tweaks it makes
    are safe and reversible.

    Args:
        profile: Optimization profile to use (auto-detected if None).
        apply_gc: Whether to apply GC tuning.
        freeze_after: Whether to freeze objects after applying.
        force_refresh: Whether to force re-detection of profile if none is provided.

    Returns:
        OptimizationResult with details of what was applied.
    """
    active = (
        profile
        if profile is not None
        else get_optimization_profile(force_refresh=force_refresh)
    )

    applied: list[str] = []
    skipped: list[str] = []
    errors: list[str] = []

    # GC threshold tuning (optional).
    if not apply_gc:
        skipped.append("gc_threshold: disabled")
    else:
        _apply_gc_tuning(active, applied, errors)

    # gc.freeze - needs 3.12+ and an explicit request.
    _apply_gc_freeze(active, freeze_after, applied, skipped, errors)

    # Performance hints are advisory; just record the setting.
    if active.enable_perf_hints:
        applied.append("perf_hints: enabled (JIT-aware patterns)")
    else:
        skipped.append("perf_hints: disabled")

    # Experimental features (opt-in only; logs a warning when enabled).
    _apply_experimental(active, applied, skipped)

    # Summarize the outcome in the log before returning.
    if applied:
        logger.debug("Applied optimizations: %s", ", ".join(applied))
    if skipped:  # pragma: no branch
        logger.debug("Skipped optimizations: %s", ", ".join(skipped))
    if errors:
        logger.warning("Optimization errors: %s", ", ".join(errors))

    return OptimizationResult(
        success=not errors,
        applied=tuple(applied),
        skipped=tuple(skipped),
        errors=tuple(errors),
    )
365# =============================================================================
366# Utility Functions
367# =============================================================================
def get_recommended_thread_pool_size(*, force_refresh: bool = False) -> int:
    """Get recommended thread pool size based on version and features.

    Args:
        force_refresh: If True, re-detect instead of using cache.

    Returns:
        Recommended number of threads for ThreadPoolExecutor.
    """
    active = get_optimization_profile(force_refresh=force_refresh)
    # os.cpu_count() can return None on exotic platforms; assume 4 cores then.
    cores = os.cpu_count() or 4
    suggested = int(cores * active.thread_pool_multiplier)
    # Never exceed the profile's hard cap.
    return min(suggested, active.max_thread_pool_size)
def should_use_slots() -> bool:
    """Check if __slots__ should be used for new classes.

    Returns:
        True if slots are recommended for current version.
    """
    active = get_optimization_profile()
    return active.prefer_slots
def should_use_frozen_dataclass() -> bool:
    """Check if frozen=True should be used for dataclasses.

    Returns:
        True if frozen dataclasses are recommended.
    """
    active = get_optimization_profile()
    return active.use_frozen_dataclasses