Coverage for src / taipanstack / core / optimizations.py: 100%

115 statements  

« prev     ^ index     » next       coverage.py v7.13.5, created at 2026-05-12 21:18 +0000

1""" 

2Python Version-Specific Optimization Profiles. 

3 

4This module provides optimization strategies tailored to different Python 

5versions, enabling performance improvements while maintaining stability. 

6 

7Following Stack pillars: Security, Stability, Simplicity, Scalability, Compatibility. 

8""" 

9 

import gc
import logging
import os
from dataclasses import dataclass, replace

from taipanstack.core.compat import (
    PY312,
    PY313,
    PY314,
    get_features,
    get_optimization_level,
    is_experimental_enabled,
)

23 

24__all__ = [ 

25 "OptimizationProfile", 

26 "OptimizationResult", 

27 "apply_optimizations", 

28 "get_optimization_profile", 

29] 

30 

31logger = logging.getLogger(__name__) 

32 

33 

34# ============================================================================= 

35# Optimization Profile 

36# ============================================================================= 

37 

38 

39# Optimization Levels 

40OPT_LEVEL_NONE = 0 

41OPT_LEVEL_AGGRESSIVE = 2 

42 

43 

@dataclass(frozen=True, slots=True)
class OptimizationProfile:
    """Version-specific optimization settings.

    This profile defines recommended settings based on Python version
    and available features. All settings follow the stability-first principle.

    Instances are immutable (frozen) and slotted, so the module-level
    profile constants are cheap and safe to share.
    """

    # GC tuning: the three thresholds are passed to gc.set_threshold()
    # by _apply_gc_tuning(). CPython's stock values are (700, 10, 10).
    gc_threshold_0: int = 700  # Default: 700
    gc_threshold_1: int = 10  # Default: 10
    gc_threshold_2: int = 10  # Default: 10
    gc_freeze_enabled: bool = False  # Freeze objects after init (gc.freeze())

    # Threading: consumed by get_recommended_thread_pool_size().
    thread_pool_multiplier: float = 1.0  # Multiplier for CPU count
    max_thread_pool_size: int = 32  # Absolute maximum

    # Memory: advisory flags (not read within this module).
    prefer_slots: bool = True  # Use __slots__ in classes
    use_frozen_dataclasses: bool = True  # Prefer frozen dataclasses

    # Code patterns: advisory flags keyed to minimum Python versions
    # (not read within this module).
    prefer_match_statements: bool = False  # 3.10+
    prefer_exception_groups: bool = False  # 3.11+
    prefer_type_params: bool = False  # 3.12+

    # Performance hints: enable_perf_hints is logged by apply_optimizations().
    enable_perf_hints: bool = False  # JIT hints, etc.
    aggressive_inlining: bool = False  # More aggressive optimizations

    # Experimental: only _build_aggressive_profile() sets this True, and only
    # after is_experimental_enabled() confirms the explicit opt-in.
    enable_experimental: bool = False

77 

78 

79@dataclass(frozen=True, slots=True) 

80class OptimizationResult: 

81 """Result of applying optimizations.""" 

82 

83 success: bool 

84 applied: tuple[str, ...] 

85 skipped: tuple[str, ...] 

86 errors: tuple[str, ...] 

87 

88 def to_dict(self) -> dict[str, object]: 

89 """Convert to dictionary.""" 

90 return { 

91 "success": self.success, 

92 "applied": list(self.applied), 

93 "skipped": list(self.skipped), 

94 "errors": list(self.errors), 

95 } 

96 

97 

98# ============================================================================= 

99# Version-Specific Profiles 

100# ============================================================================= 

101 

# Python 3.11 - Stable baseline.
# Uses CPython's stock GC thresholds and enables only the language-pattern
# flags that 3.11 supports (match statements, exception groups).
_PROFILE_311 = OptimizationProfile(
    gc_threshold_0=700,
    gc_threshold_1=10,
    gc_threshold_2=10,
    gc_freeze_enabled=False,
    thread_pool_multiplier=1.0,
    max_thread_pool_size=32,
    prefer_slots=True,
    use_frozen_dataclasses=True,
    prefer_match_statements=True,
    prefer_exception_groups=True,
    prefer_type_params=False,
    enable_perf_hints=False,
    aggressive_inlining=False,
    enable_experimental=False,
)

# Python 3.12 - Enhanced: raises the gen-0 threshold and allows gc.freeze().
_PROFILE_312 = OptimizationProfile(
    gc_threshold_0=800,  # Slightly higher due to better GC
    gc_threshold_1=10,
    gc_threshold_2=10,
    gc_freeze_enabled=True,  # Safe to use
    thread_pool_multiplier=1.0,
    max_thread_pool_size=32,
    prefer_slots=True,
    use_frozen_dataclasses=True,
    prefer_match_statements=True,
    prefer_exception_groups=True,
    prefer_type_params=True,
    enable_perf_hints=True,
    aggressive_inlining=False,
    enable_experimental=False,
)

# Python 3.13 - Modern (with experimental options available).
# Larger thread pool and perf hints; experimental flags still default off.
_PROFILE_313 = OptimizationProfile(
    gc_threshold_0=900,  # Higher with mimalloc
    gc_threshold_1=15,
    gc_threshold_2=15,
    gc_freeze_enabled=True,
    thread_pool_multiplier=1.5,  # Can use more threads with better GIL
    max_thread_pool_size=48,
    prefer_slots=True,
    use_frozen_dataclasses=True,
    prefer_match_statements=True,
    prefer_exception_groups=True,
    prefer_type_params=True,
    enable_perf_hints=True,  # JIT hints available
    aggressive_inlining=False,
    enable_experimental=False,  # Requires explicit opt-in
)

# Python 3.14 - Cutting edge: most permissive thresholds and pool sizes.
_PROFILE_314 = OptimizationProfile(
    gc_threshold_0=1000,  # Optimized incremental GC
    gc_threshold_1=20,
    gc_threshold_2=20,
    gc_freeze_enabled=True,
    thread_pool_multiplier=2.0,  # Free-threading ready
    max_thread_pool_size=64,
    prefer_slots=True,
    use_frozen_dataclasses=True,
    prefer_match_statements=True,
    prefer_exception_groups=True,
    prefer_type_params=True,
    enable_perf_hints=True,
    aggressive_inlining=True,  # Tail-call interpreter
    enable_experimental=False,  # Requires explicit opt-in
)


# Cache for get_optimization_profile(); populated on first call and
# recomputed only when force_refresh=True is passed.
_cached_optimization_profile: OptimizationProfile | None = None

176 

177 

def _get_base_profile() -> OptimizationProfile:
    """Select base profile by version."""
    # Newest version first, so the most capable matching profile wins;
    # anything older than 3.12 gets the stable 3.11 baseline.
    version_table = (
        (PY314, _PROFILE_314),
        (PY313, _PROFILE_313),
        (PY312, _PROFILE_312),
    )
    for is_active, candidate in version_table:
        if is_active:
            return candidate
    return _PROFILE_311

187 

188 

def _build_aggressive_profile(profile: OptimizationProfile) -> OptimizationProfile:
    """Build an aggressive optimization profile with experimental features.

    Args:
        profile: Base profile to derive the aggressive variant from.

    Returns:
        A copy of ``profile`` with ``enable_experimental`` set to True.
    """
    # dataclasses.replace() copies every field automatically, unlike the
    # previous field-by-field constructor call, which would silently drop
    # any field later added to OptimizationProfile.
    return replace(profile, enable_experimental=True)

207 

208 

def get_optimization_profile(*, force_refresh: bool = False) -> OptimizationProfile:
    """Get the optimization profile for the current Python version.

    Args:
        force_refresh: If True, re-detect instead of using cache.

    Returns:
        OptimizationProfile suitable for the runtime environment.

    """
    global _cached_optimization_profile  # noqa: PLW0603

    if not force_refresh and _cached_optimization_profile is not None:
        return _cached_optimization_profile

    # Warm the compat caches and read the environment-driven knobs.
    _ = get_features(force_refresh=force_refresh)
    experimental = is_experimental_enabled(force_refresh=force_refresh)
    opt_level = get_optimization_level(force_refresh=force_refresh)

    base = _get_base_profile()

    # Adjust for optimization level.
    if opt_level == OPT_LEVEL_NONE:
        # Tuning disabled: always fall back to the conservative baseline.
        selected = _PROFILE_311
    elif opt_level == OPT_LEVEL_AGGRESSIVE and experimental:
        # Aggressive tuning only takes effect with the experimental opt-in.
        selected = _build_aggressive_profile(base)
    else:
        selected = base

    _cached_optimization_profile = selected
    return selected

240 

241 

242# ============================================================================= 

243# Apply Optimizations 

244# ============================================================================= 

245 

246 

def _apply_gc_tuning(
    profile: OptimizationProfile,
    applied: list[str],
    errors: list[str],
) -> None:
    """Apply Garbage Collector tuning.

    Records the old -> new thresholds in ``applied`` on success, or the
    failure message in ``errors``.
    """
    try:
        previous = gc.get_threshold()
        requested = (
            profile.gc_threshold_0,
            profile.gc_threshold_1,
            profile.gc_threshold_2,
        )
        gc.set_threshold(*requested)
        # Tuple repr matches the original "(a, b, c)" message format.
        applied.append(f"gc_threshold: {previous} -> {requested}")
    except Exception as e:
        errors.append(f"gc_threshold: {e}")

267 

268 

def _apply_gc_freeze(
    profile: OptimizationProfile,
    freeze_after: bool,
    applied: list[str],
    skipped: list[str],
    errors: list[str],
) -> None:
    """Apply GC freeze if supported.

    Guard-clause form: does nothing unless the profile enables freezing;
    records a skip on pre-3.12 runtimes; freezes only when requested.
    """
    if not profile.gc_freeze_enabled:
        return
    if not PY312:
        skipped.append("gc_freeze: requires Python 3.12+")
        return
    if not freeze_after:
        # Profile allows freezing but the caller did not ask for it.
        return
    try:
        gc.freeze()
        applied.append("gc_freeze: enabled")
    except Exception as e:
        errors.append(f"gc_freeze: {e}")

285 

286 

def _apply_experimental(
    profile: OptimizationProfile,
    applied: list[str],
    skipped: list[str],
) -> None:
    """Check and log experimental features."""
    if not profile.enable_experimental:
        skipped.append("experimental: requires STACK_ENABLE_EXPERIMENTAL=1")
        return

    features = get_features()
    if features.has_jit:  # pragma: no branch
        applied.append("jit: available")
    if features.has_free_threading:  # pragma: no branch
        applied.append("free_threading: available")
    # Loud warning: experimental runtime features trade stability for speed.
    logger.warning(
        "EXPERIMENTAL FEATURES ENABLED: Stability and security may be affected."
    )

304 

305 

def apply_optimizations(
    *,
    profile: OptimizationProfile | None = None,
    apply_gc: bool = True,
    freeze_after: bool = False,
    force_refresh: bool = False,
) -> OptimizationResult:
    """Apply runtime optimizations based on profile.

    Applies safe, reversible optimizations to the Python runtime; intended
    to run once at application startup.

    Args:
        profile: Optimization profile to use (auto-detected if None).
        apply_gc: Whether to apply GC tuning.
        freeze_after: Whether to freeze objects after applying.
        force_refresh: Whether to force re-detection of profile if none is provided.

    Returns:
        OptimizationResult with details of what was applied.

    """
    active = (
        get_optimization_profile(force_refresh=force_refresh)
        if profile is None
        else profile
    )

    applied: list[str] = []
    skipped: list[str] = []
    errors: list[str] = []

    # GC threshold tuning.
    if apply_gc:
        _apply_gc_tuning(active, applied, errors)
    else:
        skipped.append("gc_threshold: disabled")

    # GC freeze (3.12+ only).
    _apply_gc_freeze(active, freeze_after, applied, skipped, errors)

    # Performance hints are log-only: no runtime switch is flipped here.
    if active.enable_perf_hints:
        applied.append("perf_hints: enabled (JIT-aware patterns)")
    else:
        skipped.append("perf_hints: disabled")

    # Experimental features (warns loudly when enabled).
    _apply_experimental(active, applied, skipped)

    # Summarize outcome in the log.
    if applied:
        logger.debug("Applied optimizations: %s", ", ".join(applied))
    if skipped:  # pragma: no branch
        logger.debug("Skipped optimizations: %s", ", ".join(skipped))
    if errors:
        logger.warning("Optimization errors: %s", ", ".join(errors))

    return OptimizationResult(
        success=not errors,
        applied=tuple(applied),
        skipped=tuple(skipped),
        errors=tuple(errors),
    )

368 

369 

370# ============================================================================= 

371# Utility Functions 

372# ============================================================================= 

373 

374 

def get_recommended_thread_pool_size(*, force_refresh: bool = False) -> int:
    """Get recommended thread pool size based on version and features.

    Args:
        force_refresh: If True, re-detect instead of using cache.

    Returns:
        Recommended number of threads for ThreadPoolExecutor.

    """
    active = get_optimization_profile(force_refresh=force_refresh)
    # os.cpu_count() may return None; assume a small 4-core host then.
    cores = os.cpu_count() or 4
    scaled = int(cores * active.thread_pool_multiplier)
    return min(scaled, active.max_thread_pool_size)