函数调用优化(简单的python核心篇性能优化)

函数调用优化(简单的python核心篇性能优化)

admin 2025-11-13 社会资讯 16 次浏览 0个评论

性能优化是Python开发中的一种重要技能,包括性能分析、代码优化和工具使用。

函数调用优化(简单的python核心篇性能优化)
(图片来源网络,侵删)
# 性能测量与内存分析 — performance measurement & memory-analysis demos.
import time
import cProfile
import pstats
import io
import sys
import os
import tracemalloc
from functools import wraps


def timing_decorator(func):
    """Decorator that prints the wall-clock execution time of each call."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        end = time.perf_counter()
        print(f"{func.__name__} 执行时间: {end - start:.6f} 秒")
        return result
    return wrapper


def profile_decorator(func):
    """Decorator that profiles each call with cProfile and prints the
    top 10 entries sorted by cumulative time."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        pr = cProfile.Profile()
        pr.enable()
        result = func(*args, **kwargs)
        pr.disable()
        # Render the stats into a string buffer instead of writing to stdout directly.
        s = io.StringIO()
        ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
        ps.print_stats(10)  # show the 10 most expensive functions
        print(f"\n{func.__name__} 性能分析:")
        print(s.getvalue())
        return result
    return wrapper


@timing_decorator
def slow_function(n):
    """Sum of squares 0..n-1, written as an explicit loop on purpose."""
    result = 0
    for i in range(n):
        result += i * i
    return result


@profile_decorator
def complex_function(n):
    """Sum of i**2 for even i and i**3 for odd i, over 0..n-1."""
    result = []
    for i in range(n):
        if i % 2 == 0:
            result.append(i * i)
        else:
            result.append(i * i * i)
    return sum(result)


# 2. 内存使用分析
def memory_profiler(func):
    """Decorator that reports process RSS before/after the call (psutil)
    and the peak traced allocation size (tracemalloc)."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        # psutil is third-party; import it lazily so this module still
        # imports (and the other demos still run) when it is not installed.
        import psutil
        tracemalloc.start()
        process = psutil.Process(os.getpid())
        initial_memory = process.memory_info().rss / 1024 / 1024
        result = func(*args, **kwargs)
        final_memory = process.memory_info().rss / 1024 / 1024
        current, peak = tracemalloc.get_traced_memory()
        tracemalloc.stop()
        print(f"{func.__name__} 内存使用:")
        print(f" 初始内存: {initial_memory:.2f} MB")
        print(f" 最终内存: {final_memory:.2f} MB")
        print(f" 内存变化: {final_memory - initial_memory:+.2f} MB")
        print(f" 峰值内存: {peak / 1024 / 1024:.2f} MB")
        return result
    return wrapper


@memory_profiler
def memory_intensive_function(n):
    """Allocate n lists of 100 ints each and return how many were built."""
    data = []
    for i in range(n):
        data.append([i] * 100)
    return len(data)


if __name__ == "__main__":
    # Demo calls are guarded so importing this module has no side effects.
    print("=== 性能测量 ===")
    result1 = slow_function(100000)
    result2 = complex_function(1000)
    print("\n=== 内存分析 ===")
    result = memory_intensive_function(1000)
# 算法优化 — 1. 时间复杂度优化
import time
from collections import defaultdict
from functools import lru_cache, cache


def inefficient_search(data, target):
    """Inefficient pair-sum search - O(n^2): tries every index pair i < j."""
    result = []
    for i in range(len(data)):
        for j in range(i + 1, len(data)):
            if data[i] + data[j] == target:
                result.append((i, j))
    return result


def efficient_search(data, target):
    """Efficient pair-sum search - O(n): one pass with a value->index map."""
    result = []
    seen = {}
    for i, num in enumerate(data):
        complement = target - num
        if complement in seen:
            result.append((seen[complement], i))
        seen[num] = i
    return result


def algorithm_comparison():
    """Time the O(n^2) versus O(n) pair-sum search on the same input."""
    print("=== 算法优化 ===")
    data = list(range(1000))
    target = 1999

    start = time.perf_counter()
    result1 = inefficient_search(data, target)
    time1 = time.perf_counter() - start

    start = time.perf_counter()
    result2 = efficient_search(data, target)
    time2 = time.perf_counter() - start

    print(f"低效算法时间: {time1:.6f} 秒")
    print(f"高效算法时间: {time2:.6f} 秒")
    print(f"性能提升: {time1 / time2:.2f} 倍")
    print(f"结果数量: {len(result1)} vs {len(result2)}")


# 2. 数据结构优化
def data_structure_optimization():
    """Compare list vs set membership and list vs dict key lookup."""
    print("\n=== 数据结构优化 ===")

    # 1. list vs set membership
    def list_search(data, target):
        return target in data  # O(n) scan on a list

    def set_search(data, target):
        return target in data  # O(1) average on a set

    n = 10000
    data_list = list(range(n))
    data_set = set(range(n))
    target = n - 1  # worst case for the list scan

    start = time.perf_counter()
    for _ in range(1000):
        list_search(data_list, target)
    list_time = time.perf_counter() - start

    start = time.perf_counter()
    for _ in range(1000):
        set_search(data_set, target)
    set_time = time.perf_counter() - start

    print(f"列表搜索时间: {list_time:.6f} 秒")
    print(f"集合搜索时间: {set_time:.6f} 秒")
    print(f"性能提升: {list_time / set_time:.2f} 倍")

    # 2. dict vs list key lookup
    def list_lookup(data, key):
        for item in data:
            if item[0] == key:
                return item[1]
        return None

    def dict_lookup(data, key):
        return data.get(key)

    n = 1000
    data_list = [(i, f"value_{i}") for i in range(n)]
    data_dict = {i: f"value_{i}" for i in range(n)}
    key = n - 1

    start = time.perf_counter()
    for _ in range(1000):
        list_lookup(data_list, key)
    list_time = time.perf_counter() - start

    start = time.perf_counter()
    for _ in range(1000):
        dict_lookup(data_dict, key)
    dict_time = time.perf_counter() - start

    print(f"列表查找时间: {list_time:.6f} 秒")
    print(f"字典查找时间: {dict_time:.6f} 秒")
    print(f"性能提升: {list_time / dict_time:.2f} 倍")


# 代码优化技巧 — 1. 循环优化
def loop_optimization():
    """Compare a naive loop, a hoisted loop, a comprehension and a generator."""
    print("\n=== 循环优化 ===")

    # 1. avoid recomputation inside the loop
    def inefficient_loop(n):
        result = []
        for i in range(n):
            result.append(i * i + 2 * i + 1)
        return result

    def efficient_loop(n):
        result = []
        for i in range(n):
            # pre-compute subexpressions
            i_squared = i * i
            two_i = 2 * i
            result.append(i_squared + two_i + 1)
        return result

    # 2. list comprehension
    def list_comprehension(n):
        return [i * i + 2 * i + 1 for i in range(n)]

    # 3. generator expression (lazy)
    def generator_approach(n):
        return (i * i + 2 * i + 1 for i in range(n))

    n = 100000

    start = time.perf_counter()
    result1 = inefficient_loop(n)
    time1 = time.perf_counter() - start

    start = time.perf_counter()
    result2 = efficient_loop(n)
    time2 = time.perf_counter() - start

    start = time.perf_counter()
    result3 = list_comprehension(n)
    time3 = time.perf_counter() - start

    start = time.perf_counter()
    result4 = list(generator_approach(n))
    time4 = time.perf_counter() - start

    print(f"低效循环时间: {time1:.6f} 秒")
    print(f"高效循环时间: {time2:.6f} 秒")
    print(f"列表推导式时间: {time3:.6f} 秒")
    print(f"生成器时间: {time4:.6f} 秒")
    print(f"结果一致性: {result1 == result2 == result3 == result4}")


# 2. 函数调用优化
def function_call_optimization():
    """Show the cost of redundant len() calls versus caching the result."""
    print("\n=== 函数调用优化 ===")

    # 1. avoid unnecessary function calls
    def inefficient_approach(data):
        result = []
        for item in data:
            # len() is called twice per item
            if len(item) > 0:
                result.append(len(item))
        return result

    def efficient_approach(data):
        result = []
        for item in data:
            # cache the len() result
            length = len(item)
            if length > 0:
                result.append(length)
        return result

    # 2. builtin-powered comprehension
    def builtin_approach(data):
        return [len(item) for item in data if len(item) > 0]

    data = [list(range(i)) for i in range(1000)]

    start = time.perf_counter()
    result1 = inefficient_approach(data)
    time1 = time.perf_counter() - start

    start = time.perf_counter()
    result2 = efficient_approach(data)
    time2 = time.perf_counter() - start

    start = time.perf_counter()
    result3 = builtin_approach(data)
    time3 = time.perf_counter() - start

    print(f"低效方法时间: {time1:.6f} 秒")
    print(f"高效方法时间: {time2:.6f} 秒")
    print(f"内置函数时间: {time3:.6f} 秒")
    print(f"结果一致性: {result1 == result2 == result3}")


# 内置函数优化 — 1. 使用内置函数
def builtin_optimization():
    """Builtins (sum/max/str.join) versus hand-rolled equivalents."""
    print("\n=== 内置函数优化 ===")

    # 1. sum() vs manual accumulation
    def manual_sum(data):
        result = 0
        for item in data:
            result += item
        return result

    def builtin_sum(data):
        return sum(data)

    # 2. max() vs manual scan
    def manual_max(data):
        if not data:
            return None
        max_val = data[0]
        for item in data[1:]:
            if item > max_val:
                max_val = item
        return max_val

    def builtin_max(data):
        return max(data) if data else None

    # 3. string concatenation
    def inefficient_string_concat(strings):
        result = ""
        for s in strings:
            result += s  # repeated += can be quadratic
        return result

    def efficient_string_concat(strings):
        return "".join(strings)

    data = list(range(10000))
    strings = [f"string_{i}" for i in range(1000)]

    start = time.perf_counter()
    result1 = manual_sum(data)
    time1 = time.perf_counter() - start

    start = time.perf_counter()
    result2 = builtin_sum(data)
    time2 = time.perf_counter() - start

    start = time.perf_counter()
    result3 = manual_max(data)
    time3 = time.perf_counter() - start

    start = time.perf_counter()
    result4 = builtin_max(data)
    time4 = time.perf_counter() - start

    start = time.perf_counter()
    result5 = inefficient_string_concat(strings)
    time5 = time.perf_counter() - start

    start = time.perf_counter()
    result6 = efficient_string_concat(strings)
    time6 = time.perf_counter() - start

    print(f"手动求和时间: {time1:.6f} 秒")
    print(f"内置求和时间: {time2:.6f} 秒")
    print(f"性能提升: {time1 / time2:.2f} 倍")
    print(f"手动最大值时间: {time3:.6f} 秒")
    print(f"内置最大值时间: {time4:.6f} 秒")
    print(f"性能提升: {time3 / time4:.2f} 倍")
    print(f"低效字符串连接时间: {time5:.6f} 秒")
    print(f"高效字符串连接时间: {time6:.6f} 秒")
    print(f"性能提升: {time5 / time6:.2f} 倍")


# 2. 集合操作优化
def set_optimization():
    """Set-based deduplication/intersection versus list scanning."""
    print("\n=== 集合操作优化 ===")

    # 1. deduplication (order-preserving scan vs plain set())
    def list_deduplication(data):
        result = []
        seen = set()
        for item in data:
            if item not in seen:
                seen.add(item)
                result.append(item)
        return result

    def set_deduplication(data):
        return list(set(data))

    # 2. intersection
    def manual_intersection(list1, list2):
        result = []
        for item in list1:
            if item in list2:  # O(n) membership test per item
                result.append(item)
        return result

    def set_intersection(list1, list2):
        return list(set(list1) & set(list2))

    data1 = list(range(1000)) + list(range(500, 1500))
    data2 = list(range(800, 1800))

    start = time.perf_counter()
    result1 = list_deduplication(data1)
    time1 = time.perf_counter() - start

    start = time.perf_counter()
    result2 = set_deduplication(data1)
    time2 = time.perf_counter() - start

    start = time.perf_counter()
    result3 = manual_intersection(data1, data2)
    time3 = time.perf_counter() - start

    start = time.perf_counter()
    result4 = set_intersection(data1, data2)
    time4 = time.perf_counter() - start

    print(f"列表去重时间: {time1:.6f} 秒")
    print(f"集合去重时间: {time2:.6f} 秒")
    print(f"性能提升: {time1 / time2:.2f} 倍")
    print(f"手动交集时间: {time3:.6f} 秒")
    print(f"集合交集时间: {time4:.6f} 秒")
    print(f"性能提升: {time3 / time4:.2f} 倍")


# 缓存优化 — 1. 函数缓存
def cache_optimization():
    """Fibonacci with no cache, lru_cache(128), and an unbounded cache."""
    print("\n=== 缓存优化 ===")

    # 1. no cache — exponential time
    def fibonacci_no_cache(n):
        if n < 2:
            return n
        return fibonacci_no_cache(n - 1) + fibonacci_no_cache(n - 2)

    # 2. bounded LRU cache
    @lru_cache(maxsize=128)
    def fibonacci_with_cache(n):
        if n < 2:
            return n
        return fibonacci_with_cache(n - 1) + fibonacci_with_cache(n - 2)

    # 3. unbounded cache (functools.cache, Python 3.9+)
    @cache
    def fibonacci_unlimited_cache(n):
        if n < 2:
            return n
        return fibonacci_unlimited_cache(n - 1) + fibonacci_unlimited_cache(n - 2)

    n = 35

    start = time.perf_counter()
    result1 = fibonacci_no_cache(n)
    time1 = time.perf_counter() - start

    start = time.perf_counter()
    result2 = fibonacci_with_cache(n)
    time2 = time.perf_counter() - start

    start = time.perf_counter()
    result3 = fibonacci_unlimited_cache(n)
    time3 = time.perf_counter() - start

    print(f"无缓存时间: {time1:.6f} 秒")
    print(f"有缓存时间: {time2:.6f} 秒")
    print(f"无限制缓存时间: {time3:.6f} 秒")
    print(f"性能提升: {time1 / time2:.2f} 倍")
    print(f"结果一致性: {result1 == result2 == result3}")
    print(f"缓存信息: {fibonacci_with_cache.cache_info()}")


# 2. 自定义缓存
def custom_cache_demo():
    """A tiny hand-rolled LRU cache with list-tracked access order."""
    print("\n=== 自定义缓存 ===")

    class SimpleCache:
        """LRU cache: evicts the least recently used key once full."""

        def __init__(self, max_size=100):
            self.max_size = max_size
            self.cache = {}
            self.access_order = []  # oldest key first

        def get(self, key):
            """Return the cached value (refreshing its recency) or None."""
            if key in self.cache:
                self.access_order.remove(key)
                self.access_order.append(key)
                return self.cache[key]
            return None

        def set(self, key, value):
            """Insert or update a key, evicting the LRU entry when full."""
            if key in self.cache:
                self.cache[key] = value
                self.access_order.remove(key)
                self.access_order.append(key)
            else:
                if len(self.cache) >= self.max_size:
                    oldest = self.access_order.pop(0)
                    del self.cache[oldest]
                self.cache[key] = value
                self.access_order.append(key)

        def clear(self):
            """Drop every entry."""
            self.cache.clear()
            self.access_order.clear()

        def stats(self):
            """Basic statistics. NOTE: self.hits/self.total are never
            updated anywhere, so hit_rate is always 0 — kept only to
            demonstrate the interface."""
            return {
                'size': len(self.cache),
                'max_size': self.max_size,
                'hit_rate': getattr(self, 'hits', 0) / max(getattr(self, 'total', 1), 1)
            }

    cache = SimpleCache(max_size=5)
    # Insert 10 keys into a 5-slot cache: the first 5 get evicted.
    for i in range(10):
        cache.set(f"key_{i}", f"value_{i}")
        print(f"添加 key_{i}, 缓存大小: {len(cache.cache)}")
    for i in range(5, 10):
        value = cache.get(f"key_{i}")
        print(f"获取 key_{i}: {value}")
    print(f"缓存统计: {cache.stats()}")


if __name__ == "__main__":
    # Demo calls are guarded so importing this module has no side effects
    # (the uncached fib(35) alone takes several seconds).
    algorithm_comparison()
    data_structure_optimization()
    loop_optimization()
    function_call_optimization()
    builtin_optimization()
    set_optimization()
    cache_optimization()
    custom_cache_demo()
# 编译优化 — compilation-level optimization demos.
import time


def pypy_optimization():
    """CPU-bound loop used to illustrate that PyPy's JIT would run it faster."""
    print("\n=== PyPy优化 ===")

    def cpu_intensive_task(n):
        # Sum of squares 0..n-1, kept as a pure-Python loop on purpose.
        result = 0
        for i in range(n):
            result += i * i
        return result

    n = 1000000
    start = time.perf_counter()
    result = cpu_intensive_task(n)
    execution_time = time.perf_counter() - start
    print(f"CPU密集型任务时间: {execution_time:.6f} 秒")
    print(f"结果: {result}")
    print("注意: 在PyPy中运行会更快")


def cython_optimization():
    """Same style of loop, framed as code that could be compiled with Cython."""
    print("\n=== Cython优化 ===")

    def cython_compatible_function(n):
        # Identical workload; Cython would compile this loop to C.
        result = 0
        for i in range(n):
            result += i * i
        return result

    n = 1000000
    start = time.perf_counter()
    result = cython_compatible_function(n)
    execution_time = time.perf_counter() - start
    print(f"Cython兼容函数时间: {execution_time:.6f} 秒")
    print(f"结果: {result}")
    print("注意: 编译为Cython后会更快")


if __name__ == "__main__":
    # Guarded so importing this module has no side effects.
    pypy_optimization()
    cython_optimization()

性能优化体现了Python的灵活性:

- 算法优化:选择合适的数据结构和算法
- 代码优化:避免不必要的计算和函数调用
- 缓存优化:利用空间换时间
- 编译优化:使用更快的Python实现

因此,性能优化是Python开发中的重要技能,它让我们能够:

- 提高程序的执行效率
- 减少资源消耗
- 改善用户体验
- 创建更高质量的软件

所以掌握性能优化,就是掌握Python的高级技能。它不仅仅是技术技巧,更是编程思维的体现。

转载请注明来自海坡下载,本文标题:《函数调用优化(简单的python核心篇性能优化)》

每一天,每一秒,你所做的决定都会改变你的人生!

发表评论

快捷回复:

评论列表 (暂无评论,16人围观)参与讨论

还没有评论,来说两句吧...