Code Efficiency Best Practices
This guide outlines best practices for writing efficient and performant code with TomatoPy.
Memory Management
Efficient Ingredient Creation
# Good: Create ingredients with minimal memory usage
tomato = Tomato(
ripeness=0.8,
variety="San Marzano",
weight=150
)
# Bad: Creating unnecessary copies
tomato_copy = tomato.copy() # Unnecessary memory usage
Batch Processing
# Good: Process ingredients in batches
def process_ingredients(ingredients, batch_size=100):
for i in range(0, len(ingredients), batch_size):
batch = ingredients[i:i + batch_size]
process_batch(batch)
# Bad: Processing one at a time
for ingredient in ingredients:
process_single(ingredient) # Less efficient
Performance Optimization
Caching Results
from functools import lru_cache
# Good: Cache expensive computations (note: all cached arguments must be hashable)
@lru_cache(maxsize=128)
def analyze_ingredient(ingredient):
return TasteTester().analyze(ingredient)
# Bad: Recomputing every time
def analyze_ingredient(ingredient):
return TasteTester().analyze(ingredient) # No caching
Efficient Data Structures
# Good: Use appropriate data structures
from collections import defaultdict
class Recipe:
def __init__(self):
self.ingredients = defaultdict(float) # O(1) lookup, and missing ingredients default to 0.0
# Bad: Using inefficient structures
class Recipe:
def __init__(self):
self.ingredients = [] # Less efficient: O(n) membership tests and lookups vs. O(1) for a dict
Resource Management
Context Managers
# Good: Use context managers for resource cleanup
with kitchen.temperature_monitor() as monitor:
result = kitchen.cook(ingredient)
data = monitor.get_data()
# Bad: Manual resource management
monitor = kitchen.temperature_monitor()
try:
result = kitchen.cook(ingredient)
data = monitor.get_data()
finally:
monitor.cleanup() # More error-prone
Connection Pooling
# Good: Use connection pooling
from tomatopy import ConnectionPool
pool = ConnectionPool(max_connections=10)
with pool.get_connection() as conn:
conn.execute_operation()
# Bad: Creating new connections each time
conn = create_connection() # Less efficient
conn.execute_operation()
conn.close()
Algorithm Optimization
Efficient Search
# Good: Use binary search for sorted data (the stdlib bisect module provides this ready-made)
def find_optimal_temperature(sorted_temps, target):
left, right = 0, len(sorted_temps) - 1
while left <= right:
mid = (left + right) // 2
if sorted_temps[mid] == target:
return mid
elif sorted_temps[mid] < target:
left = mid + 1
else:
right = mid - 1
return -1
# Bad: Linear search
def find_optimal_temperature(temps, target):
for i, temp in enumerate(temps):
if temp == target:
return i
return -1
Parallel Processing
from concurrent.futures import ThreadPoolExecutor
# Good: Process multiple ingredients in parallel
# (threads overlap I/O-bound work; for CPU-bound processing, use ProcessPoolExecutor instead)
def process_ingredients_parallel(ingredients):
with ThreadPoolExecutor(max_workers=4) as executor:
results = list(executor.map(process_ingredient, ingredients))
return results
# Bad: Sequential processing
def process_ingredients_sequential(ingredients):
results = []
for ingredient in ingredients:
results.append(process_ingredient(ingredient))
return results
Code Organization
Modular Design
# Good: Modular code structure
class IngredientProcessor:
def process(self, ingredient):
self.validate(ingredient)
self.prepare(ingredient)
self.analyze(ingredient)
# Bad: Monolithic functions
def process_ingredient(ingredient):
# All processing in one function
validate_ingredient(ingredient)
prepare_ingredient(ingredient)
analyze_ingredient(ingredient)
Clean Interfaces
# Good: Clean interface design
class Kitchen:
def cook(self, ingredient, **kwargs):
self._validate_parameters(kwargs)
self._prepare_environment()
return self._execute_cooking(ingredient, kwargs)
# Bad: Complex interface
def cook(ingredient, temperature=None, duration=None, method=None,
stirring_frequency=None, humidity=None, pressure=None):
# Too many parameters
pass
Error Handling
Efficient Error Recovery
# Good: Graceful error recovery
def process_recipe(recipe):
try:
result = kitchen.cook_recipe(recipe)
return result
except TemperatureError:
# Handle temperature error
return adjust_temperature(recipe)
except IngredientError:
# Handle ingredient error
return substitute_ingredients(recipe)
finally:
cleanup_resources()
# Bad: No error recovery
def process_recipe(recipe):
result = kitchen.cook_recipe(recipe)
return result # No error handling
Resource Cleanup
# Good: Proper resource cleanup
class Kitchen:
def __init__(self):
self.resources = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.cleanup()
# Bad: Manual cleanup
def use_kitchen():
kitchen = Kitchen()
try:
# Use kitchen
pass
finally:
kitchen.cleanup()
Testing and Profiling
Performance Testing
# Good: Performance testing
import time
import cProfile
def profile_operation(func):
def wrapper(*args, **kwargs):
profiler = cProfile.Profile()
result = profiler.runcall(func, *args, **kwargs)
profiler.print_stats()
return result
return wrapper
# Bad: No performance testing
def operation():
# No performance monitoring
pass
Memory Profiling
# Good: Memory profiling
from memory_profiler import profile
@profile
def memory_intensive_operation():
# Operation to profile
pass
# Bad: No memory profiling
def memory_intensive_operation():
# No memory monitoring
pass
Best Practices Summary
Memory Management
Use efficient data structures
Implement batch processing
Avoid unnecessary copies
Performance Optimization
Cache expensive computations
Use appropriate algorithms
Implement parallel processing
Resource Management
Use context managers
Implement connection pooling
Clean up resources properly
Code Organization
Follow modular design
Create clean interfaces
Maintain separation of concerns
Error Handling
Implement graceful recovery
Clean up resources properly
Log errors appropriately
Next Steps
Virtual Kitchen Management - Learn kitchen optimization
API Reference - Explore the full API
Tutorials - Learn advanced techniques
Last updated
Was this helpful?