#!/usr/bin/env python3
"""
Scenario 7: Continuous Profiling with Pyroscope
===============================================
A simple Flask web app instrumented with Pyroscope for continuous profiling.
SETUP:
1. Start Pyroscope: docker run -p 4040:4040 grafana/pyroscope
2. Install deps: pip install flask pyroscope-io
3. Run this app: python3 app.py
4. Generate load: ./loadgen.sh (or curl in a loop)
5. View profiles: http://localhost:4040
The app has intentionally slow endpoints to demonstrate profiling.
"""
import os
import time
import math
import hashlib
from functools import lru_cache
# Try to import pyroscope, gracefully handle if not installed
try:
    import pyroscope
    PYROSCOPE_AVAILABLE = True
except ImportError:
    PYROSCOPE_AVAILABLE = False
    print("Pyroscope not installed. Run: pip install pyroscope-io")
    print("Continuing without profiling...\n")

from flask import Flask, jsonify

app = Flask(__name__)

# Configure Pyroscope
if PYROSCOPE_AVAILABLE:
    pyroscope.configure(
        application_name="workshop.flask.app",
        server_address="http://localhost:4040",
        # Tags let you filter and compare profiles in the Pyroscope UI
        tags={
            "env": "workshop",
            "version": "1.0.0",
        }
    )
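
    # Optional per-request tagging (a sketch; assumes the pyroscope-io SDK's
    # pyroscope.tag_wrapper context manager described in Grafana's Python
    # docs - verify against your installed version). Wrapping a hot section
    # lets you slice flame graphs by endpoint, e.g.:
    #
    #     with pyroscope.tag_wrapper({"endpoint": "primes"}):
    #         compute_primes_slow(n)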
# ============================================================
# Endpoint 1: CPU-intensive computation
# ============================================================
def compute_primes_slow(n):
    """Intentionally slow prime computation."""
    primes = []
    for num in range(2, n):
        is_prime = True
        for i in range(2, int(math.sqrt(num)) + 1):
            if num % i == 0:
                is_prime = False
                break
        if is_prime:
            primes.append(num)
    return primes


@app.route('/api/primes/<int:n>')
def primes_endpoint(n):
    """CPU-bound endpoint - compute primes up to n."""
    n = min(n, 50000)  # Limit to prevent DoS
    start = time.time()
    primes = compute_primes_slow(n)
    elapsed = time.time() - start
    return jsonify({
        'count': len(primes),
        'limit': n,
        'elapsed_ms': round(elapsed * 1000, 2)
    })
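

# For contrast with the deliberately slow helper above, a hypothetical
# sieve-based version (not wired to any endpoint; shown only to illustrate
# the kind of fix a flame graph typically motivates):
def compute_primes_sieve(n):
    """Sieve of Eratosthenes returning primes below n (illustrative sketch)."""
    if n < 3:
        return []
    sieve = bytearray([1]) * n
    sieve[0] = sieve[1] = 0
    for i in range(2, int(math.sqrt(n)) + 1):
        if sieve[i]:
            # Zero out every multiple of i starting at i*i
            sieve[i * i::i] = bytearray(len(range(i * i, n, i)))
    return [num for num, flag in enumerate(sieve) if flag]
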
# ============================================================
# Endpoint 2: Repeated expensive computation (needs caching)
# ============================================================
def expensive_hash(data, iterations=1000):
    """Simulate expensive computation."""
    result = data.encode()
    for _ in range(iterations):
        result = hashlib.sha256(result).digest()
    return result.hex()


@app.route('/api/hash/<data>')
def hash_endpoint(data):
    """
    This endpoint recomputes the hash on every request.
    The profile will show expensive_hash taking most of the time.
    See the hash_cached endpoint for the improvement.
    """
    start = time.time()
    result = expensive_hash(data)
    elapsed = time.time() - start
    return jsonify({
        'input': data,
        'hash': result[:16] + '...',
        'elapsed_ms': round(elapsed * 1000, 2)
    })
@lru_cache(maxsize=1000)
def expensive_hash_cached(data, iterations=1000):
    """Cached version of expensive_hash."""
    result = data.encode()
    for _ in range(iterations):
        result = hashlib.sha256(result).digest()
    return result.hex()


@app.route('/api/hash_cached/<data>')
def hash_cached_endpoint(data):
    """Cached version - compare profile with /api/hash."""
    start = time.time()
    result = expensive_hash_cached(data)
    elapsed = time.time() - start
    return jsonify({
        'input': data,
        'hash': result[:16] + '...',
        'elapsed_ms': round(elapsed * 1000, 2),
        'cache_info': str(expensive_hash_cached.cache_info())
    })
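

# A quick way to compare the two in a profile (shell, with the app on port 5000):
#
#     for i in $(seq 50); do curl -s localhost:5000/api/hash/hello > /dev/null; done
#     for i in $(seq 50); do curl -s localhost:5000/api/hash_cached/hello > /dev/null; done
#
# The uncached endpoint keeps burning CPU in expensive_hash, while the cached
# one only pays for the first call per (data, iterations) pair; the cache_info
# field in the JSON response shows the hit count climbing.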
# ============================================================
# Endpoint 3: I/O simulation
# ============================================================
@app.route('/api/slow_io')
def slow_io_endpoint():
    """
    Simulate slow I/O (database query, external API, etc.)
    This won't show much in CPU profiles - it's I/O bound!
    """
    time.sleep(0.1)  # Simulate 100ms I/O
    return jsonify({'status': 'ok', 'simulated_io_ms': 100})
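
# Note: with CPU sampling the sleep above is nearly invisible. If your
# pyroscope-io version supports wall-clock sampling (assumption: an oncpu
# flag on configure(); check the SDK docs), blocking waits become visible:
#
#     pyroscope.configure(
#         application_name="workshop.flask.app",
#         server_address="http://localhost:4040",
#         oncpu=False,  # sample wall-clock time, not just on-CPU time
#     )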
# ============================================================
# Endpoint 4: Mix of work types
# ============================================================
@app.route('/api/mixed/<int:n>')
def mixed_endpoint(n):
"""Mixed workload: some CPU, some I/O."""
n = min(n, 1000)
# CPU work
total = 0
for i in range(n * 100):
total += math.sin(i) * math.cos(i)
# Simulated I/O
time.sleep(0.01)
# More CPU work
data = str(total).encode()
for _ in range(100):
data = hashlib.md5(data).digest()
return jsonify({
'n': n,
'result': data.hex()[:16]
})
# ============================================================
# Health check
# ============================================================
@app.route('/health')
def health():
    return jsonify({'status': 'healthy', 'pyroscope': PYROSCOPE_AVAILABLE})

@app.route('/')
def index():
    return '''
    <h1>Pyroscope Demo App</h1>
    <h2>Endpoints:</h2>
    <ul>
        <li><a href="/api/primes/10000">/api/primes/&lt;n&gt;</a> - CPU intensive</li>
        <li><a href="/api/hash/hello">/api/hash/&lt;data&gt;</a> - Expensive (uncached)</li>
        <li><a href="/api/hash_cached/hello">/api/hash_cached/&lt;data&gt;</a> - Expensive (cached)</li>
        <li><a href="/api/slow_io">/api/slow_io</a> - I/O simulation</li>
        <li><a href="/api/mixed/100">/api/mixed/&lt;n&gt;</a> - Mixed workload</li>
        <li><a href="/health">/health</a> - Health check</li>
    </ul>
    <h2>Profiling:</h2>
    <p>View profiles at <a href="http://localhost:4040">http://localhost:4040</a></p>
    '''

if __name__ == '__main__':
print("Starting Flask app on http://localhost:5000")
print("Pyroscope dashboard: http://localhost:4040")
print("\nGenerate load with: ./loadgen.sh")
print("Or: for i in $(seq 100); do curl -s localhost:5000/api/primes/5000 > /dev/null; done")
app.run(host='0.0.0.0', port=5000, debug=False)
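
# The companion ./loadgen.sh is not included in this file; a hypothetical
# stand-in that exercises the hot endpoints in a loop could look like:
#
#     while true; do
#         curl -s localhost:5000/api/primes/20000 > /dev/null
#         curl -s localhost:5000/api/hash/$RANDOM > /dev/null
#         curl -s localhost:5000/api/hash_cached/test > /dev/null
#         curl -s localhost:5000/api/mixed/500 > /dev/null
#         curl -s localhost:5000/api/slow_io > /dev/null
#     done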