Latest Forecasts
This tutorial shows how to retrieve the most recent forecasts from your saved price nodes using the latest forecasts API. This endpoint is ideal for business intelligence tools, trading dashboards, and applications that need simple, up-to-date forecast data.
The latest forecasts API returns only the most recent vintage of forecasts from our best-performing model, providing a clean and simple data structure.
Use Cases
- Trading dashboards: Real-time price forecasts for decision making
- BI tools: Integration with Tableau, Power BI, or similar platforms
- Automated alerts: Trigger notifications based on forecast conditions
- Simple applications: When you need current forecasts without historical complexity
Prerequisites
- Saved grid selections: You must have price nodes saved in your grid view
- API token: Create one from your user profile
- Python environment: With the `pandas` and `requests` libraries installed
Setup Your Grid Selections
Before using the API, configure your saved selections:
- Log into the application at https://app.enertel.ai/app/grid
- Search for and select price nodes and data series of interest
- Save your selections using the save button
Your saved selections become the default response for the latest forecasts API.
Create an API Token
- Click the user icon in the top-right corner
- Navigate to your user profile
- Create a new API token
- Important: Copy and store your token securely - it's only shown once
Code Example
Basic API Call
# Basic latest-forecasts request.
import pandas as pd
import requests
from datetime import datetime, timedelta

# --- Configuration ---------------------------------------------------------
token = '<your-api-token>'               # API token from your user profile
base_url = "https://app.enertel.ai/api"

# Forecast window (ISO-format dates) and API version.
start_date = '2024-05-17'
end_date = '2024-05-20'
api_version = 'v3'  # v1 and v2 are deprecated

# --- Build and send the request --------------------------------------------
response = requests.get(
    f"{base_url}/forecasts/latest",
    headers={"Authorization": f"Bearer {token}"},
    params={
        "start": start_date,
        "end": end_date,
        "api_version": api_version,
    },
)

# --- Handle the result ------------------------------------------------------
if response.status_code == 200:
    data = response.json()
    print(f"Retrieved forecasts for {len(data)} targets")
else:
    print(f"Error: {response.status_code} - {response.text}")
Understanding the Response Structure
The latest forecasts API returns a simpler structure than the dashboard API:
Response Array
├── Target 1 (Price Node + Series combination)
│ ├── Target metadata (object_name, series_name, etc.)
│ └── Forecasts array (timestamp predictions)
├── Target 2
└── Target N
Each target corresponds to a specific data series (RTLMP, DALMP) at a specific price node.
Converting to DataFrame
Transform the response into a flat DataFrame for analysis:
def parse_latest_forecasts(response_data):
    """
    Flatten a latest-forecasts API response into a single DataFrame.

    Each row combines one forecast point with the metadata of the target
    (price node + data series) it belongs to.
    """
    rows = []
    for target in response_data:
        # Metadata shared by every forecast of this target. Keys that may
        # be absent from the payload fall back to an empty string.
        meta = {
            'target_id': target['target_id'],
            'object_name': target['object_name'],
            'series_name': target['series_name'],
            'timezone': target['timezone'],
            'iso': target.get('iso', ''),
            'object_type': target.get('object_type', ''),
            'description': target.get('description', ''),
        }
        # Merge the shared metadata into every forecast point.
        rows.extend({**meta, **point} for point in target['forecasts'])
    return pd.DataFrame(rows)
# Flatten the API response and take a quick look at what came back.
df = parse_latest_forecasts(data)

n_points = len(df)
print(f"Created DataFrame with {n_points} forecast points")
print(f"Columns: {list(df.columns)}")
print("\nSample data:")
print(df.head())
Data Analysis Examples
Basic Data Exploration
# High-level overview of the retrieved forecasts.
timestamps = df['timestamp']
print("Data Summary:")
print(f"Date range: {timestamps.min()} to {timestamps.max()}")
print(f"Objects: {df['object_name'].nunique()} unique price nodes")
print(f"Series: {df['series_name'].unique()}")
print(f"ISOs: {df['iso'].unique()}")

# Completeness check: row count and total missing cells.
total_missing = df.isnull().sum().sum()
print(f"\nData completeness:")
print(f"Total forecast points: {len(df)}")
print(f"Missing values: {total_missing}")
Price Analysis
# Timestamps arrive as strings; convert once so min/max/sort behave
# chronologically rather than lexically.
df['timestamp'] = pd.to_datetime(df['timestamp'])

# Distribution of the median (p50) forecast, per data series.
by_series = df.groupby('series_name')['p50'].describe()
print("Price forecast summary by series:")
print(by_series)

# Extremes across all forecast points.
p50 = df['p50']
print(f"\nHighest forecasted price: ${p50.max():.2f}")
print(f"Lowest forecasted price: ${p50.min():.2f}")

# The ten rows with the highest median forecast, including the p95 tail.
top_ten = df.nlargest(10, 'p50')[['object_name', 'series_name', 'timestamp', 'p50', 'p95']]
print("\nTop 10 highest price forecasts:")
print(top_ten)
Time Series Visualization
import matplotlib.pyplot as plt  # NOTE: unused `matplotlib.dates` import removed

# Plot the forecast band for a single (price node, series) pair.
# Uses the first object/series present in the DataFrame as an example.
object_name = df['object_name'].iloc[0]
series_name = df['series_name'].iloc[0]

# Filter to that one target and order chronologically for plotting.
plot_data = df[
    (df['object_name'] == object_name) &
    (df['series_name'] == series_name)
].sort_values('timestamp')

if not plot_data.empty:
    plt.figure(figsize=(12, 6))
    # Median (p50) forecast line
    plt.plot(plot_data['timestamp'], plot_data['p50'],
             label='Median Forecast (p50)', color='blue', linewidth=2)
    # Shade the p10-p90 band (an 80% confidence interval)
    plt.fill_between(plot_data['timestamp'],
                     plot_data['p10'], plot_data['p90'],
                     alpha=0.3, color='blue', label='80% Confidence Interval')
    # Formatting
    plt.title(f'{series_name} Forecasts - {object_name}')
    plt.xlabel('Time')
    plt.ylabel('Price ($/MWh)')
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.xticks(rotation=45)
    plt.tight_layout()
    plt.show()
Market Comparison
# Cross-node comparison only makes sense with more than one price node.
if df['object_name'].nunique() > 1:
    stats = {
        'p50': ['mean', 'std', 'min', 'max'],
        'timestamp': 'count',
    }
    comparison = df.groupby(['object_name', 'series_name']).agg(stats).round(2)
    print("Market comparison (price statistics):")
    print(comparison)
Advanced Usage
Real-time Data Pipeline
import time
from datetime import datetime, timedelta
def get_latest_forecasts_pipeline(token, update_interval_minutes=60):
    """
    Continuous pipeline to fetch latest forecasts at regular intervals.

    Polls the latest-forecasts endpoint for the next 24 hours of data,
    flags any forecasts above a fixed price threshold, then sleeps until
    the next poll. Runs until interrupted with Ctrl-C.
    """
    base_url = "https://app.enertel.ai/api"
    headers = {"Authorization": f"Bearer {token}"}
    while True:
        try:
            # Request a 24-hour forecast window starting now.
            now = datetime.now()
            params = {
                "start": now.strftime('%Y-%m-%d'),
                "end": (now + timedelta(hours=24)).strftime('%Y-%m-%d'),
                "api_version": "v3"
            }
            print(f"Fetching forecasts at {now.strftime('%Y-%m-%d %H:%M:%S')}")
            response = requests.get(
                f"{base_url}/forecasts/latest",
                headers=headers,
                params=params,
                timeout=30
            )
            if response.status_code != 200:
                print(f"API error: {response.status_code} - {response.text}")
            else:
                frame = parse_latest_forecasts(response.json())
                # Process your data here
                print(f"Retrieved {len(frame)} forecast points")
                # Example alert: surface forecasts above $100/MWh.
                high_price_threshold = 100  # $/MWh
                alerts = frame[frame['p50'] > high_price_threshold]
                if not alerts.empty:
                    print(f"HIGH PRICE ALERT: {len(alerts)} forecasts above ${high_price_threshold}")
                    print(alerts[['object_name', 'timestamp', 'p50']].head())
            # Sleep until the next poll.
            print(f"Waiting {update_interval_minutes} minutes for next update...")
            time.sleep(update_interval_minutes * 60)
        except KeyboardInterrupt:
            print("Pipeline stopped by user")
            break
        except Exception as e:
            # Log the failure and retry after a short pause.
            print(f"Error in pipeline: {e}")
            time.sleep(60)  # Wait 1 minute before retrying
# Example usage (commented out to prevent accidental execution)
# get_latest_forecasts_pipeline(token, update_interval_minutes=30)
Integration with BI Tools
def export_for_bi_tools(df, format='csv'):
    """
    Export forecast data in formats suitable for BI tools.

    Adds derived columns (date/hour, price category, uncertainty metrics)
    to *df* in place, then writes a timestamped CSV or Excel file to the
    current directory.

    Args:
        df: DataFrame from parse_latest_forecasts(); must contain
            'timestamp', 'p50', 'p10', and 'p90' columns.
        format: 'csv' or 'excel'. Any other value skips the file export
            but still returns the enhanced DataFrame.

    Returns:
        The same DataFrame with the derived columns added.
    """
    # Derived columns useful for slicing in BI tools.
    # Parse the timestamps once instead of twice.
    ts = pd.to_datetime(df['timestamp'])
    df['forecast_date'] = ts.dt.date
    df['forecast_hour'] = ts.dt.hour
    df['price_category'] = pd.cut(df['p50'],
                                  bins=[0, 25, 50, 100, float('inf')],
                                  labels=['Low', 'Medium', 'High', 'Very High'])
    # Uncertainty metrics: absolute p10-p90 spread, and spread relative
    # to the median forecast.
    df['uncertainty_range'] = df['p90'] - df['p10']
    df['uncertainty_pct'] = (df['uncertainty_range'] / df['p50']) * 100
    if format == 'csv':
        filename = f"enertel_forecasts_{datetime.now().strftime('%Y%m%d_%H%M')}.csv"
        df.to_csv(filename, index=False)
        # Bug fix: the message previously omitted the file name.
        print(f"Exported to {filename}")
    elif format == 'excel':
        filename = f"enertel_forecasts_{datetime.now().strftime('%Y%m%d_%H%M')}.xlsx"
        with pd.ExcelWriter(filename) as writer:
            df.to_excel(writer, sheet_name='Forecasts', index=False)
            # Add a summary sheet with per-target price statistics.
            summary = df.groupby(['object_name', 'series_name']).agg({
                'p50': ['mean', 'std', 'min', 'max'],
                'uncertainty_pct': 'mean'
            }).round(2)
            summary.to_excel(writer, sheet_name='Summary')
        print(f"Exported to {filename}")
    return df
# Export data
# df_enhanced = export_for_bi_tools(df, format='excel')
Best Practices
Error Handling and Retries
import time
def robust_latest_forecasts(token, start_date, end_date, max_retries=3):
    """
    Get latest forecasts with robust error handling.

    Retries failed requests with exponential backoff (1s, 2s, 4s, ...).
    Fixes two defects in the previous version: non-429 HTTP errors were
    retried immediately with no backoff, and an exhausted run of 429
    responses silently returned None instead of failing loudly.

    Args:
        token: API bearer token.
        start_date: Window start, ISO-format date string.
        end_date: Window end, ISO-format date string.
        max_retries: Total attempts before giving up.

    Returns:
        Flat DataFrame from parse_latest_forecasts().

    Raises:
        Exception: when every attempt fails with a non-200 response.
        requests.exceptions.RequestException: when every attempt fails
            at the transport level (timeout, connection error, ...).
    """
    base_url = "https://app.enertel.ai/api"
    headers = {"Authorization": f"Bearer {token}"}
    # Query parameters are invariant across attempts; build them once.
    params = {
        "start": start_date,
        "end": end_date,
        "api_version": "v3"
    }
    for attempt in range(max_retries):
        try:
            response = requests.get(
                f"{base_url}/forecasts/latest",
                headers=headers,
                params=params,
                timeout=30
            )
            if response.status_code == 200:
                return parse_latest_forecasts(response.json())
            if response.status_code == 429:  # Rate limited
                print(f"Rate limited, waiting {2 ** attempt} seconds...")
            else:
                print(f"API error: {response.status_code} - {response.text}")
            # Fail loudly once every attempt has been used, regardless of
            # which error we saw last.
            if attempt == max_retries - 1:
                raise Exception(f"API call failed after {max_retries} attempts")
            time.sleep(2 ** attempt)  # exponential backoff before retrying
        except requests.exceptions.RequestException as e:
            print(f"Request failed (attempt {attempt + 1}): {e}")
            if attempt == max_retries - 1:
                raise
            time.sleep(2 ** attempt)
    return None  # unreachable; kept so the signature's contract is explicit
# Use robust function
# df = robust_latest_forecasts(token, '2024-05-17', '2024-05-20')
This tutorial provides a complete framework for working with the latest forecasts API, from basic data retrieval to advanced real-time pipelines and BI tool integration.
The latest forecasts API provides the simplest data structure among our forecast endpoints, making it ideal for applications that need current forecasts without the complexity of multiple models or vintages.