Dashboard Forecasts

This tutorial demonstrates how to extract, transform, and load comprehensive forecast data from the Enertel dashboard API. Use this approach when building data warehouses or when you need access to multiple models and forecast vintages.

The dashboard forecasts API returns all forecasts for your saved price nodes, including data from multiple machine learning models and historical vintages.

Production Usage

We highly recommend using a service or developer account for production applications. If you don't have an account, please contact our team for setup assistance.

Prerequisites

  1. Saved dashboard selections: You must have price nodes saved in your dashboard
  2. API token: Create one from your user profile
  3. Python environment: With the pandas and requests libraries installed (see the install command below)
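
If pandas and requests aren't installed yet, they can be added with pip; matplotlib is also needed for the plotting examples later in this tutorial:

pip install pandas requests matplotlib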

Set Up Your Dashboard

Before using the API, configure your saved selections:

  1. Log into the application at https://app.enertel.ai/app/dashboard
  2. Search for and select price nodes and data series of interest
  3. Save your selections using the save button

Your saved selections become the default response for the dashboard forecasts API.

Create an API Token

  1. Click the user icon in the top-right corner
  2. Navigate to your user profile
  3. Create a new API token
  4. Important: Copy and store your token securely - it's only shown once (a pattern for keeping it out of your code is sketched below)
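
Because the token grants API access, avoid hard-coding it in scripts. A common pattern is to read it from an environment variable (a sketch; the ENERTEL_API_TOKEN name is just an example):

import os

# Read the API token from the environment instead of embedding it in code.
# ENERTEL_API_TOKEN is an example name - use whatever your deployment defines.
token = os.environ["ENERTEL_API_TOKEN"]  # Raises KeyError if the variable is unset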

Code Example

Basic API Call

import pandas as pd
import requests
from datetime import datetime, timedelta

# Configuration
token = '<your-api-token>'
base_url = "https://app.enertel.ai/api"

# Date range for forecasts
start_date = '2024-05-17' # ISO format date
end_date = '2024-05-20' # ISO format date

# Optional parameters (None uses defaults)
models = None # Defaults to currently promoted 'best' model
features = None # Defaults to your saved dashboard selections

# Build request
url = f"{base_url}/dashboard/forecasts"
headers = {"Authorization": f"Bearer {token}"}

params = {
    "start": start_date,
    "end": end_date,
}

# Add optional parameters if specified
if models:
    params["models"] = ",".join([str(m) for m in models])

if features:
    params["features"] = ",".join([str(f) for f in features])

# Make API call
response = requests.get(url, headers=headers, params=params)

# Check for errors
if response.status_code != 200:
    print(f"Error: {response.status_code} - {response.text}")
else:
    data = response.json()
    print(f"Retrieved data for {len(data)} objects")

Understanding the Response Structure

The API returns nested JSON with the following hierarchy:

Objects (Price Nodes)
└── Targets (ISO/Series/Horizon combinations)
    └── Vintages (Forecast run times)
        └── Batches (Model/Feature combinations)
            └── Forecasts (Individual predictions)
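
In JSON terms, a response is shaped roughly like this (an illustrative sketch with made-up values; the field names match those used by the parsing code below):

[
  {
    "object_name": "EXAMPLE_NODE",
    "targets": [
      {
        "target_id": 101,
        "description": "Day-ahead LMP",
        "series_name": "DALMP",
        "timezone": "America/Chicago",
        "vintages": [
          {
            "scheduled_at": "2024-05-17T06:00:00Z",
            "batches": [
              {
                "batch_id": 1,
                "model_id": 42,
                "feature_id": 7,
                "forecasts": [
                  {"timestamp": "2024-05-17T07:00:00Z",
                   "p05": 18.1, "p10": 20.3, "p50": 25.0, "p90": 31.2, "p95": 34.8}
                ]
              }
            ]
          }
        ]
      }
    ]
  }
]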

Converting to DataFrame

Transform the nested response into a flat DataFrame for analysis:

def parse_dashboard_forecasts(response_data):
    """
    Convert nested dashboard forecast response to flat DataFrame.
    """
    forecasts = []

    for obj in response_data:  # Each object is a price node
        for target in obj["targets"]:  # Each target is a unique ISO/data series/horizon
            for vintage in target["vintages"]:  # Each vintage is a forecast run time
                for batch in vintage["batches"]:  # Each batch is a model/feature combination
                    for forecast in batch["forecasts"]:  # Each forecast is a timestamp prediction
                        forecasts.append({
                            # Object information
                            "object_name": obj["object_name"],

                            # Target information
                            "target_id": target["target_id"],
                            "target_description": target["description"],
                            "series_name": target.get("series_name", ""),
                            "timezone": target.get("timezone", ""),

                            # Vintage information
                            "scheduled_at": vintage["scheduled_at"],

                            # Batch information
                            "batch_id": batch["batch_id"],
                            "model_id": batch["model_id"],
                            "feature_id": batch["feature_id"],

                            # Forecast data (all percentiles and metadata)
                            **forecast
                        })

    return pd.DataFrame(forecasts)

# Parse the response
df = parse_dashboard_forecasts(data)
print(f"Created DataFrame with {len(df)} forecast points")
print(f"Columns: {list(df.columns)}")
print("\nSample data:")
print(df.head())

Data Analysis Examples

Basic Data Exploration

# Overview of your data
print("Data Summary:")
print(f"Date range: {df['timestamp'].min()} to {df['timestamp'].max()}")
print(f"Objects: {df['object_name'].nunique()} unique price nodes")
print(f"Models: {df['model_id'].nunique()} different models")
print(f"Series: {df['target_description'].unique()}")

# Check for data completeness
print(f"\nData completeness:")
print(f"Total forecast points: {len(df)}")
print(f"Missing values: {df.isnull().sum().sum()}")

Time Series Visualization

import matplotlib.pyplot as plt

# Convert timestamp columns to datetime
df['timestamp'] = pd.to_datetime(df['timestamp'])
df['scheduled_at'] = pd.to_datetime(df['scheduled_at'])

# Plot forecasts for a specific object
object_name = df['object_name'].iloc[0]  # First object in dataset
object_data = df[df['object_name'] == object_name].sort_values('timestamp')

plt.figure(figsize=(12, 6))

# Plot median forecast
plt.plot(object_data['timestamp'], object_data['p50'],
         label='Median Forecast (p50)', color='blue', linewidth=2)

# Add uncertainty bands
plt.fill_between(object_data['timestamp'],
                 object_data['p10'], object_data['p90'],
                 alpha=0.3, color='blue', label='80% Confidence Interval')

plt.fill_between(object_data['timestamp'],
                 object_data['p05'], object_data['p95'],
                 alpha=0.2, color='blue', label='90% Confidence Interval')

# Formatting
plt.title(f'Forecast Distribution - {object_name}')
plt.xlabel('Time')
plt.ylabel('Price ($/MWh)')
plt.legend()
plt.grid(True, alpha=0.3)
plt.xticks(rotation=45)
plt.tight_layout()
plt.show()

Model Comparison

# Compare different models (if multiple models in data)
if df['model_id'].nunique() > 1:
model_comparison = df.groupby(['model_id', 'object_name']).agg({
'p50': ['mean', 'std'],
'timestamp': 'count'
}).round(2)

print("Model Performance Comparison:")
print(model_comparison)
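
To line models up side by side at each timestamp, a pivot table is convenient. This is a sketch based on the columns produced by parse_dashboard_forecasts above:

# One column of median forecasts (p50) per model_id
pivot = df.pivot_table(
    index=['object_name', 'timestamp'],
    columns='model_id',
    values='p50',
    aggfunc='mean'  # Collapses duplicates across vintages, if any
)
print(pivot.head())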

Best Practices

Error Handling

import time

def safe_api_call(url, headers, params, max_retries=3):
    """Make API call with error handling and retries."""
    for attempt in range(max_retries):
        try:
            response = requests.get(url, headers=headers, params=params, timeout=30)

            if response.status_code == 200:
                return response.json()
            elif response.status_code == 429:  # Rate limited
                print(f"Rate limited, waiting before retry {attempt + 1}")
                time.sleep(2 ** attempt)  # Exponential backoff
            else:
                print(f"API error: {response.status_code} - {response.text}")
                return None

        except requests.exceptions.RequestException as e:
            print(f"Request failed (attempt {attempt + 1}): {e}")
            if attempt == max_retries - 1:
                raise

    return None
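
For example, the basic call from earlier can be routed through this wrapper (url, headers, and params as defined in the first code example):

# Fetch and parse with retries
data = safe_api_call(url, headers, params)
if data is not None:
    df = parse_dashboard_forecasts(data)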

Data Validation

def validate_forecast_data(df):
    """Validate forecast DataFrame structure and content."""
    required_columns = [
        'object_name', 'target_id', 'scheduled_at',
        'timestamp', 'p50', 'model_id'
    ]

    # Check required columns
    missing_cols = [col for col in required_columns if col not in df.columns]
    if missing_cols:
        raise ValueError(f"Missing required columns: {missing_cols}")

    # Check for duplicates
    duplicate_cols = ['timestamp', 'feature_id', 'model_id', 'scheduled_at']
    if df.duplicated(subset=duplicate_cols).any():
        print("Warning: Duplicate forecasts found")

    # Normalize data types
    df['timestamp'] = pd.to_datetime(df['timestamp'])
    df['scheduled_at'] = pd.to_datetime(df['scheduled_at'])

    print("Data validation passed")
    return df

# Use validation
df = validate_forecast_data(df)

Advanced Usage

Batch Processing for Large Date Ranges

from datetime import timedelta

def get_forecasts_batch(start_date, end_date, token, batch_days=14):
    """
    Retrieve forecasts in batches to handle large date ranges.
    """
    all_data = []
    current_date = pd.to_datetime(start_date)
    end_date = pd.to_datetime(end_date)

    while current_date < end_date:
        batch_end = min(current_date + timedelta(days=batch_days), end_date)

        print(f"Fetching data from {current_date.date()} to {batch_end.date()}")

        params = {
            "start": current_date.strftime('%Y-%m-%d'),
            "end": batch_end.strftime('%Y-%m-%d')
        }

        batch_data = safe_api_call(
            "https://app.enertel.ai/api/dashboard/forecasts",
            {"Authorization": f"Bearer {token}"},
            params
        )

        if batch_data:
            all_data.extend(batch_data)

        current_date = batch_end

    return parse_dashboard_forecasts(all_data)

# Example usage for a large date range
# large_df = get_forecasts_batch('2024-01-01', '2024-03-01', token)

This tutorial provides a complete framework for working with the dashboard forecasts API, from basic data retrieval to advanced batch processing and validation techniques.

Data Visualization

We recommend using the VS Code extension "Data Wrangler" for interactive DataFrame exploration and visualization.