Home / Docs-Technical WhitePaper / 44-EFT.WP.Data.ModelCards v1.0
Chapter 18 Appendix: Model Card Template
I. Template Scope & Posture
Provide two drop-in templates—minimal and full skeleton—for YAML/JSON Model Cards. Use snake_case keys; cross-volume citations use “Volume vX.Y:Anchor”; the unit system follows SI with check_dim=true; evaluations must use frozen splits.
II. Minimal Template (copy-paste ready)
# ===== Minimal Model Card (release-grade) =====
model_id: "<org.project.model>"
title: "<Human-readable Title>"
version: "v1.0"
task: "classification"  # or other tasks
io_schema:
  inputs: [{name: "<input>", shape: "(<...>)", dtype: "<uint8|float32|...>", range: "<[lo,hi]|N/A>", semantics: "<rgb|tokenized|...>"}]
  outputs: [{name: "<output>", shape: "(<...>)", dtype: "float32", range: "[0,1]", semantics: "softmax"}]
  batching: {mode: "dynamic", max_batch: 128}
  streaming: {enabled: false}
architecture:
  version: "v1.0"
  backbone: "<resnet50|vit-b|...>"
  topology: [{name: "stem", type: "conv"}]
training_data:
  refs: ["EFT.WP.Data.DatasetCards v1.0:Ch.11"]  # reference only
  splits_ref: "<dataset_id@vX.Y>"
  contamination_policy: "forbid-cross-split"
  leakage_guards: ["per-object"]
preprocess:
  pipeline_id: "<prep-name>"
  steps: []
  feature_space: {type: "dense", shape: "(<...>)", dtype: "float32", normalization: "zscore"}
  parameter_lock: true
optimization:
  objective: {name: "cross_entropy", reduction: "mean"}
  optimizer: {name: "adamw", lr: 3.0e-4}
  hyperparams: {batch_size: 256, epochs: 200}
evaluation:
  protocol: {splits: "frozen", seeds: [0, 1, 2, 3, 4], repeats: 5}
  metrics: {classification: ["f1_macro", "roc_auc", "ece", "brier"]}
  calibration: {method: "temperature", params: {t: 1.7}}
  robustness: {thresholds: {drop_rel_max: 0.10}}
  fairness: {axes: ["class", "region"], gap_metric: "abs_diff", threshold: 0.05}
safety:
  allowed_use: ["academic", "benchmark"]
  prohibited_use: ["surveillance"]
deployment:
  forms: ["rest"]
  devices: ["A100"]
  latency_targets_ms: {p50: 5}
# exponent written with explicit sign so YAML 1.1 resolvers (PyYAML) load it as float, not string
resources: {M_param: 25.6, FLOPs: 4.1e+9, T_inf: 3.8}
metrology: {units: "SI", check_dim: true}
export_manifest:
  version: "v1.0"
  artifacts: [{path: "model_card.yaml", sha256: "<hex>"}]
  references:
    - "EFT.WP.Core.DataSpec v1.0:EXPORT"
    - "EFT.WP.Core.Metrology v1.0:check_dim"
III. Full Skeleton Template (release-grade, with optional extensions)
# ===== Full Model Card Skeleton =====
model_id: "<org.project.model>"
title: "<Human-readable Title>"
version: "v1.0.0"
task: "<classification|retrieval|generation|asr|segmentation|detection|timeseries|...>"
io_schema:
  version: "v1.0"
  inputs:
    - {name: "<...>", shape: "(<...>)", dtype: "<...>", range: "<[lo,hi]|N/A>", semantics: "<...>"}
  outputs:
    - {name: "<...>", shape: "(<...>)", dtype: "<...>", range: "[0,1]", semantics: "softmax"}
  batching: {mode: "<static|dynamic>", max_batch: <int>}
  streaming: {enabled: <bool>, chunk_ms: <int?>, lookahead_ms: <int?>}
  constraints:
    - {type: "range", target: "outputs[softmax]", rule: "[0,1] & sum==1±1e-6"}
architecture:
  version: "v1.0"
  backbone: "<resnet50|vit-b|conformer-xs|...>"
  topology:
    - {name: "<module>", type: "<conv|resblk|transformer_block|mlp|...>", repeat: <int?>, params: {<k:v>}}
  positional_encoding: {type: "<sinusoidal|learned|none>", dim: <int?>}
  # eps written as 1.0e-5: bare "1e-5" fails PyYAML's float resolver and loads as a string
  norm: {type: "<bn|ln|rmsnorm>", eps: 1.0e-5, affine: true}
  act: {type: "<relu|gelu|silu|tanh>"}
  dropout: {p: 0.1}
  attention: {type: "<msa|lsa|flash>", heads: <int?>, window: <int?>}
  mixed_precision: {train: "<fp16|bf16|fp32>", infer: "<fp16|bf16|fp32>", loss_scale: "<dynamic|static|none>"}
  init: {scheme: "<kaiming_uniform|xavier_normal|trunc_normal>", seed: 1701}
  params_report: {M_param: <number>, FLOPs: <number>, T_inf: <ms>}
  constraints: {grad_ckpt: true, amp_safe_ops: ["conv", "gemm"]}
training_data:
  refs:
    - "EFT.WP.Data.DatasetCards v1.0:Ch.6"
    - "EFT.WP.Data.DatasetCards v1.0:Ch.11"
    - "EFT.WP.Data.DatasetCards v1.0:Ch.12"
  splits_ref: "<dataset_id@vX.Y>"
  sampling_binding:
    strategy: "<random|stratified|time-based|spatial-tiles|systematic>"
    strata: [{by: "<class|region|snr_bin>", buckets: {"A": 100, "B": 200}}]
    weights: {class: "inverse_freq"}
  contamination_policy: "forbid-cross-split"
  leakage_guards: ["per-object", "per-timewindow", "per-scene"]
preprocess:
  pipeline_id: "<prep-name>"
  steps:
    - {name: "<clean|filter|normalize|standardize|resample|impute|encode|tokenize|stft|feature_map|pca|custom>",
       enabled: true, idempotent: true, params: {<...>}, inputs: ["<...>"], outputs: ["<...>"]}
  feature_space: {type: "<dense|sparse|sequence|image|audio_spec|tabular|embedding>", shape: "(<...>)", dtype: "<...>", normalization: "<zscore|minmax|robust|unit-norm|none>"}
  parameter_lock: true
  randomness: {seed: 1701, libraries: {numpy: "1.26.4"}}
  environment: {os: "ubuntu22.04", toolchain: ["python3.11"], containers: ["ghcr.io/eift/model-prep:1.0.2"]}
  audits: ["nan-check", "range-check", "leakage", "drift"]
# NOTE(review): placed top-level (it governs T_arr path quantities, cf. the Section VI
# self-check), not under preprocess — confirm against the Chapter 15 schema
path_dependence:
  applies_to: ["T_arr"]
  delta_form: "const-factor"
  path: "gamma(ell)"
  measure: "d ell"
  see: ["EFT.WP.Core.Equations v1.1:S20-1", "EFT.WP.Core.Metrology v1.0:check_dim"]
optimization:
  objective: {name: "<cross_entropy|mse|mae|nll|ctc|triplet|contrastive|custom>", reduction: "<mean|sum|none>"}
  regularization: {weight_decay: 0.05, grad_clip: {type: "norm", value: 1.0}}
  optimizer: {name: "<adamw|sgd|...>", lr: 3.0e-4, betas: [0.9, 0.999], eps: 1.0e-8, weight_decay: 0.05}
  scheduler: {name: "<cosine|step|...>", warmup: {steps: 500, mode: "linear"}}
  hyperparams:
    batch_size: 256
    accum_steps: 1
    epochs: 200
    search_space: {lr: {type: "loguniform", low: 1.0e-5, high: 1.0e-3}}
evaluation:
  protocol:
    splits: "frozen"
    seeds: [0, 1, 2, 3, 4]
    repeats: 5
    significance: {test: "permutation|bootstrap", alpha: 0.05}
    ci: {method: "bootstrap-bca", level: 0.95, samples: 1000}
  metrics:
    classification: ["f1_macro", "roc_auc", "ece", "brier"]
    detection: ["mAP@0.50:0.95", "mAP@0.50"]
    regression: ["rmse", "mae", "mape", "nll"]
  calibration:
    method: "temperature"
    params: {t: 1.7}
    eval: {report: ["ece", "brier", "calibration_curve"], ece_bins: 15}
  uncertainty:
    model: "<GUM|linear|montecarlo|bayesian>"
    components:
      - {name: "<...>", type: "<random|systematic>", value: <...>, unit: "<...>", distribution: "<normal|uniform|...>", coverage: {k: <...>}}
    correlation: {posture: "<groups|covariance>"}
    propagation: {rule: "<rss|linear|montecarlo|bayesian>"}
    coverage_policy: {target_p: 0.95, k: 2.0}
  robustness:
    shift_tests: [{name: "snr_drop", severity: [3, 6, 9]}]
    thresholds: {drop_rel_max: 0.10}
  fairness:
    axes: ["class", "region"]
    gap_metric: "abs_diff"
    threshold: 0.05
ethics:
  intended_use: ["academic", "benchmark"]
  prohibited_use: ["surveillance", "biometric_identification"]
  disclosures: {human_in_the_loop: true}
usage:
  regional_compliance: ["EU-GDPR"]
  access_control: {roles: ["owner", "maintainer", "reader"], enforcement: ["signed-url", "token"]}
deployment:
  forms: ["rest", "grpc"]
  devices: ["A100", "CPU-AVX2"]
  latency_targets_ms: {p50: 5, p99: 20}
  concurrency: 512
resources: {M_param: <number>, FLOPs: <number>, T_inf: <ms>}
metrology: {units: "SI", check_dim: true}
export_manifest:
  version: "v1.0"
  artifacts:
    - {path: "model_card.yaml", sha256: "<hex>"}
    - {path: "eval/summary.csv", sha256: "<hex>"}
  references:
    - "EFT.WP.Core.DataSpec v1.0:EXPORT"
    - "EFT.WP.Core.Metrology v1.0:check_dim"
IV. Placeholder Hints & Minimal Regex (quick ref)
- model_id: ^[a-z0-9_\-\.]+$; version: ^v\d+\.\d+(\.\d+)?$.
- export_manifest.references[*]: ^[^:]+ v\d+\.\d+:[A-Z].+$.
- evaluation.protocol.splits: "frozen"; softmax outputs range="[0,1]" and sum==1±1e-6.
- Metrology: metrology.units="SI" and check_dim=true.
V. Export Manifest Template (Normative)
export_manifest:
  version: "v1.0"
  artifacts:
    - {path: "model_card.yaml", sha256: "<hex>"}
    - {path: "eval/summary.csv", sha256: "<hex>"}
    - {path: "robustness/summary.csv", sha256: "<hex>"}
    - {path: "fairness/by_axis_metrics.csv", sha256: "<hex>"}
  references:
    - "EFT.WP.Core.DataSpec v1.0:EXPORT"
    - "EFT.WP.Core.Metrology v1.0:check_dim"
VI. Pre-Release Blocking Self-Check (list)
- Structure/required: Chapter 15 Schema required keys present; evaluation.protocol.splits="frozen".
- Citations/versioning: export_manifest.references[] use “Volume vX.Y:Anchor”, no shortcodes or missing versions.
- Metrology/units: units="SI", check_dim=true; units consistent for performance/energy/time.
- Path quantities: If T_arr present, delta_form/path/measure registered and validated.
- Leakage/fairness/robustness: leakage audits pass; fairness/robustness thresholds set and met.
- Verifiable artifacts: all artifacts in the manifest have sha256 and are reproducible.
VII. Machine-Readable Blank Template (no-comments; CI-friendly)
model_id: ""
title: ""
version: "v1.0"
task: ""
io_schema: {inputs: [], outputs: [], batching: {mode: "dynamic", max_batch: 0}, streaming: {enabled: false}}
architecture: {version: "v1.0", backbone: "", topology: []}
training_data: {refs: [], splits_ref: ""}
preprocess: {pipeline_id: "", steps: [], feature_space: {type: "", shape: "", dtype: "", normalization: "none"}, parameter_lock: true}
optimization: {objective: {name: "", reduction: "mean"}}
hyperparams: {batch_size: 0, epochs: 0}
evaluation: {protocol: {splits: "frozen", seeds: [], repeats: 1}, metrics: {}}
calibration: {method: "", params: {}}
uncertainty: {}
robustness: {}
fairness: {axes: [], gap_metric: "abs_diff", threshold: 0.05}
ethics: {intended_use: [], prohibited_use: []}
usage: {regional_compliance: [], access_control: {roles: [], enforcement: []}}
deployment: {forms: [], devices: [], latency_targets_ms: {}}
resources: {M_param: 0, FLOPs: 0, T_inf: 0}
metrology: {units: "SI", check_dim: true}
export_manifest: {version: "v1.0", artifacts: [], references: ["EFT.WP.Core.DataSpec v1.0:EXPORT", "EFT.WP.Core.Metrology v1.0:check_dim"]}
Copyright & License (CC BY 4.0)
Copyright: Unless otherwise noted, the copyright of “Energy Filament Theory” (text, charts, illustrations, symbols, and formulas) belongs to the author “Guanglin Tu”.
License: This work is licensed under the Creative Commons Attribution 4.0 International (CC BY 4.0). You may copy, redistribute, excerpt, adapt, and share for commercial or non-commercial purposes with proper attribution.
Suggested attribution: Author: “Guanglin Tu”; Work: “Energy Filament Theory”; Source: energyfilament.org; License: CC BY 4.0.
First published: 2025-11-11 | Current version: v5.1
License link: https://creativecommons.org/licenses/by/4.0/