{
  "endpoint": "/companies.json",
  "status": "beta",
  "as_of": "2026-05-10",
  "schema_version": "0.1.0-draft",
  "license": "CC-BY-4.0",
  "description": "Curated directory of companies that design, manufacture, host, or market compute relevant to autonomous agent workloads.",
  "schema": {
    "id": "stable slug identifier",
    "name": "company name",
    "category": "designer | foundry | memory | hyperscale_cloud | gpu_cloud | decentralized",
    "founded": "year founded",
    "hq": "headquarters city, country",
    "ticker": "public stock ticker (if applicable)",
    "focus": "one-line description of compute role",
    "key_products": "slugs from /hardware/gpus.json or notable platforms",
    "website": "primary URL"
  },
  "count": 31,
  "items": [
    {
      "id": "nvidia",
      "name": "NVIDIA",
      "category": "designer",
      "founded": 1993,
      "hq": "Santa Clara, USA",
      "ticker": "NVDA",
      "focus": "Dominant GPU and AI accelerator designer; CUDA software stack defines the de facto AI platform.",
      "key_products": ["nvidia-b200", "nvidia-h100-sxm", "nvidia-h200", "nvidia-rubin"],
      "website": "https://www.nvidia.com"
    },
    {
      "id": "amd",
      "name": "AMD",
      "category": "designer",
      "founded": 1969,
      "hq": "Santa Clara, USA",
      "ticker": "AMD",
      "focus": "GPU and CPU designer; Instinct MI-series is the primary open challenger to NVIDIA in datacenter AI.",
      "key_products": ["amd-mi355x", "amd-mi300x", "amd-mi325x"],
      "website": "https://www.amd.com"
    },
    {
      "id": "intel",
      "name": "Intel",
      "category": "designer",
      "founded": 1968,
      "hq": "Santa Clara, USA",
      "ticker": "INTC",
      "focus": "x86 CPU incumbent; Gaudi AI accelerators (via Habana) and Xe-HPC GPUs for HPC/AI.",
      "key_products": ["intel-gaudi-3", "intel-gpu-max-1550"],
      "website": "https://www.intel.com"
    },
    {
      "id": "apple",
      "name": "Apple",
      "category": "designer",
      "founded": 1976,
      "hq": "Cupertino, USA",
      "ticker": "AAPL",
      "focus": "Apple Silicon (M-series) with unified memory architecture; large-memory local LLM inference on workstation Macs.",
      "key_products": ["apple-m4-max", "apple-m3-ultra"],
      "website": "https://www.apple.com"
    },
    {
      "id": "google",
      "name": "Google",
      "category": "designer",
      "founded": 1998,
      "hq": "Mountain View, USA",
      "ticker": "GOOGL",
      "focus": "TPU custom ASICs for AI training/inference; Trillium (v6e) is the production frontier chip.",
      "key_products": ["google-tpu-v6e", "google-tpu-v5p"],
      "website": "https://cloud.google.com/tpu"
    },
    {
      "id": "aws",
      "name": "Amazon Web Services",
      "category": "designer",
      "founded": 2006,
      "hq": "Seattle, USA",
      "ticker": "AMZN",
      "focus": "Trainium (training) and Inferentia (inference) custom silicon; vertically integrated AI cloud stack.",
      "key_products": ["aws-trainium-3", "aws-trainium-2", "aws-inferentia-2"],
      "website": "https://aws.amazon.com/machine-learning/trainium/"
    },
    {
      "id": "microsoft",
      "name": "Microsoft",
      "category": "designer",
      "founded": 1975,
      "hq": "Redmond, USA",
      "ticker": "MSFT",
      "focus": "Maia AI accelerator for in-house Azure AI workloads; Cobalt Arm CPUs for general compute.",
      "key_products": ["microsoft-maia-100"],
      "website": "https://news.microsoft.com/source/features/ai/in-house-chips-silicon-to-service-to-meet-ai-demand/"
    },
    {
      "id": "meta",
      "name": "Meta",
      "category": "designer",
      "founded": 2004,
      "hq": "Menlo Park, USA",
      "ticker": "META",
      "focus": "MTIA accelerators for in-house recommendation and inference workloads.",
      "key_products": ["meta-mtia-v2"],
      "website": "https://ai.meta.com/blog/next-generation-meta-training-inference-accelerator-AI-MTIA/"
    },
    {
      "id": "cerebras",
      "name": "Cerebras Systems",
      "category": "designer",
      "founded": 2016,
      "hq": "Sunnyvale, USA",
      "ticker": null,
      "focus": "Wafer-scale chips (WSE-3); positioned around extreme single-system memory bandwidth via 44 GB on-wafer SRAM.",
      "key_products": ["cerebras-wse-3"],
      "website": "https://www.cerebras.ai"
    },
    {
      "id": "groq",
      "name": "Groq",
      "category": "designer",
      "founded": 2016,
      "hq": "Mountain View, USA",
      "ticker": null,
      "focus": "LPU (Language Processing Unit) deterministic inference architecture; very high tokens/sec for LLM serving.",
      "key_products": ["groq-lpu"],
      "website": "https://groq.com"
    },
    {
      "id": "sambanova",
      "name": "SambaNova Systems",
      "category": "designer",
      "founded": 2017,
      "hq": "Palo Alto, USA",
      "ticker": null,
      "focus": "Reconfigurable Dataflow Unit (RDU); SN40L pairs 1.5 TB DRAM with HBM and SRAM for large-model inference.",
      "key_products": ["sambanova-sn40l"],
      "website": "https://sambanova.ai"
    },
    {
      "id": "graphcore",
      "name": "Graphcore",
      "category": "designer",
      "founded": 2016,
      "hq": "Bristol, UK",
      "ticker": null,
      "focus": "IPU (Intelligence Processing Unit) with on-chip SRAM-only memory model; acquired by SoftBank in 2024.",
      "key_products": ["graphcore-bow-ipu"],
      "website": "https://www.graphcore.ai"
    },
    {
      "id": "tenstorrent",
      "name": "Tenstorrent",
      "category": "designer",
      "founded": 2016,
      "hq": "Toronto, Canada",
      "ticker": null,
      "focus": "Open-architecture RISC-V + AI accelerator (Wormhole, Blackhole); ecosystem play targeting custom silicon licensing.",
      "key_products": ["tenstorrent-wormhole-n300"],
      "website": "https://tenstorrent.com"
    },
    {
      "id": "hailo",
      "name": "Hailo",
      "category": "designer",
      "founded": 2017,
      "hq": "Tel Aviv, Israel",
      "ticker": null,
      "focus": "Edge AI accelerators (Hailo-8, Hailo-10) for low-power vision and generative inference at the device.",
      "key_products": ["hailo-10h"],
      "website": "https://hailo.ai"
    },
    {
      "id": "qualcomm",
      "name": "Qualcomm",
      "category": "designer",
      "founded": 1985,
      "hq": "San Diego, USA",
      "ticker": "QCOM",
      "focus": "Snapdragon mobile NPUs; Cloud AI 100 for datacenter inference; large footprint in on-device AI.",
      "key_products": ["qualcomm-cloud-ai-100"],
      "website": "https://www.qualcomm.com"
    },
    {
      "id": "tsmc",
      "name": "TSMC",
      "category": "foundry",
      "founded": 1987,
      "hq": "Hsinchu, Taiwan",
      "ticker": "TSM",
      "focus": "Dominant leading-edge foundry (N3/N3E, N2 ramping); fabricates almost every modern AI accelerator.",
      "key_products": [],
      "website": "https://www.tsmc.com"
    },
    {
      "id": "samsung-foundry",
      "name": "Samsung Foundry",
      "category": "foundry",
      "founded": 2017,
      "hq": "Hwaseong, South Korea",
      "ticker": "005930.KS",
      "focus": "Second-source advanced node foundry (3GAE/2nm); HBM customer integrations.",
      "key_products": [],
      "website": "https://semiconductor.samsung.com/foundry/"
    },
    {
      "id": "intel-foundry",
      "name": "Intel Foundry",
      "category": "foundry",
      "founded": 2021,
      "hq": "Santa Clara, USA",
      "ticker": "INTC",
      "focus": "Re-entering external foundry market with Intel 18A/14A; targeting AI accelerator manufacturing.",
      "key_products": [],
      "website": "https://www.intel.com/content/www/us/en/foundry/overview.html"
    },
    {
      "id": "sk-hynix",
      "name": "SK Hynix",
      "category": "memory",
      "founded": 1983,
      "hq": "Icheon, South Korea",
      "ticker": "000660.KS",
      "focus": "Lead HBM supplier to NVIDIA; first to mass-produce 12-Hi HBM3E and first to sample 16-Hi HBM4.",
      "key_products": ["hbm4-16hi-48gb", "hbm3e-12hi-36gb"],
      "website": "https://www.skhynix.com"
    },
    {
      "id": "samsung-memory",
      "name": "Samsung Semiconductor",
      "category": "memory",
      "founded": 1969,
      "hq": "Hwaseong, South Korea",
      "ticker": "005930.KS",
      "focus": "Global DRAM/NAND leader; HBM3E and GDDR7 producer; major LPDDR5X supplier.",
      "key_products": ["hbm3e-12hi-36gb", "gddr7-32gbps", "lpddr5x-8533"],
      "website": "https://semiconductor.samsung.com"
    },
    {
      "id": "micron",
      "name": "Micron Technology",
      "category": "memory",
      "founded": 1978,
      "hq": "Boise, USA",
      "ticker": "MU",
      "focus": "DRAM/NAND with HBM3E and HBM4 ramp; exclusive GDDR6X supplier to NVIDIA.",
      "key_products": ["hbm3e-12hi-36gb", "gddr6x-24gbps"],
      "website": "https://www.micron.com"
    },
    {
      "id": "aws-cloud",
      "name": "AWS (Cloud)",
      "category": "hyperscale_cloud",
      "founded": 2006,
      "hq": "Seattle, USA",
      "ticker": "AMZN",
      "focus": "Largest public cloud; EC2 P5/P5e (H100/H200), Trn2/Trn3 UltraServers, Inf2; Bedrock managed inference.",
      "key_products": ["aws-trainium-3", "nvidia-h100-sxm"],
      "website": "https://aws.amazon.com"
    },
    {
      "id": "google-cloud",
      "name": "Google Cloud",
      "category": "hyperscale_cloud",
      "founded": 2008,
      "hq": "Mountain View, USA",
      "ticker": "GOOGL",
      "focus": "TPU v5p/v6e pods, A3 (H100) and A4 (B200) VMs; Vertex AI managed platform.",
      "key_products": ["google-tpu-v6e", "google-tpu-v5p"],
      "website": "https://cloud.google.com"
    },
    {
      "id": "azure",
      "name": "Microsoft Azure",
      "category": "hyperscale_cloud",
      "founded": 2010,
      "hq": "Redmond, USA",
      "ticker": "MSFT",
      "focus": "ND-series (H100/H200/B200) clusters; OpenAI exclusivity unwound but still primary host.",
      "key_products": ["nvidia-h100-sxm", "nvidia-b200"],
      "website": "https://azure.microsoft.com"
    },
    {
      "id": "oci",
      "name": "Oracle Cloud Infrastructure",
      "category": "hyperscale_cloud",
      "founded": 2016,
      "hq": "Austin, USA",
      "ticker": "ORCL",
      "focus": "Bare-metal H100/H200/B200 superclusters; RDMA-first networking; large NVIDIA wins via dedicated capacity contracts.",
      "key_products": ["nvidia-h100-sxm", "nvidia-b200", "amd-mi355x"],
      "website": "https://www.oracle.com/cloud/"
    },
    {
      "id": "coreweave",
      "name": "CoreWeave",
      "category": "gpu_cloud",
      "founded": 2017,
      "hq": "Roseland, USA",
      "ticker": "CRWV",
      "focus": "Specialized NVIDIA-only GPU cloud; H100/H200/GB200/GB300 at-scale dedicated clusters for AI labs.",
      "key_products": ["nvidia-h100-sxm", "nvidia-gb200", "nvidia-gb300"],
      "website": "https://www.coreweave.com"
    },
    {
      "id": "lambda",
      "name": "Lambda Labs",
      "category": "gpu_cloud",
      "founded": 2012,
      "hq": "San Jose, USA",
      "ticker": null,
      "focus": "GPU cloud and on-demand H100/H200 clusters; popular for research and small-to-mid AI teams.",
      "key_products": ["nvidia-h100-sxm", "nvidia-h200"],
      "website": "https://lambdalabs.com"
    },
    {
      "id": "crusoe",
      "name": "Crusoe Energy",
      "category": "gpu_cloud",
      "founded": 2018,
      "hq": "Denver, USA",
      "ticker": null,
      "focus": "AI cloud powered by stranded/renewable energy; H100/H200/B200 clusters with low-cost power story.",
      "key_products": ["nvidia-h100-sxm", "nvidia-b200"],
      "website": "https://crusoe.ai"
    },
    {
      "id": "io-net",
      "name": "io.net",
      "category": "decentralized",
      "founded": 2022,
      "hq": "Distributed (Solana ecosystem)",
      "ticker": null,
      "focus": "Decentralized GPU network aggregating idle GPUs (consumer + datacenter) into Kubernetes-style clusters.",
      "key_products": ["nvidia-a100-80gb", "nvidia-h100-sxm", "nvidia-rtx-4090"],
      "website": "https://io.net"
    },
    {
      "id": "akash",
      "name": "Akash Network",
      "category": "decentralized",
      "founded": 2018,
      "hq": "Distributed (Cosmos ecosystem)",
      "ticker": null,
      "focus": "Decentralized compute marketplace with reverse-auction pricing; GPU support added 2024.",
      "key_products": ["nvidia-a100-80gb", "nvidia-h100-sxm"],
      "website": "https://akash.network"
    },
    {
      "id": "render",
      "name": "Render Network",
      "category": "decentralized",
      "founded": 2017,
      "hq": "Los Angeles, USA",
      "ticker": null,
      "focus": "Decentralized GPU rendering and (increasingly) AI inference network; consumer-class GPUs predominant.",
      "key_products": ["nvidia-rtx-4090", "amd-radeon-rx-7900-xtx"],
      "website": "https://rendernetwork.com"
    }
  ]
}
