{
  "version": "1.2.0",
  "chapters": [
    {
      "title": "Intro",
      "startTime": 0,
      "endTime": 92
    },
    {
      "title": "Understanding LLMs and Ollama",
      "startTime": 92,
      "endTime": 196
    },
    {
      "title": "Ollama's Elevator Pitch",
      "startTime": 196,
      "endTime": 520
    },
    {
      "title": "Installing and Extending Ollama",
      "startTime": 520,
      "endTime": 1037
    },
    {
      "title": "Hugging Face and Other Libraries",
      "startTime": 1037,
      "endTime": 1164
    },
    {
      "title": "Which Model Should You Use?",
      "startTime": 1164,
      "endTime": 1588
    },
    {
      "title": "Ollama and Its Applications",
      "startTime": 1588,
      "endTime": 1737
    },
    {
      "title": "Retrieval Augmented Generation (RAG)",
      "startTime": 1737,
      "endTime": 2204
    },
    {
      "title": "Deploying Models and API Endpoints",
      "startTime": 2204,
      "endTime": 2438
    },
    {
      "title": "DockerCon Keynote and LLM Demo",
      "startTime": 2438,
      "endTime": 2864
    },
    {
      "title": "Getting Started with Ollama",
      "startTime": 2864,
      "endTime": 3007
    }
  ]
}