🐦 Twitter Post Details

@vllm_project

Diffusion serving is expensive: dozens of timesteps per image, and a lot of redundant compute between adjacent steps.

⚡vLLM-Omni now supports diffusion cache acceleration backends (TeaCache + Cache-DiT) to reuse intermediate Transformer computations — no retraining, minimal quality impact!

🚀Benchmarks (NVIDIA H200, Qwen-Image 1024x1024): TeaCache 1.91x, Cache-DiT 1.85x. For Qwen-Image-Edit, Cache-DiT hits 2.38x!

Blog: https://t.co/TiC0WhbgQp
Docs: https://t.co/0qatboeIe3

#vLLM #vLLMOmni #DiffusionModels #AIInference
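
Both backends exploit the observation in the tweet: adjacent denoising steps produce nearly identical intermediate activations, so the transformer's residual can often be reused instead of recomputed. Below is a minimal PyTorch sketch of that timestep-aware residual reuse. Every name in it (DiTStub, rel_l1_change, skip_threshold) is invented for illustration and is not vLLM-Omni's API; the actual TeaCache/Cache-DiT setup is described behind the Docs link above.

import torch

class DiTStub(torch.nn.Module):
    # Stand-in for an expensive stack of diffusion-transformer blocks.
    def __init__(self, dim: int = 64):
        super().__init__()
        self.blocks = torch.nn.Sequential(
            *[torch.nn.Linear(dim, dim) for _ in range(8)]
        )

    def forward(self, x: torch.Tensor, t_emb: torch.Tensor) -> torch.Tensor:
        # Timestep conditioning, heavily simplified.
        return self.blocks(x + t_emb)

def rel_l1_change(curr: torch.Tensor, prev: torch.Tensor) -> float:
    # Cheap proxy for how much this step differs from the previous one.
    return (curr - prev).abs().mean().item() / (prev.abs().mean().item() + 1e-8)

@torch.no_grad()
def denoise(model: DiTStub, x: torch.Tensor, t_embs: list[torch.Tensor],
            skip_threshold: float = 0.05) -> torch.Tensor:
    cached_residual = None   # output minus input from the last full forward
    prev_probe = None        # conditioned input seen at the previous step
    accumulated = 0.0        # drift accumulated since the last full forward

    for t_emb in t_embs:
        probe = x + t_emb    # cheap indicator, computed without running the blocks
        if prev_probe is not None:
            accumulated += rel_l1_change(probe, prev_probe)
        prev_probe = probe

        if cached_residual is not None and accumulated < skip_threshold:
            # Adjacent steps changed little: reuse the cached residual
            # instead of running the full transformer stack.
            x = x + cached_residual
        else:
            out = model(x, t_emb)          # full (expensive) forward
            cached_residual = out - x      # refresh the cache
            accumulated = 0.0
            x = out
    return x

model = DiTStub()
x = torch.randn(1, 64)
t_embs = [torch.randn(1, 64) * (1.0 - i / 20) for i in range(20)]
print(denoise(model, x, t_embs).shape)  # torch.Size([1, 64])

In a sketch like this, skip_threshold is the speed/fidelity knob: raising it skips more steps for a larger speedup, at the cost of drifting further from the uncached output. The reported 1.85x-2.38x figures will likewise depend on model, workload, and how aggressively the backend is tuned.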

Media 1

📊 Media Metadata

{
  "media": [
    {
      "url": "https://crmoxkoizveukayfjuyo.supabase.co/storage/v1/object/public/media/posts/2002233999947890810/media_0.jpg?",
      "media_url": "https://crmoxkoizveukayfjuyo.supabase.co/storage/v1/object/public/media/posts/2002233999947890810/media_0.jpg?",
      "type": "photo",
      "filename": "media_0.jpg"
    }
  ],
  "processed_at": "2025-12-21T04:50:13.352375",
  "pipeline_version": "2.0"
}

🔧 Raw API Response

{
  "type": "tweet",
  "id": "2002233999947890810",
  "url": "https://x.com/vllm_project/status/2002233999947890810",
  "twitterUrl": "https://twitter.com/vllm_project/status/2002233999947890810",
  "text": "Diffusion serving is expensive: dozens of timesteps per image, and a lot of redundant compute between adjacent steps.\n\n⚡vLLM-Omni now supports diffusion cache acceleration backends (TeaCache + Cache-DiT) to reuse intermediate Transformer computations — no retraining, minimal quality impact!\n\n🚀Benchmarks (NVIDIA H200, Qwen-Image 1024x1024): TeaCache 1.91x, Cache-DiT 1.85x. For Qwen-Image-Edit, Cache-DiT hits 2.38x! \n\nBlog: https://t.co/TiC0WhbgQp\nDocs: https://t.co/0qatboeIe3\n\n#vLLM #vLLMOmni #DiffusionModels #AIInference",
  "source": "Twitter for iPhone",
  "retweetCount": 26,
  "replyCount": 5,
  "likeCount": 188,
  "quoteCount": 1,
  "viewCount": 10977,
  "createdAt": "Sat Dec 20 04:25:59 +0000 2025",
  "lang": "en",
  "bookmarkCount": 58,
  "isReply": false,
  "inReplyToId": null,
  "conversationId": "2002233999947890810",
  "displayTextRange": [
    0,
    276
  ],
  "inReplyToUserId": null,
  "inReplyToUsername": null,
  "author": {
    "type": "user",
    "userName": "vllm_project",
    "url": "https://x.com/vllm_project",
    "twitterUrl": "https://twitter.com/vllm_project",
    "id": "1774187564276289536",
    "name": "vLLM",
    "isVerified": false,
    "isBlueVerified": true,
    "verifiedType": null,
    "profilePicture": "https://pbs.twimg.com/profile_images/1774187681746182144/N_5NJ8B1_normal.jpg",
    "coverPicture": "https://pbs.twimg.com/profile_banners/1774187564276289536/1733806062",
    "description": "A high-throughput and memory-efficient inference and serving engine for LLMs. Join https://t.co/lxJ0SfX5pJ to discuss together with the community!",
    "location": "",
    "followers": 27216,
    "following": 27,
    "status": "",
    "canDm": true,
    "canMediaTag": true,
    "createdAt": "Sat Mar 30 21:31:01 +0000 2024",
    "entities": {
      "description": {
        "urls": [
          {
            "display_url": "slack.vllm.ai",
            "expanded_url": "http://slack.vllm.ai",
            "url": "https://t.co/lxJ0SfX5pJ",
            "indices": [
              83,
              106
            ]
          }
        ]
      },
      "url": {
        "urls": [
          {
            "display_url": "github.com/vllm-project/v…",
            "expanded_url": "https://github.com/vllm-project/vllm",
            "url": "https://t.co/KmyOI0Gnbj",
            "indices": [
              0,
              23
            ]
          }
        ]
      }
    },
    "fastFollowersCount": 0,
    "favouritesCount": 502,
    "hasCustomTimelines": false,
    "isTranslator": false,
    "mediaCount": 175,
    "statusesCount": 732,
    "withheldInCountries": [],
    "affiliatesHighlightedLabel": {},
    "possiblySensitive": false,
    "pinnedTweetIds": [],
    "profile_bio": {
      "description": "A high-throughput and memory-efficient inference and serving engine for LLMs. Join https://t.co/lxJ0SfX5pJ to discuss together with the community!"
    },
    "isAutomated": false,
    "automatedBy": null
  },
  "extendedEntities": {
    "media": [
      {
        "display_url": "pic.x.com/5v2iJORFaU",
        "expanded_url": "https://x.com/vllm_project/status/2002233999947890810/photo/1",
        "id_str": "2002228203574657024",
        "indices": [
          277,
          300
        ],
        "media_key": "3_2002228203574657024",
        "media_url_https": "https://pbs.twimg.com/media/G8lX8WFWAAAH1KS.jpg",
        "type": "photo",
        "url": "https://t.co/5v2iJORFaU",
        "ext_media_availability": {
          "status": "Available"
        },
        "features": {
          "large": {
            "faces": [
              {
                "x": 1018,
                "y": 184,
                "h": 195,
                "w": 195
              }
            ]
          },
          "medium": {
            "faces": [
              {
                "x": 822,
                "y": 148,
                "h": 157,
                "w": 157
              }
            ]
          },
          "small": {
            "faces": [
              {
                "x": 465,
                "y": 84,
                "h": 89,
                "w": 89
              }
            ]
          },
          "orig": {
            "faces": [
              {
                "x": 1018,
                "y": 184,
                "h": 195,
                "w": 195
              }
            ]
          }
        },
        "sizes": {
          "large": {
            "h": 580,
            "w": 1486,
            "resize": "fit"
          },
          "medium": {
            "h": 468,
            "w": 1200,
            "resize": "fit"
          },
          "small": {
            "h": 265,
            "w": 680,
            "resize": "fit"
          },
          "thumb": {
            "h": 150,
            "w": 150,
            "resize": "crop"
          }
        },
        "original_info": {
          "height": 580,
          "width": 1486,
          "focus_rects": [
            {
              "x": 0,
              "y": 0,
              "w": 1036,
              "h": 580
            },
            {
              "x": 0,
              "y": 0,
              "w": 580,
              "h": 580
            },
            {
              "x": 5,
              "y": 0,
              "w": 509,
              "h": 580
            },
            {
              "x": 114,
              "y": 0,
              "w": 290,
              "h": 580
            },
            {
              "x": 0,
              "y": 0,
              "w": 1486,
              "h": 580
            }
          ]
        },
        "media_results": {
          "result": {
            "media_key": "3_2002228203574657024"
          }
        }
      }
    ]
  },
  "card": null,
  "place": {},
  "entities": {
    "hashtags": [
      {
        "indices": [
          481,
          486
        ],
        "text": "vLLM"
      },
      {
        "indices": [
          487,
          496
        ],
        "text": "vLLMOmni"
      },
      {
        "indices": [
          497,
          513
        ],
        "text": "DiffusionModels"
      },
      {
        "indices": [
          514,
          526
        ],
        "text": "AIInference"
      }
    ],
    "symbols": [],
    "urls": [
      {
        "display_url": "blog.vllm.ai/2025/12/19/vll…",
        "expanded_url": "https://blog.vllm.ai/2025/12/19/vllm-omni-diffusion-cache-acceleration.html",
        "url": "https://t.co/TiC0WhbgQp",
        "indices": [
          426,
          449
        ]
      },
      {
        "display_url": "docs.vllm.ai/projects/vllm-…",
        "expanded_url": "https://docs.vllm.ai/projects/vllm-omni/en/latest/user_guide/diffusion_acceleration",
        "url": "https://t.co/0qatboeIe3",
        "indices": [
          456,
          479
        ]
      }
    ],
    "user_mentions": []
  },
  "quoted_tweet": null,
  "retweeted_tweet": null,
  "article": null
}