🐦 Twitter Post Details

Viewing enriched Twitter post

@Modular

Organizations running large-scale inference now have a clear path away from single-vendor lock-in—without giving up performance.

MAX on AMD MI355 delivered:
- 2× throughput
- 40-60% cost reduction
- More tokens/$ across real workloads

Published benchmarks with @tensorwave: https://t.co/nWBeQJ3wUj

Media 1

📊 Media Metadata

{
  "media": [
    {
      "type": "photo",
      "url": "https://crmoxkoizveukayfjuyo.supabase.co/storage/v1/object/public/media/posts/1998528157578907737/media_0.jpg?",
      "filename": "media_0.jpg"
    }
  ],
  "processed_at": "2025-12-10T01:05:06.843480",
  "pipeline_version": "2.0"
}

🔧 Raw API Response

{
  "type": "tweet",
  "id": "1998528157578907737",
  "url": "https://x.com/Modular/status/1998528157578907737",
  "twitterUrl": "https://twitter.com/Modular/status/1998528157578907737",
  "text": "Organizations running large-scale inference now have a clear path away from single-vendor lock-in—without giving up performance.\n\nMAX on AMD MI355 delivered:\n- 2× throughput\n- 40-60% cost reduction\n- More tokens/$ across real workloads\n\nPublished benchmarks with @tensorwave: https://t.co/nWBeQJ3wUj",
  "source": "Twitter for iPhone",
  "retweetCount": 1,
  "replyCount": 0,
  "likeCount": 13,
  "quoteCount": 0,
  "viewCount": 775,
  "createdAt": "Tue Dec 09 23:00:18 +0000 2025",
  "lang": "en",
  "bookmarkCount": 3,
  "isReply": false,
  "inReplyToId": null,
  "conversationId": "1998528157578907737",
  "displayTextRange": [
    0,
    276
  ],
  "inReplyToUserId": null,
  "inReplyToUsername": null,
  "author": {
    "type": "user",
    "userName": "Modular",
    "url": "https://x.com/Modular",
    "twitterUrl": "https://twitter.com/Modular",
    "id": "1483918307484848132",
    "name": "Modular",
    "isVerified": false,
    "isBlueVerified": true,
    "verifiedType": "Business",
    "profilePicture": "https://pbs.twimg.com/profile_images/1786875652656025600/mp6VADd5_normal.png",
    "coverPicture": "https://pbs.twimg.com/profile_banners/1483918307484848132/1714859329",
    "description": "Building AI’s unified compute layer. We are hiring → https://t.co/cPTAes0HMt 🚀",
    "location": "",
    "followers": 20759,
    "following": 2,
    "status": "",
    "canDm": false,
    "canMediaTag": true,
    "createdAt": "Wed Jan 19 21:46:38 +0000 2022",
    "entities": {
      "description": {
        "urls": [
          {
            "display_url": "modular.com/careers",
            "expanded_url": "http://modular.com/careers",
            "url": "https://t.co/cPTAes0HMt",
            "indices": [
              53,
              76
            ]
          }
        ]
      },
      "url": {
        "urls": [
          {
            "display_url": "modular.com",
            "expanded_url": "https://www.modular.com",
            "url": "https://t.co/dFAH0NVA0N",
            "indices": [
              0,
              23
            ]
          }
        ]
      }
    },
    "fastFollowersCount": 0,
    "favouritesCount": 672,
    "hasCustomTimelines": false,
    "isTranslator": false,
    "mediaCount": 306,
    "statusesCount": 1004,
    "withheldInCountries": [],
    "affiliatesHighlightedLabel": {},
    "possiblySensitive": false,
    "pinnedTweetIds": [
      "1970881293933273524"
    ],
    "profile_bio": {},
    "isAutomated": false,
    "automatedBy": null
  },
  "extendedEntities": {},
  "card": {
    "binding_values": [
      {
        "key": "description",
        "value": {
          "string_value": "In a recent talk, Nikhil Gupta, ML Engineer at TensorWave, walked through two company case studies that show just how far you can push training and inference when you pair modern AMD hardware with...",
          "type": "STRING"
        }
      },
      {
        "key": "domain",
        "value": {
          "string_value": "tensorwave.com",
          "type": "STRING"
        }
      },
      {
        "key": "vanity_url",
        "value": {
          "scribe_key": "vanity_url",
          "string_value": "tensorwave.com",
          "type": "STRING"
        }
      },
      {
        "key": "title",
        "value": {
          "string_value": "Real AI Workloads on AMD GPUs: Inference, Training, and Scaling",
          "type": "STRING"
        }
      },
      {
        "key": "card_url",
        "value": {
          "scribe_key": "card_url",
          "string_value": "https://t.co/nWBeQJ3wUj",
          "type": "STRING"
        }
      }
    ],
    "card_platform": {
      "platform": {
        "audience": {
          "name": "production"
        },
        "device": {
          "name": "Android",
          "version": "12"
        }
      }
    },
    "name": "summary_large_image",
    "url": "https://t.co/nWBeQJ3wUj",
    "user_refs_results": []
  },
  "place": {},
  "entities": {
    "hashtags": [],
    "symbols": [],
    "urls": [
      {
        "display_url": "tensorwave.com/blog/real-ai-w…",
        "expanded_url": "https://tensorwave.com/blog/real-ai-workloads-on-amd-gpus-inference-training-and-scaling",
        "url": "https://t.co/nWBeQJ3wUj",
        "indices": [
          276,
          299
        ]
      }
    ],
    "user_mentions": [
      {
        "id_str": "1731183443395112960",
        "name": "TensorWave",
        "screen_name": "tensorwave",
        "indices": [
          263,
          274
        ]
      }
    ]
  },
  "quoted_tweet": null,
  "retweeted_tweet": null,
  "article": null
}