🐦 Twitter Post Details

Viewing enriched Twitter post

@PyTorch

Need to accelerate Large-Scale Mixture of Experts Training? With @nvidia NeMo Automodel, an open source library within NVIDIA NeMo framework, developers can now train large-scale MoE models directly in PyTorch using the same familiar tools they already know. Learn how to make MoE training of massive models simple and efficient by reading NVIDIA’s developer blog: 📎 https://t.co/xGnx95jY8y #PyTorch #MoE #OpenSourceAI

Media 1

📊 Media Metadata

{
  "media": [
    {
      "type": "photo",
      "url": "https://crmoxkoizveukayfjuyo.supabase.co/storage/v1/object/public/media/posts/1996734741823779001/media_0.jpg",
      "filename": "media_0.jpg"
    }
  ],
  "processed_at": "2025-12-08T13:41:31.409420",
  "pipeline_version": "2.0"
}

🔧 Raw API Response

{
  "type": "tweet",
  "id": "1996734741823779001",
  "url": "https://x.com/PyTorch/status/1996734741823779001",
  "twitterUrl": "https://twitter.com/PyTorch/status/1996734741823779001",
  "text": "Need to accelerate Large-Scale Mixture of Experts Training?\n\nWith @nvidia NeMo Automodel, an open source library within NVIDIA NeMo framework, developers can now train large-scale MoE models directly in PyTorch using the same familiar tools they already know.\n\nLearn how to make MoE training of massive models simple and efficient by reading NVIDIA’s developer blog: \n📎 https://t.co/xGnx95jY8y\n\n#PyTorch #MoE #OpenSourceAI",
  "source": "Twitter for iPhone",
  "retweetCount": 14,
  "replyCount": 3,
  "likeCount": 119,
  "quoteCount": 2,
  "viewCount": 11272,
  "createdAt": "Fri Dec 05 00:13:54 +0000 2025",
  "lang": "en",
  "bookmarkCount": 51,
  "isReply": false,
  "inReplyToId": null,
  "conversationId": "1996734741823779001",
  "displayTextRange": [
    0,
    279
  ],
  "inReplyToUserId": null,
  "inReplyToUsername": null,
  "author": {
    "type": "user",
    "userName": "PyTorch",
    "url": "https://x.com/PyTorch",
    "twitterUrl": "https://twitter.com/PyTorch",
    "id": "776585502606721024",
    "name": "PyTorch",
    "isVerified": false,
    "isBlueVerified": true,
    "verifiedType": null,
    "profilePicture": "https://pbs.twimg.com/profile_images/1813965160702451712/yXV1vRhr_normal.jpg",
    "coverPicture": "https://pbs.twimg.com/profile_banners/776585502606721024/1761575044",
    "description": "Tensors and neural networks in Python with strong hardware acceleration. PyTorch is an open source project at the Linux Foundation. #PyTorchFoundation",
    "location": "",
    "followers": 463975,
    "following": 81,
    "status": "",
    "canDm": false,
    "canMediaTag": true,
    "createdAt": "Fri Sep 16 00:56:26 +0000 2016",
    "entities": {
      "description": {
        "urls": []
      },
      "url": {
        "urls": [
          {
            "display_url": "pytorch.org",
            "expanded_url": "http://pytorch.org",
            "url": "https://t.co/6SwTBhUwTJ",
            "indices": [
              0,
              23
            ]
          }
        ]
      }
    },
    "fastFollowersCount": 0,
    "favouritesCount": 844,
    "hasCustomTimelines": true,
    "isTranslator": false,
    "mediaCount": 1243,
    "statusesCount": 2965,
    "withheldInCountries": [],
    "affiliatesHighlightedLabel": {},
    "possiblySensitive": false,
    "pinnedTweetIds": [
      "1995590697567879409"
    ],
    "profile_bio": {},
    "isAutomated": false,
    "automatedBy": null
  },
  "extendedEntities": {},
  "card": {
    "binding_values": [
      {
        "key": "description",
        "value": {
          "string_value": "Training massive mixture-of-experts (MoE) models has long been the domain of a few advanced users with deep infrastructure and distributed-systems expertise. For most developers, the challenge wasn’t…",
          "type": "STRING"
        }
      },
      {
        "key": "domain",
        "value": {
          "string_value": "developer.nvidia.com",
          "type": "STRING"
        }
      },
      {
        "key": "vanity_url",
        "value": {
          "scribe_key": "vanity_url",
          "string_value": "developer.nvidia.com",
          "type": "STRING"
        }
      },
      {
        "key": "title",
        "value": {
          "string_value": "Accelerating Large-Scale Mixture-of-Experts Training in PyTorch | NVIDIA Technical Blog",
          "type": "STRING"
        }
      },
      {
        "key": "card_url",
        "value": {
          "scribe_key": "card_url",
          "string_value": "https://t.co/xGnx95jY8y",
          "type": "STRING"
        }
      }
    ],
    "card_platform": {
      "platform": {
        "audience": {
          "name": "production"
        },
        "device": {
          "name": "Android",
          "version": "12"
        }
      }
    },
    "name": "summary_large_image",
    "url": "https://t.co/xGnx95jY8y",
    "user_refs_results": []
  },
  "place": {},
  "entities": {
    "hashtags": [
      {
        "indices": [
          395,
          403
        ],
        "text": "PyTorch"
      },
      {
        "indices": [
          404,
          408
        ],
        "text": "MoE"
      },
      {
        "indices": [
          409,
          422
        ],
        "text": "OpenSourceAI"
      }
    ],
    "symbols": [],
    "urls": [
      {
        "display_url": "hubs.la/Q03W-xrH0",
        "expanded_url": "https://hubs.la/Q03W-xrH0",
        "url": "https://t.co/xGnx95jY8y",
        "indices": [
          370,
          393
        ]
      }
    ],
    "user_mentions": [
      {
        "id_str": "61559439",
        "name": "NVIDIA",
        "screen_name": "nvidia",
        "indices": [
          66,
          73
        ]
      }
    ]
  },
  "quoted_tweet": null,
  "retweeted_tweet": null,
  "article": null
}