🐦 Twitter Post Details

Viewing an enriched Twitter post

@rasbt

As we all know by now, reasoning models often generate longer responses, which raises compute costs. Now, this new paper (https://t.co/SwxBs8RsTq) shows that this behavior comes from the RL training process, not from an actual need for long answers for better accuracy. The RL… https://t.co/JnTmDNiVgg

📊 Media Metadata

{
  "data": [
    {
      "id": "1911494734381563904",
      "type": "photo",
      "url": "https://t.co/JnTmDNiVgg",
      "media_url": null,
      "media_url_https": "https://pbs.twimg.com/media/Gob-VVjXwAAKlT8.jpg",
      "display_url": "pic.x.com/JnTmDNiVgg",
      "expanded_url": "https://x.com/rasbt/status/1911494805101986135/photo/1"
    }
  ],
  "score": 0.979,
  "scored_at": "2025-08-09T13:46:07.549865",
  "import_source": "unknown_source",
  "links_checked": true,
  "checked_at": "2025-08-10T10:31:50.996332"
}

🔧 Raw API Response

{
  "tweet": {
    "bookmark_count": 833,
    "bookmarked": false,
    "created_at": "Sun Apr 13 19:00:50 +0000 2025",
    "conversation_id_str": "1911494805101986135",
    "display_text_range": [
      0,
      277
    ],
    "entities": {
      "hashtags": [],
      "media": [
        {
          "display_url": "pic.x.com/JnTmDNiVgg",
          "expanded_url": "https://x.com/rasbt/status/1911494805101986135/photo/1",
          "id_str": "1911494734381563904",
          "indices": [
            278,
            301
          ],
          "media_key": "3_1911494734381563904",
          "media_url_https": "https://pbs.twimg.com/media/Gob-VVjXwAAKlT8.jpg",
          "type": "photo",
          "url": "https://t.co/JnTmDNiVgg",
          "ext_media_availability": {
            "status": "Available"
          },
          "features": {
            "large": {
              "faces": []
            },
            "medium": {
              "faces": []
            },
            "small": {
              "faces": []
            },
            "orig": {
              "faces": []
            }
          },
          "sizes": {
            "large": {
              "h": 868,
              "w": 1118,
              "resize": "fit"
            },
            "medium": {
              "h": 868,
              "w": 1118,
              "resize": "fit"
            },
            "small": {
              "h": 528,
              "w": 680,
              "resize": "fit"
            },
            "thumb": {
              "h": 150,
              "w": 150,
              "resize": "crop"
            }
          },
          "original_info": {
            "height": 868,
            "width": 1118,
            "focus_rects": [
              {
                "x": 0,
                "y": 242,
                "w": 1118,
                "h": 626
              },
              {
                "x": 97,
                "y": 0,
                "w": 868,
                "h": 868
              },
              {
                "x": 151,
                "y": 0,
                "w": 761,
                "h": 868
              },
              {
                "x": 314,
                "y": 0,
                "w": 434,
                "h": 868
              },
              {
                "x": 0,
                "y": 0,
                "w": 1118,
                "h": 868
              }
            ]
          },
          "allow_download_status": {
            "allow_download": true
          },
          "media_results": {
            "result": {
              "media_key": "3_1911494734381563904"
            }
          }
        }
      ],
      "symbols": [],
      "timestamps": [],
      "urls": [
        {
          "display_url": "arxiv.org/abs/2504.05185",
          "expanded_url": "https://arxiv.org/abs/2504.05185",
          "url": "https://t.co/SwxBs8RsTq",
          "indices": [
            122,
            145
          ]
        }
      ],
      "user_mentions": []
    },
    "extended_entities": {
      "media": [
        {
          "display_url": "pic.x.com/JnTmDNiVgg",
          "expanded_url": "https://x.com/rasbt/status/1911494805101986135/photo/1",
          "id_str": "1911494734381563904",
          "indices": [
            278,
            301
          ],
          "media_key": "3_1911494734381563904",
          "media_url_https": "https://pbs.twimg.com/media/Gob-VVjXwAAKlT8.jpg",
          "type": "photo",
          "url": "https://t.co/JnTmDNiVgg",
          "ext_media_availability": {
            "status": "Available"
          },
          "features": {
            "large": {
              "faces": []
            },
            "medium": {
              "faces": []
            },
            "small": {
              "faces": []
            },
            "orig": {
              "faces": []
            }
          },
          "sizes": {
            "large": {
              "h": 868,
              "w": 1118,
              "resize": "fit"
            },
            "medium": {
              "h": 868,
              "w": 1118,
              "resize": "fit"
            },
            "small": {
              "h": 528,
              "w": 680,
              "resize": "fit"
            },
            "thumb": {
              "h": 150,
              "w": 150,
              "resize": "crop"
            }
          },
          "original_info": {
            "height": 868,
            "width": 1118,
            "focus_rects": [
              {
                "x": 0,
                "y": 242,
                "w": 1118,
                "h": 626
              },
              {
                "x": 97,
                "y": 0,
                "w": 868,
                "h": 868
              },
              {
                "x": 151,
                "y": 0,
                "w": 761,
                "h": 868
              },
              {
                "x": 314,
                "y": 0,
                "w": 434,
                "h": 868
              },
              {
                "x": 0,
                "y": 0,
                "w": 1118,
                "h": 868
              }
            ]
          },
          "allow_download_status": {
            "allow_download": true
          },
          "media_results": {
            "result": {
              "media_key": "3_1911494734381563904"
            }
          }
        }
      ]
    },
    "favorite_count": 1201,
    "favorited": false,
    "full_text": "As we all know by now, reasoning models often generate longer responses, which raises compute costs. Now, this new paper (https://t.co/SwxBs8RsTq) shows that this behavior comes from the RL training process, not from an actual need for long answers for better accuracy. The RL… https://t.co/JnTmDNiVgg",
    "is_quote_status": false,
    "lang": "en",
    "possibly_sensitive": false,
    "possibly_sensitive_editable": true,
    "quote_count": 33,
    "reply_count": 33,
    "retweet_count": 191,
    "retweeted": false,
    "user_id_str": "865622395",
    "id_str": "1911494805101986135",
    "note_tweet": {
      "is_expandable": true,
      "note_tweet_results": {
        "result": {
          "id": "Tm90ZVR3ZWV0OjE5MTE0OTQ4MDQ4NzEyMzc2MzI=",
          "text": "As we all know by now, reasoning models often generate longer responses, which raises compute costs. Now, this new paper (https://t.co/UbBv4rzM09) shows that this behavior comes from the RL training process, not from an actual need for long answers for better accuracy. The RL loss tends to favor longer responses when the model gets negative rewards, which I think explains the \"aha\" moments and longer chains of thought that arise from pure RL training.\n\nI.e., if the model gets a negative reward (i.e., the answer is wrong), the math behind PPO causes the average per-token loss becomes smaller when the response is longer. So, the model is indirectly encouraged to make its responses longer. This is true even if those extra tokens don't actually help solve the problem.\n\nWhat does the response length have to do with the loss? When the reward is negative, longer responses can dilute the penalty per individual token, which results in lower (i.e., better) loss values (even though the model is still getting the answer wrong).\n\nSo the model \"learns\" that longer responses reduce the punishment, even though they are not helping correctness.\n\nIn addition, the researchers show that a second round of RL (using just a few problems that are sometimes solvable) can shorten responses while preserving or even improving accuracy. This has big implications for deployment efficiency.",
          "entity_set": {
            "hashtags": [],
            "symbols": [],
            "urls": [
              {
                "display_url": "arxiv.org/abs/2504.05185",
                "expanded_url": "https://arxiv.org/abs/2504.05185",
                "url": "https://t.co/UbBv4rzM09",
                "indices": [
                  122,
                  145
                ]
              }
            ],
            "user_mentions": []
          },
          "richtext": {
            "richtext_tags": []
          },
          "media": {
            "inline_media": []
          }
        }
      }
    }
  },
  "user": {
    "__typename": "User",
    "id": "VXNlcjo4NjU2MjIzOTU=",
    "rest_id": "865622395",
    "affiliates_highlighted_label": {},
    "has_graduated_access": true,
    "is_blue_verified": true,
    "profile_image_shape": "Circle",
    "legacy": {
      "can_dm": false,
      "can_media_tag": true,
      "created_at": "Sun Oct 07 02:06:16 +0000 2012",
      "default_profile": false,
      "default_profile_image": false,
      "description": "ML/AI researcher & former stats professor turned LLM research engineer. Author of \"Build a Large Language Model From Scratch\" (https://t.co/O8LAAMRzzW).",
      "entities": {
        "description": {
          "urls": [
            {
              "display_url": "amzn.to/4fqvn0D",
              "expanded_url": "https://amzn.to/4fqvn0D",
              "url": "https://t.co/O8LAAMRzzW",
              "indices": [
                127,
                150
              ]
            }
          ]
        },
        "url": {
          "urls": [
            {
              "display_url": "sebastianraschka.com",
              "expanded_url": "https://sebastianraschka.com",
              "url": "https://t.co/HrtQQ5tgJl",
              "indices": [
                0,
                23
              ]
            }
          ]
        }
      },
      "fast_followers_count": 0,
      "favourites_count": 21590,
      "followers_count": 337063,
      "friends_count": 1013,
      "has_custom_timelines": true,
      "is_translator": false,
      "listed_count": 4153,
      "location": "",
      "media_count": 1917,
      "name": "Sebastian Raschka",
      "normal_followers_count": 337063,
      "pinned_tweet_ids_str": [
        "1913589458726690892"
      ],
      "possibly_sensitive": false,
      "profile_banner_url": "https://pbs.twimg.com/profile_banners/865622395/1742309979",
      "profile_image_url_https": "https://pbs.twimg.com/profile_images/1661187442043486209/a3E4t1eV_normal.jpg",
      "profile_interstitial_type": "",
      "screen_name": "rasbt",
      "statuses_count": 17742,
      "translator_type": "none",
      "url": "https://t.co/HrtQQ5tgJl",
      "verified": false,
      "want_retweets": false,
      "withheld_in_countries": []
    },
    "professional": {
      "rest_id": "1487642811856007168",
      "professional_type": "Creator",
      "category": [
        {
          "id": 713,
          "name": "Science & Technology",
          "icon_name": ""
        }
      ]
    },
    "tipjar_settings": {
      "is_enabled": true,
      "bitcoin_handle": "",
      "cash_app_handle": "SebastianRaschka",
      "ethereum_handle": "",
      "venmo_handle": "Sebastian-Raschka"
    }
  },
  "views": "106667"
}