🐦 Twitter Post Details

@__howardchen

Long context models are popular, but is it the final solution to long text reading?
We introduce a fundamentally different method, MemWalker:
1. Build a data structure (memory tree)
2. Traverse it via LLM prompting
Outperforms long context, retrieval, & recurrent baselines. (1/n) https://t.co/JrDME0ZnpB
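
A rough sketch of the two-step idea the tweet describes, for orientation only: the llm callable, Node class, prompts, fan-out, and index-parsing traversal below are illustrative assumptions, not MemWalker's actual implementation or prompting scheme.

# Hypothetical sketch of a memory-tree build + LLM-guided traversal.
# Assumes an `llm(prompt: str) -> str` callable supplied by the caller.

from dataclasses import dataclass, field
from typing import Callable, List, Optional

@dataclass
class Node:
    summary: str                      # LLM-written summary of the text under this node
    text: Optional[str] = None        # raw segment text (leaf nodes only)
    children: List["Node"] = field(default_factory=list)

def build_tree(segments: List[str], llm: Callable[[str], str], fanout: int = 4) -> Node:
    """Step 1: build the memory tree bottom-up by recursive summarization."""
    nodes = [Node(summary=llm(f"Summarize this passage:\n{s}"), text=s) for s in segments]
    while len(nodes) > 1:
        parents = []
        for i in range(0, len(nodes), fanout):
            group = nodes[i:i + fanout]
            joined = "\n".join(c.summary for c in group)
            parents.append(Node(summary=llm(f"Summarize these summaries:\n{joined}"),
                                children=group))
        nodes = parents
    return nodes[0]

def traverse(root: Node, query: str, llm: Callable[[str], str]) -> str:
    """Step 2: navigate from the root, letting the LLM pick which child to enter."""
    node = root
    while node.children:
        menu = "\n".join(f"[{i}] {c.summary}" for i, c in enumerate(node.children))
        choice = llm(f"Question: {query}\nWhich section is most relevant?\n{menu}\n"
                     "Answer with the index only.")
        idx = int("".join(ch for ch in choice if ch.isdigit()) or 0)
        node = node.children[min(idx, len(node.children) - 1)]
    # At a leaf: answer from the underlying segment text.
    return llm(f"Passage:\n{node.text}\n\nQuestion: {query}\nAnswer:")

The intent, as the tweet frames it, is that at query time the model only sees one node's summaries at a time rather than the full document in a single long context.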

📊 Media Metadata

{
  "media": [
    {
      "id": "",
      "type": "photo",
      "url": null,
      "media_url": "https://pbs.twimg.com/media/F8DFKoFbkAASlK4.png",
      "media_url_https": null,
      "display_url": null,
      "expanded_url": null
    }
  ],
  "nlp": {
    "sentiment": "positive",
    "processed_at": "2025-08-06T12:44:45.789501"
  },
  "score": 1.0,
  "scored_at": "2025-08-09T13:46:07.542498",
  "import_source": "manual_curation_2023",
  "score_components": {
    "author": 0.09,
    "engagement": 0.1523247582167354,
    "quality": 0.18,
    "source": 0.15,
    "nlp": 0.1,
    "recency": 0.010000000000000002
  },
  "source_tagged_at": "2025-08-09T13:42:53.781571",
  "enriched": true,
  "enriched_at": "2025-08-09T13:42:53.781572",
  "links_checked": true,
  "checked_at": "2025-08-10T10:32:29.505194",
  "original_structure": "had_media_only"
}

🔧 Raw API Response

{
  "user": {
    "created_at": "2013-08-24T10:40:25.000Z",
    "default_profile_image": false,
    "description": "PhD student in ML/NLP @princeton_nlp & @PrincetonPLI.\nPreviously @asapp research / @cornell_tech / NTU (Taiwan).",
    "fast_followers_count": 0,
    "favourites_count": 3135,
    "followers_count": 695,
    "friends_count": 1137,
    "has_custom_timelines": true,
    "is_translator": false,
    "listed_count": 11,
    "location": "",
    "media_count": 16,
    "name": "Howard Chen",
    "normal_followers_count": 695,
    "possibly_sensitive": false,
    "profile_banner_url": "https://pbs.twimg.com/profile_banners/1696122612/1662347109",
    "profile_image_url_https": "https://pbs.twimg.com/profile_images/1354799599442030592/QCsWNXWD_normal.jpg",
    "screen_name": "__howardchen",
    "statuses_count": 114,
    "translator_type": "none",
    "url": "https://t.co/6KAR6m6qfv",
    "verified": false,
    "withheld_in_countries": [],
    "id_str": "1696122612"
  },
  "id": "1711584916708938042",
  "conversation_id": "1711584916708938042",
  "full_text": "Long context models are popular, but is it the final solution to long text reading?\nWe introduce a fundamentally different method, MemWalker:\n1. Build a data structure (memory tree)\n2. Traverse it via LLM prompting\nOutperforms long context, retrieval, & recurrent baselines. (1/n) https://t.co/JrDME0ZnpB",
  "reply_count": 21,
  "retweet_count": 133,
  "favorite_count": 846,
  "hashtags": [],
  "symbols": [],
  "user_mentions": [],
  "urls": [],
  "media": [
    {
      "media_url": "https://pbs.twimg.com/media/F8DFKoFbkAASlK4.png",
      "type": "photo"
    }
  ],
  "url": "https://twitter.com/__howardchen/status/1711584916708938042",
  "created_at": "2023-10-10T03:30:18.000Z",
  "#sort_index": "1711584916708938042",
  "view_count": 178971,
  "quote_count": 13,
  "is_quote_tweet": false,
  "is_retweet": false,
  "is_pinned": false,
  "is_truncated": false,
  "startUrl": "https://twitter.com/__howardchen/status/1711584916708938042"
}