🐦 Twitter Post Details

@iScienceLuvr

CURIE: Evaluating LLMs On Multitask Scientific Long Context Understanding and Reasoning "We introduce CURIE, a scientific long-Context Understanding, Reasoning and Information Extraction benchmark to measure the potential of Large Language Models (LLMs) in scientific… https://t.co/ErIGSJNpEv

Media 1

📊 Media Metadata

{
  "score": 0.78,
  "scored_at": "2025-08-09T13:47:19.522772",
  "import_source": "unknown_source",
  "original_structure": "had_data_only",
  "media": [
    {
      "id": "1902246098997297152",
      "type": "photo",
      "url": "https://t.co/ErIGSJNpEv",
      "media_url": null,
      "media_url_https": "https://pbs.twimg.com/media/GmYiv54bQAA6-iP.jpg",
      "display_url": "pic.x.com/ErIGSJNpEv",
      "expanded_url": "https://x.com/iScienceLuvr/status/1902246106735530191/photo/1"
    }
  ]
}
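As a rough illustration of how this enrichment record might be consumed, the sketch below picks a usable image URL out of the media array, preferring media_url_https since media_url is null in the record above. The best_image_url helper and the media_metadata.json path are assumptions for the example, not part of the export.

import json

def best_image_url(metadata: dict) -> str | None:
    """Return the first usable photo URL from the media array.

    Prefers media_url_https (media_url can be null, as in the record
    above) and falls back to the shortened t.co url.
    """
    for item in metadata.get("media", []):
        if item.get("type") != "photo":
            continue
        url = item.get("media_url_https") or item.get("media_url") or item.get("url")
        if url:
            return url
    return None

# Illustrative usage, assuming the metadata above is saved to a file.
with open("media_metadata.json") as f:
    metadata = json.load(f)

print(best_image_url(metadata))
# -> https://pbs.twimg.com/media/GmYiv54bQAA6-iP.jpg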

🔧 Raw API Response

{
  "tweet": {
    "bookmark_count": 43,
    "bookmarked": false,
    "created_at": "Wed Mar 19 06:29:48 +0000 2025",
    "conversation_id_str": "1902246106735530191",
    "display_text_range": [
      0,
      269
    ],
    "entities": {
      "hashtags": [],
      "media": [
        {
          "display_url": "pic.x.com/ErIGSJNpEv",
          "expanded_url": "https://x.com/iScienceLuvr/status/1902246106735530191/photo/1",
          "id_str": "1902246098997297152",
          "indices": [
            270,
            293
          ],
          "media_key": "3_1902246098997297152",
          "media_url_https": "https://pbs.twimg.com/media/GmYiv54bQAA6-iP.jpg",
          "type": "photo",
          "url": "https://t.co/ErIGSJNpEv",
          "ext_media_availability": {
            "status": "Available"
          },
          "features": {
            "large": {
              "faces": [
                {
                  "x": 200,
                  "y": 1044,
                  "h": 51,
                  "w": 51
                }
              ]
            },
            "medium": {
              "faces": [
                {
                  "x": 187,
                  "y": 977,
                  "h": 47,
                  "w": 47
                }
              ]
            },
            "small": {
              "faces": [
                {
                  "x": 106,
                  "y": 554,
                  "h": 27,
                  "w": 27
                }
              ]
            },
            "orig": {
              "faces": [
                {
                  "x": 200,
                  "y": 1044,
                  "h": 51,
                  "w": 51
                }
              ]
            }
          },
          "sizes": {
            "large": {
              "h": 1281,
              "w": 976,
              "resize": "fit"
            },
            "medium": {
              "h": 1200,
              "w": 914,
              "resize": "fit"
            },
            "small": {
              "h": 680,
              "w": 518,
              "resize": "fit"
            },
            "thumb": {
              "h": 150,
              "w": 150,
              "resize": "crop"
            }
          },
          "original_info": {
            "height": 1281,
            "width": 976,
            "focus_rects": [
              {
                "x": 0,
                "y": 0,
                "w": 976,
                "h": 547
              },
              {
                "x": 0,
                "y": 0,
                "w": 976,
                "h": 976
              },
              {
                "x": 0,
                "y": 0,
                "w": 976,
                "h": 1113
              },
              {
                "x": 0,
                "y": 0,
                "w": 641,
                "h": 1281
              },
              {
                "x": 0,
                "y": 0,
                "w": 976,
                "h": 1281
              }
            ]
          },
          "media_results": {
            "result": {
              "media_key": "3_1902246098997297152"
            }
          }
        }
      ],
      "symbols": [],
      "timestamps": [],
      "urls": [],
      "user_mentions": []
    },
    "extended_entities": {
      "media": [
        {
          "display_url": "pic.x.com/ErIGSJNpEv",
          "expanded_url": "https://x.com/iScienceLuvr/status/1902246106735530191/photo/1",
          "id_str": "1902246098997297152",
          "indices": [
            270,
            293
          ],
          "media_key": "3_1902246098997297152",
          "media_url_https": "https://pbs.twimg.com/media/GmYiv54bQAA6-iP.jpg",
          "type": "photo",
          "url": "https://t.co/ErIGSJNpEv",
          "ext_media_availability": {
            "status": "Available"
          },
          "features": {
            "large": {
              "faces": [
                {
                  "x": 200,
                  "y": 1044,
                  "h": 51,
                  "w": 51
                }
              ]
            },
            "medium": {
              "faces": [
                {
                  "x": 187,
                  "y": 977,
                  "h": 47,
                  "w": 47
                }
              ]
            },
            "small": {
              "faces": [
                {
                  "x": 106,
                  "y": 554,
                  "h": 27,
                  "w": 27
                }
              ]
            },
            "orig": {
              "faces": [
                {
                  "x": 200,
                  "y": 1044,
                  "h": 51,
                  "w": 51
                }
              ]
            }
          },
          "sizes": {
            "large": {
              "h": 1281,
              "w": 976,
              "resize": "fit"
            },
            "medium": {
              "h": 1200,
              "w": 914,
              "resize": "fit"
            },
            "small": {
              "h": 680,
              "w": 518,
              "resize": "fit"
            },
            "thumb": {
              "h": 150,
              "w": 150,
              "resize": "crop"
            }
          },
          "original_info": {
            "height": 1281,
            "width": 976,
            "focus_rects": [
              {
                "x": 0,
                "y": 0,
                "w": 976,
                "h": 547
              },
              {
                "x": 0,
                "y": 0,
                "w": 976,
                "h": 976
              },
              {
                "x": 0,
                "y": 0,
                "w": 976,
                "h": 1113
              },
              {
                "x": 0,
                "y": 0,
                "w": 641,
                "h": 1281
              },
              {
                "x": 0,
                "y": 0,
                "w": 976,
                "h": 1281
              }
            ]
          },
          "media_results": {
            "result": {
              "media_key": "3_1902246098997297152"
            }
          }
        }
      ]
    },
    "favorite_count": 83,
    "favorited": false,
    "full_text": "CURIE: Evaluating LLMs On Multitask Scientific Long Context Understanding and Reasoning\n\n\"We introduce CURIE, a scientific long-Context Understanding, Reasoning and Information Extraction benchmark to measure the potential of\rLarge Language Models (LLMs) in scientific… https://t.co/ErIGSJNpEv",
    "is_quote_status": false,
    "lang": "en",
    "possibly_sensitive": false,
    "possibly_sensitive_editable": true,
    "quote_count": 0,
    "reply_count": 1,
    "retweet_count": 17,
    "retweeted": false,
    "user_id_str": "441465751",
    "id_str": "1902246106735530191",
    "note_tweet": {
      "is_expandable": true,
      "note_tweet_results": {
        "result": {
          "id": "Tm90ZVR3ZWV0OjE5MDIyNDYxMDY2MDk2MzUzMjg=",
          "text": "CURIE: Evaluating LLMs On Multitask Scientific Long Context Understanding and Reasoning\n\n\"We introduce CURIE, a scientific long-Context Understanding, Reasoning and Information Extraction benchmark to measure the potential of\rLarge Language Models (LLMs) in scientific problem-solving and assisting scientists in realistic workflows. This benchmark introduces ten challenging tasks with\ra total of 580 problems and solution pairs curated by experts in six disciplines - materials science, condensed matter physics, quantum computing, geospatial analysis, biodiversity, and proteins - covering both experimental and theoretical workflows in science. We evaluate a range of closed and open LLMs on tasks in CURIE\rwhich requires domain expertise, comprehension of long in-context information,\rand multi-step reasoning. While Gemini Flash 2.0 and Claude-3 show consistent\rhigh comprehension across domains, the popular GPT-4o and command-R+ fail\rdramatically on protein sequencing tasks. With the best performance at 32% there\ris much room for improvement for all models.\"",
          "entity_set": {
            "hashtags": [],
            "symbols": [],
            "timestamps": [],
            "urls": [],
            "user_mentions": []
          },
          "richtext": {
            "richtext_tags": []
          }
        }
      }
    }
  },
  "user": {
    "__typename": "User",
    "id": "VXNlcjo0NDE0NjU3NTE=",
    "rest_id": "441465751",
    "affiliates_highlighted_label": {},
    "has_graduated_access": true,
    "is_blue_verified": true,
    "profile_image_shape": "Circle",
    "legacy": {
      "can_dm": true,
      "can_media_tag": true,
      "created_at": "Tue Dec 20 03:45:50 +0000 2011",
      "default_profile": false,
      "default_profile_image": false,
      "description": "CEO @SophontAI |\nPhD at 19 (2023) |\nFounder, ex CEO @MedARC_AI |\nex Research Director Stability AI | \nBiomed. engineer @ 14 |\nTEDx talk➡https://t.co/xPxwKTq6Qb",
      "entities": {
        "description": {
          "urls": [
            {
              "display_url": "bit.ly/3tpAuan",
              "expanded_url": "https://bit.ly/3tpAuan",
              "url": "https://t.co/xPxwKTq6Qb",
              "indices": [
                136,
                159
              ]
            }
          ]
        },
        "url": {
          "urls": [
            {
              "display_url": "tanishq.ai",
              "expanded_url": "https://tanishq.ai",
              "url": "https://t.co/nNzCz2Wt2z",
              "indices": [
                0,
                23
              ]
            }
          ]
        }
      },
      "fast_followers_count": 0,
      "favourites_count": 101129,
      "followers_count": 78647,
      "friends_count": 1223,
      "has_custom_timelines": true,
      "is_translator": false,
      "listed_count": 1168,
      "location": "",
      "media_count": 2363,
      "name": "Tanishq Mathew Abraham, Ph.D.",
      "normal_followers_count": 78647,
      "pinned_tweet_ids_str": [
        "1934689751862530317"
      ],
      "possibly_sensitive": false,
      "profile_banner_url": "https://pbs.twimg.com/profile_banners/441465751/1738204246",
      "profile_image_url_https": "https://pbs.twimg.com/profile_images/1913710019729821696/Qge4zx6u_normal.jpg",
      "profile_interstitial_type": "",
      "screen_name": "iScienceLuvr",
      "statuses_count": 17423,
      "translator_type": "none",
      "url": "https://t.co/nNzCz2Wt2z",
      "verified": false,
      "want_retweets": false,
      "withheld_in_countries": []
    },
    "professional": {
      "rest_id": "1534109693131362304",
      "professional_type": "Creator",
      "category": [
        {
          "id": 713,
          "name": "Science & Technology",
          "icon_name": ""
        }
      ]
    },
    "tipjar_settings": {}
  },
  "views": "6754"
}
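For readers working with this raw payload programmatically, the sketch below pulls a compact summary out of the response: the expanded note_tweet text when present (falling back to full_text), the engagement counts, and the largest photo variant. Field names are taken from the response above; the summarize_tweet helper and the raw_api_response.json path are illustrative assumptions.

import json

def summarize_tweet(payload: dict) -> dict:
    """Build a small summary from a raw API response like the one above."""
    tweet = payload["tweet"]
    user = payload.get("user", {}).get("legacy", {})

    # Long posts carry their full text under note_tweet; otherwise use full_text.
    note = (
        tweet.get("note_tweet", {})
        .get("note_tweet_results", {})
        .get("result", {})
        .get("text")
    )
    text = note or tweet.get("full_text", "")

    # Largest photo variant, if any media is attached.
    media = tweet.get("extended_entities", {}).get("media", [])
    photo = media[0] if media else {}
    large = photo.get("sizes", {}).get("large", {})

    return {
        "author": user.get("screen_name"),
        "created_at": tweet.get("created_at"),
        "text": text,
        "likes": tweet.get("favorite_count"),
        "retweets": tweet.get("retweet_count"),
        "replies": tweet.get("reply_count"),
        "bookmarks": tweet.get("bookmark_count"),
        "views": payload.get("views"),
        "photo_url": photo.get("media_url_https"),
        "photo_size": (large.get("w"), large.get("h")),
    }

# Illustrative usage, assuming the payload above is saved to a file.
with open("raw_api_response.json") as f:
    payload = json.load(f)

print(json.dumps(summarize_tweet(payload), indent=2))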