# NOTE(review): this nanopublication was mangled during extraction — every
# angle-bracket IRI token (<...>) was stripped. Standard namespace IRIs below
# are restored from well-known nanopub conventions; <> placeholders mark IRIs
# that cannot be recovered from this file alone. TODO: restore the original
# IRIs from the published nanopub — the RSA signature in sub:pubinfo covers the
# original content, so this document will not verify until they are restored.
@prefix this: <> .   # TODO(review): the nanopub's own IRI (typically https://w3id.org/np/RA...)
@prefix sub: <> .    # TODO(review): typically the nanopub IRI followed by '#'
@prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix np: <http://www.nanopub.org/nschema#> .
@prefix npx: <http://purl.org/nanopub/x/> .
@prefix dcterms: <http://purl.org/dc/terms/> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix schema: <https://schema.org/> .

# Head graph: links the nanopub to its three content graphs.
sub:Head {
  this: np:hasAssertion sub:assertion ;
    np:hasProvenance sub:provenance ;
    np:hasPublicationInfo sub:pubinfo ;
    a np:Nanopublication .
}

# Assertion graph: the actual claim (a social-media post about the paper
# "Probing Multimodal LLMs as World Models for Driving", arXiv:2405.05956).
sub:assertion {
  sub:assertion dcterms:creator <> ;  # TODO(review): creator IRI stripped in extraction
    <> <> ;                           # TODO(review): predicate and object IRIs stripped
    <> <> , <> ;                      # TODO(review): predicate and two object IRIs stripped
    # NOTE(review): the comment literal contained an interleaved duplication of
    # its first sentence spliced into the arXiv URL; deduplicated here.
    rdfs:comment """Language models != world models \"Probing Multimodal LLMs as World Models for Driving\" https://arxiv.org/abs/2405.05956 --- A second post https://www.semanticscholar.org/paper/Audio-Visual-Language-Maps-for-Robot-Navigation-Huang-Mees/93565fe6db3948c9c414af1d1edccf4aff5e2e10""" ;
    schema:keywords "AI", "autonomous-driving", "language-models", "multimodal", "probing", "world-models" .
}

# Provenance graph: who/what the assertion came from.
sub:provenance {
  sub:assertion prov:wasAttributedTo <> ;  # TODO(review): agent IRI stripped
    prov:wasDerivedFrom <> .               # TODO(review): source IRI stripped
}

# Publication-info graph: creation metadata and the RSA signature element.
sub:pubinfo {
  sub:sig npx:hasAlgorithm "RSA" ;
    npx:hasPublicKey "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyfRdsS1Fon4aRZSbLKNwJ/XNVrFdAXi8tsEItCpX23O4VBsp3XgxkzeA6W+iT1AgN9ppNuiuCiwsYSlA4d9To9SMj4sfpa53JY/zvXwOchD5294x7EScpn/iVDh6ASlH1/PnHlQhtDDGE7hklR7ZqnW8V8Vcys7yGriYM5BNnMvraGZS8Nz8/2jcmDvMH5UCyA+PYZp00B5DhKW6TIi6ww/bRpBv9Z7Mlfa1JeWr8Us7UMKmN0x8dO6MY8gkcmjBgFvafs7M9tNOHKLfJUDqoBKvkFkRwSXzqfEwuuf+B18iq0DMAtli88MYqXa92pnDiuEfpkwHJbztcgcYwEUGvQIDAQAB" ;
    npx:hasSignature "SyEllseZ9tSnuuYqYe1F2dFg3Dmk3AI64svlyFQ+SBAtLYE/9fY4xQ0j9M7LUhfhsz/VuF84tejisvAY94QaYMqxIoupgZCGZ03SQzz4ZwAOZaHJdiXuDOiHoPFzE7ie/VqG6XKGNC1zi9nCAGwbbGaTn6dEpaJ5y5An8ty9dAqKceekISNsIAjisZo5AnkGQhllaVQVorgWmyauHH+BwClF956YJ7MYRs92+1YvdduxTjLHoEaUIJ6chadbHpMFSV2tRPpZyOnuEz0VoSYtAlEWM7T3D8LHg9TPXDRbXO0cARAmZ1YDmzh1sjNipoyvOsdh4u5sYbfg0xW09Xfcfw==" ;
    npx:hasSignatureTarget this: .

  this: dcterms:created "2024-05-16T17:44:51.560Z"^^xsd:dateTime ;
    <> "0xFfEB3c7bE84527a44349aFD421dC5a1834DBAa1D" ;  # TODO(review): predicate IRI stripped (object looks like an Ethereum address)
    a npx:ExampleNanopub , <> .                        # TODO(review): second rdf:type IRI stripped
}