@prefix dc: <http://purl.org/dc/terms/> .
@prefix this: <http://purl.org/np/RAw2rhwIG3dWBRWl3hPpdTKJ3g_rwN-eGfM9iuPTTkPOY> .
@prefix sub: <http://purl.org/np/RAw2rhwIG3dWBRWl3hPpdTKJ3g_rwN-eGfM9iuPTTkPOY#> .
@prefix xsd: <http://www.w3.org/2001/XMLSchema#> .
@prefix prov: <http://www.w3.org/ns/prov#> .
@prefix pav: <http://purl.org/pav/> .
@prefix np: <http://www.nanopub.org/nschema#> .
@prefix doco: <http://purl.org/spar/doco/> .
@prefix c4o: <http://purl.org/spar/c4o/> .
sub:Head {
  this: np:hasAssertion sub:assertion ;
    np:hasProvenance sub:provenance ;
    np:hasPublicationInfo sub:pubinfo ;
    a np:Nanopublication .
}
sub:assertion {
  sub:paragraph c4o:hasContent "In this scenario, the encyclopedia Wikipedia contains a huge amount of data, which may represent the best digital approximation of human knowledge. Recent efforts, most notably DBpedia [23], Freebase [8], YAGO [21], and Wikidata [31], attempt to extract semi-structured data from Wikipedia in order to build KBs that are proven useful for a variety of applications, such as question answering, entity summarization and entity linking (EL), just to name a few. The idea has not only attracted a continuously rising commitment of research communities, but has also become a substantial focus of the largest Web companies. As an anecdotal yet remarkable proof, Google acquired Freebase in 2010, embedded it in its Knowledge Graph, and has lately opted to shut it down to the public. Currently, it is foreseen that Freebase data will eventually migrate to Wikidata via the primary sources tool, which aims at standardizing the flow for data donations." ;
    a doco:Paragraph .
}
sub:provenance {
  sub:assertion prov:hadPrimarySource <http://dx.doi.org/10.3233/SW-170269> ;
    prov:wasAttributedTo <https://orcid.org/0000-0002-5456-7964> .
}
sub:pubinfo {
  this: dc:created "2019-11-10T12:34:11+01:00"^^xsd:dateTime ;
    pav:createdBy <https://orcid.org/0000-0002-7114-6459> .
}