@article{Sharma_Marjit_Biswas_2018,
  title        = {Efficiently Processing and Storing Library Linked Data using Apache Spark and Parquet},
  author       = {Sharma, Kumar and Marjit, Ujjal and Biswas, Utpal},
  journal      = {Information Technology and Libraries},
  volume       = {37},
  number       = {3},
  pages        = {29--49},
  year         = {2018},
  month        = sep,
  url          = {https://ital.corejournals.org/index.php/ital/article/view/10177},
  doi          = {10.6017/ital.v37i3.10177},
  abstractNote = {Resource Description Framework (RDF) is a commonly used data model in the Semantic Web environment. Libraries and various other communities have been using the RDF data model to store valuable data after it is extracted from traditional storage systems. However, because of the large volume of the data, processing and storing it is becoming a nightmare for traditional data-management tools. This challenge demands a scalable and distributed system that can manage data in parallel. In this article, a distributed solution is proposed for efficiently processing and storing the large volume of library linked data held in traditional storage systems. Apache Spark is used for parallel processing of large data sets, and a column-oriented schema is proposed for storing RDF data. The storage system is built on top of the Hadoop Distributed File System (HDFS) and uses the Apache Parquet format to store data in compressed form. The experimental evaluation showed that storage requirements were reduced significantly compared to Jena TDB, Sesame, RDF/XML, and N-Triples file formats. SPARQL queries are processed using Spark SQL to query the compressed data. The experimental evaluation showed a good query response time, which decreases significantly as the number of worker nodes increases.}
}