% This file was created with JabRef 2.3.1. % Encoding: Cp1252 @ARTICLE{anouncia:2:3:07, author = {S.-Margret Anouncia and R. Saravanan}, title = {Ontology based process plan generation for image processing}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {211--222}, number = {3}, abstract = {The ultimate aim in a large number of image processing applications is to extract the various features from the image and to analyse them for interpretation or for classification. The steps in the image analysis basically involve proper image preprocessing and image segmentation process. Generally, image processing specialists develop their processing plan by trial error cycles and do not define a real formulation of the plan for the given image. To formulate the process plan generation of image analysis, a convenient knowledge organisation is preferable. In this paper, ontology consisting of all the possible processing tasks of a grey scale image is created and a process plan for the analysis of image is automatically generated using the knowledge represented in the ontology.}, keywords = {ontology; image analysis; image processing; process planning; feature extraction; image preprocessing; image segmentation.}, owner = {msicilia}, timestamp = {2008.07.21} } @ARTICLE{antoniou:2:3:07, author = {Grigoris Antoniou and Antonis Bikakis and Anna Karamolegou and Nikos Papachristodoulou and Manolis Stratakis}, title = {A context-aware meeting alert using semantic web and rule technology}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {147--156}, number = {3}, abstract = {This paper describes a context-aware meeting alert, which aims at alerting the user in time about upcoming scheduled calendar events, considering the state of the user's context.
This application integrates semantic web technology in RDF (for representing calendars), semantic web rules (for making a context dependent decision about the precise timing of the alert), and mobile technology for location sensing and message delivery. The outlined work is an experiment seeking to demonstrate the feasibility of applying efficient, semantically sound semantic web reasoning to mobile applications.}, keywords = {semantic web; rule technology; context-aware applications; mobile computing; semantic context modelling; context-based reasoning; calendar metadata; non-monotonic reasoning; defeasible logics; m-computing; meetings alert.}, owner = {msicilia}, timestamp = {2008.07.21} } @ARTICLE{arroyo:1:1:06, author = {Sinuhe Arroyo and Jose Manuel Lopez-Cobo}, title = {Describing web services with semantic metadata}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {76-82}, number = {1}, __markedentry = {[MIGUEL ANGEL]}, abstract = {Web services are self-describing, self-contained applications that can be published, located, and invoked through common web protocols. Self-descriptions are in fact a form of metadata that provide details on the services offered. Semantic web services are, thus, an extension of such metadata-based descriptions to richer ontology-based description semantics. This paper provides an account on the main usage scenarios of semantic web services, as a roadmap for metadata research on the topic.}, keywords = {semantic metadata; semantics; ontology; web services; semantic web.} } @ARTICLE{bagui:2:1:07, author = {Sikha Bagui}, title = {A formal definition for translating XML documents to the entity relationship model}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {54-66}, number = {1}, abstract = {In this paper we propose to conceptually model XML data and documents to the ER model. 
We present a set of rules for converting XML documents and their data to the ER model. This conceptual view of the XML data can then be easily translated to the relational model, which will then open the way for the development of applications that exploit well known reliable relational technologies to manage and manipulate large collections of XML documents and data travelling through the web.}, keywords = {XML data; XML documents; database design; entity relationship diagrams; relational database; conceptual models; formal definition.}, owner = {msicilia}, timestamp = {2007.09.13} } @ARTICLE{bellomi:1:1:06, author = {Francesco Bellomi and Matteo Cristani}, title = {Supervised document classification based upon domain-specific term taxonomies}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {37-46}, number = {1}, __markedentry = {[MIGUEL ANGEL]}, abstract = {The classification of documents is an interesting topic of recent terminological investigations, in particular the technological ones. Some sophisticated techniques have been developed which provide the classification based upon the recognition of specific linguistic features, such as specific terms or occurrences of phrases. A limited number of cases exist of real document classification applications that make use of natural language processing techniques providing both statistical analysis and human supervision, where the system fully automates the classification process, but the instruction of the taxonomy is a totally human centred activity. In this paper we focus on an application with the above mentioned features; we then introduce a methodology that makes use of this application. 
The fundamental argument in favour of a specific methodology is that the analysis which leads to the deployment of the term 'taxonomy' can be seen as an ontology construction: we also discuss this aspect as a general motivation.}, keywords = {document classification; taxonomy; ontology; clustering; statistical natural language processing.} } @ARTICLE{bonson:2:4:07, author = {Enrique Bonson-Ponte and Tomas Escobar-Rodriguez and Francisco Flores-Munoz}, title = {Metadata language for online identification: an XBRL international project}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {259-267}, number = {4}, abstract = {This paper sets out to describe how the XBRL-GDI project (eXtensible Business Reporting Language – General Data of Identification), has responded to a need for the online identification of persons and entities of all types, in a pre-existing environment characterised by the heterogeneity of systems and channels of communication. The democratic structure of the GDI Working Group, together with the intensive use of technological means of communication between members, has enabled the project to be successfully completed. The increasing utilisation of XBRL in various administrative and business fields, including at high levels, serves as an endorsement of the XBRL-GDI project.}, keywords = {metadata language; online identification; regulation; validation; XBRL; internet.}, owner = {msicilia}, timestamp = {2008.07.21} } @ARTICLE{broeder:1:2:06, author = {Daan Broeder and Peter Wittenburg}, title = {The IMDI metadata framework, its current application and future direction}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {119-132}, number = {2}, abstract = {The IMDI Framework offers next to a suitable set of metadata descriptors for language resources, a set of tools and an infrastructure to use these. 
This paper gives an overview of all these aspects and at the end describes the intentions and hopes for ensuring the interoperability of the IMDI framework within more general ones in development. An evaluation of the current state of the IMDI Framework is presented with an analysis of the benefits and more problematic issues. Finally we describe work on issues of long-term stability for IMDI by linking up to the work done within the ISO TC37/SC4 subcommittee (TC37/SC4).}, keywords = {IMDI metadata framework; metadata descriptors; language resources; ISO TC37/SC4.} } @ARTICLE{caliusco:1:4:06, author = {Maria-Laura Caliusco and Maria-Rosa Galli and Omar Chiotti}, title = {Technologies for data semantic modelling}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {322-331}, number = {4}, abstract = {The explicit modelling of data semantics promises to drive information integration technology to a new level of flexibility and automation. However, nowadays, the inclusion of semantic data modelling into information system development is constrained by the lack of a supporting tool based on appropriate technologies. Our contribution in this paper is based on the specification of modelling languages that make the task of information semantics definition friendly for people who have no background knowledge of semantic modelling techniques, proposed by artificial intelligence techniques. 
In addition, we present a prototype that implements these languages.}, keywords = {data semantic modelling; contextual ontologies; modelling languages; data semantics; information integration; information systems.} } @ARTICLE{camacho:2:1:07, author = {David Camacho and Maria R-Moreno}, title = {Towards an automatic monitoring for higher education Learning Design}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {1-10}, number = {1}, abstract = {The development of new Information Technologies (IT) has originated new possibilities to design pedagogical methodologies that provide the necessary knowledge and skills in the higher education. This paper presents a metadata-based model representation that is used to represent, detect, and even automatically correct possible pitfalls in the schedule process of a Learning Design (LD) in e-learning environments. This metadata-based model is combined with Artificial Intelligence techniques, such as, planning and scheduling to monitor how is evolving a particular LD, and to propose solutions in those modules of the design that learning problems among the students have been found.}, keywords = {e-learning; online learning; learning design; higher education; metadata; planning; scheduling; automatic monitoring; artificial intelligence.}, owner = {msicilia}, timestamp = {2007.09.13} } @ARTICLE{castanos:1:4:06, author = {Silvana Castano and Alfio Ferrara and Stefano Montanelli}, title = {Evolving open and independent ontologies}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {235-249}, number = {4}, abstract = {In this paper, we address the problem of evolving open and independent ontologies, by proposing a novel ontology evolution methodology called H-CHANGE and a set of related techniques. 
After providing a formal definition of distributed concept, we describe; change detection techniques based on semantic matchmaking for determining the semantics of change; assimilation techniques for evolving ontology metadata according to new incoming external knowledge at different integration levels, ranging from concept merging to concept alignment. Examples of evolving OWL ontologies according to H-CHANGE are also provided.}, keywords = {ontology evolution; ontology matching; knowledge discovery; open networked systems; change semantics; ontology metadata; open networks; ontologies.} } @ARTICLE{ceusters:2:1:07, author = {Werner Ceusters and Barry Smith}, title = {Referent tracking for Digital Rights Management}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {45-53}, number = {1}, abstract = {Digital Rights Management (DRM) covers the description, identification, trading, protection, monitoring and tracking of all forms of rights over both tangible and intangible assets. The Digital Object Identifier (DOI) system provides a framework for the persistent identification of entities involved in this domain. Although the system has been very well designed to manage object identifiers, some important questions relating to the creation and assignment of identifiers are left open. The paradigm of a Referent Tracking System (RTS) recently advanced in the healthcare and life sciences environment is able to fill these gaps. 
This is demonstrated by pointing out inconsistencies in the existing DOI models and by showing how they can be corrected using an RTS.}, keywords = {referent tracking; digital rights management; DRM; digital object identifier; DOI; healthcare; life sciences.}, owner = {msicilia}, timestamp = {2007.09.13} } @ARTICLE{corcho:1:1:06, author = {Oscar Corcho}, title = {Ontology based document annotation: trends and open research problems}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {47-57}, number = {1}, __markedentry = {[MIGUEL ANGEL]}, abstract = {Metadata is used to describe documents and applications, improving information seeking and retrieval and its understanding and use. Metadata can be expressed in a wide variety of vocabularies and languages, and can be created and maintained with a variety of tools. Ontology based annotation refers to the process of creating metadata using ontologies as their vocabularies. We present similarities and differences with respect to other approaches for metadata creation, and describe languages and tools that can be used to implement these annotations.}, keywords = {ontology; metadata; document annotation; ontologies; electronic documents; formal languages.} } @ARTICLE{dogac:1:1:06, author = {Asuman Dogac and Gokce B. Laleci and Yildiray Kabak and Seda Unal and Sam Heard and Thomas Beale and Peter L. Elkin and Farrukh Najmi and Carl Mattocks and David Webber and Martin Kernberg}, title = {Exploiting ebXML registry semantic constructs for handling archetype metadata in healthcare informatics}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {21-36}, number = {1}, __markedentry = {[MIGUEL ANGEL]}, abstract = {Using archetypes is a promising approach in providing semantic interoperability among healthcare systems. 
To realise archetype based interoperability, the healthcare systems need to discover the existing archetypes, based on their semantics; annotate their archetypes with ontologies; compose templates from archetypes and retrieve corresponding data from the underlying medical information systems. In this paper, we describe how ebXML Registry semantic constructs can be used for annotating, storing, discovering and retrieving archetypes. For semantic annotation of archetypes, we present an example of an archetype metadata ontology and describe the techniques to access archetype semantics through ebXML query facilities. We present a GUI query facility and describe how the stored procedures, which we introduce, move the semantic support beyond what is currently available in ebXML registries. We also address how archetype data can be retrieved from clinical information systems by using ebXML web services. A comparison of web service technology with the ebXML messaging system is provided to justify the reasons for using web services.}, keywords = {healthcare informatics; semantic interoperability; ebXML registries; archetypes; semantic constructs; metadata; ontologies; medical information systems; web services; health information systems.} } @ARTICLE{ellis:2:1:07, author = {Heidi Ellis and Gregory Hislop}, title = {An ontology for software engineering teaching modules}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {11-22}, number = {1}, abstract = {The World Wide Web is increasingly being used to allow both educators and learners to more easily locate desired information, as well as to support integration of related materials from different sources. This paper reports on an ontology constructed for the Network Community for Software Engineering Education (SWENET) and represents a teaching perspective on software engineering material. 
The addition of semantic mark up to teaching modules based on the SWENET ontology would allow instructors and learners to access material in the manner that best fits their educational needs, increase reuse of teaching materials and support integration with existing learning objects.}, keywords = {semantic web; software engineering education; body of knowledge; SWEBOK; ontology; teaching modules; reuse; integration; learning objects; online learning; e-learning; web-based education; internet.}, owner = {msicilia}, timestamp = {2007.09.13} } @ARTICLE{fermoso:2:3:07, author = {Ana Fermoso and Roberto Berjon and Encarnacion Beato and Montserrat Mateos and Miguel Angel Sanchez and Maribel Manzano and M-Jose Gil}, title = {A new system for integrating information from libraries' catalogues to MODS format}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {201--210}, number = {3}, abstract = {In this paper, we present the integration of information from library catalogues to the bibliographical format MODS (Metadata Object Description Schema). Hereby the resources of different bibliographical catalogues can be consulted obtaining the results in the same common format as MODS. To achieve our aim we use the XDS system, a software tool that solves the integration of heterogeneous data using XML as integration format. XDS allows integrating data from different relational and XML sources obtaining the results in the XML format specified by user. In this case the information sources store library catalogues and the output format is MODS.}, keywords = {XML; MODS; library catalogues; information integration; heterogeneous information; relational databases; XML databases; bibliographical format; metadata object description.}, owner = {msicilia}, timestamp = {2008.07.21} } @ARTICLE{flahive:1:4:06, author = {A. Flahive and B.O. Apduhan and J.W. Rahayu and D.
Taniar}, title = {Large scale ontology tailoring and simulation in the Semantic Grid Environment}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {265-281}, number = {4}, abstract = {Ontologies contain semantic information often annotated with metadata. They can get quite complex and very large, causing difficulties in using these ontologies on a daily basis. A large cluster computer is required to tailor these base ontologies into smaller and/or extracted sub-ontologies, but this is expensive and not a viable solution for most users. The Semantic Grid has the ability to process large semantic data repositories as fast as or faster than some of the largest cluster computers at a fraction of the cost. This paper explores the viability of using the Semantic Grid to tailor large ontologies, efficiently and cheaply.}, keywords = {ontologies; ontology tailoring; semantic grid; simulation; metadata.} } @ARTICLE{francesconi:1:3:06, author = {E. Francesconi and G. Peruginelli}, title = {Retrieval of Italian legal literature: a case of semantic search using legal vocabulary}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {207-215}, number = {3}, abstract = {Retrieval of legal information, in particular legal literature, is examined in conjunction with the creation of a Portal to Italian legal doctrine. Subject searching is a major requirement for Italian legal literature users and a solution is described for the retrieval of legal literature's resources. Such solution is based on the exploitation of Dublin Core (DC) metadata for both data coming from structured repositories and for web documents, as well as the use of a controlled vocabulary list prepared for accessing indexed papers of the DoGi database. 
Technical specifications are illustrated, as well as benefits and limitations of such solution.}, keywords = {legal literature; precision; recall; information retrieval; controlled vocabularies; metadata vocabularies; Dublin Core; semantic searches.} } @ARTICLE{garcia-robles:1:3:06, author = {Rocio Garcia-Robles and Alvaro Cabrerizo and Juan Antonio Prieto and Fernando Diaz Del Rio and Anton Civit}, title = {Accessibility via metadata in a semantic web-driven Content Management System}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {224-231}, number = {3}, abstract = {For promoting accessibility, not only binding the W3C-WCAG, but also the discovery of material having appropriate accessibility support and the adjustment of control and display of resources, are important. Both challenges are addressed by the IMS Consortium. In this paper, we survey how far IMS accessibility specifications cover WCAG. We argue why using a semantic framework for describing textual and contextual information in a standardised manner could promote accessibility and reusability. Finally, we explain how some accessibility issues are addressed by XimetriX's ximDEX, a semantic-web CMS. 
A plan to integrate accessibility specifications and contextual description tools into that CMS is proposed.}, keywords = {metadata; accessibility; content management systems; reusability; Dublin Core; IEEE-LOM; IMS consortium; ACCMD; ACCLIP; semantic web; MPEG-7; RDF; OWL; WCAG; W3C WAI guidelines; metadata vocabularies.} } @ARTICLE{golbeck:1:1:06, author = {Jennifer Golbeck and Bijan Parsia}, title = {Trust network-based filtering of aggregated claims}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {58-65}, number = {1}, __markedentry = {[MIGUEL ANGEL]}, abstract = {On the semantic web, assertions may be aggregated from many sources, those aggregations filtered, reasoned over, aggregated with other aggregators, displayed, scraped, extracted, recombined, and otherwise processed without significant human oversight. To preserve the connection between assertions and their source, various provenance schemes for semantic web data have been explored. However, the primary focus has been on authenticating the author of a particular statement, e.g., using digital signatures, but there is no provision for relating the authenticity of the source of the assertion and the trustworthiness of the assertion itself. This paper presents a method for using semantic web based trust networks to infer the reputation of sources for a statement and compose the reputation of several sources. 
By calculating a trust rating for each statement based on the ratings of its sources, the set of statements can be filtered based on the rating.}, keywords = {trust networks; semantic web; knowledge bases; ontologies; social networks; information filtering; aggregated claims; assertions; source reputation; trust rating; trustworthiness.} } @ARTICLE{gotze:1:3:06, author = {Marcel Gotze}, title = {Coding semantics of handwritten annotation}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {216-223}, number = {3}, abstract = {In this paper we discuss a concept for the description of handwritten annotations in digital documents. It is focused on the metadata but also covers the storage of the annotation data itself. For the latter SVG is used that also support the description of information about the handwritten annotation like author, type, or the text it refers to. For this data a possible use of RDF and Dublin Core is introduced that can be included within the metadata element of SVG.}, keywords = {handwritten annotations; digital documents; Dublin Core; RDF; XML; SVG; semantics; World Wide Web; metadata vocabularies.} } @ARTICLE{greenberg:1:1:06, author = {Jane Greenberg and Kristina Spurgin and Abe Crystal}, title = {Functionalities for automatic metadata generation applications: a survey of metadata experts' opinions}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {3-20}, number = {1}, __markedentry = {[MIGUEL ANGEL]}, abstract = {This paper reports on the automatic metadata generation applications (AMeGA) project's metadata expert survey. Automatic metadata generation research is reviewed and the study's methods, key findings and conclusions are presented. 
Participants anticipate greater accuracy with automatic techniques for technical metadata (e.g., ID, language, and format metadata) compared to metadata requiring intellectual discretion (e.g., subject and description metadata). Support for implementing automatic techniques paralleled anticipated accuracy results. Metadata experts are in favour of using automatic techniques, although they are generally not in favour of eliminating human evaluation or production for the more intellectually demanding metadata. Results are incorporated into Version 1.0 of the Recommended Functionalities for automatic metadata generation applications (Appendix A).}, keywords = {automatic metadata generation; metadata applications; Dublin core; metadata experts; AMeGA project; metadata functionalities; technical metadata; intellectual discretion; human evaluation; human production.} }
@ARTICLE{grishchenko:1:2:06, author = {Victor S. Grishchenko}, title = {Distance-based reputation metrics are practical in P2P environments}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {133--140}, number = {2}, abstract = {This article considers computational aspects of web-of-trust-like approaches in distributed, peer-to-peer environments. The key algorithmic issue here is finding the shortest trusted path between two participants. This is a kind of all-pairs-shortest-path problem and it is found to be resolvable assuming scale-free social graph topology and sublinear per-node resource constraints.}, keywords = {web-of-trust; trust; reputation; scale-free; routing; P2P environments; peer-to-peer} } @ARTICLE{hu:2:4:07, author = {Bo Hu and Srinandan Dasmahapatra and Paul Lewis}, title = {Semantic metrics}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {242--258}, number = {4}, abstract = {In the context of the Semantic Web, many ontology-related operations, e.g., ontology ranking, segmentation, alignment, articulation, reuse, evaluation, etc., can be boiled down to one fundamental operation: computing the similarity and/or dissimilarity among ontological entities, and in some cases among ontologies. In this paper, we review conventional metrics for computing distance and we propose a series of metrics conceived purposely for semantics rich entities.
We give a formal account of semantic metrics drawn from a variety of research disciplines and enrich them with formalisation based on Description Logics. We argue that concept-based metrics can be aggregated to produce numeric distances at ontology-level. We speculate on the usability of our ideas in potential ontology engineering tasks.}, keywords = {interoperability; semantic web; ontology alignment; semantic metrics; formalisation; ontology engineering.}, owner = {msicilia}, timestamp = {2008.07.21} } @ARTICLE{hwan:1:4:06, author = {Seung Hwan Kang and Sim Kim Lau}, title = {Development of a web-based student enquiry system: integration of ontology and case-based reasoning}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {282-294}, number = {4}, abstract = {This paper discusses the development of a web-based student enquiry system using techniques in Case-Based Reasoning (CBR) combined with advances in ontology. The system functions as an online enquiry front-desk to allow prospective students to find out if they are eligible for admission to a postgraduate course in an Australian University. Ontology is applied to ensure explicit specifications of concept and definition of terminology can be achieved in the system. 
CBR technique is applied to achieve the query by applying and adapting past queries.}, keywords = {student enquiry systems; ontology integration; case-based reasoning; CBR; semantic web; ontologies; web-based enquiries; internet; Australia.} } @ARTICLE{kasai:2:1:07, author = {Toshinobu Kasai and Haruhisa Yamaguchi and Kazuo Nagano and Riichiro Mizoguchi}, title = {A Semantic Web system for supporting teachers using ontology alignment}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {35-44}, number = {1}, abstract = {In Japan, it is important to provide teachers of Information Technology Education (IT) with a powerful help system that can locate and provide access to a variety of useful information resources. To this end, we built an ontology of the goal of IT education and its applications based on Semantic Web technology. This application was based on the alignment of ontologies to reuse the results of other research. Furthermore, we propose a Goal Transition Model to show a skeleton of the transition of instructional goals based on ontologies. Finally, we propose support functions that are used in the model.}, keywords = {semantic web; ontologies; IT education; education support; lesson plans; information technology; Japan; instructional goals; teaching.}, owner = {msicilia}, timestamp = {2007.09.13} } @ARTICLE{khriyenko:1:2:06, author = {Oleksiy Khriyenko and Vagan Terziyan}, title = {A framework for context-sensitive metadata description}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {154-164}, number = {2}, abstract = {Expectations regarding the new generation of Web depend on the success of Semantic Web technology. Resource Description Framework (RDF) is a basis for explicit and machine-readable representation of semantics. However RDF is not suitable for describing dynamic and context-sensitive resources (eg. processes). 
We present the Context Description Framework (CDF) as an extension of the RDF by adding a 'TrueInContext' component to the basic RDF triple ('subject-predicate-object'), and consider contextual value as a container of RDF statements. We also add a probabilistic component, which allows multilevel contextual dependence descriptions as well as presumes possibility for Bayesian reasoning with the RDF model.}, keywords = {semantic Web; context-sensitive metadata description; probabilistic context; RDF contextual extension; resource description framework; semantics} } @ARTICLE{kim:2:3:07, author = {Henry M. Kim and Arijit Sengupta and Joerg Evermann}, title = {MOQ: Web services ontologies for QoS and general quality evaluations}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {195--200}, number = {3}, abstract = {When describing web services, one of the obvious aspects that need representing is 'Quality of Service' (QoS), the capability of a web service to meet an acceptable level of service as per factors such as availability and accessibility. However, too much of a focus on developing functional QoS ontologies has led to an over-emphasis on representing solely QoS metrics and units of measurement. For instance, what does round trip time actually mean? Is the round trip time of every data item measured? Is it an average, or is every nth item measured? Is it the actual measure that is important or just the percentage of items that are beyond a certain range? Arguably existing QoS ontologies cannot readily answer many of these design questions because these questions have less to do with evaluating QoS and more to do with representing 'what is quality?' Therefore, there is an unmet need for web services ontologies that are designed at a higher level encompassing domain independent concepts, and generally applicable beyond QoS evaluations.
The MOQ set of ontologies, designed from the premise that quality is 'conformance to requirements', aims to fill this need. Comprised of ontologies of requirement, measurement, traceability, and quality management systems, MOQ can be extended to encompass QoS metrics and measurement units or be designed to interoperate with existing QoS ontology. The use of MOQ minimises the ambiguity in QoS evaluations.}, keywords = {semantic web; ontologies; QoS evaluation; quality of service; web services; requirement; measurement; traceability; quality management.}, owner = {msicilia}, timestamp = {2008.07.21} } @ARTICLE{koutsomitropoulos:2:4:07, author = {Dimitrios Koutsomitropoulos and George Paloukis and Theodore S. Papatheodorou}, title = {From metadata application profiles to semantic profiling: ontology refinement and profiling to strengthen inference-based queries on the semantic web}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {268--280}, number = {4}, abstract = {Ontologies on the Semantic Web form a basis for representing human-conceivable knowledge in a machine-understandable manner. Ontology development for a specific knowledge domain is, however, a difficult task, because the representation produced has to be adequately detailed and broad enough at the same time. The CIDOC-CRM is such an ontology, pertaining to cultural heritage, which we align to the Semantic Web environment: first, transforming it to OWL and then profiling it not in the usual flat metadata sense, but by refining and extending its conceptual structures, taking advantage of OWL semantics. This kind of profiling maintains applicability of the model, while enabling more expressive reasoning tasks. 
To this end, we construct a mechanism for acquiring implied and web-distributed information that is used to conduct and present a series of experimental inferences on the CRM-profiled form.}, keywords = {metadata application profiles; semantic profiling; cultural heritage; ontologies; semantic web; interoperability; inference based queries; ontology development; OWL semantics.}, owner = {msicilia}, timestamp = {2008.07.21} } @ARTICLE{kungas:2:3:07, author = {Peep Kungas and Mihhail Matskin}, title = {From web services annotation and composition to web services domain analysis}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {157--178}, number = {3}, abstract = {Automated web service annotation and composition are seen as complimentary technologies. While automated annotation allows to extract web service semantics from existing WSDL documents, automated composition uses this semantics for integrating applications. Anyway, automated composition can be applied not only to constructing new but also to analysis of existent web services. Therefore applicability of both methodologies is essential for increasing the productivity of information system integration. In this paper we propose application of automated composition for analysing web services domains. 
We identify and analyse some general web services properties in the context of industrial and public web services.}, keywords = {automated web service composition; web service annotation; formal semantics; domain analysis; web services.}, owner = {msicilia}, timestamp = {2008.07.21} } @ARTICLE{kurth:1:3:06, author = {Martin Kurth and Greg Nehler and Rick Silterra}, title = {Managing resource relationships with vocabularies: a case study}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {176--182}, number = {3}, abstract = {The Kinematic Models for Design Digital Library (KMODDL) exemplifies digital collections in which groups of objects are versions of the same resource and which resources are related to one another taxonomically. Other objects in the collection are supplementary materials that explicitly cite the primary KMODDL resources. To manage the complex relationships among KMODDL objects while maintaining the Dublin Core one-to-one principle, metadata developers established controlled vocabulary encoding schemes that linked related objects. The solution implemented enables users to find in a single search all versions of a resource and all supplementary materials that cite the resource.}, keywords = {application profiles; bibliographic relationships; controlled vocabularies; encoding schemes; functional requirements; bibliographic records; FRBR; kinematics; Dublin Core; digital libraries; metadata vocabularies.} } @Comment{REVIEW NOTE: the abstract of lee:1:3:06 below is identical to that of reuther:1:2:06 (personal name matching) and does not match this article's title or keywords -- verify against the published abstract.} @ARTICLE{lee:1:3:06, author = {Wonsook Lee and Shigeo Sugimoto}, title = {Toward core subject vocabularies for community-oriented subject gateways}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {167--175}, number = {3}, abstract = {This paper gives an overview of personal name matching. Personal name matching is of great importance for all applications that deal with personal names. 
The problem with personal names is that they are not unique and sometimes even for one name many variations exist. This leads to the fact that databases on the one hand may have several entries for one and the same person and on the other hand have one entry for many different persons. For the evaluation of personal name matching algorithms, test collections are of great importance. This paper gives an overview of existing test collections and presents two new test collections based on real-world bibliographic data. Additionally, state-of-the art techniques and a new approach based on semantics are also described.}, keywords = {subject gateways; community-oriented gateways; subject vocabularies; core vocabularies; interoperability; vocabulary maintenance systems; metadata vocabularies; Dublin Core.} } @ARTICLE{lytras:2:4:07, author = {Miltiadis D. Lytras and Miguel-Angel Sicilia}, title = {Where is the value in metadata?}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {235--241}, number = {4}, abstract = {Metadata can be defined as structured data about an object that supports some function(s) related to that object described. Thus, the functions enabled by metadata are the key to cost-justifying metadata creation and management efforts. However, the functions of metadata are diverse, and also the kinds of organisations that typically create and use metadata. This results in different possible accounts of the concept of 'metadata value'. 
This paper explores typical cases of organisations using metadata and then explores how metadata as an organisational asset can be framed in the existing Information Systems (IS) theories.}, keywords = {metadata management; metadata value; information systems; cost-benefit analysis; justification.}, owner = {msicilia}, timestamp = {2008.07.21} } @ARTICLE{mangold:2:1:07, author = {Christoph Mangold}, title = {A survey and classification of semantic search approaches}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {23--34}, number = {1}, abstract = {A broad range of approaches to semantic document retrieval has been developed in the context of the Semantic Web. This survey builds bridges among them. We introduce a classification scheme for semantic search engines and clarify terminology. We present an overview of ten selected approaches and compare them by means of our classification criteria. Based on this comparison, we identify not only common concepts and outstanding features, but also open issues. Finally, we give directions for future application development and research.}, keywords = {semantic search; semantic web; search engines; information retrieval; survey; document retrieval.}, owner = {msicilia}, timestamp = {2007.09.13} } @ARTICLE{manouselis:1:2:06, author = {Nikos Manouselis and Constantina Costopoulou}, title = {A metadata model for e-markets}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {141--153}, number = {2}, abstract = {The objective of this paper is to present a specialised metadata model that has been developed in order to store e-market characteristics in a systematic, interoperable and reusable manner. The proposed model is based on the Dublin Core (DC) metadata standard, and termed as the DC E-Market (DC-EM) metadata model. 
The paper describes the DC-EM metadata model elements, providing guidelines for their encoding in the eXtensible Markup Language (XML). It also presents the prototype of a DC-EM metadata repository, and its application for the analysis of a sample of Greek agricultural e-markets.}, keywords = {e-markets; electronic markets; online markets; Dublin Core; metadata models; XML; Greece; agricultural e-markets.} } @ARTICLE{nevile:1:3:06, author = {Liddy Nevile and Sophie Lissonnet}, title = {{Dublin Core} and museum information: metadata as cultural heritage data}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {198--206}, number = {3}, abstract = {In 2000, the museum community reported on the use of Dublin Core metadata in the museum context and proposed a broader set of elements for museums. Since then, Dublin Core metadata has evolved and it is now possible to use rich descriptions (data) and Semantic Web technologies and maintain the interoperability of Dublin Core metadata. This paper reviews the museum experience in the light of developments in the DCMES. It is based on recent experience with a virtual museum of Indigenous culture using Metadata Application Profiles (MAPs) and schema cross-walks.}, keywords = {Dublin Core; metadata; metadata vocabularies; application profiles; museums; cultural heritage; Quinkan indigenous community; case studies.} } @ARTICLE{duy:1:4:06, author = {Le Duy Ngan and Angel Eck Soong Goh and Tru Hoang Cao}, title = {Multi-ontology matchmaker}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {250--264}, number = {4}, abstract = {The rapid development and the large number of existing web services have led to a demand for a discovery mechanism. Researchers have developed Matchmakers to match web service requesters and providers. 
Current Matchmakers are adequate when locating web services that use the same ontology but they do not support the matching of semantic web services that use different ontologies. This paper introduces a Matchmaker which locates web services that use different ontologies. The Matchmaker algorithm, which incorporates a means of distinguishing different ontologies, is presented, together with the overall architecture of the Matchmaker engine.}, keywords = {semantic web; web services; discovery systems; ontology matching; matchmaker; ontologies.} } @ARTICLE{phipps:1:3:06, author = {Jon Phipps and Diane I. Hillmann and Gordon W. Paynter}, title = {Orchestrating metadata enhancement services: introducing {Lenny}}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {189--197}, number = {3}, abstract = {Harvested metadata often suffers from uneven quality to the point that utility is compromised. Although some aggregators have developed methods for evaluating and repairing specific metadata problems, it has been unclear how these methods might be scaled into services that can be used within an automated production environment. One possible solution is a model of service interaction that enables loosely coupled third party services to provide metadata enhancements to a central repository, with interactions orchestrated by a centralised software application. This application, launched by an editor, then works to define collections by requesting web services from allied projects. 
In this paper, the service orchestration process is described from both perspectives.}, keywords = {metadata services; metadata quality; selection services; service interactions; service orchestration; metadata generation services; metadata augmentation; metadata improvement; metadata transformation; metadata vocabularies; Dublin Core.} } @ARTICLE{poibeau:2:2:07, author = {Thierry Poibeau}, title = {Semantic annotation: mapping text to ontologies}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {67--78}, number = {2}, abstract = {This position paper discusses various issues concerning the semantic web. We claim that reflection on formats has attracted the most important part of the research effort so far, whereas the web (including the future semantic web) is mainly made of texts. From our point of view, research should attach a major role to natural language properties and deal with complexity, ambiguity and polysemy. We show that the initial model from T. Berners-Lee must be refined and made more complex if these natural language technologies are to take language diversity into account.}, keywords = {semantic web; semantic annotation; texts; ontologies; natural language processing; complexity; ambiguity; polysemy; language diversity.}, owner = {msicilia}, timestamp = {2007.09.13} } @ARTICLE{price:2:2:07, author = {Rosanne J. Price and Shonali Krishnaswamy and Nitin Arora}, title = {Current research in conceptual modelling of agent mobility: an ontology-based evaluation}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {79--93}, number = {2}, abstract = {There is increasing recognition of the potential benefits of mobile agent technology for distributed and mobile applications. However, there has been only limited consideration of how to model agent mobility at the conceptual level to support application analysis and design. 
Furthermore, the fragmented nature and inconsistent terminology characterising such research to date complicates the effort to understand and compare the work. This paper presents an ontology intended to serve as a necessary first step to providing effective conceptual level support for mobile agent applications and then uses the ontology to evaluate the conceptual models of current mobile agent methodologies.}, keywords = {mobile agents; ontology; conceptual modelling; mobility.}, owner = {msicilia}, timestamp = {2007.09.13} } @ARTICLE{rajugan:2:2:07, author = {R. Rajugan and Elizabeth Chang and Tharam S. Dillon}, title = {Sub-ontologies and ontology views: a theoretical perspective}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {94--111}, number = {2}, abstract = {The success of the Semantic Web (SW) relies heavily on formulation and interoperability of well-defined ontologies and their utilisation. Thus, in this context, it is interesting to investigate concepts like ontology view specification, construction and extraction. Here, in this paper, we address one such issue: ontology views. The proposed ontology view mechanism is a Layered View Model (LVM) and is specified and defined using conceptual and logical level semantics, as opposed to a query language. Our aim here is to provide a detailed theoretical foundation for LVM for ontologies. 
Also, our proposed concepts are illustrated using an industrial case-study example.}, keywords = {semantic web; data views; ontology views; conceptual semantics; layered view model.}, owner = {msicilia}, timestamp = {2007.09.13} } @ARTICLE{reuther:1:2:06, author = {Patrick Reuther and Bernd Walter}, title = {Survey on test collections and techniques for personal name matching}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {89--99}, number = {2}, abstract = {This paper gives an overview of personal name matching. Personal name matching is of great importance for all applications that deal with personal names. The problem with personal names is that they are not unique and sometimes even for one name many variations exist. This leads to the fact that databases on the one hand may have several entries for one and the same person and on the other hand have one entry for many different persons. For the evaluation of personal name matching algorithms, test collections are of great importance. This paper gives an overview of existing test collections and presents two new test collections based on real-world bibliographic data. Additionally, state-of-the art techniques and a new approach based on semantics are also described.}, keywords = {personal name matching; duplicate detection; duplicates; name disambiguation; record linkage; data test collections; social networks; co-authorship networks; personal names; semantics.} } @ARTICLE{rifaieh:1:4:06, author = {Rami Rifaieh and Aicha Nabila Benharkat}, title = {Sharing semantics among enterprise information systems with contextual ontologies: theory and practice}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {306--321}, number = {4}, abstract = {The Enterprise Information System (EIS) manages enterprise business, applies strategic and economic decisions, and holds communication with partners. 
Shared semantics are necessary to leverage system engineering benefits such as reusability and interoperability. In the last decade, research on ontologies and contexts was driven separately as formal support for treating the semantics sharing problem. In this paper, we show how to pair-up contexts and ontologies as a formal background for reaching a suitable global enterprise environment. To promote contextual ontologies, we present how to exploit technically the proposed formalism through an architecture and a prototype called Enterprise Information System Contextual Ontologies.}, keywords = {contextual ontologies; enterprise information systems; EIS; multi-representation; modal description logics; semantics sharing; reusability; interoperability.} } @ARTICLE{sakkopoulos:1:1:06, author = {Evangelos Sakkopoulos and Dimitris Kanellopoulos and Athanasios Tsakalidis}, title = {Semantic mining and web service discovery techniques for media resources management}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {66--75}, number = {1}, __markedentry = {[MIGUEL ANGEL]}, abstract = {The proposed techniques facilitate semantic discovery and interoperability of web services that manage and deliver web media content. As a test-bed, a web management system is discussed that provides vocational monographs for occupational guidance. The key idea of the proposed web service based architecture is that the WS of participating members share their vocational monographs and related resources, resulting in the overall system capacity increase. 
Mechanisms and techniques are presented concerning: an 'information desk', which provides an adaptive search user interface; discovery functions based on semantic representation of the WS' monographs; mining into monographs based on their semantic representation; a metric-based evaluation model for the web monographs; dynamic ranking of retrieved content list; consumption mechanisms for the career counsellors' local WS; e-payment; universal usability access. This analysis can be exploited in the practice of a new Occupational Guidance Policy.}, keywords = {web semantics; web services; web mining techniques; vocational monographs; semantic mining; media resources management; semantic discovery; interoperability; web media content; occupational guidance; semantic representation; web monographs; career guidance; semantic web.} } @ARTICLE{sanchez:2:2:07, author = {David Sanchez and Antonio Moreno}, title = {Bringing taxonomic structure to large digital libraries}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {112--122}, number = {2}, abstract = {Digital libraries are invaluable repositories of information. However, in many situations, their size makes it difficult to access the desired resource. In this paper, we present an automatic, unsupervised, domain-independent and scalable approach for structuring the resources available in a certain electronic repository for a particular domain. The system automatically detects and extracts the main topics related to the desired domain, offering a taxonomical structure. This result is complemented by the library's search engine, offering an integrated tool for accessing resources as an automatically composed directory service. 
The system has been tested for several digital libraries and domains of knowledge, providing good quality results in all cases.}, keywords = {taxonomy learning; digital libraries; web mining; web search engines; resource indexing; knowledge acquisition; ontologies; semantic web; electronic repositories; directory services.}, owner = {msicilia}, timestamp = {2007.09.13} } @ARTICLE{scharl:2:2:07, author = {Arno Scharl and Albert Weichselbraun and Wei Liu}, title = {Tracking and modelling information diffusion across interactive online media}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {136--145}, number = {2}, abstract = {Information spreads rapidly across websites and other online media. The IDIOM research project analyses this process by identifying redundant content elements, mapping them to ontology concepts, and tracking their temporal and geographic distribution. Linguists define 'idiom' as an expression whose meaning is different from the literal meanings of its component words. Similarly, investigating information diffusion promises insights that cannot be inferred from individual network elements. Previous research often focused on particular media, or neglected important aspects of the human language. 
IDIOM addresses these gaps to reveal fundamental mechanisms of information diffusion across media with distinct interactive characteristics.}, keywords = {information diffusion; ontology extension; natural language processing; interactive media; online media; websites; tracking.}, owner = {msicilia}, timestamp = {2007.09.13} } @ARTICLE{sicilia:1:1:06, author = {Miguel-Angel Sicilia}, title = {Metadata, semantics, and ontology: providing meaning to information resources}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {83--86}, number = {1}, __markedentry = {[MIGUEL ANGEL]}, abstract = {Metadata research has emerged as a new discipline in the last years, and is focused on the provision of semantic descriptions of a diverse kind to digital resources, web resources being the most frequent target. Such associated descriptions are supposed to serve as a foundation for advanced, improved services in several application areas, including search and location, personalisation, and automated delivery of information. In consequence, metadata research focuses both on the development of metadata description languages – of a general purpose or specialised kind – and also on the practicalities of metadata creation, dissemination, assessment, maintenance, and use for diverse scenarios and usage contexts. Ontology has emerged recently as a knowledge representation infrastructure for the provision of shared semantics to metadata, which essentially forms the basis of the vision of the Semantic Web. The combination of metadata description techniques and ontology engineering defines a new landscape for information engineering with specific challenges and promising applications, which requires a truly multi-disciplinary approach. 
This paper is intended to provide some basic insights for the endeavour of engineering systems based on metadata, semantics, and ontologies, and to foster the interaction of researchers with different backgrounds coming from diverse disciplines.}, keywords = {metadata; semantics; ontology; information resources; semantic web; information engineering; information systems.} } @ARTICLE{sriharee:1:2:06, author = {Natenapa Sriharee and Twittie Senivongse}, title = {Matchmaking and ranking of semantic web services using integrated service profile}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {100--118}, number = {2}, abstract = {Service discovery is a key aspect in the enabling technologies for service-oriented systems, including web services. Growing attention has been paid to the content of business and service descriptions to allow services to be discovered more flexibly and accurately. This paper presents a service description model called an integrated service profile, which describes the capabilities of a service in various aspects, such as attribute- , structure- , behaviour- , and operational rule-based capabilities. An integrated service profile can be used to discover web services semantically. Criteria for considering matching between the service description and the expected capability specified in the request, with respect to each part of the profile, are proposed. A matching algorithm is based on a flexible match approach and can retrieve relevant services by using user's preference criteria. A ranking methodology with an ordinal scale is also proposed to determine the degree of matching among the matched services.}, keywords = {semantic web services; matchmaking; ranking; ontology; service profile; integrated service discovery; flexible matching; preference criteria; service descriptions.} } @ARTICLE{sumalatha:2:4:07, author = {M. R. Sumalatha and V. Vaidehi and A. Kannan}, title = {Semantic query Cache using Dynamic Facts ({SCDF}): a novel approach to efficient information retrieval}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {281--294}, number = {4}, abstract = {XML query engines can be exploited by caching technology to reduce the latency caused by data transmission. This paper reports on a caching system designed to facilitate XML query processing in the Web. It provides an alternative approach to other architectures, in which the unit of transfer is a page or a tuple. Semantic caching gives enhanced performance based on the result sets of previously answered queries when compared with other systems. A mechanism capable of caching previously computed results for answering future queries with a knowledge-based analysis would be beneficial for improving performance. We have proposed a framework Semantic Cache using Dynamic Facts, which improves the query response reducing network traffic.}, keywords = {semantic caching; SCDF; knowledge base; data service; web service; information retrieval; semantic query cache; dynamic facts; XML query engines.}, owner = {msicilia}, timestamp = {2008.07.21} } @ARTICLE{tolosana:1:3:06, author = {R. Tolosana-Calasanz and J. Nogueras-Iso and R. Bejar and P.R. Muro-Medrano and F.J. Zarazaga-Soria}, title = {Semantic interoperability based on {Dublin Core} hierarchical one-to-one mappings}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {183--188}, number = {3}, abstract = {The tendency of current cataloguing systems is to interchange metadata in XML according to the specific standard required by each user on demand. Furthermore, metadata schemas from different domains are not usually semantically distinct but overlap and relate to each other in complex ways. As a consequence, the semantic interoperability has to deal with the equivalences between those descriptions. 
There exist two main approaches in order to tackle this problem: solutions based on the use of ontologies and solutions based on the creation of specific crosswalks for one-to-one mapping. This paper proposes a hierarchical one-to-one mapping solution for improving semantic interoperability.}, keywords = {metadata standards; metadata servers; catalogue servers; spatial data infrastructures; interoperability; semantic interoperability; crosswalks; XML; Dublin Core; RDF; ISO 19115; metadata vocabularies.} } @ARTICLE{uden:1:4:06, author = {Lorna Uden and Kimmo Salmenjoki and Marjan Hericko and Luka Pavlic}, title = {Metadata for research and development collaboration between universities}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2006}, volume = {1}, pages = {295--305}, number = {4}, abstract = {Knowledge Management (KM) research requires expertise from many different disciplines. To help with KM research activities of different groups that are geographically distributed, a new Knowledge Management in Organisations (KMO) structure is proposed. The organisational structure of KMO is based on decentralised units where autonomy and independence are established in each group. Effective design of the system requires that a proper format for representing and organising the shared knowledge is crucial. 
This paper describes the development of metadata and ontology for our KMO system to support the sharing of knowledge for the distributed partners between the four universities.}, keywords = {knowledge management; KM; collaborative research; organisational knowledge; knowledge web; semantic web; web technologies; R&D; research and development; university collaboration; metadata; organisational structure; ontologies; knowledge sharing.} } @ARTICLE{wu:2:3:07, author = {Raymond Wu}, title = {Semantic mediation modelling in the service component design}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {179--194}, number = {3}, abstract = {Semantic mediation is one of the big challenges in this century from scale and complexity. The main objective of mediation is to minimise the architecture gap, and reduce expenses. Achieving this objective requires multi-dimensional approaches including service strategy and integration. Therefore, architecture alignment and coherence are extremely important. We further extend our investigation by using AHP and eigenvalue techniques in component evaluation. 
Our proposal mainly argues that semantic mediation should not be initiated from the macro architecture; instead we propose that mediation should impact components first, to push semantics strategy directly into the micro process to be synthesised into the service layer.}, keywords = {semantic mediation services; component synthesis; transformation metadata; modelling; service component design; AHP; analytical hierarchy process; eigenvalue; component evaluation; semantics.}, owner = {msicilia}, timestamp = {2008.07.21} } @ARTICLE{wu:2:4:07, author = {Raymond Wu}, title = {An industry case study of micro component design and semantic mediation}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {223--234}, number = {4}, abstract = {This paper is a case study of enterprise integration in financial industry which consists of two parts. The first is component based design, and the second is semantic mediation. It explains why componentisation and semantics are both critical. This research paper uses examples in the industry to demonstrate component-based implementation and semantics mediation. To investigate the fundamental mechanism for the further understanding of the meaning of semantic mediation, we execute benchmarking by using both empirical experiments and theoretical modelling. 
Our observations show that a significant enhancement can be achieved by using the mediation strategy in semantic metadata.}, keywords = {semantic mediation; service component synthesis; semantics; transformation metadata; enterprise integration; financial services; component based design; benchmarking.}, owner = {msicilia}, timestamp = {2008.07.21} } @ARTICLE{yu:2:2:07, author = {Changrui Yu and Yan Luo and Hongwei Wang}, title = {Extending the {Unified Modelling Language} to support visualisation of ontology models}, journal = {International Journal of Metadata, Semantics and Ontologies}, year = {2007}, volume = {2}, pages = {123--135}, number = {2}, abstract = {Many features of UML are not generally available in most ontology languages. Meanwhile, ontology languages have some features that UML does not support. This paper focuses on extending UML to support visualisation of ontology models. First, this paper gives a brief review of ontology models and the many mature UML tools. Secondly, the paper identifies similarities and differences between UML and the ontology language DAML, elaborates on the key problems for UML representation of ontology, and analyses deficiencies of UML extension proposed by Baclawski. To reconcile these differences, we then propose a modest extension to UML infrastructure for the most problematic differences.}, keywords = {ontology models; Unified Modelling Language; UML infrastructure; describe logic; visualisation; DAML.}, owner = {msicilia}, timestamp = {2007.09.13} }