<?xml version="1.0" encoding="utf-8"?>
<?xml-stylesheet type='text/xsl' href='/Content/StratML.xslt'?>
<StrategicPlan>
  <Name>AI KR Strategists</Name>
  <Description>This plan defines the roles of AI KR Strategists.</Description>
  <OtherInformation>
  </OtherInformation>
  <!--Strategic Plan Core-->
  <StrategicPlanCore>
    <Organization>
      <Name>Artificial Intelligence Knowledge Representation Community Group</Name>
      <Acronym>AIKR CG</Acronym>
      <Identifier>Organization_cd4a9bd6-0ec8-425c-ae47-9599f9b4b209</Identifier>
      <Description>
      </Description>
      <Stakeholder StakeholderTypeType="Individual">
        <Name>Carl Mattocks</Name>
        <Description>
        </Description>
        <Role>
          <Name>CoChair</Name>
        </Role>
      </Stakeholder>
    </Organization>
    <Vision>
      <Description>Work performed and works created for each AI value proposition is clearly and transparently documented and measured.</Description>
      <Identifier>Vision_861566c8-e9be-4642-b52f-f673fa499f4e</Identifier>
    </Vision>
    <Mission>
      <Description>To be responsible and accountable for the selection, development, application and management of Knowledge Representation (KR) for Artificial Intelligence (AI).</Description>
      <Identifier>Mission_861566c8-e9be-4642-b52f-f673fa499f4e</Identifier>
    </Mission>
    <Goal>
      <Name>Strategic Plan</Name>
      <Description>Document the vision, values, goals, objectives for one or more AIKR objects</Description>
      <Identifier>Goal_995c0b60-d64c-445e-86c8-a6f755f5ed9a</Identifier>
      <SequenceIndicator>1</SequenceIndicator>
      <OtherInformation>An AI KR Object may be:
- an algorithm (example - enable an entity to determine consequences; a set of instructions that provide the ability to monitor and/or move the environment; the rules that are used to change/manipulate/interpret data)
- an ontology (which has a set of ontological commitments) See Goal - Ontological Statements (provides sufficient definition to allow measurement to be performed)
- an Intelligent Reasoning (fragmentary) Theory, such as,

- deduction,
- induction,
- abduction,
- by analogy,
- probabilistic,
- case-based 
- a Reasoning Mechanism (computational environment), such as,

- natural language processor,
- rules engine,
- machine learning
- a Vocabulary (medium of human expression)</OtherInformation>
      <Stakeholder StakeholderTypeType="Generic_Group">
        <Name>Human-in-the-Loop Controllers</Name>
        <Description>Human-in-the-Loop Controllers are humans that Train/Test AI systems.

They control the inputs of the humans when humans are in the loop.

As a simple example, they tag the initial images that are fed into the algorithms; later they come back and refine the patterns identified; ultimately they may also come back and test the outcome.

See:
- https://en.wikipedia.org/wiki/Human-in-the-loop 
- https://humansintheloop.org/model-training/</Description>
        <Role>
          <Name>Model, test, evaluate and implement ethical approaches for Supervised Machine Learning of curated (labeled) data sets and the Active Learning training of algorithms via adjustment of parameters</Name>
        </Role>
      </Stakeholder>
      <Objective>
        <Name>Ontology</Name>
        <Description>Employ ontology content that removes ambiguity, supports performance measurement and enables buy in</Description>
        <Identifier>Objective_a3d0d024-38a2-44e2-8c05-feaf13fdfb1d</Identifier>
        <SequenceIndicator>1</SequenceIndicator>
        <OtherInformation>The AI Strategist will work, with the AI KR Strategist and/or other experts, to ensure that ontology content mitigates bias by employing a complete glossary of all the data used and utilizing an accurate representation of the (data) relationship rules identified in processing instructions. That is, the ontology has: Representational Adequacy — it is able to represent all the required knowledge; Inferential Adequacy — it is able to manipulate the knowledge represented to produce new knowledge (inferred from the original); Acquisitional Efficiency — it is able to acquire new knowledge using Human In-The-Loop and/or Human Out-of-The-Loop methods</OtherInformation>
        <Stakeholder StakeholderTypeType="Generic_Group">
          <Name>AI KR Strategists</Name>
          <Description>
          </Description>
        </Stakeholder>
        <Stakeholder StakeholderTypeType="Generic_Group">
          <Name>AI Strategists</Name>
          <Description>
          </Description>
        </Stakeholder>
      </Objective>
      <Objective>
        <Name>Algorithms</Name>
        <Description>Understand the various different types of algorithms and where they can support business strategy</Description>
        <Identifier>Objective_72826a99-eaf8-482d-b07f-a09afa7d1379</Identifier>
        <SequenceIndicator>2</SequenceIndicator>
        <OtherInformation>Algorithms' capabilities and limitations should be explained in a manner that is adapted to stakeholder concerns and identifies how accuracy, robustness, computational cost and stability will be measured</OtherInformation>
      </Objective>
    </Goal>
    <Goal>
      <Name>Applications</Name>
      <Description>Understand the potential applications of AI to business strategies.</Description>
      <Identifier>Goal_2a903634-050d-43e0-9d2e-f0b1f3328635</Identifier>
      <SequenceIndicator>2</SequenceIndicator>
      <OtherInformation>
      </OtherInformation>
    </Goal>
    <Goal>
      <Name>Requirements</Name>
      <Description>Identify which areas of the requirements warrant AI solutions versus which can be achieved with other types of solutions</Description>
      <Identifier>Goal_a4a5f259-af0b-47f7-8e34-6c8eaefdae19</Identifier>
      <SequenceIndicator>3</SequenceIndicator>
      <OtherInformation>
      </OtherInformation>
    </Goal>
    <Goal>
      <Name>Glossaries</Name>
      <Description>Employ definitions from one or more glossaries when explaining AIKR object audit data, veracity facts and (human, social and technology) risk mitigation factors</Description>
      <Identifier>Goal_0083c58a-3d13-4e0e-95d1-8391c3f6414a</Identifier>
      <SequenceIndicator>4</SequenceIndicator>
      <OtherInformation>So that (business) people more readily understand the value that the glossaries bring.</OtherInformation>
    </Goal>
    <Goal>
      <Name>Risks</Name>
      <Description>Identify and mitigate risks and known threats</Description>
      <Identifier>Goal_bbeed24a-c843-427c-944e-08376a49ab9e</Identifier>
      <SequenceIndicator>5</SequenceIndicator>
      <OtherInformation>A guiding principle is that AIKR systems must mitigate risks.</OtherInformation>
      <Stakeholder StakeholderTypeType="Organization">
        <Name>DARPA</Name>
        <Description>
        </Description>
        <Role>
          <Name>
          </Name>
          <Description>This goal arose in reference to the DARPA initiative.</Description>
        </Role>
      </Stakeholder>
      <Objective>
        <Name>Consequences</Name>
        <Description>Identify and minimize adverse and/or unintended consequences</Description>
        <Identifier>Objective_fab00957-6d05-461b-a684-197efdecef6e</Identifier>
        <SequenceIndicator>1</SequenceIndicator>
        <OtherInformation>"Environment" includes the natural environment, as well as socio-economic and societal environments.
- Minimise the risk of unintended consequences.
- AI shall do no harm
- When you're testing something, you should not alter the environmental conditions.
- The social and societal impact should be carefully considered</OtherInformation>
      </Objective>
      <Objective>
        <Name>Data</Name>
        <Description>Ensure data quality and integrity</Description>
        <Identifier>Objective_545676f5-3cde-4aa5-9e22-1a4a0f108e85</Identifier>
        <SequenceIndicator>2</SequenceIndicator>
        <OtherInformation>Data quality: the data is fit for its intended purpose/use. Is supported by a systematic method for driving agreement on the definitions of categories.

Data integrity: is the maintenance and assurance of the accuracy and consistency of data over its entire life-cycle. Is supported by a monitoring system that compares actual outcome with predicted accuracy</OtherInformation>
      </Objective>
      <Objective>
        <Name>Bias</Name>
        <Description>Identify and reduce bias in AI KR objects</Description>
        <Identifier>Objective_1ea3840a-0a5c-452e-afcf-1a486d38fbc5</Identifier>
        <SequenceIndicator>3</SequenceIndicator>
        <OtherInformation>Bias is disproportionate weight in favour of or against an idea or thing, usually in a way that is closed-minded, prejudicial or unfair.

A bias is a systematic error.</OtherInformation>
      </Objective>
      <Objective>
        <Name>Security </Name>
        <Description>Guard against illegitimate access whilst ensuring legitimate access</Description>
        <Identifier>Objective_036d44e3-15a7-416e-b93e-a6a9b7941229</Identifier>
        <SequenceIndicator>4</SequenceIndicator>
        <OtherInformation>Security means protection as well as the measures taken to be safe or protected.</OtherInformation>
      </Objective>
      <Objective>
        <Name>Control</Name>
        <Description>Design the criteria to control the use (and misuse) of algorithms and data</Description>
        <Identifier>Objective_0548440c-d869-4347-89de-6b8157947b6f</Identifier>
        <SequenceIndicator>5</SequenceIndicator>
        <OtherInformation>Control:
- control of the algorithms: To stop them from learning beyond our ability to control them.
- control of the people who develop (strategists and developers) and use algorithms: they can be used for good or for evil.</OtherInformation>
        <Stakeholder StakeholderTypeType="Generic_Group">
          <Name>AI KR Strategists</Name>
          <Description>
          </Description>
        </Stakeholder>
        <Stakeholder StakeholderTypeType="Generic_Group">
          <Name>AI Developers</Name>
          <Description>
          </Description>
        </Stakeholder>
        <Stakeholder StakeholderTypeType="Generic_Group">
          <Name>Users of AI systems</Name>
          <Description>
          </Description>
        </Stakeholder>
      </Objective>
      <Objective>
        <Name>Intellectual Property</Name>
        <Description>Manage Intellectual Property rights over AI KR works</Description>
        <Identifier>Objective_703a1123-98b1-401c-a691-1cc441b4953b</Identifier>
        <SequenceIndicator>6</SequenceIndicator>
        <OtherInformation>What works can be protected, and what form of protection can be used for them?</OtherInformation>
      </Objective>
      <Objective>
        <Name>Privacy </Name>
        <Description>Protect the rights of the individuals/corporations whose data is processed</Description>
        <Identifier>Objective_46348127-5596-4b21-8d17-5bf451701316</Identifier>
        <SequenceIndicator>7</SequenceIndicator>
        <OtherInformation>Ensuring data is processed with the permission of the people to whom it pertains.

E.g. GDPR, intellectual property, etc.</OtherInformation>
      </Objective>
      <Objective>
        <Name>Governance</Name>
        <Description>Design governance in line with the risk tolerance</Description>
        <Identifier>Objective_5336be3a-69dd-40b8-b18f-fef6cb1e87a0</Identifier>
        <SequenceIndicator>8</SequenceIndicator>
        <OtherInformation>Data and algorithm governance.</OtherInformation>
      </Objective>
    </Goal>
    <Goal>
      <Name>Compliance</Name>
      <Description>Ensure AI Systems comply with all applicable laws and regulations, such as, provision audit data defined by a governance operating model</Description>
      <Identifier>Goal_b71896a0-3d86-4713-a720-15738315e36b</Identifier>
      <SequenceIndicator>6</SequenceIndicator>
      <OtherInformation>Compliance policies and procedures ensure that a planned change to a KR Object usage will comply with applicable laws/regulations during the identification, development, documentation, testing, validation, implementation, modification, use and retirement lifecycle</OtherInformation>
    </Goal>
    <Goal>
      <Name>Ethics</Name>
      <Description>Ensure AI Systems adhere to principles of ethics</Description>
      <Identifier>Goal_bbcb3dc4-5946-4d7d-b43f-0a55af305cc2</Identifier>
      <SequenceIndicator>7</SequenceIndicator>
      <OtherInformation>
      </OtherInformation>
      <Objective>
        <Name>Autonomy</Name>
        <Description>Find the balance between human control/oversight and machine autonomy</Description>
        <Identifier>Objective_28520dbc-b02c-4e4f-a93e-e91ffaff0659</Identifier>
        <SequenceIndicator>1</SequenceIndicator>
        <OtherInformation>Oversight controls will enable the assessment of algorithms, data and design processes</OtherInformation>
      </Objective>
      <Objective>
        <Name>Veracity</Name>
        <Description>Veracity</Description>
        <Identifier>Objective_7a9d8c77-e826-4a55-8641-e7812145de41</Identifier>
        <SequenceIndicator>2</SequenceIndicator>
        <OtherInformation>
        </OtherInformation>
      </Objective>
      <Objective>
        <Name>Accountability</Name>
        <Description>Accountability</Description>
        <Identifier>Objective_fc02c4bf-a0cc-42b3-9452-05d06965e47f</Identifier>
        <SequenceIndicator>3</SequenceIndicator>
        <OtherInformation>
        </OtherInformation>
      </Objective>
      <Objective>
        <Name>Confidentiality</Name>
        <Description>Confidentiality</Description>
        <Identifier>Objective_4fc0efcb-adaa-4ead-926b-4ab5512b62a5</Identifier>
        <SequenceIndicator>4</SequenceIndicator>
        <OtherInformation>
        </OtherInformation>
      </Objective>
    </Goal>
    <Goal>
      <Name>Robustness</Name>
      <Description>Ensure AI Systems are designed to handle uncertainty and tolerate perturbation from a likely threat perspective, such as, design considerations incorporate human, social and technology risk factors</Description>
      <Identifier>Goal_5a34fa22-8d74-402f-b111-d0e585de11a2</Identifier>
      <SequenceIndicator>8</SequenceIndicator>
      <OtherInformation>
      </OtherInformation>
    </Goal>
    <Goal>
      <Name>Outcomes</Name>
      <Description>Track AIKR object performance outcome via KPI (Key Performance Indicator) based on supervised learning models measurements</Description>
      <Identifier>Goal_e2b04ebe-49d3-43f3-a723-a44135690f64</Identifier>
      <SequenceIndicator>9</SequenceIndicator>
      <OtherInformation>
      </OtherInformation>
    </Goal>
    <Goal>
      <Name>Algorithm Evaluation</Name>
      <Description>Evaluate Algorithms</Description>
      <Identifier>Goal_56cd3982-542c-4719-965e-0bcce6606a01</Identifier>
      <SequenceIndicator>10</SequenceIndicator>
      <OtherInformation>Assess how well Algorithm results match actual outcomes to determine
- how sensitive inferences made are to the parameters and
- the proportion of observations made that were accurately predicted.

When needed the algorithmic impact assessments will also identify cause and effect of any biases.</OtherInformation>
      <Stakeholder StakeholderTypeType="Organization">
        <Name>Artificial Intelligence Knowledge Representation Community Group (AIKR CG)</Name>
        <Description>
        </Description>
        <Role>
          <Name>Community of Interest</Name>
        </Role>
      </Stakeholder>
      <Objective>
        <Name>Trustworthiness</Name>
        <Description>Advance use of AI safeguards</Description>
        <Identifier>Objective_fa222026-9d57-4423-9433-9933bfe755e0</Identifier>
        <SequenceIndicator>1</SequenceIndicator>
        <OtherInformation>Advance use of AI change management, knowledge representation performance evaluation, algorithmic impact assessment and context aware safeguards for a reliable, safe and transparent outcome</OtherInformation>
      </Objective>
      <Objective>
        <Name>Classification</Name>
        <Description>Track Classification Performance Indicators</Description>
        <Identifier>Objective_964efa5e-58a7-4d9a-a839-daa8aef2a857</Identifier>
        <SequenceIndicator>2</SequenceIndicator>
        <OtherInformation>Ontological Statement: Classification Accuracy is the ratio of the number of correct class label predictions to the total number of input samples. 

Ontological Statement: F1 Score measures the Harmonic Mean between precision and recall. The range for F1 Score is [0, 1]. It tells you how precise your classifier is (how many instances it classifies correctly), as well as how robust it is (it does not miss a significant number of instances).</OtherInformation>
      </Objective>
    </Goal>
    <Goal>
      <Name>KR Objects</Name>
      <Description>Evaluate KR Object Performance</Description>
      <Identifier>Goal_2fdd92f6-fce6-41f7-b914-993aac92123e</Identifier>
      <SequenceIndicator>11</SequenceIndicator>
      <OtherInformation>KR Object oversight mechanisms will define how performance measurements are used via human-in-the-loop, human-on-the-loop, and human-in-command approaches</OtherInformation>
    </Goal>
  </StrategicPlanCore>
  <!--Administrative Information-->
  <AdministrativeInformation>
    <Identifier>StrategyPlan_861566c8-e9be-4642-b52f-f673fa499f4e</Identifier>
    <StartDate>2020-04-01</StartDate>
    <EndDate>
    </EndDate>
    <PublicationDate>2025-12-24</PublicationDate>
    <Source>https://www.stratnavapp.com/StratML/Part1/861566c8-e9be-4642-b52f-f673fa499f4e/Styled</Source>
    <Submitter>
      <Identifier>Submitter_861566c8-e9be-4642-b52f-f673fa499f4e</Identifier>
      <GivenName>Carl</GivenName>
      <Surname>Mattocks</Surname>
      <PhoneNumber>
      </PhoneNumber>
      <EmailAddress>CarlMattocks@WellnessIntelligence.Institute</EmailAddress>
    </Submitter>
  </AdministrativeInformation>
</StrategicPlan>