Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .pre-commit-config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -12,3 +12,4 @@ repos:
- id: ruff-check
args: [ --fix ]
- id: ruff-format
args: [--diff]
6 changes: 4 additions & 2 deletions avise/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,8 +40,10 @@
DEFAULT_REPORTS_DIR = "reports"


def main(arguments=[]) -> None:
def main(arguments=None) -> None:
"""Main function."""
if arguments is None:
arguments = []
if not isinstance(arguments, list):
raise TypeError("CLI parser expects a list of strings as arguments.")
if len(arguments) > 200:
Expand Down Expand Up @@ -183,7 +185,7 @@ def main(arguments=[]) -> None:
)

# Print a small summary to the console
print(f"\nSecurity Evaluation Test completed!")
print("\nSecurity Evaluation Test completed!")
print(f" Format: {report_format.value.upper()}")
print(f" Total: {report.summary['total_sets']}")
print(f" Passed: {report.summary['passed']} ({report.summary['pass_rate']}%)")
Expand Down
22 changes: 22 additions & 0 deletions avise/engine.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,10 @@
import logging
from pathlib import Path
from typing import Optional, Dict, Any
import sys
import os
import subprocess
import importlib.util

# Import to register different plugins and SETs
from . import evaluators
Expand All @@ -25,6 +29,24 @@

DEFAULT_REPORTS_DIR = "reports"

# On Windows, the upstream "triton" wheel is unavailable; the community
# "triton-windows" distribution provides the importable ``triton`` module
# instead. Install it on the fly if missing so the required language models
# can run.
if os.name == "nt":
    # BUG FIX: pip distribution names may contain dashes, but importable
    # module names cannot, so ``find_spec("triton-windows")`` could never
    # succeed and pip install ran on every import. Probe for the module the
    # distribution actually installs: ``triton``.
    if importlib.util.find_spec("triton") is None:
        logger.info(
            "The current Operating System seems to be Windows. We need to install triton-windows Python package to the current environment in order to run required language models."
        )
        try:
            # Use the current interpreter's pip so the package lands in the
            # active environment (venv-safe).
            subprocess.check_call(
                [sys.executable, "-m", "pip", "install", "triton-windows"]
            )
            logger.info(
                "Successfully installed triton-windows package to the current environment."
            )
        except Exception as e:
            # Surface a single actionable error; chain the original cause.
            raise RuntimeError(
                "Unable to install triton-windows Python package. Cannot run required language models on Windows without it. Try pip install triton-windows"
            ) from e


class ExecutionEngine:
"""Execution Engine."""
Expand Down
4 changes: 2 additions & 2 deletions avise/evaluators/languagemodel/partial_compliance.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
"""Partial compliance evaluator.

Detects patterns where the AI model / LLM acknowledges it should comply with a request, but decides to respond and help anyway.
Detects patterns where the AI model / LLM acknowledges it should refuse to help a request, but decides to respond and help anyway.
This indicates possible vulnerabilities to prompt injection in the LLM, and deficiencies in the safety guidelines / guardrails.

"""
Expand All @@ -19,7 +19,7 @@ class PartialComplianceEvaluator(BaseLMEvaluator):
"""Partical Compliance Evaluator."""

name = "partial_compliance"
description = "Detects patterns where the AI model / LLM acknowledges it should comply with a request, but decides to respond and help anyway."
description = "Detects patterns where the AI model / LLM acknowledges it should refuse to help with a request, but decides to respond and help anyway."

patterns = [
# Acknowledging malicious prompt, but proceeds (bounded to same sentence ~100 chars)
Expand Down
1 change: 0 additions & 1 deletion avise/sets/languagemodel/single_turn/prompt_injection.py
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,6 @@ class PromptInjectionTest(BaseSETPipeline):
def __init__(self):
    """Set up the SET instance, its dependencies, and the tools used while it runs."""
    super().__init__()
    # Per-prompt ELM evaluation results, keyed by prompt identifier.
    self.elm_evaluations: Dict[str, str] = {}
    # System prompt handed to the evaluation model; filled in lazily.
    self.evaluation_system_prompt: Optional[str] = None

Expand Down
5 changes: 2 additions & 3 deletions docs/source/building.connector.rst
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,7 @@ create an Ollama Connector.
For clarity, here are the package imports that we will use later on in the code:

* ``import logging``: logging is used to create logs that will help with debugging and informing the user of what's happening when the program is executing.
* ``from typing import List`` List is used as a type hint for method variables that are a list of some specific type.
* ``from typing import List`` List is used as a type hint for method parameters that are a list of some specific type.
* ``import ollama`` We will use the ollama Client for making requests to the API endpoint.
* ``from .base import BaseLMConnector, Message`` These we defined earlier and will now use.
* ``from ...registry import connector_registry`` connector_registry holds information of all connectors, sets, and formats available to the Execution Engine. We want to add our connector to the registry as well.
Expand Down Expand Up @@ -533,8 +533,7 @@ we can use the latest modification we have made to the codebase:

* ``--SET``: with this argument, we tell the CLI which SET we wish to execute.
* ``--connectorconf``: with this argument, we tell the CLI the path of the connector configuration JSON we just created.
* ``--SETconf``: with this optional argument, we can give the CLI a path to a custom SET configuration file
(there are predefined default paths if we don't use this argument)
* ``--SETconf``: with this optional argument, we can give the CLI a path to a custom SET configuration file (there are predefined default paths if we don't use this argument)

If our code has no errors and works as we intended, the Execution Engine starts running the SET and eventually produces
a report file and prints something like this to the console:
Expand Down
Loading