feat(ACI): Delete endpoint for a detector #84279

Open · wants to merge 25 commits into master · changes shown from 12 of 25 commits
3 changes: 3 additions & 0 deletions src/sentry/deletions/__init__.py
@@ -111,6 +111,7 @@ def load_defaults(manager: DeletionTaskManager) -> None:
    from sentry.sentry_apps.models.sentry_app_installation_token import SentryAppInstallationToken
    from sentry.sentry_apps.models.servicehook import ServiceHook
    from sentry.snuba import models as snuba_models
    from sentry.workflow_engine.models import DataSource, Detector

    from . import defaults

@@ -125,6 +126,8 @@ def load_defaults(manager: DeletionTaskManager) -> None:
    manager.register(models.Commit, defaults.CommitDeletionTask)
    manager.register(models.CommitAuthor, defaults.CommitAuthorDeletionTask)
    manager.register(CommitFileChange, BulkModelDeletionTask)
    manager.register(Detector, defaults.DetectorDeletionTask)
    manager.register(DataSource, defaults.DataSourceDeletionTask)
    manager.register(models.Deploy, BulkModelDeletionTask)
    manager.register(DiscoverSavedQuery, defaults.DiscoverSavedQueryDeletionTask)
    manager.register(DiscoverSavedQueryProject, BulkModelDeletionTask)
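For orientation (not part of the diff): registering a model here is what lets callers resolve its deletion task later. A rough sketch of how the registry is typically exercised, with the get signature assumed from the manager API and detector_id as a placeholder:

    from sentry import deletions
    from sentry.workflow_engine.models import Detector

    # Resolve the task registered above for Detector; `query` selects the
    # rows to delete. `detector_id` is a hypothetical placeholder.
    task = deletions.get(model=Detector, query={"id": detector_id})
    while task.chunk():  # each call deletes one batch of work
        pass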
2 changes: 2 additions & 0 deletions src/sentry/deletions/defaults/__init__.py
@@ -5,6 +5,8 @@
from .artifactbundle import * # noqa: F401,F403
from .commit import * # noqa: F401,F403
from .commitauthor import * # noqa: F401,F403
from .data_source import * # noqa: F401,F403
from .detector import * # noqa: F401,F403
from .discoversavedquery import * # noqa: F401,F403
from .group import * # noqa: F401,F403
from .grouphash import * # noqa: F401,F403
9 changes: 9 additions & 0 deletions src/sentry/deletions/defaults/data_source.py
@@ -0,0 +1,9 @@
from sentry.deletions.base import BaseRelation, ModelDeletionTask, ModelRelation
from sentry.workflow_engine.models.data_source import DataSource


class DataSourceDeletionTask(ModelDeletionTask[DataSource]):
    def get_child_relations(self, instance: DataSource) -> list[BaseRelation]:
        from sentry.snuba.models import QuerySubscription

        return [ModelRelation(QuerySubscription, {"id": instance.query_id})]
19 changes: 19 additions & 0 deletions src/sentry/deletions/defaults/detector.py
@@ -0,0 +1,19 @@
from sentry.deletions.base import BaseRelation, ModelDeletionTask, ModelRelation
from sentry.workflow_engine.models.detector import Detector


class DetectorDeletionTask(ModelDeletionTask[Detector]):
    def get_child_relations(self, instance: Detector) -> list[BaseRelation]:
        from sentry.workflow_engine.models import DataConditionGroup, DataSource

        # XXX: this assumes a data source is connected to a single detector. It's not
        # possible in the UI to do anything else today, but if this changes we'll need
        # to add custom conditional deletion logic.

        model_relations: list[BaseRelation] = [ModelRelation(DataSource, {"detector": instance.id})]

        if instance.workflow_condition_group:
            model_relations.append(
                ModelRelation(DataConditionGroup, {"id": instance.workflow_condition_group.id})
            )

        return model_relations

Review thread on get_child_relations:
Member: Should we also delete DetectorWorkflow if that relationship exists? As well as related lookup tables (e.g. AlertRuleDetector)?
Member Author: DetectorWorkflow does get deleted automatically via cascade delete; the test checks for it. I'm not sure that we want to delete the migration tables' data here, since that will be temporary.
Member: Just note that with cascade delete, if there are a large number of related rows the delete can fail. That's probably unlikely here, but JFYI.
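The cascade-size caveat in the thread above could be addressed by declaring the relation explicitly, letting the deletion framework remove the join rows in chunks instead of relying on the database cascade. A minimal sketch, not part of this PR, assuming DetectorWorkflow exposes a detector_id foreign key:

    class DetectorDeletionTask(ModelDeletionTask[Detector]):
        def get_child_relations(self, instance: Detector) -> list[BaseRelation]:
            from sentry.workflow_engine.models import (
                DataConditionGroup,
                DataSource,
                DetectorWorkflow,
            )

            model_relations: list[BaseRelation] = [
                ModelRelation(DataSource, {"detector": instance.id}),
                # Assumption: chunk-delete the join rows up front rather than
                # leaning on the database-level cascade.
                ModelRelation(DetectorWorkflow, {"detector_id": instance.id}),
            ]

            if instance.workflow_condition_group:
                model_relations.append(
                    ModelRelation(DataConditionGroup, {"id": instance.workflow_condition_group.id})
                )

            return model_relations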
@@ -11,10 +11,14 @@
from sentry.apidocs.constants import (
    RESPONSE_BAD_REQUEST,
    RESPONSE_FORBIDDEN,
    RESPONSE_NO_CONTENT,
    RESPONSE_NOT_FOUND,
    RESPONSE_UNAUTHORIZED,
)
from sentry.apidocs.parameters import DetectorParams, GlobalParams
from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion
from sentry.grouping.grouptype import ErrorGroupType
from sentry.models.project import Project
from sentry.workflow_engine.endpoints.serializers import DetectorSerializer
from sentry.workflow_engine.models import Detector

@@ -57,7 +61,7 @@ def convert_args(self, request: Request, detector_id, *args, **kwargs):
404: RESPONSE_NOT_FOUND,
},
)
    def get(self, request: Request, project: Project, detector: Detector):
        """
        Fetch a detector
        `````````````````````````
@@ -69,3 +73,27 @@ def get(self, request: Request, project: Project, detector: Detector):
            DetectorSerializer(),
        )
        return Response(serialized_detector)

Review comment (Member): 🥔 for adding documentation to the endpoint from the beginning. People make experimental endpoints left and right, and the endpoint sits there for a long time before publishing, so everyone forgets the context 😅

    @extend_schema(
        operation_id="Delete a Detector",
        parameters=[
            GlobalParams.ORG_ID_OR_SLUG,
            GlobalParams.PROJECT_ID_OR_SLUG,
            DetectorParams.DETECTOR_ID,
        ],
        responses={
            204: RESPONSE_NO_CONTENT,
            403: RESPONSE_FORBIDDEN,
            404: RESPONSE_NOT_FOUND,
        },
    )
    def delete(self, request: Request, project: Project, detector: Detector):
        """
        Delete a detector
        """
        if detector.type == ErrorGroupType.slug:
            return Response(status=403)

        RegionScheduledDeletion.schedule(detector, days=0, actor=request.user)
        # TODO add audit log entry
        return Response(status=204)
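One plausible way to resolve the TODO above, following the audit-entry pattern other Sentry delete endpoints use; the DETECTOR_REMOVE event name and the data payload are assumptions, since no detector audit event appears in this diff:

    from sentry import audit_log
    from sentry.utils.audit import create_audit_entry

    # Hypothetical sketch: "DETECTOR_REMOVE" would first need to be registered
    # with the audit log event registry.
    create_audit_entry(
        request=request,
        organization=project.organization,
        target_object=detector.id,
        event=audit_log.get_event_id("DETECTOR_REMOVE"),
        data={"name": detector.name, "type": detector.type},
    )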
1 change: 0 additions & 1 deletion src/sentry/workflow_engine/endpoints/urls.py
@@ -10,7 +10,6 @@
# - GET /detector w/ filters
# - GET /detector/:id
# - PUT /detector/:id
# - DELETE /detector/:id

# Remaining Workflows Endpoints
# - GET /workflow w/ filters
3 changes: 2 additions & 1 deletion src/sentry/workflow_engine/models/workflow.py
@@ -10,7 +10,6 @@
from sentry.db.models.fields.hybrid_cloud_foreign_key import HybridCloudForeignKey
from sentry.models.owner_base import OwnerModel
from sentry.workflow_engine.models.data_condition import DataCondition, is_slow_condition
from sentry.workflow_engine.processors.data_condition_group import evaluate_condition_group
from sentry.workflow_engine.types import WorkflowJob

from .json_config import JSONConfigBase
@@ -72,6 +71,8 @@ def evaluate_trigger_conditions(self, job: WorkflowJob) -> bool:
        Evaluate the conditions for the workflow trigger and return whether the evaluation was successful.
        If there aren't any workflow trigger conditions, the workflow is considered triggered.
        """
        from sentry.workflow_engine.processors.data_condition_group import evaluate_condition_group
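        # NOTE: this import was moved from module level into the method body in
        # this PR, presumably to break an import cycle with the newly added
        # deletion modules (an assumption; the diff does not state the reason).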

        if self.when_condition_group is None:
            return True

64 changes: 64 additions & 0 deletions tests/sentry/deletions/test_detector.py
@@ -0,0 +1,64 @@
from sentry.deletions.tasks.scheduled import run_scheduled_deletions
from sentry.incidents.grouptype import MetricAlertFire
from sentry.snuba.models import QuerySubscription
from sentry.testutils.hybrid_cloud import HybridCloudTestMixin
from sentry.workflow_engine.models import (
    DataCondition,
    DataConditionGroup,
    DataSource,
    DataSourceDetector,
    Detector,
    DetectorWorkflow,
)
from tests.sentry.workflow_engine.test_base import BaseWorkflowTest


class DeleteDetectorTest(BaseWorkflowTest, HybridCloudTestMixin):
    def setUp(self):
        self.data_condition_group = self.create_data_condition_group()
        self.data_condition = self.create_data_condition(condition_group=self.data_condition_group)
        self.snuba_query = self.create_snuba_query()
        self.subscription = QuerySubscription.objects.create(
            project=self.project,
            status=QuerySubscription.Status.ACTIVE.value,
            subscription_id="123",
            snuba_query=self.snuba_query,
        )
        self.data_source = self.create_data_source(
            organization=self.organization, query_id=self.subscription.id
        )
        self.detector = self.create_detector(
            project_id=self.project.id,
            name="Test Detector",
            type=MetricAlertFire.slug,
            workflow_condition_group=self.data_condition_group,
        )
        self.workflow = self.create_workflow()
        self.data_source_detector = self.create_data_source_detector(
            data_source=self.data_source, detector=self.detector
        )
        self.detector_workflow = DetectorWorkflow.objects.create(
            detector=self.detector, workflow=self.workflow
        )

    def test_simple(self):
        data_source_2 = self.create_data_source(organization=self.organization)
        data_source_detector_2 = self.create_data_source_detector(
            data_source=data_source_2, detector=self.detector
        )
        self.ScheduledDeletion.schedule(instance=self.detector, days=0)

        with self.tasks():
            run_scheduled_deletions()

        assert not Detector.objects.filter(id=self.detector.id).exists()
        assert not DataSourceDetector.objects.filter(
            id__in=[self.data_source_detector.id, data_source_detector_2.id]
        ).exists()
        assert not DetectorWorkflow.objects.filter(id=self.detector_workflow.id).exists()
        assert not DataConditionGroup.objects.filter(id=self.data_condition_group.id).exists()
        assert not DataCondition.objects.filter(id=self.data_condition.id).exists()
        assert not DataSource.objects.filter(
            id__in=[self.data_source.id, data_source_2.id]
        ).exists()
        assert not QuerySubscription.objects.filter(id=self.subscription.id).exists()

Review thread on setUp (attached to self.create_snuba_query()):
Member Author: If the query subscription is deleted, does that mean we always want to delete the related snuba query? We don't have this set up today, but I'm not sure why; if I implemented it for this it could affect other things.
Member: Yeah, we could do that. In the past we didn't, because potentially one snuba query could be shared, but in practice we haven't really used that. Could make sense to just check that there aren't other links to the snuba query and then delete it if orphaned.
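Following up on the review exchange above, a hypothetical version of DataSourceDeletionTask that also removes the SnubaQuery when it is orphaned; the snuba_query_id field is assumed from QuerySubscription's foreign key, and none of this is in the PR:

    class DataSourceDeletionTask(ModelDeletionTask[DataSource]):
        def get_child_relations(self, instance: DataSource) -> list[BaseRelation]:
            from sentry.snuba.models import QuerySubscription, SnubaQuery

            relations: list[BaseRelation] = [
                ModelRelation(QuerySubscription, {"id": instance.query_id})
            ]

            subscription = QuerySubscription.objects.filter(id=instance.query_id).first()
            if subscription is not None:
                shared = (
                    QuerySubscription.objects.filter(snuba_query_id=subscription.snuba_query_id)
                    .exclude(id=subscription.id)
                    .exists()
                )
                # Only delete the SnubaQuery when no other subscription points at it.
                if not shared:
                    relations.append(ModelRelation(SnubaQuery, {"id": subscription.snuba_query_id}))

            return relations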
@@ -1,6 +1,9 @@
from sentry.api.serializers import serialize
from sentry.deletions.models.scheduleddeletion import RegionScheduledDeletion
from sentry.grouping.grouptype import ErrorGroupType
from sentry.incidents.grouptype import MetricAlertFire
from sentry.testutils.cases import APITestCase
from sentry.testutils.outbox import outbox_runner
from sentry.testutils.silo import region_silo_test


@@ -10,16 +13,61 @@ class ProjectDetectorDetailsBaseTest(APITestCase):
    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)
        self.data_source = self.create_data_source(organization=self.organization)
        self.data_condition_group = self.create_data_condition_group()
        self.detector = self.create_detector(
            project_id=self.project.id,
            name="Test Detector",
            type=MetricAlertFire.slug,
            workflow_condition_group=self.data_condition_group,
        )
        self.data_source_detector = self.create_data_source_detector(
            data_source=self.data_source, detector=self.detector
        )


@region_silo_test
class ProjectDetectorIndexGetTest(ProjectDetectorDetailsBaseTest):
    def test_simple(self):
        response = self.get_success_response(
            self.organization.slug, self.project.slug, self.detector.id
        )
        assert response.data == serialize(self.detector)

    def test_does_not_exist(self):
        self.get_error_response(self.organization.slug, self.project.slug, 3, status_code=404)


@region_silo_test
class ProjectDetectorIndexDeleteTest(ProjectDetectorDetailsBaseTest):
method = "DELETE"

def test_simple(self):
with outbox_runner():
self.get_success_response(self.organization.slug, self.project.slug, self.detector.id)

assert RegionScheduledDeletion.objects.filter(
model_name="Detector", object_id=self.detector.id
).exists()

def test_error_group_type(self):
"""
Test that we do not delete the required error detector
"""
data_condition_group = self.create_data_condition_group()
error_detector = self.create_detector(
project_id=self.project.id,
name="Error Detector",
type=ErrorGroupType.slug,
workflow_condition_group=data_condition_group,
)
with outbox_runner():
self.get_error_response(
self.organization.slug, self.project.slug, error_detector.id, status_code=403
)

assert not RegionScheduledDeletion.objects.filter(
model_name="Detector", object_id=error_detector.id
).exists()