diff --git a/dojo/announcement/views.py b/dojo/announcement/views.py
index 6b0cb16bc3c..26160c3236b 100644
--- a/dojo/announcement/views.py
+++ b/dojo/announcement/views.py
@@ -81,12 +81,11 @@ def dismiss_announcement(request):
extra_tags="alert-success",
)
return HttpResponseRedirect("dashboard")
- else:
- messages.add_message(
- request,
- messages.ERROR,
- _("Failed to remove announcement."),
- extra_tags="alert-danger",
- )
- return render(request, "dojo/dismiss_announcement.html")
+ messages.add_message(
+ request,
+ messages.ERROR,
+ _("Failed to remove announcement."),
+ extra_tags="alert-danger",
+ )
+ return render(request, "dojo/dismiss_announcement.html")
return render(request, "dojo/dismiss_announcement.html")
diff --git a/dojo/api_v2/mixins.py b/dojo/api_v2/mixins.py
index e32683c3742..6c6b4792757 100644
--- a/dojo/api_v2/mixins.py
+++ b/dojo/api_v2/mixins.py
@@ -29,8 +29,7 @@ def delete_preview(self, request, pk=None):
def flatten(elem):
if isinstance(elem, list):
return itertools.chain.from_iterable(map(flatten, elem))
- else:
- return [elem]
+ return [elem]
rels = [
{
diff --git a/dojo/api_v2/permissions.py b/dojo/api_v2/permissions.py
index f7669826830..fe508c92b1b 100644
--- a/dojo/api_v2/permissions.py
+++ b/dojo/api_v2/permissions.py
@@ -35,8 +35,7 @@ def check_post_permission(request, post_model, post_pk, post_permission):
raise ParseError(msg)
object = get_object_or_404(post_model, pk=request.data.get(post_pk))
return user_has_permission(request.user, object, post_permission)
- else:
- return True
+ return True
def check_object_permission(
@@ -49,14 +48,13 @@ def check_object_permission(
):
if request.method == "GET":
return user_has_permission(request.user, object, get_permission)
- elif request.method == "PUT" or request.method == "PATCH":
+ if request.method == "PUT" or request.method == "PATCH":
return user_has_permission(request.user, object, put_permission)
- elif request.method == "DELETE":
+ if request.method == "DELETE":
return user_has_permission(request.user, object, delete_permission)
- elif request.method == "POST":
+ if request.method == "POST":
return user_has_permission(request.user, object, post_permission)
- else:
- return False
+ return False
class UserHasAppAnalysisPermission(permissions.BasePermission):
@@ -113,12 +111,11 @@ def has_permission(self, request, view):
return user_has_configuration_permission(
request.user, "auth.view_group",
)
- elif request.method == "POST":
+ if request.method == "POST":
return user_has_configuration_permission(
request.user, "auth.add_group",
)
- else:
- return True
+ return True
def has_object_permission(self, request, view, obj):
if request.method == "GET":
@@ -130,14 +127,13 @@ def has_object_permission(self, request, view, obj):
) and user_has_permission(
request.user, obj, Permissions.Group_View,
)
- else:
- return check_object_permission(
- request,
- obj,
- Permissions.Group_View,
- Permissions.Group_Edit,
- Permissions.Group_Delete,
- )
+ return check_object_permission(
+ request,
+ obj,
+ Permissions.Group_View,
+ Permissions.Group_Edit,
+ Permissions.Group_Delete,
+ )
class UserHasDojoGroupMemberPermission(permissions.BasePermission):
@@ -188,8 +184,7 @@ def has_permission(self, request, view):
)
)
return has_permission_result
- else:
- return True
+ return True
def has_object_permission(self, request, view, obj):
has_permission_result = True
@@ -293,9 +288,8 @@ def has_permission(self, request, view):
return check_post_permission(
request, Product, "product", Permissions.Engagement_Add,
)
- else:
- # related object only need object permission
- return True
+ # related object only need object permission
+ return True
def has_object_permission(self, request, view, obj):
if UserHasEngagementPermission.path_engagement_post.match(
@@ -308,15 +302,14 @@ def has_object_permission(self, request, view, obj):
Permissions.Engagement_Edit,
Permissions.Engagement_Delete,
)
- else:
- return check_object_permission(
- request,
- obj,
- Permissions.Engagement_View,
- Permissions.Engagement_Edit,
- Permissions.Engagement_Edit,
- Permissions.Engagement_Edit,
- )
+ return check_object_permission(
+ request,
+ obj,
+ Permissions.Engagement_View,
+ Permissions.Engagement_Edit,
+ Permissions.Engagement_Edit,
+ Permissions.Engagement_Edit,
+ )
class UserHasRiskAcceptancePermission(permissions.BasePermission):
@@ -334,9 +327,8 @@ def has_permission(self, request, view):
return check_post_permission(
request, Product, "product", Permissions.Risk_Acceptance,
)
- else:
- # related object only need object permission
- return True
+ # related object only need object permission
+ return True
def has_object_permission(self, request, view, obj):
if UserHasRiskAcceptancePermission.path_risk_acceptance_post.match(
@@ -351,15 +343,14 @@ def has_object_permission(self, request, view, obj):
Permissions.Risk_Acceptance,
Permissions.Risk_Acceptance,
)
- else:
- return check_object_permission(
- request,
- obj,
- Permissions.Risk_Acceptance,
- Permissions.Risk_Acceptance,
- Permissions.Risk_Acceptance,
- Permissions.Risk_Acceptance,
- )
+ return check_object_permission(
+ request,
+ obj,
+ Permissions.Risk_Acceptance,
+ Permissions.Risk_Acceptance,
+ Permissions.Risk_Acceptance,
+ Permissions.Risk_Acceptance,
+ )
class UserHasFindingPermission(permissions.BasePermission):
@@ -382,9 +373,8 @@ def has_permission(self, request, view):
return check_post_permission(
request, Test, "test", Permissions.Finding_Add,
)
- else:
- # related object only need object permission
- return True
+ # related object only need object permission
+ return True
def has_object_permission(self, request, view, obj):
if (
@@ -402,15 +392,14 @@ def has_object_permission(self, request, view, obj):
Permissions.Finding_Edit,
Permissions.Finding_Delete,
)
- else:
- return check_object_permission(
- request,
- obj,
- Permissions.Finding_View,
- Permissions.Finding_Edit,
- Permissions.Finding_Edit,
- Permissions.Finding_Edit,
- )
+ return check_object_permission(
+ request,
+ obj,
+ Permissions.Finding_View,
+ Permissions.Finding_Edit,
+ Permissions.Finding_Edit,
+ Permissions.Finding_Edit,
+ )
class UserHasImportPermission(permissions.BasePermission):
@@ -435,7 +424,7 @@ def has_permission(self, request, view):
return user_has_permission(
request.user, engagement, Permissions.Import_Scan_Result,
)
- elif engagement_id := converted_dict.get("engagement_id"):
+ if engagement_id := converted_dict.get("engagement_id"):
# engagement_id doesn't exist
msg = f'Engagement "{engagement_id}" does not exist'
raise serializers.ValidationError(msg)
@@ -452,19 +441,19 @@ def has_permission(self, request, view):
converted_dict.get("product_type"),
"Need engagement_id or product_name + engagement_name to perform import",
)
- else:
- # the engagement doesn't exist, so we need to check if the user has
- # requested and is allowed to use auto_create
- return check_auto_create_permission(
- request.user,
- converted_dict.get("product"),
- converted_dict.get("product_name"),
- converted_dict.get("engagement"),
- converted_dict.get("engagement_name"),
- converted_dict.get("product_type"),
- converted_dict.get("product_type_name"),
- "Need engagement_id or product_name + engagement_name to perform import",
- )
+ return None
+ # the engagement doesn't exist, so we need to check if the user has
+ # requested and is allowed to use auto_create
+ return check_auto_create_permission(
+ request.user,
+ converted_dict.get("product"),
+ converted_dict.get("product_name"),
+ converted_dict.get("engagement"),
+ converted_dict.get("engagement_name"),
+ converted_dict.get("product_type"),
+ converted_dict.get("product_type_name"),
+ "Need engagement_id or product_name + engagement_name to perform import",
+ )
class UserHasMetaImportPermission(permissions.BasePermission):
@@ -490,13 +479,12 @@ def has_permission(self, request, view):
return user_has_permission(
request.user, product, Permissions.Import_Scan_Result,
)
- elif product_id := converted_dict.get("product_id"):
+ if product_id := converted_dict.get("product_id"):
# product_id doesn't exist
msg = f'Product "{product_id}" does not exist'
raise serializers.ValidationError(msg)
- else:
- msg = "Need product_id or product_name to perform import"
- raise serializers.ValidationError(msg)
+ msg = "Need product_id or product_name to perform import"
+ raise serializers.ValidationError(msg)
class UserHasProductPermission(permissions.BasePermission):
@@ -556,8 +544,7 @@ def has_permission(self, request, view):
return user_has_global_permission(
request.user, Permissions.Product_Type_Add,
)
- else:
- return True
+ return True
def has_object_permission(self, request, view, obj):
return check_object_permission(
@@ -631,7 +618,7 @@ def has_permission(self, request, view):
return user_has_permission(
request.user, test, Permissions.Import_Scan_Result,
)
- elif test_id := converted_dict.get("test_id"):
+ if test_id := converted_dict.get("test_id"):
# test_id doesn't exist
msg = f'Test "{test_id}" does not exist'
raise serializers.ValidationError(msg)
@@ -648,19 +635,19 @@ def has_permission(self, request, view):
converted_dict.get("product_type"),
"Need test_id or product_name + engagement_name + scan_type to perform reimport",
)
- else:
- # the test doesn't exist, so we need to check if the user has
- # requested and is allowed to use auto_create
- return check_auto_create_permission(
- request.user,
- converted_dict.get("product"),
- converted_dict.get("product_name"),
- converted_dict.get("engagement"),
- converted_dict.get("engagement_name"),
- converted_dict.get("product_type"),
- converted_dict.get("product_type_name"),
- "Need test_id or product_name + engagement_name + scan_type to perform reimport",
- )
+ return None
+ # the test doesn't exist, so we need to check if the user has
+ # requested and is allowed to use auto_create
+ return check_auto_create_permission(
+ request.user,
+ converted_dict.get("product"),
+ converted_dict.get("product_name"),
+ converted_dict.get("engagement"),
+ converted_dict.get("engagement_name"),
+ converted_dict.get("product_type"),
+ converted_dict.get("product_type_name"),
+ "Need test_id or product_name + engagement_name + scan_type to perform reimport",
+ )
class UserHasTestPermission(permissions.BasePermission):
@@ -676,9 +663,8 @@ def has_permission(self, request, view):
return check_post_permission(
request, Engagement, "engagement", Permissions.Test_Add,
)
- else:
- # related object only need object permission
- return True
+ # related object only need object permission
+ return True
def has_object_permission(self, request, view, obj):
if UserHasTestPermission.path_tests_post.match(
@@ -691,15 +677,14 @@ def has_object_permission(self, request, view, obj):
Permissions.Test_Edit,
Permissions.Test_Delete,
)
- else:
- return check_object_permission(
- request,
- obj,
- Permissions.Test_View,
- Permissions.Test_Edit,
- Permissions.Test_Edit,
- Permissions.Test_Edit,
- )
+ return check_object_permission(
+ request,
+ obj,
+ Permissions.Test_View,
+ Permissions.Test_Edit,
+ Permissions.Test_Edit,
+ Permissions.Test_Edit,
+ )
class UserHasTestImportPermission(permissions.BasePermission):
@@ -776,8 +761,7 @@ def has_permission(self, request, view):
)
)
return has_permission_result
- else:
- return True
+ return True
def has_object_permission(self, request, view, obj):
has_permission_result = True
@@ -840,8 +824,7 @@ def has_permission(self, request, view):
)
)
return has_permission_result
- else:
- return True
+ return True
def has_object_permission(self, request, view, obj):
has_permission_result = True
@@ -934,9 +917,8 @@ def raise_no_auto_create_import_validation_error(
if product_type_name:
msg = f'Product "{product_name}" does not exist in Product_Type "{product_type_name}"'
raise serializers.ValidationError(msg)
- else:
- msg = f'Product "{product_name}" does not exist'
- raise serializers.ValidationError(msg)
+ msg = f'Product "{product_name}" does not exist'
+ raise serializers.ValidationError(msg)
if engagement_name and not engagement:
msg = f'Engagement "{engagement_name}" does not exist in Product "{product_name}"'
@@ -1021,12 +1003,11 @@ def check_auto_create_permission(
# new product type can be created with current user as owner, so
# all objects in it can be created as well
return True
- else:
- if not user_has_permission(
- user, product_type, Permissions.Product_Type_Add_Product,
- ):
- msg = f'No permission to create products in product_type "{product_type}"'
- raise PermissionDenied(msg)
+ if not user_has_permission(
+ user, product_type, Permissions.Product_Type_Add_Product,
+ ):
+ msg = f'No permission to create products in product_type "{product_type}"'
+ raise PermissionDenied(msg)
# product can be created, so objects in it can be created as well
return True
diff --git a/dojo/api_v2/serializers.py b/dojo/api_v2/serializers.py
index dc8acb40285..87ea0003d49 100644
--- a/dojo/api_v2/serializers.py
+++ b/dojo/api_v2/serializers.py
@@ -227,9 +227,7 @@ def to_internal_value(self, data):
substrings = re.findall(r'(?:"[^"]*"|[^",]+)', s)
data_safe.extend(substrings)
- internal_value = tagulous.utils.render_tags(data_safe)
-
- return internal_value
+ return tagulous.utils.render_tags(data_safe)
def to_representation(self, value):
if not isinstance(value, list):
@@ -305,8 +303,7 @@ def __str__(self):
return json.dumps(
self, sort_keys=True, indent=4, separators=(",", ": "),
)
- else:
- return json.dumps(self)
+ return json.dumps(self)
class RequestResponseSerializerField(serializers.ListSerializer):
@@ -556,8 +553,7 @@ def validate(self, data):
):
msg = "Update of password though API is not allowed"
raise ValidationError(msg)
- else:
- return super().validate(data)
+ return super().validate(data)
class UserContactInfoSerializer(serializers.ModelSerializer):
@@ -822,6 +818,7 @@ def validate(self, data):
)
raise ValidationError(msg)
return data
+ return None
class RawFileSerializer(serializers.ModelSerializer):
@@ -1074,8 +1071,7 @@ def to_representation(self, data):
"title": file.title,
},
)
- new_data = {"engagement_id": engagement.id, "files": new_files}
- return new_data
+ return {"engagement_id": engagement.id, "files": new_files}
class EngagementCheckListSerializer(serializers.ModelSerializer):
@@ -1147,8 +1143,7 @@ def run_validators(self, initial_data):
if "finding, endpoint must make a unique set" in str(exc):
msg = "This endpoint-finding relation already exists"
raise serializers.ValidationError(msg) from exc
- else:
- raise
+ raise
def create(self, validated_data):
endpoint = validated_data.get("endpoint")
@@ -1161,8 +1156,7 @@ def create(self, validated_data):
if "finding, endpoint must make a unique set" in str(ie):
msg = "This endpoint-finding relation already exists"
raise serializers.ValidationError(msg)
- else:
- raise
+ raise
status.mitigated = validated_data.get("mitigated", False)
status.false_positive = validated_data.get("false_positive", False)
status.out_of_scope = validated_data.get("out_of_scope", False)
@@ -1178,8 +1172,7 @@ def update(self, instance, validated_data):
if "finding, endpoint must make a unique set" in str(ie):
msg = "This endpoint-finding relation already exists"
raise serializers.ValidationError(msg)
- else:
- raise
+ raise
class EndpointSerializer(TaggitSerializer, serializers.ModelSerializer):
@@ -1440,8 +1433,7 @@ def to_representation(self, data):
"title": file.title,
},
)
- new_data = {"test_id": test.id, "files": new_files}
- return new_data
+ return {"test_id": test.id, "files": new_files}
class TestImportFindingActionSerializer(serializers.ModelSerializer):
@@ -1697,8 +1689,7 @@ def get_related_fields(self, obj):
return FindingRelatedFieldsSerializer(
required=False,
).to_representation(obj)
- else:
- return None
+ return None
def get_display_status(self, obj) -> str:
return obj.status()
@@ -1742,8 +1733,7 @@ def update(self, instance, validated_data):
# not sure why we are returning a tag_object, but don't want to change
# too much now as we're just fixing a bug
- tag_object = self._save_tags(instance, to_be_tagged)
- return tag_object
+ return self._save_tags(instance, to_be_tagged)
def validate(self, data):
if self.context["request"].method == "PATCH":
@@ -1879,8 +1869,7 @@ def create(self, validated_data):
# not sure why we are returning a tag_object, but don't want to change
# too much now as we're just fixing a bug
- tag_object = self._save_tags(new_finding, to_be_tagged)
- return tag_object
+ return self._save_tags(new_finding, to_be_tagged)
def validate(self, data):
if "reporter" not in data:
@@ -2796,8 +2785,7 @@ def to_representation(self, data):
"title": file.title,
},
)
- new_data = {"finding_id": finding.id, "files": new_files}
- return new_data
+ return {"finding_id": finding.id, "files": new_files}
class FindingCloseSerializer(serializers.ModelSerializer):
@@ -3054,10 +3042,9 @@ class QuestionnaireQuestionSerializer(serializers.ModelSerializer):
def to_representation(self, instance):
if isinstance(instance, TextQuestion):
return TextQuestionSerializer(instance=instance).data
- elif isinstance(instance, ChoiceQuestion):
+ if isinstance(instance, ChoiceQuestion):
return ChoiceQuestionSerializer(instance=instance).data
- else:
- return QuestionSerializer(instance=instance).data
+ return QuestionSerializer(instance=instance).data
class Meta:
model = Question
@@ -3094,10 +3081,9 @@ class QuestionnaireAnswerSerializer(serializers.ModelSerializer):
def to_representation(self, instance):
if isinstance(instance, TextAnswer):
return TextAnswerSerializer(instance=instance).data
- elif isinstance(instance, ChoiceAnswer):
+ if isinstance(instance, ChoiceAnswer):
return ChoiceAnswerSerializer(instance=instance).data
- else:
- return AnswerSerializer(instance=instance).data
+ return AnswerSerializer(instance=instance).data
class Meta:
model = Answer
@@ -3171,8 +3157,7 @@ def create(self, validated_data):
if 'duplicate key value violates unique constraint "dojo_announcement_pkey"' in str(e):
msg = "No more than one Announcement is allowed"
raise serializers.ValidationError(msg)
- else:
- raise
+ raise
class NotificationWebhooksSerializer(serializers.ModelSerializer):
diff --git a/dojo/api_v2/views.py b/dojo/api_v2/views.py
index 7ae9925479a..ae77e923553 100644
--- a/dojo/api_v2/views.py
+++ b/dojo/api_v2/views.py
@@ -879,8 +879,7 @@ def get_queryset(self):
def get_serializer_class(self):
if self.request and self.request.method == "POST":
return serializers.FindingCreateSerializer
- else:
- return serializers.FindingSerializer
+ return serializers.FindingSerializer
@extend_schema(
methods=["POST"],
@@ -1227,10 +1226,9 @@ def remove_tags(self, request, pk=None):
{"success": "Tag(s) Removed"},
status=status.HTTP_204_NO_CONTENT,
)
- else:
- return Response(
- delete_tags.errors, status=status.HTTP_400_BAD_REQUEST,
- )
+ return Response(
+ delete_tags.errors, status=status.HTTP_400_BAD_REQUEST,
+ )
@extend_schema(
responses={
@@ -1368,10 +1366,9 @@ def _add_metadata(self, request, finding):
)
return Response(data=metadata_data.data, status=status.HTTP_200_OK)
- else:
- return Response(
- metadata_data.errors, status=status.HTTP_400_BAD_REQUEST,
- )
+ return Response(
+ metadata_data.errors, status=status.HTTP_400_BAD_REQUEST,
+ )
def _remove_metadata(self, request, finding):
name = request.query_params.get("name", None)
@@ -1458,13 +1455,13 @@ def metadata(self, request, pk=None):
if request.method == "GET":
return self._get_metadata(request, finding)
- elif request.method == "POST":
+ if request.method == "POST":
return self._add_metadata(request, finding)
- elif request.method == "PUT":
+ if request.method == "PUT":
return self._edit_metadata(request, finding)
- elif request.method == "PATCH":
+ if request.method == "PATCH":
return self._edit_metadata(request, finding)
- elif request.method == "DELETE":
+ if request.method == "DELETE":
return self._remove_metadata(request, finding)
return Response(
@@ -2092,8 +2089,7 @@ def get_queryset(self):
def get_serializer_class(self):
if self.request and self.request.method == "POST":
return serializers.StubFindingCreateSerializer
- else:
- return serializers.StubFindingSerializer
+ return serializers.StubFindingSerializer
# Authorization: authenticated, configuration
@@ -2145,8 +2141,7 @@ def get_serializer_class(self):
if self.action == "accept_risks":
return ra_api.AcceptedRiskSerializer
return serializers.TestCreateSerializer
- else:
- return serializers.TestSerializer
+ return serializers.TestSerializer
@extend_schema(
request=serializers.ReportGenerateOptionSerializer,
diff --git a/dojo/apps.py b/dojo/apps.py
index e7a39ab5433..fd3a06575fd 100644
--- a/dojo/apps.py
+++ b/dojo/apps.py
@@ -92,8 +92,7 @@ def get_model_fields_with_extra(model, extra_fields=()):
def get_model_fields(default_fields, extra_fields=()):
- combined = default_fields + extra_fields
- return combined
+ return default_fields + extra_fields
def get_model_default_fields(model):
diff --git a/dojo/authorization/authorization.py b/dojo/authorization/authorization.py
index a542d7c6e01..8f013b60061 100644
--- a/dojo/authorization/authorization.py
+++ b/dojo/authorization/authorization.py
@@ -66,7 +66,7 @@ def user_has_permission(user, obj, permission):
if role_has_permission(product_type_group.role.id, permission):
return True
return False
- elif (
+ if (
isinstance(obj, Product)
and permission.value >= Permissions.Product_View.value
):
@@ -87,51 +87,51 @@ def user_has_permission(user, obj, permission):
if role_has_permission(product_group.role.id, permission):
return True
return False
- elif (
+ if (
isinstance(obj, Engagement)
and permission in Permissions.get_engagement_permissions()
):
return user_has_permission(user, obj.product, permission)
- elif (
+ if (
isinstance(obj, Test)
and permission in Permissions.get_test_permissions()
):
return user_has_permission(user, obj.engagement.product, permission)
- elif (
+ if (
isinstance(obj, Finding) or isinstance(obj, Stub_Finding)
) and permission in Permissions.get_finding_permissions():
return user_has_permission(
user, obj.test.engagement.product, permission,
)
- elif (
+ if (
isinstance(obj, Finding_Group)
and permission in Permissions.get_finding_group_permissions()
):
return user_has_permission(
user, obj.test.engagement.product, permission,
)
- elif (
+ if (
isinstance(obj, Endpoint)
and permission in Permissions.get_endpoint_permissions()
):
return user_has_permission(user, obj.product, permission)
- elif (
+ if (
isinstance(obj, Languages)
and permission in Permissions.get_language_permissions()
):
return user_has_permission(user, obj.product, permission)
- elif (
+ if (
isinstance(obj, App_Analysis)
and permission in Permissions.get_technology_permissions()
):
return user_has_permission(user, obj.product, permission)
- elif (
+ if (
isinstance(obj, Product_API_Scan_Configuration)
and permission
in Permissions.get_product_api_scan_configuration_permissions()
):
return user_has_permission(user, obj.product, permission)
- elif (
+ if (
isinstance(obj, Product_Type_Member)
and permission in Permissions.get_product_type_member_permissions()
):
@@ -140,9 +140,8 @@ def user_has_permission(user, obj, permission):
return obj.user == user or user_has_permission(
user, obj.product_type, permission,
)
- else:
- return user_has_permission(user, obj.product_type, permission)
- elif (
+ return user_has_permission(user, obj.product_type, permission)
+ if (
isinstance(obj, Product_Member)
and permission in Permissions.get_product_member_permissions()
):
@@ -151,19 +150,18 @@ def user_has_permission(user, obj, permission):
return obj.user == user or user_has_permission(
user, obj.product, permission,
)
- else:
- return user_has_permission(user, obj.product, permission)
- elif (
+ return user_has_permission(user, obj.product, permission)
+ if (
isinstance(obj, Product_Type_Group)
and permission in Permissions.get_product_type_group_permissions()
):
return user_has_permission(user, obj.product_type, permission)
- elif (
+ if (
isinstance(obj, Product_Group)
and permission in Permissions.get_product_group_permissions()
):
return user_has_permission(user, obj.product, permission)
- elif (
+ if (
isinstance(obj, Dojo_Group)
and permission in Permissions.get_group_permissions()
):
@@ -173,7 +171,7 @@ def user_has_permission(user, obj, permission):
return group_member is not None and role_has_permission(
group_member.role.id, permission,
)
- elif (
+ if (
isinstance(obj, Dojo_Group_Member)
and permission in Permissions.get_group_member_permissions()
):
@@ -182,9 +180,8 @@ def user_has_permission(user, obj, permission):
return obj.user == user or user_has_permission(
user, obj.group, permission,
)
- else:
- return user_has_permission(user, obj.group, permission)
- elif (
+ return user_has_permission(user, obj.group, permission)
+ if (
isinstance(obj, Cred_Mapping)
and permission in Permissions.get_credential_permissions()
):
@@ -202,9 +199,9 @@ def user_has_permission(user, obj, permission):
return user_has_permission(
user, obj.finding.test.engagement.product, permission,
)
- else:
- msg = f"No authorization implemented for class {type(obj).__name__} and permission {permission}"
- raise NoAuthorizationImplementedError(msg)
+ return None
+ msg = f"No authorization implemented for class {type(obj).__name__} and permission {permission}"
+ raise NoAuthorizationImplementedError(msg)
def user_has_global_permission(user, permission):
diff --git a/dojo/cred/queries.py b/dojo/cred/queries.py
index 4dd14385a06..28419772328 100644
--- a/dojo/cred/queries.py
+++ b/dojo/cred/queries.py
@@ -44,8 +44,6 @@ def get_authorized_cred_mappings(permission, queryset=None):
product__member=Exists(authorized_product_roles),
product__prod_type__authorized_group=Exists(authorized_product_type_groups),
product__authorized_group=Exists(authorized_product_groups))
- cred_mappings = cred_mappings.filter(
+ return cred_mappings.filter(
Q(product__prod_type__member=True) | Q(product__member=True)
| Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
-
- return cred_mappings
diff --git a/dojo/cred/views.py b/dojo/cred/views.py
index 31f923748b3..2fc373c3ac9 100644
--- a/dojo/cred/views.py
+++ b/dojo/cred/views.py
@@ -641,10 +641,8 @@ def delete_cred_controller(request, destination_url, id, ttid):
if destination_url == "cred":
return HttpResponseRedirect(reverse(destination_url))
- else:
- return HttpResponseRedirect(reverse(destination_url, args=(id, )))
- else:
- tform = CredMappingForm(instance=cred)
+ return HttpResponseRedirect(reverse(destination_url, args=(id, )))
+ tform = CredMappingForm(instance=cred)
add_breadcrumb(title="Delete Credential", top_level=False, request=request)
product_tab = None
diff --git a/dojo/decorators.py b/dojo/decorators.py
index 129106c74de..b6902b8dc10 100644
--- a/dojo/decorators.py
+++ b/dojo/decorators.py
@@ -43,8 +43,7 @@ def __wrapper__(*args, **kwargs):
countdown = kwargs.pop("countdown", 0)
if we_want_async(*args, func=func, **kwargs):
return func.apply_async(args=args, kwargs=kwargs, countdown=countdown)
- else:
- return func(*args, **kwargs)
+ return func(*args, **kwargs)
return __wrapper__
@@ -78,8 +77,7 @@ def __wrapper__(*args, **kwargs):
if _func is None:
# decorator called without parameters
return dojo_model_to_id_internal
- else:
- return dojo_model_to_id_internal(_func)
+ return dojo_model_to_id_internal(_func)
# decorator with parameters needs another wrapper layer
@@ -123,8 +121,7 @@ def __wrapper__(*args, **kwargs):
if _func is None:
# decorator called without parameters
return dojo_model_from_id_internal
- else:
- return dojo_model_from_id_internal(_func)
+ return dojo_model_from_id_internal(_func)
def get_parameter_froms_args_kwargs(args, kwargs, parameter):
diff --git a/dojo/endpoint/queries.py b/dojo/endpoint/queries.py
index 581feefc13b..684eeab7b1a 100644
--- a/dojo/endpoint/queries.py
+++ b/dojo/endpoint/queries.py
@@ -53,12 +53,10 @@ def get_authorized_endpoints(permission, queryset=None, user=None):
product__member=Exists(authorized_product_roles),
product__prod_type__authorized_group=Exists(authorized_product_type_groups),
product__authorized_group=Exists(authorized_product_groups))
- endpoints = endpoints.filter(
+ return endpoints.filter(
Q(product__prod_type__member=True) | Q(product__member=True)
| Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
- return endpoints
-
def get_authorized_endpoint_status(permission, queryset=None, user=None):
@@ -101,8 +99,6 @@ def get_authorized_endpoint_status(permission, queryset=None, user=None):
endpoint__product__member=Exists(authorized_product_roles),
endpoint__product__prod_type__authorized_group=Exists(authorized_product_type_groups),
endpoint__product__authorized_group=Exists(authorized_product_groups))
- endpoint_status = endpoint_status.filter(
+ return endpoint_status.filter(
Q(endpoint__product__prod_type__member=True) | Q(endpoint__product__member=True)
| Q(endpoint__product__prod_type__authorized_group=True) | Q(endpoint__product__authorized_group=True))
-
- return endpoint_status
diff --git a/dojo/endpoint/utils.py b/dojo/endpoint/utils.py
index be1c63fb0c0..d5c378e5e97 100644
--- a/dojo/endpoint/utils.py
+++ b/dojo/endpoint/utils.py
@@ -79,17 +79,16 @@ def endpoint_get_or_create(**kwargs):
count = qs.count()
if count == 0:
return Endpoint.objects.get_or_create(**kwargs)
- elif count == 1:
- return qs.order_by("id").first(), False
- else:
- logger.warning(
- f"Endpoints in your database are broken. "
- f"Please access {reverse('endpoint_migrate')} and migrate them to new format or remove them.",
- )
- # Get the oldest endpoint first, and return that instead
- # a datetime is not captured on the endpoint model, so ID
- # will have to work here instead
+ if count == 1:
return qs.order_by("id").first(), False
+ logger.warning(
+ f"Endpoints in your database are broken. "
+ f"Please access {reverse('endpoint_migrate')} and migrate them to new format or remove them.",
+ )
+ # Get the oldest endpoint first, and return that instead
+ # a datetime is not captured on the endpoint model, so ID
+ # will have to work here instead
+ return qs.order_by("id").first(), False
def clean_hosts_run(apps, change):
@@ -325,7 +324,7 @@ def endpoint_meta_import(file, product, create_endpoints, create_tags, create_me
'The column "hostname" must be present to map host to Endpoint.',
extra_tags="alert-danger")
return HttpResponseRedirect(reverse("import_endpoint_meta", args=(product.id, )))
- elif origin == "API":
+ if origin == "API":
msg = 'The column "hostname" must be present to map host to Endpoint.'
raise ValidationError(msg)
@@ -361,14 +360,14 @@ def endpoint_meta_import(file, product, create_endpoints, create_tags, create_me
for tag in existing_tags:
if item[0] not in tag:
continue
- else:
- # found existing. Update it
- existing_tags.remove(tag)
- break
+ # found existing. Update it
+ existing_tags.remove(tag)
+ break
existing_tags += [item[0] + ":" + item[1]]
# if tags are not supposed to be added, this value remain unchanged
endpoint.tags = existing_tags
endpoint.save()
+ return None
def remove_broken_endpoint_statuses(apps):
diff --git a/dojo/endpoint/views.py b/dojo/endpoint/views.py
index 571f4989ec2..06ee7ac24a1 100644
--- a/dojo/endpoint/views.py
+++ b/dojo/endpoint/views.py
@@ -98,9 +98,8 @@ def get_endpoint_ids(endpoints):
key = f"{e.host}-{e.product.id}"
if key in hosts:
continue
- else:
- hosts.append(key)
- ids.append(e.id)
+ hosts.append(key)
+ ids.append(e.id)
return ids
@@ -307,8 +306,7 @@ def add_meta_data(request, eid):
extra_tags="alert-success")
if "add_another" in request.POST:
return HttpResponseRedirect(reverse("add_endpoint_meta_data", args=(eid,)))
- else:
- return HttpResponseRedirect(reverse("view_endpoint", args=(eid,)))
+ return HttpResponseRedirect(reverse("view_endpoint", args=(eid,)))
else:
form = DojoMetaDataForm()
diff --git a/dojo/engagement/queries.py b/dojo/engagement/queries.py
index 9d8e9b6ae41..97eeb31bdfa 100644
--- a/dojo/engagement/queries.py
+++ b/dojo/engagement/queries.py
@@ -39,8 +39,6 @@ def get_authorized_engagements(permission):
product__member=Exists(authorized_product_roles),
product__prod_type__authorized_group=Exists(authorized_product_type_groups),
product__authorized_group=Exists(authorized_product_groups)).order_by("id")
- engagements = engagements.filter(
+ return engagements.filter(
Q(product__prod_type__member=True) | Q(product__member=True)
| Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
-
- return engagements
diff --git a/dojo/engagement/views.py b/dojo/engagement/views.py
index 777a5f7a118..d9d3cef0340 100644
--- a/dojo/engagement/views.py
+++ b/dojo/engagement/views.py
@@ -166,15 +166,13 @@ def get_filtered_engagements(request, view):
filter_string_matching = get_system_setting("filter_string_matching", False)
filter_class = EngagementDirectFilterWithoutObjectLookups if filter_string_matching else EngagementDirectFilter
- engagements = filter_class(request.GET, queryset=engagements)
-
- return engagements
+ return filter_class(request.GET, queryset=engagements)
def get_test_counts(engagements):
# Get the test counts per engagement. As a separate query, this is much
# faster than annotating the above `engagements` query.
- engagement_test_counts = {
+ return {
test["engagement"]: test["test_count"]
for test in Test.objects.filter(
engagement__in=engagements,
@@ -184,7 +182,6 @@ def get_test_counts(engagements):
test_count=Count("engagement"),
)
}
- return engagement_test_counts
def engagements(request, view):
@@ -304,9 +301,8 @@ def edit_engagement(request, eid):
if "_Add Tests" in request.POST:
return HttpResponseRedirect(
reverse("add_tests", args=(engagement.id, )))
- else:
- return HttpResponseRedirect(
- reverse("view_engagement", args=(engagement.id, )))
+ return HttpResponseRedirect(
+ reverse("view_engagement", args=(engagement.id, )))
else:
logger.debug(form.errors)
@@ -404,12 +400,11 @@ def copy_engagement(request, eid):
recipients=[engagement.lead],
icon="exclamation-triangle")
return redirect_to_return_url_or_else(request, reverse("view_engagements", args=(product.id, )))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Unable to copy engagement, please try again.",
- extra_tags="alert-danger")
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Unable to copy engagement, please try again.",
+ extra_tags="alert-danger")
product_tab = Product_Tab(product, title="Copy Engagement", tab="engagements")
return render(request, "dojo/copy_object.html", {
@@ -427,8 +422,7 @@ def get_template(self):
return "dojo/view_eng.html"
def get_risks_accepted(self, eng):
- risks_accepted = eng.risk_acceptance.all().select_related("owner").annotate(accepted_findings_count=Count("accepted_findings__id"))
- return risks_accepted
+ return eng.risk_acceptance.all().select_related("owner").annotate(accepted_findings_count=Count("accepted_findings__id"))
def get_filtered_tests(
self,
@@ -673,10 +667,10 @@ def add_tests(request, eid):
if "_Add Another Test" in request.POST:
return HttpResponseRedirect(
reverse("add_tests", args=(eng.id, )))
- elif "_Add Findings" in request.POST:
+ if "_Add Findings" in request.POST:
return HttpResponseRedirect(
reverse("add_findings", args=(new_test.id, )))
- elif "_Finished" in request.POST:
+ if "_Finished" in request.POST:
return HttpResponseRedirect(
reverse("view_engagement", args=(eng.id, )))
else:
@@ -751,8 +745,7 @@ def get_form(
"""
if request.method == "POST":
return ImportScanForm(request.POST, request.FILES, **kwargs)
- else:
- return ImportScanForm(**kwargs)
+ return ImportScanForm(**kwargs)
def get_credential_form(
self,
@@ -766,18 +759,17 @@ def get_credential_form(
"""
if request.method == "POST":
return CredMappingForm(request.POST)
- else:
- # If the engagement is not present, return an empty form
- if engagement is None:
- return CredMappingForm()
- # Otherwise get all creds in the associated engagement
- return CredMappingForm(
- initial={
- "cred_user_queryset": Cred_Mapping.objects.filter(
- engagement=engagement,
- ).order_by("cred_id"),
- },
- )
+ # If the engagement is not present, return an empty form
+ if engagement is None:
+ return CredMappingForm()
+ # Otherwise get all creds in the associated engagement
+ return CredMappingForm(
+ initial={
+ "cred_user_queryset": Cred_Mapping.objects.filter(
+ engagement=engagement,
+ ).order_by("cred_id"),
+ },
+ )
def get_jira_form(
self,
@@ -1401,8 +1393,7 @@ def view_edit_risk_acceptance(request, eid, raid, edit_mode=False):
if not errors:
logger.debug("redirecting to return_url")
return redirect_to_return_url_or_else(request, reverse("view_risk_acceptance", args=(eid, raid)))
- else:
- logger.error("errors found")
+ logger.error("errors found")
else:
if edit_mode:
@@ -1549,8 +1540,7 @@ def upload_threatmodel(request, eid):
@user_is_authorized(Engagement, Permissions.Engagement_View, "eid")
def view_threatmodel(request, eid):
eng = get_object_or_404(Engagement, pk=eid)
- response = FileResponse(open(eng.tmodel_path, "rb"))
- return response
+ return FileResponse(open(eng.tmodel_path, "rb"))
@user_is_authorized(Engagement, Permissions.Engagement_View, "eid")
@@ -1589,9 +1579,8 @@ def get_engagements(request):
if not url:
msg = "Please use the export button when exporting engagements"
raise ValidationError(msg)
- else:
- if url.startswith("url="):
- url = url[4:]
+ if url.startswith("url="):
+ url = url[4:]
path_items = list(filter(None, re.split(r"/|\?", url)))
diff --git a/dojo/filters.py b/dojo/filters.py
index 1461966c19e..35ceb205938 100644
--- a/dojo/filters.py
+++ b/dojo/filters.py
@@ -331,8 +331,7 @@ def get_tags_model_from_field_name(field):
def get_tags_label_from_model(model):
if model:
return f"Tags ({model.__name__.title()})"
- else:
- return "Tags (Unknown)"
+ return "Tags (Unknown)"
def get_finding_filterset_fields(metrics=False, similar=False, filter_string_matching=False):
@@ -780,6 +779,7 @@ def any(self, qs, name):
self.start_date = _truncate(start_date - timedelta(days=1))
self.end_date = _truncate(now() + timedelta(days=1))
return qs.all()
+ return None
def current_month(self, qs, name):
self.start_date = local_tz.localize(
@@ -1927,8 +1927,7 @@ def set_hash_codes(self, *args: list, **kwargs: dict):
def filter_queryset(self, *args: list, **kwargs: dict):
queryset = super().filter_queryset(*args, **kwargs)
queryset = get_authorized_findings(Permissions.Finding_View, queryset, self.user)
- queryset = queryset.exclude(pk=self.finding.pk)
- return queryset
+ return queryset.exclude(pk=self.finding.pk)
class SimilarFindingFilter(FindingFilter, SimilarFindingHelper):
diff --git a/dojo/finding/queries.py b/dojo/finding/queries.py
index 7f213805a49..47386e43f86 100644
--- a/dojo/finding/queries.py
+++ b/dojo/finding/queries.py
@@ -68,14 +68,12 @@ def get_authorized_findings(permission, queryset=None, user=None):
test__engagement__product__member=Exists(authorized_product_roles),
test__engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups),
test__engagement__product__authorized_group=Exists(authorized_product_groups))
- findings = findings.filter(
+ return findings.filter(
Q(test__engagement__product__prod_type__member=True)
| Q(test__engagement__product__member=True)
| Q(test__engagement__product__prod_type__authorized_group=True)
| Q(test__engagement__product__authorized_group=True))
- return findings
-
def get_authorized_stub_findings(permission):
user = get_current_user()
@@ -101,14 +99,12 @@ def get_authorized_stub_findings(permission):
test__engagement__product__member=Exists(authorized_product_roles),
test__engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups),
test__engagement__product__authorized_group=Exists(authorized_product_groups)).order_by("id")
- findings = findings.filter(
+ return findings.filter(
Q(test__engagement__product__prod_type__member=True)
| Q(test__engagement__product__member=True)
| Q(test__engagement__product__prod_type__authorized_group=True)
| Q(test__engagement__product__authorized_group=True))
- return findings
-
def get_authorized_vulnerability_ids(permission, queryset=None, user=None):
@@ -151,10 +147,8 @@ def get_authorized_vulnerability_ids(permission, queryset=None, user=None):
finding__test__engagement__product__member=Exists(authorized_product_roles),
finding__test__engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups),
finding__test__engagement__product__authorized_group=Exists(authorized_product_groups))
- vulnerability_ids = vulnerability_ids.filter(
+ return vulnerability_ids.filter(
Q(finding__test__engagement__product__prod_type__member=True)
| Q(finding__test__engagement__product__member=True)
| Q(finding__test__engagement__product__prod_type__authorized_group=True)
| Q(finding__test__engagement__product__authorized_group=True))
-
- return vulnerability_ids
diff --git a/dojo/finding/views.py b/dojo/finding/views.py
index 4b37ebc8a9a..8d453ab5fed 100644
--- a/dojo/finding/views.py
+++ b/dojo/finding/views.py
@@ -311,31 +311,29 @@ def get_test_id(self):
def filter_findings_by_object(self, findings: QuerySet[Finding]):
if product_id := self.get_product_id():
return findings.filter(test__engagement__product__id=product_id)
- elif engagement_id := self.get_engagement_id():
+ if engagement_id := self.get_engagement_id():
return findings.filter(test__engagement=engagement_id)
- elif test_id := self.get_test_id():
+ if test_id := self.get_test_id():
return findings.filter(test=test_id)
- else:
- return findings
+ return findings
def filter_findings_by_filter_name(self, findings: QuerySet[Finding]):
filter_name = self.get_filter_name()
if filter_name == "Open":
return findings.filter(finding_helper.OPEN_FINDINGS_QUERY)
- elif filter_name == "Verified":
+ if filter_name == "Verified":
return findings.filter(finding_helper.VERIFIED_FINDINGS_QUERY)
- elif filter_name == "Out of Scope":
+ if filter_name == "Out of Scope":
return findings.filter(finding_helper.OUT_OF_SCOPE_FINDINGS_QUERY)
- elif filter_name == "False Positive":
+ if filter_name == "False Positive":
return findings.filter(finding_helper.FALSE_POSITIVE_FINDINGS_QUERY)
- elif filter_name == "Inactive":
+ if filter_name == "Inactive":
return findings.filter(finding_helper.INACTIVE_FINDINGS_QUERY)
- elif filter_name == "Accepted":
+ if filter_name == "Accepted":
return findings.filter(finding_helper.ACCEPTED_FINDINGS_QUERY)
- elif filter_name == "Closed":
+ if filter_name == "Closed":
return findings.filter(finding_helper.CLOSED_FINDINGS_QUERY)
- else:
- return findings
+ return findings
def filter_findings_by_form(self, request: HttpRequest, findings: QuerySet[Finding]):
# Set up the args for the form
@@ -358,9 +356,7 @@ def filter_findings_by_form(self, request: HttpRequest, findings: QuerySet[Findi
def get_filtered_findings(self):
findings = get_authorized_findings(Permissions.Finding_View).order_by(self.get_order_by())
findings = self.filter_findings_by_object(findings)
- findings = self.filter_findings_by_filter_name(findings)
-
- return findings
+ return self.filter_findings_by_filter_name(findings)
def get_fully_filtered_findings(self, request: HttpRequest):
findings = self.get_filtered_findings()
@@ -1017,9 +1013,8 @@ def process_finding_form(self, request: HttpRequest, finding: Finding, context:
)
return finding, request, True
- else:
- add_error_message_to_response("The form has errors, please correct them below.")
- add_field_errors_to_response(context["form"])
+ add_error_message_to_response("The form has errors, please correct them below.")
+ add_field_errors_to_response(context["form"])
return finding, request, False
@@ -1074,8 +1069,7 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic
)
return request, True, push_to_jira
- else:
- add_field_errors_to_response(context["jform"])
+ add_field_errors_to_response(context["jform"])
return request, False, False
@@ -1090,8 +1084,7 @@ def process_github_form(self, request: HttpRequest, finding: Finding, context: d
add_external_issue(finding, "github")
return request, True
- else:
- add_field_errors_to_response(context["gform"])
+ add_field_errors_to_response(context["gform"])
return request, False
@@ -1316,10 +1309,9 @@ def close_finding(request, fid):
return HttpResponseRedirect(
reverse("view_test", args=(finding.test.id,)),
)
- else:
- return HttpResponseRedirect(
- reverse("close_finding", args=(finding.id,)),
- )
+ return HttpResponseRedirect(
+ reverse("close_finding", args=(finding.id,)),
+ )
product_tab = Product_Tab(
finding.test.engagement.product, title="Close", tab="findings",
@@ -1502,15 +1494,14 @@ def apply_template_cwe(request, fid):
extra_tags="alert-success",
)
return HttpResponseRedirect(reverse("view_finding", args=(fid,)))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Unable to apply CWE template finding, please try again.",
- extra_tags="alert-danger",
- )
- else:
- raise PermissionDenied
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Unable to apply CWE template finding, please try again.",
+ extra_tags="alert-danger",
+ )
+ return None
+ raise PermissionDenied
@user_is_authorized(Finding, Permissions.Finding_Edit, "fid")
@@ -1549,13 +1540,12 @@ def copy_finding(request, fid):
return redirect_to_return_url_or_else(
request, reverse("view_test", args=(test.id,)),
)
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Unable to copy finding, please try again.",
- extra_tags="alert-danger",
- )
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Unable to copy finding, please try again.",
+ extra_tags="alert-danger",
+ )
product_tab = Product_Tab(product, title="Copy Finding", tab="findings")
return render(
@@ -2002,8 +1992,7 @@ def apply_template_to_finding(request, fid, tid):
)
return HttpResponseRedirect(reverse("view_finding", args=(finding.id,)))
- else:
- return HttpResponseRedirect(reverse("view_finding", args=(finding.id,)))
+ return HttpResponseRedirect(reverse("view_finding", args=(finding.id,)))
@user_is_authorized(Test, Permissions.Finding_Add, "tid")
@@ -2063,15 +2052,14 @@ def delete_stub_finding(request, fid):
extra_tags="alert-success",
)
return HttpResponseRedirect(reverse("view_test", args=(tid,)))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Unable to delete potential finding, please try again.",
- extra_tags="alert-danger",
- )
- else:
- raise PermissionDenied
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Unable to delete potential finding, please try again.",
+ extra_tags="alert-danger",
+ )
+ return None
+ raise PermissionDenied
@user_is_authorized(Stub_Finding, Permissions.Finding_Edit, "fid")
@@ -2188,13 +2176,12 @@ def promote_to_finding(request, fid):
)
return HttpResponseRedirect(reverse("view_test", args=(test.id,)))
- else:
- form_error = True
- add_error_message_to_response(
- "The form has errors, please correct them below.",
- )
- add_field_errors_to_response(jform)
- add_field_errors_to_response(form)
+ form_error = True
+ add_error_message_to_response(
+ "The form has errors, please correct them below.",
+ )
+ add_field_errors_to_response(jform)
+ add_field_errors_to_response(form)
else:
form = PromoteFindingForm(
initial={
@@ -2356,13 +2343,12 @@ def add_template(request):
extra_tags="alert-success",
)
return HttpResponseRedirect(reverse("templates"))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Template form has error, please revise and try again.",
- extra_tags="alert-danger",
- )
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Template form has error, please revise and try again.",
+ extra_tags="alert-danger",
+ )
add_breadcrumb(title="Add Template", top_level=False, request=request)
return render(
request, "dojo/add_template.html", {"form": form, "name": "Add Template"},
@@ -2411,13 +2397,12 @@ def edit_template(request, tid):
extra_tags="alert-success",
)
return HttpResponseRedirect(reverse("templates"))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Template form has error, please revise and try again.",
- extra_tags="alert-danger",
- )
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Template form has error, please revise and try again.",
+ extra_tags="alert-danger",
+ )
count = apply_cwe_mitigation(apply_to_findings=True, template=template, update=False)
add_breadcrumb(title="Edit Template", top_level=False, request=request)
@@ -2447,15 +2432,14 @@ def delete_template(request, tid):
extra_tags="alert-success",
)
return HttpResponseRedirect(reverse("templates"))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Unable to delete Template, please revise and try again.",
- extra_tags="alert-danger",
- )
- else:
- raise PermissionDenied
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Unable to delete Template, please revise and try again.",
+ extra_tags="alert-danger",
+ )
+ return None
+ raise PermissionDenied
def download_finding_pic(request, token):
@@ -2661,13 +2645,12 @@ def merge_finding_product(request, pid):
return HttpResponseRedirect(
reverse("edit_finding", args=(finding_to_merge_into.id,)),
)
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Unable to merge findings. Findings to merge contained in finding to merge into.",
- extra_tags="alert-danger",
- )
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Unable to merge findings. Findings to merge contained in finding to merge into.",
+ extra_tags="alert-danger",
+ )
else:
messages.add_message(
request,
@@ -3137,8 +3120,7 @@ def find_available_notetypes(notes):
break
else:
available_note_types.append(note_type_id)
- queryset = Note_Type.objects.filter(id__in=available_note_types).order_by("-id")
- return queryset
+ return Note_Type.objects.filter(id__in=available_note_types).order_by("-id")
def get_missing_mandatory_notetypes(finding):
@@ -3153,8 +3135,7 @@ def get_missing_mandatory_notetypes(finding):
break
else:
notes_to_be_added.append(note_type_id)
- queryset = Note_Type.objects.filter(id__in=notes_to_be_added)
- return queryset
+ return Note_Type.objects.filter(id__in=notes_to_be_added)
@user_is_authorized(Finding, Permissions.Finding_Edit, "original_id")
diff --git a/dojo/finding_group/queries.py b/dojo/finding_group/queries.py
index aae57f53c83..39b91c02665 100644
--- a/dojo/finding_group/queries.py
+++ b/dojo/finding_group/queries.py
@@ -46,10 +46,8 @@ def get_authorized_finding_groups(permission, queryset=None, user=None):
test__engagement__product__member=Exists(authorized_product_roles),
test__engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups),
test__engagement__product__authorized_group=Exists(authorized_product_groups))
- finding_groups = finding_groups.filter(
+ return finding_groups.filter(
Q(test__engagement__product__prod_type__member=True)
| Q(test__engagement__product__member=True)
| Q(test__engagement__product__prod_type__authorized_group=True)
| Q(test__engagement__product__authorized_group=True))
-
- return finding_groups
diff --git a/dojo/forms.py b/dojo/forms.py
index acf3546285b..cdff2b53d52 100644
--- a/dojo/forms.py
+++ b/dojo/forms.py
@@ -597,8 +597,7 @@ def clean(self):
endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"])
if errors:
raise forms.ValidationError(errors)
- else:
- self.endpoints_to_add_list = endpoints_to_add_list
+ self.endpoints_to_add_list = endpoints_to_add_list
return cleaned_data
@@ -611,8 +610,7 @@ def clean_scan_date(self):
return date
def get_scan_type(self):
- TGT_scan = self.cleaned_data["scan_type"]
- return TGT_scan
+ return self.cleaned_data["scan_type"]
class ReImportScanForm(forms.Form):
@@ -1146,8 +1144,7 @@ def clean(self):
endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"])
if errors:
raise forms.ValidationError(errors)
- else:
- self.endpoints_to_add_list = endpoints_to_add_list
+ self.endpoints_to_add_list = endpoints_to_add_list
return cleaned_data
@@ -1224,8 +1221,7 @@ def clean(self):
endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"])
if errors:
raise forms.ValidationError(errors)
- else:
- self.endpoints_to_add_list = endpoints_to_add_list
+ self.endpoints_to_add_list = endpoints_to_add_list
return cleaned_data
@@ -1282,8 +1278,7 @@ def clean(self):
endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"])
if errors:
raise forms.ValidationError(errors)
- else:
- self.endpoints_to_add_list = endpoints_to_add_list
+ self.endpoints_to_add_list = endpoints_to_add_list
return cleaned_data
@@ -1406,8 +1401,7 @@ def clean(self):
endpoints_to_add_list, errors = validate_endpoints_to_add(cleaned_data["endpoints_to_add"])
if errors:
raise forms.ValidationError(errors)
- else:
- self.endpoints_to_add_list = endpoints_to_add_list
+ self.endpoints_to_add_list = endpoints_to_add_list
return cleaned_data
@@ -1677,8 +1671,7 @@ def clean(self):
endpoints_to_add_list, errors = validate_endpoints_to_add(endpoint)
if errors:
raise forms.ValidationError(errors)
- else:
- self.endpoints_to_process = endpoints_to_add_list
+ self.endpoints_to_process = endpoints_to_add_list
return cleaned_data
@@ -2683,9 +2676,7 @@ def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def clean(self):
- form_data = self.cleaned_data
-
- return form_data
+ return self.cleaned_data
class CredMappingForm(forms.ModelForm):
@@ -2972,9 +2963,9 @@ def clean(self):
if self.target == "engagement":
msg = "JIRA Project needs a JIRA Instance, JIRA Project Key, and Epic issue type name, or choose to inherit settings from product"
raise ValidationError(msg)
- else:
- msg = "JIRA Project needs a JIRA Instance, JIRA Project Key, and Epic issue type name, leave empty to have no JIRA integration setup"
- raise ValidationError(msg)
+ msg = "JIRA Project needs a JIRA Instance, JIRA Project Key, and Epic issue type name, leave empty to have no JIRA integration setup"
+ raise ValidationError(msg)
+ return None
class GITHUBFindingForm(forms.Form):
@@ -3158,8 +3149,7 @@ class LoginBanner(forms.Form):
)
def clean(self):
- cleaned_data = super().clean()
- return cleaned_data
+ return super().clean()
class AnnouncementCreateForm(forms.ModelForm):
@@ -3393,7 +3383,7 @@ def clean_expiration(self):
if expiration < today:
msg = "The expiration cannot be in the past"
raise forms.ValidationError(msg)
- elif expiration.day == today.day:
+ if expiration.day == today.day:
msg = "The expiration cannot be today"
raise forms.ValidationError(msg)
else:
@@ -3483,8 +3473,7 @@ def __init__(self, attrs=None):
def decompress(self, value):
if value:
return pickle.loads(value)
- else:
- return [None, None, None, None, None, None]
+ return [None, None, None, None, None, None]
def format_output(self, rendered_widgets):
        return "\n".join(rendered_widgets)
diff --git a/dojo/github_issue_link/views.py b/dojo/github_issue_link/views.py
index f7bb90a37f2..e0ddabd1deb 100644
--- a/dojo/github_issue_link/views.py
+++ b/dojo/github_issue_link/views.py
@@ -52,11 +52,11 @@ def new_github(request):
"Unable to authenticate on GitHub.",
extra_tags="alert-danger")
return HttpResponseRedirect(reverse("github"))
- else:
- gform = GITHUBForm()
- add_breadcrumb(title="New GitHub Configuration", top_level=False, request=request)
- return render(request, "dojo/new_github.html",
- {"gform": gform})
+ return None
+ gform = GITHUBForm()
+ add_breadcrumb(title="New GitHub Configuration", top_level=False, request=request)
+ return render(request, "dojo/new_github.html",
+ {"gform": gform})
@user_is_configuration_authorized("dojo.view_github_conf")
diff --git a/dojo/group/queries.py b/dojo/group/queries.py
index a8b70e6b761..dedb0d35e14 100644
--- a/dojo/group/queries.py
+++ b/dojo/group/queries.py
@@ -38,8 +38,7 @@ def get_authorized_group_members(permission):
def get_authorized_group_members_for_user(user):
groups = get_authorized_groups(Permissions.Group_View)
- group_members = Dojo_Group_Member.objects.filter(user=user, group__in=groups).order_by("group__name").select_related("role", "group")
- return group_members
+ return Dojo_Group_Member.objects.filter(user=user, group__in=groups).order_by("group__name").select_related("role", "group")
def get_group_members_for_group(group):
diff --git a/dojo/group/views.py b/dojo/group/views.py
index 4f7dea473b5..fa2fd1e65b1 100644
--- a/dojo/group/views.py
+++ b/dojo/group/views.py
@@ -185,12 +185,11 @@ def process_forms(self, request: HttpRequest, group: Dojo_Group, context: dict):
extra_tags="alert-success")
return request, True
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Group was not saved successfully.",
- extra_tags="alert_danger")
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Group was not saved successfully.",
+ extra_tags="alert_danger")
return request, False
@@ -450,8 +449,7 @@ def edit_group_member(request, mid):
extra_tags="alert-warning")
if is_title_in_breadcrumbs("View User"):
return HttpResponseRedirect(reverse("view_user", args=(member.user.id, )))
- else:
- return HttpResponseRedirect(reverse("view_group", args=(member.group.id, )))
+ return HttpResponseRedirect(reverse("view_group", args=(member.group.id, )))
if member.role.is_owner and not user_has_permission(request.user, member.group, Permissions.Group_Add_Owner):
messages.add_message(request,
messages.WARNING,
@@ -465,8 +463,7 @@ def edit_group_member(request, mid):
extra_tags="alert-success")
if is_title_in_breadcrumbs("View User"):
return HttpResponseRedirect(reverse("view_user", args=(member.user.id, )))
- else:
- return HttpResponseRedirect(reverse("view_group", args=(member.group.id, )))
+ return HttpResponseRedirect(reverse("view_group", args=(member.group.id, )))
add_breadcrumb(title="Edit a Group Member", top_level=False, request=request)
return render(request, "dojo/edit_group_member.html", {
@@ -492,8 +489,7 @@ def delete_group_member(request, mid):
extra_tags="alert-warning")
if is_title_in_breadcrumbs("View User"):
return HttpResponseRedirect(reverse("view_user", args=(member.user.id, )))
- else:
- return HttpResponseRedirect(reverse("view_group", args=(member.group.id, )))
+ return HttpResponseRedirect(reverse("view_group", args=(member.group.id, )))
user = member.user
member.delete()
@@ -503,11 +499,9 @@ def delete_group_member(request, mid):
extra_tags="alert-success")
if is_title_in_breadcrumbs("View User"):
return HttpResponseRedirect(reverse("view_user", args=(member.user.id, )))
- else:
- if user == request.user:
- return HttpResponseRedirect(reverse("groups"))
- else:
- return HttpResponseRedirect(reverse("view_group", args=(member.group.id, )))
+ if user == request.user:
+ return HttpResponseRedirect(reverse("groups"))
+ return HttpResponseRedirect(reverse("view_group", args=(member.group.id, )))
add_breadcrumb("Delete a group member", top_level=False, request=request)
return render(request, "dojo/delete_group_member.html", {
diff --git a/dojo/importers/auto_create_context.py b/dojo/importers/auto_create_context.py
index 6325ece9699..9f2a1cb7e76 100644
--- a/dojo/importers/auto_create_context.py
+++ b/dojo/importers/auto_create_context.py
@@ -229,16 +229,15 @@ def get_or_create_product_type(
# Look for an existing object
if product_type := self.get_target_product_type_if_exists(product_type_name=product_type_name):
return product_type
- else:
- with transaction.atomic():
- product_type, created = Product_Type.objects.select_for_update().get_or_create(name=product_type_name)
- if created:
- Product_Type_Member.objects.create(
- user=get_current_user(),
- product_type=product_type,
- role=Role.objects.get(is_owner=True),
- )
- return product_type
+ with transaction.atomic():
+ product_type, created = Product_Type.objects.select_for_update().get_or_create(name=product_type_name)
+ if created:
+ Product_Type_Member.objects.create(
+ user=get_current_user(),
+ product_type=product_type,
+ role=Role.objects.get(is_owner=True),
+ )
+ return product_type
def get_or_create_product(
self,
diff --git a/dojo/importers/base_importer.py b/dojo/importers/base_importer.py
index 22e9ee5cbfe..6a05b3934ce 100644
--- a/dojo/importers/base_importer.py
+++ b/dojo/importers/base_importer.py
@@ -255,11 +255,10 @@ def determine_process_method(
parsed_findings,
**kwargs,
)
- else:
- return self.sync_process_findings(
- parsed_findings,
- **kwargs,
- )
+ return self.sync_process_findings(
+ parsed_findings,
+ **kwargs,
+ )
def update_test_meta(self):
"""
@@ -276,7 +275,7 @@ def update_test_meta(self):
if not self.commit_hash.isspace():
self.test.commit_hash = self.commit_hash
- return None
+ return
def update_timestamps(self):
"""
@@ -510,7 +509,7 @@ def verify_tool_configuration_from_test(self):
# Return early as there is no value in validating further
return
# Validate that the test has a value
- elif self.test is not None:
+ if self.test is not None:
# Make sure the Tool_Configuration is connected to the product that the test is
if self.api_scan_configuration.product != self.test.engagement.product:
msg = "API Scan Configuration has to be from same product as the Test"
@@ -536,7 +535,7 @@ def verify_tool_configuration_from_engagement(self):
# Return early as there is no value in validating further
return
# Validate that the engagement has a value
- elif self.engagement is not None:
+ if self.engagement is not None:
# Make sure the Tool_Configuration is connected to the engagement that the test is
if self.api_scan_configuration.product != self.engagement.product:
msg = "API Scan Configuration has to be from same product as the Engagement"
diff --git a/dojo/importers/default_reimporter.py b/dojo/importers/default_reimporter.py
index 290e13f6ac5..9063838c73d 100644
--- a/dojo/importers/default_reimporter.py
+++ b/dojo/importers/default_reimporter.py
@@ -399,12 +399,12 @@ def match_new_finding_to_existing_finding(
test=self.test,
hash_code=unsaved_finding.hash_code,
).exclude(hash_code=None).order_by("id")
- elif self.deduplication_algorithm == "unique_id_from_tool":
+ if self.deduplication_algorithm == "unique_id_from_tool":
return Finding.objects.filter(
test=self.test,
unique_id_from_tool=unsaved_finding.unique_id_from_tool,
).exclude(unique_id_from_tool=None).order_by("id")
- elif self.deduplication_algorithm == "unique_id_from_tool_or_hash_code":
+ if self.deduplication_algorithm == "unique_id_from_tool_or_hash_code":
query = Finding.objects.filter(
Q(test=self.test),
(Q(hash_code__isnull=False) & Q(hash_code=unsaved_finding.hash_code))
@@ -412,7 +412,7 @@ def match_new_finding_to_existing_finding(
).order_by("id")
deduplicationLogger.debug(query.query)
return query
- elif self.deduplication_algorithm == "legacy":
+ if self.deduplication_algorithm == "legacy":
# This is the legacy reimport behavior. Although it's pretty flawed and doesn't match the legacy algorithm for deduplication,
# this is left as is for simplicity.
# Re-writing the legacy deduplication here would be complicated and counter-productive.
@@ -423,9 +423,8 @@ def match_new_finding_to_existing_finding(
test=self.test,
severity=unsaved_finding.severity,
numerical_severity=Finding.get_numerical_severity(unsaved_finding.severity)).order_by("id")
- else:
- logger.error(f'Internal error: unexpected deduplication_algorithm: "{self.deduplication_algorithm}"')
- return None
+ logger.error(f'Internal error: unexpected deduplication_algorithm: "{self.deduplication_algorithm}"')
+ return None
def process_matched_finding(
self,
@@ -441,16 +440,15 @@ def process_matched_finding(
unsaved_finding,
existing_finding,
)
- elif existing_finding.is_mitigated:
+ if existing_finding.is_mitigated:
return self.process_matched_mitigated_finding(
unsaved_finding,
existing_finding,
)
- else:
- return self.process_matched_active_finding(
- unsaved_finding,
- existing_finding,
- )
+ return self.process_matched_active_finding(
+ unsaved_finding,
+ existing_finding,
+ )
def process_matched_special_status_finding(
self,
@@ -480,7 +478,7 @@ def process_matched_special_status_finding(
# We also need to add the finding to 'unchanged_items' as otherwise it will get mitigated by the reimporter
# (Risk accepted findings are not set to mitigated by Defectdojo)
# We however do not exit the loop as we do want to update the endpoints (in case some endpoints were fixed)
- elif existing_finding.risk_accepted and not existing_finding.active:
+ if existing_finding.risk_accepted and not existing_finding.active:
self.unchanged_items.append(existing_finding)
return existing_finding, False
# The finding was not an exact match, so we need to add more details about from the
@@ -521,47 +519,44 @@ def process_matched_mitigated_finding(
logger.debug(msg)
# Return True here to force the loop to continue
return existing_finding, True
- else:
- # even if there is no mitigation time, skip it, because both the current finding and
- # the reimported finding are is_mitigated
- # Return True here to force the loop to continue
- return existing_finding, True
- else:
- if self.do_not_reactivate:
- logger.debug(
- "Skipping reactivating by user's choice do_not_reactivate: "
- f" - {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})",
- )
- # Search for an existing note that this finding has been skipped for reactivation
- # before this current time
- reactivated_note_text = f"Finding has skipped reactivation from {self.scan_type} re-upload with user decision do_not_reactivate."
- existing_note = existing_finding.notes.filter(
+ # even if there is no mitigation time, skip it, because both the current finding and
+            # the reimported finding are flagged as is_mitigated
+ # Return True here to force the loop to continue
+ return existing_finding, True
+ if self.do_not_reactivate:
+ logger.debug(
+ "Skipping reactivating by user's choice do_not_reactivate: "
+ f" - {existing_finding.id}: {existing_finding.title} "
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
+ )
+ # Search for an existing note that this finding has been skipped for reactivation
+ # before this current time
+ reactivated_note_text = f"Finding has skipped reactivation from {self.scan_type} re-upload with user decision do_not_reactivate."
+ existing_note = existing_finding.notes.filter(
+ entry=reactivated_note_text,
+ author=self.user,
+ )
+ # If a note has not been left before, we can skip this finding
+ if len(existing_note) == 0:
+ note = Notes(
entry=reactivated_note_text,
author=self.user,
)
- # If a note has not been left before, we can skip this finding
- if len(existing_note) == 0:
- note = Notes(
- entry=reactivated_note_text,
- author=self.user,
- )
- note.save()
- existing_finding.notes.add(note)
- existing_finding.save(dedupe_option=False)
- # Return True here to force the loop to continue
- return existing_finding, True
- else:
- logger.debug(
- f"Reactivating: - {existing_finding.id}: {existing_finding.title} "
- f"({existing_finding.component_name} - {existing_finding.component_version})",
- )
- existing_finding.mitigated = None
- existing_finding.is_mitigated = False
- existing_finding.mitigated_by = None
- existing_finding.active = True
- if self.verified is not None:
- existing_finding.verified = self.verified
+ note.save()
+ existing_finding.notes.add(note)
+ existing_finding.save(dedupe_option=False)
+ # Return True here to force the loop to continue
+ return existing_finding, True
+ logger.debug(
+ f"Reactivating: - {existing_finding.id}: {existing_finding.title} "
+ f"({existing_finding.component_name} - {existing_finding.component_version})",
+ )
+ existing_finding.mitigated = None
+ existing_finding.is_mitigated = False
+ existing_finding.mitigated_by = None
+ existing_finding.active = True
+ if self.verified is not None:
+ existing_finding.verified = self.verified
component_name = getattr(unsaved_finding, "component_name", None)
component_version = getattr(unsaved_finding, "component_version", None)
@@ -706,9 +701,8 @@ def finding_post_processing(
# Process vulnerability IDs
if finding_from_report.unsaved_vulnerability_ids:
finding.unsaved_vulnerability_ids = finding_from_report.unsaved_vulnerability_ids
- finding = self.process_vulnerability_ids(finding)
- return finding
+ return self.process_vulnerability_ids(finding)
def process_groups_for_all_findings(
self,
@@ -767,8 +761,7 @@ def process_results(
serialized_to_mitigate,
serialized_untouched,
)
- else:
- return self.new_items, self.reactivated_items, self.to_mitigate, self.untouched
+ return self.new_items, self.reactivated_items, self.to_mitigate, self.untouched
def calculate_unsaved_finding_hash_code(
self,
diff --git a/dojo/importers/endpoint_manager.py b/dojo/importers/endpoint_manager.py
index 2ee3e7d3009..ba7172efaa3 100644
--- a/dojo/importers/endpoint_manager.py
+++ b/dojo/importers/endpoint_manager.py
@@ -57,7 +57,7 @@ def add_endpoints_to_unsaved_finding(
endpoint=ep,
defaults={"date": finding.date})
logger.debug(f"IMPORT_SCAN: {len(endpoints)} imported")
- return None
+ return
@dojo_async_task
@app.task()
@@ -79,7 +79,7 @@ def mitigate_endpoint_status(
endpoint_status.mitigated_by = user
endpoint_status.mitigated = True
endpoint_status.save()
- return None
+ return
@dojo_async_task
@app.task()
@@ -100,7 +100,7 @@ def reactivate_endpoint_status(
endpoint_status.mitigated = False
endpoint_status.last_modified = timezone.now()
endpoint_status.save()
- return None
+ return
def chunk_endpoints(
self,
@@ -158,7 +158,7 @@ def clean_unsaved_endpoints(
endpoint.clean()
except ValidationError as e:
logger.warning(f"DefectDojo is storing broken endpoint because cleaning wasn't successful: {e}")
- return None
+ return
def chunk_endpoints_and_reactivate(
self,
@@ -182,7 +182,7 @@ def chunk_endpoints_and_reactivate(
self.reactivate_endpoint_status(endpoint_status_list, sync=False)
else:
self.reactivate_endpoint_status(endpoint_status_list, sync=True)
- return None
+ return
def chunk_endpoints_and_mitigate(
self,
@@ -207,7 +207,7 @@ def chunk_endpoints_and_mitigate(
self.mitigate_endpoint_status(endpoint_status_list, user, sync=False)
else:
self.mitigate_endpoint_status(endpoint_status_list, user, sync=True)
- return None
+ return
def update_endpoint_status(
self,
@@ -242,4 +242,4 @@ def update_endpoint_status(
)
self.chunk_endpoints_and_reactivate(endpoint_status_to_reactivate)
self.chunk_endpoints_and_mitigate(endpoint_status_to_mitigate, user)
- return None
+ return
diff --git a/dojo/jira_link/helper.py b/dojo/jira_link/helper.py
index b5e3ba8b219..ee844280555 100644
--- a/dojo/jira_link/helper.py
+++ b/dojo/jira_link/helper.py
@@ -99,6 +99,7 @@ def is_push_all_issues(instance):
jira_project = get_jira_project(instance)
if jira_project:
return jira_project.push_all_issues
+ return None
# checks if a finding can be pushed to JIRA
@@ -173,12 +174,11 @@ def get_jira_project(obj, use_inheritance=True):
if obj.jira_project:
return obj.jira_project
# some old jira_issue records don't have a jira_project, so try to go via the finding instead
- elif hasattr(obj, "finding") and obj.finding:
+ if hasattr(obj, "finding") and obj.finding:
return get_jira_project(obj.finding, use_inheritance=use_inheritance)
- elif hasattr(obj, "engagement") and obj.engagement:
+ if hasattr(obj, "engagement") and obj.engagement:
return get_jira_project(obj.finding, use_inheritance=use_inheritance)
- else:
- return None
+ return None
if isinstance(obj, Finding) or isinstance(obj, Stub_Finding):
finding = obj
@@ -205,9 +205,8 @@ def get_jira_project(obj, use_inheritance=True):
if use_inheritance:
logger.debug("delegating to product %s for %s", engagement.product, engagement)
return get_jira_project(engagement.product)
- else:
- logger.debug("not delegating to product %s for %s", engagement.product, engagement)
- return None
+ logger.debug("not delegating to product %s for %s", engagement.product, engagement)
+ return None
if isinstance(obj, Product):
# TODO: refactor relationships, but now this would brake APIv1 (and v2?)
@@ -241,7 +240,7 @@ def get_jira_url(obj):
issue = get_jira_issue(obj)
if issue is not None:
return get_jira_issue_url(issue)
- elif isinstance(obj, Finding):
+ if isinstance(obj, Finding):
# finding must only have url if there is a jira_issue
# engagement can continue to show url of jiraproject instead of jira issue
return None
@@ -320,8 +319,7 @@ def get_jira_issue_template(obj):
if isinstance(obj, Finding_Group):
return os.path.join(template_dir, "jira-finding-group-description.tpl")
- else:
- return os.path.join(template_dir, "jira-description.tpl")
+ return os.path.join(template_dir, "jira-description.tpl")
def get_jira_creation(obj):
@@ -357,6 +355,7 @@ def get_jira_issue(obj):
return obj.jira_issue
except JIRA_Issue.DoesNotExist:
return None
+ return None
def has_jira_configured(obj):
@@ -424,6 +423,7 @@ def get_jira_connection(obj):
if jira_instance is not None:
return get_jira_connection_raw(jira_instance.url, jira_instance.username, jira_instance.password)
+ return None
def jira_get_resolution_id(jira, issue, status):
@@ -468,6 +468,7 @@ def get_jira_updated(finding):
project = get_jira_project(finding)
issue = jira_get_issue(project, j_issue)
return issue.fields.updated
+ return None
# Used for unit testing so geting all the connections is manadatory
@@ -481,6 +482,7 @@ def get_jira_status(finding):
project = get_jira_project(finding)
issue = jira_get_issue(project, j_issue)
return issue.fields.status
+ return None
# Used for unit testing so geting all the connections is manadatory
@@ -494,6 +496,7 @@ def get_jira_comments(finding):
project = get_jira_project(finding)
issue = jira_get_issue(project, j_issue)
return issue.fields.comment.comments
+ return None
# Logs the error to the alerts table, which appears in the notification toolbar
@@ -617,7 +620,7 @@ def jira_priority(obj):
def jira_environment(obj):
if isinstance(obj, Finding):
return "\n".join([str(endpoint) for endpoint in obj.endpoints.all()])
- elif isinstance(obj, Finding_Group):
+ if isinstance(obj, Finding_Group):
envs = [
jira_environment(finding)
for finding in obj.findings.all()
@@ -625,8 +628,7 @@ def jira_environment(obj):
jira_environments = [env for env in envs if env]
return "\n".join(jira_environments)
- else:
- return ""
+ return ""
def push_to_jira(obj, *args, **kwargs):
@@ -638,25 +640,22 @@ def push_to_jira(obj, *args, **kwargs):
finding = obj
if finding.has_jira_issue:
return update_jira_issue_for_finding(finding, *args, **kwargs)
- else:
- return add_jira_issue_for_finding(finding, *args, **kwargs)
+ return add_jira_issue_for_finding(finding, *args, **kwargs)
- elif isinstance(obj, Engagement):
+ if isinstance(obj, Engagement):
engagement = obj
if engagement.has_jira_issue:
return update_epic(engagement, *args, **kwargs)
- else:
- return add_epic(engagement, *args, **kwargs)
+ return add_epic(engagement, *args, **kwargs)
- elif isinstance(obj, Finding_Group):
+ if isinstance(obj, Finding_Group):
group = obj
if group.has_jira_issue:
return update_jira_issue_for_finding_group(group, *args, **kwargs)
- else:
- return add_jira_issue_for_finding_group(group, *args, **kwargs)
+ return add_jira_issue_for_finding_group(group, *args, **kwargs)
- else:
- logger.error("unsupported object passed to push_to_jira: %s %i %s", obj.__name__, obj.id, obj)
+    logger.error("unsupported object passed to push_to_jira: %s %i %s", type(obj).__name__, obj.id, obj)
+ return None
def add_issues_to_epic(jira, obj, epic_id, issue_keys, ignore_epics=True):
@@ -1022,9 +1021,7 @@ def get_jira_issue_from_jira(find):
jira = get_jira_connection(jira_instance)
logger.debug("getting issue from JIRA")
- issue_from_jira = jira.issue(j_issue.jira_id)
-
- return issue_from_jira
+ return jira.issue(j_issue.jira_id)
except JIRAError as e:
logger.exception(e)
@@ -1191,6 +1188,7 @@ def jira_attachment(finding, jira, issue, file, jira_filename=None):
logger.exception(e)
log_jira_alert("Attachment: " + e.text, finding)
return False
+ return None
def jira_check_attachment(issue, source_file_name):
@@ -1242,9 +1240,9 @@ def close_epic(eng, push_to_jira, **kwargs):
logger.exception(e)
log_jira_generic_alert("Jira Engagement/Epic Close Error", str(e))
return False
- else:
- add_error_message_to_response("Push to JIRA for Epic skipped because enable_engagement_epic_mapping is not checked for this engagement")
- return False
+ return None
+ add_error_message_to_response("Push to JIRA for Epic skipped because enable_engagement_epic_mapping is not checked for this engagement")
+ return False
@dojo_model_to_id
@@ -1350,9 +1348,8 @@ def jira_get_issue(jira_project, issue_key):
try:
jira_instance = jira_project.jira_instance
jira = get_jira_connection(jira_instance)
- issue = jira.issue(issue_key)
+ return jira.issue(issue_key)
- return issue
except JIRAError as jira_error:
logger.debug("error retrieving jira issue " + issue_key + " " + str(jira_error))
logger.exception(jira_error)
@@ -1386,6 +1383,8 @@ def add_comment(obj, note, force_push=False, **kwargs):
except JIRAError as e:
log_jira_generic_alert("Jira Add Comment Error", str(e))
return False
+ return None
+ return None
def add_simple_jira_comment(jira_instance, jira_issue, comment):
diff --git a/dojo/jira_link/queries.py b/dojo/jira_link/queries.py
index 6d41b3b6e21..b077c076097 100644
--- a/dojo/jira_link/queries.py
+++ b/dojo/jira_link/queries.py
@@ -63,7 +63,7 @@ def get_authorized_jira_projects(permission, user=None):
product__member=Exists(product_authorized_product_roles),
product__prod_type__authorized_group=Exists(product_authorized_product_type_groups),
product__authorized_group=Exists(product_authorized_product_groups))
- jira_projects = jira_projects.filter(
+ return jira_projects.filter(
Q(engagement__product__prod_type__member=True)
| Q(engagement__product__member=True)
| Q(engagement__product__prod_type__authorized_group=True)
@@ -73,8 +73,6 @@ def get_authorized_jira_projects(permission, user=None):
| Q(product__prod_type__authorized_group=True)
| Q(product__authorized_group=True))
- return jira_projects
-
def get_authorized_jira_issues(permission):
user = get_current_user()
@@ -152,7 +150,7 @@ def get_authorized_jira_issues(permission):
finding__test__engagement__product__member=Exists(finding_authorized_product_roles),
finding__test__engagement__product__prod_type__authorized_group=Exists(finding_authorized_product_type_groups),
finding__test__engagement__product__authorized_group=Exists(finding_authorized_product_groups))
- jira_issues = jira_issues.filter(
+ return jira_issues.filter(
Q(engagement__product__prod_type__member=True)
| Q(engagement__product__member=True)
| Q(engagement__product__prod_type__authorized_group=True)
@@ -165,5 +163,3 @@ def get_authorized_jira_issues(permission):
| Q(finding__test__engagement__product__member=True)
| Q(finding__test__engagement__product__prod_type__authorized_group=True)
| Q(finding__test__engagement__product__authorized_group=True))
-
- return jira_issues
diff --git a/dojo/jira_link/views.py b/dojo/jira_link/views.py
index e618c84f01c..fc2f67a373a 100644
--- a/dojo/jira_link/views.py
+++ b/dojo/jira_link/views.py
@@ -67,10 +67,10 @@ def webhook(request, secret=None):
if not system_settings.enable_jira:
return webhook_responser_handler("info", "Ignoring incoming webhook as JIRA is disabled.")
# If the webhook is not enabled, then return a 404
- elif not system_settings.enable_jira_web_hook:
+ if not system_settings.enable_jira_web_hook:
return webhook_responser_handler("info", "Ignoring incoming webhook as JIRA Webhook is disabled.")
# Determine if the request should be "authenticated"
- elif not system_settings.disable_jira_webhook_secret:
+ if not system_settings.disable_jira_webhook_secret:
# Make sure there is a value for the webhook secret before making a comparison
if not system_settings.jira_webhook_secret:
return webhook_responser_handler("info", "Ignoring incoming webhook as JIRA Webhook secret is empty in Defect Dojo system settings.")
@@ -211,7 +211,7 @@ def check_for_and_create_comment(parsed_json):
"""
comment = parsed_json.get("comment", None)
if comment is None:
- return
+ return None
comment_text = comment.get("body")
commenter = ""
@@ -271,6 +271,7 @@ def check_for_and_create_comment(parsed_json):
finding.jira_issue.jira_change = timezone.now()
finding.jira_issue.save()
finding.save()
+ return None
def get_custom_field(jira, label):
@@ -432,8 +433,7 @@ def post(self, request):
url=request.build_absolute_uri(reverse("jira")))
return HttpResponseRedirect(reverse("jira"))
- else:
- logger.error("jform.errors: %s", jform.errors)
+ logger.error("jform.errors: %s", jform.errors)
return render(request, self.get_template(), {"jform": jform})
diff --git a/dojo/management/commands/jira_status_reconciliation.py b/dojo/management/commands/jira_status_reconciliation.py
index 6ca72dbe1f1..e26aefc0516 100644
--- a/dojo/management/commands/jira_status_reconciliation.py
+++ b/dojo/management/commands/jira_status_reconciliation.py
@@ -86,7 +86,7 @@ def jira_status_reconciliation(*args, **kwargs):
messages.append(message)
logger.info(message)
continue
- elif find.risk_accepted:
+ if find.risk_accepted:
message = "{}; {}/finding/{};{};{};{};{};{};{};{};{};{};{};{}skipping risk accepted findings;{}".format(
find.jira_issue.jira_key, settings.SITE_URL, find.id, find.status(), resolution_name, None, None, None,
find.jira_issue.jira_change, issue_from_jira.fields.updated, find.last_status_update, issue_from_jira.fields.updated, find.last_reviewed, issue_from_jira.fields.updated, "skipped")
@@ -186,6 +186,7 @@ def jira_status_reconciliation(*args, **kwargs):
logger.info("results (semicolon seperated)")
for message in messages:
logger.info(message)
+ return None
class Command(BaseCommand):
diff --git a/dojo/metrics/utils.py b/dojo/metrics/utils.py
index 8ca345b41f7..884658d5ba1 100644
--- a/dojo/metrics/utils.py
+++ b/dojo/metrics/utils.py
@@ -597,5 +597,4 @@ def findings_queryset(
"""
if qs.model is Endpoint_Status:
return Finding.objects.filter(status_finding__in=qs)
- else:
- return qs
+ return qs
diff --git a/dojo/models.py b/dojo/models.py
index 308db965228..2346c1e916c 100644
--- a/dojo/models.py
+++ b/dojo/models.py
@@ -764,9 +764,8 @@ def get_absolute_url(self):
return reverse("product_type", args=[str(self.id)])
def get_breadcrumbs(self):
- bc = [{"title": str(self),
+ return [{"title": str(self),
"url": reverse("edit_product_type", args=(self.id,))}]
- return bc
@cached_property
def critical_present(self):
@@ -774,6 +773,7 @@ def critical_present(self):
test__engagement__product__prod_type=self, severity="Critical")
if c_findings.count() > 0:
return True
+ return None
@cached_property
def high_present(self):
@@ -781,6 +781,7 @@ def high_present(self):
test__engagement__product__prod_type=self, severity="High")
if c_findings.count() > 0:
return True
+ return None
@cached_property
def calc_health(self):
@@ -798,8 +799,7 @@ def calc_health(self):
health = health - ((h_findings.count() - 1) * 2)
if health < 5:
return 5
- else:
- return health
+ return health
# only used by bulk risk acceptance api
@property
@@ -835,9 +835,8 @@ def __str__(self):
return self.name
def get_breadcrumbs(self):
- bc = [{"title": str(self),
+ return [{"title": str(self),
"url": None}]
- return bc
class DojoMeta(models.Model):
@@ -1180,8 +1179,7 @@ def endpoint_host_count(self):
for e in endpoints:
if e.host in hosts:
continue
- else:
- hosts.append(e.host)
+ hosts.append(e.host)
return len(hosts)
@@ -1196,53 +1194,51 @@ def endpoint_count(self):
def open_findings(self, start_date=None, end_date=None):
if start_date is None or end_date is None:
return {}
- else:
- critical = Finding.objects.filter(test__engagement__product=self,
- mitigated__isnull=True,
- verified=True,
- false_p=False,
- duplicate=False,
- out_of_scope=False,
- severity="Critical",
- date__range=[start_date,
- end_date]).count()
- high = Finding.objects.filter(test__engagement__product=self,
+ critical = Finding.objects.filter(test__engagement__product=self,
mitigated__isnull=True,
verified=True,
false_p=False,
duplicate=False,
out_of_scope=False,
- severity="High",
+ severity="Critical",
date__range=[start_date,
end_date]).count()
- medium = Finding.objects.filter(test__engagement__product=self,
- mitigated__isnull=True,
- verified=True,
- false_p=False,
- duplicate=False,
- out_of_scope=False,
- severity="Medium",
- date__range=[start_date,
- end_date]).count()
- low = Finding.objects.filter(test__engagement__product=self,
- mitigated__isnull=True,
- verified=True,
- false_p=False,
- duplicate=False,
- out_of_scope=False,
- severity="Low",
- date__range=[start_date,
- end_date]).count()
- return {"Critical": critical,
- "High": high,
- "Medium": medium,
- "Low": low,
- "Total": (critical + high + medium + low)}
+ high = Finding.objects.filter(test__engagement__product=self,
+ mitigated__isnull=True,
+ verified=True,
+ false_p=False,
+ duplicate=False,
+ out_of_scope=False,
+ severity="High",
+ date__range=[start_date,
+ end_date]).count()
+ medium = Finding.objects.filter(test__engagement__product=self,
+ mitigated__isnull=True,
+ verified=True,
+ false_p=False,
+ duplicate=False,
+ out_of_scope=False,
+ severity="Medium",
+ date__range=[start_date,
+ end_date]).count()
+ low = Finding.objects.filter(test__engagement__product=self,
+ mitigated__isnull=True,
+ verified=True,
+ false_p=False,
+ duplicate=False,
+ out_of_scope=False,
+ severity="Low",
+ date__range=[start_date,
+ end_date]).count()
+ return {"Critical": critical,
+ "High": high,
+ "Medium": medium,
+ "Low": low,
+ "Total": (critical + high + medium + low)}
def get_breadcrumbs(self):
- bc = [{"title": str(self),
+ return [{"title": str(self),
"url": reverse("view_product", args=(self.id,))}]
- return bc
@property
def get_product_type(self):
@@ -1700,9 +1696,8 @@ def __str__(self):
msg = "hyperlink lib did not create URL as was expected"
raise ValueError(msg)
return clean_url
- else:
- msg = "Missing host"
- raise ValueError(msg)
+ msg = "Missing host"
+ raise ValueError(msg)
except:
url = ""
if self.protocol:
@@ -1814,11 +1809,9 @@ def __eq__(self, other):
products_match = (self.product) == other.product
# Check if the contents match
return products_match and contents_match
- else:
- return contents_match
+ return contents_match
- else:
- return NotImplemented
+ return NotImplemented
@property
def is_broken(self):
@@ -1829,8 +1822,7 @@ def is_broken(self):
else:
if self.product:
return False
- else:
- return True
+ return True
@property
def mitigated(self):
@@ -1851,7 +1843,7 @@ def findings_count(self):
return self.findings.all().count()
def active_findings(self):
- findings = self.findings.filter(
+ return self.findings.filter(
active=True,
out_of_scope=False,
mitigated__isnull=True,
@@ -1861,10 +1853,9 @@ def active_findings(self):
status_finding__out_of_scope=False,
status_finding__risk_accepted=False,
).order_by("numerical_severity")
- return findings
def active_verified_findings(self):
- findings = self.findings.filter(
+ return self.findings.filter(
active=True,
verified=True,
out_of_scope=False,
@@ -1875,7 +1866,6 @@ def active_verified_findings(self):
status_finding__out_of_scope=False,
status_finding__risk_accepted=False,
).order_by("numerical_severity")
- return findings
@property
def active_findings_count(self):
@@ -1919,7 +1909,7 @@ def host_findings_count(self):
return self.host_findings().count()
def host_active_findings(self):
- findings = Finding.objects.filter(
+ return Finding.objects.filter(
active=True,
out_of_scope=False,
mitigated__isnull=True,
@@ -1930,10 +1920,9 @@ def host_active_findings(self):
status_finding__risk_accepted=False,
endpoints__in=self.host_endpoints(),
).order_by("numerical_severity")
- return findings
def host_active_verified_findings(self):
- findings = Finding.objects.filter(
+ return Finding.objects.filter(
active=True,
verified=True,
out_of_scope=False,
@@ -1945,7 +1934,6 @@ def host_active_verified_findings(self):
status_finding__risk_accepted=False,
endpoints__in=self.host_endpoints(),
).order_by("numerical_severity")
- return findings
@property
def host_active_findings_count(self):
@@ -2220,8 +2208,7 @@ def get_queryset(self):
super_query = super_query.annotate(created_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_CREATED_FINDING)))
super_query = super_query.annotate(closed_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_CLOSED_FINDING)))
super_query = super_query.annotate(reactivated_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_REACTIVATED_FINDING)))
- super_query = super_query.annotate(untouched_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_UNTOUCHED_FINDING)))
- return super_query
+ return super_query.annotate(untouched_findings_count=Count("findings", filter=Q(test_import_finding_action__action=IMPORT_UNTOUCHED_FINDING)))
class Meta:
ordering = ("-id",)
@@ -2878,53 +2865,47 @@ def hash_fields(self, fields_to_hash):
def duplicate_finding_set(self):
if self.duplicate:
if self.duplicate_finding is not None:
- originals = Finding.objects.get(
+ return Finding.objects.get(
id=self.duplicate_finding.id).original_finding.all().order_by("title")
- return originals # we need to add the duplicate_finding here as well
- else:
- return []
- else:
- return self.original_finding.all().order_by("title")
+ return []
+ return self.original_finding.all().order_by("title")
def get_scanner_confidence_text(self):
if self.scanner_confidence and isinstance(self.scanner_confidence, int):
if self.scanner_confidence <= 2:
return "Certain"
- elif self.scanner_confidence >= 3 and self.scanner_confidence <= 5:
+ if self.scanner_confidence >= 3 and self.scanner_confidence <= 5:
return "Firm"
- else:
- return "Tentative"
+ return "Tentative"
return ""
@staticmethod
def get_numerical_severity(severity):
if severity == "Critical":
return "S0"
- elif severity == "High":
+ if severity == "High":
return "S1"
- elif severity == "Medium":
+ if severity == "Medium":
return "S2"
- elif severity == "Low":
+ if severity == "Low":
return "S3"
- elif severity == "Info":
+ if severity == "Info":
return "S4"
- else:
- return "S5"
+ return "S5"
@staticmethod
def get_number_severity(severity):
if severity == "Critical":
return 4
- elif severity == "High":
+ if severity == "High":
return 3
- elif severity == "Medium":
+ if severity == "Medium":
return 2
- elif severity == "Low":
+ if severity == "Low":
return 1
- elif severity == "Info":
+ if severity == "Info":
return 0
- else:
- return 5
+ return 5
@staticmethod
def get_severity(num_severity):
@@ -2998,8 +2979,7 @@ def sla_age(self):
def get_sla_start_date(self):
if self.sla_start_date:
return self.sla_start_date
- else:
- return self.date
+ return self.date
def get_sla_period(self):
sla_configuration = SLA_Configuration.objects.filter(id=self.test.engagement.product.sla_configuration_id).first()
@@ -3010,7 +2990,7 @@ def get_sla_period(self):
def set_sla_expiration_date(self):
system_settings = System_Settings.objects.get()
if not system_settings.enable_finding_sla:
- return None
+ return
days_remaining = None
sla_period, enforce_period = self.get_sla_period()
@@ -3018,7 +2998,7 @@ def set_sla_expiration_date(self):
days_remaining = sla_period - self.sla_age
else:
self.sla_expiration_date = Finding().sla_expiration_date
- return None
+ return
if days_remaining:
if self.mitigated:
@@ -3036,8 +3016,7 @@ def sla_days_remaining(self):
if isinstance(mitigated_date, datetime):
mitigated_date = self.mitigated.date()
return (self.sla_expiration_date - mitigated_date).days
- else:
- return (self.sla_expiration_date - get_current_date()).days
+ return (self.sla_expiration_date - get_current_date()).days
return None
def sla_deadline(self):
@@ -3079,9 +3058,8 @@ def has_jira_issue(self):
@cached_property
def finding_group(self):
- group = self.finding_group_set.all().first()
+ return self.finding_group_set.all().first()
# logger.debug('finding.finding_group: %s', group)
- return group
@cached_property
def has_jira_group_issue(self):
@@ -3131,21 +3109,20 @@ def get_valid_request_response_pairs(self):
# Get a list of all req/resp pairs
all_req_resps = self.burprawrequestresponse_set.all()
# Filter away those that do not have any contents
- valid_req_resps = all_req_resps.exclude(
+ return all_req_resps.exclude(
burpRequestBase64__exact=empty_value,
burpResponseBase64__exact=empty_value,
)
- return valid_req_resps
-
def get_report_requests(self):
# Get the list of request response pairs that are non empty
request_response_pairs = self.get_valid_request_response_pairs()
# Determine how many to return
if request_response_pairs.count() >= 3:
return request_response_pairs[0:3]
- elif request_response_pairs.count() > 0:
+ if request_response_pairs.count() > 0:
return request_response_pairs
+ return None
def get_request(self):
# Get the list of request response pairs that are non empty
@@ -3163,8 +3140,7 @@ def get_response(self):
reqres = request_response_pairs.first()
res = base64.b64decode(reqres.burpResponseBase64)
# Removes all blank lines
- res = re.sub(r"\n\s*\n", "\n", res)
- return res
+ return re.sub(r"\n\s*\n", "\n", res)
def latest_note(self):
if self.notes.all():
@@ -3250,8 +3226,7 @@ def bitbucket_standalone_prepare_scm_base_link(self, uri):
project = parts_project[0]
if project.startswith("~"):
return parts_scm[0] + "/users/" + parts_project[0][1:] + "/repos/" + parts_project[1] + "/browse"
- else:
- return parts_scm[0] + "/projects/" + parts_project[0] + "/repos/" + parts_project[1] + "/browse"
+ return parts_scm[0] + "/projects/" + parts_project[0] + "/repos/" + parts_project[1] + "/browse"
def bitbucket_standalone_prepare_scm_link(self, uri):
# if commit hash or branch/tag is set for engagement/test -
@@ -3336,9 +3311,7 @@ def vulnerability_ids(self):
vulnerability_ids = [self.cve]
# Remove duplicates
- vulnerability_ids = list(dict.fromkeys(vulnerability_ids))
-
- return vulnerability_ids
+ return list(dict.fromkeys(vulnerability_ids))
def inherit_tags(self, potentially_existing_tags):
# get a copy of the tags to be inherited
@@ -3526,9 +3499,8 @@ def get_absolute_url(self):
return reverse("edit_template", args=[str(self.id)])
def get_breadcrumbs(self):
- bc = [{"title": str(self),
+ return [{"title": str(self),
"url": reverse("view_template", args=(self.id,))}]
- return bc
@cached_property
def vulnerability_ids(self):
@@ -3549,9 +3521,7 @@ def vulnerability_ids(self):
vulnerability_ids = [self.cve]
# Remove duplicates
- vulnerability_ids = list(dict.fromkeys(vulnerability_ids))
-
- return vulnerability_ids
+ return list(dict.fromkeys(vulnerability_ids))
class Vulnerability_Id_Template(models.Model):
@@ -3599,10 +3569,9 @@ class Check_List(models.Model):
def get_status(pass_fail):
if pass_fail == "Pass":
return "success"
- elif pass_fail == "Fail":
+ if pass_fail == "Fail":
return "danger"
- else:
- return "warning"
+ return "warning"
def get_breadcrumb(self):
bc = self.engagement.get_breadcrumb()
@@ -3623,8 +3592,7 @@ def get_request(self):
def get_response(self):
res = str(base64.b64decode(self.burpResponseBase64), errors="ignore")
# Removes all blank lines
- res = re.sub(r"\n\s*\n", "\n", res)
- return res
+ return re.sub(r"\n\s*\n", "\n", res)
class Risk_Acceptance(models.Model):
@@ -3880,16 +3848,15 @@ def false_positive_resolutions(self):
def get_priority(self, status):
if status == "Info":
return self.info_mapping_severity
- elif status == "Low":
+ if status == "Low":
return self.low_mapping_severity
- elif status == "Medium":
+ if status == "Medium":
return self.medium_mapping_severity
- elif status == "High":
+ if status == "High":
return self.high_mapping_severity
- elif status == "Critical":
+ if status == "Critical":
return self.critical_mapping_severity
- else:
- return "N/A"
+ return "N/A"
# declare form here as we can't import forms.py due to circular imports not even locally
@@ -4599,8 +4566,7 @@ class ChoiceAnswer(Answer):
def __str__(self):
if len(self.answer.all()):
return str(self.answer.all()[0])
- else:
- return "No Response"
+ return "No Response"
if settings.ENABLE_AUDITLOG:
diff --git a/dojo/notes/views.py b/dojo/notes/views.py
index a5947971b8a..6dfca7895d1 100644
--- a/dojo/notes/views.py
+++ b/dojo/notes/views.py
@@ -123,11 +123,10 @@ def edit_note(request, id, page, objid):
_("Note edited."),
extra_tags="alert-success")
return HttpResponseRedirect(reverse(reverse_url, args=(object_id, )))
- else:
- messages.add_message(request,
- messages.SUCCESS,
- _("Note was not succesfully edited."),
- extra_tags="alert-danger")
+ messages.add_message(request,
+ messages.SUCCESS,
+                                 _("Note was not successfully edited."),
+ extra_tags="alert-danger")
else:
if note_type_activation:
form = TypedNoteForm(available_note_types=available_note_types, instance=note)
@@ -195,5 +194,4 @@ def find_available_notetypes(finding, editing_note):
available_note_types.append(note_type_id)
available_note_types.append(editing_note.note_type_id)
available_note_types = list(set(available_note_types))
- queryset = Note_Type.objects.filter(id__in=available_note_types).order_by("-id")
- return queryset
+ return Note_Type.objects.filter(id__in=available_note_types).order_by("-id")
diff --git a/dojo/notifications/helper.py b/dojo/notifications/helper.py
index 9acbf94d215..ce3f52bf1a5 100644
--- a/dojo/notifications/helper.py
+++ b/dojo/notifications/helper.py
@@ -343,14 +343,13 @@ def webhooks_notification_request(endpoint, event, *args, **kwargs):
timeout = get_system_setting("webhooks_notifications_timeout")
- res = requests.request(
+ return requests.request(
method="POST",
url=endpoint.url,
headers=headers,
json=data,
timeout=timeout,
)
- return res
def test_webhooks_notification(endpoint):
@@ -522,18 +521,17 @@ def get_slack_user_id(user_email):
logger.error("Slack is complaining. See error message below.")
logger.error(user)
raise RuntimeError("Error getting user list from Slack: " + res.text)
- else:
- if "email" in user["user"]["profile"]:
- if user_email == user["user"]["profile"]["email"]:
- if "id" in user["user"]:
- user_id = user["user"]["id"]
- logger.debug(f"Slack user ID is {user_id}")
- slack_user_is_found = True
- else:
- logger.warning(f"A user with email {user_email} could not be found in this Slack workspace.")
-
- if not slack_user_is_found:
- logger.warning("The Slack user was not found.")
+ if "email" in user["user"]["profile"]:
+ if user_email == user["user"]["profile"]["email"]:
+ if "id" in user["user"]:
+ user_id = user["user"]["id"]
+ logger.debug(f"Slack user ID is {user_id}")
+ slack_user_is_found = True
+ else:
+ logger.warning(f"A user with email {user_email} could not be found in this Slack workspace.")
+
+ if not slack_user_is_found:
+ logger.warning("The Slack user was not found.")
return user_id
diff --git a/dojo/notifications/views.py b/dojo/notifications/views.py
index 6a2495330d7..7fe5562ee7e 100644
--- a/dojo/notifications/views.py
+++ b/dojo/notifications/views.py
@@ -158,8 +158,7 @@ def get_form(
) -> NotificationsWebhookForm:
if request.method == "POST":
return NotificationsWebhookForm(request.POST, is_superuser=request.user.is_superuser, **kwargs)
- else:
- return NotificationsWebhookForm(is_superuser=request.user.is_superuser, **kwargs)
+ return NotificationsWebhookForm(is_superuser=request.user.is_superuser, **kwargs)
def preprocess_request(self, request: HttpRequest):
# Check Webhook notifications are enabled
@@ -182,10 +181,9 @@ def get_initial_context(self, request: HttpRequest, nwhs: Notification_Webhooks)
}
def get_notification_webhooks(self, request: HttpRequest):
- nwhs = Notification_Webhooks.objects.all().order_by("name")
+ return Notification_Webhooks.objects.all().order_by("name")
# TODO: finished pagination
# TODO: restrict based on user - not only superadmins have access and they see everything
- return nwhs
def get(self, request: HttpRequest):
# Run common checks
@@ -377,8 +375,7 @@ def get_form(
) -> NotificationsWebhookForm:
if request.method == "POST":
return DeleteNotificationsWebhookForm(request.POST, **kwargs)
- else:
- return DeleteNotificationsWebhookForm(**kwargs)
+ return DeleteNotificationsWebhookForm(**kwargs)
def get_initial_context(self, request: HttpRequest, nwh: Notification_Webhooks):
return {
diff --git a/dojo/object/views.py b/dojo/object/views.py
index dfb4f590556..0cca584b0be 100644
--- a/dojo/object/views.py
+++ b/dojo/object/views.py
@@ -30,14 +30,14 @@ def new_object(request, pid):
"Added Tracked File to a Product",
extra_tags="alert-success")
return HttpResponseRedirect(reverse("view_objects", args=(pid,)))
- else:
- tform = ObjectSettingsForm()
- product_tab = Product_Tab(prod, title="Add Tracked Files to a Product", tab="settings")
+ return None
+ tform = ObjectSettingsForm()
+ product_tab = Product_Tab(prod, title="Add Tracked Files to a Product", tab="settings")
- return render(request, "dojo/new_object.html",
- {"tform": tform,
- "product_tab": product_tab,
- "pid": prod.id})
+ return render(request, "dojo/new_object.html",
+ {"tform": tform,
+ "product_tab": product_tab,
+ "pid": prod.id})
@user_is_authorized(Product, Permissions.Product_Tracking_Files_View, "pid")
@@ -101,8 +101,7 @@ def delete_object(request, pid, ttid):
"Tracked Product Files Deleted.",
extra_tags="alert-success")
return HttpResponseRedirect(reverse("view_objects", args=(pid,)))
- else:
- tform = DeleteObjectsSettingsForm(instance=object)
+ tform = DeleteObjectsSettingsForm(instance=object)
product_tab = Product_Tab(product, title="Delete Product Tool Configuration", tab="settings")
return render(request,
diff --git a/dojo/pipeline.py b/dojo/pipeline.py
index ea020d2d926..ee2dc0ae186 100644
--- a/dojo/pipeline.py
+++ b/dojo/pipeline.py
@@ -31,7 +31,7 @@ def social_uid(backend, details, response, *args, **kwargs):
"first_name": first_name,
"last_name": last_name,
"uid": uid}
- elif settings.GOOGLE_OAUTH_ENABLED and isinstance(backend, GoogleOAuth2):
+ if settings.GOOGLE_OAUTH_ENABLED and isinstance(backend, GoogleOAuth2):
"""Return user details from Google account"""
if "sub" in response:
google_uid = response["sub"]
@@ -51,15 +51,13 @@ def social_uid(backend, details, response, *args, **kwargs):
"first_name": first_name,
"last_name": last_name,
"uid": google_uid}
- else:
- uid = backend.get_user_id(details, response)
- # Used for most backends
- if uid:
- return {"uid": uid}
- # Until OKTA PR in social-core is merged
- # This modified way needs to work
- else:
- return {"uid": response.get("preferred_username")}
+ uid = backend.get_user_id(details, response)
+ # Used for most backends
+ if uid:
+ return {"uid": uid}
+ # Until OKTA PR in social-core is merged
+ # This modified way needs to work
+ return {"uid": response.get("preferred_username")}
def modify_permissions(backend, uid, user=None, social=None, *args, **kwargs):
@@ -107,8 +105,7 @@ def update_azure_groups(backend, uid, user=None, social=None, *args, **kwargs):
def is_group_id(group):
if re.search(r"^[a-zA-Z0-9]{8,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{4,}-[a-zA-Z0-9]{12,}$", group):
return True
- else:
- return False
+ return False
def assign_user_to_groups(user, group_names, social_provider):
@@ -183,7 +180,6 @@ def sanitize_username(username):
def create_user(strategy, details, backend, user=None, *args, **kwargs):
if not settings.SOCIAL_AUTH_CREATE_USER:
- return
- else:
- details["username"] = sanitize_username(details.get("username"))
- return social_core.pipeline.user.create_user(strategy, details, backend, user, args, kwargs)
+ return None
+ details["username"] = sanitize_username(details.get("username"))
+ return social_core.pipeline.user.create_user(strategy, details, backend, user, args, kwargs)
diff --git a/dojo/product/queries.py b/dojo/product/queries.py
index 8d562c0f9a4..0be35276ffe 100644
--- a/dojo/product/queries.py
+++ b/dojo/product/queries.py
@@ -59,20 +59,17 @@ def get_authorized_products(permission, user=None):
member=Exists(authorized_product_roles),
prod_type__authorized_group=Exists(authorized_product_type_groups),
authorized_group=Exists(authorized_product_groups)).order_by("name")
- products = products.filter(
+ return products.filter(
Q(prod_type__member=True) | Q(member=True)
| Q(prod_type__authorized_group=True) | Q(authorized_group=True))
- return products
-
def get_authorized_members_for_product(product, permission):
user = get_current_user()
if user.is_superuser or user_has_permission(user, product, permission):
return Product_Member.objects.filter(product=product).order_by("user__first_name", "user__last_name").select_related("role", "user")
- else:
- return None
+ return None
def get_authorized_groups_for_product(product, permission):
@@ -81,8 +78,7 @@ def get_authorized_groups_for_product(product, permission):
if user.is_superuser or user_has_permission(user, product, permission):
authorized_groups = get_authorized_groups(Permissions.Group_View)
return Product_Group.objects.filter(product=product, group__in=authorized_groups).order_by("group__name").select_related("role")
- else:
- return None
+ return None
def get_authorized_product_members(permission):
@@ -164,12 +160,10 @@ def get_authorized_app_analysis(permission):
product__member=Exists(authorized_product_roles),
product__prod_type__authorized_group=Exists(authorized_product_type_groups),
product__authorized_group=Exists(authorized_product_groups)).order_by("id")
- app_analysis = app_analysis.filter(
+ return app_analysis.filter(
Q(product__prod_type__member=True) | Q(product__member=True)
| Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
- return app_analysis
-
def get_authorized_dojo_meta(permission):
user = get_current_user()
@@ -246,7 +240,7 @@ def get_authorized_dojo_meta(permission):
finding__test__engagement__product__prod_type__authorized_group=Exists(finding_authorized_product_type_groups),
finding__test__engagement__product__authorized_group=Exists(finding_authorized_product_groups),
).order_by("id")
- dojo_meta = dojo_meta.filter(
+ return dojo_meta.filter(
Q(product__prod_type__member=True)
| Q(product__member=True)
| Q(product__prod_type__authorized_group=True)
@@ -260,8 +254,6 @@ def get_authorized_dojo_meta(permission):
| Q(finding__test__engagement__product__prod_type__authorized_group=True)
| Q(finding__test__engagement__product__authorized_group=True))
- return dojo_meta
-
def get_authorized_languages(permission):
user = get_current_user()
@@ -297,12 +289,10 @@ def get_authorized_languages(permission):
product__member=Exists(authorized_product_roles),
product__prod_type__authorized_group=Exists(authorized_product_type_groups),
product__authorized_group=Exists(authorized_product_groups)).order_by("id")
- languages = languages.filter(
+ return languages.filter(
Q(product__prod_type__member=True) | Q(product__member=True)
| Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
- return languages
-
def get_authorized_engagement_presets(permission):
user = get_current_user()
@@ -338,12 +328,10 @@ def get_authorized_engagement_presets(permission):
product__member=Exists(authorized_product_roles),
product__prod_type__authorized_group=Exists(authorized_product_type_groups),
product__authorized_group=Exists(authorized_product_groups)).order_by("id")
- engagement_presets = engagement_presets.filter(
+ return engagement_presets.filter(
Q(product__prod_type__member=True) | Q(product__member=True)
| Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
- return engagement_presets
-
def get_authorized_product_api_scan_configurations(permission):
user = get_current_user()
@@ -379,8 +367,6 @@ def get_authorized_product_api_scan_configurations(permission):
product__member=Exists(authorized_product_roles),
product__prod_type__authorized_group=Exists(authorized_product_type_groups),
product__authorized_group=Exists(authorized_product_groups)).order_by("id")
- product_api_scan_configurations = product_api_scan_configurations.filter(
+ return product_api_scan_configurations.filter(
Q(product__prod_type__member=True) | Q(product__member=True)
| Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
-
- return product_api_scan_configurations
diff --git a/dojo/product/views.py b/dojo/product/views.py
index e887938d450..6680c2e5340 100644
--- a/dojo/product/views.py
+++ b/dojo/product/views.py
@@ -349,11 +349,10 @@ def identify_view(request):
return view
msg = 'invalid view, view must be "Endpoint" or "Finding"'
raise ValueError(msg)
- else:
- if get_data.get("finding__severity", None):
- return "Endpoint"
- elif get_data.get("false_positive", None):
- return "Endpoint"
+ if get_data.get("finding__severity", None):
+ return "Endpoint"
+ if get_data.get("false_positive", None):
+ return "Endpoint"
referer = request.META.get("HTTP_REFERER", None)
if referer:
if referer.find("type=Endpoint") > -1:
@@ -904,9 +903,8 @@ def new_product(request, ptid=None):
if not error:
return HttpResponseRedirect(reverse("view_product", args=(product.id,)))
- else:
- # engagement was saved, but JIRA errors, so goto edit_product
- return HttpResponseRedirect(reverse("edit_product", args=(product.id,)))
+ # engagement was saved, but JIRA errors, so goto edit_product
+ return HttpResponseRedirect(reverse("edit_product", args=(product.id,)))
else:
if get_system_setting("enable_jira"):
jira_project_form = JIRAProjectForm()
@@ -1029,9 +1027,8 @@ def delete_product(request, pid):
extra_tags="alert-success")
logger.debug("delete_product: POST RETURN")
return HttpResponseRedirect(reverse("product"))
- else:
- logger.debug("delete_product: POST INVALID FORM")
- logger.error(form.errors)
+ logger.debug("delete_product: POST INVALID FORM")
+ logger.error(form.errors)
logger.debug("delete_product: GET")
@@ -1104,16 +1101,13 @@ def new_eng_for_app(request, pid, cicd=False):
if not error:
if "_Add Tests" in request.POST:
return HttpResponseRedirect(reverse("add_tests", args=(engagement.id,)))
- elif "_Import Scan Results" in request.POST:
+ if "_Import Scan Results" in request.POST:
return HttpResponseRedirect(reverse("import_scan_results", args=(engagement.id,)))
- else:
- return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id,)))
- else:
- # engagement was saved, but JIRA errors, so goto edit_engagement
- logger.debug("new_eng_for_app: jira errors")
- return HttpResponseRedirect(reverse("edit_engagement", args=(engagement.id,)))
- else:
- logger.debug(form.errors)
+ return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id,)))
+ # engagement was saved, but JIRA errors, so goto edit_engagement
+ logger.debug("new_eng_for_app: jira errors")
+ return HttpResponseRedirect(reverse("edit_engagement", args=(engagement.id,)))
+ logger.debug(form.errors)
else:
form = EngForm(initial={"lead": request.user, "target_start": timezone.now().date(),
"target_end": timezone.now().date() + timedelta(days=7), "product": product}, cicd=cicd,
@@ -1223,8 +1217,7 @@ def add_meta_data(request, pid):
extra_tags="alert-success")
if "add_another" in request.POST:
return HttpResponseRedirect(reverse("add_meta_data", args=(pid,)))
- else:
- return HttpResponseRedirect(reverse("view_product", args=(pid,)))
+ return HttpResponseRedirect(reverse("view_product", args=(pid,)))
else:
form = DojoMetaDataForm()
@@ -1288,12 +1281,11 @@ def get_engagement(self, product: Product):
def get_test(self, engagement: Engagement, test_type: Test_Type):
if test := Test.objects.filter(engagement=engagement).first():
return test
- else:
- return Test.objects.create(
- engagement=engagement,
- test_type=test_type,
- target_start=timezone.now(),
- target_end=timezone.now())
+ return Test.objects.create(
+ engagement=engagement,
+ test_type=test_type,
+ target_start=timezone.now(),
+ target_end=timezone.now())
def create_nested_objects(self, product: Product):
engagement = self.get_engagement(product)
@@ -1406,9 +1398,8 @@ def process_finding_form(self, request: HttpRequest, test: Test, context: dict):
finding.save()
return finding, request, True
- else:
- add_error_message_to_response("The form has errors, please correct them below.")
- add_field_errors_to_response(context["form"])
+ add_error_message_to_response("The form has errors, please correct them below.")
+ add_field_errors_to_response(context["form"])
return finding, request, False
@@ -1451,8 +1442,7 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic
)
return request, True, push_to_jira
- else:
- add_field_errors_to_response(context["jform"])
+ add_field_errors_to_response(context["jform"])
return request, False, False
@@ -1464,8 +1454,7 @@ def process_github_form(self, request: HttpRequest, finding: Finding, context: d
add_external_issue(finding, "github")
return request, True
- else:
- add_field_errors_to_response(context["gform"])
+ add_field_errors_to_response(context["gform"])
return request, False
@@ -1537,10 +1526,8 @@ def post(self, request: HttpRequest, product_id: int):
if success:
if "_Finished" in request.POST:
return HttpResponseRedirect(reverse("view_test", args=(test.id,)))
- else:
- return HttpResponseRedirect(reverse("add_findings", args=(test.id,)))
- else:
- context["form_error"] = True
+ return HttpResponseRedirect(reverse("add_findings", args=(test.id,)))
+ context["form_error"] = True
# Render the form
return render(request, self.get_template(), context)
@@ -1720,8 +1707,7 @@ def edit_product_member(request, memberid):
extra_tags="alert-success")
if is_title_in_breadcrumbs("View User"):
return HttpResponseRedirect(reverse("view_user", args=(member.user.id,)))
- else:
- return HttpResponseRedirect(reverse("view_product", args=(member.product.id,)))
+ return HttpResponseRedirect(reverse("view_product", args=(member.product.id,)))
product_tab = Product_Tab(member.product, title=_("Edit Product Member"), tab="settings")
return render(request, "dojo/edit_product_member.html", {
"memberid": memberid,
@@ -1745,11 +1731,9 @@ def delete_product_member(request, memberid):
extra_tags="alert-success")
if is_title_in_breadcrumbs("View User"):
return HttpResponseRedirect(reverse("view_user", args=(member.user.id,)))
- else:
- if user == request.user:
- return HttpResponseRedirect(reverse("product"))
- else:
- return HttpResponseRedirect(reverse("view_product", args=(member.product.id,)))
+ if user == request.user:
+ return HttpResponseRedirect(reverse("product"))
+ return HttpResponseRedirect(reverse("view_product", args=(member.product.id,)))
product_tab = Product_Tab(member.product, title=_("Delete Product Member"), tab="settings")
return render(request, "dojo/delete_product_member.html", {
"memberid": memberid,
@@ -1781,8 +1765,7 @@ def add_api_scan_configuration(request, pid):
extra_tags="alert-success")
if "add_another" in request.POST:
return HttpResponseRedirect(reverse("add_api_scan_configuration", args=(pid,)))
- else:
- return HttpResponseRedirect(reverse("view_api_scan_configurations", args=(pid,)))
+ return HttpResponseRedirect(reverse("view_api_scan_configurations", args=(pid,)))
except Exception as e:
logger.exception(e)
messages.add_message(request,
@@ -1879,8 +1862,7 @@ def delete_api_scan_configuration(request, pid, pascid):
_("API Scan Configuration deleted."),
extra_tags="alert-success")
return HttpResponseRedirect(reverse("view_api_scan_configurations", args=(pid,)))
- else:
- form = DeleteProduct_API_Scan_ConfigurationForm(instance=product_api_scan_configuration)
+ form = DeleteProduct_API_Scan_ConfigurationForm(instance=product_api_scan_configuration)
product_tab = Product_Tab(get_object_or_404(Product, id=pid), title=_("Delete Tool Configuration"), tab="settings")
return render(request,
@@ -1914,8 +1896,7 @@ def edit_product_group(request, groupid):
extra_tags="alert-success")
if is_title_in_breadcrumbs("View Group"):
return HttpResponseRedirect(reverse("view_group", args=(group.group.id,)))
- else:
- return HttpResponseRedirect(reverse("view_product", args=(group.product.id,)))
+ return HttpResponseRedirect(reverse("view_product", args=(group.product.id,)))
product_tab = Product_Tab(group.product, title=_("Edit Product Group"), tab="settings")
return render(request, "dojo/edit_product_group.html", {
@@ -1940,10 +1921,9 @@ def delete_product_group(request, groupid):
extra_tags="alert-success")
if is_title_in_breadcrumbs("View Group"):
return HttpResponseRedirect(reverse("view_group", args=(group.group.id,)))
- else:
- # TODO: If user was in the group that was deleted and no longer has access, redirect back to product listing
- # page
- return HttpResponseRedirect(reverse("view_product", args=(group.product.id,)))
+ # TODO: If user was in the group that was deleted and no longer has access, redirect back to product listing
+ # page
+ return HttpResponseRedirect(reverse("view_product", args=(group.product.id,)))
product_tab = Product_Tab(group.product, title=_("Delete Product Group"), tab="settings")
return render(request, "dojo/delete_product_group.html", {
diff --git a/dojo/product_type/queries.py b/dojo/product_type/queries.py
index 737584a5b05..5129cfd789b 100644
--- a/dojo/product_type/queries.py
+++ b/dojo/product_type/queries.py
@@ -35,9 +35,7 @@ def get_authorized_product_types(permission):
product_types = Product_Type.objects.annotate(
member=Exists(authorized_roles),
authorized_group=Exists(authorized_groups)).order_by("name")
- product_types = product_types.filter(Q(member=True) | Q(authorized_group=True))
-
- return product_types
+ return product_types.filter(Q(member=True) | Q(authorized_group=True))
def get_authorized_members_for_product_type(product_type, permission):
@@ -45,8 +43,7 @@ def get_authorized_members_for_product_type(product_type, permission):
if user.is_superuser or user_has_permission(user, product_type, permission):
return Product_Type_Member.objects.filter(product_type=product_type).order_by("user__first_name", "user__last_name").select_related("role", "product_type", "user")
- else:
- return None
+ return None
def get_authorized_groups_for_product_type(product_type, permission):
@@ -55,8 +52,7 @@ def get_authorized_groups_for_product_type(product_type, permission):
if user.is_superuser or user_has_permission(user, product_type, permission):
authorized_groups = get_authorized_groups(Permissions.Group_View)
return Product_Type_Group.objects.filter(product_type=product_type, group__in=authorized_groups).order_by("group__name").select_related("role", "group")
- else:
- return None
+ return None
def get_authorized_product_type_members(permission):
diff --git a/dojo/product_type/views.py b/dojo/product_type/views.py
index 302aa6dbbf9..63c38d8df4d 100644
--- a/dojo/product_type/views.py
+++ b/dojo/product_type/views.py
@@ -242,8 +242,7 @@ def edit_product_type_member(request, memberid):
extra_tags="alert-warning")
if is_title_in_breadcrumbs("View User"):
return HttpResponseRedirect(reverse("view_user", args=(member.user.id, )))
- else:
- return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, )))
+ return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, )))
if member.role.is_owner and not user_has_permission(request.user, member.product_type, Permissions.Product_Type_Member_Add_Owner):
messages.add_message(request,
messages.WARNING,
@@ -257,8 +256,7 @@ def edit_product_type_member(request, memberid):
extra_tags="alert-success")
if is_title_in_breadcrumbs("View User"):
return HttpResponseRedirect(reverse("view_user", args=(member.user.id, )))
- else:
- return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, )))
+ return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, )))
add_breadcrumb(title=page_name, top_level=False, request=request)
return render(request, "dojo/edit_product_type_member.html", {
"name": page_name,
@@ -292,11 +290,9 @@ def delete_product_type_member(request, memberid):
extra_tags="alert-success")
if is_title_in_breadcrumbs("View User"):
return HttpResponseRedirect(reverse("view_user", args=(member.user.id, )))
- else:
- if user == request.user:
- return HttpResponseRedirect(reverse("product_type"))
- else:
- return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, )))
+ if user == request.user:
+ return HttpResponseRedirect(reverse("product_type"))
+ return HttpResponseRedirect(reverse("view_product_type", args=(member.product_type.id, )))
add_breadcrumb(title=page_name, top_level=False, request=request)
return render(request, "dojo/delete_product_type_member.html", {
"name": page_name,
@@ -365,8 +361,7 @@ def edit_product_type_group(request, groupid):
extra_tags="alert-success")
if is_title_in_breadcrumbs("View Group"):
return HttpResponseRedirect(reverse("view_group", args=(group.group.id,)))
- else:
- return HttpResponseRedirect(reverse("view_product_type", args=(group.product_type.id,)))
+ return HttpResponseRedirect(reverse("view_product_type", args=(group.product_type.id,)))
add_breadcrumb(title=page_name, top_level=False, request=request)
return render(request, "dojo/edit_product_type_group.html", {
@@ -392,10 +387,9 @@ def delete_product_type_group(request, groupid):
extra_tags="alert-success")
if is_title_in_breadcrumbs("View Group"):
return HttpResponseRedirect(reverse("view_group", args=(group.group.id, )))
- else:
- # TODO: If user was in the group that was deleted and no longer has access, redirect them to the product
- # types page
- return HttpResponseRedirect(reverse("view_product_type", args=(group.product_type.id, )))
+ # TODO: If user was in the group that was deleted and no longer has access, redirect them to the product
+ # types page
+ return HttpResponseRedirect(reverse("view_product_type", args=(group.product_type.id, )))
add_breadcrumb(page_name, top_level=False, request=request)
return render(request, "dojo/delete_product_type_group.html", {
diff --git a/dojo/regulations/views.py b/dojo/regulations/views.py
index f4d5004d074..e9a5f1a9f55 100644
--- a/dojo/regulations/views.py
+++ b/dojo/regulations/views.py
@@ -45,7 +45,7 @@ def edit_regulations(request, ttid):
"Regulation Deleted.",
extra_tags="alert-success")
return HttpResponseRedirect(reverse("regulations"))
- elif request.method == "POST":
+ if request.method == "POST":
tform = RegulationForm(request.POST, instance=regulation)
if tform.is_valid():
tform.save()
diff --git a/dojo/remote_user.py b/dojo/remote_user.py
index 44355d9f453..764af4e548b 100644
--- a/dojo/remote_user.py
+++ b/dojo/remote_user.py
@@ -20,32 +20,28 @@ def authenticate(self, request):
self.header = settings.AUTH_REMOTEUSER_USERNAME_HEADER
if self.header in request.META:
return super().authenticate(request)
- else:
- return None
- else:
- logger.debug("Requested came from untrusted proxy %s; This is list of trusted proxies: %s",
- IPAddress(request.META["REMOTE_ADDR"]),
- settings.AUTH_REMOTEUSER_TRUSTED_PROXY)
return None
+ logger.debug("Requested came from untrusted proxy %s; This is list of trusted proxies: %s",
+ IPAddress(request.META["REMOTE_ADDR"]),
+ settings.AUTH_REMOTEUSER_TRUSTED_PROXY)
+ return None
class RemoteUserMiddleware(OriginalRemoteUserMiddleware):
def process_request(self, request):
if not settings.AUTH_REMOTEUSER_ENABLED:
- return
+ return None
# process only if request is comming from the trusted proxy node
if IPAddress(request.META["REMOTE_ADDR"]) in settings.AUTH_REMOTEUSER_TRUSTED_PROXY:
self.header = settings.AUTH_REMOTEUSER_USERNAME_HEADER
if self.header in request.META:
return super().process_request(request)
- else:
- return
- else:
- logger.debug("Requested came from untrusted proxy %s; This is list of trusted proxies: %s",
- IPAddress(request.META["REMOTE_ADDR"]),
- settings.AUTH_REMOTEUSER_TRUSTED_PROXY)
- return
+ return None
+ logger.debug("Requested came from untrusted proxy %s; This is list of trusted proxies: %s",
+ IPAddress(request.META["REMOTE_ADDR"]),
+ settings.AUTH_REMOTEUSER_TRUSTED_PROXY)
+ return None
class PersistentRemoteUserMiddleware(RemoteUserMiddleware):
diff --git a/dojo/reports/views.py b/dojo/reports/views.py
index ca13eae54ca..aacf4369333 100644
--- a/dojo/reports/views.py
+++ b/dojo/reports/views.py
@@ -122,8 +122,7 @@ def post(self, request: HttpRequest) -> HttpResponse:
if form.is_valid():
self._set_state(request)
return render(request, self.get_template(), self.get_context())
- else:
- raise PermissionDenied
+ raise PermissionDenied
def _set_state(self, request: HttpRequest):
self.request = request
@@ -154,8 +153,7 @@ def get_form(self, request):
def get_template(self):
if self.report_format == "HTML":
return "dojo/custom_html_report.html"
- else:
- raise PermissionDenied
+ raise PermissionDenied
def get_context(self):
return {
@@ -310,8 +308,7 @@ def product_endpoint_report(request, pid):
"user": request.user,
"title": "Generate Report",
})
- else:
- raise Http404
+ raise Http404
product_tab = Product_Tab(product, "Product Endpoint Report", tab="endpoints")
return render(request,
@@ -351,9 +348,8 @@ def generate_report(request, obj, host_view=False):
if obj is None:
msg = "No object is given to generate report for"
raise Exception(msg)
- else:
- msg = f"Report cannot be generated for object of type {type(obj).__name__}"
- raise Exception(msg)
+ msg = f"Report cannot be generated for object of type {type(obj).__name__}"
+ raise Exception(msg)
report_format = request.GET.get("report_type", "HTML")
include_finding_notes = int(request.GET.get("include_finding_notes", 0))
@@ -584,8 +580,7 @@ def generate_report(request, obj, host_view=False):
"context": context,
})
- else:
- raise Http404
+ raise Http404
paged_findings = get_page_items(request, findings.qs.distinct().order_by("numerical_severity"), 25)
product_tab = None
@@ -654,9 +649,8 @@ def get_findings(request):
if not url:
msg = "Please use the report button when viewing findings"
raise Http404(msg)
- else:
- if url.startswith("url="):
- url = url[4:]
+ if url.startswith("url="):
+ url = url[4:]
views = ["all", "open", "inactive", "verified",
"closed", "accepted", "out_of_scope",
diff --git a/dojo/risk_acceptance/helper.py b/dojo/risk_acceptance/helper.py
index a1d628b33df..3ebbe9bf6a4 100644
--- a/dojo/risk_acceptance/helper.py
+++ b/dojo/risk_acceptance/helper.py
@@ -201,16 +201,14 @@ def accepted_message_creator(risk_acceptance, heads_up_days=0):
escape_for_jira(risk_acceptance.name),
get_full_url(reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id))),
len(risk_acceptance.accepted_findings.all()), timezone.localtime(risk_acceptance.expiration_date).strftime("%b %d, %Y"))
- else:
- return "Finding has been risk accepted"
+ return "Finding has been risk accepted"
def unaccepted_message_creator(risk_acceptance, heads_up_days=0):
if risk_acceptance:
return "finding was unaccepted/deleted from risk acceptance [({})|{}]".format(escape_for_jira(risk_acceptance.name),
get_full_url(reverse("view_risk_acceptance", args=(risk_acceptance.engagement.id, risk_acceptance.id))))
- else:
- return "Finding is no longer risk accepted"
+ return "Finding is no longer risk accepted"
def post_jira_comment(finding, message_factory, heads_up_days=0):
diff --git a/dojo/risk_acceptance/queries.py b/dojo/risk_acceptance/queries.py
index 9cbf89fb5c2..72282af21e7 100644
--- a/dojo/risk_acceptance/queries.py
+++ b/dojo/risk_acceptance/queries.py
@@ -39,8 +39,6 @@ def get_authorized_risk_acceptances(permission):
product__member=Exists(authorized_product_roles),
product__prod_type__authorized_group=Exists(authorized_product_type_groups),
product__authorized_group=Exists(authorized_product_groups)).order_by("id")
- risk_acceptances = risk_acceptances.filter(
+ return risk_acceptances.filter(
Q(product__prod_type__member=True) | Q(product__member=True)
| Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
-
- return risk_acceptances
diff --git a/dojo/search/views.py b/dojo/search/views.py
index 3e3a75923ca..604e9ecd68c 100644
--- a/dojo/search/views.py
+++ b/dojo/search/views.py
@@ -31,6 +31,45 @@
def simple_search(request):
+
+ """
+ query: some keywords
+ operators: {}
+ keywords: ['some', 'keywords']
+
+ query: some key-word
+ operators: {}
+ keywords: ['some', 'key-word']
+
+ query: keyword with "space inside"
+ operators: {}
+ keywords: ['keyword', 'with', 'space inside']
+
+ query: tag:anchore word tags:php
+ operators: {'tag': ['anchore'], 'tags': ['php']}
+ keywords: ['word']
+
+ query: tags:php,magento
+ operators: {'tags': ['php,magento']}
+ keywords: []
+
+ query: tags:php tags:magento
+ operators: {'tags': ['php', 'magento']}
+ keywords: []
+
+ query: tags:"php, magento"
+ operators: {'tags': ['php, magento']}
+ keywords: []
+
+ query: tags:anchorse some "space inside"
+ operators: {'tags': ['anchorse']}
+ keywords: ['some', 'space inside']
+
+ query: tags:anchore vulnerability_id:CVE-2020-1234 jquery
+ operators: {'tags': ['anchore'], 'vulnerability_id': ['CVE-2020-1234']}
+ keywords: ['jquery']
+ """
+
tests = None
findings = None
finding_templates = None
@@ -364,44 +403,6 @@ def simple_search(request):
response.delete_cookie("highlight", path="/")
return response
- """
- query: some keywords
- operators: {}
- keywords: ['some', 'keywords']
-
- query: some key-word
- operators: {}
- keywords: ['some', 'key-word']
-
- query: keyword with "space inside"
- operators: {}
- keywords: ['keyword', 'with', 'space inside']
-
- query: tag:anchore word tags:php
- operators: {'tag': ['anchore'], 'tags': ['php']}
- keywords: ['word']
-
- query: tags:php,magento
- operators: {'tags': ['php,magento']}
- keywords: []
-
- query: tags:php tags:magento
- operators: {'tags': ['php', 'magento']}
- keywords: []
-
- query: tags:"php, magento"
- operators: {'tags': ['php, magento']}
- keywords: []
-
- query: tags:anchorse some "space inside"
- operators: {'tags': ['anchorse']}
- keywords: ['some', 'space inside']
-
- query: tags:anchore vulnerability_id:CVE-2020-1234 jquery
- operators: {'tags': ['anchore'], 'vulnerability_id': ['CVE-2020-1234']}
- keywords: ['jquery']
- """
-
# it's not google grade parsing, but let's do some basic stuff right
def parse_search_query(clean_query):
@@ -448,8 +449,7 @@ def vulnerability_id_fix(keyword):
if vulnerability_ids:
return " ".join(vulnerability_ids)
- else:
- return keyword
+ return keyword
def apply_tag_filters(qs, operators, skip_relations=False):
diff --git a/dojo/sla_config/views.py b/dojo/sla_config/views.py
index f95461283fa..c07e8dadc2a 100644
--- a/dojo/sla_config/views.py
+++ b/dojo/sla_config/views.py
@@ -56,14 +56,13 @@ def edit_sla_config(request, slaid):
"SLA Configuration Deleted.",
extra_tags="alert-success")
return HttpResponseRedirect(reverse("sla_config"))
- else:
- messages.add_message(request,
- messages.ERROR,
- "The Default SLA Configuration cannot be deleted.",
- extra_tags="alert-danger")
- return HttpResponseRedirect(reverse("sla_config"))
+ messages.add_message(request,
+ messages.ERROR,
+ "The Default SLA Configuration cannot be deleted.",
+ extra_tags="alert-danger")
+ return HttpResponseRedirect(reverse("sla_config"))
- elif request.method == "POST":
+ if request.method == "POST":
form = SLAConfigForm(request.POST, instance=sla_config)
if form.is_valid():
form.save(commit=True)
diff --git a/dojo/survey/views.py b/dojo/survey/views.py
index 29b4a2fc81d..d83803f2efd 100644
--- a/dojo/survey/views.py
+++ b/dojo/survey/views.py
@@ -77,12 +77,11 @@ def delete_engagement_survey(request, eid, sid):
"Questionnaire deleted successfully.",
extra_tags="alert-success")
return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id, )))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Unable to delete Questionnaire.",
- extra_tags="alert-danger")
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Unable to delete Questionnaire.",
+ extra_tags="alert-danger")
add_breadcrumb(
title="Delete " + survey.survey.name + " Questionnaire",
@@ -145,12 +144,11 @@ def answer_questionnaire(request, eid, sid):
"Successfully answered, all answers valid.",
extra_tags="alert-success")
return HttpResponseRedirect(reverse("view_engagement", args=(engagement.id, )))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Questionnaire has errors, please correct.",
- extra_tags="alert-danger")
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Questionnaire has errors, please correct.",
+ extra_tags="alert-danger")
add_breadcrumb(
title="Answer " + survey.survey.name + " Survey",
top_level=False,
@@ -243,12 +241,11 @@ def add_questionnaire(request, eid):
if "respond_survey" in request.POST:
return HttpResponseRedirect(reverse("answer_questionnaire", args=(eid, survey.id)))
return HttpResponseRedirect(reverse("view_engagement", args=(eid,)))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Questionnaire could not be added.",
- extra_tags="alert-danger")
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Questionnaire could not be added.",
+ extra_tags="alert-danger")
form.fields["survey"].queryset = surveys
add_breadcrumb(title="Add Questionnaire", top_level=False, request=request)
@@ -290,12 +287,11 @@ def edit_questionnaire(request, sid):
"Questionnaire successfully updated, you may now add/edit questions.",
extra_tags="alert-success")
return HttpResponseRedirect(reverse("edit_questionnaire", args=(survey.id,)))
- else:
- messages.add_message(
- request,
- messages.SUCCESS,
- "No changes detected, questionnaire not updated.",
- extra_tags="alert-warning")
+ messages.add_message(
+ request,
+ messages.SUCCESS,
+ "No changes detected, questionnaire not updated.",
+ extra_tags="alert-warning")
if "add_questions" in request.POST:
return HttpResponseRedirect(reverse("edit_questionnaire_questions", args=(survey.id,)))
else:
@@ -360,14 +356,12 @@ def create_questionnaire(request):
extra_tags="alert-success")
if "add_questions" in request.POST:
return HttpResponseRedirect(reverse("edit_questionnaire_questions", args=(survey.id,)))
- else:
- return HttpResponseRedirect(reverse("questionnaire"))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Please correct any errors displayed below.",
- extra_tags="alert-danger")
+ return HttpResponseRedirect(reverse("questionnaire"))
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Please correct any errors displayed below.",
+ extra_tags="alert-danger")
add_breadcrumb(title="Create Questionnaire", top_level=False, request=request)
return render(request, "defectDojo-engagement-survey/create_questionnaire.html", {
@@ -411,12 +405,11 @@ def edit_questionnaire_questions(request, sid):
"Questionnaire questions successfully saved.",
extra_tags="alert-success")
return HttpResponseRedirect(reverse("questionnaire"))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Questionnaire questions not saved, please correct any errors displayed below.",
- extra_tags="alert-success")
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Questionnaire questions not saved, please correct any errors displayed below.",
+ extra_tags="alert-success")
add_breadcrumb(title="Update Questionnaire Questions", top_level=False, request=request)
return render(request, "defectDojo-engagement-survey/edit_survey_questions.html", {
@@ -488,8 +481,7 @@ def create_question(request):
"Text Question added successfully.",
extra_tags="alert-success")
return HttpResponseRedirect(reverse("questions"))
- else:
- error = True
+ error = True
elif type == "choice":
if choiceQuestionFrom.is_valid():
@@ -511,8 +503,7 @@ def create_question(request):
"Choice Question added successfully.",
extra_tags="alert-success")
return HttpResponseRedirect(reverse("questions"))
- else:
- error = True
+ error = True
if "_popup" in request.GET and not error:
resp = f''
@@ -638,12 +629,11 @@ def add_empty_questionnaire(request):
if "respond_survey" in request.POST:
return HttpResponseRedirect(reverse("dashboard"))
return HttpResponseRedirect(reverse("questionnaire"))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Questionnaire could not be added.",
- extra_tags="alert-danger")
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Questionnaire could not be added.",
+ extra_tags="alert-danger")
form.fields["survey"].queryset = surveys
add_breadcrumb(title="Add Empty Questionnaire", top_level=False, request=request)
@@ -695,12 +685,11 @@ def delete_empty_questionnaire(request, esid):
"Questionnaire deleted successfully.",
extra_tags="alert-success")
return HttpResponseRedirect(reverse("survey"))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Unable to delete Questionnaire.",
- extra_tags="alert-danger")
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Unable to delete Questionnaire.",
+ extra_tags="alert-danger")
add_breadcrumb(
title="Delete " + survey.survey.name + " Questionnaire",
@@ -731,12 +720,11 @@ def delete_general_questionnaire(request, esid):
"Questionnaire deleted successfully.",
extra_tags="alert-success")
return HttpResponseRedirect(reverse("questionnaire"))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Unable to delete questionnaire.",
- extra_tags="alert-danger")
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Unable to delete questionnaire.",
+ extra_tags="alert-danger")
add_breadcrumb(
title="Delete " + survey.survey.name + " Questionnaire",
@@ -815,12 +803,11 @@ def answer_empty_survey(request, esid):
extra_tags="alert-success")
return HttpResponseRedirect(
reverse("dashboard"))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Questionnaire has errors, please correct.",
- extra_tags="alert-danger")
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Questionnaire has errors, please correct.",
+ extra_tags="alert-danger")
add_breadcrumb(
title="Answer Empty " + engagement_survey.name + " Questionnaire",
top_level=False,
@@ -857,12 +844,11 @@ def engagement_empty_survey(request, esid):
"Engagement created and questionnaire successfully linked.",
extra_tags="alert-success")
return HttpResponseRedirect(reverse("edit_engagement", args=(engagement.id, )))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Questionnaire could not be added.",
- extra_tags="alert-danger")
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Questionnaire could not be added.",
+ extra_tags="alert-danger")
add_breadcrumb(
title="Link Questionnaire to new Engagement",
top_level=False,
diff --git a/dojo/system_settings/views.py b/dojo/system_settings/views.py
index 3690201a050..4c952d57a0f 100644
--- a/dojo/system_settings/views.py
+++ b/dojo/system_settings/views.py
@@ -116,7 +116,7 @@ def get_celery_status(
context["celery_msg"] = "Celery needs to have the setting CELERY_RESULT_BACKEND = 'db+sqlite:///dojo.celeryresults.sqlite' set in settings.py."
context["celery_status"] = "Unknown"
- return None
+ return
def get_template(self) -> str:
return "dojo/system_settings.html"
diff --git a/dojo/tags_signals.py b/dojo/tags_signals.py
index f7e09fa9b0c..605996a602c 100644
--- a/dojo/tags_signals.py
+++ b/dojo/tags_signals.py
@@ -77,3 +77,4 @@ def get_product(instance):
return instance.engagement.product
if isinstance(instance, Finding):
return instance.test.engagement.product
+ return None
diff --git a/dojo/templatetags/display_tags.py b/dojo/templatetags/display_tags.py
index bd0497a6f38..483e16fe4f2 100644
--- a/dojo/templatetags/display_tags.py
+++ b/dojo/templatetags/display_tags.py
@@ -87,6 +87,7 @@ def markdown_render(value):
"markdown.extensions.toc",
"markdown.extensions.tables"])
return mark_safe(bleach.clean(markdown_text, tags=markdown_tags, attributes=markdown_attrs, css_sanitizer=markdown_styles))
+ return None
def text_shortener(value, length):
@@ -368,8 +369,7 @@ def overdue(date1):
def notspecified(text):
if text:
return text
- else:
- return mark_safe('Not Specified')
+ return mark_safe('Not Specified')
@register.tag
@@ -510,32 +510,29 @@ def business_criticality_icon(value):
return mark_safe(stars(1, 5, "Very Low"))
if value == Product.NONE_CRITICALITY:
return mark_safe(stars(0, 5, "None"))
- else:
- return "" # mark_safe(not_specified_icon('Business Criticality Not Specified'))
+ return "" # mark_safe(not_specified_icon('Business Criticality Not Specified'))
@register.filter
def last_value(value):
if "/" in value:
return value.rsplit("/")[-1:][0]
- else:
- return value
+ return value
@register.filter
def platform_icon(value):
if value == Product.WEB_PLATFORM:
return mark_safe(icon("list-alt", "Web"))
- elif value == Product.DESKTOP_PLATFORM:
+ if value == Product.DESKTOP_PLATFORM:
return mark_safe(icon("desktop", "Desktop"))
- elif value == Product.MOBILE_PLATFORM:
+ if value == Product.MOBILE_PLATFORM:
return mark_safe(icon("mobile", "Mobile"))
- elif value == Product.WEB_SERVICE_PLATFORM:
+ if value == Product.WEB_SERVICE_PLATFORM:
return mark_safe(icon("plug", "Web Service"))
- elif value == Product.IOT:
+ if value == Product.IOT:
return mark_safe(icon("random", "Internet of Things"))
- else:
- return "" # mark_safe(not_specified_icon('Platform Not Specified'))
+ return "" # mark_safe(not_specified_icon('Platform Not Specified'))
@register.filter
@@ -546,8 +543,7 @@ def lifecycle_icon(value):
return mark_safe(icon("ship", "Sustain"))
if value == Product.RETIREMENT:
return mark_safe(icon("moon-o", "Retire"))
- else:
- return "" # mark_safe(not_specified_icon('Lifecycle Not Specified'))
+ return "" # mark_safe(not_specified_icon('Lifecycle Not Specified'))
@register.filter
@@ -564,24 +560,21 @@ def origin_icon(value):
return mark_safe(icon("code", "Open Source"))
if value == Product.OUTSOURCED_ORIGIN:
return mark_safe(icon("globe", "Outsourced"))
- else:
- return "" # mark_safe(not_specified_icon('Origin Not Specified'))
+ return "" # mark_safe(not_specified_icon('Origin Not Specified'))
@register.filter
def external_audience_icon(value):
if value:
return mark_safe(icon("users", "External Audience"))
- else:
- return ""
+ return ""
@register.filter
def internet_accessible_icon(value):
if value:
return mark_safe(icon("cloud", "Internet Accessible"))
- else:
- return ""
+ return ""
@register.filter
@@ -708,9 +701,7 @@ def get_severity_count(id, table):
elif table == "product":
display_counts.append("Total: " + str(total) + " Active Findings")
- display_counts = ", ".join([str(item) for item in display_counts])
-
- return display_counts
+ return ", ".join([str(item) for item in display_counts])
@register.filter
@@ -798,8 +789,7 @@ def first_vulnerability_id(finding):
vulnerability_ids = finding.vulnerability_ids
if vulnerability_ids:
return vulnerability_ids[0]
- else:
- return None
+ return None
@register.filter
@@ -810,8 +800,7 @@ def additional_vulnerability_ids(finding):
for vulnerability_id in vulnerability_ids[1:]:
references.append(vulnerability_id)
return references
- else:
- return None
+ return None
@register.filter
diff --git a/dojo/templatetags/event_tags.py b/dojo/templatetags/event_tags.py
index 2b40868a049..ff1ffe8f068 100644
--- a/dojo/templatetags/event_tags.py
+++ b/dojo/templatetags/event_tags.py
@@ -80,7 +80,6 @@ def nice_title(title):
pat = re.compile(r"Finding [0-9][0-9][0-9]:*")
s = pat.split(title, 2)
try:
- ret = s[1]
- return ret
+ return s[1]
except:
return title
diff --git a/dojo/templatetags/get_attribute.py b/dojo/templatetags/get_attribute.py
index 49f98941df0..34e06a216c0 100644
--- a/dojo/templatetags/get_attribute.py
+++ b/dojo/templatetags/get_attribute.py
@@ -8,5 +8,4 @@ def get_attribute(obj, name):
if hasattr(obj, name):
return getattr(obj, name)
- else:
- return ""
+ return ""
diff --git a/dojo/templatetags/get_banner.py b/dojo/templatetags/get_banner.py
index 26ab7d3bbe8..47465aa6c22 100644
--- a/dojo/templatetags/get_banner.py
+++ b/dojo/templatetags/get_banner.py
@@ -22,9 +22,7 @@ def get_banner_conf(attribute):
value,
attributes=allowed_attributes,
css_sanitizer=CSSSanitizer(allowed_css_properties=["color", "font-weight"])))
- else:
- return value
- else:
- return False
+ return value
+ return False
except Exception:
return False
diff --git a/dojo/templatetags/get_config_setting.py b/dojo/templatetags/get_config_setting.py
index 1425985c4cd..ca917968b75 100644
--- a/dojo/templatetags/get_config_setting.py
+++ b/dojo/templatetags/get_config_setting.py
@@ -9,7 +9,5 @@ def get_config_setting(config_setting):
if hasattr(settings, config_setting):
if getattr(settings, config_setting, None):
return True
- else:
- return False
- else:
return False
+ return False
diff --git a/dojo/templatetags/get_endpoint_status.py b/dojo/templatetags/get_endpoint_status.py
index 2d9f09d8d14..42a5bdb8eaa 100644
--- a/dojo/templatetags/get_endpoint_status.py
+++ b/dojo/templatetags/get_endpoint_status.py
@@ -43,8 +43,7 @@ def endpoint_display_status(endpoint, finding):
statuses.append("Mitigated")
if statuses:
return ", ".join(statuses)
- else:
- return "Active"
+ return "Active"
@register.filter
diff --git a/dojo/templatetags/get_note_status.py b/dojo/templatetags/get_note_status.py
index ab5b6485858..5d719f427c7 100644
--- a/dojo/templatetags/get_note_status.py
+++ b/dojo/templatetags/get_note_status.py
@@ -7,3 +7,4 @@
def get_public_notes(notes):
if notes:
return notes.filter(private=False)
+ return None
diff --git a/dojo/templatetags/get_notetype_availability.py b/dojo/templatetags/get_notetype_availability.py
index 59673b3a4e9..4947d9a5e0f 100644
--- a/dojo/templatetags/get_notetype_availability.py
+++ b/dojo/templatetags/get_notetype_availability.py
@@ -7,6 +7,4 @@
def get_notetype_notes_count(notes):
notes_without_type = notes.filter(note_type=None).count()
notes_count = notes.count()
- notes_with_type = notes_count - notes_without_type
-
- return notes_with_type
+ return notes_count - notes_without_type
diff --git a/dojo/test/queries.py b/dojo/test/queries.py
index 2a2cef6f8d9..28a9249d543 100644
--- a/dojo/test/queries.py
+++ b/dojo/test/queries.py
@@ -46,14 +46,12 @@ def get_authorized_tests(permission, product=None):
engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups),
engagement__product__authorized_group=Exists(authorized_product_groups))
- tests = tests.filter(
+ return tests.filter(
Q(engagement__product__prod_type__member=True)
| Q(engagement__product__member=True)
| Q(engagement__product__prod_type__authorized_group=True)
| Q(engagement__product__authorized_group=True))
- return tests
-
def get_authorized_test_imports(permission):
user = get_current_user()
@@ -89,10 +87,8 @@ def get_authorized_test_imports(permission):
test__engagement__product__member=Exists(authorized_product_roles),
test__engagement__product__prod_type__authorized_group=Exists(authorized_product_type_groups),
test__engagement__product__authorized_group=Exists(authorized_product_groups)).order_by("id")
- test_imports = test_imports.filter(
+ return test_imports.filter(
Q(test__engagement__product__prod_type__member=True)
| Q(test__engagement__product__member=True)
| Q(test__engagement__product__prod_type__authorized_group=True)
| Q(test__engagement__product__authorized_group=True))
-
- return test_imports
diff --git a/dojo/test/views.py b/dojo/test/views.py
index ee492cf5505..b93ebe12933 100644
--- a/dojo/test/views.py
+++ b/dojo/test/views.py
@@ -381,12 +381,11 @@ def copy_test(request, tid):
recipients=[test.engagement.lead],
icon="exclamation-triangle")
return redirect_to_return_url_or_else(request, reverse("view_engagement", args=(engagement.id, )))
- else:
- messages.add_message(
- request,
- messages.ERROR,
- "Unable to copy test, please try again.",
- extra_tags="alert-danger")
+ messages.add_message(
+ request,
+ messages.ERROR,
+ "Unable to copy test, please try again.",
+ extra_tags="alert-danger")
product_tab = Product_Tab(product, title="Copy Test", tab="engagements")
return render(request, "dojo/copy_object.html", {
@@ -547,9 +546,8 @@ def process_finding_form(self, request: HttpRequest, test: Test, context: dict):
finding.save()
return finding, request, True
- else:
- add_error_message_to_response("The form has errors, please correct them below.")
- add_field_errors_to_response(context["form"])
+ add_error_message_to_response("The form has errors, please correct them below.")
+ add_field_errors_to_response(context["form"])
return finding, request, False
@@ -591,8 +589,7 @@ def process_jira_form(self, request: HttpRequest, finding: Finding, context: dic
)
return request, True, push_to_jira
- else:
- add_field_errors_to_response(context["jform"])
+ add_field_errors_to_response(context["jform"])
return request, False, False
@@ -672,10 +669,8 @@ def post(self, request: HttpRequest, test_id: int):
if success:
if "_Finished" in request.POST:
return HttpResponseRedirect(reverse("view_test", args=(test.id,)))
- else:
- return HttpResponseRedirect(reverse("add_findings", args=(test.id,)))
- else:
- context["form_error"] = True
+ return HttpResponseRedirect(reverse("add_findings", args=(test.id,)))
+ context["form_error"] = True
# Render the form
return render(request, self.get_template(), context)
@@ -754,11 +749,10 @@ def add_temp_finding(request, tid, fid):
extra_tags="alert-success")
return HttpResponseRedirect(reverse("view_test", args=(test.id,)))
- else:
- messages.add_message(request,
- messages.ERROR,
- _("The form has errors, please correct them below."),
- extra_tags="alert-danger")
+ messages.add_message(request,
+ messages.ERROR,
+ _("The form has errors, please correct them below."),
+ extra_tags="alert-danger")
else:
form = AddFindingForm(req_resp=None, product=test.engagement.product, initial={"active": False,
@@ -830,8 +824,7 @@ def get_form(
"""
if request.method == "POST":
return ReImportScanForm(request.POST, request.FILES, test=test, **kwargs)
- else:
- return ReImportScanForm(test=test, **kwargs)
+ return ReImportScanForm(test=test, **kwargs)
def get_jira_form(
self,
diff --git a/dojo/tool_config/factory.py b/dojo/tool_config/factory.py
index 61fce9caa51..3715a52906f 100644
--- a/dojo/tool_config/factory.py
+++ b/dojo/tool_config/factory.py
@@ -19,5 +19,4 @@ def create_API(tool_configuration):
if tool_configuration.tool_type.name in SCAN_APIS:
api_class = SCAN_APIS.get(tool_configuration.tool_type.name)
return api_class(tool_configuration)
- else:
- return None
+ return None
diff --git a/dojo/tool_product/queries.py b/dojo/tool_product/queries.py
index 6bc23bdb98b..df95594688b 100644
--- a/dojo/tool_product/queries.py
+++ b/dojo/tool_product/queries.py
@@ -39,8 +39,6 @@ def get_authorized_tool_product_settings(permission):
product__member=Exists(authorized_product_roles),
product__prod_type__authorized_group=Exists(authorized_product_type_groups),
product__authorized_group=Exists(authorized_product_groups)).order_by("id")
- tool_product_settings = tool_product_settings.filter(
+ return tool_product_settings.filter(
Q(product__prod_type__member=True) | Q(product__member=True)
| Q(product__prod_type__authorized_group=True) | Q(product__authorized_group=True))
-
- return tool_product_settings
diff --git a/dojo/tool_product/views.py b/dojo/tool_product/views.py
index 2e606956b8e..def26f088d2 100644
--- a/dojo/tool_product/views.py
+++ b/dojo/tool_product/views.py
@@ -102,8 +102,7 @@ def delete_tool_product(request, pid, ttid):
_("Tool Product Successfully Deleted."),
extra_tags="alert-success")
return HttpResponseRedirect(reverse("all_tool_product", args=(pid, )))
- else:
- tform = ToolProductSettingsForm(instance=tool_product)
+ tform = ToolProductSettingsForm(instance=tool_product)
product_tab = Product_Tab(product, title=_("Delete Product Tool Configuration"), tab="settings")
diff --git a/dojo/tools/acunetix/parse_acunetix_xml.py b/dojo/tools/acunetix/parse_acunetix_xml.py
index 22171bf24b1..4b86d947318 100644
--- a/dojo/tools/acunetix/parse_acunetix_xml.py
+++ b/dojo/tools/acunetix/parse_acunetix_xml.py
@@ -145,8 +145,7 @@ def get_cwe_number(self, cwe):
"""
if cwe is None:
return None
- else:
- return int(cwe.split("-")[1])
+ return int(cwe.split("-")[1])
def get_severity(self, severity):
"""
@@ -156,14 +155,13 @@ def get_severity(self, severity):
"""
if severity == "high":
return "High"
- elif severity == "medium":
+ if severity == "medium":
return "Medium"
- elif severity == "low":
+ if severity == "low":
return "Low"
- elif severity == "informational":
+ if severity == "informational":
return "Info"
- else:
- return "Critical"
+ return "Critical"
def get_false_positive(self, false_p):
"""
@@ -173,5 +171,4 @@ def get_false_positive(self, false_p):
"""
if false_p:
return True
- else:
- return False
+ return False
diff --git a/dojo/tools/acunetix/parser.py b/dojo/tools/acunetix/parser.py
index 289496a03f8..789fc23607d 100644
--- a/dojo/tools/acunetix/parser.py
+++ b/dojo/tools/acunetix/parser.py
@@ -17,5 +17,6 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
if ".xml" in str(filename):
return AcunetixXMLParser().get_findings(filename, test)
- elif ".json" in str(filename):
+ if ".json" in str(filename):
return AcunetixJSONParser().get_findings(filename, test)
+ return None
diff --git a/dojo/tools/anchore_grype/parser.py b/dojo/tools/anchore_grype/parser.py
index c457f63e65f..48e18b686e0 100644
--- a/dojo/tools/anchore_grype/parser.py
+++ b/dojo/tools/anchore_grype/parser.py
@@ -187,10 +187,9 @@ def get_findings(self, file, test):
def _convert_severity(self, val):
if "Unknown" == val:
return "Info"
- elif "Negligible" == val:
+ if "Negligible" == val:
return "Info"
- else:
- return val.title()
+ return val.title()
def get_cvss(self, cvss):
if cvss:
@@ -213,5 +212,4 @@ def get_vulnerability_ids(self, vuln_id, related_vulnerabilities):
vulnerability_ids.append(related_vulnerability.get("id"))
if vulnerability_ids:
return vulnerability_ids
- else:
- return None
+ return None
diff --git a/dojo/tools/api_blackduck/api_client.py b/dojo/tools/api_blackduck/api_client.py
index 47a49643b11..b354f85d753 100644
--- a/dojo/tools/api_blackduck/api_client.py
+++ b/dojo/tools/api_blackduck/api_client.py
@@ -36,11 +36,13 @@ def get_project_by_name(self, project_name):
for project in self.client.get_resource("projects"):
if project["name"] == project_name:
return project
+ return None
def get_version_by_name(self, project, version_name):
for version in self.client.get_resource("versions", project):
if version["versionName"] == version_name:
return version
+ return None
def get_vulnerable_bom_components(self, version):
return self.client.get_resource("vulnerable-components", version)
diff --git a/dojo/tools/api_bugcrowd/api_client.py b/dojo/tools/api_bugcrowd/api_client.py
index bf76608380d..68e73367d1b 100644
--- a/dojo/tools/api_bugcrowd/api_client.py
+++ b/dojo/tools/api_bugcrowd/api_client.py
@@ -112,18 +112,16 @@ def test_connection(self):
f"you can use these as Service key 1 for filtering submissions "
f'You also have targets "{target_names}" that can be used in Service key 2'
)
- else:
- msg = (
- "Bugcrowd API test not successful, no targets were defined in Bugcrowd which is used for "
- f"filtering, check your configuration, HTTP response was: {response_targets.text}"
- )
- raise Exception(msg)
- else:
msg = (
- "Bugcrowd API test not successful, could not retrieve the programs or submissions, check your "
- f"configuration, HTTP response for programs was: {response_programs.text}, HTTP response for submissions was: {response_subs.text}"
+ "Bugcrowd API test not successful, no targets were defined in Bugcrowd which is used for "
+ f"filtering, check your configuration, HTTP response was: {response_targets.text}"
)
raise Exception(msg)
+ msg = (
+ "Bugcrowd API test not successful, could not retrieve the programs or submissions, check your "
+ f"configuration, HTTP response for programs was: {response_programs.text}, HTTP response for submissions was: {response_subs.text}"
+ )
+ raise Exception(msg)
def test_product_connection(self, api_scan_configuration):
submissions = []
diff --git a/dojo/tools/api_bugcrowd/parser.py b/dojo/tools/api_bugcrowd/parser.py
index f6e2fa134b2..bbff76ef4b8 100644
--- a/dojo/tools/api_bugcrowd/parser.py
+++ b/dojo/tools/api_bugcrowd/parser.py
@@ -195,13 +195,12 @@ def include_finding(self, entry):
if entry["attributes"]["state"] in allowed_states:
return True
- else:
- msg = (
- "{} not in allowed bugcrowd submission states".format(
- entry["attributes"]["state"],
- )
+ msg = (
+ "{} not in allowed bugcrowd submission states".format(
+ entry["attributes"]["state"],
)
- raise ValueError(msg)
+ )
+ raise ValueError(msg)
def convert_log_timestamp(self, timestamp):
"""Convert a log entry's timestamp to a DefectDojo date"""
@@ -212,16 +211,15 @@ def convert_severity(self, bugcrowd_severity):
"""Convert severity value"""
if bugcrowd_severity == 5:
return "Info"
- elif bugcrowd_severity == 4:
+ if bugcrowd_severity == 4:
return "Low"
- elif bugcrowd_severity == 3:
+ if bugcrowd_severity == 3:
return "Medium"
- elif bugcrowd_severity == 2:
+ if bugcrowd_severity == 2:
return "High"
- elif bugcrowd_severity == 1:
+ if bugcrowd_severity == 1:
return "Critical"
- else:
- return "Info"
+ return "Info"
def is_active(self, bugcrowd_state):
return (bugcrowd_state == "unresolved") or not (
diff --git a/dojo/tools/api_cobalt/api_client.py b/dojo/tools/api_cobalt/api_client.py
index c18234ae73d..0161715c73c 100644
--- a/dojo/tools/api_cobalt/api_client.py
+++ b/dojo/tools/api_cobalt/api_client.py
@@ -41,13 +41,12 @@ def get_assets(self):
if response.ok:
return response.json().get("data")
- else:
- msg = (
- "Unable to get assets due to {} - {}".format(
- response.status_code, response.content.decode("utf-8"),
- )
+ msg = (
+ "Unable to get assets due to {} - {}".format(
+ response.status_code, response.content.decode("utf-8"),
)
- raise Exception(msg)
+ )
+ raise Exception(msg)
def get_findings(self, asset_id):
"""
@@ -62,13 +61,12 @@ def get_findings(self, asset_id):
if response.ok:
return response.json()
- else:
- msg = (
- "Unable to get asset findings due to {} - {}".format(
- response.status_code, response.content.decode("utf-8"),
- )
+ msg = (
+ "Unable to get asset findings due to {} - {}".format(
+ response.status_code, response.content.decode("utf-8"),
)
- raise Exception(msg)
+ )
+ raise Exception(msg)
def test_connection(self):
# Request orgs for the org name
@@ -91,14 +89,13 @@ def test_connection(self):
org = list(orgs)[0]
org_name = org["resource"]["name"]
return f'You have access to the "{org_name}" organization'
- else:
- msg = (
- "Connection failed (error: {} - {})".format(
- response_assets.status_code,
- response_assets.content.decode("utf-8"),
- )
+ msg = (
+ "Connection failed (error: {} - {})".format(
+ response_assets.status_code,
+ response_assets.content.decode("utf-8"),
)
- raise Exception(msg)
+ )
+ raise Exception(msg)
def test_product_connection(self, api_scan_configuration):
asset = self.get_asset(api_scan_configuration.service_key_1)
diff --git a/dojo/tools/api_cobalt/importer.py b/dojo/tools/api_cobalt/importer.py
index 068745cfee7..8c74c6c8cfe 100644
--- a/dojo/tools/api_cobalt/importer.py
+++ b/dojo/tools/api_cobalt/importer.py
@@ -16,8 +16,7 @@ class CobaltApiImporter:
def get_findings(self, test):
client, config = self.prepare_client(test)
- findings = client.get_findings(config.service_key_1)
- return findings
+ return client.get_findings(config.service_key_1)
def prepare_client(self, test):
product = test.engagement.product
diff --git a/dojo/tools/api_cobalt/parser.py b/dojo/tools/api_cobalt/parser.py
index fa82acabf53..5ec50de6c45 100644
--- a/dojo/tools/api_cobalt/parser.py
+++ b/dojo/tools/api_cobalt/parser.py
@@ -132,8 +132,7 @@ def include_finding(self, resource):
if resource["state"] in allowed_states:
return True
- else:
- return False
+ return False
def convert_endpoints(self, affected_targets):
"""Convert Cobalt affected_targets into DefectDojo endpoints"""
@@ -152,16 +151,15 @@ def convert_severity(self, cobalt_severity):
"""Convert severity value"""
if cobalt_severity == "informational":
return "Info"
- elif cobalt_severity == "low":
+ if cobalt_severity == "low":
return "Low"
- elif cobalt_severity == "medium":
+ if cobalt_severity == "medium":
return "Medium"
- elif cobalt_severity == "high":
+ if cobalt_severity == "high":
return "High"
- elif cobalt_severity == "critical":
+ if cobalt_severity == "critical":
return "Critical"
- else:
- return "Info"
+ return "Info"
def is_active(self, cobalt_state):
return (
diff --git a/dojo/tools/api_edgescan/api_client.py b/dojo/tools/api_edgescan/api_client.py
index e74c6b94095..c7fdc735172 100644
--- a/dojo/tools/api_edgescan/api_client.py
+++ b/dojo/tools/api_edgescan/api_client.py
@@ -28,6 +28,7 @@ def get_extra_options(tool_config):
except (JSONDecodeError, TypeError):
msg = "JSON not provided in Extras field."
raise ValueError(msg)
+ return None
def get_findings(self, asset_ids):
if asset_ids:
@@ -47,14 +48,12 @@ def get_findings(self, asset_ids):
return response.json()
def get_headers(self):
- headers = {
+ return {
"X-API-TOKEN": self.api_key,
"Content-Type": "application/json",
"User-Agent": "DefectDojo",
}
- return headers
-
def get_proxies(self):
if self.options and "proxy" in self.options:
return {"https": self.options["proxy"]}
diff --git a/dojo/tools/api_edgescan/importer.py b/dojo/tools/api_edgescan/importer.py
index e4e9bf0c98e..6d1ca4de90d 100644
--- a/dojo/tools/api_edgescan/importer.py
+++ b/dojo/tools/api_edgescan/importer.py
@@ -12,8 +12,7 @@ class EdgescanImporter:
def get_findings(self, test):
client, config = self.prepare_client(test)
- findings = client.get_findings(config.service_key_1)
- return findings
+ return client.get_findings(config.service_key_1)
def prepare_client(self, test):
product = test.engagement.product
diff --git a/dojo/tools/api_sonarqube/importer.py b/dojo/tools/api_sonarqube/importer.py
index 567454961eb..7e5856707d4 100644
--- a/dojo/tools/api_sonarqube/importer.py
+++ b/dojo/tools/api_sonarqube/importer.py
@@ -356,32 +356,31 @@ def clean_cwe(raw_html):
search = re.search(r"CWE-(\d+)", raw_html)
if search:
return int(search.group(1))
+ return None
@staticmethod
def convert_sonar_severity(sonar_severity):
sev = sonar_severity.lower()
if sev == "blocker":
return "Critical"
- elif sev == "critical":
+ if sev == "critical":
return "High"
- elif sev == "major":
+ if sev == "major":
return "Medium"
- elif sev == "minor":
+ if sev == "minor":
return "Low"
- else:
- return "Info"
+ return "Info"
@staticmethod
def convert_scanner_confidence(sonar_scanner_confidence):
sev = sonar_scanner_confidence.lower()
if sev == "high":
return 1
- elif sev == "medium":
+ if sev == "medium":
return 4
- elif sev == "low":
- return 7
- else:
+ if sev == "low":
return 7
+ return 7
@staticmethod
def get_references(vuln_details):
diff --git a/dojo/tools/api_sonarqube/updater.py b/dojo/tools/api_sonarqube/updater.py
index 980079f8942..c8bcd7e0664 100644
--- a/dojo/tools/api_sonarqube/updater.py
+++ b/dojo/tools/api_sonarqube/updater.py
@@ -72,7 +72,7 @@ def get_sonarqube_required_transitions_for(
):
# If current and target is the same... do nothing
if current_status == target_status:
- return
+ return None
# Check if there is at least one transition from current_status...
if not [
@@ -80,7 +80,7 @@ def get_sonarqube_required_transitions_for(
for x in self.MAPPING_SONARQUBE_STATUS_TRANSITION
if current_status in x.get("from")
]:
- return
+ return None
# Starting from target_status... find out possible origin statuses that
# can transition to target_status
@@ -113,6 +113,8 @@ def get_sonarqube_required_transitions_for(
if possible_transition:
transitions_result.extendleft(possible_transition)
return list(transitions_result)
+ return None
+ return None
def update_sonarqube_finding(self, finding):
sonarqube_issue = finding.sonarqube_issue
diff --git a/dojo/tools/api_vulners/importer.py b/dojo/tools/api_vulners/importer.py
index 8ebbbe83f60..0b49306f7e9 100644
--- a/dojo/tools/api_vulners/importer.py
+++ b/dojo/tools/api_vulners/importer.py
@@ -16,13 +16,11 @@ class VulnersImporter:
def get_findings(self, test):
client, _config = self.prepare_client(test)
- findings = client.get_findings()
- return findings
+ return client.get_findings()
def get_vulns_description(self, test, vulns_id):
client, _config = self.prepare_client(test)
- description = client.get_vulns_description(vulns_id)
- return description
+ return client.get_vulns_description(vulns_id)
def prepare_client(self, test):
product = test.engagement.product
diff --git a/dojo/tools/appspider/parser.py b/dojo/tools/appspider/parser.py
index bf9ed6eb415..d6ccf54611c 100644
--- a/dojo/tools/appspider/parser.py
+++ b/dojo/tools/appspider/parser.py
@@ -18,7 +18,7 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
if filename is None:
- return
+ return None
vscan = ElementTree.parse(filename)
root = vscan.getroot()
diff --git a/dojo/tools/aqua/parser.py b/dojo/tools/aqua/parser.py
index 8dc92dd14d1..076c2d71dc5 100644
--- a/dojo/tools/aqua/parser.py
+++ b/dojo/tools/aqua/parser.py
@@ -204,22 +204,20 @@ def aqua_severity_of(score):
return "High"
if score == "medium":
return "Medium"
- elif score == "low":
+ if score == "low":
return "Low"
- elif score == "negligible":
+ if score == "negligible":
return "Info"
- else:
- return "Critical"
+ return "Critical"
def severity_of(score):
if score == 0:
return "Info"
- elif score < 4:
+ if score < 4:
return "Low"
- elif 4.0 < score < 7.0:
+ if 4.0 < score < 7.0:
return "Medium"
- elif 7.0 < score < 9.0:
+ if 7.0 < score < 9.0:
return "High"
- else:
- return "Critical"
+ return "Critical"
diff --git a/dojo/tools/asff/parser.py b/dojo/tools/asff/parser.py
index 74630290638..ccd5eb3110c 100644
--- a/dojo/tools/asff/parser.py
+++ b/dojo/tools/asff/parser.py
@@ -116,7 +116,7 @@ def get_findings(self, file, test):
def get_severity(self, data):
if data.get("Label"):
return SEVERITY_MAPPING[data.get("Label")]
- elif isinstance(data.get("Normalized"), int):
+ if isinstance(data.get("Normalized"), int):
# 0 - INFORMATIONAL
# 1-39 - LOW
# 40-69 - MEDIUM
@@ -124,12 +124,11 @@ def get_severity(self, data):
# 90-100 - CRITICAL
if data.get("Normalized") > 89:
return "Critical"
- elif data.get("Normalized") > 69:
+ if data.get("Normalized") > 69:
return "High"
- elif data.get("Normalized") > 39:
+ if data.get("Normalized") > 39:
return "Medium"
- elif data.get("Normalized") > 0:
+ if data.get("Normalized") > 0:
return "Low"
- else:
- return "Info"
+ return "Info"
return None
diff --git a/dojo/tools/auditjs/parser.py b/dojo/tools/auditjs/parser.py
index 8135fe1fc55..6299308f79c 100644
--- a/dojo/tools/auditjs/parser.py
+++ b/dojo/tools/auditjs/parser.py
@@ -25,14 +25,13 @@ def get_severity(self, cvss):
cvss = float(cvss)
if cvss > 0 and cvss < 4:
return "Low"
- elif cvss >= 4 and cvss < 7:
+ if cvss >= 4 and cvss < 7:
return "Medium"
- elif cvss >= 7 and cvss < 9:
+ if cvss >= 7 and cvss < 9:
return "High"
- elif cvss >= 9:
+ if cvss >= 9:
return "Critical"
- else:
- return "Informational"
+ return "Informational"
def get_findings(self, filename, test):
try:
diff --git a/dojo/tools/aws_prowler/parser.py b/dojo/tools/aws_prowler/parser.py
index 8a084ff6f37..7093a596012 100644
--- a/dojo/tools/aws_prowler/parser.py
+++ b/dojo/tools/aws_prowler/parser.py
@@ -23,11 +23,10 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, file, test):
if file.name.lower().endswith(".csv"):
return self.process_csv(file, test)
- elif file.name.lower().endswith(".json"):
+ if file.name.lower().endswith(".json"):
return self.process_json(file, test)
- else:
- msg = "Unknown file format"
- raise ValueError(msg)
+ msg = "Unknown file format"
+ raise ValueError(msg)
def process_csv(self, file, test):
content = file.read()
@@ -218,8 +217,7 @@ def process_json(self, file, test):
def formatview(self, depth):
if depth > 1:
return "* "
- else:
- return ""
+ return ""
# Criticality rating
def getCriticalityRating(self, result, level, severity):
@@ -233,10 +231,9 @@ def getCriticalityRating(self, result, level, severity):
if severity == "Informational":
return "Low"
return severity
+ if level == "Level 1":
+ criticality = "Critical"
else:
- if level == "Level 1":
- criticality = "Critical"
- else:
- criticality = "High"
+ criticality = "High"
return criticality
diff --git a/dojo/tools/aws_prowler_v3plus/parser.py b/dojo/tools/aws_prowler_v3plus/parser.py
index 5d550dcf5c6..c764667dfb4 100644
--- a/dojo/tools/aws_prowler_v3plus/parser.py
+++ b/dojo/tools/aws_prowler_v3plus/parser.py
@@ -17,8 +17,7 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, file, test):
if file.name.lower().endswith(".ocsf.json"):
return AWSProwlerV4Parser().process_ocsf_json(file, test)
- elif file.name.lower().endswith(".json"):
+ if file.name.lower().endswith(".json"):
return AWSProwlerV3Parser().process_json(file, test)
- else:
- msg = "Unknown file format"
- raise ValueError(msg)
+ msg = "Unknown file format"
+ raise ValueError(msg)
diff --git a/dojo/tools/azure_security_center_recommendations/parser.py b/dojo/tools/azure_security_center_recommendations/parser.py
index 7fbfac83c91..9838f65ae58 100644
--- a/dojo/tools/azure_security_center_recommendations/parser.py
+++ b/dojo/tools/azure_security_center_recommendations/parser.py
@@ -22,9 +22,8 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, file, test):
if file.name.lower().endswith(".csv"):
return self.process_csv(file, test)
- else:
- msg = "Unknown file format"
- raise ValueError(msg)
+ msg = "Unknown file format"
+ raise ValueError(msg)
def process_csv(self, file, test):
content = file.read()
diff --git a/dojo/tools/bandit/parser.py b/dojo/tools/bandit/parser.py
index 1ad385114ac..3e4e54fcd8b 100644
--- a/dojo/tools/bandit/parser.py
+++ b/dojo/tools/bandit/parser.py
@@ -66,9 +66,8 @@ def get_findings(self, filename, test):
def convert_confidence(self, value):
if "high" == value.lower():
return 2
- elif "medium" == value.lower():
+ if "medium" == value.lower():
return 3
- elif "low" == value.lower():
+ if "low" == value.lower():
return 6
- else:
- return None
+ return None
diff --git a/dojo/tools/bearer_cli/parser.py b/dojo/tools/bearer_cli/parser.py
index 4f91bb8632a..6484fd66269 100644
--- a/dojo/tools/bearer_cli/parser.py
+++ b/dojo/tools/bearer_cli/parser.py
@@ -29,8 +29,7 @@ def get_findings(self, file, test):
if bearerfinding["fingerprint"] in dupes:
continue
- else:
- dupes.add(bearerfinding["fingerprint"])
+ dupes.add(bearerfinding["fingerprint"])
finding = Finding(
title=bearerfinding["title"] + " in " + bearerfinding["filename"] + ":" + str(bearerfinding["line_number"]),
diff --git a/dojo/tools/blackduck/importer.py b/dojo/tools/blackduck/importer.py
index 3e7cde7abe7..80db2714490 100644
--- a/dojo/tools/blackduck/importer.py
+++ b/dojo/tools/blackduck/importer.py
@@ -24,8 +24,7 @@ def parse_findings(self, report: Path) -> Iterable[BlackduckFinding]:
if zipfile.is_zipfile(str(report)):
return self._process_zipfile(report)
- else:
- return self._process_csvfile(report)
+ return self._process_csvfile(report)
def _process_csvfile(self, report):
"""
diff --git a/dojo/tools/blackduck/parser.py b/dojo/tools/blackduck/parser.py
index a79e9db9677..d462f7207b3 100644
--- a/dojo/tools/blackduck/parser.py
+++ b/dojo/tools/blackduck/parser.py
@@ -28,10 +28,9 @@ def get_findings(self, filename, test):
def normalize_findings(self, filename):
importer = BlackduckImporter()
- findings = sorted(
+ return sorted(
importer.parse_findings(filename), key=lambda f: f.vuln_id,
)
- return findings
def ingest_findings(self, normalized_findings, test):
dupes = {}
diff --git a/dojo/tools/blackduck_binary_analysis/parser.py b/dojo/tools/blackduck_binary_analysis/parser.py
index 7e545e67517..b9b2e6ab655 100644
--- a/dojo/tools/blackduck_binary_analysis/parser.py
+++ b/dojo/tools/blackduck_binary_analysis/parser.py
@@ -29,10 +29,9 @@ def get_findings(self, filename, test):
def sort_findings(self, filename):
importer = BlackduckBinaryAnalysisImporter()
- findings = sorted(
+ return sorted(
importer.parse_findings(filename), key=lambda f: f.cve,
)
- return findings
def ingest_findings(self, sorted_findings, test):
findings = {}
@@ -138,9 +137,7 @@ def format_description(self, i):
return description
def format_mitigation(self, i):
- mitigation = f"Upgrade {str(i.component)} to latest version: {str(i.latest_version)}.\n"
-
- return mitigation
+ return f"Upgrade {str(i.component)} to latest version: {str(i.latest_version)}.\n"
def format_impact(self, i):
impact = "The use of vulnerable third-party open source software in applications can have numerous negative impacts:\n\n"
diff --git a/dojo/tools/blackduck_component_risk/importer.py b/dojo/tools/blackduck_component_risk/importer.py
index 5478fa0d51b..c33fa7fd671 100644
--- a/dojo/tools/blackduck_component_risk/importer.py
+++ b/dojo/tools/blackduck_component_risk/importer.py
@@ -29,9 +29,8 @@ def parse_findings(self, report: Path) -> (dict, dict, dict):
report = Path(report.temporary_file_path())
if zipfile.is_zipfile(str(report)):
return self._process_zipfile(report)
- else:
- msg = f"File {report} not a zip!"
- raise ValueError(msg)
+ msg = f"File {report} not a zip!"
+ raise ValueError(msg)
def _process_zipfile(self, report: Path) -> (dict, dict, dict):
"""
diff --git a/dojo/tools/blackduck_component_risk/parser.py b/dojo/tools/blackduck_component_risk/parser.py
index 60003fb4093..270b3481725 100644
--- a/dojo/tools/blackduck_component_risk/parser.py
+++ b/dojo/tools/blackduck_component_risk/parser.py
@@ -206,10 +206,9 @@ def security_title(self, vulns):
:param vulns: Dictionary {component_version_identifier: [vulns]}
:return:
"""
- title = "Security Risk: {}:{}".format(
+ return "Security Risk: {}:{}".format(
vulns[0]["Component name"], vulns[0]["Component version name"],
)
- return title
def security_description(self, vulns):
"""
@@ -289,10 +288,9 @@ def security_mitigation(self, vulns):
:param vulns: Dictionary {component_version_identifier: [vulns]}
:return:
"""
- mit = "Update component {}:{} to a secure version".format(
+ return "Update component {}:{} to a secure version".format(
vulns[0]["Component name"], vulns[0]["Component version name"],
)
- return mit
def security_impact(self, vulns):
"""
diff --git a/dojo/tools/burp_api/parser.py b/dojo/tools/burp_api/parser.py
index ec801d8e285..c54726f384e 100644
--- a/dojo/tools/burp_api/parser.py
+++ b/dojo/tools/burp_api/parser.py
@@ -159,9 +159,8 @@ def convert_confidence(issue):
value = issue.get("confidence", "undefined").lower()
if "certain" == value:
return 2
- elif "firm" == value:
+ if "firm" == value:
return 3
- elif "tentative" == value:
+ if "tentative" == value:
return 6
- else:
- return None
+ return None
diff --git a/dojo/tools/burp_enterprise/parser.py b/dojo/tools/burp_enterprise/parser.py
index b652dda32c9..aab8e565242 100644
--- a/dojo/tools/burp_enterprise/parser.py
+++ b/dojo/tools/burp_enterprise/parser.py
@@ -23,8 +23,7 @@ def get_findings(self, filename, test):
tree = etree.parse(filename, parser)
if tree:
return self.get_items(tree, test)
- else:
- return ()
+ return ()
def get_content(self, container):
s = ""
@@ -193,8 +192,7 @@ def get_cwe(self, vuln_references):
cweSearch = re.search("CWE-([0-9]*)", vuln_references, re.IGNORECASE)
if cweSearch:
return cweSearch.group(1)
- else:
- return 0
+ return 0
def create_findings(self, items, test):
# Dictonary to hold the aggregated findings with:
diff --git a/dojo/tools/burp_graphql/parser.py b/dojo/tools/burp_graphql/parser.py
index c0266941222..9b37760e2a8 100644
--- a/dojo/tools/burp_graphql/parser.py
+++ b/dojo/tools/burp_graphql/parser.py
@@ -222,5 +222,4 @@ def get_cwe(self, cwe_html):
cweSearch = re.search("CWE-([0-9]*)", cwe_html, re.IGNORECASE)
if cweSearch:
return cweSearch.group(1)
- else:
- return 0
+ return 0
diff --git a/dojo/tools/checkmarx/parser.py b/dojo/tools/checkmarx/parser.py
index 02e242c0d12..c278612344e 100644
--- a/dojo/tools/checkmarx/parser.py
+++ b/dojo/tools/checkmarx/parser.py
@@ -21,8 +21,7 @@ def get_label_for_scan_types(self, scan_type):
def get_description_for_scan_types(self, scan_type):
if scan_type == "Checkmarx Scan":
return "Simple Report. Aggregates vulnerabilities per categories, cwe, name, sinkFilename"
- else:
- return "Detailed Report. Import all vulnerabilities from checkmarx without aggregation"
+ return "Detailed Report. Import all vulnerabilities from checkmarx without aggregation"
# mode:
# None (default): aggregates vulnerabilites per sink filename (legacy behavior)
@@ -333,8 +332,7 @@ def get_description_detailed(self, pathnode, findingdetail):
codefragment.find("Code").text.strip(),
)
- findingdetail = f"{findingdetail}-----\n"
- return findingdetail
+ return f"{findingdetail}-----\n"
# Get name, cwe and categories from the global query tag (1 query = 1 type
# of vulnerability)
@@ -362,16 +360,14 @@ def isVerified(self, state):
def get_findings(self, file, test):
if file.name.strip().lower().endswith(".json"):
return self._get_findings_json(file, test)
- else:
- return self._get_findings_xml(file, test)
+ return self._get_findings_xml(file, test)
def _parse_date(self, value):
if isinstance(value, str):
return parser.parse(value).date()
- elif isinstance(value, dict) and isinstance(value.get("seconds"), int):
+ if isinstance(value, dict) and isinstance(value.get("seconds"), int):
return datetime.datetime.utcfromtimestamp(value.get("seconds")).date()
- else:
- return None
+ return None
def _get_findings_json(self, file, test):
""""""
diff --git a/dojo/tools/checkmarx_one/parser.py b/dojo/tools/checkmarx_one/parser.py
index 64c52c763f1..a48023e5d6f 100644
--- a/dojo/tools/checkmarx_one/parser.py
+++ b/dojo/tools/checkmarx_one/parser.py
@@ -22,22 +22,19 @@ def get_description_for_scan_types(self, scan_type):
def _parse_date(self, value):
if isinstance(value, str):
return parser.parse(value)
- elif isinstance(value, dict) and isinstance(value.get("seconds"), int):
+ if isinstance(value, dict) and isinstance(value.get("seconds"), int):
return datetime.datetime.utcfromtimestamp(value.get("seconds"))
- else:
- return None
+ return None
def _parse_cwe(self, cwe):
if isinstance(cwe, str):
cwe_num = re.findall(r"\d+", cwe)
if cwe_num:
return cwe_num[0]
- else:
- return None
- elif isinstance(cwe, int):
- return cwe
- else:
return None
+ if isinstance(cwe, int):
+ return cwe
+ return None
def parse_vulnerabilities_from_scan_list(
self,
@@ -114,8 +111,7 @@ def parse_sca_vulnerabilities(
cwe_store: list,
) -> List[Finding]:
# Not implemented yet
- findings = []
- return findings
+ return []
def parse_sast_vulnerabilities(
self,
diff --git a/dojo/tools/chefinspect/parser.py b/dojo/tools/chefinspect/parser.py
index 06769d4d033..aeb256345e1 100644
--- a/dojo/tools/chefinspect/parser.py
+++ b/dojo/tools/chefinspect/parser.py
@@ -17,14 +17,13 @@ def convert_score(self, raw_value):
val = float(raw_value)
if val == 0.0:
return "Info"
- elif val < 0.4:
+ if val < 0.4:
return "Low"
- elif val < 0.7:
+ if val < 0.7:
return "Medium"
- elif val < 0.9:
+ if val < 0.9:
return "High"
- else:
- return "Critical"
+ return "Critical"
def get_findings(self, file, test):
lines = file.read()
diff --git a/dojo/tools/clair/clairklar_parser.py b/dojo/tools/clair/clairklar_parser.py
index efef6483d58..bc168fbabab 100644
--- a/dojo/tools/clair/clairklar_parser.py
+++ b/dojo/tools/clair/clairklar_parser.py
@@ -79,7 +79,7 @@ def get_item_clairklar(self, item_node, test):
if "Link" in item_node:
link = item_node["Link"]
- finding = Finding(
+ return Finding(
title=item_node["Name"]
+ " - "
+ "("
@@ -101,4 +101,3 @@ def get_item_clairklar(self, item_node, test):
dynamic_finding=False,
impact="No impact provided",
)
- return finding
diff --git a/dojo/tools/clair/parser.py b/dojo/tools/clair/parser.py
index 8b82aa8ec6b..99bf78729c0 100644
--- a/dojo/tools/clair/parser.py
+++ b/dojo/tools/clair/parser.py
@@ -19,10 +19,10 @@ def get_findings(self, json_output, test):
if tree:
if self.scanner == "clair":
return ClairScan().get_items_clair(tree, test)
- elif self.scanner == "clairklar":
+ if self.scanner == "clairklar":
return ClairKlarScan().get_items_klar(tree, test)
- else:
- return []
+            return []
+        return None
def parse_json(self, json_output):
try:
diff --git a/dojo/tools/cloudsploit/parser.py b/dojo/tools/cloudsploit/parser.py
index 7ad446bcf74..ccf3181aa1d 100644
--- a/dojo/tools/cloudsploit/parser.py
+++ b/dojo/tools/cloudsploit/parser.py
@@ -75,5 +75,4 @@ def convert_severity(self, status):
return "Medium"
if status == "FAIL":
return "Critical"
- else:
- return "Info"
+ return "Info"
diff --git a/dojo/tools/codechecker/parser.py b/dojo/tools/codechecker/parser.py
index 41998099e56..5e96c75be35 100644
--- a/dojo/tools/codechecker/parser.py
+++ b/dojo/tools/codechecker/parser.py
@@ -19,11 +19,12 @@ def get_requires_file(self, scan_type):
def get_findings(self, json_output, test):
if json_output is None:
- return
+ return None
tree = self.parse_json(json_output)
if tree:
return self.get_items(tree)
+ return None
def parse_json(self, json_output):
data = json_output.read()
@@ -99,7 +100,7 @@ def get_item(vuln):
else:
title = unique_id_from_tool
- finding = Finding(
+ return Finding(
title=title,
description=description,
severity=severity,
@@ -119,8 +120,6 @@ def get_item(vuln):
],
)
- return finding
-
def get_mapped_severity(severity):
switcher = {
diff --git a/dojo/tools/contrast/parser.py b/dojo/tools/contrast/parser.py
index fb31316e5f1..97e8fbf641e 100644
--- a/dojo/tools/contrast/parser.py
+++ b/dojo/tools/contrast/parser.py
@@ -124,8 +124,7 @@ def format_description(self, row):
+ row.get("Vulnerability Name")
+ "\n"
)
- description = description + "**Status:** " + row.get("Status") + "\n"
- return description
+ return description + "**Status:** " + row.get("Status") + "\n"
def format_cwe(self, url):
# Get the last path
diff --git a/dojo/tools/crashtest_security/parser.py b/dojo/tools/crashtest_security/parser.py
index 2c118d84665..71278115ecb 100644
--- a/dojo/tools/crashtest_security/parser.py
+++ b/dojo/tools/crashtest_security/parser.py
@@ -131,14 +131,13 @@ def get_severity(self, cvss_base_score):
"""
if cvss_base_score == 0:
return "Info"
- elif cvss_base_score < 4:
+ if cvss_base_score < 4:
return "Low"
- elif cvss_base_score < 7:
+ if cvss_base_score < 7:
return "Medium"
- elif cvss_base_score < 9:
+ if cvss_base_score < 9:
return "High"
- else:
- return "Critical"
+ return "Critical"
class CrashtestSecurityXmlParser:
@@ -153,8 +152,7 @@ def get_findings(self, xml_output, test):
if tree:
return self.get_items(tree, test)
- else:
- return []
+ return []
def parse_xml(self, xml_output):
"""
@@ -244,8 +242,7 @@ def get_findings(self, filename, test):
if filename.name.lower().endswith(".xml"):
return CrashtestSecurityXmlParser().get_findings(filename, test)
- elif filename.name.lower().endswith(".json"):
+ if filename.name.lower().endswith(".json"):
return CrashtestSecurityJsonParser().get_findings(filename, test)
- else:
- msg = "Unknown File Format"
- raise ValueError(msg)
+ msg = "Unknown File Format"
+ raise ValueError(msg)
diff --git a/dojo/tools/crunch42/parser.py b/dojo/tools/crunch42/parser.py
index 02868e45b55..d4d19ff35e4 100644
--- a/dojo/tools/crunch42/parser.py
+++ b/dojo/tools/crunch42/parser.py
@@ -38,8 +38,7 @@ def get_findings(self, filename, test):
for moduleTree in reportTree:
temp += self.process_tree(moduleTree, test)
return temp
- else:
- return self.process_tree(reportTree, test)
+ return self.process_tree(reportTree, test)
def get_items(self, tree, test):
items = {}
@@ -72,7 +71,7 @@ def get_item(self, issue, title, test):
else:
severity = "Critical"
# create the finding object
- finding = Finding(
+ return Finding(
unique_id_from_tool=fingerprint,
title=title,
test=test,
@@ -87,4 +86,3 @@ def get_item(self, issue, title, test):
static_finding=True,
dynamic_finding=False,
)
- return finding
diff --git a/dojo/tools/cyclonedx/json_parser.py b/dojo/tools/cyclonedx/json_parser.py
index 265b5e02872..6a329cfdfa7 100644
--- a/dojo/tools/cyclonedx/json_parser.py
+++ b/dojo/tools/cyclonedx/json_parser.py
@@ -144,4 +144,4 @@ def _flatten_components(self, components, flatted_components):
# tools don't provide it
if "bom-ref" in component:
flatted_components[component["bom-ref"]] = component
- return None
+ return
diff --git a/dojo/tools/cyclonedx/parser.py b/dojo/tools/cyclonedx/parser.py
index 8fe80a51136..d01798e3583 100644
--- a/dojo/tools/cyclonedx/parser.py
+++ b/dojo/tools/cyclonedx/parser.py
@@ -20,5 +20,4 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, file, test):
if file.name.strip().lower().endswith(".json"):
return CycloneDXJSONParser()._get_findings_json(file, test)
- else:
- return CycloneDXXMLParser()._get_findings_xml(file, test)
+ return CycloneDXXMLParser()._get_findings_xml(file, test)
diff --git a/dojo/tools/deepfence_threatmapper/compliance.py b/dojo/tools/deepfence_threatmapper/compliance.py
index 5cd4f5b6340..f948a18c929 100644
--- a/dojo/tools/deepfence_threatmapper/compliance.py
+++ b/dojo/tools/deepfence_threatmapper/compliance.py
@@ -32,7 +32,7 @@ def get_findings(self, row, headers, test):
description += "**test_number:** " + str(test_number) + "\n"
description += "**count:** " + str(count) + "\n"
description += "**doc_id:** " + str(doc_id) + "\n"
- finding = Finding(
+ return Finding(
title="Threatmapper_Compliance_Report-" + test_number,
description=description,
severity=self.compliance_severity(status),
@@ -40,7 +40,6 @@ def get_findings(self, row, headers, test):
dynamic_finding=True,
test=test,
)
- return finding
def compliance_severity(self, input):
if input == "pass":
diff --git a/dojo/tools/deepfence_threatmapper/malware.py b/dojo/tools/deepfence_threatmapper/malware.py
index f1931e42623..f764a2ce4b0 100644
--- a/dojo/tools/deepfence_threatmapper/malware.py
+++ b/dojo/tools/deepfence_threatmapper/malware.py
@@ -21,7 +21,7 @@ def get_findings(self, row, headers, test):
description += "**NodeType:** " + str(NodeType) + "\n"
description += "**Container Name:** " + str(Container_Name) + "\n"
description += "**Kubernetes Cluster Name:** " + str(Kubernetes_Cluster_Name) + "\n"
- finding = Finding(
+ return Finding(
title=Rule_Name,
description=description,
file_path=File_Name,
@@ -30,10 +30,8 @@ def get_findings(self, row, headers, test):
dynamic_finding=True,
test=test,
)
- return finding
def severity(self, input):
if input is None:
return "Info"
- else:
- return input.capitalize()
+ return input.capitalize()
diff --git a/dojo/tools/deepfence_threatmapper/secret.py b/dojo/tools/deepfence_threatmapper/secret.py
index fd102be834a..2eae14bc76b 100644
--- a/dojo/tools/deepfence_threatmapper/secret.py
+++ b/dojo/tools/deepfence_threatmapper/secret.py
@@ -38,5 +38,4 @@ def get_findings(self, row, headers, test):
def severity(self, input):
if input is None:
return "Info"
- else:
- return input.capitalize()
+ return input.capitalize()
diff --git a/dojo/tools/deepfence_threatmapper/vulnerability.py b/dojo/tools/deepfence_threatmapper/vulnerability.py
index 61c1e505cdc..b76505613af 100644
--- a/dojo/tools/deepfence_threatmapper/vulnerability.py
+++ b/dojo/tools/deepfence_threatmapper/vulnerability.py
@@ -29,7 +29,7 @@ def get_findings(self, row, headers, test):
description += "**host_name:** " + str(host_name) + "\n"
description += "**cloud_account_id:** " + str(cloud_account_id) + "\n"
description += "**masked:** " + str(masked) + "\n"
- finding = Finding(
+ return Finding(
title="Threatmapper_Vuln_Report-" + cve_id,
description=description,
component_name=cve_caused_by_package,
@@ -41,10 +41,8 @@ def get_findings(self, row, headers, test):
cve=cve_id,
test=test,
)
- return finding
def severity(self, input):
if input is None:
return "Info"
- else:
- return input.capitalize()
+ return input.capitalize()
diff --git a/dojo/tools/dependency_check/parser.py b/dojo/tools/dependency_check/parser.py
index 96940049984..1d4a167429d 100644
--- a/dojo/tools/dependency_check/parser.py
+++ b/dojo/tools/dependency_check/parser.py
@@ -46,12 +46,11 @@ def get_filename_and_path_from_dependency(
return related_dependency.findtext(
f"{namespace}fileName",
), related_dependency.findtext(f"{namespace}filePath")
- else:
- # without filename, it would be just a duplicate finding so we have to skip it. filename
- # is only present for relateddependencies since v6.0.0
- # logger.debug('related_dependency: %s',
- # ElementTree.tostring(related_dependency, encoding='utf8', method='xml'))
- return None, None
+ # without filename, it would be just a duplicate finding so we have to skip it. filename
+ # is only present for relateddependencies since v6.0.0
+ # logger.debug('related_dependency: %s',
+ # ElementTree.tostring(related_dependency, encoding='utf8', method='xml'))
+ return None, None
def get_component_name_and_version_from_dependency(
self, dependency, related_dependency, namespace,
diff --git a/dojo/tools/dependency_track/parser.py b/dojo/tools/dependency_track/parser.py
index 2e3467623f9..eecc09670a3 100644
--- a/dojo/tools/dependency_track/parser.py
+++ b/dojo/tools/dependency_track/parser.py
@@ -92,16 +92,15 @@ def _convert_dependency_track_severity_to_dojo_severity(self, dependency_track_s
severity = dependency_track_severity.lower()
if severity == "critical":
return "Critical"
- elif severity == "high":
+ if severity == "high":
return "High"
- elif severity == "medium":
+ if severity == "medium":
return "Medium"
- elif severity == "low":
+ if severity == "low":
return "Low"
- elif severity.startswith("info"):
+ if severity.startswith("info"):
return "Informational"
- else:
- return None
+ return None
def _convert_dependency_track_finding_to_dojo_finding(self, dependency_track_finding, test):
"""
diff --git a/dojo/tools/dockerbench/parser.py b/dojo/tools/dockerbench/parser.py
index 120da8eb6fc..c8aa321f7b2 100644
--- a/dojo/tools/dockerbench/parser.py
+++ b/dojo/tools/dockerbench/parser.py
@@ -111,7 +111,7 @@ def get_item(vuln, test, test_start, test_end, description):
vuln["remediation-impact"],
)
- finding = Finding(
+ return Finding(
title=title,
date=datetime.fromtimestamp(int(test_end)),
test=test,
@@ -122,5 +122,3 @@ def get_item(vuln, test, test_start, test_end, description):
static_finding=True,
dynamic_finding=False,
)
-
- return finding
diff --git a/dojo/tools/drheader/parser.py b/dojo/tools/drheader/parser.py
index 158da541bd3..bf8435f63ab 100644
--- a/dojo/tools/drheader/parser.py
+++ b/dojo/tools/drheader/parser.py
@@ -50,7 +50,6 @@ def get_findings(self, filename, test):
for finding in item["report"]:
items.append(self.return_finding(test=test, finding=finding, url=url))
return items
- else:
- for finding in data:
- items.append(self.return_finding(test=test, finding=finding))
- return items
+ for finding in data:
+ items.append(self.return_finding(test=test, finding=finding))
+ return items
diff --git a/dojo/tools/eslint/parser.py b/dojo/tools/eslint/parser.py
index 9e282cca41d..329e2fac751 100644
--- a/dojo/tools/eslint/parser.py
+++ b/dojo/tools/eslint/parser.py
@@ -16,10 +16,9 @@ def get_description_for_scan_types(self, scan_type):
def _convert_eslint_severity_to_dojo_severity(self, eslint_severity):
if eslint_severity == 2:
return "High"
- elif eslint_severity == 1:
+ if eslint_severity == 1:
return "Medium"
- else:
- return "Info"
+ return "Info"
def get_findings(self, filename, test):
tree = filename.read()
diff --git a/dojo/tools/fortify/parser.py b/dojo/tools/fortify/parser.py
index b6f7e5185c8..7d2b15c0e25 100644
--- a/dojo/tools/fortify/parser.py
+++ b/dojo/tools/fortify/parser.py
@@ -15,8 +15,7 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
if str(filename.name).endswith(".xml"):
return FortifyXMLParser().parse_xml(filename, test)
- elif str(filename.name).endswith(".fpr"):
+ if str(filename.name).endswith(".fpr"):
return FortifyFPRParser().parse_fpr(filename, test)
- else:
- msg = "Filename extension not recognized. Use .xml or .fpr"
- raise ValueError(msg)
+ msg = "Filename extension not recognized. Use .xml or .fpr"
+ raise ValueError(msg)
diff --git a/dojo/tools/generic/csv_parser.py b/dojo/tools/generic/csv_parser.py
index 001ea2ad91f..4e8acb461d9 100644
--- a/dojo/tools/generic/csv_parser.py
+++ b/dojo/tools/generic/csv_parser.py
@@ -105,5 +105,4 @@ def _convert_bool(self, val):
def get_severity(self, input):
if input in ["Info", "Low", "Medium", "High", "Critical"]:
return input
- else:
- return "Info"
+ return "Info"
diff --git a/dojo/tools/generic/parser.py b/dojo/tools/generic/parser.py
index e2fb66086bb..cf03d9753bc 100644
--- a/dojo/tools/generic/parser.py
+++ b/dojo/tools/generic/parser.py
@@ -20,12 +20,12 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
if filename.name.lower().endswith(".csv"):
return GenericCSVParser()._get_findings_csv(filename)
- elif filename.name.lower().endswith(".json"):
+ if filename.name.lower().endswith(".json"):
data = json.load(filename)
test_internal = GenericJSONParser()._get_test_json(data)
return test_internal.findings
- else: # default to CSV like before
- return GenericCSVParser()._get_findings_csv(filename)
+ # default to CSV like before
+ return GenericCSVParser()._get_findings_csv(filename)
def get_tests(self, scan_type, filename):
# if the file is a CSV just use the old function
diff --git a/dojo/tools/github_vulnerability/parser.py b/dojo/tools/github_vulnerability/parser.py
index b03dbc01e5d..c0ad99ac9a4 100644
--- a/dojo/tools/github_vulnerability/parser.py
+++ b/dojo/tools/github_vulnerability/parser.py
@@ -131,7 +131,7 @@ def get_findings(self, filename, test):
else:
dupes[dupe_key] = finding
return list(dupes.values())
- elif isinstance(data, list):
+ if isinstance(data, list):
findings = []
for vuln in data:
url = vuln["url"]
@@ -185,6 +185,7 @@ def get_findings(self, filename, test):
)
findings.append(finding)
return findings
+ return None
def _search_vulnerability_alerts(self, data):
if isinstance(data, list):
@@ -204,5 +205,4 @@ def _search_vulnerability_alerts(self, data):
def _convert_security(self, val):
if val.lower() == "moderate":
return "Medium"
- else:
- return val.title()
+ return val.title()
diff --git a/dojo/tools/gitlab_dep_scan/parser.py b/dojo/tools/gitlab_dep_scan/parser.py
index 2ec561500cd..cc365c8acba 100644
--- a/dojo/tools/gitlab_dep_scan/parser.py
+++ b/dojo/tools/gitlab_dep_scan/parser.py
@@ -15,11 +15,12 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, json_output, test):
if json_output is None:
- return
+ return None
tree = self.parse_json(json_output)
if tree:
return self.get_items(tree, test)
+ return None
def parse_json(self, json_output):
try:
diff --git a/dojo/tools/gitlab_sast/parser.py b/dojo/tools/gitlab_sast/parser.py
index f4d169b2059..ebe5071ce6e 100644
--- a/dojo/tools/gitlab_sast/parser.py
+++ b/dojo/tools/gitlab_sast/parser.py
@@ -18,11 +18,12 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, json_output, test):
if json_output is None:
- return
+ return None
tree = self.parse_json(json_output)
if tree:
return self.get_items(tree)
+ return None
def get_tests(self, scan_type, handle):
tree = self.parse_json(handle)
diff --git a/dojo/tools/harbor_vulnerability/parser.py b/dojo/tools/harbor_vulnerability/parser.py
index c70c7031a5c..b1f2ab23633 100644
--- a/dojo/tools/harbor_vulnerability/parser.py
+++ b/dojo/tools/harbor_vulnerability/parser.py
@@ -105,5 +105,4 @@ def get_findings(self, filename, test):
def transpose_severity(severity):
if severity in Finding.SEVERITIES:
return severity
- else:
- return "Info"
+ return "Info"
diff --git a/dojo/tools/hcl_appscan/parser.py b/dojo/tools/hcl_appscan/parser.py
index 00124b3f6c4..eaff922e2e8 100644
--- a/dojo/tools/hcl_appscan/parser.py
+++ b/dojo/tools/hcl_appscan/parser.py
@@ -119,5 +119,4 @@ def get_findings(self, file, test):
except UnboundLocalError:
pass
return findings
- else:
- return findings
+ return findings
diff --git a/dojo/tools/huskyci/parser.py b/dojo/tools/huskyci/parser.py
index 028f4e18453..faa6120b141 100644
--- a/dojo/tools/huskyci/parser.py
+++ b/dojo/tools/huskyci/parser.py
@@ -20,11 +20,12 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, json_output, test):
if json_output is None:
- return
+ return None
tree = self.parse_json(json_output)
if tree:
return self.get_items(tree, test)
+ return None
def parse_json(self, json_output):
try:
@@ -71,7 +72,7 @@ def get_item(item_node, test):
if "securitytool" in item_node:
description += "\nSecurity Tool: " + item_node.get("securitytool")
- finding = Finding(
+ return Finding(
title=item_node.get("title"),
test=test,
severity=item_node.get("severity"),
@@ -88,5 +89,3 @@ def get_item(item_node, test):
dynamic_finding=False,
impact="No impact provided",
)
-
- return finding
diff --git a/dojo/tools/hydra/parser.py b/dojo/tools/hydra/parser.py
index c42e8637f2e..bbdffa0abe1 100644
--- a/dojo/tools/hydra/parser.py
+++ b/dojo/tools/hydra/parser.py
@@ -39,9 +39,7 @@ def get_findings(self, json_output, test):
report = self.__parse_json(json_output)
metadata = HydraScanMetadata(report["generator"])
- findings = self.__extract_findings(report["results"], metadata, test)
-
- return findings
+ return self.__extract_findings(report["results"], metadata, test)
def __extract_findings(
self, raw_findings, metadata: HydraScanMetadata, test,
diff --git a/dojo/tools/intsights/parser.py b/dojo/tools/intsights/parser.py
index e49c61b852f..74eda25dc80 100644
--- a/dojo/tools/intsights/parser.py
+++ b/dojo/tools/intsights/parser.py
@@ -29,7 +29,7 @@ def _build_finding_description(self, alert: dict) -> str:
Returns: A markdown formatted description
"""
- description = "\n".join(
+ return "\n".join(
[
alert["description"],
f'**Date Found**: `{alert.get("report_date", "None provided")} `',
@@ -41,7 +41,6 @@ def _build_finding_description(self, alert: dict) -> str:
f'**Alert Link**: {alert.get("alert_link", "None provided")}',
],
)
- return description
def get_findings(self, file, test):
duplicates = {}
diff --git a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
index 053df04aa0e..456b23a7330 100644
--- a/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
+++ b/dojo/tools/jfrog_xray_on_demand_binary_scan/parser.py
@@ -66,8 +66,7 @@ def get_references(vulnerability):
else:
ref += "- " + reference + "\n"
return ref
- else:
- return None
+ return None
def get_remediation(extended_information):
@@ -125,8 +124,7 @@ def process_component(component):
def get_cve(vulnerability):
if "cves" in vulnerability:
- cves = vulnerability["cves"]
- return cves
+ return vulnerability["cves"]
return []
diff --git a/dojo/tools/kubebench/parser.py b/dojo/tools/kubebench/parser.py
index f288da95426..a2263dce841 100644
--- a/dojo/tools/kubebench/parser.py
+++ b/dojo/tools/kubebench/parser.py
@@ -17,8 +17,7 @@ def get_findings(self, json_output, test):
tree = json.load(json_output)
if "Controls" in tree:
return self.get_chapters(tree["Controls"], test)
- else:
- return self.get_chapters(tree, test)
+ return self.get_chapters(tree, test)
def get_chapters(self, tree, test):
items = []
@@ -105,7 +104,7 @@ def get_item(vuln, test, description):
mitigation = vuln.get("remediation", None)
vuln_id_from_tool = test_number
- finding = Finding(
+ return Finding(
title=title,
test=test,
description=description,
@@ -115,5 +114,3 @@ def get_item(vuln, test, description):
static_finding=True,
dynamic_finding=False,
)
-
- return finding
diff --git a/dojo/tools/kubescape/parser.py b/dojo/tools/kubescape/parser.py
index 877a903db1a..c371f477901 100644
--- a/dojo/tools/kubescape/parser.py
+++ b/dojo/tools/kubescape/parser.py
@@ -29,12 +29,13 @@ def __hyperlink(link: str) -> str:
def severity_mapper(self, input):
if input <= 4:
return "Low"
- elif input <= 7:
+ if input <= 7:
return "Medium"
- elif input <= 9:
+ if input <= 9:
return "High"
- elif input <= 10:
+ if input <= 10:
return "Critical"
+ return None
def parse_resource_id(self, resource_id):
try:
diff --git a/dojo/tools/microfocus_webinspect/parser.py b/dojo/tools/microfocus_webinspect/parser.py
index 58713a66410..d5a2611f95c 100644
--- a/dojo/tools/microfocus_webinspect/parser.py
+++ b/dojo/tools/microfocus_webinspect/parser.py
@@ -97,16 +97,15 @@ def get_findings(self, file, test):
def convert_severity(val):
if val == "0":
return "Info"
- elif val == "1":
+ if val == "1":
return "Low"
- elif val == "2":
+ if val == "2":
return "Medium"
- elif val == "3":
+ if val == "3":
return "High"
- elif val == "4":
+ if val == "4":
return "Critical"
- else:
- return "Info"
+ return "Info"
@staticmethod
def get_cwe(val):
@@ -114,5 +113,4 @@ def get_cwe(val):
cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE)
if cweSearch:
return int(cweSearch.group(1))
- else:
- return 0
+ return 0
diff --git a/dojo/tools/mobsf/parser.py b/dojo/tools/mobsf/parser.py
index 9e8ccf91029..b86d7bf041b 100644
--- a/dojo/tools/mobsf/parser.py
+++ b/dojo/tools/mobsf/parser.py
@@ -378,8 +378,7 @@ def getSeverityForPermission(self, status):
"""
if "dangerous" == status:
return "High"
- else:
- return "Info"
+ return "Info"
# Criticality rating
def getCriticalityRating(self, rating):
diff --git a/dojo/tools/mobsfscan/parser.py b/dojo/tools/mobsfscan/parser.py
index ae7eecc1224..bd9fd5167a3 100644
--- a/dojo/tools/mobsfscan/parser.py
+++ b/dojo/tools/mobsfscan/parser.py
@@ -29,53 +29,52 @@ def get_findings(self, filename, test):
data = json.load(filename)
if len(data.get("results")) == 0:
return []
- else:
- dupes = {}
- for key, item in data.get("results").items():
- metadata = item.get("metadata")
- cwe = int(
- re.match(r"(cwe|CWE)-([0-9]+)", metadata.get("cwe")).group(
- 2,
- ),
- )
- masvs = metadata.get("masvs")
- owasp_mobile = metadata.get("owasp-mobile")
- description = "\n".join(
- [
- f"**Description:** `{metadata.get('description')}`",
- f"**OWASP MASVS:** `{masvs}`",
- f"**OWASP Mobile:** `{owasp_mobile}`",
- ],
- )
- references = metadata.get("reference")
- if metadata.get("severity") in self.SEVERITY:
- severity = self.SEVERITY[metadata.get("severity")]
- else:
- severity = "Info"
+ dupes = {}
+ for key, item in data.get("results").items():
+ metadata = item.get("metadata")
+ cwe = int(
+ re.match(r"(cwe|CWE)-([0-9]+)", metadata.get("cwe")).group(
+ 2,
+ ),
+ )
+ masvs = metadata.get("masvs")
+ owasp_mobile = metadata.get("owasp-mobile")
+ description = "\n".join(
+ [
+ f"**Description:** `{metadata.get('description')}`",
+ f"**OWASP MASVS:** `{masvs}`",
+ f"**OWASP Mobile:** `{owasp_mobile}`",
+ ],
+ )
+ references = metadata.get("reference")
+ if metadata.get("severity") in self.SEVERITY:
+ severity = self.SEVERITY[metadata.get("severity")]
+ else:
+ severity = "Info"
- finding = Finding(
- title=f"{key}",
- test=test,
- severity=severity,
- nb_occurences=1,
- cwe=cwe,
- description=description,
- references=references,
- )
- if item.get("files"):
- for file in item.get("files"):
- file_path = file.get("file_path")
- line = file.get("match_lines")[0]
- finding.file_path = file_path
- finding.line = line
+ finding = Finding(
+ title=f"{key}",
+ test=test,
+ severity=severity,
+ nb_occurences=1,
+ cwe=cwe,
+ description=description,
+ references=references,
+ )
+ if item.get("files"):
+ for file in item.get("files"):
+ file_path = file.get("file_path")
+ line = file.get("match_lines")[0]
+ finding.file_path = file_path
+ finding.line = line
- dupe_key = hashlib.sha256(
- (key + str(cwe) + masvs + owasp_mobile).encode("utf-8"),
- ).hexdigest()
+ dupe_key = hashlib.sha256(
+ (key + str(cwe) + masvs + owasp_mobile).encode("utf-8"),
+ ).hexdigest()
- if dupe_key in dupes:
- finding = dupes[dupe_key]
- finding.nb_occurences += 1
- else:
- dupes[dupe_key] = finding
- return list(dupes.values())
+ if dupe_key in dupes:
+ finding = dupes[dupe_key]
+ finding.nb_occurences += 1
+ else:
+ dupes[dupe_key] = finding
+ return list(dupes.values())
diff --git a/dojo/tools/mozilla_observatory/parser.py b/dojo/tools/mozilla_observatory/parser.py
index 783e0ada6f2..19e4c7febd7 100644
--- a/dojo/tools/mozilla_observatory/parser.py
+++ b/dojo/tools/mozilla_observatory/parser.py
@@ -61,9 +61,8 @@ def get_findings(self, file, test):
def get_severity(self, num_severity):
if 0 > num_severity >= -10:
return "Low"
- elif -11 >= num_severity > -26:
+ if -11 >= num_severity > -26:
return "Medium"
- elif num_severity <= -26:
+ if num_severity <= -26:
return "High"
- else:
- return "Info"
+ return "Info"
diff --git a/dojo/tools/ms_defender/parser.py b/dojo/tools/ms_defender/parser.py
index 07cf6de4049..ccf348cb468 100644
--- a/dojo/tools/ms_defender/parser.py
+++ b/dojo/tools/ms_defender/parser.py
@@ -34,29 +34,28 @@ def get_findings(self, file, test):
zipdata = {name: input_zip.read(name) for name in input_zip.namelist()}
if zipdata.get("machines/") is None or zipdata.get("vulnerabilities/") is None:
return []
- else:
- vulnerabilityfiles = []
- machinefiles = []
- for content in list(zipdata):
- if "vulnerabilities/" in content and "vulnerabilities/" != content:
- vulnerabilityfiles.append(content)
- if "machines/" in content and "machines/" != content:
- machinefiles.append(content)
- vulnerabilities = []
- machines = {}
- for vulnerabilityfile in vulnerabilityfiles:
- output = json.loads(zipdata[vulnerabilityfile].decode("ascii"))["value"]
- for data in output:
- vulnerabilities.append(data)
- for machinefile in machinefiles:
- output = json.loads(zipdata[machinefile].decode("ascii"))["value"]
- for data in output:
- machines[data.get("id")] = data
- for vulnerability in vulnerabilities:
- try:
- self.process_zip(vulnerability, machines[vulnerability["machineId"]])
- except (IndexError, KeyError):
- self.process_json(vulnerability)
+ vulnerabilityfiles = []
+ machinefiles = []
+ for content in list(zipdata):
+ if "vulnerabilities/" in content and "vulnerabilities/" != content:
+ vulnerabilityfiles.append(content)
+ if "machines/" in content and "machines/" != content:
+ machinefiles.append(content)
+ vulnerabilities = []
+ machines = {}
+ for vulnerabilityfile in vulnerabilityfiles:
+ output = json.loads(zipdata[vulnerabilityfile].decode("ascii"))["value"]
+ for data in output:
+ vulnerabilities.append(data)
+ for machinefile in machinefiles:
+ output = json.loads(zipdata[machinefile].decode("ascii"))["value"]
+ for data in output:
+ machines[data.get("id")] = data
+ for vulnerability in vulnerabilities:
+ try:
+ self.process_zip(vulnerability, machines[vulnerability["machineId"]])
+ except (IndexError, KeyError):
+ self.process_json(vulnerability)
else:
return []
return self.findings
@@ -141,5 +140,4 @@ def process_zip(self, vulnerability, machine):
def severity_check(self, input):
if input in ["Informational", "Low", "Medium", "High", "Critical"]:
return input
- else:
- return "Informational"
+ return "Informational"
diff --git a/dojo/tools/neuvector/parser.py b/dojo/tools/neuvector/parser.py
index 7cf278ce7e3..468f4104a03 100644
--- a/dojo/tools/neuvector/parser.py
+++ b/dojo/tools/neuvector/parser.py
@@ -116,16 +116,15 @@ def get_item(vulnerability, test):
def convert_severity(severity):
if severity.lower() == "critical":
return "Critical"
- elif severity.lower() == "high":
+ if severity.lower() == "high":
return "High"
- elif severity.lower() == "medium":
+ if severity.lower() == "medium":
return "Medium"
- elif severity.lower() == "low":
+ if severity.lower() == "low":
return "Low"
- elif severity == "":
+ if severity == "":
return "Info"
- else:
- return severity.title()
+ return severity.title()
class NeuVectorParser:
@@ -144,6 +143,5 @@ def get_findings(self, filename, test):
if filename.name.lower().endswith(".json"):
return NeuVectorJsonParser().parse(filename, test)
- else:
- msg = "Unknown File Format"
- raise ValueError(msg)
+ msg = "Unknown File Format"
+ raise ValueError(msg)
diff --git a/dojo/tools/neuvector_compliance/parser.py b/dojo/tools/neuvector_compliance/parser.py
index adf05d0729d..b3bd18bf6cf 100644
--- a/dojo/tools/neuvector_compliance/parser.py
+++ b/dojo/tools/neuvector_compliance/parser.py
@@ -101,7 +101,7 @@ def get_item(node, test):
for m in messages:
full_description += f"{str(m).rstrip()}\n"
- finding = Finding(
+ return Finding(
title=title,
test=test,
description=full_description,
@@ -112,25 +112,22 @@ def get_item(node, test):
dynamic_finding=False,
)
- return finding
-
# see neuvector/share/clus_apis.go
def convert_severity(severity):
if severity.lower() == "high":
return "High"
- elif severity.lower() == "warn":
+ if severity.lower() == "warn":
return "Medium"
- elif severity.lower() == "info":
+ if severity.lower() == "info":
return "Low"
- elif severity.lower() == "pass":
+ if severity.lower() == "pass":
return "Info"
- elif severity.lower() == "note":
+ if severity.lower() == "note":
return "Info"
- elif severity.lower() == "error":
+ if severity.lower() == "error":
return "Info"
- else:
- return severity.title()
+ return severity.title()
class NeuVectorComplianceParser:
@@ -149,6 +146,5 @@ def get_findings(self, filename, test):
if filename.name.lower().endswith(".json"):
return parse(filename, test)
- else:
- msg = "Unknown File Format"
- raise ValueError(msg)
+ msg = "Unknown File Format"
+ raise ValueError(msg)
diff --git a/dojo/tools/nikto/parser.py b/dojo/tools/nikto/parser.py
index 57908f3d5db..c3d332d29c3 100644
--- a/dojo/tools/nikto/parser.py
+++ b/dojo/tools/nikto/parser.py
@@ -27,8 +27,7 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
if filename.name.lower().endswith(".xml"):
return NiktoXMLParser().process_xml(filename, test)
- elif filename.name.lower().endswith(".json"):
+ if filename.name.lower().endswith(".json"):
return NiktoJSONParser().process_json(filename, test)
- else:
- msg = "Unknown File Format"
- raise ValueError(msg)
+ msg = "Unknown File Format"
+ raise ValueError(msg)
diff --git a/dojo/tools/nmap/parser.py b/dojo/tools/nmap/parser.py
index 5e101f2bb9f..f1a62892c69 100644
--- a/dojo/tools/nmap/parser.py
+++ b/dojo/tools/nmap/parser.py
@@ -146,14 +146,13 @@ def convert_cvss_score(self, raw_value):
val = float(raw_value)
if val == 0.0:
return "Info"
- elif val < 4.0:
+ if val < 4.0:
return "Low"
- elif val < 7.0:
+ if val < 7.0:
return "Medium"
- elif val < 9.0:
+ if val < 9.0:
return "High"
- else:
- return "Critical"
+ return "Critical"
def manage_vulner_script(
self, test, dupes, script_element, endpoint, report_date=None,
diff --git a/dojo/tools/noseyparker/parser.py b/dojo/tools/noseyparker/parser.py
index 8c4a80190d5..aa35489a657 100644
--- a/dojo/tools/noseyparker/parser.py
+++ b/dojo/tools/noseyparker/parser.py
@@ -29,8 +29,8 @@ def get_findings(self, file, test):
# Turn JSONL file into DataFrame
if file is None:
- return
- elif file.name.lower().endswith(".jsonl"):
+ return None
+ if file.name.lower().endswith(".jsonl"):
# Process JSON lines into Dict
data = [json.loads(line) for line in file]
diff --git a/dojo/tools/npm_audit/parser.py b/dojo/tools/npm_audit/parser.py
index fc07e281007..f5143f72af3 100644
--- a/dojo/tools/npm_audit/parser.py
+++ b/dojo/tools/npm_audit/parser.py
@@ -24,7 +24,7 @@ def get_findings(self, json_output, test):
def parse_json(self, json_output):
if json_output is None:
- return
+ return None
try:
data = json_output.read()
try:
@@ -46,9 +46,7 @@ def parse_json(self, json_output):
msg = "npm audit report contains errors: %s, %s"
raise ValueError(msg, code, summary)
- subtree = tree.get("advisories")
-
- return subtree
+ return tree.get("advisories")
def get_items(self, tree, test):
items = {}
diff --git a/dojo/tools/npm_audit_7_plus/parser.py b/dojo/tools/npm_audit_7_plus/parser.py
index 65b78b4052c..88198844a04 100644
--- a/dojo/tools/npm_audit_7_plus/parser.py
+++ b/dojo/tools/npm_audit_7_plus/parser.py
@@ -44,7 +44,7 @@ def get_findings(self, json_output, test):
def parse_json(self, json_output):
"""Parse the json format to get findings."""
if json_output is None:
- return
+ return None
try:
data = json_output.read()
try:
diff --git a/dojo/tools/nsp/parser.py b/dojo/tools/nsp/parser.py
index 0b4da91c4e4..466ed6dfa60 100644
--- a/dojo/tools/nsp/parser.py
+++ b/dojo/tools/nsp/parser.py
@@ -17,8 +17,7 @@ def get_findings(self, json_output, test):
tree = self.parse_json(json_output)
if tree:
return self.get_items(tree, test)
- else:
- return []
+ return []
def parse_json(self, json_output):
try:
@@ -56,7 +55,7 @@ def get_item(item_node, test):
else:
severity = "Critical"
- finding = Finding(
+ return Finding(
title=item_node["title"]
+ " - "
+ "("
@@ -89,5 +88,3 @@ def get_item(item_node, test):
mitigated=None,
impact="No impact provided",
)
-
- return finding
diff --git a/dojo/tools/nuclei/parser.py b/dojo/tools/nuclei/parser.py
index dc79eacaf65..4c843c6dca1 100644
--- a/dojo/tools/nuclei/parser.py
+++ b/dojo/tools/nuclei/parser.py
@@ -33,7 +33,7 @@ def get_findings(self, filename, test):
data = []
if filecontent == "" or len(filecontent) == 0:
return []
- elif filecontent[0] == "[":
+ if filecontent[0] == "[":
content = json.loads(filecontent)
for template in content:
data.append(template)
diff --git a/dojo/tools/openvas/csv_parser.py b/dojo/tools/openvas/csv_parser.py
index 4d3011d82f9..c93a411bc92 100644
--- a/dojo/tools/openvas/csv_parser.py
+++ b/dojo/tools/openvas/csv_parser.py
@@ -21,10 +21,9 @@ def map_column_value(self, finding, column_value):
def evaluate_bool_value(column_value):
if column_value.lower() == "true":
return True
- elif column_value.lower() == "false":
+ if column_value.lower() == "false":
return False
- else:
- return None
+ return None
def process_column(self, column_name, column_value, finding):
if (
diff --git a/dojo/tools/openvas/parser.py b/dojo/tools/openvas/parser.py
index a103a4d8921..9f366c17694 100644
--- a/dojo/tools/openvas/parser.py
+++ b/dojo/tools/openvas/parser.py
@@ -15,5 +15,6 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
if str(filename.name).endswith(".csv"):
return OpenVASCSVParser().get_findings(filename, test)
- elif str(filename.name).endswith(".xml"):
+ if str(filename.name).endswith(".xml"):
return OpenVASXMLParser().get_findings(filename, test)
+ return None
diff --git a/dojo/tools/openvas/xml_parser.py b/dojo/tools/openvas/xml_parser.py
index bd9d365e0da..32b7d001ca9 100644
--- a/dojo/tools/openvas/xml_parser.py
+++ b/dojo/tools/openvas/xml_parser.py
@@ -50,11 +50,10 @@ def convert_cvss_score(self, raw_value):
val = float(raw_value)
if val == 0.0:
return "Info"
- elif val < 4.0:
+ if val < 4.0:
return "Low"
- elif val < 7.0:
+ if val < 7.0:
return "Medium"
- elif val < 9.0:
+ if val < 9.0:
return "High"
- else:
- return "Critical"
+ return "Critical"
diff --git a/dojo/tools/ort/parser.py b/dojo/tools/ort/parser.py
index f314365ed88..9f0927bd1cf 100644
--- a/dojo/tools/ort/parser.py
+++ b/dojo/tools/ort/parser.py
@@ -24,8 +24,7 @@ def get_findings(self, json_output, test):
evaluated_model = self.parse_json(json_output)
if evaluated_model:
return self.get_items(evaluated_model, test)
- else:
- return []
+ return []
def parse_json(self, json_output):
try:
@@ -78,17 +77,14 @@ def is_rule_violation_unresolved(rule_violation):
def find_in_dependency_tree(tree, package_id):
if "pkg" in tree and tree["pkg"] == package_id:
return True
- else:
- if "children" in tree:
- found_in_child = False
- for child in tree["children"]:
- if found_in_child:
- break
- else:
- found_in_child = find_in_dependency_tree(child, package_id)
- return found_in_child
- else:
- return False
+ if "children" in tree:
+ found_in_child = False
+ for child in tree["children"]:
+ if found_in_child:
+ break
+ found_in_child = find_in_dependency_tree(child, package_id)
+ return found_in_child
+ return False
def get_project_ids_for_package(dependency_trees, package_id):
@@ -172,7 +168,7 @@ def get_item(model, test):
severity = get_severity(model.rule_violation)
- finding = Finding(
+ return Finding(
title=model.rule_violation["rule"],
test=test,
references=model.rule_violation["message"],
@@ -181,8 +177,6 @@ def get_item(model, test):
static_finding=True,
)
- return finding
-
# TODO: with python 3.7
# @dataclass
@@ -200,9 +194,8 @@ def get_item(model, test):
def get_severity(rule_violation):
if rule_violation["severity"] == "ERROR":
return "High"
- elif rule_violation["severity"] == "WARNING":
+ if rule_violation["severity"] == "WARNING":
return "Medium"
- elif rule_violation["severity"] == "HINT":
+ if rule_violation["severity"] == "HINT":
return "Info"
- else:
- return "Critical"
+ return "Critical"
diff --git a/dojo/tools/ossindex_devaudit/parser.py b/dojo/tools/ossindex_devaudit/parser.py
index ed89887e29c..95ddb102d26 100644
--- a/dojo/tools/ossindex_devaudit/parser.py
+++ b/dojo/tools/ossindex_devaudit/parser.py
@@ -24,8 +24,7 @@ def get_findings(self, json_file, test):
if tree:
return list(self.get_items(tree, test))
- else:
- return []
+ return []
def parse_json(self, json_file):
if json_file is None:
@@ -71,7 +70,7 @@ def get_item(
msg = "Attempting to convert the CWE value to an integer failed"
raise ValueError(msg)
- finding = Finding(
+ return Finding(
title=dependency_source
+ ":"
+ dependency_name
@@ -97,8 +96,6 @@ def get_item(
impact="No impact provided by scan",
)
- return finding
-
def get_severity(cvss_score):
result = "Info"
diff --git a/dojo/tools/php_security_audit_v2/parser.py b/dojo/tools/php_security_audit_v2/parser.py
index e677e252545..674f35f44c8 100644
--- a/dojo/tools/php_security_audit_v2/parser.py
+++ b/dojo/tools/php_security_audit_v2/parser.py
@@ -73,9 +73,8 @@ def get_severity_word(severity):
if sev == 5:
return "Critical"
- elif sev == 4:
+ if sev == 4:
return "High"
- elif sev == 3:
+ if sev == 3:
return "Medium"
- else:
- return "Low"
+ return "Low"
diff --git a/dojo/tools/php_symfony_security_check/parser.py b/dojo/tools/php_symfony_security_check/parser.py
index e3788759c7d..2fd2cace6ba 100644
--- a/dojo/tools/php_symfony_security_check/parser.py
+++ b/dojo/tools/php_symfony_security_check/parser.py
@@ -19,7 +19,7 @@ def get_findings(self, json_file, test):
def parse_json(self, json_file):
if json_file is None:
- return
+ return None
try:
data = json_file.read()
try:
diff --git a/dojo/tools/popeye/parser.py b/dojo/tools/popeye/parser.py
index e3806c6f8d7..78c516a1b44 100644
--- a/dojo/tools/popeye/parser.py
+++ b/dojo/tools/popeye/parser.py
@@ -78,15 +78,13 @@ def get_findings(self, file, test):
def get_popeye_level_string(self, level):
if level == 1:
return "Info"
- elif level == 2:
+ if level == 2:
return "Warning"
- else:
- return "Error"
+ return "Error"
def get_defect_dojo_severity(self, level):
if level == 1:
return "Info"
- elif level == 2:
+ if level == 2:
return "Low"
- else:
- return "High"
+ return "High"
diff --git a/dojo/tools/pwn_sast/parser.py b/dojo/tools/pwn_sast/parser.py
index d66afa35127..dac79b67d95 100644
--- a/dojo/tools/pwn_sast/parser.py
+++ b/dojo/tools/pwn_sast/parser.py
@@ -119,3 +119,4 @@ def get_findings(self, filename, test):
findings[unique_finding_key] = finding
return list(findings.values())
+ return None
diff --git a/dojo/tools/qualys/csv_parser.py b/dojo/tools/qualys/csv_parser.py
index 16ad062fc69..2f88814b447 100644
--- a/dojo/tools/qualys/csv_parser.py
+++ b/dojo/tools/qualys/csv_parser.py
@@ -28,9 +28,7 @@ def parse_csv(csv_file) -> [Finding]:
)
report_findings = get_report_findings(csv_reader)
- dojo_findings = build_findings_from_dict(report_findings)
-
- return dojo_findings
+ return build_findings_from_dict(report_findings)
def get_report_findings(csv_reader) -> [dict]:
@@ -93,6 +91,7 @@ def _extract_cvss_vectors(cvss_base, cvss_temporal):
)
return cvss_vector
+ return None
def _clean_cve_data(cve_string: str) -> list:
@@ -131,8 +130,7 @@ def get_severity(value: str) -> str:
if settings.USE_QUALYS_LEGACY_SEVERITY_PARSING:
return legacy_severity_lookup.get(value, "Info")
- else:
- return qualys_severity_lookup.get(value, "Info")
+ return qualys_severity_lookup.get(value, "Info")
def build_findings_from_dict(report_findings: [dict]) -> [Finding]:
diff --git a/dojo/tools/qualys/parser.py b/dojo/tools/qualys/parser.py
index ade88d2d325..96f14a9441b 100644
--- a/dojo/tools/qualys/parser.py
+++ b/dojo/tools/qualys/parser.py
@@ -310,5 +310,4 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, file, test):
if file.name.lower().endswith(".csv"):
return csv_parser.parse_csv(file)
- else:
- return qualys_parser(file)
+ return qualys_parser(file)
diff --git a/dojo/tools/qualys_infrascan_webgui/parser.py b/dojo/tools/qualys_infrascan_webgui/parser.py
index f252e7d5414..2d26eeafc06 100644
--- a/dojo/tools/qualys_infrascan_webgui/parser.py
+++ b/dojo/tools/qualys_infrascan_webgui/parser.py
@@ -114,16 +114,15 @@ def qualys_convert_severity(raw_val):
val = str(raw_val).strip()
if "1" == val:
return "Info"
- elif "2" == val:
+ if "2" == val:
return "Low"
- elif "3" == val:
+ if "3" == val:
return "Medium"
- elif "4" == val:
+ if "4" == val:
return "High"
- elif "5" == val:
+ if "5" == val:
return "Critical"
- else:
- return "Info"
+ return "Info"
class QualysInfrascanWebguiParser:
diff --git a/dojo/tools/qualys_webapp/parser.py b/dojo/tools/qualys_webapp/parser.py
index 59c0d2b855c..9565f1dd1ee 100644
--- a/dojo/tools/qualys_webapp/parser.py
+++ b/dojo/tools/qualys_webapp/parser.py
@@ -37,8 +37,7 @@ def get_cwe(cwe):
cweSearch = re.search("CWE-([0-9]*)", cwe, re.IGNORECASE)
if cweSearch:
return cweSearch.group(1)
- else:
- return 0
+ return 0
def attach_unique_extras(
@@ -171,8 +170,7 @@ def decode_tag(tag):
if tag is not None:
if tag.get("base64") == "true":
return base64.b64decode(tag.text).decode("utf8", "replace")
- else:
- return tag.text
+ return tag.text
return ""
diff --git a/dojo/tools/retirejs/parser.py b/dojo/tools/retirejs/parser.py
index 4cb162e8a42..edbda16a87f 100644
--- a/dojo/tools/retirejs/parser.py
+++ b/dojo/tools/retirejs/parser.py
@@ -62,7 +62,7 @@ def get_item(self, item_node, test, file):
elif "osvdb" in item_node["identifiers"]:
title = "".join(item_node["identifiers"]["osvdb"])
- finding = Finding(
+ return Finding(
title=title,
test=test,
cwe=1035, # Vulnerable Third Party Component
@@ -74,5 +74,3 @@ def get_item(self, item_node, test, file):
duplicate=False,
out_of_scope=False,
)
-
- return finding
diff --git a/dojo/tools/risk_recon/parser.py b/dojo/tools/risk_recon/parser.py
index 30c08e5161e..4ddcf64e16c 100644
--- a/dojo/tools/risk_recon/parser.py
+++ b/dojo/tools/risk_recon/parser.py
@@ -36,6 +36,7 @@ def get_findings(self, filename, test):
findings = data.get("findings")
return self._get_findings_internal(findings, test)
+ return None
def _get_findings_internal(self, findings, test):
dupes = {}
diff --git a/dojo/tools/rusty_hog/parser.py b/dojo/tools/rusty_hog/parser.py
index fa2a4f6ebc2..a4582106f0d 100644
--- a/dojo/tools/rusty_hog/parser.py
+++ b/dojo/tools/rusty_hog/parser.py
@@ -19,8 +19,7 @@ def get_findings(self, json_output, test):
return self.get_items(tree, test)
def parse_json(self, json_output):
- tree = json.load(json_output)
- return tree
+ return json.load(json_output)
def get_items(self, json_output, scanner, test):
items = {}
@@ -79,7 +78,7 @@ def __getitem(self, vulnerabilities, scanner):
for vulnerability in vulnerabilities:
if scanner == "Rusty Hog":
break
- elif scanner == "Choctaw Hog":
+ if scanner == "Choctaw Hog":
"""Choctaw Hog"""
found_secret_string = vulnerability.get("stringsFound")
description = f"**This string was found:** {found_secret_string}"
diff --git a/dojo/tools/sarif/parser.py b/dojo/tools/sarif/parser.py
index b707205f9fe..eb83977f0c9 100644
--- a/dojo/tools/sarif/parser.py
+++ b/dojo/tools/sarif/parser.py
@@ -146,9 +146,9 @@ def get_message_from_multiformatMessageString(data, rule):
text = text.replace(substitution_str, arguments[i])
else:
return text
- else:
- # TODO: manage markdown
- return data.get("text")
+ return None
+ # TODO: manage markdown
+ return data.get("text")
def cve_try(val):
@@ -156,8 +156,7 @@ def cve_try(val):
cveSearch = re.search("(CVE-[0-9]+-[0-9]+)", val, re.IGNORECASE)
if cveSearch:
return cveSearch.group(1).upper()
- else:
- return None
+ return None
def get_title(result, rule):
@@ -327,14 +326,13 @@ def cvss_to_severity(cvss):
if cvss >= 9:
return severity_mapping.get(5)
- elif cvss >= 7:
+ if cvss >= 7:
return severity_mapping.get(4)
- elif cvss >= 4:
+ if cvss >= 4:
return severity_mapping.get(3)
- elif cvss > 0:
+ if cvss > 0:
return severity_mapping.get(2)
- else:
- return severity_mapping.get(1)
+ return severity_mapping.get(1)
def get_severity(result, rule):
@@ -346,12 +344,11 @@ def get_severity(result, rule):
if "note" == severity:
return "Info"
- elif "warning" == severity:
+ if "warning" == severity:
return "Medium"
- elif "error" == severity:
+ if "error" == severity:
return "High"
- else:
- return "Medium"
+ return "Medium"
def get_item(result, rules, artifacts, run_date):
diff --git a/dojo/tools/scout_suite/parser.py b/dojo/tools/scout_suite/parser.py
index 45dd1dbdf0c..a95b91dcd02 100644
--- a/dojo/tools/scout_suite/parser.py
+++ b/dojo/tools/scout_suite/parser.py
@@ -150,8 +150,7 @@ def __get_items(self, data):
def formatview(self, depth):
if depth > 1:
return "* "
- else:
- return ""
+ return ""
def recursive_print(self, src, depth=0, key=""):
def tabs(n):
diff --git a/dojo/tools/semgrep/parser.py b/dojo/tools/semgrep/parser.py
index aa4f7307503..a9afb107426 100644
--- a/dojo/tools/semgrep/parser.py
+++ b/dojo/tools/semgrep/parser.py
@@ -132,15 +132,14 @@ def get_findings(self, filename, test):
def convert_severity(self, val):
if "CRITICAL" == val.upper():
return "Critical"
- elif "WARNING" == val.upper():
+ if "WARNING" == val.upper():
return "Medium"
- elif "ERROR" == val.upper() or "HIGH" == val.upper():
+ if "ERROR" == val.upper() or "HIGH" == val.upper():
return "High"
- elif "INFO" == val.upper():
+ if "INFO" == val.upper():
return "Info"
- else:
- msg = f"Unknown value for severity: {val}"
- raise ValueError(msg)
+ msg = f"Unknown value for severity: {val}"
+ raise ValueError(msg)
def get_description(self, item):
description = ""
diff --git a/dojo/tools/snyk/parser.py b/dojo/tools/snyk/parser.py
index 4d1a0e89437..634a16df73b 100644
--- a/dojo/tools/snyk/parser.py
+++ b/dojo/tools/snyk/parser.py
@@ -23,8 +23,7 @@ def get_findings(self, json_output, test):
for moduleTree in reportTree:
temp += self.process_tree(moduleTree, test)
return temp
- else:
- return self.process_tree(reportTree, test)
+ return self.process_tree(reportTree, test)
def process_tree(self, tree, test):
return list(self.get_items(tree, test)) if tree else []
@@ -238,7 +237,7 @@ def get_code_item(self, vulnerability, test):
else:
severity = "Critical"
# create the finding object
- finding = Finding(
+ return Finding(
title=ruleId + "_" + locations_uri,
test=test,
severity=severity,
@@ -259,4 +258,3 @@ def get_code_item(self, vulnerability, test):
static_finding=True,
dynamic_finding=False,
)
- return finding
diff --git a/dojo/tools/snyk_code/parser.py b/dojo/tools/snyk_code/parser.py
index a35b37251cf..875e49e006e 100644
--- a/dojo/tools/snyk_code/parser.py
+++ b/dojo/tools/snyk_code/parser.py
@@ -23,8 +23,7 @@ def get_findings(self, json_output, test):
for moduleTree in reportTree:
temp += self.process_tree(moduleTree, test)
return temp
- else:
- return self.process_tree(reportTree, test)
+ return self.process_tree(reportTree, test)
def process_tree(self, tree, test):
return list(self.get_items(tree, test)) if tree else []
@@ -235,7 +234,7 @@ def get_code_item(self, vulnerability, test):
else:
severity = "Critical"
# create the finding object
- finding = Finding(
+ return Finding(
vuln_id_from_tool=ruleId,
file_path=locations_uri,
title=ruleId + "_" + locations_uri,
@@ -258,4 +257,3 @@ def get_code_item(self, vulnerability, test):
static_finding=True,
dynamic_finding=False,
)
- return finding
diff --git a/dojo/tools/sonarqube/parser.py b/dojo/tools/sonarqube/parser.py
index b06a7e83fad..efba2488e9d 100644
--- a/dojo/tools/sonarqube/parser.py
+++ b/dojo/tools/sonarqube/parser.py
@@ -27,18 +27,16 @@ def get_label_for_scan_types(self, scan_type):
def get_description_for_scan_types(self, scan_type):
if scan_type == "SonarQube Scan":
return "Aggregates findings per cwe, title, description, file_path. SonarQube output file can be imported in HTML format or JSON format. You can get the JSON output directly if you use the SonarQube API or generate with https://github.com/soprasteria/sonar-report version >= 1.1.0, recommend version >= 3.1.2"
- else:
- return "Import all findings from sonarqube html report or JSON format. SonarQube output file can be imported in HTML format or JSON format. Generate with https://github.com/soprasteria/sonar-report version >= 1.1.0, recommend version >= 3.1.2"
+ return "Import all findings from sonarqube html report or JSON format. SonarQube output file can be imported in HTML format or JSON format. Generate with https://github.com/soprasteria/sonar-report version >= 1.1.0, recommend version >= 3.1.2"
def get_findings(self, file, test):
if file.name.endswith(".json"):
json_content = json.load(file)
if json_content.get("date") and json_content.get("projectName") and json_content.get("hotspotKeys"):
return SonarQubeSoprasteriaJSON().get_json_items(json_content, test, self.mode)
- elif json_content.get("paging") and json_content.get("components"):
+ if json_content.get("paging") and json_content.get("components"):
return SonarQubeRESTAPIJSON().get_json_items(json_content, test, self.mode)
- else:
- return []
+ return []
if file.name.endswith(".zip"):
if str(file.__class__) == "":
input_zip = zipfile.ZipFile(file.name, "r")
@@ -46,13 +44,12 @@ def get_findings(self, file, test):
input_zip = zipfile.ZipFile(file, "r")
zipdata = {name: input_zip.read(name) for name in input_zip.namelist()}
return SonarQubeRESTAPIZIP().get_items(zipdata, test, self.mode)
- else:
- parser = etree.HTMLParser()
- tree = etree.parse(file, parser)
- if self.mode not in [None, "detailed"]:
- raise ValueError(
- "Internal error: Invalid mode "
- + self.mode
- + ". Expected: one of None, 'detailed'",
- )
- return SonarQubeSoprasteriaHTML().get_items(tree, test, self.mode)
+ parser = etree.HTMLParser()
+ tree = etree.parse(file, parser)
+ if self.mode not in [None, "detailed"]:
+ raise ValueError(
+ "Internal error: Invalid mode "
+ + self.mode
+ + ". Expected: one of None, 'detailed'",
+ )
+ return SonarQubeSoprasteriaHTML().get_items(tree, test, self.mode)
diff --git a/dojo/tools/sonarqube/sonarqube_restapi_json.py b/dojo/tools/sonarqube/sonarqube_restapi_json.py
index bb735f038c7..9a8e3bab226 100644
--- a/dojo/tools/sonarqube/sonarqube_restapi_json.py
+++ b/dojo/tools/sonarqube/sonarqube_restapi_json.py
@@ -233,12 +233,11 @@ def get_json_items(self, json_content, test, mode):
def severitytranslator(self, severity):
if severity == "BLOCKER":
return "High"
- elif severity == "MAJOR":
+ if severity == "MAJOR":
return "Medium"
- elif severity == "MINOR":
+ if severity == "MINOR":
return "Low"
- else:
- return severity.lower().capitalize()
+ return severity.lower().capitalize()
def returncomponent(self, json_content, key):
components = json_content.get("components")
diff --git a/dojo/tools/sonarqube/soprasteria_helper.py b/dojo/tools/sonarqube/soprasteria_helper.py
index 47ddc3ddf79..2e7259e6376 100644
--- a/dojo/tools/sonarqube/soprasteria_helper.py
+++ b/dojo/tools/sonarqube/soprasteria_helper.py
@@ -14,16 +14,15 @@ def convert_sonar_severity(self, sonar_severity):
sev = sonar_severity.lower()
if sev == "blocker":
return "Critical"
- elif sev == "critical":
+ if sev == "critical":
return "High"
- elif sev == "major":
+ if sev == "major":
return "Medium"
- elif sev == "minor":
+ if sev == "minor":
return "Low"
- elif sev in ["high", "medium", "low"]:
+ if sev in ["high", "medium", "low"]:
return sev.capitalize()
- else:
- return "Info"
+ return "Info"
def get_description(self, vuln_details):
rule_description = etree.tostring(
@@ -32,8 +31,7 @@ def get_description(self, vuln_details):
rule_description = rule_description.split("See", 1)[0]
rule_description = (str(rule_description)).replace("", "**")
rule_description = (str(rule_description)).replace("
", "**")
- rule_description = strip_tags(rule_description).strip()
- return rule_description
+ return strip_tags(rule_description).strip()
def get_references(self, rule_name, vuln_details):
rule_references = rule_name
@@ -46,8 +44,7 @@ def get_cwe(self, vuln_references):
cweSearch = re.search("CWE-([0-9]*)", vuln_references, re.IGNORECASE)
if cweSearch:
return cweSearch.group(1)
- else:
- return 0
+ return 0
# Process one vuln from the report for "SonarQube Scan"
# Create the finding and add it into the dupes list
diff --git a/dojo/tools/sonatype/parser.py b/dojo/tools/sonatype/parser.py
index e1b7bac1675..b82f1937c77 100644
--- a/dojo/tools/sonatype/parser.py
+++ b/dojo/tools/sonatype/parser.py
@@ -78,9 +78,8 @@ def get_finding(security_issue, component, test):
def get_severity(vulnerability):
if vulnerability["severity"] <= 3.9:
return "Low"
- elif vulnerability["severity"] <= 6.9:
+ if vulnerability["severity"] <= 6.9:
return "Medium"
- elif vulnerability["severity"] <= 8.9:
+ if vulnerability["severity"] <= 8.9:
return "High"
- else:
- return "Critical"
+ return "Critical"
diff --git a/dojo/tools/ssh_audit/parser.py b/dojo/tools/ssh_audit/parser.py
index 5245b791b5e..e1980a2e55b 100644
--- a/dojo/tools/ssh_audit/parser.py
+++ b/dojo/tools/ssh_audit/parser.py
@@ -23,14 +23,13 @@ def convert_cvss_score(self, raw_value):
val = float(raw_value)
if val == 0.0:
return "Info"
- elif val < 4.0:
+ if val < 4.0:
return "Low"
- elif val < 7.0:
+ if val < 7.0:
return "Medium"
- elif val < 9.0:
+ if val < 9.0:
return "High"
- else:
- return "Critical"
+ return "Critical"
def get_findings(self, filename, test):
items = []
diff --git a/dojo/tools/sslyze/parser.py b/dojo/tools/sslyze/parser.py
index a80965f294b..a90edaa5db3 100644
--- a/dojo/tools/sslyze/parser.py
+++ b/dojo/tools/sslyze/parser.py
@@ -22,8 +22,7 @@ def get_findings(self, filename, test):
if filename.name.lower().endswith(".xml"):
return SSLyzeXMLParser().get_findings(filename, test)
- elif filename.name.lower().endswith(".json"):
+ if filename.name.lower().endswith(".json"):
return SSLyzeJSONParser().get_findings(filename, test)
- else:
- msg = "Unknown File Format"
- raise ValueError(msg)
+ msg = "Unknown File Format"
+ raise ValueError(msg)
diff --git a/dojo/tools/sslyze/parser_json.py b/dojo/tools/sslyze/parser_json.py
index 28ec63f9e4c..e8e03d1030b 100644
--- a/dojo/tools/sslyze/parser_json.py
+++ b/dojo/tools/sslyze/parser_json.py
@@ -71,12 +71,13 @@
class SSLyzeJSONParser:
def get_findings(self, json_output, test):
if json_output is None:
- return
+ return None
tree = self.parse_json(json_output)
if tree:
return self.get_items(tree, test)
+ return None
def parse_json(self, json_output):
try:
@@ -403,7 +404,7 @@ def get_weak_protocol(cipher, text, node, test, endpoint):
return get_finding(
title, description, None, REFERENCES, test, endpoint,
)
- elif "result" in weak_node:
+ if "result" in weak_node:
weak_node_result = weak_node["result"]
if (
"accepted_cipher_suites" in weak_node_result
@@ -622,5 +623,4 @@ def get_endpoint(node):
port = si_node["port"]
if hostname is not None:
return Endpoint(host=hostname, port=port)
- else:
- return None
+ return None
diff --git a/dojo/tools/stackhawk/parser.py b/dojo/tools/stackhawk/parser.py
index 99d708cdc80..20462d804de 100644
--- a/dojo/tools/stackhawk/parser.py
+++ b/dojo/tools/stackhawk/parser.py
@@ -33,9 +33,7 @@ def get_findings(self, json_output, test):
completed_scan = self.__parse_json(json_output)
metadata = StackHawkScanMetadata(completed_scan)
- findings = self.__extract_findings(completed_scan, metadata, test)
-
- return findings
+ return self.__extract_findings(completed_scan, metadata, test)
def __extract_findings(
self, completed_scan, metadata: StackHawkScanMetadata, test,
@@ -142,12 +140,11 @@ def __hyperlink(link: str) -> str:
def __endpoint_status(status: str) -> str:
if status == "NEW":
return "** - New**"
- elif status == "RISK_ACCEPTED":
+ if status == "RISK_ACCEPTED":
return '** - Marked "Risk Accepted"**'
- elif status == "FALSE_POSITIVE":
+ if status == "FALSE_POSITIVE":
return '** - Marked "False Positive"**'
- else:
- return ""
+ return ""
@staticmethod
def __are_all_endpoints_in_status(paths, check_status: str) -> bool:
diff --git a/dojo/tools/sysdig_reports/parser.py b/dojo/tools/sysdig_reports/parser.py
index 2db34b4a526..f4241e3bdca 100644
--- a/dojo/tools/sysdig_reports/parser.py
+++ b/dojo/tools/sysdig_reports/parser.py
@@ -27,15 +27,14 @@ def get_findings(self, filename, test):
if filename.name.lower().endswith(".csv"):
arr_data = CSVParser().parse(filename=filename)
return self.parse_csv(arr_data=arr_data, test=test)
- elif filename.name.lower().endswith(".json"):
+ if filename.name.lower().endswith(".json"):
scan_data = filename.read()
try:
data = json.loads(str(scan_data, "utf-8"))
except Exception:
data = json.loads(scan_data)
return self.parse_json(data=data, test=test)
- else:
- return ()
+ return ()
def parse_json(self, data, test):
vulnerability = data.get("data", None)
diff --git a/dojo/tools/tenable/csv_format.py b/dojo/tools/tenable/csv_format.py
index 5949db58156..c1ea9fc2c8d 100644
--- a/dojo/tools/tenable/csv_format.py
+++ b/dojo/tools/tenable/csv_format.py
@@ -70,8 +70,7 @@ def detect_delimiter(self, content: str):
first_line = content.split("\n")[0]
if ";" in first_line:
return ";"
- else:
- return "," # default to comma if no semicolon found
+ return "," # default to comma if no semicolon found
def get_findings(self, filename: str, test: Test):
# Read the CSV
diff --git a/dojo/tools/tenable/parser.py b/dojo/tools/tenable/parser.py
index 2c8e00c4687..e6809190a09 100644
--- a/dojo/tools/tenable/parser.py
+++ b/dojo/tools/tenable/parser.py
@@ -19,8 +19,7 @@ def get_findings(self, filename, test):
".xml",
) or filename.name.lower().endswith(".nessus"):
return TenableXMLParser().get_findings(filename, test)
- elif filename.name.lower().endswith(".csv"):
+ if filename.name.lower().endswith(".csv"):
return TenableCSVParser().get_findings(filename, test)
- else:
- msg = "Filename extension not recognized. Use .xml, .nessus or .csv"
- raise ValueError(msg)
+ msg = "Filename extension not recognized. Use .xml, .nessus or .csv"
+ raise ValueError(msg)
diff --git a/dojo/tools/trivy/parser.py b/dojo/tools/trivy/parser.py
index defc54a9229..1fde84a80f0 100644
--- a/dojo/tools/trivy/parser.py
+++ b/dojo/tools/trivy/parser.py
@@ -57,18 +57,16 @@ def get_description_for_scan_types(self, scan_type):
def convert_cvss_score(self, raw_value):
if raw_value is None:
return "Info"
- else:
- val = float(raw_value)
- if val == 0.0:
- return "Info"
- elif val < 4.0:
- return "Low"
- elif val < 7.0:
- return "Medium"
- elif val < 9.0:
- return "High"
- else:
- return "Critical"
+ val = float(raw_value)
+ if val == 0.0:
+ return "Info"
+ if val < 4.0:
+ return "Low"
+ if val < 7.0:
+ return "Medium"
+ if val < 9.0:
+ return "High"
+ return "Critical"
def get_findings(self, scan_file, test):
scan_data = scan_file.read()
@@ -82,71 +80,69 @@ def get_findings(self, scan_file, test):
if data is None:
return []
# Legacy format with results
- elif isinstance(data, list):
+ if isinstance(data, list):
return self.get_result_items(test, data)
- else:
- schema_version = data.get("SchemaVersion", None)
- artifact_name = data.get("ArtifactName", "")
- cluster_name = data.get("ClusterName")
- if schema_version == 2:
- results = data.get("Results", [])
- return self.get_result_items(test, results, artifact_name=artifact_name)
- elif cluster_name:
- findings = []
- vulnerabilities = data.get("Vulnerabilities", [])
- for service in vulnerabilities:
- namespace = service.get("Namespace")
- kind = service.get("Kind")
- name = service.get("Name")
- service_name = ""
- if namespace:
- service_name = f"{namespace} / "
- if kind:
- service_name += f"{kind} / "
- if name:
- service_name += f"{name} / "
- if len(service_name) >= 3:
- service_name = service_name[:-3]
- findings += self.get_result_items(
- test, service.get("Results", []), service_name,
- )
- misconfigurations = data.get("Misconfigurations", [])
- for service in misconfigurations:
- namespace = service.get("Namespace")
- kind = service.get("Kind")
- name = service.get("Name")
- service_name = ""
- if namespace:
- service_name = f"{namespace} / "
- if kind:
- service_name += f"{kind} / "
- if name:
- service_name += f"{name} / "
- if len(service_name) >= 3:
- service_name = service_name[:-3]
- findings += self.get_result_items(
- test, service.get("Results", []), service_name,
- )
- resources = data.get("Resources", [])
- for resource in resources:
- namespace = resource.get("Namespace")
- kind = resource.get("Kind")
- name = resource.get("Name")
- if namespace:
- resource_name = f"{namespace} / "
- if kind:
- resource_name += f"{kind} / "
- if name:
- resource_name += f"{name} / "
- if len(resource_name) >= 3:
- resource_name = resource_name[:-3]
- findings += self.get_result_items(
- test, resource.get("Results", []), resource_name,
- )
- return findings
- else:
- msg = "Schema of Trivy json report is not supported"
- raise ValueError(msg)
+ schema_version = data.get("SchemaVersion", None)
+ artifact_name = data.get("ArtifactName", "")
+ cluster_name = data.get("ClusterName")
+ if schema_version == 2:
+ results = data.get("Results", [])
+ return self.get_result_items(test, results, artifact_name=artifact_name)
+ if cluster_name:
+ findings = []
+ vulnerabilities = data.get("Vulnerabilities", [])
+ for service in vulnerabilities:
+ namespace = service.get("Namespace")
+ kind = service.get("Kind")
+ name = service.get("Name")
+ service_name = ""
+ if namespace:
+ service_name = f"{namespace} / "
+ if kind:
+ service_name += f"{kind} / "
+ if name:
+ service_name += f"{name} / "
+ if len(service_name) >= 3:
+ service_name = service_name[:-3]
+ findings += self.get_result_items(
+ test, service.get("Results", []), service_name,
+ )
+ misconfigurations = data.get("Misconfigurations", [])
+ for service in misconfigurations:
+ namespace = service.get("Namespace")
+ kind = service.get("Kind")
+ name = service.get("Name")
+ service_name = ""
+ if namespace:
+ service_name = f"{namespace} / "
+ if kind:
+ service_name += f"{kind} / "
+ if name:
+ service_name += f"{name} / "
+ if len(service_name) >= 3:
+ service_name = service_name[:-3]
+ findings += self.get_result_items(
+ test, service.get("Results", []), service_name,
+ )
+ resources = data.get("Resources", [])
+ for resource in resources:
+ namespace = resource.get("Namespace")
+ kind = resource.get("Kind")
+ name = resource.get("Name")
+ if namespace:
+ resource_name = f"{namespace} / "
+ if kind:
+ resource_name += f"{kind} / "
+ if name:
+ resource_name += f"{name} / "
+ if len(resource_name) >= 3:
+ resource_name = resource_name[:-3]
+ findings += self.get_result_items(
+ test, resource.get("Results", []), resource_name,
+ )
+ return findings
+ msg = "Schema of Trivy json report is not supported"
+ raise ValueError(msg)
def get_result_items(self, test, results, service_name=None, artifact_name=""):
items = []
diff --git a/dojo/tools/trufflehog/parser.py b/dojo/tools/trufflehog/parser.py
index 9dd8234d09a..c51f3f8163e 100644
--- a/dojo/tools/trufflehog/parser.py
+++ b/dojo/tools/trufflehog/parser.py
@@ -26,10 +26,9 @@ def get_findings(self, filename, test):
if "SourceMetadata" in json_data:
return self.get_findings_v3(dict_strs, test)
- elif "path" in json_data:
+ if "path" in json_data:
return self.get_findings_v2(dict_strs, test)
- else:
- return []
+ return []
def get_findings_v2(self, data, test):
dupes = {}
@@ -210,6 +209,5 @@ def walk_dict(self, obj, tab_count=1):
value, tab_count=(tab_count + 1),
)
continue
- else:
- return_string += f"{tab_string}{key}: {value}\n"
+ return_string += f"{tab_string}{key}: {value}\n"
return return_string
diff --git a/dojo/tools/trustwave_fusion_api/parser.py b/dojo/tools/trustwave_fusion_api/parser.py
index 53358b26880..8ee522acc35 100644
--- a/dojo/tools/trustwave_fusion_api/parser.py
+++ b/dojo/tools/trustwave_fusion_api/parser.py
@@ -49,12 +49,11 @@ def convert_severity(self, num_severity):
"""Convert severity value"""
if num_severity >= -10:
return "Low"
- elif -11 >= num_severity > -26:
+ if -11 >= num_severity > -26:
return "Medium"
- elif num_severity <= -26:
+ if num_severity <= -26:
return "High"
- else:
- return "Info"
+ return "Info"
def get_item(vuln, test):
diff --git a/dojo/tools/twistlock/parser.py b/dojo/tools/twistlock/parser.py
index 53a7f21fd16..740d72f8e68 100644
--- a/dojo/tools/twistlock/parser.py
+++ b/dojo/tools/twistlock/parser.py
@@ -67,7 +67,7 @@ def parse_issue(self, row, test):
def parse(self, filename, test):
if filename is None:
- return
+ return None
content = filename.read()
dupes = {}
if isinstance(content, bytes):
@@ -190,16 +190,15 @@ def get_item(vulnerability, test):
def convert_severity(severity):
if severity.lower() == "important":
return "High"
- elif severity.lower() == "moderate":
+ if severity.lower() == "moderate":
return "Medium"
- elif severity.lower() == "information":
+ if severity.lower() == "information":
return "Info"
- elif severity.lower() == "informational":
+ if severity.lower() == "informational":
return "Info"
- elif severity == "":
+ if severity == "":
return "Info"
- else:
- return severity.title()
+ return severity.title()
class TwistlockParser:
@@ -218,8 +217,7 @@ def get_findings(self, filename, test):
if filename.name.lower().endswith(".json"):
return TwistlockJsonParser().parse(filename, test)
- elif filename.name.lower().endswith(".csv"):
+ if filename.name.lower().endswith(".csv"):
return TwistlockCSVParser().parse(filename, test)
- else:
- msg = "Unknown File Format"
- raise ValueError(msg)
+ msg = "Unknown File Format"
+ raise ValueError(msg)
diff --git a/dojo/tools/utils.py b/dojo/tools/utils.py
index 1a4ab328d21..f18b1f4f16e 100644
--- a/dojo/tools/utils.py
+++ b/dojo/tools/utils.py
@@ -16,7 +16,7 @@ def get_npm_cwe(item_node):
if cwe_node:
if isinstance(cwe_node, list):
return int(cwe_node[0][4:])
- elif cwe_node.startswith("CWE-"):
+ if cwe_node.startswith("CWE-"):
cwe_string = cwe_node[4:]
if cwe_string:
return int(cwe_string)
diff --git a/dojo/tools/vcg/parser.py b/dojo/tools/vcg/parser.py
index 0d29448a2ce..7b35eb81ddb 100644
--- a/dojo/tools/vcg/parser.py
+++ b/dojo/tools/vcg/parser.py
@@ -65,8 +65,7 @@ def get_field_from_xml(issue, field):
and issue.find(field).text is not None
):
return issue.find(field).text
- else:
- return None
+ return None
def __init__(self):
pass
@@ -97,8 +96,7 @@ def parse_issue(self, issue, test):
data.code_line = self.get_field_from_xml(issue, "CodeLine")
# data.line = self.get_field_from_xml(issue, 'CodeLine')
- finding = data.to_finding(test)
- return finding
+ return data.to_finding(test)
def parse(self, content, test):
dupes = {}
@@ -133,8 +131,7 @@ class VCGCsvParser:
def get_field_from_row(row, column):
if row[column] is not None:
return row[column]
- else:
- return None
+ return None
def parse_issue(self, row, test):
if not row:
@@ -168,8 +165,7 @@ def parse_issue(self, row, test):
data.line = self.get_field_from_row(row, line_column)
data.code_line = self.get_field_from_row(row, code_line_column)
- finding = data.to_finding(test)
- return finding
+ return data.to_finding(test)
def parse(self, content, test):
dupes = {}
@@ -219,8 +215,7 @@ def get_findings(self, filename, test):
# 'utf-8' This line was added to pass a unittest in test_parsers.TestParsers.test_file_existence.
if filename.name.lower().endswith(".xml"):
return list(VCGXmlParser().parse(content, test).values())
- elif filename.name.lower().endswith(".csv"):
+ if filename.name.lower().endswith(".csv"):
return list(VCGCsvParser().parse(content, test).values())
- else:
- msg = "Unknown File Format"
- raise ValueError(msg)
+ msg = "Unknown File Format"
+ raise ValueError(msg)
diff --git a/dojo/tools/veracode/json_parser.py b/dojo/tools/veracode/json_parser.py
index fe707b964c1..fe48bbb46fd 100644
--- a/dojo/tools/veracode/json_parser.py
+++ b/dojo/tools/veracode/json_parser.py
@@ -133,9 +133,9 @@ def create_finding_from_details(self, finding_details, scan_type, policy_violate
# Fill in extra info based on the scan type
if scan_type == "STATIC":
return self.add_static_details(finding, finding_details, backup_title=cwe_title)
- elif scan_type == "DYNAMIC":
+ if scan_type == "DYNAMIC":
return self.add_dynamic_details(finding, finding_details, backup_title=cwe_title)
- elif scan_type == "SCA":
+ if scan_type == "SCA":
return self.add_sca_details(finding, finding_details, backup_title=cwe_title)
return None
diff --git a/dojo/tools/veracode/parser.py b/dojo/tools/veracode/parser.py
index ec3f5ba00d9..80f2e68c186 100644
--- a/dojo/tools/veracode/parser.py
+++ b/dojo/tools/veracode/parser.py
@@ -17,8 +17,7 @@ def get_description_for_scan_types(self, scan_type):
def get_findings(self, filename, test):
if filename.name.lower().endswith(".xml"):
return VeracodeXMLParser().get_findings(filename, test)
- elif filename.name.lower().endswith(".json"):
+ if filename.name.lower().endswith(".json"):
return VeracodeJSONParser().get_findings(filename, test)
- else:
- msg = "Filename extension not recognized. Use .xml or .json"
- raise ValueError(msg)
+ msg = "Filename extension not recognized. Use .xml or .json"
+ raise ValueError(msg)
diff --git a/dojo/tools/veracode/xml_parser.py b/dojo/tools/veracode/xml_parser.py
index b53493fef5e..25908491739 100644
--- a/dojo/tools/veracode/xml_parser.py
+++ b/dojo/tools/veracode/xml_parser.py
@@ -272,8 +272,7 @@ def _get_cwe(val):
cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE)
if cweSearch:
return int(cweSearch.group(1))
- else:
- return None
+ return None
@classmethod
def __xml_sca_flaw_to_finding(
diff --git a/dojo/tools/veracode_sca/parser.py b/dojo/tools/veracode_sca/parser.py
index 8058bbae8fc..66c7e36ca89 100644
--- a/dojo/tools/veracode_sca/parser.py
+++ b/dojo/tools/veracode_sca/parser.py
@@ -237,11 +237,10 @@ def fix_severity(self, severity):
def __cvss_to_severity(cls, cvss):
if cvss >= 9:
return cls.vc_severity_mapping.get(5)
- elif cvss >= 7:
+ if cvss >= 7:
return cls.vc_severity_mapping.get(4)
- elif cvss >= 4:
+ if cvss >= 4:
return cls.vc_severity_mapping.get(3)
- elif cvss > 0:
+ if cvss > 0:
return cls.vc_severity_mapping.get(2)
- else:
- return cls.vc_severity_mapping.get(1)
+ return cls.vc_severity_mapping.get(1)
diff --git a/dojo/tools/wapiti/parser.py b/dojo/tools/wapiti/parser.py
index deb6309d5af..3b6c6dfd4fd 100644
--- a/dojo/tools/wapiti/parser.py
+++ b/dojo/tools/wapiti/parser.py
@@ -108,5 +108,4 @@ def get_cwe(val):
cweSearch = re.search("CWE-(\\d+)", val, re.IGNORECASE)
if cweSearch:
return int(cweSearch.group(1))
- else:
- return None
+ return None
diff --git a/dojo/tools/wfuzz/parser.py b/dojo/tools/wfuzz/parser.py
index 41d4ebeee69..2042fe5c17d 100644
--- a/dojo/tools/wfuzz/parser.py
+++ b/dojo/tools/wfuzz/parser.py
@@ -15,12 +15,13 @@ class WFuzzParser:
def severity_mapper(self, input):
if 200 <= int(input) <= 299:
return "High"
- elif 300 <= int(input) <= 399:
+ if 300 <= int(input) <= 399:
return "Low"
- elif 400 <= int(input) <= 499:
+ if 400 <= int(input) <= 499:
return "Medium"
- elif 500 <= int(input):
+ if 500 <= int(input):
return "Low"
+ return None
def get_scan_types(self):
return ["WFuzz JSON report"]
diff --git a/dojo/tools/whitehat_sentinel/parser.py b/dojo/tools/whitehat_sentinel/parser.py
index eeb97ee8f5e..0c5ca4ff024 100644
--- a/dojo/tools/whitehat_sentinel/parser.py
+++ b/dojo/tools/whitehat_sentinel/parser.py
@@ -42,7 +42,7 @@ def get_findings(self, file, test):
# Convert a WhiteHat Vuln with Attack Vectors to a list of DefectDojo
# findings
- dojo_findings = self._convert_whitehat_sentinel_vulns_to_dojo_finding(
+ return self._convert_whitehat_sentinel_vulns_to_dojo_finding(
findings_collection["collection"], test,
)
@@ -51,7 +51,6 @@ def get_findings(self, file, test):
#
# # Append DefectDojo findings to list
# dojo_findings.append(dojo_finding)
- return dojo_findings
def _convert_whitehat_severity_id_to_dojo_severity(
self, whitehat_severity_id: int,
@@ -87,6 +86,7 @@ def _parse_cwe_from_tags(self, whitehat_sentinel_tags) -> str:
for tag in whitehat_sentinel_tags:
if tag.startswith("CWE-"):
return tag.split("-")[1]
+ return None
def _parse_description(self, whitehat_sentinel_description: dict):
"""
diff --git a/dojo/tools/wiz/parser.py b/dojo/tools/wiz/parser.py
index f3125544748..ff98d94e499 100644
--- a/dojo/tools/wiz/parser.py
+++ b/dojo/tools/wiz/parser.py
@@ -204,6 +204,5 @@ def get_findings(self, filename, test):
return WizParserByTitle().parse_findings(test, reader)
if all(field in reader.fieldnames for field in ["Name", "DetailedName"]):
return WizParserByDetailedName().parse_findings(test, reader)
- else:
- msg = "This CSV format of Wiz is not supported"
- raise ValueError(msg)
+ msg = "This CSV format of Wiz is not supported"
+ raise ValueError(msg)
diff --git a/dojo/tools/xanitizer/parser.py b/dojo/tools/xanitizer/parser.py
index 7ec42343a62..b6a7cabdd55 100644
--- a/dojo/tools/xanitizer/parser.py
+++ b/dojo/tools/xanitizer/parser.py
@@ -24,8 +24,7 @@ def get_findings(self, filename, test):
root = self.parse_xml(filename)
if root is not None:
return self.get_findings_internal(root, test)
- else:
- return []
+ return []
def parse_xml(self, filename):
try:
@@ -161,7 +160,7 @@ def generate_file_path(self, finding):
"relativePath",
):
return finding.find("endNode").get("relativePath")
- elif finding.find("node") is not None and finding.find("node").get(
+ if finding.find("node") is not None and finding.find("node").get(
"relativePath",
):
return finding.find("node").get("relativePath")
diff --git a/dojo/tools/yarn_audit/parser.py b/dojo/tools/yarn_audit/parser.py
index b13c2ffd684..8bc0c8adfd7 100644
--- a/dojo/tools/yarn_audit/parser.py
+++ b/dojo/tools/yarn_audit/parser.py
@@ -25,13 +25,12 @@ def get_findings(self, json_output, test):
lines = lines.split("\n")
tree = (json.loads(line) for line in lines if "{" in line)
return self.get_items_yarn(tree, test)
- elif '"value"' in lines:
+ if '"value"' in lines:
lines = lines.split("\n")
tree = (json.loads(line) for line in lines if "{" in line)
return self.get_items_yarn2(tree, test)
- else:
- tree = json.loads(lines)
- return self.get_items_auditci(tree, test)
+ tree = json.loads(lines)
+ return self.get_items_auditci(tree, test)
def get_items_yarn(self, tree, test):
items = {}
diff --git a/dojo/user/utils.py b/dojo/user/utils.py
index 9d4f0949d8e..2ba2cbc1d0f 100644
--- a/dojo/user/utils.py
+++ b/dojo/user/utils.py
@@ -13,42 +13,37 @@ def __init__(self, *args, **kwargs):
def display_name(self):
if self.name == "bannerconf":
return "Login Banner"
- elif self.name == "cred user":
+ if self.name == "cred user":
return "Credentials"
- elif self.name == "github conf":
+ if self.name == "github conf":
return "GitHub Configurations"
- elif self.name == "engagement survey":
+ if self.name == "engagement survey":
return "Questionnaires"
- elif self.name == "permission":
+ if self.name == "permission":
return "Configuration Permissions"
- elif self.name == "sla configuration":
+ if self.name == "sla configuration":
return "SLA Configurations"
- else:
- return self.name.title() + "s"
+ return self.name.title() + "s"
def view_codename(self):
if self.view:
return f'view_{self.name.replace(" ", "_")}'
- else:
- return None
+ return None
def add_codename(self):
if self.add:
return f'add_{self.name.replace(" ", "_")}'
- else:
- return None
+ return None
def change_codename(self):
if self.change:
return f'change_{self.name.replace(" ", "_")}'
- else:
- return None
+ return None
def delete_codename(self):
if self.delete:
return f'delete_{self.name.replace(" ", "_")}'
- else:
- return None
+ return None
def codenames(self):
codenames = []
@@ -95,7 +90,7 @@ def get_configuration_permissions_fields():
questionnaire_permissions = []
rules_permissions = []
- permission_fields = [
+ return [
Permission_Helper(name="cred user", app="dojo", view=True, add=True, change=True, delete=True),
Permission_Helper(name="development environment", app="dojo", add=True, change=True, delete=True),
Permission_Helper(name="finding template", app="dojo", view=True, add=True, change=True, delete=True),
@@ -118,8 +113,6 @@ def get_configuration_permissions_fields():
Permission_Helper(name="user", app="auth", view=True, add=True, change=True, delete=True),
]
- return permission_fields
-
def get_configuration_permissions_codenames():
codenames = []
diff --git a/dojo/user/validators.py b/dojo/user/validators.py
index c393dc41c9d..83ee954419e 100644
--- a/dojo/user/validators.py
+++ b/dojo/user/validators.py
@@ -13,8 +13,7 @@ def validate(self, password, user=None):
raise ValidationError(
self.get_help_text(),
code="password_too_short")
- else:
- return None
+ return
def get_help_text(self):
return gettext("Password must be at least %s characters long.") % get_system_setting("minimum_password_length")
@@ -26,8 +25,7 @@ def validate(self, password, user=None):
raise ValidationError(
self.get_help_text(),
code="password_too_short")
- else:
- return None
+ return
def get_help_text(self):
return gettext("Password must be less than %s characters long.") % get_system_setting("maximum_password_length")
@@ -39,8 +37,7 @@ def validate(self, password, user=None):
raise ValidationError(
self.get_help_text(),
code="password_no_number")
- else:
- return None
+ return
def get_help_text(self):
return gettext("Password must contain at least 1 digit, 0-9.")
@@ -52,8 +49,7 @@ def validate(self, password, user=None):
raise ValidationError(
self.get_help_text(),
code="password_no_upper")
- else:
- return None
+ return
def get_help_text(self):
return gettext("Password must contain at least 1 uppercase letter, A-Z.")
@@ -65,8 +61,7 @@ def validate(self, password, user=None):
raise ValidationError(
self.get_help_text(),
code="password_no_lower")
- else:
- return None
+ return
def get_help_text(self):
return gettext("Password must contain at least 1 lowercase letter, a-z.")
@@ -79,8 +74,7 @@ def validate(self, password, user=None):
raise ValidationError(
self.get_help_text(),
code="password_no_symbol")
- else:
- return None
+ return
def get_help_text(self):
return gettext("The password must contain at least 1 special character, "
@@ -91,5 +85,4 @@ class DojoCommonPasswordValidator(CommonPasswordValidator):
def validate(self, password, user=None):
if get_system_setting("non_common_password_required"):
return super().validate(password, user)
- else:
- return None
+ return None
diff --git a/dojo/user/views.py b/dojo/user/views.py
index 1034b4c3638..f43b6b7b600 100644
--- a/dojo/user/views.py
+++ b/dojo/user/views.py
@@ -158,13 +158,12 @@ def logout_view(request):
if not settings.SHOW_LOGIN_FORM:
return login_view(request)
- else:
- messages.add_message(request,
- messages.SUCCESS,
- _("You have logged out successfully."),
- extra_tags="alert-success")
+ messages.add_message(request,
+ messages.SUCCESS,
+ _("You have logged out successfully."),
+ extra_tags="alert-success")
- return HttpResponseRedirect(reverse("login"))
+ return HttpResponseRedirect(reverse("login"))
@user_passes_test(lambda u: u.is_active)
diff --git a/dojo/utils.py b/dojo/utils.py
index 9446888b3e3..683bec737fc 100644
--- a/dojo/utils.py
+++ b/dojo/utils.py
@@ -180,7 +180,7 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t
.order_by("id")
)
- elif deduplication_algorithm == "unique_id_from_tool":
+ if deduplication_algorithm == "unique_id_from_tool":
return (
Finding.objects.filter(
**custom_filter,
@@ -190,7 +190,7 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t
.order_by("id")
)
- elif deduplication_algorithm == "unique_id_from_tool_or_hash_code":
+ if deduplication_algorithm == "unique_id_from_tool_or_hash_code":
query = Finding.objects.filter(
Q(**custom_filter),
(
@@ -201,7 +201,7 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t
deduplicationLogger.debug(query.query)
return query
- elif deduplication_algorithm == "legacy":
+ if deduplication_algorithm == "legacy":
# This is the legacy reimport behavior. Although it's pretty flawed and
# doesn't match the legacy algorithm for deduplication, this is left as is for simplicity.
# Re-writing the legacy deduplication here would be complicated and counter-productive.
@@ -216,9 +216,8 @@ def match_finding_to_existing_findings(finding, product=None, engagement=None, t
).order_by("id")
)
- else:
- logger.error("Internal error: unexpected deduplication_algorithm: '%s' ", deduplication_algorithm)
- return None
+ logger.error("Internal error: unexpected deduplication_algorithm: '%s' ", deduplication_algorithm)
+ return None
# true if both findings are on an engagement that have a different "deduplication on engagement" configuration
@@ -321,6 +320,7 @@ def do_dedupe_finding(new_finding, *args, **kwargs):
deduplicate_legacy(new_finding)
else:
deduplicationLogger.debug("dedupe: skipping dedupe because it's disabled in system settings get()")
+ return None
def deduplicate_legacy(new_finding):
@@ -713,8 +713,7 @@ def add_breadcrumb(parent=None,
if clear:
request.session["dojo_breadcrumbs"] = None
return
- else:
- crumbs = request.session.get("dojo_breadcrumbs", None)
+ crumbs = request.session.get("dojo_breadcrumbs", None)
if top_level or crumbs is None:
crumbs = [
@@ -842,27 +841,26 @@ def get_punchcard_data(objs, start_date, weeks, view="Finding"):
if created < start_of_week:
raise ValueError("date found outside supported range: " + str(created))
+ if created >= start_of_week and created < start_of_next_week:
+ # add day count to current week data
+ day_counts[day_offset[created.weekday()]] = day_count
+ highest_day_count = max(highest_day_count, day_count)
else:
- if created >= start_of_week and created < start_of_next_week:
- # add day count to current week data
- day_counts[day_offset[created.weekday()]] = day_count
- highest_day_count = max(highest_day_count, day_count)
- else:
- # created >= start_of_next_week, so store current week, prepare for next
- while created >= start_of_next_week:
- week_data, label = get_week_data(start_of_week, tick, day_counts)
- punchcard.extend(week_data)
- ticks.append(label)
- tick += 1
-
- # new week, new values!
- day_counts = [0, 0, 0, 0, 0, 0, 0]
- start_of_week = start_of_next_week
- start_of_next_week += relativedelta(weeks=1)
-
- # finally a day that falls into the week bracket
- day_counts[day_offset[created.weekday()]] = day_count
- highest_day_count = max(highest_day_count, day_count)
+ # created >= start_of_next_week, so store current week, prepare for next
+ while created >= start_of_next_week:
+ week_data, label = get_week_data(start_of_week, tick, day_counts)
+ punchcard.extend(week_data)
+ ticks.append(label)
+ tick += 1
+
+ # new week, new values!
+ day_counts = [0, 0, 0, 0, 0, 0, 0]
+ start_of_week = start_of_next_week
+ start_of_next_week += relativedelta(weeks=1)
+
+ # finally a day that falls into the week bracket
+ day_counts[day_offset[created.weekday()]] = day_count
+ highest_day_count = max(highest_day_count, day_count)
# add week in progress + empty weeks on the end if needed
while tick < weeks + 1:
@@ -1217,8 +1215,7 @@ def __next__(self):
data = self.flo.read(self.chunk_size)
if data:
return data
- else:
- raise StopIteration
+ raise StopIteration
def __iter__(self):
return self
@@ -1288,9 +1285,7 @@ def template_search_helper(fields=None, query_string=None):
return findings
entry_query = build_query(query_string, fields)
- found_entries = findings.filter(entry_query)
-
- return found_entries
+ return findings.filter(entry_query)
def get_page_items(request, items, page_size, prefix=""):
@@ -1432,8 +1427,7 @@ def decrypt(key, iv, encrypted_text):
encrypted_text_bytes = binascii.a2b_hex(encrypted_text)
decryptor = cipher.decryptor()
decrypted_text = decryptor.update(encrypted_text_bytes) + decryptor.finalize()
- decrypted_text = _unpad_string(decrypted_text)
- return decrypted_text
+ return _unpad_string(decrypted_text)
def _pad_string(value):
@@ -1729,9 +1723,8 @@ def get_full_url(relative_url):
def get_site_url():
if settings.SITE_URL:
return settings.SITE_URL
- else:
- logger.warning("SITE URL undefined in settings, full_url cannot be created")
- return "settings.SITE_URL"
+ logger.warning("SITE URL undefined in settings, full_url cannot be created")
+ return "settings.SITE_URL"
@receiver(post_save, sender=User)
@@ -1797,11 +1790,10 @@ def redirect_to_return_url_or_else(request, or_else):
if return_url:
# logger.debug('redirecting to %s: ', return_url.strip())
return redirect(request, return_url.strip())
- elif or_else:
+ if or_else:
return redirect(request, or_else)
- else:
- messages.add_message(request, messages.ERROR, "Unable to redirect anywhere.", extra_tags="alert-danger")
- return redirect(request, request.get_full_path())
+ messages.add_message(request, messages.ERROR, "Unable to redirect anywhere.", extra_tags="alert-danger")
+ return redirect(request, request.get_full_path())
def redirect(request, redirect_to):
@@ -2248,6 +2240,7 @@ def get_product(obj):
if isinstance(obj, Product):
return obj
+ return None
def prod_name(obj):
diff --git a/ruff.toml b/ruff.toml
index 5d3eecbe4d5..50f8a2baf0e 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -52,6 +52,7 @@ select = [
"LOG",
"G001", "G002", "G1", "G2",
"INP",
+ "RET",
"SLOT",
"PIE",
"T20",
diff --git a/tests/product_test.py b/tests/product_test.py
index bc3a64c0d4c..f0bdf0172dd 100644
--- a/tests/product_test.py
+++ b/tests/product_test.py
@@ -25,8 +25,7 @@ def __exit__(self, *_):
while time.time() < self.timeout:
if self.page_has_loaded():
return True
- else:
- time.sleep(0.2)
+ time.sleep(0.2)
msg = f"Timeout waiting for {self.timeout}s"
raise Exception(msg)
diff --git a/unittests/dojo_test_case.py b/unittests/dojo_test_case.py
index 425e96f5047..f72918cf938 100644
--- a/unittests/dojo_test_case.py
+++ b/unittests/dojo_test_case.py
@@ -352,18 +352,15 @@ def empty_jira_project_for_product(self, product, expected_delta_jira_project_db
def get_jira_issue_status(self, finding_id):
finding = Finding.objects.get(id=finding_id)
- updated = jira_helper.get_jira_status(finding)
- return updated
+ return jira_helper.get_jira_status(finding)
def get_jira_issue_updated(self, finding_id):
finding = Finding.objects.get(id=finding_id)
- updated = jira_helper.get_jira_updated(finding)
- return updated
+ return jira_helper.get_jira_updated(finding)
def get_jira_comments(self, finding_id):
finding = Finding.objects.get(id=finding_id)
- comments = jira_helper.get_jira_comments(finding)
- return comments
+ return jira_helper.get_jira_comments(finding)
def get_jira_issue_updated_map(self, test_id):
findings = Test.objects.get(id=test_id).finding_set.all()
@@ -710,12 +707,10 @@ def do_finding_remove_tags_api(self, http_method, finding_id, tags=None, expecte
return response.data
def put_finding_remove_tags_api(self, finding_id, tags, *args, **kwargs):
- response = self.do_finding_remove_tags_api(self.client.put, finding_id, tags, *args, **kwargs)
- return response
+ return self.do_finding_remove_tags_api(self.client.put, finding_id, tags, *args, **kwargs)
def patch_finding_remove_tags_api(self, finding_id, tags, *args, **kwargs):
- response = self.do_finding_remove_tags_api(self.client.patch, finding_id, tags, *args, **kwargs)
- return response
+ return self.do_finding_remove_tags_api(self.client.patch, finding_id, tags, *args, **kwargs)
def do_finding_notes_api(self, http_method, finding_id, note=None):
data = None
diff --git a/unittests/test_apply_finding_template.py b/unittests/test_apply_finding_template.py
index 58c188449d8..69f641206fe 100644
--- a/unittests/test_apply_finding_template.py
+++ b/unittests/test_apply_finding_template.py
@@ -122,9 +122,7 @@ def make_request(self, user_is_staff, finding_id, template_id, data=None):
else:
request = FindingTemplateTestUtil.create_get_request(user, self.apply_template_url)
- v = views.apply_template_to_finding(request, finding_id, template_id)
-
- return v
+ return views.apply_template_to_finding(request, finding_id, template_id)
def test_apply_template_to_finding_with_data_does_not_display_error_success(self):
result = self.make_request(user_is_staff=True, finding_id=1, template_id=1,
@@ -236,9 +234,7 @@ def make_request(self, user_is_staff, finding_id, data=None):
else:
request = FindingTemplateTestUtil.create_get_request(user, self.choose_template_url)
- v = views.find_template_to_apply(request, finding_id)
-
- return v
+ return views.find_template_to_apply(request, finding_id)
def test_unauthorized_find_template_to_apply_fails(self):
result = self.make_request(user_is_staff=False, finding_id=1)
@@ -275,9 +271,7 @@ def make_request(self, user_is_staff, finding_id, template_id, data=None):
else:
request = FindingTemplateTestUtil.create_get_request(user, self.finding_template_options_url)
- v = views.choose_finding_template_options(request, finding_id, template_id)
-
- return v
+ return views.choose_finding_template_options(request, finding_id, template_id)
def test_unauthorized_choose_finding_template_options_fails(self):
result = self.make_request(user_is_staff=False, finding_id=1, template_id=1)
diff --git a/unittests/test_import_reimport.py b/unittests/test_import_reimport.py
index 1015f206d7a..2d68989c180 100644
--- a/unittests/test_import_reimport.py
+++ b/unittests/test_import_reimport.py
@@ -1865,9 +1865,7 @@ def import_scan_with_params_ui(self, filename, scan_type="ZAP Scan", engagement=
if service is not None:
payload["service"] = service
- result = self.import_scan_ui(engagement, payload)
-
- return result
+ return self.import_scan_ui(engagement, payload)
def reimport_scan_with_params_ui(self, test_id, filename, scan_type="ZAP Scan", minimum_severity="Low", active=True, verified=False, push_to_jira=None, tags=None, close_old_findings=True, scan_date=None):
# Mimic old functionality for active/verified to avoid breaking tests
@@ -1898,8 +1896,7 @@ def reimport_scan_with_params_ui(self, test_id, filename, scan_type="ZAP Scan",
if scan_date is not None:
payload["scan_date"] = scan_date
- result = self.reimport_scan_ui(test_id, payload)
- return result
+ return self.reimport_scan_ui(test_id, payload)
# Observations:
# - When reopening a mitigated finding, almost no fields are updated such as title, description, severity, impact, references, ....
diff --git a/unittests/test_rest_framework.py b/unittests/test_rest_framework.py
index e4c4ef361e6..ee758ddaedb 100644
--- a/unittests/test_rest_framework.py
+++ b/unittests/test_rest_framework.py
@@ -265,21 +265,28 @@ def _check_helper(check):
if obj is None:
self._check_or_fail(is_nullable, f"{self._get_prefix()} is not nullable yet the value returned was null")
- elif schema_type == TYPE_BOOLEAN:
+ return None
+ if schema_type == TYPE_BOOLEAN:
_check_helper(isinstance(obj, bool))
- elif schema_type == TYPE_INTEGER:
+ return None
+ if schema_type == TYPE_INTEGER:
_check_helper(isinstance(obj, int))
- elif schema_type == TYPE_NUMBER:
+ return None
+ if schema_type == TYPE_NUMBER:
_check_helper(obj.isdecimal())
- elif schema_type == TYPE_ARRAY:
+ return None
+ if schema_type == TYPE_ARRAY:
_check_helper(isinstance(obj, list))
- elif schema_type == TYPE_OBJECT:
+ return None
+ if schema_type == TYPE_OBJECT:
_check_helper(isinstance(obj, OrderedDict) or isinstance(obj, dict))
- elif schema_type == TYPE_STRING:
+ return None
+ if schema_type == TYPE_STRING:
_check_helper(isinstance(obj, str))
- else:
- # Default case
- _check_helper(check=False)
+ return None
+ # Default case
+ _check_helper(check=False)
+ return None
# print('_check_type ok for: %s: %s' % (schema, obj))
diff --git a/unittests/tools/test_api_sonarqube_importer.py b/unittests/tools/test_api_sonarqube_importer.py
index ed157ed2046..2c5564fbecf 100644
--- a/unittests/tools/test_api_sonarqube_importer.py
+++ b/unittests/tools/test_api_sonarqube_importer.py
@@ -10,56 +10,47 @@
def dummy_product(self, *args, **kwargs):
with open(get_unit_tests_path() + "/scans/api_sonarqube/product.json", encoding="utf-8") as json_file:
- data = json.load(json_file)
- return data
+ return json.load(json_file)
def dummy_issues(self, *args, **kwargs):
with open(get_unit_tests_path() + "/scans/api_sonarqube/issues.json", encoding="utf-8") as json_file:
- data = json.load(json_file)
- return data
+ return json.load(json_file)
def dummy_rule(self, *args, **kwargs):
with open(get_unit_tests_path() + "/scans/api_sonarqube/rule.json", encoding="utf-8") as json_file:
- data = json.load(json_file)
- return data
+ return json.load(json_file)
def dummy_rule_wo_html_desc(self, *args, **kwargs):
with open(get_unit_tests_path() + "/scans/api_sonarqube/rule_wo_html_desc.json", encoding="utf-8") as json_file:
- data = json.load(json_file)
- return data
+ return json.load(json_file)
def dummy_no_hotspot(self, *args, **kwargs):
with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/no_vuln.json", encoding="utf-8") as json_file:
- data = json.load(json_file)
- return data
+ return json.load(json_file)
def dummy_one_hotspot(self, *args, **kwargs):
with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/one_vuln.json", encoding="utf-8") as json_file:
- data = json.load(json_file)
- return data
+ return json.load(json_file)
def dummy_many_hotspots(self, *args, **kwargs):
with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/many_vulns.json", encoding="utf-8") as json_file:
- data = json.load(json_file)
- return data
+ return json.load(json_file)
def dummy_hotspot_rule(self, *args, **kwargs):
with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule.json", encoding="utf-8") as json_file:
- data = json.load(json_file)
- return data
+ return json.load(json_file)
def dummy_hotspot_rule_wo_risk_description(self, *args, **kwargs):
with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule_wo_risk_description.json", encoding="utf-8") as json_file:
- data = json.load(json_file)
- return data
+ return json.load(json_file)
def empty_list(self, *args, **kwargs):
diff --git a/unittests/tools/test_api_sonarqube_parser.py b/unittests/tools/test_api_sonarqube_parser.py
index fe4334408cd..176219291a5 100644
--- a/unittests/tools/test_api_sonarqube_parser.py
+++ b/unittests/tools/test_api_sonarqube_parser.py
@@ -16,26 +16,22 @@
def dummy_product(self, *args, **kwargs):
with open("unittests/scans/api_sonarqube/product.json", encoding="utf-8") as json_file:
- data = json.load(json_file)
- return data
+ return json.load(json_file)
def dummy_issues(self, *args, **kwargs):
with open("unittests/scans/api_sonarqube/issues.json", encoding="utf-8") as json_file:
- data = json.load(json_file)
- return data
+ return json.load(json_file)
def dummy_rule(self, *args, **kwargs):
with open("unittests/scans/api_sonarqube/rule.json", encoding="utf-8") as json_file:
- data = json.load(json_file)
- return data
+ return json.load(json_file)
def dummy_hotspot_rule(self, *args, **kwargs):
with open(get_unit_tests_path() + "/scans/api_sonarqube/hotspots/rule.json", encoding="utf-8") as json_file:
- data = json.load(json_file)
- return data
+ return json.load(json_file)
def empty_list(self, *args, **kwargs):