diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000000..5ebf729c54
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,2 @@
+patreon: inventree
+ko_fi: inventree
diff --git a/InvenTree/InvenTree/models.py b/InvenTree/InvenTree/models.py
index b42d54cbe9..0fe3136871 100644
--- a/InvenTree/InvenTree/models.py
+++ b/InvenTree/InvenTree/models.py
@@ -45,6 +45,62 @@ def rename_attachment(instance, filename):
return os.path.join(instance.getSubdir(), filename)
+class DataImportMixin(object):
+ """
+ Model mixin class which provides support for 'data import' functionality.
+
+ Models which implement this mixin should provide information on the fields available for import
+ """
+
+    # Define a map of fields available for import
+ IMPORT_FIELDS = {}
+
+ @classmethod
+ def get_import_fields(cls):
+ """
+ Return all available import fields
+
+ Where information on a particular field is not explicitly provided,
+ introspect the base model to (attempt to) find that information.
+
+ """
+ fields = cls.IMPORT_FIELDS
+
+ for name, field in fields.items():
+
+ # Attempt to extract base field information from the model
+ base_field = None
+
+ for f in cls._meta.fields:
+ if f.name == name:
+ base_field = f
+ break
+
+ if base_field:
+ if 'label' not in field:
+ field['label'] = base_field.verbose_name
+
+ if 'help_text' not in field:
+ field['help_text'] = base_field.help_text
+
+ fields[name] = field
+
+ return fields
+
+ @classmethod
+ def get_required_import_fields(cls):
+ """ Return all *required* import fields """
+ fields = {}
+
+ for name, field in cls.get_import_fields().items():
+ required = field.get('required', False)
+
+ if required:
+ fields[name] = field
+
+ return fields
+
+
class ReferenceIndexingMixin(models.Model):
"""
A mixin for keeping track of numerical copies of the "reference" field.
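For orientation, a minimal sketch (hypothetical `Widget` model, not part of this diff) of how a model opts in to `DataImportMixin`, and what the two classmethods return:

```python
# Hypothetical example: a model opting in to the new DataImportMixin
from django.db import models
from InvenTree.models import DataImportMixin

class Widget(DataImportMixin, models.Model):

    # Fields available for bulk import; 'label' and 'help_text'
    # are introspected from the model field where omitted
    IMPORT_FIELDS = {
        'name': {'required': True},
        'quantity': {},
    }

    name = models.CharField(max_length=100, verbose_name='Name', help_text='Widget name')
    quantity = models.IntegerField(help_text='Number of widgets')

# Widget.get_import_fields()          -> both fields, with label/help_text filled in
# Widget.get_required_import_fields() -> only the 'name' entry
```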
diff --git a/InvenTree/InvenTree/serializers.py b/InvenTree/InvenTree/serializers.py
index ffc84a5f71..98e03fdc0e 100644
--- a/InvenTree/InvenTree/serializers.py
+++ b/InvenTree/InvenTree/serializers.py
@@ -5,8 +5,8 @@ Serializers used in various InvenTree apps
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
-
import os
+import tablib
from decimal import Decimal
@@ -332,3 +332,309 @@ class InvenTreeDecimalField(serializers.FloatField):
return Decimal(str(data))
except:
raise serializers.ValidationError(_("Invalid value"))
+
+
+class DataFileUploadSerializer(serializers.Serializer):
+ """
+ Generic serializer for uploading a data file, and extracting a dataset.
+
+ - Validates uploaded file
+ - Extracts column names
+ - Extracts data rows
+ """
+
+ # Implementing class should register a target model (database model) to be used for import
+ TARGET_MODEL = None
+
+ class Meta:
+ fields = [
+ 'data_file',
+ ]
+
+ data_file = serializers.FileField(
+ label=_("Data File"),
+ help_text=_("Select data file for upload"),
+ required=True,
+ allow_empty_file=False,
+ )
+
+ def validate_data_file(self, data_file):
+ """
+ Perform validation checks on the uploaded data file.
+ """
+
+ self.filename = data_file.name
+
+ name, ext = os.path.splitext(data_file.name)
+
+ # Remove the leading . from the extension
+ ext = ext[1:]
+
+ accepted_file_types = [
+ 'xls', 'xlsx',
+ 'csv', 'tsv',
+ 'xml',
+ ]
+
+ if ext not in accepted_file_types:
+ raise serializers.ValidationError(_("Unsupported file type"))
+
+        # Impose a 50MB limit on uploaded data files
+ max_upload_file_size = 50 * 1024 * 1024
+
+ if data_file.size > max_upload_file_size:
+ raise serializers.ValidationError(_("File is too large"))
+
+ # Read file data into memory (bytes object)
+ try:
+ data = data_file.read()
+ except Exception as e:
+ raise serializers.ValidationError(str(e))
+
+ if ext in ['csv', 'tsv', 'xml']:
+ try:
+ data = data.decode()
+ except Exception as e:
+ raise serializers.ValidationError(str(e))
+
+ # Convert to a tablib dataset (we expect headers)
+ try:
+ self.dataset = tablib.Dataset().load(data, ext, headers=True)
+ except Exception as e:
+ raise serializers.ValidationError(str(e))
+
+ if len(self.dataset.headers) == 0:
+ raise serializers.ValidationError(_("No columns found in file"))
+
+ if len(self.dataset) == 0:
+ raise serializers.ValidationError(_("No data rows found in file"))
+
+ return data_file
+
+ def match_column(self, column_name, field_names, exact=False):
+ """
+ Attempt to match a column name (from the file) to a field (defined in the model)
+
+ Order of matching is:
+ - Direct match
+ - Case insensitive match
+ - Fuzzy match
+ """
+
+ column_name = column_name.strip()
+
+ column_name_lower = column_name.lower()
+
+ if column_name in field_names:
+ return column_name
+
+ for field_name in field_names:
+ if field_name.lower() == column_name_lower:
+ return field_name
+
+ if exact:
+            # Exhausted the available 'exact' matches
+ return None
+
+ # TODO: Fuzzy pattern matching for column names
+
+ # No matches found
+ return None
+
+ def extract_data(self):
+ """
+ Returns dataset extracted from the file
+ """
+
+ # Provide a dict of available import fields for the model
+ model_fields = {}
+
+ # Keep track of columns we have already extracted
+ matched_columns = set()
+
+ if self.TARGET_MODEL:
+ try:
+ model_fields = self.TARGET_MODEL.get_import_fields()
+ except:
+ pass
+
+ # Extract a list of valid model field names
+        model_field_names = list(model_fields.keys())
+
+ # Provide a dict of available columns from the dataset
+ file_columns = {}
+
+ for header in self.dataset.headers:
+ column = {}
+
+ # Attempt to "match" file columns to model fields
+ match = self.match_column(header, model_field_names, exact=True)
+
+ if match is not None and match not in matched_columns:
+ matched_columns.add(match)
+ column['value'] = match
+ else:
+ column['value'] = None
+
+ file_columns[header] = column
+
+ return {
+ 'file_fields': file_columns,
+ 'model_fields': model_fields,
+ 'rows': [row.values() for row in self.dataset.dict],
+ 'filename': self.filename,
+ }
+
+ def save(self):
+ ...
+
+
+class DataFileExtractSerializer(serializers.Serializer):
+ """
+ Generic serializer for extracting data from an imported dataset.
+
+ - User provides an array of matched headers
+ - User provides an array of raw data rows
+ """
+
+ # Implementing class should register a target model (database model) to be used for import
+ TARGET_MODEL = None
+
+ class Meta:
+ fields = [
+ 'columns',
+ 'rows',
+ ]
+
+ # Mapping of columns
+ columns = serializers.ListField(
+ child=serializers.CharField(
+ allow_blank=True,
+ ),
+ )
+
+ rows = serializers.ListField(
+ child=serializers.ListField(
+ child=serializers.CharField(
+ allow_blank=True,
+ allow_null=True,
+ ),
+ )
+ )
+
+ def validate(self, data):
+
+ data = super().validate(data)
+
+ self.columns = data.get('columns', [])
+ self.rows = data.get('rows', [])
+
+ if len(self.rows) == 0:
+ raise serializers.ValidationError(_("No data rows provided"))
+
+ if len(self.columns) == 0:
+ raise serializers.ValidationError(_("No data columns supplied"))
+
+ self.validate_extracted_columns()
+
+ return data
+
+ @property
+ def data(self):
+
+        model_fields = {}
+
+        if self.TARGET_MODEL:
+            try:
+                model_fields = self.TARGET_MODEL.get_import_fields()
+            except:
+                pass
+
+ rows = []
+
+ for row in self.rows:
+ """
+ Optionally pre-process each row, before sending back to the client
+ """
+
+ processed_row = self.process_row(self.row_to_dict(row))
+
+ if processed_row:
+ rows.append({
+ "original": row,
+ "data": processed_row,
+ })
+
+ return {
+ 'fields': model_fields,
+ 'columns': self.columns,
+ 'rows': rows,
+ }
+
+ def process_row(self, row):
+ """
+ Process a 'row' of data, which is a mapped column:value dict
+
+ Returns either a mapped column:value dict, or None.
+
+        If the function returns None, the row is ignored!
+ """
+
+ # Default implementation simply returns the original row data
+ return row
+
+ def row_to_dict(self, row):
+ """
+ Convert a "row" to a named data dict
+ """
+
+ row_dict = {
+ 'errors': {},
+ }
+
+ for idx, value in enumerate(row):
+
+ if idx < len(self.columns):
+ col = self.columns[idx]
+
+ if col:
+ row_dict[col] = value
+
+ return row_dict
+
+ def validate_extracted_columns(self):
+ """
+ Perform custom validation of header mapping.
+ """
+
+        model_fields = {}
+
+        if self.TARGET_MODEL:
+            try:
+                model_fields = self.TARGET_MODEL.get_import_fields()
+            except:
+                pass
+
+ cols_seen = set()
+
+ for name, field in model_fields.items():
+
+ required = field.get('required', False)
+
+ # Check for missing required columns
+ if required:
+ if name not in self.columns:
+ raise serializers.ValidationError(_("Missing required column") + f": '{name}'")
+
+ for col in self.columns:
+
+ if not col:
+ continue
+
+ # Check for duplicated columns
+ if col in cols_seen:
+ raise serializers.ValidationError(_("Duplicate column") + f": '{col}'")
+
+ cols_seen.add(col)
+
+ def save(self):
+ """
+ No "save" action for this serializer
+ """
+ ...
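Taken together, the two serializers give other apps a reusable two-step import flow. A sketch of how another model might adopt them (the `StockItem` target here is an assumption for illustration, not part of this diff):

```python
# Hypothetical sketch: reusing the generic serializers for a different model.
# Assumes the target model also implements DataImportMixin.
from InvenTree.serializers import (DataFileUploadSerializer,
                                   DataFileExtractSerializer)
from stock.models import StockItem

class StockFileUploadSerializer(DataFileUploadSerializer):
    """Step 1: validate the uploaded file and report its columns"""
    TARGET_MODEL = StockItem

class StockFileExtractSerializer(DataFileExtractSerializer):
    """Step 2: validate the column mapping and pre-process each row"""
    TARGET_MODEL = StockItem

    def process_row(self, row):
        # Returning None drops the row from the extracted output
        if not row.get('quantity', None):
            return None
        return row
```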
diff --git a/InvenTree/InvenTree/version.py b/InvenTree/InvenTree/version.py
index 19235f0e0a..09de857b68 100644
--- a/InvenTree/InvenTree/version.py
+++ b/InvenTree/InvenTree/version.py
@@ -12,11 +12,17 @@ import common.models
INVENTREE_SW_VERSION = "0.6.0 dev"
# InvenTree API version
-INVENTREE_API_VERSION = 24
+INVENTREE_API_VERSION = 26
"""
Increment this API version number whenever there is a significant change to the API that any clients need to know about
+v26 -> 2022-02-17
+ - Adds API endpoint for uploading a BOM file and extracting data
+
+v25 -> 2022-02-17
+ - Adds ability to filter "part" list endpoint by "in_bom_for" argument
+
v24 -> 2022-02-10
- Adds API endpoint for deleting (cancelling) build order outputs
diff --git a/InvenTree/build/serializers.py b/InvenTree/build/serializers.py
index 0b0858cb8a..c7577fa68c 100644
--- a/InvenTree/build/serializers.py
+++ b/InvenTree/build/serializers.py
@@ -208,7 +208,7 @@ class BuildOutputCreateSerializer(serializers.Serializer):
raise ValidationError(_("Integer quantity required for trackable parts"))
if part.has_trackable_parts():
- raise ValidationError(_("Integer quantity required, as the bill of materials contains tracakble parts"))
+ raise ValidationError(_("Integer quantity required, as the bill of materials contains trackable parts"))
return quantity
diff --git a/InvenTree/part/api.py b/InvenTree/part/api.py
index 4c52b87520..954060c456 100644
--- a/InvenTree/part/api.py
+++ b/InvenTree/part/api.py
@@ -995,6 +995,23 @@ class PartList(generics.ListCreateAPIView):
except (ValueError, Part.DoesNotExist):
pass
+ # Filter only parts which are in the "BOM" for a given part
+ in_bom_for = params.get('in_bom_for', None)
+
+ if in_bom_for is not None:
+ try:
+ in_bom_for = Part.objects.get(pk=in_bom_for)
+
+ # Extract a list of parts within the BOM
+ bom_parts = in_bom_for.get_parts_in_bom()
+ print("bom_parts:", bom_parts)
+ print([p.pk for p in bom_parts])
+
+ queryset = queryset.filter(pk__in=[p.pk for p in bom_parts])
+
+ except (ValueError, Part.DoesNotExist):
+ pass
+
# Filter by whether the BOM has been validated (or not)
bom_valid = params.get('bom_valid', None)
@@ -1533,13 +1550,15 @@ class BomList(generics.ListCreateAPIView):
]
-class BomExtract(generics.CreateAPIView):
+class BomImportUpload(generics.CreateAPIView):
"""
- API endpoint for extracting BOM data from a BOM file.
+    API endpoint for uploading a BOM data file.
+
+    Returns the extracted data from the uploaded file, ready for column matching.
"""
- queryset = Part.objects.none()
- serializer_class = part_serializers.BomExtractSerializer
+ queryset = Part.objects.all()
+ serializer_class = part_serializers.BomImportUploadSerializer
def create(self, request, *args, **kwargs):
"""
@@ -1556,15 +1575,22 @@ class BomExtract(generics.CreateAPIView):
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
-class BomUpload(generics.CreateAPIView):
+class BomImportExtract(generics.CreateAPIView):
"""
- API endpoint for uploading a complete Bill of Materials.
-
- It is assumed that the BOM has been extracted from a file using the BomExtract endpoint.
+ API endpoint for extracting BOM data from a BOM file.
"""
- queryset = Part.objects.all()
- serializer_class = part_serializers.BomUploadSerializer
+ queryset = Part.objects.none()
+ serializer_class = part_serializers.BomImportExtractSerializer
+
+
+class BomImportSubmit(generics.CreateAPIView):
+ """
+    API endpoint for submitting the extracted BOM data.
+ """
+
+ queryset = BomItem.objects.none()
+ serializer_class = part_serializers.BomImportSubmitSerializer
class BomDetail(generics.RetrieveUpdateDestroyAPIView):
@@ -1719,9 +1745,10 @@ bom_api_urls = [
url(r'^.*$', BomDetail.as_view(), name='api-bom-item-detail'),
])),
- url(r'^extract/', BomExtract.as_view(), name='api-bom-extract'),
-
- url(r'^upload/', BomUpload.as_view(), name='api-bom-upload'),
+ # API endpoint URLs for importing BOM data
+ url(r'^import/upload/', BomImportUpload.as_view(), name='api-bom-import-upload'),
+ url(r'^import/extract/', BomImportExtract.as_view(), name='api-bom-import-extract'),
+ url(r'^import/submit/', BomImportSubmit.as_view(), name='api-bom-import-submit'),
# Catch-all
url(r'^.*$', BomList.as_view(), name='api-bom-list'),
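For illustration, the new `in_bom_for` filter can be exercised from a client like so (server URL and token are placeholders):

```python
# Hypothetical client-side usage of the new 'in_bom_for' filter
import requests

response = requests.get(
    "https://example.com/api/part/",             # placeholder server URL
    params={"in_bom_for": 57},                   # pk of the assembly part
    headers={"Authorization": "Token <token>"},  # placeholder credentials
)

# Only parts appearing in the BOM for part 57 are returned,
# including substitutes and variant parts
for part in response.json():
    print(part["pk"], part["name"])
```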
diff --git a/InvenTree/part/models.py b/InvenTree/part/models.py
index b312937e30..33ad8bf612 100644
--- a/InvenTree/part/models.py
+++ b/InvenTree/part/models.py
@@ -46,7 +46,7 @@ from common.models import InvenTreeSetting
from InvenTree import helpers
from InvenTree import validators
-from InvenTree.models import InvenTreeTree, InvenTreeAttachment
+from InvenTree.models import InvenTreeTree, InvenTreeAttachment, DataImportMixin
from InvenTree.fields import InvenTreeURLField
from InvenTree.helpers import decimal2string, normalize, decimal2money
import InvenTree.tasks
@@ -483,6 +483,36 @@ class Part(MPTTModel):
def __str__(self):
return f"{self.full_name} - {self.description}"
+ def get_parts_in_bom(self):
+ """
+ Return a list of all parts in the BOM for this part.
+ Takes into account substitutes, variant parts, and inherited BOM items
+ """
+
+ parts = set()
+
+ for bom_item in self.get_bom_items():
+ for part in bom_item.get_valid_parts_for_allocation():
+ parts.add(part)
+
+ return parts
+
+ def check_if_part_in_bom(self, other_part):
+ """
+ Check if the other_part is in the BOM for this part.
+
+ Note:
+ - Accounts for substitute parts
+ - Accounts for variant BOMs
+ """
+
+ for bom_item in self.get_bom_items():
+ if other_part in bom_item.get_valid_parts_for_allocation():
+ return True
+
+ # No matches found
+ return False
+
def check_add_to_bom(self, parent, raise_error=False, recursive=True):
"""
Check if this Part can be added to the BOM of another part.
@@ -2550,7 +2580,7 @@ class PartCategoryParameterTemplate(models.Model):
help_text=_('Default Parameter Value'))
-class BomItem(models.Model):
+class BomItem(models.Model, DataImportMixin):
""" A BomItem links a part to its component items.
A part can have a BOM (bill of materials) which defines
which parts are required (and in what quantity) to make it.
@@ -2568,6 +2598,39 @@ class BomItem(models.Model):
allow_variants: Stock for part variants can be substituted for this BomItem
"""
+ # Fields available for bulk import
+ IMPORT_FIELDS = {
+ 'quantity': {
+ 'required': True
+ },
+ 'reference': {},
+ 'overage': {},
+ 'allow_variants': {},
+ 'inherited': {},
+ 'optional': {},
+ 'note': {},
+ 'part': {
+ 'label': _('Part'),
+ 'help_text': _('Part ID or part name'),
+ },
+ 'part_id': {
+ 'label': _('Part ID'),
+ 'help_text': _('Unique part ID value')
+ },
+ 'part_name': {
+ 'label': _('Part Name'),
+ 'help_text': _('Part name'),
+ },
+ 'part_ipn': {
+ 'label': _('Part IPN'),
+ 'help_text': _('Part IPN value'),
+ },
+ 'level': {
+ 'label': _('Level'),
+ 'help_text': _('BOM level'),
+ }
+ }
+
@staticmethod
def get_api_url():
return reverse('api-bom-list')
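A short sketch of the two new `Part` helpers in use (pk values are illustrative):

```python
# Illustrative usage of the new Part helper methods
from part.models import Part

assembly = Part.objects.get(pk=100)   # an assembly part
candidate = Part.objects.get(pk=200)  # a possible component

# Full set of parts in the assembly's BOM, accounting for
# substitutes, variant parts and inherited BOM items
bom_parts = assembly.get_parts_in_bom()

# Membership check which short-circuits on the first matching BOM item
if assembly.check_if_part_in_bom(candidate):
    print("candidate can be allocated against this BOM")
```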
diff --git a/InvenTree/part/serializers.py b/InvenTree/part/serializers.py
index 195ce15e4f..549b546a5b 100644
--- a/InvenTree/part/serializers.py
+++ b/InvenTree/part/serializers.py
@@ -4,8 +4,6 @@ JSON serializers for Part app
import imghdr
from decimal import Decimal
-import os
-import tablib
from django.urls import reverse_lazy
from django.db import models, transaction
@@ -17,7 +15,9 @@ from rest_framework import serializers
from sql_util.utils import SubqueryCount, SubquerySum
from djmoney.contrib.django_rest_framework import MoneyField
-from InvenTree.serializers import (InvenTreeAttachmentSerializerField,
+from InvenTree.serializers import (DataFileUploadSerializer,
+ DataFileExtractSerializer,
+ InvenTreeAttachmentSerializerField,
InvenTreeDecimalField,
InvenTreeImageSerializerField,
InvenTreeModelSerializer,
@@ -709,307 +709,129 @@ class PartCopyBOMSerializer(serializers.Serializer):
)
-class BomExtractSerializer(serializers.Serializer):
+class BomImportUploadSerializer(DataFileUploadSerializer):
"""
Serializer for uploading a file and extracting data from it.
-
- Note: 2022-02-04 - This needs a *serious* refactor in future, probably
-
- When parsing the file, the following things happen:
-
- a) Check file format and validity
- b) Look for "required" fields
- c) Look for "part" fields - used to "infer" part
-
- Once the file itself has been validated, we iterate through each data row:
-
- - If the "level" column is provided, ignore anything below level 1
- - Try to "guess" the part based on part_id / part_name / part_ipn
- - Extract other fields as required
-
"""
+ TARGET_MODEL = BomItem
+
class Meta:
fields = [
- 'bom_file',
+ 'data_file',
'part',
- 'clear_existing',
+ 'clear_existing_bom',
]
- # These columns must be present
- REQUIRED_COLUMNS = [
- 'quantity',
- ]
-
- # We need at least one column to specify a "part"
- PART_COLUMNS = [
- 'part',
- 'part_id',
- 'part_name',
- 'part_ipn',
- ]
-
- # These columns are "optional"
- OPTIONAL_COLUMNS = [
- 'allow_variants',
- 'inherited',
- 'optional',
- 'overage',
- 'note',
- 'reference',
- ]
-
- def find_matching_column(self, col_name, columns):
-
- # Direct match
- if col_name in columns:
- return col_name
-
- col_name = col_name.lower().strip()
-
- for col in columns:
- if col.lower().strip() == col_name:
- return col
-
- # No match
- return None
-
- def find_matching_data(self, row, col_name, columns):
- """
- Extract data from the row, based on the "expected" column name
- """
-
- col_name = self.find_matching_column(col_name, columns)
-
- return row.get(col_name, None)
-
- bom_file = serializers.FileField(
- label=_("BOM File"),
- help_text=_("Select Bill of Materials file"),
+ part = serializers.PrimaryKeyRelatedField(
+ queryset=Part.objects.all(),
required=True,
- allow_empty_file=False,
+ allow_null=False,
+ many=False,
)
- def validate_bom_file(self, bom_file):
- """
- Perform validation checks on the uploaded BOM file
- """
-
- self.filename = bom_file.name
-
- name, ext = os.path.splitext(bom_file.name)
-
- # Remove the leading . from the extension
- ext = ext[1:]
-
- accepted_file_types = [
- 'xls', 'xlsx',
- 'csv', 'tsv',
- 'xml',
- ]
-
- if ext not in accepted_file_types:
- raise serializers.ValidationError(_("Unsupported file type"))
-
- # Impose a 50MB limit on uploaded BOM files
- max_upload_file_size = 50 * 1024 * 1024
-
- if bom_file.size > max_upload_file_size:
- raise serializers.ValidationError(_("File is too large"))
-
- # Read file data into memory (bytes object)
- try:
- data = bom_file.read()
- except Exception as e:
- raise serializers.ValidationError(str(e))
-
- if ext in ['csv', 'tsv', 'xml']:
- try:
- data = data.decode()
- except Exception as e:
- raise serializers.ValidationError(str(e))
-
- # Convert to a tablib dataset (we expect headers)
- try:
- self.dataset = tablib.Dataset().load(data, ext, headers=True)
- except Exception as e:
- raise serializers.ValidationError(str(e))
-
- for header in self.REQUIRED_COLUMNS:
-
- match = self.find_matching_column(header, self.dataset.headers)
-
- if match is None:
- raise serializers.ValidationError(_("Missing required column") + f": '{header}'")
-
- part_column_matches = {}
-
- part_match = False
-
- for col in self.PART_COLUMNS:
- col_match = self.find_matching_column(col, self.dataset.headers)
-
- part_column_matches[col] = col_match
-
- if col_match is not None:
- part_match = True
-
- if not part_match:
- raise serializers.ValidationError(_("No part column found"))
-
- if len(self.dataset) == 0:
- raise serializers.ValidationError(_("No data rows found"))
-
- return bom_file
-
- def extract_data(self):
- """
- Read individual rows out of the BOM file
- """
-
- rows = []
- errors = []
-
- found_parts = set()
-
- headers = self.dataset.headers
-
- level_column = self.find_matching_column('level', headers)
-
- for row in self.dataset.dict:
-
- row_error = {}
-
- """
- If the "level" column is specified, and this is not a top-level BOM item, ignore the row!
- """
- if level_column is not None:
- level = row.get('level', None)
-
- if level is not None:
- try:
- level = int(level)
- if level != 1:
- continue
- except:
- pass
-
- """
- Next, we try to "guess" the part, based on the provided data.
-
- A) If the part_id is supplied, use that!
- B) If the part name and/or part_ipn are supplied, maybe we can use those?
- """
- part_id = self.find_matching_data(row, 'part_id', headers)
- part_name = self.find_matching_data(row, 'part_name', headers)
- part_ipn = self.find_matching_data(row, 'part_ipn', headers)
-
- part = None
-
- if part_id is not None:
- try:
- part = Part.objects.get(pk=part_id)
- except (ValueError, Part.DoesNotExist):
- pass
-
- # Optionally, specify using field "part"
- if part is None:
- pk = self.find_matching_data(row, 'part', headers)
-
- if pk is not None:
- try:
- part = Part.objects.get(pk=pk)
- except (ValueError, Part.DoesNotExist):
- pass
-
- if part is None:
-
- if part_name or part_ipn:
- queryset = Part.objects.all()
-
- if part_name:
- queryset = queryset.filter(name=part_name)
-
- if part_ipn:
- queryset = queryset.filter(IPN=part_ipn)
-
- # Only if we have a single direct match
- if queryset.exists():
- if queryset.count() == 1:
- part = queryset.first()
- else:
- # Multiple matches!
- row_error['part'] = _('Multiple matching parts found')
-
- if part is None:
- if 'part' not in row_error:
- row_error['part'] = _('No matching part found')
- else:
- if part.pk in found_parts:
- row_error['part'] = _("Duplicate part selected")
-
- elif not part.component:
- row_error['part'] = _('Part is not designated as a component')
-
- found_parts.add(part.pk)
-
- row['part'] = part.pk if part is not None else None
-
- """
- Read out the 'quantity' column - check that it is valid
- """
- quantity = self.find_matching_data(row, 'quantity', self.dataset.headers)
-
- # Ensure quantity field is provided
- row['quantity'] = quantity
-
- if quantity is None:
- row_error['quantity'] = _('Quantity not provided')
- else:
- try:
- quantity = Decimal(quantity)
-
- if quantity <= 0:
- row_error['quantity'] = _('Quantity must be greater than zero')
- except:
- row_error['quantity'] = _('Invalid quantity')
-
- # For each "optional" column, ensure the column names are allocated correctly
- for field_name in self.OPTIONAL_COLUMNS:
- if field_name not in row:
- row[field_name] = self.find_matching_data(row, field_name, self.dataset.headers)
-
- rows.append(row)
- errors.append(row_error)
-
- return {
- 'rows': rows,
- 'errors': errors,
- 'headers': headers,
- 'filename': self.filename,
- }
-
- part = serializers.PrimaryKeyRelatedField(queryset=Part.objects.filter(assembly=True), required=True)
-
- clear_existing = serializers.BooleanField(
- label=_("Clear Existing BOM"),
- help_text=_("Delete existing BOM data first"),
+ clear_existing_bom = serializers.BooleanField(
+ label=_('Clear Existing BOM'),
+ help_text=_('Delete existing BOM items before uploading')
)
def save(self):
data = self.validated_data
- master_part = data['part']
- clear_existing = data['clear_existing']
+ if data.get('clear_existing_bom', False):
+ part = data['part']
- if clear_existing:
-
- # Remove all existing BOM items
- master_part.bom_items.all().delete()
+ with transaction.atomic():
+ part.bom_items.all().delete()
-class BomUploadSerializer(serializers.Serializer):
+class BomImportExtractSerializer(DataFileExtractSerializer):
+ """
+ """
+
+ TARGET_MODEL = BomItem
+
+ def validate_extracted_columns(self):
+ super().validate_extracted_columns()
+
+ part_columns = ['part', 'part_name', 'part_ipn', 'part_id']
+
+ if not any([col in self.columns for col in part_columns]):
+ # At least one part column is required!
+ raise serializers.ValidationError(_("No part column specified"))
+
+ def process_row(self, row):
+
+ # Skip any rows which are at a lower "level"
+ level = row.get('level', None)
+
+ if level is not None:
+ try:
+ level = int(level)
+ if level != 1:
+ # Skip this row
+ return None
+ except:
+ pass
+
+ # Attempt to extract a valid part based on the provided data
+ part_id = row.get('part_id', row.get('part', None))
+ part_name = row.get('part_name', row.get('part', None))
+ part_ipn = row.get('part_ipn', None)
+
+ part = None
+
+ if part_id is not None:
+ try:
+ part = Part.objects.get(pk=part_id)
+ except (ValueError, Part.DoesNotExist):
+ pass
+
+ # No direct match, where else can we look?
+ if part is None:
+ if part_name or part_ipn:
+ queryset = Part.objects.all()
+
+ if part_name:
+ queryset = queryset.filter(name=part_name)
+
+ if part_ipn:
+ queryset = queryset.filter(IPN=part_ipn)
+
+ if queryset.exists():
+ if queryset.count() == 1:
+ part = queryset.first()
+ else:
+ row['errors']['part'] = _('Multiple matching parts found')
+
+ if part is None:
+ row['errors']['part'] = _('No matching part found')
+ else:
+ if not part.component:
+ row['errors']['part'] = _('Part is not designated as a component')
+
+ # Update the 'part' value in the row
+ row['part'] = part.pk if part is not None else None
+
+ # Check the provided 'quantity' value
+ quantity = row.get('quantity', None)
+
+ if quantity is None:
+ row['errors']['quantity'] = _('Quantity not provided')
+ else:
+ try:
+ quantity = Decimal(quantity)
+
+ if quantity <= 0:
+ row['errors']['quantity'] = _('Quantity must be greater than zero')
+ except:
+ row['errors']['quantity'] = _('Invalid quantity')
+
+ return row
+
+
+class BomImportSubmitSerializer(serializers.Serializer):
"""
Serializer for uploading a BOM against a specified part.
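To make the row handling concrete, a worked sketch of one row passing through `BomImportExtractSerializer.process_row()` (the pk value is illustrative):

```python
# Illustrative input/output for BomImportExtractSerializer.process_row()
row_in = {
    'errors': {},
    'level': '1',            # any other integer level causes the row to be skipped
    'part_name': 'M3 Screw',
    'quantity': '4',
}

# Assuming exactly one Part named 'M3 Screw' exists, with pk=42
# and part.component == True, the processed row becomes:
row_out = {
    'errors': {},
    'level': '1',
    'part_name': 'M3 Screw',
    'quantity': '4',
    'part': 42,              # resolved primary key
}
```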
diff --git a/InvenTree/part/templates/part/upload_bom.html b/InvenTree/part/templates/part/upload_bom.html
index 151a4b5424..9db26f7b39 100644
--- a/InvenTree/part/templates/part/upload_bom.html
+++ b/InvenTree/part/templates/part/upload_bom.html
@@ -77,15 +77,15 @@ $('#bom-template-download').click(function() {
$('#bom-upload').click(function() {
- constructForm('{% url "api-bom-extract" %}', {
+ constructForm('{% url "api-bom-import-upload" %}', {
method: 'POST',
fields: {
- bom_file: {},
+ data_file: {},
part: {
value: {{ part.pk }},
hidden: true,
},
- clear_existing: {},
+ clear_existing_bom: {},
},
title: '{% trans "Upload BOM File" %}',
onSuccess: function(response) {
@@ -93,16 +93,24 @@ $('#bom-upload').click(function() {
// Clear existing entries from the table
$('.bom-import-row').remove();
- // Disable the "submit" button
- $('#bom-submit').show();
+ selectImportFields(
+ '{% url "api-bom-import-extract" %}',
+ response,
+ {
+ success: function(response) {
+ constructBomUploadTable(response);
- constructBomUploadTable(response);
+ // Show the "submit" button
+ $('#bom-submit').show();
- $('#bom-submit').click(function() {
- submitBomTable({{ part.pk }}, {
- bom_data: response,
- });
- });
+ $('#bom-submit').click(function() {
+ submitBomTable({{ part.pk }}, {
+ bom_data: response,
+ });
+ });
+ }
+ }
+ );
}
});
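The template drives the new three-step import flow (upload, extract, submit). Sketched as raw API calls, with server URL and credentials as placeholders:

```python
# Hypothetical end-to-end client flow for the new BOM import endpoints
import requests

base = "https://example.com/api/bom/import"   # placeholder server URL
auth = {"Authorization": "Token <token>"}     # placeholder credentials

# 1. Upload the data file; the response lists file columns and model fields
with open("bom.csv", "rb") as f:
    upload = requests.post(
        f"{base}/upload/",
        data={"part": 57, "clear_existing_bom": False},  # pk of the assembly
        files={"data_file": f},
        headers=auth,
    )

# 2. Post the chosen column mapping plus the raw rows for extraction
extract = requests.post(
    f"{base}/extract/",
    json={"columns": ["part", "quantity"], "rows": [["42", "4"]]},
    headers=auth,
)

# 3. Submit the validated rows (payload shape is defined by
#    BomImportSubmitSerializer, which is truncated in this diff):
# requests.post(f"{base}/submit/", json=submit_payload, headers=auth)
```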
diff --git a/InvenTree/part/test_bom_import.py b/InvenTree/part/test_bom_import.py
index ce622ed991..8903660f39 100644
--- a/InvenTree/part/test_bom_import.py
+++ b/InvenTree/part/test_bom_import.py
@@ -41,8 +41,6 @@ class BomUploadTest(InvenTreeAPITestCase):
assembly=False,
)
- self.url = reverse('api-bom-extract')
-
def post_bom(self, filename, file_data, part=None, clear_existing=None, expected_code=None, content_type='text/plain'):
bom_file = SimpleUploadedFile(
@@ -58,11 +56,9 @@ class BomUploadTest(InvenTreeAPITestCase):
clear_existing = False
response = self.post(
- self.url,
+ reverse('api-bom-import-upload'),
data={
- 'bom_file': bom_file,
- 'part': part,
- 'clear_existing': clear_existing,
+ 'data_file': bom_file,
},
expected_code=expected_code,
format='multipart',
@@ -76,14 +72,12 @@ class BomUploadTest(InvenTreeAPITestCase):
"""
response = self.post(
- self.url,
+ reverse('api-bom-import-upload'),
data={},
expected_code=400
)
- self.assertIn('No file was submitted', str(response.data['bom_file']))
- self.assertIn('This field is required', str(response.data['part']))
- self.assertIn('This field is required', str(response.data['clear_existing']))
+ self.assertIn('No file was submitted', str(response.data['data_file']))
def test_unsupported_file(self):
"""
@@ -96,7 +90,7 @@ class BomUploadTest(InvenTreeAPITestCase):
expected_code=400,
)
- self.assertIn('Unsupported file type', str(response.data['bom_file']))
+ self.assertIn('Unsupported file type', str(response.data['data_file']))
def test_broken_file(self):
"""
@@ -109,7 +103,7 @@ class BomUploadTest(InvenTreeAPITestCase):
expected_code=400,
)
- self.assertIn('The submitted file is empty', str(response.data['bom_file']))
+ self.assertIn('The submitted file is empty', str(response.data['data_file']))
response = self.post_bom(
'test.xls',
@@ -118,11 +112,11 @@ class BomUploadTest(InvenTreeAPITestCase):
content_type='application/xls',
)
- self.assertIn('Unsupported format, or corrupt file', str(response.data['bom_file']))
+ self.assertIn('Unsupported format, or corrupt file', str(response.data['data_file']))
- def test_invalid_upload(self):
+ def test_missing_rows(self):
"""
- Test upload of an invalid file
+ Test upload of an invalid file (without data rows)
"""
dataset = tablib.Dataset()
@@ -139,7 +133,7 @@ class BomUploadTest(InvenTreeAPITestCase):
expected_code=400,
)
- self.assertIn("Missing required column: 'quantity'", str(response.data))
+ self.assertIn('No data rows found in file', str(response.data))
# Try again, with an .xlsx file
response = self.post_bom(
@@ -149,32 +143,61 @@ class BomUploadTest(InvenTreeAPITestCase):
expected_code=400,
)
+ self.assertIn('No data rows found in file', str(response.data))
+
+ def test_missing_columns(self):
+ """
+ Upload extracted data, but with missing columns
+ """
+
+ url = reverse('api-bom-import-extract')
+
+ rows = [
+ ['1', 'test'],
+ ['2', 'test'],
+ ]
+
+ # Post without columns
+ response = self.post(
+ url,
+ {},
+ expected_code=400,
+ )
+
+ self.assertIn('This field is required', str(response.data['rows']))
+ self.assertIn('This field is required', str(response.data['columns']))
+
+ response = self.post(
+ url,
+ {
+ 'rows': rows,
+ 'columns': ['part', 'reference'],
+ },
+ expected_code=400
+ )
+
self.assertIn("Missing required column: 'quantity'", str(response.data))
- # Add the quantity field (or close enough)
- dataset.headers.append('quAntiTy ')
-
- response = self.post_bom(
- 'test.csv',
- bytes(dataset.csv, 'utf8'),
- content_type='text/csv',
+ response = self.post(
+ url,
+ {
+ 'rows': rows,
+ 'columns': ['quantity', 'reference'],
+ },
expected_code=400,
)
- self.assertIn('No part column found', str(response.data))
+ self.assertIn('No part column specified', str(response.data))
- dataset.headers.append('part_id')
- dataset.headers.append('part_name')
-
- response = self.post_bom(
- 'test.csv',
- bytes(dataset.csv, 'utf8'),
- content_type='text/csv',
- expected_code=400,
+ response = self.post(
+ url,
+ {
+ 'rows': rows,
+ 'columns': ['quantity', 'part'],
+ },
+ expected_code=201,
)
- self.assertIn('No data rows found', str(response.data))
-
def test_invalid_data(self):
"""
Upload data which contains errors
@@ -195,25 +218,31 @@ class BomUploadTest(InvenTreeAPITestCase):
dataset.append([cmp.pk, idx])
- # Add a duplicate part too
- dataset.append([components.first().pk, 'invalid'])
+ url = reverse('api-bom-import-extract')
- response = self.post_bom(
- 'test.csv',
- bytes(dataset.csv, 'utf8'),
- content_type='text/csv',
- expected_code=201
+ response = self.post(
+ url,
+ {
+ 'columns': dataset.headers,
+ 'rows': [row for row in dataset],
+ },
)
- errors = response.data['errors']
+ rows = response.data['rows']
- self.assertIn('Quantity must be greater than zero', str(errors[0]))
- self.assertIn('Part is not designated as a component', str(errors[5]))
- self.assertIn('Duplicate part selected', str(errors[-1]))
- self.assertIn('Invalid quantity', str(errors[-1]))
+        # Returned data must contain the same number of rows as the original dataset
+ self.assertEqual(len(rows), len(dataset))
- for idx, row in enumerate(response.data['rows'][:-1]):
- self.assertEqual(str(row['part']), str(components[idx].pk))
+ for idx, row in enumerate(rows):
+ data = row['data']
+ cmp = components[idx]
+
+            # Should have guessed the correct part
+            self.assertEqual(data['part'], cmp.pk)
+
+ # Check some specific error messages
+ self.assertEqual(rows[0]['data']['errors']['quantity'], 'Quantity must be greater than zero')
+ self.assertEqual(rows[5]['data']['errors']['part'], 'Part is not designated as a component')
def test_part_guess(self):
"""
@@ -233,9 +262,14 @@ class BomUploadTest(InvenTreeAPITestCase):
10,
])
- response = self.post_bom(
- 'test.csv',
- bytes(dataset.csv, 'utf8'),
+ url = reverse('api-bom-import-extract')
+
+ response = self.post(
+ url,
+ {
+ 'columns': dataset.headers,
+ 'rows': [row for row in dataset],
+ },
expected_code=201,
)
@@ -244,7 +278,7 @@ class BomUploadTest(InvenTreeAPITestCase):
self.assertEqual(len(rows), 10)
for idx in range(10):
- self.assertEqual(rows[idx]['part'], components[idx].pk)
+ self.assertEqual(rows[idx]['data']['part'], components[idx].pk)
# Should also be able to 'guess' part by the IPN value
dataset = tablib.Dataset()
@@ -257,9 +291,12 @@ class BomUploadTest(InvenTreeAPITestCase):
10,
])
- response = self.post_bom(
- 'test.csv',
- bytes(dataset.csv, 'utf8'),
+ response = self.post(
+ url,
+ {
+ 'columns': dataset.headers,
+ 'rows': [row for row in dataset],
+ },
expected_code=201,
)
@@ -268,13 +305,15 @@ class BomUploadTest(InvenTreeAPITestCase):
self.assertEqual(len(rows), 10)
for idx in range(10):
- self.assertEqual(rows[idx]['part'], components[idx].pk)
+ self.assertEqual(rows[idx]['data']['part'], components[idx].pk)
def test_levels(self):
"""
Test that multi-level BOMs are correctly handled during upload
"""
+ url = reverse('api-bom-import-extract')
+
dataset = tablib.Dataset()
dataset.headers = ['level', 'part', 'quantity']
@@ -288,11 +327,21 @@ class BomUploadTest(InvenTreeAPITestCase):
2,
])
- response = self.post_bom(
- 'test.csv',
- bytes(dataset.csv, 'utf8'),
+ response = self.post(
+ url,
+ {
+ 'rows': [row for row in dataset],
+ 'columns': dataset.headers,
+ },
expected_code=201,
)
+ rows = response.data['rows']
+
# Only parts at index 1, 4, 7 should have been returned
self.assertEqual(len(response.data['rows']), 3)
+
+ # Check the returned PK values
+ self.assertEqual(rows[0]['data']['part'], components[1].pk)
+ self.assertEqual(rows[1]['data']['part'], components[4].pk)
+ self.assertEqual(rows[2]['data']['part'], components[7].pk)
diff --git a/InvenTree/report/api.py b/InvenTree/report/api.py
index af88f4799f..c7d2b15e4d 100644
--- a/InvenTree/report/api.py
+++ b/InvenTree/report/api.py
@@ -225,13 +225,14 @@ class ReportPrintMixin:
outputs.append(report.render_as_string(request))
else:
outputs.append(report.render(request))
- except TemplateDoesNotExist:
-
- filename = report.template
+ except TemplateDoesNotExist as e:
+ template = str(e)
+ if not template:
+ template = report.template
return Response(
{
- 'error': _(f"Template file '{filename}' is missing or does not exist"),
+ 'error': _(f"Template file '{template}' is missing or does not exist"),
},
status=400,
)
@@ -269,13 +270,16 @@ class ReportPrintMixin:
else:
pdf = outputs[0].get_document().write_pdf()
- except TemplateDoesNotExist:
+ except TemplateDoesNotExist as e:
- filename = report.template
+ template = str(e)
+
+ if not template:
+ template = report.template
return Response(
{
- 'error': _(f"Template file '{filename}' is missing or does not exist"),
+ 'error': _(f"Template file '{template}' is missing or does not exist"),
},
status=400,
)
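The fallback works because Django raises `TemplateDoesNotExist` with the missing template's name as the exception message; a minimal illustration:

```python
# Why str(e) usually carries the template name
from django.template import TemplateDoesNotExist

try:
    raise TemplateDoesNotExist("report/missing_template.html")
except TemplateDoesNotExist as e:
    assert str(e) == "report/missing_template.html"
```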
diff --git a/InvenTree/report/models.py b/InvenTree/report/models.py
index be8d803edf..3ee19bd5e6 100644
--- a/InvenTree/report/models.py
+++ b/InvenTree/report/models.py
@@ -14,12 +14,12 @@ import datetime
from django.urls import reverse
from django.db import models
from django.conf import settings
+from django.core.cache import cache
from django.core.exceptions import ValidationError, FieldError
from django.template.loader import render_to_string
from django.template import Template, Context
-from django.core.files.storage import FileSystemStorage
from django.core.validators import FileExtensionValidator
import build.models
@@ -43,32 +43,12 @@ except OSError as err: # pragma: no cover
logger = logging.getLogger("inventree")
-class ReportFileUpload(FileSystemStorage):
- """
- Custom implementation of FileSystemStorage class.
-
- When uploading a report (or a snippet / asset / etc),
- it is often important to ensure the filename is not arbitrarily *changed*,
- if the name of the uploaded file is identical to the currently stored file.
-
- For example, a snippet or asset file is referenced in a template by filename,
- and we do not want that filename to change when we upload a new *version*
- of the snippet or asset file.
-
- This uploader class performs the following pseudo-code function:
-
- - If the model is *new*, proceed as normal
- - If the model is being updated:
- a) If the new filename is *different* from the existing filename, proceed as normal
- b) If the new filename is *identical* to the existing filename, we want to overwrite the existing file
- """
-
- def get_available_name(self, name, max_length=None):
-
- return super().get_available_name(name, max_length)
-
-
def rename_template(instance, filename):
+ """
+ Helper function for 'renaming' uploaded report files.
+    Passes responsibility back to the model instance,
+ to ensure that files are uploaded to the correct directory.
+ """
return instance.rename_file(filename)
@@ -155,7 +135,23 @@ class ReportBase(models.Model):
filename = os.path.basename(filename)
- return os.path.join('report', 'report_template', self.getSubdir(), filename)
+ path = os.path.join('report', 'report_template', self.getSubdir(), filename)
+
+ fullpath = os.path.join(settings.MEDIA_ROOT, path)
+ fullpath = os.path.abspath(fullpath)
+
+ # If the report file is the *same* filename as the one being uploaded,
+ # remove the original one from the media directory
+ if str(filename) == str(self.template):
+
+ if os.path.exists(fullpath):
+ logger.info(f"Deleting existing report template: '{filename}'")
+ os.remove(fullpath)
+
+ # Ensure that the cache is cleared for this template!
+ cache.delete(fullpath)
+
+ return path
@property
def extension(self):
@@ -522,16 +518,20 @@ def rename_snippet(instance, filename):
path = os.path.join('report', 'snippets', filename)
+ fullpath = os.path.join(settings.MEDIA_ROOT, path)
+ fullpath = os.path.abspath(fullpath)
+
# If the snippet file is the *same* filename as the one being uploaded,
# delete the original one from the media directory
if str(filename) == str(instance.snippet):
- fullpath = os.path.join(settings.MEDIA_ROOT, path)
- fullpath = os.path.abspath(fullpath)
if os.path.exists(fullpath):
logger.info(f"Deleting existing snippet file: '{filename}'")
os.remove(fullpath)
+ # Ensure that the cache is deleted for this snippet
+ cache.delete(fullpath)
+
return path
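Both hunks above now follow the same overwrite-then-invalidate pattern; distilled as a sketch (assuming, as the hunks suggest, that rendering code caches template contents keyed by absolute file path):

```python
# Sketch of the shared pattern in rename_template() / rename_snippet()
import os
from django.conf import settings
from django.core.cache import cache

def overwrite_media_file(path, filename, existing_name):
    fullpath = os.path.abspath(os.path.join(settings.MEDIA_ROOT, path))

    # Same filename as the stored file: overwrite in place,
    # so templates referencing it by name keep working
    if str(filename) == str(existing_name):
        if os.path.exists(fullpath):
            os.remove(fullpath)

        # Drop any cached copy keyed by the absolute path
        cache.delete(fullpath)

    return path
```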
diff --git a/InvenTree/stock/api.py b/InvenTree/stock/api.py
index a13c7f37c3..9723e01c09 100644
--- a/InvenTree/stock/api.py
+++ b/InvenTree/stock/api.py
@@ -109,6 +109,31 @@ class StockItemSerialize(generics.CreateAPIView):
return context
+class StockItemInstall(generics.CreateAPIView):
+ """
+ API endpoint for installing a particular stock item into this stock item.
+
+ - stock_item.part must be in the BOM for this part
+ - stock_item must currently be "in stock"
+ - stock_item must be serialized (and not belong to another item)
+ """
+
+ queryset = StockItem.objects.none()
+ serializer_class = StockSerializers.InstallStockItemSerializer
+
+ def get_serializer_context(self):
+
+ context = super().get_serializer_context()
+ context['request'] = self.request
+
+ try:
+ context['item'] = StockItem.objects.get(pk=self.kwargs.get('pk', None))
+ except:
+ pass
+
+ return context
+
+
class StockAdjustView(generics.CreateAPIView):
"""
A generic class for handling stocktake actions.
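A hypothetical client call against the new install endpoint (the payload is defined by `InstallStockItemSerializer`, which is not shown in this diff, so the field name below is an assumption):

```python
# Hypothetical usage of the new stock "install" endpoint
import requests

response = requests.post(
    "https://example.com/api/stock/123/install/",  # install into stock item 123
    json={"stock_item": 456},                      # assumed payload field
    headers={"Authorization": "Token <token>"},    # placeholder credentials
)
```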
@@ -503,11 +528,34 @@ class StockList(generics.ListCreateAPIView):
serial_numbers = data.get('serial_numbers', '')
# Assign serial numbers for a trackable part
- if serial_numbers and part.trackable:
+ if serial_numbers:
+
+ if not part.trackable:
+ raise ValidationError({
+ 'serial_numbers': [_("Serial numbers cannot be supplied for a non-trackable part")]
+ })
# If serial numbers are specified, check that they match!
try:
serials = extract_serial_numbers(serial_numbers, quantity, part.getLatestSerialNumberInt())
+
+ # Determine if any of the specified serial numbers already exist!
+ existing = []
+
+ for serial in serials:
+ if part.checkIfSerialNumberExists(serial):
+ existing.append(serial)
+
+ if len(existing) > 0:
+
+ msg = _("The following serial numbers already exist")
+ msg += " : "
+ msg += ",".join([str(e) for e in existing])
+
+ raise ValidationError({
+ 'serial_numbers': [msg],
+ })
+
except DjangoValidationError as e:
raise ValidationError({
'quantity': e.messages,
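Illustrative effect of the tightened validation (serials, pk values and URL are placeholders; the message format comes from the hunk above):

```python
# Hypothetical request that now fails if serials 5 and 7 already exist
import requests

response = requests.post(
    "https://example.com/api/stock/",  # placeholder server URL
    json={"part": 1, "quantity": 10, "serial_numbers": "1-10"},
    headers={"Authorization": "Token <token>"},
)

# 400 response, e.g.:
# {"serial_numbers": ["The following serial numbers already exist: 5, 7"]}
```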
@@ -1256,6 +1304,7 @@ stock_api_urls = [
# Detail views for a single stock item
url(r'^(?P
- {% trans "Install another Stock Item into this item." %}
-
- {% trans "Stock items can only be installed if they meet the following criteria" %}:
-
-
-
-
- {% trans "Install this Stock Item in another stock item." %} -
-- {% trans "Stock items can only be installed if they meet the following criteria" %}: - -