diff --git a/contentcuration/contentcuration/models.py b/contentcuration/contentcuration/models.py index 2810b6c74d..f41be19138 100644 --- a/contentcuration/contentcuration/models.py +++ b/contentcuration/contentcuration/models.py @@ -6,6 +6,7 @@ import json from django.conf import settings from django.contrib import admin +from django.core.cache import cache from django.core.files.storage import FileSystemStorage from django.db import IntegrityError, connections, models, connection from django.db.models import Q, Sum, Max, Count, Case, When, IntegerField @@ -345,7 +346,7 @@ class ContentNode(MPTTModel, models.Model): # TODO: disallow nulls once existing models have been set original_channel_id = UUIDField(primary_key=False, editable=False, null=True, db_index=True) # Original channel copied from source_channel_id = UUIDField(primary_key=False, editable=False, null=True) # Immediate channel copied from - original_source_node_id = UUIDField(primary_key=False, editable=False, null=True) # Original node_id of node copied from (TODO: original_node_id clashes with original_node field - temporary) + original_source_node_id = UUIDField(primary_key=False, editable=False, null=True, db_index=True) # Original node_id of node copied from (TODO: original_node_id clashes with original_node field - temporary) source_node_id = UUIDField(primary_key=False, editable=False, null=True) # Immediate node_id of node copied from # Fields specific to content generated by Ricecooker @@ -377,29 +378,43 @@ class ContentNode(MPTTModel, models.Model): objects = TreeManager() def get_original_node(self): - + key = "original_channel_{}".format(self.original_source_node_id or self.pk) + cached_data = cache.get(key) + if cached_data: + return cached_data + original_node = self.original_node or self if self.original_channel_id and self.original_source_node_id: original_channel = Channel.objects.get(pk=self.original_channel_id) - return original_channel.main_tree.get_descendants().filter(node_id=self.original_source_node_id).first() or self - - return self.original_node or self + original_node = original_channel.main_tree.get_descendants().filter(node_id=self.original_source_node_id).first() or self + cache.set(key, original_node, None) + return original_node + + def get_associated_presets(self): + key = "associated_presets_{}".format(self.kind_id) + cached_data = cache.get(key) + if cached_data: + return cached_data + presets = FormatPreset.objects.filter(kind=self.kind).values() + cache.set(key, presets, None) + return presets def get_channel(self): root = self.get_root() - channel = root.channel_main or root.channel_trash or root.channel_language or root.channel_previous + channel = root.channel_main or root.channel_trash or root.channel_staging or root.channel_previous if channel: return channel.first() return channel def save(self, *args, **kwargs): # Detect if node has been moved to another tree - if self.pk is not None and ContentNode.objects.filter(pk=self.pk).exists(): + if self.pk and ContentNode.objects.filter(pk=self.pk).exists(): original = ContentNode.objects.get(pk=self.pk) if original.parent and original.parent_id != self.parent_id and not original.parent.changed: original.parent.changed = True original.parent.save() super(ContentNode, self).save(*args, **kwargs) + post_save_changes = False if self.original_node is None: self.original_node = self diff --git a/contentcuration/contentcuration/serializers.py b/contentcuration/contentcuration/serializers.py index 66b8c21eb4..8cdfdd5659 100644 --- 
a/contentcuration/contentcuration/serializers.py +++ b/contentcuration/contentcuration/serializers.py @@ -39,10 +39,7 @@ class FormatPresetSerializer(serializers.ModelSerializer): name = serializers.SerializerMethodField('retrieve_name') def retrieve_mimetypes(self, preset): - mimetypes = [] - for m in preset.allowed_formats.all(): - mimetypes.append(m.mimetype) - return mimetypes + return preset.allowed_formats.values_list('mimetype', flat=True) def retrieve_name(self, preset): return preset.id @@ -119,7 +116,6 @@ def update(self, instance, validated_data): class FileSerializer(BulkSerializerMixin, serializers.ModelSerializer): file_on_disk = serializers.SerializerMethodField('get_file_url') storage_url = serializers.SerializerMethodField('retrieve_storage_url') - recommended_kind = serializers.SerializerMethodField('retrieve_recommended_kind') mimetype = serializers.SerializerMethodField('retrieve_extension') language = LanguageSerializer(many=False, required=False, allow_null=True) display_name = serializers.SerializerMethodField('retrieve_display_name') @@ -135,14 +131,6 @@ def get_file_url(self, obj): def retrieve_storage_url(self, obj): return generate_storage_url(str(obj)) - def retrieve_recommended_kind(self, obj): - if obj.contentnode is not None and obj.contentnode.kind: - return obj.contentnode.kind.pk - preset = FormatPreset.objects.filter(allowed_formats__extension=obj.file_format.extension).first() - if preset is not None: - return preset.kind.pk - return None - def retrieve_extension(self, obj): return obj.file_format.mimetype @@ -153,7 +141,7 @@ def retrieve_display_name(self, obj): class Meta: model = File - fields = ('id', 'checksum', 'display_name', 'file_size', 'language', 'file_on_disk', 'contentnode', 'file_format', 'preset', 'original_filename','recommended_kind', 'storage_url', 'mimetype', 'source_url') + fields = ('id', 'checksum', 'display_name', 'file_size', 'language', 'file_on_disk', 'contentnode', 'file_format', 'preset', 'original_filename', 'storage_url', 'mimetype', 'source_url') list_serializer_class = FileListSerializer class ContentKindSerializer(serializers.ModelSerializer): @@ -242,7 +230,6 @@ class Meta: model = ContentTag fields = ('tag_name', 'channel', 'id') - class AssessmentListSerializer(serializers.ListSerializer): def update(self, instance, validated_data): ret = [] @@ -279,7 +266,6 @@ def update(self, instance, validated_data): return ret - class AssessmentItemSerializer(BulkSerializerMixin, serializers.ModelSerializer): contentnode = serializers.PrimaryKeyRelatedField(queryset=ContentNode.objects.all()) id = serializers.IntegerField(required=False) @@ -292,6 +278,7 @@ class Meta: class SimplifiedContentNodeSerializer(BulkSerializerMixin, serializers.ModelSerializer): + id = serializers.CharField(required=False) children = serializers.PrimaryKeyRelatedField(many=True, read_only=True) metadata = serializers.SerializerMethodField('retrieve_metadata') @@ -313,85 +300,12 @@ def retrieve_metadata(self, node): # "resource_size" : assessment_size + resource_size, } - class Meta: - model = ContentNode - fields = ('title', 'id', 'sort_order', 'kind', 'children', 'parent', 'metadata',) - - -class ContentNodeSerializer(BulkSerializerMixin, serializers.ModelSerializer): - children = serializers.PrimaryKeyRelatedField(many=True, read_only=True) - tags = TagSerializer(many=True) - id = serializers.CharField(required=False) - - ancestors = serializers.SerializerMethodField('get_node_ancestors') - descendants = 
serializers.SerializerMethodField('get_node_descendants') - files = FileSerializer(many=True, read_only=True) - assessment_items = AssessmentItemSerializer(many=True, read_only=True) - associated_presets = serializers.SerializerMethodField('retrieve_associated_presets') - metadata = serializers.SerializerMethodField('retrieve_metadata') - original_channel = serializers.SerializerMethodField('retrieve_original_channel') - valid = serializers.SerializerMethodField('check_valid') - - def check_valid(self, node): - if node.kind_id == content_kinds.TOPIC: - return True - elif node.kind_id == content_kinds.EXERCISE: - for aitem in node.assessment_items.exclude(type=exercises.PERSEUS_QUESTION): - answers = json.loads(aitem.answers) - correct_answers = filter(lambda a: a['correct'], answers) - if aitem.question == "" or len(answers) == 0 or len(correct_answers) == 0 or\ - any(filter(lambda a: a['answer'] == "", answers)) or\ - (aitem.type == exercises.SINGLE_SELECTION and len(correct_answers) > 1) or\ - any(filter(lambda h: h['hint'] == "", json.loads(aitem.hints))): - return False - return True - else: - return node.files.filter(preset__supplementary=False).exists() - - def retrieve_original_channel(self, node): - original = node.get_original_node() - channel = original.get_channel() if original else None - return {"id": channel.pk, "name": channel.name} if channel else None - - def retrieve_metadata(self, node): - if node.kind_id == content_kinds.TOPIC: - # TODO: Account for files duplicated in tree - # size_q = File.objects.select_related('contentnode').select_related('assessment_item')\ - # .filter(Q(contentnode_id__in=descendants.values_list('id', flat=True)) | Q(assessment_item_id__in=descendants.values_list('assessment_items__id', flat=True)))\ - # .only('checksum', 'file_size').distinct().aggregate(resource_size=Sum('file_size')) - descendants = node.get_descendants(include_self=True).annotate(change_count=Case(When(changed=True, then=Value(1)),default=Value(0),output_field=IntegerField())) - aggregated = descendants.aggregate(resource_size=Sum('files__file_size'), is_changed=Sum('change_count'), assessment_size=Sum('assessment_items__files__file_size')) - return { - "total_count" : node.get_descendant_count(), - "resource_count" : node.get_descendants().exclude(kind=content_kinds.TOPIC).count(), - "max_sort_order" : node.children.aggregate(max_sort_order=Max('sort_order'))['max_sort_order'] or 1, - "resource_size" : (aggregated.get('resource_size') or 0) + (aggregated.get('assessment_size') or 0), - "has_changed_descendant" : aggregated.get('is_changed') != 0 - } - else: - # TODO: Account for files duplicated on node - # size_q = File.objects.select_related('contentnode').select_related('assessment_item')\ - # .filter(Q(contentnode=node) | Q(assessment_item_id__in=node.assessment_items.values_list('id', flat=True)))\ - # .only('checksum', 'file_size').distinct().aggregate(resource_size=Sum('file_size')) - assessment_size = node.assessment_items.aggregate(resource_size=Sum('files__file_size'))['resource_size'] or 0 - resource_size = node.files.aggregate(resource_size=Sum('file_size')).get('resource_size') or 0 - return { - "total_count" : 1, - "resource_count" : 1, - "max_sort_order" : node.sort_order, - "resource_size" : assessment_size + resource_size, - "has_changed_descendant" : node.changed - } - @staticmethod def setup_eager_loading(queryset): """ Perform necessary eager loading of data. 
""" queryset = queryset.prefetch_related('children').prefetch_related('files').prefetch_related('assessment_items') return queryset - def retrieve_associated_presets(self, node): - return FormatPreset.objects.filter(kind=node.kind).values() - def to_internal_value(self, data): """ In order to be able to handle passing tag_name in array, @@ -493,24 +407,15 @@ def update(self, instance, validated_data): def get_node_ancestors(self,node): return node.get_ancestors().values_list('id', flat=True) - def get_node_descendants(self, node): - return node.get_descendants().values_list('id', flat=True) - class Meta: - list_serializer_class = CustomListSerializer model = ContentNode - fields = ('title', 'changed', 'id', 'description', 'sort_order','author', 'original_node', 'cloned_source', 'original_channel','original_source_node_id', 'source_node_id', 'node_id', - 'copyright_holder', 'license', 'license_description', 'kind', 'children', 'parent', 'content_id','associated_presets', 'valid', 'original_channel_id', 'source_channel_id', - 'descendants', 'ancestors', 'tags', 'files', 'metadata', 'created', 'modified', 'published', 'extra_fields', 'assessment_items', 'source_id', 'source_domain') + fields = ('title', 'id', 'sort_order', 'kind', 'children', 'parent', 'metadata',) -class RootNodeSerializer(serializers.ModelSerializer): - children = serializers.PrimaryKeyRelatedField(many=True, read_only=True) - id = serializers.CharField(required=False) - metadata = serializers.SerializerMethodField('retrieve_metadata') +class RootNodeSerializer(SimplifiedContentNodeSerializer): channel_name = serializers.SerializerMethodField('retrieve_channel_name') def retrieve_metadata(self, node): - descendants = node.get_descendants(include_self=True).annotate(change_count=Case(When(changed=True, then=Value(1)),default=Value(0),output_field=IntegerField())) + descendants = node.get_descendants(include_self=True) return { "total_count" : node.get_descendant_count(), "resource_count" : descendants.exclude(kind_id=content_kinds.TOPIC).count(), @@ -520,12 +425,99 @@ def retrieve_metadata(self, node): } def retrieve_channel_name(self, node): - return node.get_channel().name if node.get_channel() else None + channel = node.get_channel() + return channel.name if channel else None class Meta: model = ContentNode fields = ('title', 'id', 'kind', 'children', 'metadata', 'published', 'channel_name') +class ContentNodeSerializer(SimplifiedContentNodeSerializer): + ancestors = serializers.SerializerMethodField('get_node_ancestors') + valid = serializers.SerializerMethodField('check_valid') + associated_presets = serializers.SerializerMethodField('retrieve_associated_presets') + + def retrieve_associated_presets(self, node): + return node.get_associated_presets() + + def check_valid(self, node): + if node.kind_id == content_kinds.TOPIC: + return True + elif node.kind_id == content_kinds.EXERCISE: + for aitem in node.assessment_items.exclude(type=exercises.PERSEUS_QUESTION): + answers = json.loads(aitem.answers) + correct_answers = filter(lambda a: a['correct'], answers) + if aitem.question == "" or len(answers) == 0 or len(correct_answers) == 0 or\ + any(filter(lambda a: a['answer'] == "", answers)) or\ + (aitem.type == exercises.SINGLE_SELECTION and len(correct_answers) > 1) or\ + any(filter(lambda h: h['hint'] == "", json.loads(aitem.hints))): + return False + return True + else: + return node.files.filter(preset__supplementary=False).exists() + + def retrieve_metadata(self, node): + if node.kind_id == content_kinds.TOPIC: + 
descendants = node.get_descendants(include_self=True) + return { + "total_count" : node.get_descendant_count(), + "resource_count" : descendants.exclude(kind=content_kinds.TOPIC).count(), + "max_sort_order" : node.children.aggregate(max_sort_order=Max('sort_order'))['max_sort_order'] or 1, + "resource_size" : 0, # Make separate request + "has_changed_descendant" : descendants.filter(changed=True).exists(), + } + else: + # TODO: Account for files duplicated on node + # size_q = File.objects.select_related('contentnode').select_related('assessment_item')\ + # .filter(Q(contentnode=node) | Q(assessment_item_id__in=node.assessment_items.values_list('id', flat=True)))\ + # .only('checksum', 'file_size').distinct().aggregate(resource_size=Sum('file_size')) + assessment_size = node.assessment_items.aggregate(resource_size=Sum('files__file_size'))['resource_size'] or 0 + resource_size = node.files.aggregate(resource_size=Sum('file_size')).get('resource_size') or 0 + resource_count = 1 + if node.kind_id == content_kinds.EXERCISE: + resource_count = node.assessment_items.filter(deleted=False).count() + + return { + "total_count" : 1, + "resource_count" : resource_count, + "max_sort_order" : node.sort_order, + "resource_size" : assessment_size + resource_size, + "has_changed_descendant" : node.changed, + } + + class Meta: + list_serializer_class = CustomListSerializer + model = ContentNode + fields = ('title', 'changed', 'id', 'description', 'sort_order','author', 'copyright_holder', 'license', 'license_description','assessment_items', 'files', + 'kind', 'parent', 'children', 'published', 'associated_presets', 'valid', 'metadata', 'ancestors', 'tags', 'extra_fields') + +class ContentNodeEditSerializer(ContentNodeSerializer): + original_channel = serializers.SerializerMethodField('retrieve_original_channel') + files = FileSerializer(many=True, read_only=True) + tags = TagSerializer(many=True) + assessment_items = AssessmentItemSerializer(many=True, read_only=True) + + def retrieve_original_channel(self, node): + original = node.get_original_node() + channel = original.get_channel() if original else None + return {"id": channel.pk, "name": channel.name} if channel else None + + class Meta: + list_serializer_class = CustomListSerializer + model = ContentNode + fields = ('title', 'changed', 'id', 'description', 'sort_order','author', 'copyright_holder', 'license', 'license_description','assessment_items', 'files', + 'kind', 'parent', 'children', 'published', 'associated_presets', 'valid', 'metadata', 'ancestors', 'tags', 'extra_fields', 'original_channel') + + +class ContentNodeCompleteSerializer(ContentNodeEditSerializer): + class Meta: + list_serializer_class = CustomListSerializer + model = ContentNode + fields = ('title', 'changed', 'id', 'description', 'sort_order','author', 'node_id', 'copyright_holder', 'license', 'license_description', 'kind', + 'original_channel','original_source_node_id', 'source_node_id', 'content_id', 'original_channel_id', 'source_channel_id', 'source_id', 'source_domain', + 'children', 'parent', 'tags', 'created', 'modified', 'published', 'extra_fields', 'assessment_items', 'files', 'valid', 'metadata') + + class ChannelSerializer(serializers.ModelSerializer): has_changed = serializers.SerializerMethodField('check_for_changes') main_tree = RootNodeSerializer(read_only=True) @@ -607,7 +599,6 @@ class Meta: model = Channel fields = ('id', 'created', 'name', 'view_only', 'published', 'pending_editors', 'editors', 'description', 'size', 'count', 'version', 'public', 
'thumbnail_url', 'thumbnail', 'deleted') - class UserSerializer(serializers.ModelSerializer): class Meta: model = User diff --git a/contentcuration/contentcuration/static/js/edit_channel/export/hbtemplates/export_item.handlebars b/contentcuration/contentcuration/static/js/edit_channel/export/hbtemplates/export_item.handlebars index edc2afce0c..94837ff07c 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/export/hbtemplates/export_item.handlebars +++ b/contentcuration/contentcuration/static/js/edit_channel/export/hbtemplates/export_item.handlebars @@ -1,17 +1,16 @@
{{#unless isfolder}}{{/unless}} - {{node.title}} +
{{node.title}}
{{#if isfolder}} {{#if isempty}} (empty) {{else}} + {{#format_count "Resource" node.metadata.resource_count}}{{/format_count}} {{/if}} - {{else}} - ({{#format_file_size node.metadata.resource_size}}{{/format_file_size}}) {{/if}}
{{#if isfolder}}
-{{/if}} \ No newline at end of file +{{/if}} diff --git a/contentcuration/contentcuration/static/js/edit_channel/export/hbtemplates/export_modal.handlebars b/contentcuration/contentcuration/static/js/edit_channel/export/hbtemplates/export_modal.handlebars index 080c13feaa..2799e15365 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/export/hbtemplates/export_modal.handlebars +++ b/contentcuration/contentcuration/static/js/edit_channel/export/hbtemplates/export_modal.handlebars @@ -27,7 +27,7 @@ {{#format_count "Resource" node.metadata.resource_count}}{{/format_count}} - (Calculating size...) + (Calculating...) CANCEL diff --git a/contentcuration/contentcuration/static/js/edit_channel/export/views.js b/contentcuration/contentcuration/static/js/edit_channel/export/views.js index b7300e6212..36dc70b62e 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/export/views.js +++ b/contentcuration/contentcuration/static/js/edit_channel/export/views.js @@ -25,8 +25,7 @@ var ExportModalView = BaseViews.BaseModalView.extend({ }); var self = this; - this.retrieve_nodes(this.model.get('children')).then(function(collection){ - var size = collection.reduce(function(size, node){ return size + node.get('metadata').resource_size; }, 0); + this.model.calculate_size().then(function(size){ self.$("#export_size").text("(" + stringHelper.format_size(size) + ")"); }); }, @@ -62,7 +61,7 @@ var ExportListView = BaseViews.BaseListView.extend({ this.$el.html(this.template({id: this.model.get("id")})); var self = this; this.fetch_model(this.model).then(function(fetched){ - self.collection.get_all_fetch(fetched.get("children")).then(function(fetchedCollection){ + self.collection.get_all_fetch_simplified(fetched.get("children")).then(function(fetchedCollection){ self.load_content(fetchedCollection); }); }) diff --git a/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_dialog.handlebars b/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_dialog.handlebars index 6f81a76d7d..a2b3db9584 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_dialog.handlebars +++ b/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_dialog.handlebars @@ -7,5 +7,8 @@ - No files selected + + 0 Topics, 0 Resources + (0B) + \ No newline at end of file diff --git a/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_list_item.handlebars b/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_list_item.handlebars index c9a0f63d87..e74b072d0d 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_list_item.handlebars +++ b/contentcuration/contentcuration/static/js/edit_channel/import/hbtemplates/import_list_item.handlebars @@ -7,8 +7,12 @@
{{node.title}}
{{#if isfolder}} {{#if node.children}} + {{#if isfolder}} + {{#unless is_channel}}{{#format_count "Resource" node.metadata.resource_count}}{{/format_count}}{{/unless}} + {{else}} + {{#format_file_size node.metadata.resource_size}}{{/format_file_size}} + {{/if}} - {{!-- {{#unless is_channel}}{{#format_file_size node.metadata.resource_size}}{{/format_file_size}}{{/unless}} --}} {{else}} (empty) {{/if}} diff --git a/contentcuration/contentcuration/static/js/edit_channel/import/views.js b/contentcuration/contentcuration/static/js/edit_channel/import/views.js index 2ad150a59f..80abca953c 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/import/views.js +++ b/contentcuration/contentcuration/static/js/edit_channel/import/views.js @@ -83,12 +83,16 @@ var ImportView = BaseViews.BaseListView.extend({ }else{ totalCount += entry.get("metadata").total_count; } - }); var data = this.importList.get_metadata(); totalCount = totalCount - data.count; - this.$("#import_file_count").html(totalCount + " Topic" + ((totalCount == 1)? ", " : "s, ") + data.count + " Resource" + ((data.count == 1)? " " : "s ") + stringHelper.format_size(data.size)); + this.$("#import_file_count").html(totalCount + " Topic" + ((totalCount == 1)? ", " : "s, ") + data.count + " Resource" + ((data.count == 1)? "" : "s")); + var self = this; + this.$("#import_file_size").html("Calculating...") + collection.calculate_size().then(function(size){ + self.$("#import_file_size").html(stringHelper.format_size(size)); + }); }, import_content:function(){ var self = this; @@ -172,7 +176,6 @@ var ImportList = BaseViews.BaseListView.extend({ this.metadata = {"count" : 0, "size":0}; this.views.forEach(function(entry){ self.metadata.count += entry.metadata.count; - self.metadata.size += entry.metadata.size; }); return this.metadata; } diff --git a/contentcuration/contentcuration/static/js/edit_channel/models.js b/contentcuration/contentcuration/static/js/edit_channel/models.js index f5e97c3528..91012bc418 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/models.js +++ b/contentcuration/contentcuration/static/js/edit_channel/models.js @@ -126,12 +126,11 @@ var InvitationCollection = BaseCollection.extend({ }); /**** CHANNEL AND CONTENT MODELS ****/ -function fetch_nodes_by_ids(ids){ - var self = this; +function fetch_nodes(ids, url){ return new Promise(function(resolve, reject){ $.ajax({ method:"POST", - url: window.Urls.get_nodes_by_ids(), + url: url, data: JSON.stringify(ids), error: reject, success: function(data) { @@ -140,22 +139,10 @@ function fetch_nodes_by_ids(ids){ }); }); } -function fetch_nodes_by_ids_simplified(ids){ - var self = this; - return new Promise(function(resolve, reject){ - $.ajax({ - method:"POST", - url: window.Urls.get_nodes_by_ids_simplified(), - data: JSON.stringify(ids), - error: reject, - success: function(data) { - resolve(new ContentNodeCollection(JSON.parse(data))); - } - }); - }); +function fetch_nodes_by_ids(ids){ + return fetch_nodes(ids, window.Urls.get_nodes_by_ids()); } - var ContentNodeModel = BaseModel.extend({ root_list:"contentnode-list", model_name:"ContentNodeModel", @@ -214,6 +201,21 @@ var ContentNodeModel = BaseModel.extend({ data['randomize'] = (data['randomize'] !== undefined)? 
data['randomize'] : window.preferences.auto_randomize_questions; this.set('extra_fields', data); } + }, + calculate_size: function(){ + var self = this; + var promise = new Promise(function(resolve, reject){ + $.ajax({ + method:"POST", + url: window.Urls.get_total_size(), + data: JSON.stringify([self.id]), + error:reject, + success: function(data) { + resolve(JSON.parse(data).size); + } + }); + }); + return promise; } }); @@ -248,28 +250,61 @@ var ContentNodeCollection = BaseCollection.extend({ }); }); }); - return promise; }, - get_all_fetch: function(ids, force_fetch){ - force_fetch = (force_fetch)? true : false; - var self = this; + get_descendant_ids: function(){ + var self = this; return new Promise(function(resolve, reject){ - var idlists = _.partition(ids, function(id){return force_fetch || !self.get({'id': id});}); - var returnCollection = new ContentNodeCollection(self.filter(function(n){ return idlists[1].indexOf(n.id) >= 0; })) - fetch_nodes_by_ids(idlists[0]).then(function(fetched){ - returnCollection.add(fetched.toJSON()); - resolve(returnCollection); - }); + $.ajax({ + method:"POST", + url: window.Urls.get_node_descendants(), + data: JSON.stringify(self.pluck('id')), + success: function(data) { + resolve(JSON.parse(data).node_ids.split(" ")); + }, + error:reject + }); }); + }, + calculate_size: function(){ + var self = this; + return new Promise(function(resolve, reject){ + $.ajax({ + method:"POST", + url: window.Urls.get_total_size(), + data: JSON.stringify(self.pluck('id')), + success: function(data) { + resolve(JSON.parse(data).size); + }, + error:reject + }); + }); + }, + has_all_data: function(){ + return this.every(function(node){ + return _.every(node.get('files'), function(file){ + return typeof file == 'object'; + }); + }); + }, + get_all_fetch: function(ids, force_fetch){ + return this.get_fetch_nodes(ids, window.Urls.get_nodes_by_ids(), force_fetch); }, get_all_fetch_simplified: function(ids, force_fetch){ - force_fetch = (force_fetch)? true : false; + return this.get_fetch_nodes(ids, window.Urls.get_nodes_by_ids_simplified(), force_fetch); + }, + fetch_nodes_by_ids_complete: function(ids, force_fetch){ + return this.get_fetch_nodes(ids, window.Urls.get_nodes_by_ids_complete(), force_fetch); + }, + get_fetch_nodes: function(ids, url, force_fetch){ + force_fetch = (force_fetch)? true : false; var self = this; return new Promise(function(resolve, reject){ var idlists = _.partition(ids, function(id){return force_fetch || !self.get({'id': id});}); var returnCollection = new ContentNodeCollection(self.filter(function(n){ return idlists[1].indexOf(n.id) >= 0; })) - fetch_nodes_by_ids_simplified(idlists[0]).then(function(fetched){ + fetch_nodes(idlists[0], url).then(function(fetched){ returnCollection.add(fetched.toJSON()); + self.add(fetched.toJSON()); + self.sort(); resolve(returnCollection); }); }); @@ -283,7 +318,7 @@ var ContentNodeCollection = BaseCollection.extend({ }, duplicate:function(target_parent){ var self = this; - var promise = new Promise(function(resolve, reject){ + return new Promise(function(resolve, reject){ var sort_order =(target_parent) ? 
target_parent.get("metadata").max_sort_order + 1 : 1; var parent_id = target_parent.get("id"); @@ -297,15 +332,11 @@ var ContentNodeCollection = BaseCollection.extend({ url: window.Urls.duplicate_nodes(), data: JSON.stringify(data), success: function(data) { - copied_list = JSON.parse(data).node_ids.split(" "); - self.get_all_fetch(copied_list).then(function(fetched){ - resolve(fetched); - }); + resolve(new ContentNodeCollection(JSON.parse(data))); }, error:reject }); }); - return promise; }, move:function(target_parent, max_order, min_order){ var self = this; diff --git a/contentcuration/contentcuration/static/js/edit_channel/move/views.js b/contentcuration/contentcuration/static/js/edit_channel/move/views.js index b72ce30889..3d28a02b1a 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/move/views.js +++ b/contentcuration/contentcuration/static/js/edit_channel/move/views.js @@ -34,8 +34,6 @@ var MoveView = BaseViews.BaseListView.extend({ this.onmove = options.onmove; this.collection = options.collection; - // Calculate valid moves using node descendants - this.to_move_ids = _.uniq(this.collection.reduce(function(l,n){ return l.concat(n.get('descendants')).concat(n.id);}, [])); this.render(); }, events: { @@ -69,13 +67,19 @@ var MoveView = BaseViews.BaseListView.extend({ clipboard_node.set({'title': 'My Clipboard'}); fetched.add(clipboard_node); - // Render list - this.targetList = new MoveList({ - model: null, - el: $("#target_list_area"), - is_target: true, - collection: fetched, - container: this + // Calculate valid moves using node descendants + var self = this; + this.collection.get_descendant_ids().then(function(ids){ + self.to_move_ids = ids; + + // Render list + self.targetList = new MoveList({ + model: null, + el: $("#target_list_area"), + is_target: true, + collection: fetched, + container: self + }); }); }, diff --git a/contentcuration/contentcuration/static/js/edit_channel/preview/views.js b/contentcuration/contentcuration/static/js/edit_channel/preview/views.js index 1d84ccb25f..2f023fc135 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/preview/views.js +++ b/contentcuration/contentcuration/static/js/edit_channel/preview/views.js @@ -55,8 +55,9 @@ var PreviewView = BaseViews.BaseView.extend({ }, load_default_value:function(){ this.current_preview = null; - if(this.model.get('files').length){ - this.current_preview = _.min(this.model.get("files"), function(file){return file.preset.order}); + var preview_files = _.filter(this.model.get("files"), function(f){ return f.preset.display; }); + if(preview_files.length){ + this.current_preview = _.min(preview_files, function(file){return file.preset.order}); } }, load_presets:function(){ @@ -118,7 +119,7 @@ var PreviewView = BaseViews.BaseView.extend({ checksum:this.current_preview.checksum, subtitles : this.get_subtitles() })); - if(force_load && this.current_preview.recommended_kind === "video"){ + if(force_load && this.model.get('kind') === "video"){ $("#preview_window video").load(); } } diff --git a/contentcuration/contentcuration/static/js/edit_channel/router.js b/contentcuration/contentcuration/static/js/edit_channel/router.js index 18ef9cee92..1a52086c28 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/router.js +++ b/contentcuration/contentcuration/static/js/edit_channel/router.js @@ -46,7 +46,6 @@ ChannelEditRouter = Backbone.Router.extend({ clipboard_page:function(){ this.open_channel(true, true, window.current_user.get_clipboard()); }, - open_channel: 
function(edit_mode_on, is_clipboard, root){ window.fileformats = this.fileformats ; window.channels = this.channelCollection; diff --git a/contentcuration/contentcuration/static/js/edit_channel/tree_edit/hbtemplates/content_list_item.handlebars b/contentcuration/contentcuration/static/js/edit_channel/tree_edit/hbtemplates/content_list_item.handlebars index ac20bb5fb2..d50117e3a9 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/tree_edit/hbtemplates/content_list_item.handlebars +++ b/contentcuration/contentcuration/static/js/edit_channel/tree_edit/hbtemplates/content_list_item.handlebars @@ -21,13 +21,15 @@ {{#if time}} {{time}}{{/if}} {{#if isfolder}} {{#format_count "Resource" node.metadata.resource_count}}{{/format_count}}{{/if}}   - {{#if isexercise}} - - {{#format_count "Question" num_questions}}{{/format_count}} - {{else}} - - {{#format_file_size node.metadata.resource_size}}{{/format_file_size}} - {{/if}} + {{#unless isfolder}} + {{#if isexercise}} + + {{#format_count "Question" node.metadata.resource_count}}{{/format_count}} + {{else}} + + {{#format_file_size node.metadata.resource_size}}{{/format_file_size}} + {{/if}} + {{/unless}}   {{#if node.changed}} {{#if node.published}}Updated{{else}}New{{/if}} diff --git a/contentcuration/contentcuration/static/js/edit_channel/tree_edit/views.js b/contentcuration/contentcuration/static/js/edit_channel/tree_edit/views.js index 53c507ad7a..df17ccfa2c 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/tree_edit/views.js +++ b/contentcuration/contentcuration/static/js/edit_channel/tree_edit/views.js @@ -297,8 +297,7 @@ var ContentItem = BaseViews.BaseWorkspaceListNodeItemView.extend({ checked: this.checked, isexercise: this.model.get("kind") === "exercise", description_first: description[0], - description_overflow: description[1], - num_questions: _.where(this.model.get('assessment_items'), {'deleted': false}).length + description_overflow: description[1] })); this.handle_checked(); if(this.isSelected){ diff --git a/contentcuration/contentcuration/static/js/edit_channel/uploader/hbtemplates/edit_metadata_dialog.handlebars b/contentcuration/contentcuration/static/js/edit_channel/uploader/hbtemplates/edit_metadata_dialog.handlebars index b2bc5074ed..9ce6f95488 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/uploader/hbtemplates/edit_metadata_dialog.handlebars +++ b/contentcuration/contentcuration/static/js/edit_channel/uploader/hbtemplates/edit_metadata_dialog.handlebars @@ -8,7 +8,7 @@
-
+
Loading content...
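
Aside on the uploader change that follows: EditMetadataView now upgrades its own collection to complete node data before rendering, instead of trusting callers to pass fully-loaded nodes. A minimal sketch of the calling pattern, assuming only the ContentNodeCollection helpers added in models.js above (has_all_data, fetch_nodes_by_ids_complete); the ensure_complete_data wrapper is an illustrative name, not part of this patch:

    // Illustrative sketch, not part of this patch.
    // has_all_data() is true when every node's `files` entries are full objects,
    // i.e. the nodes came through the complete serializer; force a re-fetch only
    // when some nodes were loaded via a simplified endpoint.
    function ensure_complete_data(collection) {
        var needs_fetch = !collection.has_all_data();
        return collection.fetch_nodes_by_ids_complete(collection.pluck('id'), needs_fetch);
    }

EditMetadataView.render below follows the same sequence before calling load_list.
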
diff --git a/contentcuration/contentcuration/static/js/edit_channel/uploader/views.js b/contentcuration/contentcuration/static/js/edit_channel/uploader/views.js index f9625fe4c8..3ab996dcf9 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/uploader/views.js +++ b/contentcuration/contentcuration/static/js/edit_channel/uploader/views.js @@ -36,7 +36,7 @@ var MetadataModalView = BaseViews.BaseModalView.extend({ }); }, close_uploader:function(event){ - if(!this.allow_edit || !this.metadata_view.check_for_changes() || !event){ + if(!this.allow_edit || (this.metadata_view && !this.metadata_view.check_for_changes()) || !event){ this.close(); $(".modal-backdrop").remove(); }else if(confirm("Unsaved Metadata Detected! Exiting now will" @@ -57,7 +57,6 @@ var EditMetadataView = BaseViews.BaseEditableListView.extend({ _.bindAll(this, 'render_details', 'render_preview', 'render_questions', 'enable_submit', 'disable_submit', 'save_and_keep_open', 'save_nodes', 'save_and_finish','process_updated_collection', 'close_upload', 'copy_items'); this.bind_edit_functions(); - this.collection = options.collection; this.new_content = options.new_content; this.new_exercise = options.new_exercise; this.onsave = options.onsave; @@ -80,10 +79,15 @@ var EditMetadataView = BaseViews.BaseEditableListView.extend({ }, render: function() { this.$el.html(this.template({allow_edit: this.allow_edit})); - this.load_list(); - if(this.collection.length > 1){ - this.load_editor(this.edit_list.selected_items); - } + + var self = this; + this.collection.fetch_nodes_by_ids_complete(this.collection.pluck('id'), !this.collection.has_all_data()).then(function(fetched){ + self.collection.reset(fetched.toJSON()); + self.load_list(); + if(self.collection.length > 1){ + self.load_editor(self.edit_list.selected_items); + } + }); }, render_details:function(){ this.switchPanel("details"); @@ -239,6 +243,7 @@ var EditMetadataView = BaseViews.BaseEditableListView.extend({ } }, check_for_changes:function(){ + if(!this.edit_list) return false; return _.findWhere(this.edit_list.views, {edited : true}) != null; }, undo_changes:function(){ @@ -515,7 +520,8 @@ var EditMetadataEditor = BaseViews.BaseView.extend({ } }, get_license: function(license_id){ - if(!license_id || license_id <= 0){ return null; } + if(isNaN(license_id)){ return license_id; } + else if(!license_id || license_id <= 0){ return null; } return window.licenses.get({id: license_id}).get('license_name'); }, display_license_description: function(license_id){ diff --git a/contentcuration/contentcuration/static/js/edit_channel/views.js b/contentcuration/contentcuration/static/js/edit_channel/views.js index 731a702ab6..e75e20538d 100644 --- a/contentcuration/contentcuration/static/js/edit_channel/views.js +++ b/contentcuration/contentcuration/static/js/edit_channel/views.js @@ -200,9 +200,6 @@ var BaseWorkspaceView = BaseView.extend({ }); }, handle_move:function(target, moved, original_parents){ - // Recalculate counts - this.reload_ancestors(original_parents, true); - // Remove where nodes originally were moved.forEach(function(node){ window.workspace_manager.remove(node.id)}); @@ -210,6 +207,9 @@ var BaseWorkspaceView = BaseView.extend({ var content = window.workspace_manager.get(target.id); if(content && content.list) content.list.add_nodes(moved); + + // Recalculate counts + this.reload_ancestors(original_parents, true); } }); @@ -503,11 +503,10 @@ var BaseWorkspaceListView = BaseEditableListView.extend({ }); collection.move(self.model, max, 
min).then(function(savedCollection){ self.retrieve_nodes($.unique(reload_list), true).then(function(fetched){ - self.reload_ancestors(fetched); + self.container.handle_move(self.model, savedCollection, fetched); resolve(true); }); }).catch(function(error){ - // console.log(error.responseText); alert(error.responseText); $(".content-list").sortable( "cancel" ); $(".content-list").sortable( "enable" ); @@ -536,6 +535,7 @@ var BaseWorkspaceListView = BaseEditableListView.extend({ var new_view = self.create_new_view(entry); self.$(self.list_selector).append(new_view.el); }); + this.model.set('children', this.model.get('children').concat(collection.pluck('id'))); this.reload_ancestors(collection, false); this.handle_if_empty(); }, @@ -545,7 +545,6 @@ var BaseWorkspaceListView = BaseEditableListView.extend({ var new_topic = this.collection.create({ "kind":"topic", "title": "Topic", - "sort_order" : this.collection.length, "author": get_author(), }, { success:function(new_topic){ @@ -605,7 +604,6 @@ var BaseWorkspaceListView = BaseEditableListView.extend({ var new_exercise = this.collection.create({ "kind":"exercise", "title": (this.model.get('parent'))? this.model.get('title') + " Exercise" : "Exercise", // Avoid having exercises prefilled with 'email clipboard' - "sort_order" : this.collection.length, "author": get_author(), "copyright_holder": (window.preferences.copyright_holder === null) ? get_author() : window.preferences.copyright_holder, "license_description": (window.preferences.license_description && window.preferences.license==="Special Permissions") ? window.preferences.license_description : "" diff --git a/contentcuration/contentcuration/static/less/export.less b/contentcuration/contentcuration/static/less/export.less index fb1c8a3770..ab8c47aa85 100644 --- a/contentcuration/contentcuration/static/less/export.less +++ b/contentcuration/contentcuration/static/less/export.less @@ -11,44 +11,32 @@ border: none; padding-bottom: 5px; } - .modal-title{ max-width:80%; font-size:16pt; } - .required{ margin-left:5px; color:#EF2121; } - .modal-dialog{ width: @exporter-width; } - .modal-content{ - min-height:@exporter-height; background-color:@exporter-background-color; - - label, #export_license_select{ - font-size: 14pt; - } - + label, #export_license_select{ font-size: 14pt; } + #export_totals{ margin-right:10px; } #version_number{ color:gray; padding-right:10px; } - - #export_totals{ - margin-right:10px; - } - .annotation{ color:@gray-400; + font-size:10pt; font-weight:normal; + margin-left: 10px; } - #export_buttons{ margin-top:40px; margin-bottom:30px; @@ -57,7 +45,6 @@ vertical-align:middle; } } - #export_preview{ margin: 10px; min-height: @exporter-list-height; @@ -67,25 +54,16 @@ border-left:2px solid @blue-500; .export_item{ padding:5px; + .export_folder{ cursor:pointer; } + .folder_item{ font-weight:bold; } + .subdirectory{ display:none; } .export_item_title{ font-size:13pt; - } - - .export_folder{ - cursor:pointer; - } - - .folder_item{ - font-weight:bold; - } - - .subdirectory{ - display:none; + max-width: 80%; + display: inline; } } } } - - } } diff --git a/contentcuration/contentcuration/static/less/import.less b/contentcuration/contentcuration/static/less/import.less index 0cbaeb925d..6686e3a4cc 100644 --- a/contentcuration/contentcuration/static/less/import.less +++ b/contentcuration/contentcuration/static/less/import.less @@ -129,8 +129,8 @@ #import_content_submit{ margin-right:20px; } - #import_file_count{ - margin-right:20px; + #import_file_metadata{ + padding-right:20px; 
font-size:12pt; margin-top:2px; } diff --git a/contentcuration/contentcuration/static/less/uploader.less b/contentcuration/contentcuration/static/less/uploader.less index e902ec2524..7069beb21f 100644 --- a/contentcuration/contentcuration/static/less/uploader.less +++ b/contentcuration/contentcuration/static/less/uploader.less @@ -15,6 +15,7 @@ width: @metadata-width * 0.28; margin-right:10px; } + .tab_button{ display:none; } #uploader_select_all{ margin-left:20px; *{ cursor:pointer; } @@ -53,6 +54,13 @@ padding: 10px 0; margin-bottom: 30px; overflow:hidden; + #metadata_placeholder{ + min-height: 300px; + padding-top: 15%; + font-size: 14pt; + font-weight: bold; + color: @gray-400; + } .file_editor_row{ margin:0px; } #title_error{ display:none; } h4 { diff --git a/contentcuration/contentcuration/urls.py b/contentcuration/contentcuration/urls.py index 5bdf89e656..7f6ce29ea9 100644 --- a/contentcuration/contentcuration/urls.py +++ b/contentcuration/contentcuration/urls.py @@ -27,6 +27,8 @@ import contentcuration.view.settings_views as settings_views import contentcuration.view.internal_views as internal_views import contentcuration.view.zip_views as zip_views +import contentcuration.view.file_views as file_views +import contentcuration.view.node_views as node_views from rest_framework.authtoken import views as auth_view from contentcuration import api @@ -63,7 +65,7 @@ class ContentKindViewSet(viewsets.ModelViewSet): class ContentNodeViewSet(BulkModelViewSet): queryset = ContentNode.objects.all() - serializer_class = serializers.ContentNodeSerializer + serializer_class = serializers.ContentNodeCompleteSerializer def get_queryset(self): queryset = ContentNode.objects.all() @@ -108,26 +110,37 @@ class AssessmentItemViewSet(BulkModelViewSet): url(r'^admin/', include(admin.site.urls)), url(r'^api/', include(router.urls)), url(r'^api/', include(bulkrouter.urls)), - url(r'^api/duplicate_nodes/$', views.duplicate_nodes, name='duplicate_nodes'), - url(r'^api/move_nodes/$', views.move_nodes, name='move_nodes'), url(r'^api/publish_channel/$', views.publish_channel, name='publish_channel'), - url(r'^api/generate_thumbnail/$', views.generate_thumbnail, name='generate_thumbnail'), url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), - url(r'^file_upload/', views.file_upload, name="file_upload"), - url(r'^file_create/', views.file_create, name="file_create"), url(r'^channels/$', views.channel_list, name='channels'), url(r'^channels/(?P<channel_id>[^/]+)/edit', views.channel, name='channel'), url(r'^channels/(?P<channel_id>[^/]+)/view', views.channel_view_only, name='channel_view_only'), - url(r'^thumbnail_upload/', views.thumbnail_upload, name='thumbnail_upload'), - url(r'^exercise_image_upload/', views.exercise_image_upload, name='exercise_image_upload'), - url(r'^image_upload/', views.image_upload, name='image_upload'), - url(r'^zipcontent/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)', zip_views.ZipContentView.as_view(), {}, "zipcontent"), url(r'^unsupported_browser/$', views.unsupported_browser, name='unsupported_browser'), url(r'^unauthorized/$', views.unauthorized, name='unauthorized'), url(r'^accessible_channels/$', views.accessible_channels, name='accessible_channels'), url(r'^healthz$', views.health, name='health'), - url(r'^get_nodes_by_ids$', views.get_nodes_by_ids, name='get_nodes_by_ids'), - url(r'^get_nodes_by_ids_simplified$', views.get_nodes_by_ids_simplified, name='get_nodes_by_ids_simplified'), +] + +# Add node api endpoints +urlpatterns += [ + url(r'^api/get_nodes_by_ids$', node_views.get_nodes_by_ids, 
name='get_nodes_by_ids'), + url(r'^api/get_total_size$', node_views.get_total_size, name='get_total_size'), + url(r'^api/duplicate_nodes/$', node_views.duplicate_nodes, name='duplicate_nodes'), + url(r'^api/move_nodes/$', node_views.move_nodes, name='move_nodes'), + url(r'^api/get_node_descendants/$', node_views.get_node_descendants, name='get_node_descendants'), + url(r'^api/get_nodes_by_ids_simplified$', node_views.get_nodes_by_ids_simplified, name='get_nodes_by_ids_simplified'), + url(r'^api/get_nodes_by_ids_complete$', node_views.get_nodes_by_ids_complete, name='get_nodes_by_ids_complete'), +] + +# Add file api endpoints +urlpatterns += [ + url(r'^api/thumbnail_upload/', file_views.thumbnail_upload, name='thumbnail_upload'), + url(r'^api/exercise_image_upload/', file_views.exercise_image_upload, name='exercise_image_upload'), + url(r'^api/image_upload/', file_views.image_upload, name='image_upload'), + url(r'^zipcontent/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)', zip_views.ZipContentView.as_view(), {}, "zipcontent"), + url(r'^api/file_upload/', file_views.file_upload, name="file_upload"), + url(r'^api/file_create/', file_views.file_create, name="file_create"), + url(r'^api/generate_thumbnail/$', file_views.generate_thumbnail, name='generate_thumbnail'), ] # Add account/registration endpoints diff --git a/contentcuration/contentcuration/view/file_views.py b/contentcuration/contentcuration/view/file_views.py new file mode 100644 index 0000000000..7d4b9e669f --- /dev/null +++ b/contentcuration/contentcuration/view/file_views.py @@ -0,0 +1,117 @@ +import json +import logging +import os +from django.http import HttpResponse, HttpResponseBadRequest +from django.views.decorators.csrf import csrf_exempt +from django.conf import settings +from django.core.urlresolvers import reverse_lazy +from django.core.files import File as DjFile +from rest_framework.renderers import JSONRenderer +from contentcuration.api import write_file_to_storage +from contentcuration.utils.files import generate_thumbnail_from_node +from contentcuration.models import File, FormatPreset, ContentNode, License, generate_file_on_disk_name, generate_storage_url +from contentcuration.serializers import FileSerializer, ContentNodeEditSerializer +from le_utils.constants import format_presets, content_kinds, file_formats, exercises, licenses +from pressurecooker.videos import guess_video_preset_by_resolution + +def file_upload(request): + if request.method == 'POST': + # Implement logic for switching out files without saving it yet + filename, ext = os.path.splitext(request.FILES.values()[0]._name) + size = request.FILES.values()[0]._size + file_object = File(file_size=size, file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:], original_filename=request.FILES.values()[0]._name, preset_id=request.META.get('HTTP_PRESET')) + file_object.save() + return HttpResponse(json.dumps({ + "success": True, + "filename": str(file_object), + "file": JSONRenderer().render(FileSerializer(file_object).data) + })) + +def file_create(request): + if request.method == 'POST': + original_filename, ext = os.path.splitext(request.FILES.values()[0]._name) + size = request.FILES.values()[0]._size + presets = FormatPreset.objects.filter(allowed_formats__extension__contains=ext[1:]) + kind = presets.first().kind + preferences = json.loads(request.user.preferences) + author = preferences.get('author') if isinstance(preferences.get('author'), basestring) else request.user.get_full_name() + license = 
License.objects.filter(license_name=preferences.get('license')).first() # Use filter/first in case preference hasn't been set + license_id = license.pk if license else settings.DEFAULT_LICENSE + new_node = ContentNode(title=original_filename, kind=kind, license_id=license_id, author=author, copyright_holder=preferences.get('copyright_holder')) + if license and license.license_name == licenses.SPECIAL_PERMISSIONS: + new_node.license_description = preferences.get('license_description') + new_node.save() + file_object = File(file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:], original_filename=request.FILES.values()[0]._name, contentnode=new_node, file_size=size) + file_object.save() + if kind.pk == content_kinds.VIDEO: + file_object.preset_id = guess_video_preset_by_resolution(str(file_object.file_on_disk)) + elif presets.filter(supplementary=False).count() == 1: + file_object.preset = presets.filter(supplementary=False).first() + + file_object.save() + + try: + if preferences.get('auto_derive_video_thumbnail') and new_node.kind_id == content_kinds.VIDEO \ + or preferences.get('auto_derive_audio_thumbnail') and new_node.kind_id == content_kinds.AUDIO \ + or preferences.get('auto_derive_html5_thumbnail') and new_node.kind_id == content_kinds.HTML5 \ + or preferences.get('auto_derive_document_thumbnail') and new_node.kind_id == content_kinds.DOCUMENT: + generate_thumbnail_from_node(new_node, set_node=True) + except Exception: + pass + + return HttpResponse(json.dumps({ + "success": True, + "node": JSONRenderer().render(ContentNodeEditSerializer(new_node).data) + })) + +def generate_thumbnail(request): + logging.debug("Entering the generate_thumbnail endpoint") + + if request.method != 'POST': + return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.") + else: + data = json.loads(request.body) + node = ContentNode.objects.get(pk=data["node_id"]) + + thumbnail_object = generate_thumbnail_from_node(node) + + return HttpResponse(json.dumps({ + "success": True, + "file": JSONRenderer().render(FileSerializer(thumbnail_object).data), + "path": generate_storage_url(str(thumbnail_object)), + })) + +def thumbnail_upload(request): + if request.method == 'POST': + fobj = request.FILES.values()[0] + formatted_filename = write_file_to_storage(fobj) + + return HttpResponse(json.dumps({ + "success": True, + "formatted_filename": formatted_filename, + "file": None, + "path": generate_storage_url(formatted_filename), + })) + +def image_upload(request): + if request.method == 'POST': + name, ext = os.path.splitext(request.FILES.values()[0]._name) # gets file extension without leading period + file_object = File(contentnode_id=request.META.get('HTTP_NODE'),original_filename=name, preset_id=request.META.get('HTTP_PRESET'), file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:]) + file_object.save() + return HttpResponse(json.dumps({ + "success": True, + "file": JSONRenderer().render(FileSerializer(file_object).data), + "path": generate_storage_url(str(file_object)), + })) + +def exercise_image_upload(request): + if request.method == 'POST': + ext = os.path.splitext(request.FILES.values()[0]._name)[1][1:] # gets file extension without leading period + file_object = File(preset_id=format_presets.EXERCISE_IMAGE, file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext) + file_object.save() + return HttpResponse(json.dumps({ + "success": True, + "formatted_filename": exercises.CONTENT_STORAGE_FORMAT.format(str(file_object)), + "file_id": file_object.pk, 
"path": generate_storage_url(str(file_object)), + })) diff --git a/contentcuration/contentcuration/view/node_views.py b/contentcuration/contentcuration/view/node_views.py new file mode 100644 index 0000000000..03a5316f54 --- /dev/null +++ b/contentcuration/contentcuration/view/node_views.py @@ -0,0 +1,290 @@ +import copy +import json +import logging +import os +import uuid +from django.http import HttpResponse, HttpResponseBadRequest +from django.views.decorators.csrf import csrf_exempt +from django.conf import settings +from django.core.cache import cache +from django.core.exceptions import ObjectDoesNotExist +from django.db import transaction +from django.db.models import Q, Case, When, Value, IntegerField, Max, Sum +from rest_framework.renderers import JSONRenderer +from contentcuration.utils.files import duplicate_file +from contentcuration.models import File, ContentNode, ContentTag, AssessmentItem +from contentcuration.serializers import ContentNodeSerializer, ContentNodeEditSerializer, SimplifiedContentNodeSerializer +from le_utils.constants import format_presets, content_kinds, file_formats, licenses + +def get_total_size(request): + if request.method == 'POST': + data = json.loads(request.body) + sizes = ContentNode.objects.prefetch_related('assessment_items').prefetch_related('files').prefetch_related('children')\ + .filter(id__in=data).get_descendants(include_self=True)\ + .aggregate(resource_size=Sum('files__file_size'), assessment_size=Sum('assessment_items__files__file_size')) + + return HttpResponse(json.dumps({'success':True, 'size': (sizes['resource_size'] or 0) + (sizes['assessment_size'] or 0)})) + +def delete_nodes(request): + if request.method == 'POST': + data = json.loads(request.body) + nodes = ContentNode.objects.filter(pk__in=data['nodes']).delete() + return HttpResponse({'success':True}) + +def get_node_descendants(request): + if request.method == 'POST': + data = json.loads(request.body) + nodes = ContentNode.objects.filter(pk__in=data).get_descendants(include_self=True).values_list('id', flat=True) + return HttpResponse(json.dumps({'success':True, "node_ids": " ".join(nodes)})) + + +def get_nodes_by_ids(request): + if request.method == 'POST': + nodes = ContentNode.objects.prefetch_related('children').prefetch_related('files')\ + .prefetch_related('assessment_items').prefetch_related('tags').filter(pk__in=json.loads(request.body))\ + .defer('node_id', 'original_source_node_id', 'source_node_id', 'content_id', 'original_channel_id', 'source_channel_id', 'source_id', 'source_domain', 'created', 'modified') + return HttpResponse(JSONRenderer().render(ContentNodeSerializer(nodes, many=True).data)) + +def get_nodes_by_ids_simplified(request): + if request.method == 'POST': + nodes = ContentNode.objects.prefetch_related('children').filter(pk__in=json.loads(request.body)) + return HttpResponse(JSONRenderer().render(SimplifiedContentNodeSerializer(nodes, many=True).data)) + +def get_nodes_by_ids_complete(request): + if request.method == 'POST': + nodes = ContentNode.objects.prefetch_related('children').prefetch_related('files')\ + .prefetch_related('assessment_items').prefetch_related('tags').filter(pk__in=json.loads(request.body)) + return HttpResponse(JSONRenderer().render(ContentNodeEditSerializer(nodes, many=True).data)) + +def duplicate_nodes(request): + logging.debug("Entering the copy_node endpoint") + + if request.method != 'POST': + return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.") + else: + data = json.loads(request.body) + + 
try: + nodes = data["nodes"] + sort_order = data.get("sort_order") or 1 + target_parent = data["target_parent"] + channel_id = data["channel_id"] + new_nodes = [] + + with transaction.atomic(): + with ContentNode.objects.disable_mptt_updates(): + for node_data in nodes: + new_node = _duplicate_node_bulk(node_data['id'], sort_order=sort_order, parent=target_parent, channel_id=channel_id) + new_nodes.append(new_node.pk) + sort_order+=1 + + except KeyError: + raise ObjectDoesNotExist("Missing attribute from data: {}".format(data)) + + serialized = ContentNodeEditSerializer(ContentNode.objects.filter(pk__in=new_nodes), many=True).data + return HttpResponse(JSONRenderer().render(serialized)) + +def _duplicate_node_bulk(node, sort_order=None, parent=None, channel_id=None): + if isinstance(node, int) or isinstance(node, basestring): + node = ContentNode.objects.get(pk=node) + + # keep track of the in-memory models so that we can bulk-create them at the end (for efficiency) + to_create = { + "nodes": [], + "node_files": [], + "assessment_files": [], + "assessments": [], + } + + # perform the actual recursive node cloning + new_node = _duplicate_node_bulk_recursive(node=node, sort_order=sort_order, parent=parent, channel_id=channel_id, to_create=to_create) + + # create nodes, one level at a time, starting from the top of the tree (so that we have IDs to pass as "parent" for next level down) + for node_level in to_create["nodes"]: + for node in node_level: + node.parent_id = node.parent.id + ContentNode.objects.bulk_create(node_level) + for node in node_level: + for tag in node._meta.tags_to_add: + node.tags.add(tag) + + # rebuild MPTT tree for this channel (since we're inside "disable_mptt_updates", and bulk_create doesn't trigger rebuild signals anyway) + ContentNode.objects.partial_rebuild(to_create["nodes"][0][0].tree_id) + + ai_node_ids = [] + + # create each of the assessment items + for a in to_create["assessments"]: + a.contentnode_id = a.contentnode.id + ai_node_ids.append(a.contentnode_id) + AssessmentItem.objects.bulk_create(to_create["assessments"]) + + # build up a mapping of contentnode/assessment_id onto assessment item IDs, so we can point files to them correctly after + aid_mapping = {} + for a in AssessmentItem.objects.filter(contentnode_id__in=ai_node_ids): + aid_mapping[a.contentnode_id + ":" + a.assessment_id] = a.id + + # create the file objects, for both nodes and assessment items + for f in to_create["node_files"]: + f.contentnode_id = f.contentnode.id + for f in to_create["assessment_files"]: + f.assessment_item_id = aid_mapping[f.assessment_item.contentnode_id + ":" + f.assessment_item.assessment_id] + File.objects.bulk_create(to_create["node_files"] + to_create["assessment_files"]) + + return new_node + +def _duplicate_node_bulk_recursive(node, sort_order, parent, channel_id, to_create, level=0): + + if isinstance(node, int) or isinstance(node, basestring): + node = ContentNode.objects.get(pk=node) + + if isinstance(parent, int) or isinstance(parent, basestring): + parent = ContentNode.objects.get(pk=parent) + + # clone the model (in-memory) and update the fields on the cloned model + new_node = copy.copy(node) + new_node.id = None + new_node.tree_id = parent.tree_id + new_node.parent = parent + new_node.sort_order = sort_order or node.sort_order + new_node.changed = True + new_node.cloned_source = node + new_node.source_channel_id = node.get_channel().id if node.get_channel() else None + new_node.node_id = uuid.uuid4().hex + new_node.source_node_id = node.node_id + + # 
+
+def _duplicate_node_bulk_recursive(node, sort_order, parent, channel_id, to_create, level=0):
+
+    if isinstance(node, int) or isinstance(node, basestring):
+        node = ContentNode.objects.get(pk=node)
+
+    if isinstance(parent, int) or isinstance(parent, basestring):
+        parent = ContentNode.objects.get(pk=parent)
+
+    # clone the model (in-memory) and update the fields on the cloned model
+    new_node = copy.copy(node)
+    new_node.id = None
+    new_node.tree_id = parent.tree_id
+    new_node.parent = parent
+    new_node.sort_order = sort_order or node.sort_order
+    new_node.changed = True
+    new_node.cloned_source = node
+    new_node.source_channel_id = node.get_channel().id if node.get_channel() else None
+    new_node.node_id = uuid.uuid4().hex
+    new_node.source_node_id = node.node_id
+
+    # store the new unsaved model in a list, at the appropriate level, for later creation
+    while len(to_create["nodes"]) <= level:
+        to_create["nodes"].append([])
+    to_create["nodes"][level].append(new_node)
+
+    # find or create any tags that are needed, and store them under _meta on the node so we can add them to it later
+    new_node._meta.tags_to_add = []
+    for tag in node.tags.all():
+        new_tag, is_new = ContentTag.objects.get_or_create(
+            tag_name=tag.tag_name,
+            channel_id=channel_id,
+        )
+        new_node._meta.tags_to_add.append(new_tag)
+
+    # clone the file objects for later saving
+    for fobj in node.files.all():
+        f = duplicate_file(fobj, node=new_node, save=False)
+        to_create["node_files"].append(f)
+
+    # copy assessment item objects, and associated files
+    for aiobj in node.assessment_items.prefetch_related("files").all():
+        aiobj_copy = copy.copy(aiobj)
+        aiobj_copy.id = None
+        aiobj_copy.contentnode = new_node
+        to_create["assessments"].append(aiobj_copy)
+        for fobj in aiobj.files.all():
+            f = duplicate_file(fobj, assessment_item=aiobj_copy, save=False)
+            to_create["assessment_files"].append(f)
+
+    # recurse down the tree and clone the children
+    for child in node.children.all():
+        _duplicate_node_bulk_recursive(node=child, sort_order=None, parent=new_node, channel_id=channel_id, to_create=to_create, level=level + 1)
+
+    return new_node
+
+def _duplicate_node(node, sort_order=None, parent=None, channel_id=None):
+    if isinstance(node, int) or isinstance(node, basestring):
+        node = ContentNode.objects.get(pk=node)
+
+    original_channel = node.get_original_node().get_channel() if node.get_original_node() else None
+
+    new_node = ContentNode.objects.create(
+        title=node.title,
+        description=node.description,
+        kind=node.kind,
+        license=node.license,
+        parent=ContentNode.objects.get(pk=parent) if parent else None,
+        sort_order=sort_order or node.sort_order,
+        copyright_holder=node.copyright_holder,
+        changed=True,
+        original_node=node.original_node or node,
+        cloned_source=node,
+        # parenthesized so a missing original_channel doesn't discard an existing original_channel_id
+        original_channel_id=node.original_channel_id or (original_channel.id if original_channel else None),
+        source_channel_id=node.get_channel().id if node.get_channel() else None,
+        original_source_node_id=node.original_source_node_id or node.node_id,
+        source_node_id=node.node_id,
+        author=node.author,
+        content_id=node.content_id,
+        extra_fields=node.extra_fields,
+    )
+
+    # add tags now
+    for tag in node.tags.all():
+        new_tag, is_new = ContentTag.objects.get_or_create(
+            tag_name=tag.tag_name,
+            channel_id=channel_id,
+        )
+        new_node.tags.add(new_tag)
+
+    # copy file objects too
+    for fobj in node.files.all():
+        duplicate_file(fobj, node=new_node)
+
+    # copy assessment item objects too
+    for aiobj in node.assessment_items.all():
+        aiobj_copy = copy.copy(aiobj)
+        aiobj_copy.id = None
+        aiobj_copy.contentnode = new_node
+        aiobj_copy.save()
+        for fobj in aiobj.files.all():
+            duplicate_file(fobj, assessment_item=aiobj_copy)
+
+    for c in node.children.all():
+        _duplicate_node(c, parent=new_node.id)
+
+    return new_node
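
Both duplication paths rely on the same Django idiom: copy.copy() produces a shallow in-memory copy of a model instance, and clearing its primary key turns the next save() or bulk_create() into an INSERT of a fresh row. A standalone sketch of that idiom:

# Sketch of the clone idiom used above (shallow copy + cleared primary key).
import copy

def clone_row(instance):
    clone = copy.copy(instance)  # shallow copy: plain fields and FK ids only
    clone.id = None              # with no pk, Django INSERTs a new row on save()
    clone.save()
    return clone                 # m2m links (tags) and reverse FKs (files) are
                                 # NOT copied; the code above re-creates them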
ObjectDoesNotExist("Missing attribute from data: {}".format(data)) + + all_ids = [] + with transaction.atomic(): + with ContentNode.objects.delay_mptt_updates(): + for n in nodes: + min_order = min_order + float(max_order - min_order) / 2 + node = ContentNode.objects.get(pk=n['id']) + _move_node(node, parent=target_parent, sort_order=min_order, channel_id=channel_id) + all_ids.append(n['id']) + + serialized = ContentNodeEditSerializer(ContentNode.objects.filter(pk__in=all_ids), many=True).data + return HttpResponse(JSONRenderer().render(serialized)) + +def _move_node(node, parent=None, sort_order=None, channel_id=None): + node.parent = parent or node.parent + node.sort_order = sort_order or node.sort_order + node.changed = True + descendants = node.get_descendants(include_self=True) + node.save() + + for tag in ContentTag.objects.filter(tagged_content__in=descendants).distinct(): + # If moving from another channel + if tag.channel_id != channel_id: + t, is_new = ContentTag.objects.get_or_create(tag_name=tag.tag_name, channel_id=channel_id) + + # Set descendants with this tag to correct tag + for n in descendants.filter(tags=tag): + n.tags.remove(tag) + n.tags.add(t) + + return node diff --git a/contentcuration/contentcuration/views.py b/contentcuration/contentcuration/views.py index b0f31cb9f0..0dc331f058 100644 --- a/contentcuration/contentcuration/views.py +++ b/contentcuration/contentcuration/views.py @@ -20,14 +20,14 @@ from django.core.exceptions import ObjectDoesNotExist from django.core.context_processors import csrf from django.db import transaction -from django.db.models import Q, Case, When, Value, IntegerField, Max +from django.db.models import Q, Case, When, Value, IntegerField, Max, Sum from django.core.urlresolvers import reverse_lazy from django.core.files import File as DjFile from rest_framework.renderers import JSONRenderer from contentcuration.api import write_file_to_storage, check_supported_browsers from contentcuration.utils.files import extract_thumbnail_wrapper, compress_video_wrapper, generate_thumbnail_from_node, duplicate_file from contentcuration.models import Exercise, AssessmentItem, Channel, License, FileFormat, File, FormatPreset, ContentKind, ContentNode, ContentTag, User, Invitation, generate_file_on_disk_name, generate_storage_url -from contentcuration.serializers import RootNodeSerializer, AssessmentItemSerializer, AccessibleChannelListSerializer, ChannelListSerializer, ChannelSerializer, LicenseSerializer, FileFormatSerializer, FormatPresetSerializer, ContentKindSerializer, ContentNodeSerializer, SimplifiedContentNodeSerializer, TagSerializer, UserSerializer, CurrentUserSerializer, UserChannelListSerializer, FileSerializer +from contentcuration.serializers import RootNodeSerializer, AssessmentItemSerializer, AccessibleChannelListSerializer, ChannelListSerializer, ChannelSerializer, LicenseSerializer, FileFormatSerializer, FormatPresetSerializer, ContentKindSerializer, ContentNodeSerializer, TagSerializer, UserSerializer, CurrentUserSerializer, UserChannelListSerializer, FileSerializer from le_utils.constants import format_presets, content_kinds, file_formats, exercises, licenses from rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication from rest_framework.permissions import IsAuthenticated @@ -36,17 +36,6 @@ from pressurecooker.images import create_tiled_image from pressurecooker.encodings import write_base64_to_file -def get_nodes_by_ids(request): - if request.method == 'POST': - nodes = 
diff --git a/contentcuration/contentcuration/views.py b/contentcuration/contentcuration/views.py
index b0f31cb9f0..0dc331f058 100644
--- a/contentcuration/contentcuration/views.py
+++ b/contentcuration/contentcuration/views.py
@@ -20,14 +20,14 @@
 from django.core.exceptions import ObjectDoesNotExist
 from django.core.context_processors import csrf
 from django.db import transaction
-from django.db.models import Q, Case, When, Value, IntegerField, Max
+from django.db.models import Q, Case, When, Value, IntegerField, Max, Sum
 from django.core.urlresolvers import reverse_lazy
 from django.core.files import File as DjFile
 from rest_framework.renderers import JSONRenderer
 from contentcuration.api import write_file_to_storage, check_supported_browsers
 from contentcuration.utils.files import extract_thumbnail_wrapper, compress_video_wrapper, generate_thumbnail_from_node, duplicate_file
 from contentcuration.models import Exercise, AssessmentItem, Channel, License, FileFormat, File, FormatPreset, ContentKind, ContentNode, ContentTag, User, Invitation, generate_file_on_disk_name, generate_storage_url
-from contentcuration.serializers import RootNodeSerializer, AssessmentItemSerializer, AccessibleChannelListSerializer, ChannelListSerializer, ChannelSerializer, LicenseSerializer, FileFormatSerializer, FormatPresetSerializer, ContentKindSerializer, ContentNodeSerializer, SimplifiedContentNodeSerializer, TagSerializer, UserSerializer, CurrentUserSerializer, UserChannelListSerializer, FileSerializer
+from contentcuration.serializers import RootNodeSerializer, AssessmentItemSerializer, AccessibleChannelListSerializer, ChannelListSerializer, ChannelSerializer, LicenseSerializer, FileFormatSerializer, FormatPresetSerializer, ContentKindSerializer, ContentNodeSerializer, TagSerializer, UserSerializer, CurrentUserSerializer, UserChannelListSerializer, FileSerializer
 from le_utils.constants import format_presets, content_kinds, file_formats, exercises, licenses
 from rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication
 from rest_framework.permissions import IsAuthenticated
@@ -36,17 +36,6 @@
 from pressurecooker.images import create_tiled_image
 from pressurecooker.encodings import write_base64_to_file
 
-def get_nodes_by_ids(request):
-    if request.method == 'POST':
-        nodes = ContentNode.objects.prefetch_related('files').prefetch_related('assessment_items')\
-            .prefetch_related('tags').prefetch_related('children').filter(pk__in=json.loads(request.body))
-        return HttpResponse(JSONRenderer().render(ContentNodeSerializer(nodes, many=True).data))
-
-def get_nodes_by_ids_simplified(request):
-    if request.method == 'POST':
-        nodes = ContentNode.objects.prefetch_related('children').filter(pk__in=json.loads(request.body))
-        return HttpResponse(JSONRenderer().render(SimplifiedContentNodeSerializer(nodes, many=True).data))
-
 def base(request):
     if not check_supported_browsers(request.META.get('HTTP_USER_AGENT')):
         return redirect(reverse_lazy('unsupported_browser'))
@@ -150,291 +139,6 @@ def channel_view_only(request, channel_id):
     return channel_page(request, channel)
 
-def file_upload(request):
-    if request.method == 'POST':
-        preset = FormatPreset.objects.get(id=request.META.get('HTTP_PRESET'))
-        #Implement logic for switching out files without saving it yet
-        filename, ext = os.path.splitext(request.FILES.values()[0]._name)
-        size = request.FILES.values()[0]._size
-        file_object = File(file_size=size, file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:], original_filename=request.FILES.values()[0]._name, preset=preset)
-        file_object.save()
-        return HttpResponse(json.dumps({
-            "success": True,
-            "filename": str(file_object),
-            "file": JSONRenderer().render(FileSerializer(file_object).data)
-        }))
-
-def file_create(request):
-    if request.method == 'POST':
-        original_filename, ext = os.path.splitext(request.FILES.values()[0]._name)
-        size = request.FILES.values()[0]._size
-        presets = FormatPreset.objects.filter(allowed_formats__extension__contains=ext[1:])
-        kind = presets.first().kind
-        preferences = json.loads(request.user.preferences)
-        author = preferences.get('author') if isinstance(preferences.get('author'), basestring) else request.user.get_full_name()
-        license = License.objects.filter(license_name=preferences.get('license')).first() # Use filter/first in case preference hasn't been set
-        license_id = license.pk if license else settings.DEFAULT_LICENSE
-        new_node = ContentNode(title=original_filename, kind=kind, license_id=license_id, author=author, copyright_holder=preferences.get('copyright_holder'))
-        if license.license_name == licenses.SPECIAL_PERMISSIONS:
-            new_node.license_description = preferences.get('license_description')
-        new_node.save()
-        file_object = File(file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:], original_filename=request.FILES.values()[0]._name, contentnode=new_node, file_size=size)
-        file_object.save()
-        if kind.pk == content_kinds.VIDEO:
-            file_object.preset_id = guess_video_preset_by_resolution(str(file_object.file_on_disk))
-        elif presets.filter(supplementary=False).count() == 1:
-            file_object.preset = presets.filter(supplementary=False).first()
-
-        file_object.save()
-
-        try:
-            if preferences.get('auto_derive_video_thumbnail') and new_node.kind_id == content_kinds.VIDEO \
-                or preferences.get('auto_derive_audio_thumbnail') and new_node.kind_id == content_kinds.AUDIO \
-                or preferences.get('auto_derive_html5_thumbnail') and new_node.kind_id == content_kinds.HTML5 \
-                or preferences.get('auto_derive_document_thumbnail') and new_node.kind_id == content_kinds.DOCUMENT:
-                generate_thumbnail_from_node(new_node, set_node=True)
-        except Exception:
-            pass
-
-        return HttpResponse(json.dumps({
-            "success": True,
-            "node": JSONRenderer().render(ContentNodeSerializer(new_node).data)
-        }))
-
-def generate_thumbnail(request):
-    logging.debug("Entering the generate_thumbnail endpoint")
-
-    if request.method != 'POST':
-        return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")
-    else:
-        data = json.loads(request.body)
-        node = ContentNode.objects.get(pk=data["node_id"])
-
-        thumbnail_object = generate_thumbnail_from_node(node)
-
-        return HttpResponse(json.dumps({
-            "success": True,
-            "file": JSONRenderer().render(FileSerializer(thumbnail_object).data),
-            "path": generate_storage_url(str(thumbnail_object)),
-        }))
-
-def thumbnail_upload(request):
-    if request.method == 'POST':
-        fobj = request.FILES.values()[0]
-        formatted_filename = write_file_to_storage(fobj)
-
-        return HttpResponse(json.dumps({
-            "success": True,
-            "formatted_filename": formatted_filename,
-            "file": None,
-            "path": generate_storage_url(formatted_filename),
-        }))
-
-def image_upload(request):
-    if request.method == 'POST':
-        name, ext = os.path.splitext(request.FILES.values()[0]._name) # gets file extension without leading period
-        file_object = File(contentnode_id=request.META.get('HTTP_NODE'),original_filename=name, preset_id=request.META.get('HTTP_PRESET'), file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:])
-        file_object.save()
-        return HttpResponse(json.dumps({
-            "success": True,
-            "file": JSONRenderer().render(FileSerializer(file_object).data),
-            "path": generate_storage_url(str(file_object)),
-        }))
-
-def exercise_image_upload(request):
-    if request.method == 'POST':
-        ext = os.path.splitext(request.FILES.values()[0]._name)[1][1:] # gets file extension without leading period
-        file_object = File(preset_id=format_presets.EXERCISE_IMAGE, file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext)
-        file_object.save()
-        return HttpResponse(json.dumps({
-            "success": True,
-            "formatted_filename": exercises.CONTENT_STORAGE_FORMAT.format(str(file_object)),
-            "file_id": file_object.pk,
-            "path": generate_storage_url(str(file_object)),
-        }))
-
-def duplicate_nodes(request):
-    logging.debug("Entering the copy_node endpoint")
-
-    if request.method != 'POST':
-        return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")
-    else:
-        data = json.loads(request.body)
-
-        try:
-            nodes = data["nodes"]
-            sort_order = data.get("sort_order") or 1
-            target_parent = data["target_parent"]
-            channel_id = data["channel_id"]
-            new_nodes = []
-
-            with transaction.atomic():
-                with ContentNode.objects.disable_mptt_updates():
-                    for node_data in nodes:
-                        new_node = _duplicate_node_bulk(node_data['id'], sort_order=sort_order, parent=target_parent, channel_id=channel_id)
-                        new_nodes.append(new_node.pk)
-                        sort_order+=1
-
-        except KeyError:
-            raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
-
-        return HttpResponse(json.dumps({
-            "success": True,
-            "node_ids": " ".join(new_nodes)
-        }))
"parent" for next level down) - for node_level in to_create["nodes"]: - for node in node_level: - node.parent_id = node.parent.id - ContentNode.objects.bulk_create(node_level) - for node in node_level: - for tag in node._meta.tags_to_add: - node.tags.add(tag) - - # rebuild MPTT tree for this channel (since we're inside "disable_mptt_updates", and bulk_create doesn't trigger rebuild signals anyway) - ContentNode.objects.partial_rebuild(to_create["nodes"][0][0].tree_id) - - ai_node_ids = [] - - # create each of the assessment items - for a in to_create["assessments"]: - a.contentnode_id = a.contentnode.id - ai_node_ids.append(a.contentnode_id) - AssessmentItem.objects.bulk_create(to_create["assessments"]) - - # build up a mapping of contentnode/assessment_id onto assessment item IDs, so we can point files to them correctly after - aid_mapping = {} - for a in AssessmentItem.objects.filter(contentnode_id__in=ai_node_ids): - aid_mapping[a.contentnode_id + ":" + a.assessment_id] = a.id - - # create the file objects, for both nodes and assessment items - for f in to_create["node_files"]: - f.contentnode_id = f.contentnode.id - for f in to_create["assessment_files"]: - f.assessment_item_id = aid_mapping[f.assessment_item.contentnode_id + ":" + f.assessment_item.assessment_id] - File.objects.bulk_create(to_create["node_files"] + to_create["assessment_files"]) - - return new_node - -def _duplicate_node_bulk_recursive(node, sort_order, parent, channel_id, to_create, level=0): - - if isinstance(node, int) or isinstance(node, basestring): - node = ContentNode.objects.get(pk=node) - - if isinstance(parent, int) or isinstance(parent, basestring): - parent = ContentNode.objects.get(pk=parent) - - # clone the model (in-memory) and update the fields on the cloned model - new_node = copy.copy(node) - new_node.id = None - new_node.tree_id = parent.tree_id - new_node.parent = parent - new_node.sort_order = sort_order or node.sort_order - new_node.changed = True - new_node.cloned_source = node - new_node.source_channel_id = node.get_channel().id if node.get_channel() else None - new_node.node_id = uuid.uuid4().hex - new_node.source_node_id = node.node_id - - # store the new unsaved model in a list, at the appropriate level, for later creation - while len(to_create["nodes"]) <= level: - to_create["nodes"].append([]) - to_create["nodes"][level].append(new_node) - - # find or create any tags that are needed, and store them under _meta on the node so we can add them to it later - new_node._meta.tags_to_add = [] - for tag in node.tags.all(): - new_tag, is_new = ContentTag.objects.get_or_create( - tag_name=tag.tag_name, - channel_id=channel_id, - ) - new_node._meta.tags_to_add.append(new_tag) - - # clone the file objects for later saving - for fobj in node.files.all(): - f = duplicate_file(fobj, node=new_node, save=False) - to_create["node_files"].append(f) - - # copy assessment item objects, and associated files - for aiobj in node.assessment_items.prefetch_related("files").all(): - aiobj_copy = copy.copy(aiobj) - aiobj_copy.id = None - aiobj_copy.contentnode = new_node - to_create["assessments"].append(aiobj_copy) - for fobj in aiobj.files.all(): - f = duplicate_file(fobj, assessment_item=aiobj_copy, save=False) - to_create["assessment_files"].append(f) - - # recurse down the tree and clone the children - for child in node.children.all(): - _duplicate_node_bulk_recursive(node=child, sort_order=None, parent=new_node, channel_id=channel_id, to_create=to_create, level=level+1) - - return new_node - -def 
-
-def move_nodes(request):
-    logging.debug("Entering the move_nodes endpoint")
-
-    if request.method != 'POST':
-        return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")
-    else:
-        data = json.loads(request.body)
-
-        try:
-            nodes = data["nodes"]
-            target_parent = ContentNode.objects.get(pk=data["target_parent"])
-            channel_id = data["channel_id"]
-            min_order = data.get("min_order") or 0
-            max_order = data.get("max_order") or min_order + len(nodes)
-
-        except KeyError:
-            raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
-
-        all_ids = []
-        with transaction.atomic():
-            for n in nodes:
-                min_order = min_order + float(max_order - min_order) / 2
-                node = ContentNode.objects.get(pk=n['id'])
-                _move_node(node, parent=target_parent, sort_order=min_order, channel_id=channel_id)
-                all_ids.append(n['id'])
-
-        serialized = ContentNodeSerializer(ContentNode.objects.filter(pk__in=all_ids), many=True).data
-        return HttpResponse(JSONRenderer().render(serialized))
-
-def _move_node(node, parent=None, sort_order=None, channel_id=None):
-    node.parent = parent
-    node.sort_order = sort_order
-    node.changed = True
-    descendants = node.get_descendants(include_self=True)
-    node.save()
-
-    for tag in ContentTag.objects.filter(tagged_content__in=descendants).distinct():
-        # If moving from another channel
-        if tag.channel_id != channel_id:
-            t, is_new = ContentTag.objects.get_or_create(tag_name=tag.tag_name, channel_id=channel_id)
-
-            # Set descendants with this tag to correct tag
-            for n in descendants.filter(tags=tag):
-                n.tags.remove(tag)
-                n.tags.add(t)
-
-    return node
-
 @csrf_exempt
 def publish_channel(request):
     logging.debug("Entering the publish_channel endpoint")
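
Because the node endpoints now live in contentcuration/view/node_views.py rather than views.py, the URL configuration has to point at the new module. That change is not part of this diff, so the following urls.py sketch is an assumption about the wiring (route paths and names invented for illustration), not a record of it:

# Hypothetical urls.py wiring; paths and names are assumptions.
from django.conf.urls import url
from contentcuration.view import node_views

urlpatterns = [
    url(r'^api/get_total_size$', node_views.get_total_size, name='get_total_size'),
    url(r'^api/delete_nodes$', node_views.delete_nodes, name='delete_nodes'),
    url(r'^api/get_nodes_by_ids$', node_views.get_nodes_by_ids, name='get_nodes_by_ids'),
    url(r'^api/duplicate_nodes$', node_views.duplicate_nodes, name='duplicate_nodes'),
    url(r'^api/move_nodes$', node_views.move_nodes, name='move_nodes'),
]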