diff --git a/contentcuration/contentcuration/static/js/edit_channel/uploader/views.js b/contentcuration/contentcuration/static/js/edit_channel/uploader/views.js
index f9625fe4c8..3ab996dcf9 100644
--- a/contentcuration/contentcuration/static/js/edit_channel/uploader/views.js
+++ b/contentcuration/contentcuration/static/js/edit_channel/uploader/views.js
@@ -36,7 +36,7 @@ var MetadataModalView = BaseViews.BaseModalView.extend({
});
},
close_uploader:function(event){
- if(!this.allow_edit || !this.metadata_view.check_for_changes() || !event){
+ if(!this.allow_edit || (this.metadata_view && !this.metadata_view.check_for_changes()) || !event){
this.close();
$(".modal-backdrop").remove();
}else if(confirm("Unsaved Metadata Detected! Exiting now will"
@@ -57,7 +57,6 @@ var EditMetadataView = BaseViews.BaseEditableListView.extend({
_.bindAll(this, 'render_details', 'render_preview', 'render_questions', 'enable_submit', 'disable_submit',
'save_and_keep_open', 'save_nodes', 'save_and_finish','process_updated_collection', 'close_upload', 'copy_items');
this.bind_edit_functions();
- this.collection = options.collection;
this.new_content = options.new_content;
this.new_exercise = options.new_exercise;
this.onsave = options.onsave;
@@ -80,10 +79,15 @@ var EditMetadataView = BaseViews.BaseEditableListView.extend({
},
render: function() {
this.$el.html(this.template({allow_edit: this.allow_edit}));
- this.load_list();
- if(this.collection.length > 1){
- this.load_editor(this.edit_list.selected_items);
- }
+
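+        // Make sure full node data is loaded before rendering (refetch when the collection lacks complete data)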
+ var self = this;
+ this.collection.fetch_nodes_by_ids_complete(this.collection.pluck('id'), !this.collection.has_all_data()).then(function(fetched){
+ self.collection.reset(fetched.toJSON());
+ self.load_list();
+ if(self.collection.length > 1){
+ self.load_editor(self.edit_list.selected_items);
+ }
+ });
},
render_details:function(){
this.switchPanel("details");
@@ -239,6 +243,7 @@ var EditMetadataView = BaseViews.BaseEditableListView.extend({
}
},
check_for_changes:function(){
+ if(!this.edit_list) return false;
return _.findWhere(this.edit_list.views, {edited : true}) != null;
},
undo_changes:function(){
@@ -515,7 +520,8 @@ var EditMetadataEditor = BaseViews.BaseView.extend({
}
},
get_license: function(license_id){
- if(!license_id || license_id <= 0){ return null; }
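+        // A license name may be passed in directly instead of an id; return it unchanged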
+ if(isNaN(license_id)){ return license_id; }
+ else if(!license_id || license_id <= 0){ return null; }
return window.licenses.get({id: license_id}).get('license_name');
},
display_license_description: function(license_id){
diff --git a/contentcuration/contentcuration/static/js/edit_channel/views.js b/contentcuration/contentcuration/static/js/edit_channel/views.js
index 731a702ab6..e75e20538d 100644
--- a/contentcuration/contentcuration/static/js/edit_channel/views.js
+++ b/contentcuration/contentcuration/static/js/edit_channel/views.js
@@ -200,9 +200,6 @@ var BaseWorkspaceView = BaseView.extend({
});
},
handle_move:function(target, moved, original_parents){
- // Recalculate counts
- this.reload_ancestors(original_parents, true);
-
// Remove where nodes originally were
moved.forEach(function(node){ window.workspace_manager.remove(node.id)});
@@ -210,6 +207,9 @@ var BaseWorkspaceView = BaseView.extend({
var content = window.workspace_manager.get(target.id);
if(content && content.list)
content.list.add_nodes(moved);
+
+ // Recalculate counts
+ this.reload_ancestors(original_parents, true);
}
});
@@ -503,11 +503,10 @@ var BaseWorkspaceListView = BaseEditableListView.extend({
});
collection.move(self.model, max, min).then(function(savedCollection){
self.retrieve_nodes($.unique(reload_list), true).then(function(fetched){
- self.reload_ancestors(fetched);
+ self.container.handle_move(self.model, savedCollection, fetched);
resolve(true);
});
}).catch(function(error){
- // console.log(error.responseText);
alert(error.responseText);
$(".content-list").sortable( "cancel" );
$(".content-list").sortable( "enable" );
@@ -536,6 +535,7 @@ var BaseWorkspaceListView = BaseEditableListView.extend({
var new_view = self.create_new_view(entry);
self.$(self.list_selector).append(new_view.el);
});
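+            // Record the new ids on the parent's children attribute so counts and later fetches stay in sync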
+ this.model.set('children', this.model.get('children').concat(collection.pluck('id')));
this.reload_ancestors(collection, false);
this.handle_if_empty();
},
@@ -545,7 +545,6 @@ var BaseWorkspaceListView = BaseEditableListView.extend({
var new_topic = this.collection.create({
"kind":"topic",
"title": "Topic",
- "sort_order" : this.collection.length,
"author": get_author(),
}, {
success:function(new_topic){
@@ -605,7 +604,6 @@ var BaseWorkspaceListView = BaseEditableListView.extend({
var new_exercise = this.collection.create({
"kind":"exercise",
"title": (this.model.get('parent'))? this.model.get('title') + " Exercise" : "Exercise", // Avoid having exercises prefilled with 'email clipboard'
- "sort_order" : this.collection.length,
"author": get_author(),
"copyright_holder": (window.preferences.copyright_holder === null) ? get_author() : window.preferences.copyright_holder,
"license_description": (window.preferences.license_description && window.preferences.license==="Special Permissions") ? window.preferences.license_description : ""
diff --git a/contentcuration/contentcuration/static/less/export.less b/contentcuration/contentcuration/static/less/export.less
index fb1c8a3770..ab8c47aa85 100644
--- a/contentcuration/contentcuration/static/less/export.less
+++ b/contentcuration/contentcuration/static/less/export.less
@@ -11,44 +11,32 @@
border: none;
padding-bottom: 5px;
}
-
.modal-title{
max-width:80%;
font-size:16pt;
}
-
.required{
margin-left:5px;
color:#EF2121;
}
-
.modal-dialog{
width: @exporter-width;
}
-
.modal-content{
-
min-height:@exporter-height;
background-color:@exporter-background-color;
-
- label, #export_license_select{
- font-size: 14pt;
- }
-
+ label, #export_license_select{ font-size: 14pt; }
+ #export_totals{ margin-right:10px; }
#version_number{
color:gray;
padding-right:10px;
}
-
- #export_totals{
- margin-right:10px;
- }
-
.annotation{
color:@gray-400;
+ font-size:10pt;
font-weight:normal;
+ margin-left: 10px;
}
-
#export_buttons{
margin-top:40px;
margin-bottom:30px;
@@ -57,7 +45,6 @@
vertical-align:middle;
}
}
-
#export_preview{
margin: 10px;
min-height: @exporter-list-height;
@@ -67,25 +54,16 @@
border-left:2px solid @blue-500;
.export_item{
padding:5px;
+ .export_folder{ cursor:pointer; }
+ .folder_item{ font-weight:bold; }
+ .subdirectory{ display:none; }
.export_item_title{
font-size:13pt;
- }
-
- .export_folder{
- cursor:pointer;
- }
-
- .folder_item{
- font-weight:bold;
- }
-
- .subdirectory{
- display:none;
+ max-width: 80%;
+ display: inline;
}
}
}
}
-
-
}
}
diff --git a/contentcuration/contentcuration/static/less/import.less b/contentcuration/contentcuration/static/less/import.less
index 0cbaeb925d..6686e3a4cc 100644
--- a/contentcuration/contentcuration/static/less/import.less
+++ b/contentcuration/contentcuration/static/less/import.less
@@ -129,8 +129,8 @@
#import_content_submit{
margin-right:20px;
}
- #import_file_count{
- margin-right:20px;
+ #import_file_metadata{
+ padding-right:20px;
font-size:12pt;
margin-top:2px;
}
diff --git a/contentcuration/contentcuration/static/less/uploader.less b/contentcuration/contentcuration/static/less/uploader.less
index e902ec2524..7069beb21f 100644
--- a/contentcuration/contentcuration/static/less/uploader.less
+++ b/contentcuration/contentcuration/static/less/uploader.less
@@ -15,6 +15,7 @@
width: @metadata-width * 0.28;
margin-right:10px;
}
+ .tab_button{ display:none; }
#uploader_select_all{
margin-left:20px;
*{ cursor:pointer; }
@@ -53,6 +54,13 @@
padding: 10px 0;
margin-bottom: 30px;
overflow:hidden;
+ #metadata_placeholder{
+ min-height: 300px;
+ padding-top: 15%;
+ font-size: 14pt;
+ font-weight: bold;
+ color: @gray-400;
+ }
.file_editor_row{ margin:0px; }
#title_error{ display:none; }
h4 {
diff --git a/contentcuration/contentcuration/urls.py b/contentcuration/contentcuration/urls.py
index 5bdf89e656..7f6ce29ea9 100644
--- a/contentcuration/contentcuration/urls.py
+++ b/contentcuration/contentcuration/urls.py
@@ -27,6 +27,8 @@
import contentcuration.view.settings_views as settings_views
import contentcuration.view.internal_views as internal_views
import contentcuration.view.zip_views as zip_views
+import contentcuration.view.file_views as file_views
+import contentcuration.view.node_views as node_views
from rest_framework.authtoken import views as auth_view
from contentcuration import api
@@ -63,7 +65,7 @@ class ContentKindViewSet(viewsets.ModelViewSet):
class ContentNodeViewSet(BulkModelViewSet):
queryset = ContentNode.objects.all()
- serializer_class = serializers.ContentNodeSerializer
+ serializer_class = serializers.ContentNodeCompleteSerializer
def get_queryset(self):
queryset = ContentNode.objects.all()
@@ -108,26 +110,37 @@ class AssessmentItemViewSet(BulkModelViewSet):
url(r'^admin/', include(admin.site.urls)),
url(r'^api/', include(router.urls)),
url(r'^api/', include(bulkrouter.urls)),
- url(r'^api/duplicate_nodes/$', views.duplicate_nodes, name='duplicate_nodes'),
- url(r'^api/move_nodes/$', views.move_nodes, name='move_nodes'),
url(r'^api/publish_channel/$', views.publish_channel, name='publish_channel'),
- url(r'^api/generate_thumbnail/$', views.generate_thumbnail, name='generate_thumbnail'),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
- url(r'^file_upload/', views.file_upload, name="file_upload"),
- url(r'^file_create/', views.file_create, name="file_create"),
url(r'^channels/$', views.channel_list, name='channels'),
url(r'^channels/(?P<channel_id>[^/]+)/edit', views.channel, name='channel'),
url(r'^channels/(?P<channel_id>[^/]+)/view', views.channel_view_only, name='channel_view_only'),
- url(r'^thumbnail_upload/', views.thumbnail_upload, name='thumbnail_upload'),
- url(r'^exercise_image_upload/', views.exercise_image_upload, name='exercise_image_upload'),
- url(r'^image_upload/', views.image_upload, name='image_upload'),
-    url(r'^zipcontent/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)', zip_views.ZipContentView.as_view(), {}, "zipcontent"),
url(r'^unsupported_browser/$', views.unsupported_browser, name='unsupported_browser'),
url(r'^unauthorized/$', views.unauthorized, name='unauthorized'),
url(r'^accessible_channels/$', views.accessible_channels, name='accessible_channels'),
url(r'^healthz$', views.health, name='health'),
- url(r'^get_nodes_by_ids$', views.get_nodes_by_ids, name='get_nodes_by_ids'),
- url(r'^get_nodes_by_ids_simplified$', views.get_nodes_by_ids_simplified, name='get_nodes_by_ids_simplified'),
+]
+
+# Add node API endpoints
+urlpatterns += [
+ url(r'^api/get_nodes_by_ids$', node_views.get_nodes_by_ids, name='get_nodes_by_ids'),
+ url(r'^api/get_total_size$', node_views.get_total_size, name='get_total_size'),
+ url(r'^api/duplicate_nodes/$', node_views.duplicate_nodes, name='duplicate_nodes'),
+ url(r'^api/move_nodes/$', node_views.move_nodes, name='move_nodes'),
+ url(r'^api/get_node_descendants/$', node_views.get_node_descendants, name='get_node_descendants'),
+ url(r'^api/get_nodes_by_ids_simplified$', node_views.get_nodes_by_ids_simplified, name='get_nodes_by_ids_simplified'),
+ url(r'^api/get_nodes_by_ids_complete$', node_views.get_nodes_by_ids_complete, name='get_nodes_by_ids_complete'),
+]
+
+# Add file API endpoints
+urlpatterns += [
+ url(r'^api/thumbnail_upload/', file_views.thumbnail_upload, name='thumbnail_upload'),
+ url(r'^api/exercise_image_upload/', file_views.exercise_image_upload, name='exercise_image_upload'),
+ url(r'^api/image_upload/', file_views.image_upload, name='image_upload'),
+    url(r'^zipcontent/(?P<zipped_filename>[^/]+)/(?P<embedded_filepath>.*)', zip_views.ZipContentView.as_view(), {}, "zipcontent"),
+ url(r'^api/file_upload/', file_views.file_upload, name="file_upload"),
+ url(r'^api/file_create/', file_views.file_create, name="file_create"),
+ url(r'^api/generate_thumbnail/$', file_views.generate_thumbnail, name='generate_thumbnail'),
]
# Add account/registration endpoints
diff --git a/contentcuration/contentcuration/view/file_views.py b/contentcuration/contentcuration/view/file_views.py
new file mode 100644
index 0000000000..7d4b9e669f
--- /dev/null
+++ b/contentcuration/contentcuration/view/file_views.py
@@ -0,0 +1,117 @@
+import json
+import logging
+import os
+from django.http import HttpResponse, HttpResponseBadRequest
+from django.views.decorators.csrf import csrf_exempt
+from django.conf import settings
+from django.core.urlresolvers import reverse_lazy
+from django.core.files import File as DjFile
+from rest_framework.renderers import JSONRenderer
+from contentcuration.api import write_file_to_storage
+from contentcuration.utils.files import generate_thumbnail_from_node
+from contentcuration.models import File, FormatPreset, ContentNode, License, generate_file_on_disk_name, generate_storage_url
+from contentcuration.serializers import FileSerializer, ContentNodeEditSerializer
+from le_utils.constants import format_presets, content_kinds, file_formats, exercises, licenses
+from pressurecooker.videos import guess_video_preset_by_resolution
+
+def file_upload(request):
+ if request.method == 'POST':
+ #Implement logic for switching out files without saving it yet
+ filename, ext = os.path.splitext(request.FILES.values()[0]._name)
+ size = request.FILES.values()[0]._size
+ file_object = File(file_size=size, file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:], original_filename=request.FILES.values()[0]._name, preset_id=request.META.get('HTTP_PRESET'))
+ file_object.save()
+ return HttpResponse(json.dumps({
+ "success": True,
+ "filename": str(file_object),
+ "file": JSONRenderer().render(FileSerializer(file_object).data)
+ }))
+
+def file_create(request):
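+    # Create a new ContentNode from an uploaded file, inferring kind and preset
+    # from the file extension and applying the user's saved preferences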
+ if request.method == 'POST':
+ original_filename, ext = os.path.splitext(request.FILES.values()[0]._name)
+ size = request.FILES.values()[0]._size
+ presets = FormatPreset.objects.filter(allowed_formats__extension__contains=ext[1:])
+ kind = presets.first().kind
+ preferences = json.loads(request.user.preferences)
+ author = preferences.get('author') if isinstance(preferences.get('author'), basestring) else request.user.get_full_name()
+ license = License.objects.filter(license_name=preferences.get('license')).first() # Use filter/first in case preference hasn't been set
+ license_id = license.pk if license else settings.DEFAULT_LICENSE
+ new_node = ContentNode(title=original_filename, kind=kind, license_id=license_id, author=author, copyright_holder=preferences.get('copyright_holder'))
+        if license and license.license_name == licenses.SPECIAL_PERMISSIONS:
+ new_node.license_description = preferences.get('license_description')
+ new_node.save()
+ file_object = File(file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:], original_filename=request.FILES.values()[0]._name, contentnode=new_node, file_size=size)
+ file_object.save()
+ if kind.pk == content_kinds.VIDEO:
+ file_object.preset_id = guess_video_preset_by_resolution(str(file_object.file_on_disk))
+ elif presets.filter(supplementary=False).count() == 1:
+ file_object.preset = presets.filter(supplementary=False).first()
+
+ file_object.save()
+
+ try:
+ if preferences.get('auto_derive_video_thumbnail') and new_node.kind_id == content_kinds.VIDEO \
+ or preferences.get('auto_derive_audio_thumbnail') and new_node.kind_id == content_kinds.AUDIO \
+ or preferences.get('auto_derive_html5_thumbnail') and new_node.kind_id == content_kinds.HTML5 \
+ or preferences.get('auto_derive_document_thumbnail') and new_node.kind_id == content_kinds.DOCUMENT:
+ generate_thumbnail_from_node(new_node, set_node=True)
+ except Exception:
+ pass
+
+ return HttpResponse(json.dumps({
+ "success": True,
+ "node": JSONRenderer().render(ContentNodeEditSerializer(new_node).data)
+ }))
+
+def generate_thumbnail(request):
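+    # Generate and attach a thumbnail file for the node id given in the request body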
+ logging.debug("Entering the generate_thumbnail endpoint")
+
+ if request.method != 'POST':
+        return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")
+ else:
+ data = json.loads(request.body)
+ node = ContentNode.objects.get(pk=data["node_id"])
+
+ thumbnail_object = generate_thumbnail_from_node(node)
+
+ return HttpResponse(json.dumps({
+ "success": True,
+ "file": JSONRenderer().render(FileSerializer(thumbnail_object).data),
+ "path": generate_storage_url(str(thumbnail_object)),
+ }))
+
+def thumbnail_upload(request):
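+    # Write the uploaded thumbnail to storage and return its path (no File record is created yet)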
+ if request.method == 'POST':
+ fobj = request.FILES.values()[0]
+ formatted_filename = write_file_to_storage(fobj)
+
+ return HttpResponse(json.dumps({
+ "success": True,
+ "formatted_filename": formatted_filename,
+ "file": None,
+ "path": generate_storage_url(formatted_filename),
+ }))
+
+def image_upload(request):
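+    # Create a File object for an uploaded image, attached to the node and preset given in the request headers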
+ if request.method == 'POST':
+ name, ext = os.path.splitext(request.FILES.values()[0]._name) # gets file extension without leading period
+ file_object = File(contentnode_id=request.META.get('HTTP_NODE'),original_filename=name, preset_id=request.META.get('HTTP_PRESET'), file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:])
+ file_object.save()
+ return HttpResponse(json.dumps({
+ "success": True,
+ "file": JSONRenderer().render(FileSerializer(file_object).data),
+ "path": generate_storage_url(str(file_object)),
+ }))
+
+def exercise_image_upload(request):
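+    # Create a File object for an uploaded exercise image and return the storage-formatted filename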
+ if request.method == 'POST':
+ ext = os.path.splitext(request.FILES.values()[0]._name)[1][1:] # gets file extension without leading period
+ file_object = File(preset_id=format_presets.EXERCISE_IMAGE, file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext)
+ file_object.save()
+ return HttpResponse(json.dumps({
+ "success": True,
+ "formatted_filename": exercises.CONTENT_STORAGE_FORMAT.format(str(file_object)),
+ "file_id": file_object.pk,
+ "path": generate_storage_url(str(file_object)),
+ }))
diff --git a/contentcuration/contentcuration/view/node_views.py b/contentcuration/contentcuration/view/node_views.py
new file mode 100644
index 0000000000..03a5316f54
--- /dev/null
+++ b/contentcuration/contentcuration/view/node_views.py
@@ -0,0 +1,290 @@
+import copy
+import json
+import logging
+import os
+import uuid
+from django.http import HttpResponse, HttpResponseBadRequest
+from django.views.decorators.csrf import csrf_exempt
+from django.conf import settings
+from django.core.cache import cache
+from django.core.exceptions import ObjectDoesNotExist
+from django.db import transaction
+from django.db.models import Q, Case, When, Value, IntegerField, Max, Sum
+from rest_framework.renderers import JSONRenderer
+from contentcuration.utils.files import duplicate_file
+from contentcuration.models import File, ContentNode, ContentTag, AssessmentItem
+from contentcuration.serializers import ContentNodeSerializer, ContentNodeEditSerializer, SimplifiedContentNodeSerializer
+from le_utils.constants import format_presets, content_kinds, file_formats, licenses
+
+def get_total_size(request):
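+    # Sum the file sizes of the requested nodes and their descendants, including assessment item files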
+ if request.method == 'POST':
+ data = json.loads(request.body)
+ sizes = ContentNode.objects.prefetch_related('assessment_items').prefetch_related('files').prefetch_related('children')\
+ .filter(id__in=data).get_descendants(include_self=True)\
+ .aggregate(resource_size=Sum('files__file_size'), assessment_size=Sum('assessment_items__files__file_size'))
+
+ return HttpResponse(json.dumps({'success':True, 'size': (sizes['resource_size'] or 0) + (sizes['assessment_size'] or 0)}))
+
+def delete_nodes(request):
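+    # Delete the requested nodes from the database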
+ if request.method == 'POST':
+ data = json.loads(request.body)
+        ContentNode.objects.filter(pk__in=data['nodes']).delete()
+        return HttpResponse(json.dumps({'success':True}))
+
+def get_node_descendants(request):
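+    # Return the ids of the requested nodes and all of their descendants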
+ if request.method == 'POST':
+ data = json.loads(request.body)
+ nodes = ContentNode.objects.filter(pk__in=data).get_descendants(include_self=True).values_list('id', flat=True)
+ return HttpResponse(json.dumps({'success':True, "node_ids": " ".join(nodes)}))
+
+
+def get_nodes_by_ids(request):
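+    # Return serialized nodes, deferring source-tracking fields the editor does not need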
+ if request.method == 'POST':
+ nodes = ContentNode.objects.prefetch_related('children').prefetch_related('files')\
+ .prefetch_related('assessment_items').prefetch_related('tags').filter(pk__in=json.loads(request.body))\
+ .defer('node_id', 'original_source_node_id', 'source_node_id', 'content_id', 'original_channel_id', 'source_channel_id', 'source_id', 'source_domain', 'created', 'modified')
+ return HttpResponse(JSONRenderer().render(ContentNodeSerializer(nodes, many=True).data))
+
+def get_nodes_by_ids_simplified(request):
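+    # Return a minimal serialization for callers that only need basic node data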
+ if request.method == 'POST':
+ nodes = ContentNode.objects.prefetch_related('children').filter(pk__in=json.loads(request.body))
+ return HttpResponse(JSONRenderer().render(SimplifiedContentNodeSerializer(nodes, many=True).data))
+
+def get_nodes_by_ids_complete(request):
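+    # Return fully serialized nodes, including files, assessment items, and tags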
+ if request.method == 'POST':
+ nodes = ContentNode.objects.prefetch_related('children').prefetch_related('files')\
+ .prefetch_related('assessment_items').prefetch_related('tags').filter(pk__in=json.loads(request.body))
+ return HttpResponse(JSONRenderer().render(ContentNodeEditSerializer(nodes, many=True).data))
+
+def duplicate_nodes(request):
+ logging.debug("Entering the copy_node endpoint")
+
+ if request.method != 'POST':
+ return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")
+ else:
+ data = json.loads(request.body)
+
+ try:
+ nodes = data["nodes"]
+ sort_order = data.get("sort_order") or 1
+ target_parent = data["target_parent"]
+ channel_id = data["channel_id"]
+ new_nodes = []
+
+ with transaction.atomic():
+ with ContentNode.objects.disable_mptt_updates():
+ for node_data in nodes:
+ new_node = _duplicate_node_bulk(node_data['id'], sort_order=sort_order, parent=target_parent, channel_id=channel_id)
+ new_nodes.append(new_node.pk)
+ sort_order+=1
+
+ except KeyError:
+ raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
+
+ serialized = ContentNodeEditSerializer(ContentNode.objects.filter(pk__in=new_nodes), many=True).data
+ return HttpResponse(JSONRenderer().render(serialized))
+
+def _duplicate_node_bulk(node, sort_order=None, parent=None, channel_id=None):
+ if isinstance(node, int) or isinstance(node, basestring):
+ node = ContentNode.objects.get(pk=node)
+
+ # keep track of the in-memory models so that we can bulk-create them at the end (for efficiency)
+ to_create = {
+ "nodes": [],
+ "node_files": [],
+ "assessment_files": [],
+ "assessments": [],
+ }
+
+ # perform the actual recursive node cloning
+ new_node = _duplicate_node_bulk_recursive(node=node, sort_order=sort_order, parent=parent, channel_id=channel_id, to_create=to_create)
+
+ # create nodes, one level at a time, starting from the top of the tree (so that we have IDs to pass as "parent" for next level down)
+ for node_level in to_create["nodes"]:
+ for node in node_level:
+ node.parent_id = node.parent.id
+ ContentNode.objects.bulk_create(node_level)
+ for node in node_level:
+ for tag in node._meta.tags_to_add:
+ node.tags.add(tag)
+
+ # rebuild MPTT tree for this channel (since we're inside "disable_mptt_updates", and bulk_create doesn't trigger rebuild signals anyway)
+ ContentNode.objects.partial_rebuild(to_create["nodes"][0][0].tree_id)
+
+ ai_node_ids = []
+
+ # create each of the assessment items
+ for a in to_create["assessments"]:
+ a.contentnode_id = a.contentnode.id
+ ai_node_ids.append(a.contentnode_id)
+ AssessmentItem.objects.bulk_create(to_create["assessments"])
+
+ # build up a mapping of contentnode/assessment_id onto assessment item IDs, so we can point files to them correctly after
+ aid_mapping = {}
+ for a in AssessmentItem.objects.filter(contentnode_id__in=ai_node_ids):
+ aid_mapping[a.contentnode_id + ":" + a.assessment_id] = a.id
+
+ # create the file objects, for both nodes and assessment items
+ for f in to_create["node_files"]:
+ f.contentnode_id = f.contentnode.id
+ for f in to_create["assessment_files"]:
+ f.assessment_item_id = aid_mapping[f.assessment_item.contentnode_id + ":" + f.assessment_item.assessment_id]
+ File.objects.bulk_create(to_create["node_files"] + to_create["assessment_files"])
+
+ return new_node
+
+def _duplicate_node_bulk_recursive(node, sort_order, parent, channel_id, to_create, level=0):
+
+ if isinstance(node, int) or isinstance(node, basestring):
+ node = ContentNode.objects.get(pk=node)
+
+ if isinstance(parent, int) or isinstance(parent, basestring):
+ parent = ContentNode.objects.get(pk=parent)
+
+ # clone the model (in-memory) and update the fields on the cloned model
+ new_node = copy.copy(node)
+ new_node.id = None
+ new_node.tree_id = parent.tree_id
+ new_node.parent = parent
+ new_node.sort_order = sort_order or node.sort_order
+ new_node.changed = True
+ new_node.cloned_source = node
+ new_node.source_channel_id = node.get_channel().id if node.get_channel() else None
+ new_node.node_id = uuid.uuid4().hex
+ new_node.source_node_id = node.node_id
+
+ # store the new unsaved model in a list, at the appropriate level, for later creation
+ while len(to_create["nodes"]) <= level:
+ to_create["nodes"].append([])
+ to_create["nodes"][level].append(new_node)
+
+ # find or create any tags that are needed, and store them under _meta on the node so we can add them to it later
+ new_node._meta.tags_to_add = []
+ for tag in node.tags.all():
+ new_tag, is_new = ContentTag.objects.get_or_create(
+ tag_name=tag.tag_name,
+ channel_id=channel_id,
+ )
+ new_node._meta.tags_to_add.append(new_tag)
+
+ # clone the file objects for later saving
+ for fobj in node.files.all():
+ f = duplicate_file(fobj, node=new_node, save=False)
+ to_create["node_files"].append(f)
+
+ # copy assessment item objects, and associated files
+ for aiobj in node.assessment_items.prefetch_related("files").all():
+ aiobj_copy = copy.copy(aiobj)
+ aiobj_copy.id = None
+ aiobj_copy.contentnode = new_node
+ to_create["assessments"].append(aiobj_copy)
+ for fobj in aiobj.files.all():
+ f = duplicate_file(fobj, assessment_item=aiobj_copy, save=False)
+ to_create["assessment_files"].append(f)
+
+ # recurse down the tree and clone the children
+ for child in node.children.all():
+ _duplicate_node_bulk_recursive(node=child, sort_order=None, parent=new_node, channel_id=channel_id, to_create=to_create, level=level+1)
+
+ return new_node
+
+def _duplicate_node(node, sort_order=None, parent=None, channel_id=None):
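+    # Single-node copy helper that saves each node as it goes; duplicate_nodes uses the bulk version above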
+ if isinstance(node, int) or isinstance(node, basestring):
+ node = ContentNode.objects.get(pk=node)
+
+ original_channel = node.get_original_node().get_channel() if node.get_original_node() else None
+
+ new_node = ContentNode.objects.create(
+ title=node.title,
+ description=node.description,
+ kind=node.kind,
+ license=node.license,
+ parent=ContentNode.objects.get(pk=parent) if parent else None,
+ sort_order=sort_order or node.sort_order,
+ copyright_holder=node.copyright_holder,
+ changed=True,
+ original_node=node.original_node or node,
+ cloned_source=node,
+ original_channel_id = node.original_channel_id or original_channel.id if original_channel else None,
+ source_channel_id = node.get_channel().id if node.get_channel() else None,
+ original_source_node_id = node.original_source_node_id or node.node_id,
+ source_node_id = node.node_id,
+ author=node.author,
+ content_id=node.content_id,
+ extra_fields=node.extra_fields,
+ )
+
+ # add tags now
+ for tag in node.tags.all():
+ new_tag, is_new = ContentTag.objects.get_or_create(
+ tag_name=tag.tag_name,
+ channel_id=channel_id,
+ )
+ new_node.tags.add(new_tag)
+
+ # copy file object too
+ for fobj in node.files.all():
+ duplicate_file(fobj, node=new_node)
+
+ # copy assessment item object too
+ for aiobj in node.assessment_items.all():
+ aiobj_copy = copy.copy(aiobj)
+ aiobj_copy.id = None
+ aiobj_copy.contentnode = new_node
+ aiobj_copy.save()
+ for fobj in aiobj.files.all():
+ duplicate_file(fobj, assessment_item=aiobj_copy)
+
+ for c in node.children.all():
+ _duplicate_node(c, parent=new_node.id)
+
+ return new_node
+
+def move_nodes(request):
+ logging.debug("Entering the move_nodes endpoint")
+
+ if request.method != 'POST':
+        return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")
+ else:
+ data = json.loads(request.body)
+
+ try:
+ nodes = data["nodes"]
+ target_parent = ContentNode.objects.get(pk=data["target_parent"])
+ channel_id = data["channel_id"]
+ min_order = data.get("min_order") or 0
+ max_order = data.get("max_order") or min_order + len(nodes)
+
+ except KeyError:
+ raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
+
+ all_ids = []
+ with transaction.atomic():
+ with ContentNode.objects.delay_mptt_updates():
+ for n in nodes:
+ min_order = min_order + float(max_order - min_order) / 2
+ node = ContentNode.objects.get(pk=n['id'])
+ _move_node(node, parent=target_parent, sort_order=min_order, channel_id=channel_id)
+ all_ids.append(n['id'])
+
+ serialized = ContentNodeEditSerializer(ContentNode.objects.filter(pk__in=all_ids), many=True).data
+ return HttpResponse(JSONRenderer().render(serialized))
+
+def _move_node(node, parent=None, sort_order=None, channel_id=None):
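+    # Reparent the node, update its sort order, and remap its descendants' tags onto the target channel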
+ node.parent = parent or node.parent
+ node.sort_order = sort_order or node.sort_order
+ node.changed = True
+ descendants = node.get_descendants(include_self=True)
+ node.save()
+
+ for tag in ContentTag.objects.filter(tagged_content__in=descendants).distinct():
+ # If moving from another channel
+ if tag.channel_id != channel_id:
+ t, is_new = ContentTag.objects.get_or_create(tag_name=tag.tag_name, channel_id=channel_id)
+
+ # Set descendants with this tag to correct tag
+ for n in descendants.filter(tags=tag):
+ n.tags.remove(tag)
+ n.tags.add(t)
+
+ return node
diff --git a/contentcuration/contentcuration/views.py b/contentcuration/contentcuration/views.py
index b0f31cb9f0..0dc331f058 100644
--- a/contentcuration/contentcuration/views.py
+++ b/contentcuration/contentcuration/views.py
@@ -20,14 +20,14 @@
from django.core.exceptions import ObjectDoesNotExist
from django.core.context_processors import csrf
from django.db import transaction
-from django.db.models import Q, Case, When, Value, IntegerField, Max
+from django.db.models import Q, Case, When, Value, IntegerField, Max, Sum
from django.core.urlresolvers import reverse_lazy
from django.core.files import File as DjFile
from rest_framework.renderers import JSONRenderer
from contentcuration.api import write_file_to_storage, check_supported_browsers
from contentcuration.utils.files import extract_thumbnail_wrapper, compress_video_wrapper, generate_thumbnail_from_node, duplicate_file
from contentcuration.models import Exercise, AssessmentItem, Channel, License, FileFormat, File, FormatPreset, ContentKind, ContentNode, ContentTag, User, Invitation, generate_file_on_disk_name, generate_storage_url
-from contentcuration.serializers import RootNodeSerializer, AssessmentItemSerializer, AccessibleChannelListSerializer, ChannelListSerializer, ChannelSerializer, LicenseSerializer, FileFormatSerializer, FormatPresetSerializer, ContentKindSerializer, ContentNodeSerializer, SimplifiedContentNodeSerializer, TagSerializer, UserSerializer, CurrentUserSerializer, UserChannelListSerializer, FileSerializer
+from contentcuration.serializers import RootNodeSerializer, AssessmentItemSerializer, AccessibleChannelListSerializer, ChannelListSerializer, ChannelSerializer, LicenseSerializer, FileFormatSerializer, FormatPresetSerializer, ContentKindSerializer, ContentNodeSerializer, TagSerializer, UserSerializer, CurrentUserSerializer, UserChannelListSerializer, FileSerializer
from le_utils.constants import format_presets, content_kinds, file_formats, exercises, licenses
from rest_framework.authentication import SessionAuthentication, BasicAuthentication, TokenAuthentication
from rest_framework.permissions import IsAuthenticated
@@ -36,17 +36,6 @@
from pressurecooker.images import create_tiled_image
from pressurecooker.encodings import write_base64_to_file
-def get_nodes_by_ids(request):
- if request.method == 'POST':
- nodes = ContentNode.objects.prefetch_related('files').prefetch_related('assessment_items')\
- .prefetch_related('tags').prefetch_related('children').filter(pk__in=json.loads(request.body))
- return HttpResponse(JSONRenderer().render(ContentNodeSerializer(nodes, many=True).data))
-
-def get_nodes_by_ids_simplified(request):
- if request.method == 'POST':
- nodes = ContentNode.objects.prefetch_related('children').filter(pk__in=json.loads(request.body))
- return HttpResponse(JSONRenderer().render(SimplifiedContentNodeSerializer(nodes, many=True).data))
-
def base(request):
if not check_supported_browsers(request.META.get('HTTP_USER_AGENT')):
return redirect(reverse_lazy('unsupported_browser'))
@@ -150,291 +139,6 @@ def channel_view_only(request, channel_id):
return channel_page(request, channel)
-def file_upload(request):
- if request.method == 'POST':
- preset = FormatPreset.objects.get(id=request.META.get('HTTP_PRESET'))
- #Implement logic for switching out files without saving it yet
- filename, ext = os.path.splitext(request.FILES.values()[0]._name)
- size = request.FILES.values()[0]._size
- file_object = File(file_size=size, file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:], original_filename=request.FILES.values()[0]._name, preset=preset)
- file_object.save()
- return HttpResponse(json.dumps({
- "success": True,
- "filename": str(file_object),
- "file": JSONRenderer().render(FileSerializer(file_object).data)
- }))
-
-def file_create(request):
- if request.method == 'POST':
- original_filename, ext = os.path.splitext(request.FILES.values()[0]._name)
- size = request.FILES.values()[0]._size
- presets = FormatPreset.objects.filter(allowed_formats__extension__contains=ext[1:])
- kind = presets.first().kind
- preferences = json.loads(request.user.preferences)
- author = preferences.get('author') if isinstance(preferences.get('author'), basestring) else request.user.get_full_name()
- license = License.objects.filter(license_name=preferences.get('license')).first() # Use filter/first in case preference hasn't been set
- license_id = license.pk if license else settings.DEFAULT_LICENSE
- new_node = ContentNode(title=original_filename, kind=kind, license_id=license_id, author=author, copyright_holder=preferences.get('copyright_holder'))
- if license.license_name == licenses.SPECIAL_PERMISSIONS:
- new_node.license_description = preferences.get('license_description')
- new_node.save()
- file_object = File(file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:], original_filename=request.FILES.values()[0]._name, contentnode=new_node, file_size=size)
- file_object.save()
- if kind.pk == content_kinds.VIDEO:
- file_object.preset_id = guess_video_preset_by_resolution(str(file_object.file_on_disk))
- elif presets.filter(supplementary=False).count() == 1:
- file_object.preset = presets.filter(supplementary=False).first()
-
- file_object.save()
-
- try:
- if preferences.get('auto_derive_video_thumbnail') and new_node.kind_id == content_kinds.VIDEO \
- or preferences.get('auto_derive_audio_thumbnail') and new_node.kind_id == content_kinds.AUDIO \
- or preferences.get('auto_derive_html5_thumbnail') and new_node.kind_id == content_kinds.HTML5 \
- or preferences.get('auto_derive_document_thumbnail') and new_node.kind_id == content_kinds.DOCUMENT:
- generate_thumbnail_from_node(new_node, set_node=True)
- except Exception:
- pass
-
- return HttpResponse(json.dumps({
- "success": True,
- "node": JSONRenderer().render(ContentNodeSerializer(new_node).data)
- }))
-
-def generate_thumbnail(request):
- logging.debug("Entering the generate_thumbnail endpoint")
-
- if request.method != 'POST':
- return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")
- else:
- data = json.loads(request.body)
- node = ContentNode.objects.get(pk=data["node_id"])
-
- thumbnail_object = generate_thumbnail_from_node(node)
-
- return HttpResponse(json.dumps({
- "success": True,
- "file": JSONRenderer().render(FileSerializer(thumbnail_object).data),
- "path": generate_storage_url(str(thumbnail_object)),
- }))
-
-def thumbnail_upload(request):
- if request.method == 'POST':
- fobj = request.FILES.values()[0]
- formatted_filename = write_file_to_storage(fobj)
-
- return HttpResponse(json.dumps({
- "success": True,
- "formatted_filename": formatted_filename,
- "file": None,
- "path": generate_storage_url(formatted_filename),
- }))
-
-def image_upload(request):
- if request.method == 'POST':
- name, ext = os.path.splitext(request.FILES.values()[0]._name) # gets file extension without leading period
- file_object = File(contentnode_id=request.META.get('HTTP_NODE'),original_filename=name, preset_id=request.META.get('HTTP_PRESET'), file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext[1:])
- file_object.save()
- return HttpResponse(json.dumps({
- "success": True,
- "file": JSONRenderer().render(FileSerializer(file_object).data),
- "path": generate_storage_url(str(file_object)),
- }))
-
-def exercise_image_upload(request):
- if request.method == 'POST':
- ext = os.path.splitext(request.FILES.values()[0]._name)[1][1:] # gets file extension without leading period
- file_object = File(preset_id=format_presets.EXERCISE_IMAGE, file_on_disk=DjFile(request.FILES.values()[0]), file_format_id=ext)
- file_object.save()
- return HttpResponse(json.dumps({
- "success": True,
- "formatted_filename": exercises.CONTENT_STORAGE_FORMAT.format(str(file_object)),
- "file_id": file_object.pk,
- "path": generate_storage_url(str(file_object)),
- }))
-
-def duplicate_nodes(request):
- logging.debug("Entering the copy_node endpoint")
-
- if request.method != 'POST':
- return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")
- else:
- data = json.loads(request.body)
-
- try:
- nodes = data["nodes"]
- sort_order = data.get("sort_order") or 1
- target_parent = data["target_parent"]
- channel_id = data["channel_id"]
- new_nodes = []
-
- with transaction.atomic():
- with ContentNode.objects.disable_mptt_updates():
- for node_data in nodes:
- new_node = _duplicate_node_bulk(node_data['id'], sort_order=sort_order, parent=target_parent, channel_id=channel_id)
- new_nodes.append(new_node.pk)
- sort_order+=1
-
- except KeyError:
- raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
-
- return HttpResponse(json.dumps({
- "success": True,
- "node_ids": " ".join(new_nodes)
- }))
-
-def _duplicate_node_bulk(node, sort_order=None, parent=None, channel_id=None):
- if isinstance(node, int) or isinstance(node, basestring):
- node = ContentNode.objects.get(pk=node)
-
- # keep track of the in-memory models so that we can bulk-create them at the end (for efficiency)
- to_create = {
- "nodes": [],
- "node_files": [],
- "assessment_files": [],
- "assessments": [],
- }
-
- # perform the actual recursive node cloning
- new_node = _duplicate_node_bulk_recursive(node=node, sort_order=sort_order, parent=parent, channel_id=channel_id, to_create=to_create)
-
- # create nodes, one level at a time, starting from the top of the tree (so that we have IDs to pass as "parent" for next level down)
- for node_level in to_create["nodes"]:
- for node in node_level:
- node.parent_id = node.parent.id
- ContentNode.objects.bulk_create(node_level)
- for node in node_level:
- for tag in node._meta.tags_to_add:
- node.tags.add(tag)
-
- # rebuild MPTT tree for this channel (since we're inside "disable_mptt_updates", and bulk_create doesn't trigger rebuild signals anyway)
- ContentNode.objects.partial_rebuild(to_create["nodes"][0][0].tree_id)
-
- ai_node_ids = []
-
- # create each of the assessment items
- for a in to_create["assessments"]:
- a.contentnode_id = a.contentnode.id
- ai_node_ids.append(a.contentnode_id)
- AssessmentItem.objects.bulk_create(to_create["assessments"])
-
- # build up a mapping of contentnode/assessment_id onto assessment item IDs, so we can point files to them correctly after
- aid_mapping = {}
- for a in AssessmentItem.objects.filter(contentnode_id__in=ai_node_ids):
- aid_mapping[a.contentnode_id + ":" + a.assessment_id] = a.id
-
- # create the file objects, for both nodes and assessment items
- for f in to_create["node_files"]:
- f.contentnode_id = f.contentnode.id
- for f in to_create["assessment_files"]:
- f.assessment_item_id = aid_mapping[f.assessment_item.contentnode_id + ":" + f.assessment_item.assessment_id]
- File.objects.bulk_create(to_create["node_files"] + to_create["assessment_files"])
-
- return new_node
-
-def _duplicate_node_bulk_recursive(node, sort_order, parent, channel_id, to_create, level=0):
-
- if isinstance(node, int) or isinstance(node, basestring):
- node = ContentNode.objects.get(pk=node)
-
- if isinstance(parent, int) or isinstance(parent, basestring):
- parent = ContentNode.objects.get(pk=parent)
-
- # clone the model (in-memory) and update the fields on the cloned model
- new_node = copy.copy(node)
- new_node.id = None
- new_node.tree_id = parent.tree_id
- new_node.parent = parent
- new_node.sort_order = sort_order or node.sort_order
- new_node.changed = True
- new_node.cloned_source = node
- new_node.source_channel_id = node.get_channel().id if node.get_channel() else None
- new_node.node_id = uuid.uuid4().hex
- new_node.source_node_id = node.node_id
-
- # store the new unsaved model in a list, at the appropriate level, for later creation
- while len(to_create["nodes"]) <= level:
- to_create["nodes"].append([])
- to_create["nodes"][level].append(new_node)
-
- # find or create any tags that are needed, and store them under _meta on the node so we can add them to it later
- new_node._meta.tags_to_add = []
- for tag in node.tags.all():
- new_tag, is_new = ContentTag.objects.get_or_create(
- tag_name=tag.tag_name,
- channel_id=channel_id,
- )
- new_node._meta.tags_to_add.append(new_tag)
-
- # clone the file objects for later saving
- for fobj in node.files.all():
- f = duplicate_file(fobj, node=new_node, save=False)
- to_create["node_files"].append(f)
-
- # copy assessment item objects, and associated files
- for aiobj in node.assessment_items.prefetch_related("files").all():
- aiobj_copy = copy.copy(aiobj)
- aiobj_copy.id = None
- aiobj_copy.contentnode = new_node
- to_create["assessments"].append(aiobj_copy)
- for fobj in aiobj.files.all():
- f = duplicate_file(fobj, assessment_item=aiobj_copy, save=False)
- to_create["assessment_files"].append(f)
-
- # recurse down the tree and clone the children
- for child in node.children.all():
- _duplicate_node_bulk_recursive(node=child, sort_order=None, parent=new_node, channel_id=channel_id, to_create=to_create, level=level+1)
-
- return new_node
-
-def move_nodes(request):
- logging.debug("Entering the move_nodes endpoint")
-
- if request.method != 'POST':
- return HttpResponseBadRequest("Only POST requests are allowed on this endpoint.")
- else:
- data = json.loads(request.body)
-
- try:
- nodes = data["nodes"]
- target_parent = ContentNode.objects.get(pk=data["target_parent"])
- channel_id = data["channel_id"]
- min_order = data.get("min_order") or 0
- max_order = data.get("max_order") or min_order + len(nodes)
-
- except KeyError:
- raise ObjectDoesNotExist("Missing attribute from data: {}".format(data))
-
- all_ids = []
- with transaction.atomic():
- for n in nodes:
- min_order = min_order + float(max_order - min_order) / 2
- node = ContentNode.objects.get(pk=n['id'])
- _move_node(node, parent=target_parent, sort_order=min_order, channel_id=channel_id)
- all_ids.append(n['id'])
-
- serialized = ContentNodeSerializer(ContentNode.objects.filter(pk__in=all_ids), many=True).data
- return HttpResponse(JSONRenderer().render(serialized))
-
-def _move_node(node, parent=None, sort_order=None, channel_id=None):
- node.parent = parent
- node.sort_order = sort_order
- node.changed = True
- descendants = node.get_descendants(include_self=True)
- node.save()
-
- for tag in ContentTag.objects.filter(tagged_content__in=descendants).distinct():
- # If moving from another channel
- if tag.channel_id != channel_id:
- t, is_new = ContentTag.objects.get_or_create(tag_name=tag.tag_name, channel_id=channel_id)
-
- # Set descendants with this tag to correct tag
- for n in descendants.filter(tags=tag):
- n.tags.remove(tag)
- n.tags.add(t)
-
- return node
-
@csrf_exempt
def publish_channel(request):
logging.debug("Entering the publish_channel endpoint")