teamforge-import.py    956 lines (851 with data), 41.0 kB

import logging
from getpass import getpass
from optparse import OptionParser
from pylons import c
import re
import os
import os.path
from time import mktime
import time
import json
from urlparse import urlparse
from urllib import FancyURLopener
from datetime import datetime
from ConfigParser import ConfigParser
import random
import string
import sqlalchemy
from suds.client import Client
from suds import WebFault
from ming.orm.ormsession import ThreadLocalORMSession
from ming.base import Object
from allura import model as M
from allura.lib import helpers as h
from allura.lib import utils
log = logging.getLogger('teamforge-import')
'''
http://help.collab.net/index.jsp?topic=/teamforge520/reference/api-services.html
http://www.open.collab.net/nonav/community/cif/csfe/50/javadoc/index.html?com/collabnet/ce/soap50/webservices/page/package-summary.html
'''
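
# This script is meant to run inside an Allura/paster context (see the
# log.error hint in main() below), for example:
#   paster script production.ini ../scripts/teamforge-import.py -- --extract-only projID
# The ini path and project id are placeholders; everything after "--" is
# parsed by get_parser() at the bottom of this file.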
options = None
s = None # security token
client = None # main api client
users = {}

def make_client(api_url, app):
    return Client(api_url + app + '?wsdl', location=api_url + app)
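
# For illustration (hostname is a placeholder, as in the --api-url help text):
#   make_client('https://hostname/ce-soap50/services/', 'CollabNet')
# loads https://hostname/ce-soap50/services/CollabNet?wsdl and sends calls to
# the same URL minus the ?wsdl suffix.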

def main():
    global options, s, client, users
    defaults = dict(
        api_url=None,
        attachment_url='/sf/%s/do/%s/',
        default_wiki_text='PRODUCT NAME HERE',
        username=None,
        password=None,
        output_dir='teamforge-export/',
        list_project_ids=False,
        neighborhood=None,
        neighborhood_shortname=None,
        skip_frs_download=False,
        skip_unsupported_check=False)
    optparser = get_parser(defaults)
    options, project_ids = optparser.parse_args()
    if options.config_file:
        config = ConfigParser()
        config.read(options.config_file)
        defaults.update(
            (k, eval(v)) for k, v in config.items('teamforge-import'))
        optparser = get_parser(defaults)
        options, project_ids = optparser.parse_args()
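    # A sketch of the config file read above (hypothetical values; note each
    # value is passed through eval(), so strings must be quoted):
    #
    #   [teamforge-import]
    #   api_url = 'https://hostname/ce-soap50/services/'
    #   username = 'admin'
    #   output_dir = 'teamforge-export/'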

    # neither specified, so do both
    if not options.extract and not options.load:
        options.extract = True
        options.load = True

    if options.extract:
        client = make_client(options.api_url, 'CollabNet')
        api_v = client.service.getApiVersion()
        if not api_v.startswith('5.4.'):
            log.warning('Unexpected API Version %s. May not work correctly.' % api_v)
        s = client.service.login(options.username, options.password or getpass('Password: '))
        teamforge_v = client.service.getVersion(s)
        if not teamforge_v.startswith('5.4.'):
            log.warning('Unexpected TeamForge Version %s. May not work correctly.' % teamforge_v)

    if options.load:
        if not options.neighborhood:
            log.error('You must specify a neighborhood when loading')
            return
        try:
            nbhd = M.Neighborhood.query.get(name=options.neighborhood)
        except:
            log.exception('error querying mongo')
            log.error('This should be run as "paster script production.ini ../scripts/teamforge-import.py -- ...options.."')
            return
        assert nbhd

    if not project_ids:
        if not options.extract:
            log.error('You must specify project ids')
            return
        projects = client.service.getProjectList(s)
        project_ids = [p.id for p in projects.dataRows]

    if options.list_project_ids:
        print ' '.join(project_ids)
        return

    if not os.path.exists(options.output_dir):
        os.makedirs(options.output_dir)

    for pid in project_ids:
        if options.extract:
            try:
                project = client.service.getProjectData(s, pid)
                log.info('Project: %s %s %s' % (project.id, project.title, project.path))
                out_dir = os.path.join(options.output_dir, project.id)
                if not os.path.exists(out_dir):
                    os.mkdir(out_dir)
                get_project(project)
                get_files(project)
                get_homepage_wiki(project)
                get_discussion(project)
                get_news(project)
                if not options.skip_unsupported_check:
                    check_unsupported_tools(project)
                with open(os.path.join(options.output_dir, 'users.json'), 'w') as user_file:
                    json.dump(users, user_file, default=str)
            except:
                log.exception('Error extracting %s' % pid)
        if options.load:
            try:
                project = create_project(pid, nbhd)
            except:
                log.exception('Error creating %s' % pid)

def load_users():
    '''load the users data from file, if it hasn't been already'''
    global users
    user_filename = os.path.join(options.output_dir, 'users.json')
    if not users and os.path.exists(user_filename):
        with open(user_filename) as user_file:
            users = json.load(user_file, object_hook=Object)  # Object for attribute access

def save_user(usernames):
    if isinstance(usernames, basestring):
        usernames = [usernames]

    load_users()

    for username in usernames:
        if username not in users:
            user_data = client.service.getUserData(s, username)
            users[username] = Object(user_data)
            if users[username].status != 'Active':
                log.warn('user: %s status: %s' % (username, users[username].status))

def get_project(project):
    global client
    cats = make_client(options.api_url, 'CategorizationApp')

    data = client.service.getProjectData(s, project.id)
    access_level = {1: 'public', 4: 'private', 3: 'gated community'}[
        client.service.getProjectAccessLevel(s, project.id)
    ]
    admins = client.service.listProjectAdmins(s, project.id).dataRows
    members = client.service.getProjectMemberList(s, project.id).dataRows
    groups = client.service.getProjectGroupList(s, project.id).dataRows
    categories = cats.service.getProjectCategories(s, project.id).dataRows
    save(json.dumps(dict(
            data=dict(data),
            access_level=access_level,
            admins=map(dict, admins),
            members=map(dict, members),
            groups=map(dict, groups),
            categories=map(dict, categories),
        ), default=str),
        project, project.id + '.json')

    if len(groups):
        log.warn('Project has groups %s' % groups)
    for u in admins:
        if u.status != 'active':
            log.warn('inactive admin %s' % u)
        if u.superUser:
            log.warn('super user admin %s' % u)

    save_user(data.createdBy)
    save_user(u.userName for u in admins)
    save_user(u.userName for u in members)

def get_user(orig_username):
    'returns an allura User object'
    sf_username = orig_username.replace('_', '-').lower()
    # FIXME username translation is hardcoded here:
    sf_username = dict(
        rlevy='ramilevy',
        mkeisler='mkeisler',
        bthale='bthale',
        mmuller='mattjustmull',
        MalcolmDwyer='slagheap',
        tjyang='tjyang',
        manaic='maniac76',
        srinid='cnudav',
        es='est016',
        david_peyer='david-mmi',
        okruse='ottokruse',
        jvp='jvpmoto',
        dmorelli='dmorelli',
    ).get(sf_username, sf_username + '-mmi')

    if len(sf_username) > 15:
        adjusted_username = sf_username[0:15 - 4] + '-mmi'
        log.error('invalid sf_username length: %s  Changing it to %s' % (sf_username, adjusted_username))
        sf_username = adjusted_username
    u = M.User.by_username(sf_username)

    if not u:
        load_users()
        user = users[orig_username]
        if user.status != 'Active':
            log.warn('Inactive user %s %s' % (orig_username, user.status))
        if not 3 <= len(user.fullName) <= 32:
            raise Exception('invalid fullName length: %s' % user.fullName)
        if '@' not in user.email:
            raise Exception('invalid email: %s' % user.email)

        # FIXME: hardcoded SFX integration
        from sfx.model import tables as T
        nu = T.users.insert()
        nu.execute(user_name=sf_username,
                   email=user.email.lower(),
                   realname=user.fullName,
                   status='A' if user.status == 'Active' else 'D',
                   language=275,  # english trove id
                   timezone=user.timeZone,
                   user_pw=''.join(random.sample(string.printable, 32)),
                   unix_pw=''.join(random.sample(string.printable, 32)),
                   user_pw_modtime=int(time.time()),
                   mail_siteupdates=0,
                   add_date=int(time.time()),
                   )
        user_id = sqlalchemy.select([T.users.c.user_id],
                                    T.users.c.user_name == sf_username).execute().fetchone().user_id
        npref = T.user_preferences.insert()
        npref.execute(user_id=user_id, preference_name='country', preference_value='US')
        npref.execute(user_id=user_id, preference_name='opt_research', preference_value=0)
        npref.execute(user_id=user_id, preference_name='opt_thirdparty', preference_value=0)

        new_audit = T.audit_trail_user.insert()
        new_audit.execute(
            date=int(time.time()),
            username='nobody',
            ip_address='(imported)',
            operation_resource=user_id,
            operation='%s user account created by TeamForge import script' % user.status,
            operation_target='',
        )

        u = M.User.by_username(sf_username)
    assert u
    return u

def convert_project_shortname(teamforge_path):
    'convert from TeamForge to SF, and validate early'
    tf_shortname = teamforge_path.split('.')[-1]
    sf_shortname = tf_shortname.replace('_', '-')
    # FIXME hardcoded translations
    sf_shortname = {
        'i1': 'motorola-i1',
        'i9': 'motorola-i9',
        'devplatformforocap': 'ocap-dev-pltfrm',
    }.get(sf_shortname, sf_shortname)
    if not 3 <= len(sf_shortname) <= 15:
        raise ValueError('Project name length must be between 3 & 15, inclusive: %s (%s)' %
                         (sf_shortname, len(sf_shortname)))
    return sf_shortname
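
# For illustration (assuming no hardcoded translation applies):
#   convert_project_shortname('projects.my_project') -> 'my-project'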

# FIXME hardcoded
skip_perms_usernames = set([
    'faisal_saeed','dsarkisian','debonairamit','nishanthiremath','Bhuvnesh','bluetooth','cnkurzke','makow2','jannes1','Joel_Hegberg','Farroc','brian_chen','eirikur',
    'dmitry_flyorov','bipingm','MornayJo','ibv','b_weisshaar','k9srb','johnmmills','a_gomolitsky','filim','kapoor','ljzegers','jrukes','dwilson9','jlin','quickie',
    'johnbell','nnikolenko','Gaetan','Giannetta','Katia','jackhan','jacobwangus','adwankar','dinobrusco','qbarnes','ilmojung','clifford_chan','nbaig','fhutchi1',
    'rinofarina','baiyanbin','muralidhar','duanyiruo','bredding','mkolkey','manvith','nanduk','engyihan','deepsie','dabon','dino_jiang','mattrose','peter_j_wilhelm',
    'emx2500','jmcguire','lfilimowski','guruppandit','abhilashisme','edwinhm','rabbi','ferrans','guna','kevin_robinson','adathiruthi','kochen','onehap','kalanithi',
    'jamesn','obu001','chetanv','Avinash','HugoBoss','Han_Wei','mhooper','g16872','mfcarignano','jim_burke','kevin','arunkarra','adam_feng','pavan_scm','kostya_katz',
    'ppazderka','eileenzhuang','pyammine','judyho','ashoykh','rdemento','ibrahim','min_wang','arvind_setlur','moorthy_karthik','daniel_nelson','dms','esnmurthy',
    'rasa_bonyadlou','prashantjoshi','edkeating','billsaez','cambalindo','jims','bozkoyun','andry_deltsov','bpowers','manuel_milli','maryparsons','spriporov','yutianli',
    'xiebin','tnemeth1','udayaps','zzzzuser','timberger','sbarve1','zarman','rwallace67','thangavelu_arum','yuhuaixie','tingup','sekchai','sasanplus','rupal','sebastien_hertz',
    'sab8123','rony_lim','slava_kirillin','smwest','wendydu_yq','sco002','RonFred','spatnala','vd','Sunny','tthompson','sunijams','slaw','rodovich','zhangqingqi82','venki',
    'yuntaom','xiaojin','walterciocosta','straus','Thomas','stupka','wangyu','yaowang','wisekb','tyler_louie','smartgarfield','shekar_mahalingam',
    'venkata_akella','v_yellapragada','vavasthi','rpatel','zhengfang','sweetybala','vap','sergey','ymhuang','spatel78745'
])

def create_project(pid, nbhd):
    M.session.artifact_orm_session._get().skip_mod_date = True
    data = loadjson(pid, pid + '.json')
    # pprint(data)
    log.info('Loading: %s %s %s' % (pid, data.data.title, data.data.path))
    shortname = convert_project_shortname(data.data.path)

    project = M.Project.query.get(shortname=shortname)
    if not project:
        private = (data.access_level == 'private')
        log.debug('Creating %s private=%s' % (shortname, private))
        project = nbhd.register_project(shortname,
                                        get_user(data.data.createdBy),
                                        private_project=private)
    project.notifications_disabled = True
    project.name = data.data.title
    project.short_description = data.data.description
    project.last_updated = datetime.strptime(data.data.lastModifiedDate, '%Y-%m-%d %H:%M:%S')
    M.main_orm_session.flush(project)
    # TODO: push last_updated to gutenberg?
    # TODO: try to set createdDate?

    role_admin = M.ProjectRole.by_name('Admin', project)
    admin_usernames = set()
    for admin in data.admins:
        if admin.userName in skip_perms_usernames:
            continue
        admin_usernames.add(admin.userName)
        user = get_user(admin.userName)
        c.user = user
        pr = user.project_role(project)
        pr.roles = [role_admin._id]
        ThreadLocalORMSession.flush_all()

    role_developer = M.ProjectRole.by_name('Developer', project)
    for member in data.members:
        if member.userName in skip_perms_usernames:
            continue
        if member.userName in admin_usernames:
            continue
        user = get_user(member.userName)
        pr = user.project_role(project)
        pr.roles = [role_developer._id]
        ThreadLocalORMSession.flush_all()

    project.labels = [cat.path.split('projects/categorization.root.')[1] for cat in data.categories]

    icon_file = 'emsignia-MOBILITY-red.png'
    if 'nsn' in project.labels or 'msi' in project.labels:
        icon_file = 'emsignia-SOLUTIONS-blue.gif'
    if project.icon:
        M.ProjectFile.remove(dict(project_id=project._id, category='icon'))
    with open(os.path.join('..', 'scripts', icon_file)) as fp:
        M.ProjectFile.save_image(
            icon_file, fp, content_type=utils.guess_mime_type(icon_file),
            square=True, thumbnail_size=(48, 48),
            thumbnail_meta=dict(project_id=project._id, category='icon'))
    ThreadLocalORMSession.flush_all()

    dirs = os.listdir(os.path.join(options.output_dir, pid))
    frs_mapping = loadjson(pid, 'frs_mapping.json')

    if 'wiki' in dirs:
        import_wiki(project, pid)
    if not project.app_instance('downloads'):
        project.install_app('Downloads', 'downloads')
    if 'forum' in dirs:
        import_discussion(project, pid, frs_mapping, shortname)
    if 'news' in dirs:
        import_news(project, pid, frs_mapping, shortname)

    project.notifications_disabled = False
    ThreadLocalORMSession.flush_all()
    return project

def import_wiki(project, pid):
    from forgewiki import model as WM

    def upload_attachments(page, pid, beginning):
        dirpath = os.path.join(options.output_dir, pid, 'wiki', beginning)
        if not os.path.exists(dirpath):
            return
        files = os.listdir(dirpath)
        for f in files:
            with open(os.path.join(options.output_dir, pid, 'wiki', beginning, f)) as fp:
                page.attach(f, fp, content_type=utils.guess_mime_type(f))

    pages = os.listdir(os.path.join(options.output_dir, pid, 'wiki'))

    # handle the homepage content
    if 'homepage_text.markdown' in pages:
        home_app = project.app_instance('home')
        h.set_context(project.shortname, 'home')
        # set permissions and config options
        role_admin = M.ProjectRole.by_name('Admin')._id
        role_anon = M.ProjectRole.by_name('*anonymous')._id
        home_app.config.options['show_discussion'] = False
        home_app.config.options['show_left_bar'] = False
        home_app.config.options['show_right_bar'] = False
        home_app.config.acl = [
            M.ACE.allow(role_anon, 'read'),
            M.ACE.allow(role_admin, 'create'),
            M.ACE.allow(role_admin, 'edit'),
            M.ACE.allow(role_admin, 'delete'),
            M.ACE.allow(role_admin, 'moderate'),
            M.ACE.allow(role_admin, 'configure'),
            M.ACE.allow(role_admin, 'admin')]
        p = WM.Page.upsert('Home')
        p.text = wiki2markdown(load(pid, 'wiki', 'homepage_text.markdown'))
        upload_attachments(p, pid, 'homepage')

    if 'HomePage.json' in pages and 'HomePage.markdown' in pages:
        wiki_app = project.app_instance('wiki')
        if not wiki_app:
            wiki_app = project.install_app('Wiki', 'wiki')
        h.set_context(project.shortname, 'wiki')
        # set permissions and config options
        role_admin = M.ProjectRole.by_name('Admin')._id
        role_anon = M.ProjectRole.by_name('*anonymous')._id
        wiki_app.config.options['show_discussion'] = False
        wiki_app.config.options['show_left_bar'] = False
        wiki_app.config.options['show_right_bar'] = False
        wiki_app.config.acl = [
            M.ACE.allow(role_anon, 'read'),
            M.ACE.allow(role_admin, 'create'),
            M.ACE.allow(role_admin, 'edit'),
            M.ACE.allow(role_admin, 'delete'),
            M.ACE.allow(role_admin, 'moderate'),
            M.ACE.allow(role_admin, 'configure'),
            M.ACE.allow(role_admin, 'admin')]

        # make all the wiki pages
        for page in pages:
            ending = page[-5:]
            beginning = page[:-5]
            markdown_file = '%s.markdown' % beginning
            if '.json' == ending and markdown_file in pages:
                page_data = loadjson(pid, 'wiki', page)
                content = load(pid, 'wiki', markdown_file)
                if page == 'HomePage.json':
                    globals = WM.Globals.query.get(app_config_id=wiki_app.config._id)
                    if globals is not None:
                        globals.root = page_data.title
                    else:
                        globals = WM.Globals(app_config_id=wiki_app.config._id, root=page_data.title)
                p = WM.Page.upsert(page_data.title)
                p.viewable_by = ['all']
                p.text = wiki2markdown(content)
                # upload attachments
                upload_attachments(p, pid, beginning)
                if not p.history().first():
                    p.commit()

    ThreadLocalORMSession.flush_all()

def import_discussion(project, pid, frs_mapping, sf_project_shortname):
    from forgediscussion import model as DM

    discuss_app = project.app_instance('discussion')
    if not discuss_app:
        discuss_app = project.install_app('Discussion', 'discussion')
    h.set_context(project.shortname, 'discussion')

    # set permissions and config options
    role_admin = M.ProjectRole.by_name('Admin')._id
    role_developer = M.ProjectRole.by_name('Developer')._id
    role_auth = M.ProjectRole.by_name('*authenticated')._id
    role_anon = M.ProjectRole.by_name('*anonymous')._id
    discuss_app.config.acl = [
        M.ACE.allow(role_anon, 'read'),
        M.ACE.allow(role_auth, 'post'),
        M.ACE.allow(role_auth, 'unmoderated_post'),
        M.ACE.allow(role_developer, 'moderate'),
        M.ACE.allow(role_admin, 'configure'),
        M.ACE.allow(role_admin, 'admin')]
    ThreadLocalORMSession.flush_all()

    DM.Forum.query.remove(dict(app_config_id=discuss_app.config._id, shortname='general'))

    forums = os.listdir(os.path.join(options.output_dir, pid, 'forum'))
    for forum in forums:
        ending = forum[-5:]
        forum_name = forum[:-5]
        if '.json' == ending and forum_name in forums:
            forum_data = loadjson(pid, 'forum', forum)
            fo = DM.Forum.query.get(shortname=forum_name, app_config_id=discuss_app.config._id)
            if not fo:
                fo = DM.Forum(app_config_id=discuss_app.config._id, shortname=forum_name)
            fo.name = forum_data.title
            fo.description = forum_data.description
            fo_num_topics = 0
            fo_num_posts = 0

            topics = os.listdir(os.path.join(options.output_dir, pid, 'forum', forum_name))
            for topic in topics:
                ending = topic[-5:]
                topic_name = topic[:-5]
                if '.json' == ending and topic_name in topics:
                    fo_num_topics += 1
                    topic_data = loadjson(pid, 'forum', forum_name, topic)
                    to = DM.ForumThread.query.get(
                        subject=topic_data.title,
                        discussion_id=fo._id,
                        app_config_id=discuss_app.config._id)
                    if not to:
                        to = DM.ForumThread(
                            subject=topic_data.title,
                            discussion_id=fo._id,
                            app_config_id=discuss_app.config._id)
                    to_num_replies = 0
                    oldest_post = None
                    newest_post = None

                    posts = sorted(os.listdir(os.path.join(options.output_dir, pid, 'forum', forum_name, topic_name)))
                    for post in posts:
                        ending = post[-5:]
                        post_name = post[:-5]
                        if '.json' == ending:
                            to_num_replies += 1
                            post_data = loadjson(pid, 'forum', forum_name, topic_name, post)
                            p = DM.ForumPost.query.get(
                                _id='%s%s@import' % (post_name, str(discuss_app.config._id)),
                                thread_id=to._id,
                                discussion_id=fo._id,
                                app_config_id=discuss_app.config._id)
                            if not p:
                                p = DM.ForumPost(
                                    _id='%s%s@import' % (post_name, str(discuss_app.config._id)),
                                    thread_id=to._id,
                                    discussion_id=fo._id,
                                    app_config_id=discuss_app.config._id)
                            create_date = datetime.strptime(post_data.createdDate, '%Y-%m-%d %H:%M:%S')
                            p.timestamp = create_date
                            p.author_id = str(get_user(post_data.createdByUserName)._id)
                            p.text = convert_post_content(frs_mapping, sf_project_shortname, post_data.content)
                            p.status = 'ok'
                            if post_data.replyToId:
                                p.parent_id = '%s%s@import' % (post_data.replyToId, str(discuss_app.config._id))
                            slug, full_slug = p.make_slugs(parent=p.parent, timestamp=create_date)
                            p.slug = slug
                            p.full_slug = full_slug
                            if oldest_post is None or oldest_post.timestamp > create_date:
                                oldest_post = p
                            if newest_post is None or newest_post.timestamp < create_date:
                                newest_post = p
                            ThreadLocalORMSession.flush_all()
                    to.num_replies = to_num_replies
                    to.first_post_id = oldest_post._id
                    to.last_post_date = newest_post.timestamp
                    to.mod_date = newest_post.timestamp
                    fo_num_posts += to_num_replies
            fo.num_topics = fo_num_topics
            fo.num_posts = fo_num_posts
            ThreadLocalORMSession.flush_all()

def import_news(project, pid, frs_mapping, sf_project_shortname):
    from forgeblog import model as BM

    posts = os.listdir(os.path.join(options.output_dir, pid, 'news'))
    if len(posts):
        news_app = project.app_instance('news')
        if not news_app:
            news_app = project.install_app('blog', 'news', mount_label='News')
        h.set_context(project.shortname, 'news')

        # make all the blog posts
        for post in posts:
            if '.json' == post[-5:]:
                post_data = loadjson(pid, 'news', post)
                p = BM.BlogPost.query.get(title=post_data.title, app_config_id=news_app.config._id)
                if not p:
                    p = BM.BlogPost(title=post_data.title, app_config_id=news_app.config._id)
                p.text = convert_post_content(frs_mapping, sf_project_shortname, post_data.body)
                create_date = datetime.strptime(post_data.createdOn, '%Y-%m-%d %H:%M:%S')
                p.timestamp = create_date
                p.mod_date = create_date
                p.state = 'published'
                if not p.slug:
                    p.make_slug()
                if not p.history().first():
                    p.commit()
                    ThreadLocalORMSession.flush_all()
                    M.Thread(discussion_id=p.app_config.discussion_id,
                             ref_id=p.index_id(),
                             subject='%s discussion' % p.title)
                user = get_user(post_data.createdByUsername)
                p.history().first().author = dict(
                    id=user._id,
                    username=user.username,
                    display_name=user.get_pref('display_name'))
                ThreadLocalORMSession.flush_all()

def check_unsupported_tools(project):
    docs = make_client(options.api_url, 'DocumentApp')
    doc_count = 0
    for doc in docs.service.getDocumentFolderList(s, project.id, recursive=True).dataRows:
        if doc.title == 'Root Folder':
            continue
        doc_count += 1
    if doc_count:
        log.warn('Migrating documents is not supported, but found %s docs' % doc_count)

    scm = make_client(options.api_url, 'ScmApp')
    for repo in scm.service.getRepositoryList(s, project.id).dataRows:
        log.warn('Migrating SCM repos is not supported, but found %s' % repo.repositoryPath)

    tasks = make_client(options.api_url, 'TaskApp')
    task_count = len(tasks.service.getTaskList(s, project.id, filters=None).dataRows)
    if task_count:
        log.warn('Migrating tasks is not supported, but found %s tasks' % task_count)

    tracker = make_client(options.api_url, 'TrackerApp')
    tracker_count = len(tracker.service.getArtifactList(s, project.id, filters=None).dataRows)
    if tracker_count:
        log.warn('Migrating trackers is not supported, but found %s tracker artifacts' % tracker_count)

def load(project_id, *paths):
    in_file = os.path.join(options.output_dir, project_id, *paths)
    with open(in_file) as input:
        content = input.read()
    return content

def loadjson(*args):
    # Object for attribute access
    return json.loads(load(*args), object_hook=Object)
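
# Object (from ming.base) gives attribute-style access to the parsed JSON,
# e.g. loadjson(pid, pid + '.json').data.title rather than [...]['data']['title'].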

def save(content, project, *paths):
    out_file = os.path.join(options.output_dir, project.id, *paths)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))
    with open(out_file, 'w') as out:
        out.write(content)

class StatusCheckingURLopener(FancyURLopener):
    def http_error_default(self, url, fp, errcode, errmsg, headers):
        raise Exception(errcode)

statusCheckingURLopener = StatusCheckingURLopener()

def download_file(tool, url_path, *filepaths):
    if tool == 'wiki':
        action = 'viewAttachment'
    elif tool == 'frs':
        action = 'downloadFile'
    else:
        raise ValueError('tool %s not supported' % tool)
    action_url = options.attachment_url % (tool, action)

    out_file = os.path.join(options.output_dir, *filepaths)
    if not os.path.exists(os.path.dirname(out_file)):
        os.makedirs(os.path.dirname(out_file))

    hostname = urlparse(options.api_url).hostname
    scheme = urlparse(options.api_url).scheme
    url = scheme + '://' + hostname + action_url + url_path
    log.debug('fetching %s' % url)
    statusCheckingURLopener.retrieve(url, out_file)
    return out_file
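
# For illustration, with the default --attachment-url of '/sf/%s/do/%s/'
# ('projects.foo' and 'logo.png' are hypothetical):
#   download_file('wiki', 'projects.foo/wiki/logo.png', pid, 'wiki', 'homepage', 'logo.png')
# fetches https://<api-url host>/sf/wiki/do/viewAttachment/projects.foo/wiki/logo.png
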
bracket_macro = re.compile(r'\[(.*?)\]')
h1 = re.compile(r'^!!!', re.MULTILINE)
h2 = re.compile(r'^!!', re.MULTILINE)
h3 = re.compile(r'^!', re.MULTILINE)
re_stats = re.compile(r'#+ .* [Ss]tatistics\n+(.*\[sf:.*?Statistics\].*)+')

def wiki2markdown(markup):
    '''
    Partial implementation of http://help.collab.net/index.jsp?topic=/teamforge520/reference/wiki-wikisyntax.html
    '''
    def bracket_handler(matchobj):
        snippet = matchobj.group(1)
        ext = snippet.rsplit('.')[-1].lower()
        # TODO: support [foo|bar.jpg]
        if snippet.startswith('sf:'):
            # can't handle these macros
            return matchobj.group(0)
        elif ext in ('jpg', 'gif', 'png'):
            filename = snippet.split('/')[-1]
            return '[[img src=%s]]' % filename
        elif '|' in snippet:
            text, link = snippet.split('|', 1)
            return '[%s](%s)' % (text, link)
        else:
            # regular link
            return '<%s>' % snippet

    markup = bracket_macro.sub(bracket_handler, markup)
    markup = h1.sub('#', markup)
    markup = h2.sub('##', markup)
    markup = h3.sub('###', markup)
    markup = re_stats.sub('', markup)
    return markup
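
# A small worked example (see also test_convert_markup at the bottom of this file):
#   wiki2markdown('!!Release Notes\n[SourceForge |http://www.sf.net]')
#     -> '##Release Notes\n[SourceForge ](http://www.sf.net)'
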
re_rel = re.compile(r'\b(rel\d+)\b')
def convert_post_content(frs_mapping, sf_project_shortname, text):
    def rel_handler(matchobj):
        relno = matchobj.group(1)
        path = frs_mapping.get(relno)
        if path:
            return '<a href="/projects/%s/files/%s">%s</a>' % (
                sf_project_shortname, path, path)
        else:
            return relno
    text = re_rel.sub(rel_handler, text)
    return text

def find_image_references(markup):
    'yields filenames'
    for matchobj in bracket_macro.finditer(markup):
        snippet = matchobj.group(1)
        ext = snippet.rsplit('.')[-1].lower()
        if ext in ('jpg', 'gif', 'png'):
            yield snippet
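
# e.g. find_image_references('[$ProjectHome/logo.png] and [http://example.com/page]')
# yields only '$ProjectHome/logo.png'; the plain link has no image extension.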

def get_news(project):
    '''
    Extracts news posts
    '''
    app = make_client(options.api_url, 'NewsApp')

    # find the news posts
    posts = app.service.getNewsPostList(s, project.id)
    for post in posts.dataRows:
        save(json.dumps(dict(post), default=str), project, 'news', post.id + '.json')
        save_user(post.createdByUsername)

def get_discussion(project):
    '''
    Extracts discussion forums and posts
    '''
    app = make_client(options.api_url, 'DiscussionApp')

    # find the forums
    forums = app.service.getForumList(s, project.id)
    for forum in forums.dataRows:
        forumname = forum.path.split('.')[-1]
        log.info('Retrieving data for forum: %s' % forumname)
        save(json.dumps(dict(forum), default=str), project, 'forum', forumname + '.json')
        # topics in this forum
        topics = app.service.getTopicList(s, forum.id)
        for topic in topics.dataRows:
            save(json.dumps(dict(topic), default=str), project, 'forum', forumname, topic.id + '.json')
            # posts in this topic
            posts = app.service.getPostList(s, topic.id)
            for post in posts.dataRows:
                save(json.dumps(dict(post), default=str), project, 'forum', forumname, topic.id, post.id + '.json')
                save_user(post.createdByUserName)

def get_homepage_wiki(project):
    '''
    Extracts home page and wiki pages
    '''
    wiki = make_client(options.api_url, 'WikiApp')

    pages = {}
    wiki_pages = wiki.service.getWikiPageList(s, project.id)
    for wiki_page in wiki_pages.dataRows:
        wiki_page = wiki.service.getWikiPageData(s, wiki_page.id)
        pagename = wiki_page.path.split('/')[-1]
        save(json.dumps(dict(wiki_page), default=str), project, 'wiki', pagename + '.json')
        if not wiki_page.wikiText:
            log.debug('skip blank wiki page %s' % wiki_page.path)
            continue
        pages[pagename] = wiki_page.wikiText

    # PageApp does not provide a useful way to determine the Project Home special wiki page,
    # so use some heuristics
    homepage = None
    if '$ProjectHome' in pages and options.default_wiki_text not in pages['$ProjectHome']:
        homepage = pages.pop('$ProjectHome')
    elif 'HomePage' in pages and options.default_wiki_text not in pages['HomePage']:
        homepage = pages.pop('HomePage')
    elif '$ProjectHome' in pages:
        homepage = pages.pop('$ProjectHome')
    elif 'HomePage' in pages:
        homepage = pages.pop('HomePage')
    else:
        log.warn('did not find homepage')

    if homepage:
        save(homepage, project, 'wiki', 'homepage_text.markdown')
        for img_ref in find_image_references(homepage):
            filename = img_ref.split('/')[-1]
            download_file('wiki', project.path + '/wiki/' + img_ref, project.id, 'wiki', 'homepage', filename)

    for path, text in pages.iteritems():
        if options.default_wiki_text in text:
            log.debug('skipping default wiki page %s' % path)
        else:
            save(text, project, 'wiki', path + '.markdown')
            for img_ref in find_image_references(text):
                filename = img_ref.split('/')[-1]
                download_file('wiki', project.path + '/wiki/' + img_ref, project.id, 'wiki', path, filename)

def _dir_sql(created_on, project, dir_name, rel_path):
    assert options.neighborhood_shortname
    if not rel_path:
        parent_directory = "'1'"
    else:
        parent_directory = "(SELECT pfs_path FROM pfs_path WHERE path_name = '%s/')" % rel_path
    sql = """
    UPDATE pfs
    SET file_crtime = '%s'
    WHERE source_pk = (SELECT project.project FROM project WHERE project.project_name = '%s.%s')
    AND source_table = 'project'
    AND pfs_type = 'd'
    AND pfs_name = '%s'
    AND parent_directory = %s;
    """ % (created_on, convert_project_shortname(project.path), options.neighborhood_shortname, dir_name, parent_directory)
    return sql
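
# For example (all values illustrative), _dir_sql(1273000000, project, 'Release 1.0', 'pkg1')
# renders roughly:
#   UPDATE pfs SET file_crtime = '1273000000'
#   WHERE source_pk = (SELECT project.project FROM project
#                      WHERE project.project_name = '<shortname>.<neighborhood_shortname>')
#     AND source_table = 'project' AND pfs_type = 'd' AND pfs_name = 'Release 1.0'
#     AND parent_directory = (SELECT pfs_path FROM pfs_path WHERE path_name = 'pkg1/');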

def get_files(project):
    frs = make_client(options.api_url, 'FrsApp')
    valid_pfs_filename = re.compile(r'(?![. ])[-_ +.,=#~@!()\[\]a-zA-Z0-9]+(?<! )$')
    pfs_output_dir = os.path.join(os.path.abspath(options.output_dir), 'PFS', convert_project_shortname(project.path))
    sql_updates = ''

    def handle_path(obj, prev_path):
        path_component = obj.title.strip().replace('/', ' ').replace('&', '').replace(':', '')
        path = os.path.join(prev_path, path_component)
        if not valid_pfs_filename.match(path_component):
            log.error('Invalid filename: "%s"' % path)
        save(json.dumps(dict(obj), default=str),
             project, 'frs', path + '.json')
        return path

    frs_mapping = {}

    for pkg in frs.service.getPackageList(s, project.id).dataRows:
        pkg_path = handle_path(pkg, '')
        pkg_details = frs.service.getPackageData(s, pkg.id)  # download count
        save(json.dumps(dict(pkg_details), default=str),
             project, 'frs', pkg_path + '_details.json')

        for rel in frs.service.getReleaseList(s, pkg.id).dataRows:
            rel_path = handle_path(rel, pkg_path)
            frs_mapping[rel['id']] = rel_path
            rel_details = frs.service.getReleaseData(s, rel.id)  # download count
            save(json.dumps(dict(rel_details), default=str),
                 project, 'frs', rel_path + '_details.json')

            for file in frs.service.getFrsFileList(s, rel.id).dataRows:
                details = frs.service.getFrsFileData(s, file.id)
                file_path = handle_path(file, rel_path)
                save(json.dumps(dict(file,
                                     lastModifiedBy=details.lastModifiedBy,
                                     lastModifiedDate=details.lastModifiedDate,
                                     ),
                                default=str),
                     project,
                     'frs',
                     file_path + '.json'
                     )
                if not options.skip_frs_download:
                    download_file('frs', rel.path + '/' + file.id, pfs_output_dir, file_path)
                    mtime = int(mktime(details.lastModifiedDate.timetuple()))
                    os.utime(os.path.join(pfs_output_dir, file_path), (mtime, mtime))

            # releases
            created_on = int(mktime(rel.createdOn.timetuple()))
            mtime = int(mktime(rel.lastModifiedOn.timetuple()))
            if os.path.exists(os.path.join(pfs_output_dir, rel_path)):
                os.utime(os.path.join(pfs_output_dir, rel_path), (mtime, mtime))
            sql_updates += _dir_sql(created_on, project, rel.title.strip(), pkg_path)

        # packages
        created_on = int(mktime(pkg.createdOn.timetuple()))
        mtime = int(mktime(pkg.lastModifiedOn.timetuple()))
        if os.path.exists(os.path.join(pfs_output_dir, pkg_path)):
            os.utime(os.path.join(pfs_output_dir, pkg_path), (mtime, mtime))
        sql_updates += _dir_sql(created_on, project, pkg.title.strip(), '')

    # save pfs update sql for this project
    with open(os.path.join(options.output_dir, 'pfs_updates.sql'), 'a') as out:
        out.write('/* %s */' % project.id)
        out.write(sql_updates)

    save(json.dumps(frs_mapping), project, 'frs_mapping.json')
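
# frs_mapping maps TeamForge release ids to package/release paths, e.g.
# {'rel1234': 'Package Title/Release 1.0'} (illustrative values); it is saved
# per-project and used by convert_post_content() to rewrite relNNNN references.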

def get_parser(defaults):
    optparser = OptionParser(
        usage=('%prog [--options] [projID projID projID]\n'
               'If no project ids are given, all projects will be migrated'))
    optparser.set_defaults(**defaults)

    # Command-line-only options
    optparser.add_option(
        '--extract-only', action='store_true', dest='extract',
        help='Store data from the TeamForge API on the local filesystem; do not load into Allura')
    optparser.add_option(
        '--load-only', action='store_true', dest='load',
        help='Load previously-extracted data into Allura')
    optparser.add_option(
        '--config-file', dest='config_file',
        help='Load options from config file')

    # Command-line options with defaults in config file
    optparser.add_option(
        '--api-url', dest='api_url', help='e.g. https://hostname/ce-soap50/services/')
    optparser.add_option(
        '--attachment-url', dest='attachment_url')
    optparser.add_option(
        '--default-wiki-text', dest='default_wiki_text',
        help='used in determining if a wiki page text is default or changed')
    optparser.add_option(
        '-u', '--username', dest='username')
    optparser.add_option(
        '-p', '--password', dest='password')
    optparser.add_option(
        '-o', '--output-dir', dest='output_dir')
    optparser.add_option(
        '--list-project-ids', action='store_true', dest='list_project_ids')
    optparser.add_option(
        '-n', '--neighborhood', dest='neighborhood',
        help='Neighborhood full name, to load in to')
    optparser.add_option(
        '--n-shortname', dest='neighborhood_shortname',
        help='Neighborhood shortname, for PFS extract SQL')
    optparser.add_option(
        '--skip-frs-download', action='store_true', dest='skip_frs_download')
    optparser.add_option(
        '--skip-unsupported-check', action='store_true', dest='skip_unsupported_check')
    return optparser

if __name__ == '__main__':
    logging.basicConfig(level=logging.WARN)
    log.setLevel(logging.DEBUG)
    main()

def test_convert_post_content():
    text = '''rel100? or ?rel101 or rel102 or rel103a or rel104'''
    mapping = dict(
        rel100='rel/100/',
        rel101='rel/101/',
        rel102='rel/102/',
        rel103='rel/103/',
        rel104='rel/104/')
    converted = convert_post_content(mapping, 'foo.bar', text)
    assert 'href="/projects/foo.bar/files/rel/100' in converted, converted
    assert 'href="/projects/foo.bar/files/rel/101' in converted, converted
    assert 'href="/projects/foo.bar/files/rel/102' in converted, converted
    assert 'href="/projects/foo.bar/files/rel/103' not in converted, converted
    assert 'href="/projects/foo.bar/files/rel/104' in converted, converted


def test_convert_markup():
    markup = '''
!this is the first headline
Please note that this project is for distributing, discussing, and supporting the open source software we release.
[http://www.google.com]
[SourceForge |http://www.sf.net]
[$ProjectHome/myimage.jpg]
[$ProjectHome/anotherimage.jpg]
!!! Project Statistics
|[sf:frsStatistics]|[sf:artifactStatistics]|
'''
    new_markup = wiki2markdown(markup)
    assert '\n[[img src=myimage.jpg]]\n[[img src=anotherimage.jpg]]\n' in new_markup
    assert '\n###this is the first' in new_markup
    assert '<http://www.google.com>' in new_markup
    assert '[SourceForge ](http://www.sf.net)' in new_markup
    assert '\n# Project Statistics' not in new_markup
    assert '[sf:frsStatistics]' not in new_markup