--- a/trac-import/TracExport.py
+++ b/trac-import/TracExport.py
@@ -13,19 +13,23 @@
 
 class TracExport(object):
 
     TICKET_URL = '/ticket/%d'
     QUERY_MAX_ID_URL  = '/query?col=id&order=id&desc=1&max=2'
-    QUERY_BY_PAGE_URL = '/query?col=id&order=id&max=100&page=%d'
+    QUERY_BY_PAGE_URL = '/query?col=id&col=time&col=changetime&order=id&max=100&page=%d'
 
     FIELD_MAP = {
         'reporter': 'submitter',
         'owner': 'assigned_to',
     }
 
     def __init__(self, base_url):
         self.base_url = base_url
+        # Contains additional info for a ticket which cannot
+        # be fetched with single-ticket export (create/mod
+        # times are an example).
+        self.ticket_map = {}
 
     def remap_fields(self, dict):
         "Remap fields to adhere to standard taxonomy."
         out = {}
         for k, v in dict.iteritems():
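Annotation: the QUERY_BY_PAGE_URL change above asks Trac's CSV query for the
time and changetime columns alongside id; those extra columns are what feed
ticket_map. A sketch with a made-up ticket (the timestamp shape matches the
25-character '+00:00' format the code asserts on later):

    # Hypothetical CSV row from the new query (columns: id, time, changetime):
    #   123,2010-03-08 16:21:15+00:00,2010-04-01 09:00:00+00:00
    # After enumeration, ticket_map would then hold:
    #   {123: {'date': '2010-03-08T16:21:15Z',
    #          'date_updated': '2010-04-01T09:00:00Z'}}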
@@ -73,10 +77,12 @@
         return res
 
     def get_ticket(self, id):
         t = self.parse_ticket_body(id)
         t['comments'] = self.parse_ticket_comments(id)
+        if id in self.ticket_map:
+            t.update(self.ticket_map[id])
         return t
 
     def get_ticket_ids_csv(self):
         url = self.full_url(self.QUERY_URL, 'csv')
         print url
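Annotation: get_ticket() now overlays the cached per-ticket info on top of the
single-ticket export via dict.update(). A minimal illustration with invented
field values:

    t = {'id': 123, 'summary': 'Some bug', 'comments': []}
    t.update({'date': '2010-03-08T16:21:15Z',
              'date_updated': '2010-04-01T09:00:00Z'})
    # t now carries both the exported fields and the cached timestamps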
@@ -108,17 +114,23 @@
         reader = csv.DictReader(f)
         fields = reader.next()
         print fields
         return int(fields['id'])
 
+    @staticmethod
+    def trac2z_date(s):
+        assert len(s) == 25
+        assert s.endswith('+00:00')
+        return s[0:10] + 'T' + s[11:19] + 'Z'
+
-    def enumerate_ticket_ids(self, page=1):
+    def enumerate_ticket_ids(self, page=1, limit=-1):
         'Go thru ticket list and collect available ticket ids.'
         # We could just do CSV export, which by default dumps entire list
         # Alas, for many busy servers with long ticket list, it will just
         # time out. So, let's paginate it instead.
         res = []
-        while True:
+        while limit != 0:
             url = self.full_url(self.QUERY_BY_PAGE_URL % page, 'csv')
             try:
                 f = self.csvopen(url)
             except urllib2.HTTPError, e:
                 if 'emulated' in e.msg:
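Annotation: the new trac2z_date() helper converts Trac's '+00:00'-suffixed
timestamps to the Zulu form by plain slicing; the asserts pin the expected
25-character input. For example:

    >>> TracExport.trac2z_date('2010-03-08 16:21:15+00:00')
    '2010-03-08T16:21:15Z'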
@@ -126,13 +138,18 @@
                     if 'beyond the number of pages in the query' in body:
                         break
                 raise
             reader = csv.reader(f)
             cols = reader.next()
-            ids = [int(r[0]) for r in reader if r and r[0][0].isdigit()]
-            res += ids
+            for r in reader:
+                if r and r[0].isdigit():
+                    id = int(r[0])
+                    self.ticket_map[id] = {'date': self.trac2z_date(r[1]), 'date_updated': self.trac2z_date(r[2])}
+                    res.append(id)
             page += 1
+            if limit > 0:
+                limit -= 1
 
         return res
 
 class DateJSONEncoder(json.JSONEncoder):
     def default(self, obj):
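Annotation: limit counts query pages, not tickets. The default limit=-1 never
reaches zero, so the loop keeps paginating until Trac reports a page beyond
the query; limit=N stops after N pages of up to 100 ids each. Usage sketch
(the base URL is hypothetical):

    ex = TracExport('http://trac.example.org/myproject')  # hypothetical URL
    first_page = ex.enumerate_ticket_ids(limit=1)  # at most one 100-row page
    everything = ex.enumerate_ticket_ids()         # limit=-1: walk all pages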
@@ -149,9 +166,12 @@
 #    pprint(d)
 #    d = ex.get_ticket(9)
 #    pprint(d)
 #    d = ex.get_max_ticket_id()
 #    d = ex.get_ticket_ids()
+    d = ex.enumerate_ticket_ids(limit=1)
     ids = [3]
     doc = [ex.get_ticket(i) for i in ids]
     print json.dumps(doc, cls=DateJSONEncoder, indent=2)
+#    print d
+#    print ex.ticket_map
 
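Annotation: taken together, the revised __main__ block amounts to the pipeline
below (the script itself still hardcodes ids = [3] while debugging; the base
URL is hypothetical):

    ex = TracExport('http://trac.example.org/myproject')  # hypothetical URL
    ids = ex.enumerate_ticket_ids(limit=1)  # also fills ex.ticket_map
    doc = [ex.get_ticket(i) for i in ids]   # timestamps merged per ticket
    print json.dumps(doc, cls=DateJSONEncoder, indent=2)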