##// END OF EJS Templates
transaction: pass the transaction to 'pending' callback...
Pierre-Yves David -
r23280:b01c491a default
parent child Browse files
Show More
@@ -1,378 +1,378
1 # changelog.py - changelog class for mercurial
1 # changelog.py - changelog class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 import weakref
8 import weakref
9 from node import bin, hex, nullid
9 from node import bin, hex, nullid
10 from i18n import _
10 from i18n import _
11 import util, error, revlog, encoding
11 import util, error, revlog, encoding
12
12
13 _defaultextra = {'branch': 'default'}
13 _defaultextra = {'branch': 'default'}
14
14
15 def _string_escape(text):
15 def _string_escape(text):
16 """
16 """
17 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
17 >>> d = {'nl': chr(10), 'bs': chr(92), 'cr': chr(13), 'nul': chr(0)}
18 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
18 >>> s = "ab%(nl)scd%(bs)s%(bs)sn%(nul)sab%(cr)scd%(bs)s%(nl)s" % d
19 >>> s
19 >>> s
20 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
20 'ab\\ncd\\\\\\\\n\\x00ab\\rcd\\\\\\n'
21 >>> res = _string_escape(s)
21 >>> res = _string_escape(s)
22 >>> s == res.decode('string_escape')
22 >>> s == res.decode('string_escape')
23 True
23 True
24 """
24 """
25 # subset of the string_escape codec
25 # subset of the string_escape codec
26 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
26 text = text.replace('\\', '\\\\').replace('\n', '\\n').replace('\r', '\\r')
27 return text.replace('\0', '\\0')
27 return text.replace('\0', '\\0')
28
28
def decodeextra(text):
    """Decode an encoded extra dict from changelog text.

    Returns a dict seeded from _defaultextra (so 'branch' is always
    present) with each '\\0'-separated 'key:value' entry unescaped.

    >>> sorted(decodeextra(encodeextra({'foo': 'bar', 'baz': chr(0) + '2'})
    ...                    ).iteritems())
    [('baz', '\\x002'), ('branch', 'default'), ('foo', 'bar')]
    >>> sorted(decodeextra(encodeextra({'foo': 'bar',
    ...                                 'baz': chr(92) + chr(0) + '2'})
    ...                    ).iteritems())
    [('baz', '\\\\\\x002'), ('branch', 'default'), ('foo', 'bar')]
    """
    extra = _defaultextra.copy()
    for l in text.split('\0'):
        if l:
            if '\\0' in l:
                # fix up \0 without getting into trouble with \\0:
                # temporarily tag literal backslash pairs with a newline
                # (which cannot appear in an entry), substitute \0, then
                # drop the tag
                l = l.replace('\\\\', '\\\\\n')
                l = l.replace('\\0', '\0')
                l = l.replace('\n', '')
            k, v = l.decode('string_escape').split(':', 1)
            extra[k] = v
    return extra
50
50
def encodeextra(d):
    """Encode an extra dict into the '\\0'-separated changelog form.

    Keys must be sorted to produce a deterministic changelog entry;
    each 'key:value' pair is escaped with _string_escape.
    """
    items = [_string_escape('%s:%s' % (k, d[k])) for k in sorted(d)]
    return "\0".join(items)
55
55
def stripdesc(desc):
    """strip trailing whitespace and leading and trailing empty lines"""
    return '\n'.join([l.rstrip() for l in desc.splitlines()]).strip('\n')
59
59
class appender(object):
    '''the changelog index must be updated last on disk, so we use this class
    to delay writes to it'''

    def __init__(self, vfs, name, mode, buf):
        # buf: shared list that accumulates pending (not yet flushed) writes
        self.data = buf
        fp = vfs(name, mode)
        self.fp = fp
        # virtual offset spans the real file plus the buffered data
        self.offset = fp.tell()
        self.size = vfs.fstat(fp).st_size

    def end(self):
        """return the virtual end offset (real size + buffered bytes)"""
        return self.size + len("".join(self.data))

    def tell(self):
        return self.offset

    def flush(self):
        # writes are buffered in self.data; nothing to flush to disk
        pass

    def close(self):
        self.fp.close()

    def seek(self, offset, whence=0):
        '''virtual file offset spans real file and data'''
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            self.offset = self.end() + offset
        if self.offset < self.size:
            # only reposition the real file when the virtual offset
            # still falls inside it
            self.fp.seek(self.offset)

    def read(self, count=-1):
        '''only trick here is reads that span real file and data'''
        ret = ""
        if self.offset < self.size:
            s = self.fp.read(count)
            ret = s
            self.offset += len(s)
            if count > 0:
                count -= len(s)
        if count != 0:
            # remainder comes out of the pending buffer; coalesce the
            # buffered chunks into a single string first
            doff = self.offset - self.size
            self.data.insert(0, "".join(self.data))
            del self.data[1:]
            s = self.data[0][doff:doff + count]
            self.offset += len(s)
            ret += s
        return ret

    def write(self, s):
        # writes never touch the real file; they are queued in the buffer
        self.data.append(str(s))
        self.offset += len(s)
111
111
112 def _divertopener(opener, target):
112 def _divertopener(opener, target):
113 """build an opener that writes in 'target.a' instead of 'target'"""
113 """build an opener that writes in 'target.a' instead of 'target'"""
114 def _divert(name, mode='r'):
114 def _divert(name, mode='r'):
115 if name != target:
115 if name != target:
116 return opener(name, mode)
116 return opener(name, mode)
117 return opener(name + ".a", mode)
117 return opener(name + ".a", mode)
118 return _divert
118 return _divert
119
119
def _delayopener(opener, target, buf):
    """build an opener that stores chunks in 'buf' instead of 'target'"""
    def _delay(name, mode='r'):
        # any file other than the target passes straight through
        if name != target:
            return opener(name, mode)
        return appender(opener, name, mode, buf)
    return _delay
127
127
class changelog(revlog.revlog):
    """The changelog revlog, with support for filtered revisions and for
    delaying the visibility of index writes until a transaction closes."""

    def __init__(self, opener):
        revlog.revlog.__init__(self, opener, "00changelog.i")
        if self._initempty:
            # changelogs don't benefit from generaldelta
            self.version &= ~revlog.REVLOGGENERALDELTA
            self._generaldelta = False
        self._realopener = opener
        self._delayed = False
        self._delaybuf = None
        self._divert = False
        # revisions hidden from all filtered accessors below
        self.filteredrevs = frozenset()

    def tip(self):
        """filtered version of revlog.tip"""
        for i in xrange(len(self) -1, -2, -1):
            if i not in self.filteredrevs:
                return self.node(i)

    def __iter__(self):
        """filtered version of revlog.__iter__"""
        if len(self.filteredrevs) == 0:
            return revlog.revlog.__iter__(self)

        def filterediter():
            for i in xrange(len(self)):
                if i not in self.filteredrevs:
                    yield i

        return filterediter()

    def revs(self, start=0, stop=None):
        """filtered version of revlog.revs"""
        for i in super(changelog, self).revs(start, stop):
            if i not in self.filteredrevs:
                yield i

    @util.propertycache
    def nodemap(self):
        # XXX need filtering too
        self.rev(self.node(0))
        return self._nodecache

    def hasnode(self, node):
        """filtered version of revlog.hasnode"""
        try:
            i = self.rev(node)
            return i not in self.filteredrevs
        except KeyError:
            return False

    def headrevs(self):
        if self.filteredrevs:
            try:
                return self.index.headrevsfiltered(self.filteredrevs)
            # AttributeError covers non-c-extension environments and
            # old c extensions without filter handling.
            except AttributeError:
                return self._headrevs()

        return super(changelog, self).headrevs()

    def strip(self, *args, **kwargs):
        # XXX make something better than assert
        # We can't expect proper strip behavior if we are filtered.
        assert not self.filteredrevs
        super(changelog, self).strip(*args, **kwargs)

    def rev(self, node):
        """filtered version of revlog.rev"""
        r = super(changelog, self).rev(node)
        if r in self.filteredrevs:
            raise error.FilteredLookupError(hex(node), self.indexfile,
                                            _('filtered node'))
        return r

    def node(self, rev):
        """filtered version of revlog.node"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).node(rev)

    def linkrev(self, rev):
        """filtered version of revlog.linkrev"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).linkrev(rev)

    def parentrevs(self, rev):
        """filtered version of revlog.parentrevs"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).parentrevs(rev)

    def flags(self, rev):
        """filtered version of revlog.flags"""
        if rev in self.filteredrevs:
            raise error.FilteredIndexError(rev)
        return super(changelog, self).flags(rev)

    def delayupdate(self, tr):
        "delay visibility of index updates to other readers"

        if not self._delayed:
            if len(self) == 0:
                # brand new changelog: divert writes to indexfile + '.a'
                # and rename it into place on finalize
                self._divert = True
                if self._realopener.exists(self.indexfile + '.a'):
                    self._realopener.unlink(self.indexfile + '.a')
                self.opener = _divertopener(self._realopener, self.indexfile)
            else:
                # existing changelog: buffer appended data in memory
                self._delaybuf = []
                self.opener = _delayopener(self._realopener, self.indexfile,
                                           self._delaybuf)
        self._delayed = True
        tr.addpending('cl-%i' % id(self), self._writepending)
        trp = weakref.proxy(tr)
        tr.addfinalize('cl-%i' % id(self), lambda: self._finalize(trp))

    def _finalize(self, tr):
        "finalize index updates"
        self._delayed = False
        self.opener = self._realopener
        # move redirected index data back into place
        if self._divert:
            assert not self._delaybuf
            tmpname = self.indexfile + ".a"
            nfile = self.opener.open(tmpname)
            nfile.close()
            self.opener.rename(tmpname, self.indexfile)
        elif self._delaybuf:
            fp = self.opener(self.indexfile, 'a')
            fp.write("".join(self._delaybuf))
            fp.close()
            self._delaybuf = None
        self._divert = False
        # split when we're done
        self.checkinlinesize(tr)

    def readpending(self, file):
        # swap our state for the one recorded in the pending file so
        # hooks observe the unfinalized transaction
        r = revlog.revlog(self.opener, file)
        self.index = r.index
        self.nodemap = r.nodemap
        self._nodecache = r._nodecache
        self._chunkcache = r._chunkcache

    def _writepending(self, tr):
        "create a file containing the unfinalized state for pretxnchangegroup"
        if self._delaybuf:
            # make a temporary copy of the index
            fp1 = self._realopener(self.indexfile)
            fp2 = self._realopener(self.indexfile + ".a", "w")
            fp2.write(fp1.read())
            # add pending data
            fp2.write("".join(self._delaybuf))
            fp2.close()
            # switch modes so finalize can simply rename
            self._delaybuf = None
            self._divert = True
            self.opener = _divertopener(self._realopener, self.indexfile)

        if self._divert:
            return True

        # no pending data was ever written
        return False

    def checkinlinesize(self, tr, fp=None):
        # while delayed, splitting out of inline format must wait for
        # finalize; otherwise defer to revlog
        if not self._delayed:
            revlog.revlog.checkinlinesize(self, tr, fp)

    def read(self, node):
        """
        format used:
        nodeid\n        : manifest node in ascii
        user\n          : user, no \n or \r allowed
        time tz extra\n : date (time is int or float, timezone is int)
                        : extra is metadata, encoded and separated by '\0'
                        : older versions ignore it
        files\n\n       : files modified by the cset, no \n or \r allowed
        (.*)            : comment (free text, ideally utf-8)

        changelog v0 doesn't use extra
        """
        text = self.revision(node)
        if not text:
            return (nullid, "", (0, 0), [], "", _defaultextra)
        last = text.index("\n\n")
        desc = encoding.tolocal(text[last + 2:])
        l = text[:last].split('\n')
        manifest = bin(l[0])
        user = encoding.tolocal(l[1])

        tdata = l[2].split(' ', 2)
        if len(tdata) != 3:
            # no extra metadata present (changelog v0 layout)
            time = float(tdata[0])
            try:
                # various tools did silly things with the time zone field.
                timezone = int(tdata[1])
            except ValueError:
                timezone = 0
            extra = _defaultextra
        else:
            time, timezone = float(tdata[0]), int(tdata[1])
            extra = decodeextra(tdata[2])

        files = l[3:]
        return (manifest, user, (time, timezone), files, desc, extra)

    def add(self, manifest, files, desc, transaction, p1, p2,
                  user, date=None, extra=None):
        # Convert to UTF-8 encoded bytestrings as the very first
        # thing: calling any method on a localstr object will turn it
        # into a str object and the cached UTF-8 string is thus lost.
        user, desc = encoding.fromlocal(user), encoding.fromlocal(desc)

        user = user.strip()
        # An empty username or a username with a "\n" will make the
        # revision text contain two "\n\n" sequences -> corrupt
        # repository since read cannot unpack the revision.
        if not user:
            raise error.RevlogError(_("empty username"))
        if "\n" in user:
            raise error.RevlogError(_("username %s contains a newline")
                                    % repr(user))

        desc = stripdesc(desc)

        if date:
            parseddate = "%d %d" % util.parsedate(date)
        else:
            parseddate = "%d %d" % util.makedate()
        if extra:
            branch = extra.get("branch")
            if branch in ("default", ""):
                # the default branch is implicit; don't store it
                del extra["branch"]
            elif branch in (".", "null", "tip"):
                raise error.RevlogError(_('the name \'%s\' is reserved')
                                        % branch)
        if extra:
            extra = encodeextra(extra)
            parseddate = "%s %s" % (parseddate, extra)
        l = [hex(manifest), user, parseddate] + sorted(files) + ["", desc]
        text = "\n".join(l)
        return self.addrevision(text, transaction, len(self), p1, p2)

    def branchinfo(self, rev):
        """return the branch name and open/close state of a revision

        This function exists because creating a changectx object
        just to access this is costly."""
        extra = self.read(rev)[5]
        return encoding.tolocal(extra.get("branch")), 'close' in extra
@@ -1,429 +1,431
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import errno
15 import errno
16 import error, util
16 import error, util
17
17
18 version = 1
18 version = 1
19
19
def active(func):
    """Decorator: refuse to run a transaction method once the transaction
    has been committed or aborted (self.count == 0)."""
    def _active(self, *args, **kwds):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active
27
27
28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
29 for f, o, _ignore in entries:
29 for f, o, _ignore in entries:
30 if o or not unlink:
30 if o or not unlink:
31 try:
31 try:
32 fp = opener(f, 'a')
32 fp = opener(f, 'a')
33 fp.truncate(o)
33 fp.truncate(o)
34 fp.close()
34 fp.close()
35 except IOError:
35 except IOError:
36 report(_("failed to truncate %s\n") % f)
36 report(_("failed to truncate %s\n") % f)
37 raise
37 raise
38 else:
38 else:
39 try:
39 try:
40 opener.unlink(f)
40 opener.unlink(f)
41 except (IOError, OSError), inst:
41 except (IOError, OSError), inst:
42 if inst.errno != errno.ENOENT:
42 if inst.errno != errno.ENOENT:
43 raise
43 raise
44
44
45 backupfiles = []
45 backupfiles = []
46 for f, b in backupentries:
46 for f, b in backupentries:
47 if b:
47 if b:
48 filepath = opener.join(f)
48 filepath = opener.join(f)
49 backuppath = opener.join(b)
49 backuppath = opener.join(b)
50 try:
50 try:
51 util.copyfile(backuppath, filepath)
51 util.copyfile(backuppath, filepath)
52 backupfiles.append(b)
52 backupfiles.append(b)
53 except IOError:
53 except IOError:
54 report(_("failed to recover %s\n") % f)
54 report(_("failed to recover %s\n") % f)
55 raise
55 raise
56 else:
56 else:
57 try:
57 try:
58 opener.unlink(f)
58 opener.unlink(f)
59 except (IOError, OSError), inst:
59 except (IOError, OSError), inst:
60 if inst.errno != errno.ENOENT:
60 if inst.errno != errno.ENOENT:
61 raise
61 raise
62
62
63 opener.unlink(journal)
63 opener.unlink(journal)
64 backuppath = "%s.backupfiles" % journal
64 backuppath = "%s.backupfiles" % journal
65 if opener.exists(backuppath):
65 if opener.exists(backuppath):
66 opener.unlink(backuppath)
66 opener.unlink(backuppath)
67 for f in backupfiles:
67 for f in backupfiles:
68 opener.unlink(f)
68 opener.unlink(f)
69
69
70 class transaction(object):
70 class transaction(object):
71 def __init__(self, report, opener, journal, after=None, createmode=None,
71 def __init__(self, report, opener, journal, after=None, createmode=None,
72 onclose=None, onabort=None):
72 onclose=None, onabort=None):
73 """Begin a new transaction
73 """Begin a new transaction
74
74
75 Begins a new transaction that allows rolling back writes in the event of
75 Begins a new transaction that allows rolling back writes in the event of
76 an exception.
76 an exception.
77
77
78 * `after`: called after the transaction has been committed
78 * `after`: called after the transaction has been committed
79 * `createmode`: the mode of the journal file that will be created
79 * `createmode`: the mode of the journal file that will be created
80 * `onclose`: called as the transaction is closing, but before it is
80 * `onclose`: called as the transaction is closing, but before it is
81 closed
81 closed
82 * `onabort`: called as the transaction is aborting, but before any files
82 * `onabort`: called as the transaction is aborting, but before any files
83 have been truncated
83 have been truncated
84 """
84 """
85 self.count = 1
85 self.count = 1
86 self.usages = 1
86 self.usages = 1
87 self.report = report
87 self.report = report
88 self.opener = opener
88 self.opener = opener
89 self.after = after
89 self.after = after
90 self.onclose = onclose
90 self.onclose = onclose
91 self.onabort = onabort
91 self.onabort = onabort
92 self.entries = []
92 self.entries = []
93 self.map = {}
93 self.map = {}
94 self.journal = journal
94 self.journal = journal
95 self._queue = []
95 self._queue = []
96 # a dict of arguments to be passed to hooks
96 # a dict of arguments to be passed to hooks
97 self.hookargs = {}
97 self.hookargs = {}
98 self.file = opener.open(self.journal, "w")
98 self.file = opener.open(self.journal, "w")
99
99
100 # a list of ('path', 'backuppath') entries.
100 # a list of ('path', 'backuppath') entries.
101 # if 'backuppath' is empty, no file existed at backup time
101 # if 'backuppath' is empty, no file existed at backup time
102 self._backupentries = []
102 self._backupentries = []
103 self._backupmap = {}
103 self._backupmap = {}
104 self._backupjournal = "%s.backupfiles" % journal
104 self._backupjournal = "%s.backupfiles" % journal
105 self._backupsfile = opener.open(self._backupjournal, 'w')
105 self._backupsfile = opener.open(self._backupjournal, 'w')
106 self._backupsfile.write('%d\n' % version)
106 self._backupsfile.write('%d\n' % version)
107
107
108 if createmode is not None:
108 if createmode is not None:
109 opener.chmod(self.journal, createmode & 0666)
109 opener.chmod(self.journal, createmode & 0666)
110 opener.chmod(self._backupjournal, createmode & 0666)
110 opener.chmod(self._backupjournal, createmode & 0666)
111
111
112 # hold file generations to be performed on commit
112 # hold file generations to be performed on commit
113 self._filegenerators = {}
113 self._filegenerators = {}
114 # hold callbalk to write pending data for hooks
114 # hold callbalk to write pending data for hooks
115 self._pendingcallback = {}
115 self._pendingcallback = {}
116 # True is any pending data have been written ever
116 # True is any pending data have been written ever
117 self._anypending = False
117 self._anypending = False
118 # holds callback to call when writing the transaction
118 # holds callback to call when writing the transaction
119 self._finalizecallback = {}
119 self._finalizecallback = {}
120 # hold callbalk for post transaction close
120 # hold callbalk for post transaction close
121 self._postclosecallback = {}
121 self._postclosecallback = {}
122
122
123 def __del__(self):
123 def __del__(self):
124 if self.journal:
124 if self.journal:
125 self._abort()
125 self._abort()
126
126
127 @active
127 @active
128 def startgroup(self):
128 def startgroup(self):
129 """delay registration of file entry
129 """delay registration of file entry
130
130
131 This is used by strip to delay vision of strip offset. The transaction
131 This is used by strip to delay vision of strip offset. The transaction
132 sees either none or all of the strip actions to be done."""
132 sees either none or all of the strip actions to be done."""
133 self._queue.append([])
133 self._queue.append([])
134
134
@active
def endgroup(self):
    """Flush the most recently opened group of buffered entries.

    Counterpart of ``startgroup``: every entry queued since the matching
    ``startgroup`` call is registered for real, so strip offsets become
    visible all at once.
    """
    for fname, offset, data in self._queue.pop():
        self._addentry(fname, offset, data)
144
144
@active
def add(self, file, offset, data=None):
    """Record the pre-update state of an append-only file.

    Files already tracked (directly or through a backup) are ignored.
    While a group is open the entry is buffered in the group queue
    instead of being registered immediately.
    """
    known = file in self.map or file in self._backupmap
    if known:
        return
    if self._queue:
        self._queue[-1].append((file, offset, data))
    else:
        self._addentry(file, offset, data)
155
155
def _addentry(self, file, offset, data):
    """Register an append-only entry in memory and in the on-disk journal."""
    if file in self.map or file in self._backupmap:
        # already covered, either directly or through a backup
        return
    index = len(self.entries)
    self.entries.append((file, offset, data))
    self.map[file] = index
    # journal the (file, offset) pair so recovery knows where to
    # truncate the file back to
    self.file.write("%s\0%d\n" % (file, offset))
    self.file.flush()
165
165
@active
def addbackup(self, file, hardlink=True, vfs=None):
    """Adds a backup of the file to the transaction

    Calling addbackup() creates a hardlink backup of the specified file
    that is used to recover the file in the event of the transaction
    aborting.

    * `file`: the file path, relative to .hg/store
    * `hardlink`: use a hardlink to quickly create the backup
    """
    if self._queue:
        msg = 'cannot use transaction.addbackup inside "group"'
        raise RuntimeError(msg)

    if file in self.map or file in self._backupmap:
        return
    backupfile = "%s.backup.%s" % (self.journal, file)
    if vfs is None:
        vfs = self.opener
    if not vfs.exists(file):
        # nothing on disk to preserve; record an empty backup name
        # (presumably interpreted downstream as "no backup copy exists")
        backupfile = ''
    else:
        util.copyfiles(vfs.join(file), self.opener.join(backupfile),
                       hardlink=hardlink)

    self._backupentries.append((file, backupfile))
    self._backupmap[file] = len(self._backupentries) - 1
    self._backupsfile.write("%s\0%s\n" % (file, backupfile))
    self._backupsfile.flush()
197
197
@active
def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
    """add a function that generates some files at transaction commit

    The `genfunc` argument is a function capable of generating proper
    content for each entry in the `filenames` tuple.

    At transaction close time, `genfunc` will be called with one file
    object argument per entry in `filenames`.

    The transaction itself is responsible for the backup, creation and
    final write of such files.

    The `genid` argument is used to ensure the same set of files is only
    generated once. A call to `addfilegenerator` for a `genid` already
    present will overwrite the old entry.

    The `order` argument may be used to control the order in which
    multiple generators will be executed.
    """
    # For now, we are unable to do proper backup and restore of custom vfs
    # but for bookmarks that are handled outside this mechanism.
    assert vfs is None or filenames == ('bookmarks',)
    self._filegenerators[genid] = (order, filenames, genfunc, vfs)
222
222
def _generatefiles(self):
    """Run every generator registered via ``addfilegenerator``."""
    # write files registered for generation; sorting the (order, ...)
    # tuples means lower ``order`` values run first
    for entry in sorted(self._filegenerators.values()):
        order, filenames, genfunc, vfs = entry
        if vfs is None:
            vfs = self.opener
        files = []
        try:
            for name in filenames:
                # Some files are already backed up when creating the
                # localrepo. Until this is properly fixed we disable the
                # backup for them.
                if name not in ('phaseroots', 'bookmarks'):
                    self.addbackup(name)
                files.append(vfs(name, 'w', atomictemp=True))
            genfunc(*files)
        finally:
            # close every file that was opened, even if the generator
            # raised part-way through
            for f in files:
                f.close()
242
242
@active
def find(self, file):
    """Return the journal entry for ``file``, or None if untracked.

    Regular entries take precedence over backup entries.
    """
    index = self.map.get(file)
    if index is not None:
        return self.entries[index]
    index = self._backupmap.get(file)
    if index is not None:
        return self._backupentries[index]
    return None
250
250
@active
def replace(self, file, offset, data=None):
    """Overwrite an existing journal entry for ``file``.

    Only entries already committed to the journal may be replaced;
    entries still pending in a group queue cannot.  Raises KeyError if
    ``file`` is not tracked.
    """
    try:
        index = self.map[file]
    except KeyError:
        raise KeyError(file)
    self.entries[index] = (file, offset, data)
    self.file.write("%s\0%d\n" % (file, offset))
    self.file.flush()
264
264
@active
def nest(self):
    """Enter an additional nested transaction scope and return self."""
    self.usages += 1
    self.count += 1
    return self
270
270
def release(self):
    """Drop one usage reference, aborting if scopes remain but no users.

    If every user has released the transaction while nested scopes are
    still open (count > 0), the transaction is aborted.
    """
    scopes_open = self.count > 0
    if scopes_open:
        self.usages -= 1
    # transaction scopes left open without being closed: fail hard
    if scopes_open and self.usages == 0:
        self._abort()
277
277
def running(self):
    """True while at least one transaction scope is open."""
    return 0 < self.count
280
280
def addpending(self, category, callback):
    """Register a callback to run when pending data is written.

    The transaction will be given as the callback's first argument.

    ``category`` is a unique identifier; registering under an existing
    category replaces the older callback.
    """
    self._pendingcallback[category] = callback
288
290
@active
def writepending(self):
    '''write pending file to temporary version

    This is used to allow hooks to view a transaction before commit.
    Each registered 'pending' callback runs at most once, in sorted
    category order, and receives the transaction as its argument.
    Returns True if any callback (now or on an earlier call) reported
    pending data.'''
    for cat in sorted(self._pendingcallback):
        # remove callback since the data will have been flushed
        # (fix: renamed local from `any`, which shadowed the builtin)
        anypending = self._pendingcallback.pop(cat)(self)
        self._anypending = self._anypending or anypending
    return self._anypending
300
302
@active
def addfinalize(self, category, callback):
    """Register a callback invoked while the transaction is being closed.

    ``category`` is a unique identifier; a second registration under the
    same category replaces the first.
    """
    self._finalizecallback[category] = callback
309
311
@active
def addpostclose(self, category, callback):
    """Register a callback invoked after the transaction has closed.

    ``category`` is a unique identifier; a second registration under the
    same category replaces the first.
    """
    self._postclosecallback[category] = callback
318
320
@active
def close(self):
    '''commit the transaction'''
    # Only the outermost close (when an onclose hook is installed) runs
    # the file generators and the registered finalize callbacks, in
    # sorted category order, before invoking onclose itself.
    if self.count == 1 and self.onclose is not None:
        self._generatefiles()
        categories = sorted(self._finalizecallback)
        for cat in categories:
            self._finalizecallback[cat]()
        self.onclose()

    self.count -= 1
    if self.count != 0:
        # nested scopes still open; the real commit happens when the
        # outermost scope closes
        return
    self.file.close()
    self._backupsfile.close()
    self.entries = []
    if self.after:
        self.after()
    # remove the journal files; from here on the transaction cannot be
    # rolled back
    if self.opener.isfile(self.journal):
        self.opener.unlink(self.journal)
    if self.opener.isfile(self._backupjournal):
        self.opener.unlink(self._backupjournal)
    # drop the backup copies (empty name means no copy was made)
    for _f, b in self._backupentries:
        if b:
            self.opener.unlink(b)
    self._backupentries = []
    self.journal = None
    # run post close actions, in sorted category order
    categories = sorted(self._postclosecallback)
    for cat in categories:
        self._postclosecallback[cat]()
350
352
@active
def abort(self):
    '''abort the transaction (generally called on error, or when the
    transaction is not explicitly committed before going out of
    scope)'''
    # delegate to _abort, which resets the scope counters and replays
    # the journal to undo any partial writes
    self._abort()
357
359
def _abort(self):
    """Roll the transaction back and release all journal resources."""
    # mark the transaction as fully dead before touching the disk
    self.count = 0
    self.usages = 0
    self.file.close()
    self._backupsfile.close()

    if self.onabort is not None:
        self.onabort()

    try:
        if not self.entries and not self._backupentries:
            # nothing was journalled: just delete the (empty) journal
            # files without reporting anything
            if self.journal:
                self.opener.unlink(self.journal)
            if self._backupjournal:
                self.opener.unlink(self._backupjournal)
            return

        self.report(_("transaction abort!\n"))

        try:
            _playback(self.journal, self.report, self.opener,
                      self.entries, self._backupentries, False)
            self.report(_("rollback completed\n"))
        except Exception:
            # playback failed; the journal is kept on disk so the user
            # can recover manually
            self.report(_("rollback failed - please run hg recover\n"))
    finally:
        self.journal = None
385
387
386
388
def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
      to truncate each file. The file should contain a list of
      file\0offset pairs, delimited by newlines. The corresponding
      '*.backupfiles' file should contain a list of file\0backupfile
      pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # fix: this handle was previously leaked (never closed)
        fp.close()
        if lines:
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        f, b = line.split('\0')
                        backupentries.append((f, b))
            else:
                report(_("journal was created by a newer version of "
                        "Mercurial"))

    _playback(file, report, opener, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now