##// END OF EJS Templates
transaction: use 'location' instead of 'vfs' objects for file generation...
Pierre-Yves David -
r23317:197e17be default
parent child Browse files
Show More
@@ -1,442 +1,442
1 # Mercurial bookmark support code
1 # Mercurial bookmark support code
2 #
2 #
3 # Copyright 2008 David Soria Parra <dsp@php.net>
3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from mercurial.i18n import _
8 from mercurial.i18n import _
9 from mercurial.node import hex, bin
9 from mercurial.node import hex, bin
10 from mercurial import encoding, error, util, obsolete, lock as lockmod
10 from mercurial import encoding, error, util, obsolete, lock as lockmod
11 import errno
11 import errno
12
12
class bmstore(dict):
    """Storage for bookmarks.

    This object should do all bookmark reads and writes, so that it's
    fairly simple to replace the storage underlying bookmarks without
    having to clone the logic surrounding bookmarks.

    This particular bmstore implementation stores bookmarks as
    {hash}\s{name}\n (the same format as localtags) in
    .hg/bookmarks. The mapping is stored as {name: nodeid}.

    This class does NOT handle the "current" bookmark state at this
    time.
    """

    def __init__(self, repo):
        # Parse .hg/bookmarks into {name: binary nodeid}.  A missing
        # file simply yields an empty store.
        dict.__init__(self)
        self._repo = repo
        try:
            for line in repo.vfs('bookmarks'):
                line = line.strip()
                if not line:
                    continue
                if ' ' not in line:
                    # each record must be "<hex-node> <name>"; report and
                    # skip anything malformed instead of aborting the load
                    repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n')
                                 % line)
                    continue
                sha, refspec = line.split(' ', 1)
                refspec = encoding.tolocal(refspec)
                try:
                    self[refspec] = repo.changelog.lookup(sha)
                except LookupError:
                    # bookmark points at a changeset unknown to this repo
                    # (e.g. after a strip); drop it from the in-memory view
                    pass
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

    def recordchange(self, tr):
        """record that bookmarks have been changed in a transaction

        The transaction is then responsible for updating the file content."""
        # 'plain' tells the transaction to write the generated file through
        # the plain repo vfs (.hg/) rather than the store vfs
        tr.addfilegenerator('bookmarks', ('bookmarks',), self._write,
                            location='plain')
        tr.hookargs['bookmark_moved'] = '1'

    def write(self):
        '''Write bookmarks

        Write the given bookmark => hash dictionary to the .hg/bookmarks file
        in a format equal to those of localtags.

        We also store a backup of the previous state in undo.bookmarks that
        can be copied back on rollback.
        '''
        repo = self._repo
        if repo._bookmarkcurrent not in self:
            # the active bookmark no longer exists: deactivate it
            unsetcurrent(repo)

        wlock = repo.wlock()
        try:

            file = repo.vfs('bookmarks', 'w', atomictemp=True)
            self._write(file)
            file.close()

            # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
            try:
                repo.svfs.utime('00changelog.i', None)
            except OSError:
                pass

        finally:
            wlock.release()

    def _write(self, fp):
        # serialization helper shared by write() and the transaction file
        # generator: one "<hex-node> <name>" record per line
        for name, node in self.iteritems():
            fp.write("%s %s\n" % (hex(node), encoding.fromlocal(name)))
90
90
def readcurrent(repo):
    '''Get the current bookmark

    If we use gittish branches we have a current bookmark that
    we are on. This function returns the name of the bookmark. It
    is stored in .hg/bookmarks.current
    '''
    mark = None
    try:
        file = repo.opener('bookmarks.current')
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        # a missing bookmarks.current file means no bookmark is active
        return None
    try:
        # No readline() in osutil.posixfile, reading everything is cheap
        mark = encoding.tolocal((file.readlines() or [''])[0])
        if mark == '' or mark not in repo._bookmarks:
            # empty or stale entry: treat as "no current bookmark"
            mark = None
    finally:
        file.close()
    return mark
113
113
def setcurrent(repo, mark):
    '''Set the name of the bookmark that we are currently on

    Set the name of the bookmark that we are on (hg update <bookmark>).
    The name is recorded in .hg/bookmarks.current
    '''
    if mark not in repo._bookmarks:
        raise AssertionError('bookmark %s does not exist!' % mark)

    if repo._bookmarkcurrent == mark:
        # already the active bookmark; nothing to record
        return

    wlock = repo.wlock()
    try:
        fp = repo.opener('bookmarks.current', 'w', atomictemp=True)
        fp.write(encoding.fromlocal(mark))
        fp.close()
    finally:
        wlock.release()
    repo._bookmarkcurrent = mark
135
135
def unsetcurrent(repo):
    # Deactivate the current bookmark by removing .hg/bookmarks.current.
    # A missing file is not an error: there simply was no active bookmark.
    wlock = repo.wlock()
    try:
        try:
            repo.vfs.unlink('bookmarks.current')
            repo._bookmarkcurrent = None
        except OSError, inst:
            if inst.errno != errno.ENOENT:
                raise
    finally:
        wlock.release()
147
147
def iscurrent(repo, mark=None, parents=None):
    '''Tell whether the current bookmark is also active

    I.e., the bookmark listed in .hg/bookmarks.current also points to a
    parent of the working directory.
    '''
    if not mark:
        mark = repo._bookmarkcurrent
    if not parents:
        parents = [ctx.node() for ctx in repo[None].parents()]
    bookmarks = repo._bookmarks
    if mark not in bookmarks:
        return False
    return bookmarks[mark] in parents
160
160
def updatecurrentbookmark(repo, oldnode, curbranch):
    """Move the active bookmark from oldnode to the tip of curbranch.

    Falls back to the repository-wide tip when 'default' has no heads;
    aborts for any other unknown branch."""
    try:
        return update(repo, oldnode, repo.branchtip(curbranch))
    except error.RepoLookupError:
        if curbranch != "default":
            raise util.Abort(_("branch %s not found") % curbranch)
        # no default branch: fall back to the overall tip
        return update(repo, oldnode, repo.lookup("tip"))
169
169
def deletedivergent(repo, deletefrom, bm):
    '''Delete divergent versions of bm on nodes in deletefrom.

    Return True if at least one bookmark was deleted, False otherwise.'''
    marks = repo._bookmarks
    basename = bm.split('@', 1)[0]
    deleted = False
    # snapshot candidate names first so deletion is safe during iteration
    candidates = [name for name in marks
                  if name.split('@', 1)[0] == basename]
    for name in candidates:
        if name == '@' or '@' not in name:
            # can't be divergent by definition
            continue
        if name and marks[name] in deletefrom and name != bm:
            del marks[name]
            deleted = True
    return deleted
186
186
def calculateupdate(ui, repo, checkout):
    '''Return a tuple (targetrev, movemarkfrom) indicating the rev to
    check out and where to move the active bookmark from, if needed.'''
    if checkout is not None:
        # explicit target requested: no bookmark movement to compute
        return (checkout, None)

    movemarkfrom = None
    curmark = repo._bookmarkcurrent
    if iscurrent(repo):
        # active bookmark is on a working-dir parent: carry it along
        movemarkfrom = repo['.'].node()
    elif curmark:
        ui.status(_("updating to active bookmark %s\n") % curmark)
        checkout = curmark
    return (checkout, movemarkfrom)
199
199
def update(repo, parents, node):
    """Move the active bookmark after the working directory moved from
    'parents' to 'node', deleting divergent marks it makes obsolete.

    Returns True if the bookmark store was rewritten, False otherwise."""
    deletefrom = parents
    marks = repo._bookmarks
    update = False
    cur = repo._bookmarkcurrent
    if not cur:
        # no active bookmark: nothing to move
        return False

    if marks[cur] in parents:
        new = repo[node]
        # every bookmark sharing cur's base name is a divergence candidate
        divs = [repo[b] for b in marks
                if b.split('@', 1)[0] == cur.split('@', 1)[0]]
        anc = repo.changelog.ancestors([new.rev()])
        # divergent marks whose node the move supersedes
        deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
        if validdest(repo, repo[marks[cur]], new):
            marks[cur] = new.node()
            update = True

    if deletedivergent(repo, deletefrom, cur):
        update = True

    if update:
        marks.write()
    return update
224
224
def listbookmarks(repo):
    """Return {name: hexnode} for bookmarks safe to expose over the wire.

    We may try to list bookmarks on a repo type that does not support it
    (e.g., statichttprepository), hence the getattr default.  Local
    divergent bookmarks (containing '@' other than as a trailing marker)
    and bookmarks pointing to unknown changesets are filtered out."""
    marks = getattr(repo, '_bookmarks', {})

    d = {}
    hasnode = repo.changelog.hasnode
    # items() instead of the Python2-only iteritems(): iteration over a
    # dict is identical, and this keeps the helper portable to Python 3
    for k, v in marks.items():
        # don't expose local divergent bookmarks
        if hasnode(v) and ('@' not in k or k.endswith('@')):
            d[k] = hex(v)
    return d
237
237
def pushbookmark(repo, key, old, new):
    """Update bookmark 'key' from 'old' to 'new' (hex nodes) under lock.

    Used by the pushkey protocol.  Returns True on success, False when
    the precondition fails ('key' no longer points at 'old') or 'new'
    names an unknown changeset.  An empty 'new' deletes the bookmark."""
    w = l = tr = None
    try:
        # lock order matters: wlock -> lock -> transaction
        w = repo.wlock()
        l = repo.lock()
        tr = repo.transaction('bookmarks')
        marks = repo._bookmarks
        existing = hex(marks.get(key, ''))
        if existing != old and existing != new:
            # somebody moved the bookmark since the caller observed 'old'
            return False
        if new == '':
            del marks[key]
        else:
            if new not in repo:
                return False
            marks[key] = repo[new].node()
        marks.recordchange(tr)
        tr.close()
        return True
    finally:
        # release() tolerates None entries for locks never acquired
        lockmod.release(tr, l, w)
259
259
def compare(repo, srcmarks, dstmarks,
            srchex=None, dsthex=None, targets=None):
    '''Compare bookmarks between srcmarks and dstmarks

    Returns the tuple of bookmark lists "(addsrc, adddst, advsrc,
    advdst, diverge, differ, invalid, same)":

    :addsrc: added on src side (removed on dst side, perhaps)
    :adddst: added on dst side (removed on src side, perhaps)
    :advsrc: advanced on src side
    :advdst: advanced on dst side
    :diverge: diverge
    :differ: changed, but changeset referred on src is unknown on dst
    :invalid: unknown on both side
    :same: same on both side

    Each element of each list is a tuple "(bookmark name, changeset ID
    on source side, changeset ID on destination side)".  Changeset IDs
    are 40 hexadecimal digit strings or None, and may be unknown for
    repo in the "addsrc", "adddst", "differ" and "invalid" lists.

    "srcmarks" and "dstmarks" are expected to yield 40 hexadecimal
    digit changeset IDs for a given bookmark; if not (e.g. bmstore
    "repo._bookmarks" returns binary values), pass "srchex"/"dsthex"
    converters.

    If "targets" is specified, only bookmarks listed in it are examined.
    '''
    identity = lambda x: x
    if not srchex:
        srchex = identity
    if not dsthex:
        dsthex = identity

    if targets:
        bset = set(targets)
    else:
        bset = set(srcmarks) | set(dstmarks)

    addsrc, adddst, advsrc, advdst = [], [], [], []
    diverge, differ, invalid, same = [], [], [], []

    for b in sorted(bset):
        insrc = b in srcmarks
        indst = b in dstmarks
        if not insrc and not indst:
            invalid.append((b, None, None))
        elif not insrc:
            adddst.append((b, None, dsthex(dstmarks[b])))
        elif not indst:
            addsrc.append((b, srchex(srcmarks[b]), None))
        else:
            scid = srchex(srcmarks[b])
            dcid = dsthex(dstmarks[b])
            if scid == dcid:
                same.append((b, scid, dcid))
            elif scid in repo and dcid in repo:
                sctx = repo[scid]
                dctx = repo[dcid]
                # orient the pair so we always test "older -> newer"
                if sctx.rev() < dctx.rev():
                    older, newer = sctx, dctx
                else:
                    older, newer = dctx, sctx
                if not validdest(repo, older, newer):
                    diverge.append((b, scid, dcid))
                elif older is sctx:
                    advdst.append((b, scid, dcid))
                else:
                    advsrc.append((b, scid, dcid))
            else:
                # it is too expensive to examine in detail, in this case
                differ.append((b, scid, dcid))

    return (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same)
346
346
347 def _diverge(ui, b, path, localmarks):
347 def _diverge(ui, b, path, localmarks):
348 if b == '@':
348 if b == '@':
349 b = ''
349 b = ''
350 # find a unique @ suffix
350 # find a unique @ suffix
351 for x in range(1, 100):
351 for x in range(1, 100):
352 n = '%s@%d' % (b, x)
352 n = '%s@%d' % (b, x)
353 if n not in localmarks:
353 if n not in localmarks:
354 break
354 break
355 # try to use an @pathalias suffix
355 # try to use an @pathalias suffix
356 # if an @pathalias already exists, we overwrite (update) it
356 # if an @pathalias already exists, we overwrite (update) it
357 if path.startswith("file:"):
357 if path.startswith("file:"):
358 path = util.url(path).path
358 path = util.url(path).path
359 for p, u in ui.configitems("paths"):
359 for p, u in ui.configitems("paths"):
360 if u.startswith("file:"):
360 if u.startswith("file:"):
361 u = util.url(u).path
361 u = util.url(u).path
362 if path == u:
362 if path == u:
363 n = '%s@%s' % (b, p)
363 n = '%s@%s' % (b, p)
364 return n
364 return n
365
365
def updatefromremote(ui, repo, remotemarks, path, trfunc, explicit=()):
    """Merge 'remotemarks' (pulled from 'path') into the local bookmarks.

    'trfunc' is a callable returning the transaction to record changes
    in; it is only invoked when something actually changed.  Bookmarks
    listed in 'explicit' were requested by the user and may overwrite a
    divergent local value instead of being renamed."""
    ui.debug("checking for updated bookmarks\n")
    localmarks = repo._bookmarks
    (addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same
     ) = compare(repo, remotemarks, localmarks, dsthex=hex)

    status = ui.status
    warn = ui.warn
    if ui.configbool('ui', 'quietbookmarkmove', False):
        # demote all bookmark-move chatter to debug output
        status = warn = ui.debug

    explicit = set(explicit)
    changed = []
    for b, scid, dcid in addsrc:
        if scid in repo: # add remote bookmarks for changes we already have
            changed.append((b, bin(scid), status,
                            _("adding remote bookmark %s\n") % (b)))
    for b, scid, dcid in advsrc:
        changed.append((b, bin(scid), status,
                        _("updating bookmark %s\n") % (b)))
    # remove normal movement from explicit set
    explicit.difference_update(d[0] for d in changed)

    for b, scid, dcid in diverge:
        if b in explicit:
            # user asked for this bookmark: take the remote value as-is
            explicit.discard(b)
            changed.append((b, bin(scid), status,
                            _("importing bookmark %s\n") % (b)))
        else:
            # rename to a '@suffix' variant so both values are preserved
            db = _diverge(ui, b, path, localmarks)
            changed.append((db, bin(scid), warn,
                            _("divergent bookmark %s stored as %s\n")
                            % (b, db)))
    for b, scid, dcid in adddst + advdst:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, bin(scid), status,
                            _("importing bookmark %s\n") % (b)))

    if changed:
        tr = trfunc()
        for b, node, writer, msg in sorted(changed):
            localmarks[b] = node
            writer(msg)
        localmarks.recordchange(tr)
411
411
def diff(ui, dst, src):
    """List bookmarks present in src but not in dst.

    Returns 0 when at least one changed bookmark was printed, 1 when
    there were none."""
    ui.status(_("searching for changed bookmarks\n"))

    srcmarks = src.listkeys('bookmarks')
    dstmarks = dst.listkeys('bookmarks')

    changed = sorted(set(srcmarks) - set(dstmarks))
    for name in changed:
        node = srcmarks[name]
        if not ui.debugflag:
            # abbreviate to the usual 12-digit short hash
            node = node[:12]
        ui.write(" %-25s %s\n" % (name, node))

    if not changed:
        ui.status(_("no changed bookmarks found\n"))
        return 1
    return 0
427
427
def validdest(repo, old, new):
    """Is the new bookmark destination a valid update from the old one"""
    repo = repo.unfiltered()
    if old == new:
        # Old == new -> nothing to update.
        return False
    if not old:
        # old is nullrev, anything is valid.
        # (new != nullrev has been excluded by the previous check)
        return True
    if repo.obsstore:
        # obsolescence-aware check: new must sit in old's foreground
        return new.node() in obsolete.foreground(repo, [old.node()])
    # still an independent clause as it is lazyer (and therefore faster)
    return old.descendant(new)
@@ -1,502 +1,505
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import os
15 import os
16 import errno
16 import errno
17 import error, util
17 import error, util
18
18
19 version = 2
19 version = 2
20
20
def active(func):
    """Decorator refusing to run ``func`` once the transaction is finished."""
    def wrapper(self, *args, **kwds):
        # count drops to zero when the transaction is committed or aborted
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return wrapper
28
29 def _playback(journal, report, opener, vfsmap, entries, backupentries,
29 def _playback(journal, report, opener, vfsmap, entries, backupentries,
30 unlink=True):
30 unlink=True):
31 for f, o, _ignore in entries:
31 for f, o, _ignore in entries:
32 if o or not unlink:
32 if o or not unlink:
33 try:
33 try:
34 fp = opener(f, 'a')
34 fp = opener(f, 'a')
35 fp.truncate(o)
35 fp.truncate(o)
36 fp.close()
36 fp.close()
37 except IOError:
37 except IOError:
38 report(_("failed to truncate %s\n") % f)
38 report(_("failed to truncate %s\n") % f)
39 raise
39 raise
40 else:
40 else:
41 try:
41 try:
42 opener.unlink(f)
42 opener.unlink(f)
43 except (IOError, OSError), inst:
43 except (IOError, OSError), inst:
44 if inst.errno != errno.ENOENT:
44 if inst.errno != errno.ENOENT:
45 raise
45 raise
46
46
47 backupfiles = []
47 backupfiles = []
48 for l, f, b, c in backupentries:
48 for l, f, b, c in backupentries:
49 if l not in vfsmap and c:
49 if l not in vfsmap and c:
50 report("couldn't handle %s: unknown cache location %s\n"
50 report("couldn't handle %s: unknown cache location %s\n"
51 % (b, l))
51 % (b, l))
52 vfs = vfsmap[l]
52 vfs = vfsmap[l]
53 try:
53 try:
54 if f and b:
54 if f and b:
55 filepath = vfs.join(f)
55 filepath = vfs.join(f)
56 backuppath = vfs.join(b)
56 backuppath = vfs.join(b)
57 try:
57 try:
58 util.copyfile(backuppath, filepath)
58 util.copyfile(backuppath, filepath)
59 backupfiles.append(b)
59 backupfiles.append(b)
60 except IOError:
60 except IOError:
61 report(_("failed to recover %s\n") % f)
61 report(_("failed to recover %s\n") % f)
62 else:
62 else:
63 target = f or b
63 target = f or b
64 try:
64 try:
65 vfs.unlink(target)
65 vfs.unlink(target)
66 except (IOError, OSError), inst:
66 except (IOError, OSError), inst:
67 if inst.errno != errno.ENOENT:
67 if inst.errno != errno.ENOENT:
68 raise
68 raise
69 except (IOError, OSError, util.Abort), inst:
69 except (IOError, OSError, util.Abort), inst:
70 if not c:
70 if not c:
71 raise
71 raise
72
72
73 opener.unlink(journal)
73 opener.unlink(journal)
74 backuppath = "%s.backupfiles" % journal
74 backuppath = "%s.backupfiles" % journal
75 if opener.exists(backuppath):
75 if opener.exists(backuppath):
76 opener.unlink(backuppath)
76 opener.unlink(backuppath)
77 try:
77 try:
78 for f in backupfiles:
78 for f in backupfiles:
79 if opener.exists(f):
79 if opener.exists(f):
80 opener.unlink(f)
80 opener.unlink(f)
81 except (IOError, OSError, util.Abort), inst:
81 except (IOError, OSError, util.Abort), inst:
82 # only pure backup file remains, it is sage to ignore any error
82 # only pure backup file remains, it is sage to ignore any error
83 pass
83 pass
84
84
class transaction(object):
    def __init__(self, report, opener, vfsmap, journal, after=None,
                 createmode=None, onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event
        of an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
          closed
        * `onabort`: called as the transaction is aborting, but before any
          files have been truncated
        """
        # count/usages track nested transaction scopes (see nest/release)
        self.count = 1
        self.usages = 1
        self.report = report
        # a vfs to the store content
        self.opener = opener
        # a map to access files in various locations: {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[''] = opener  # set default value
        self._vfsmap = vfsmap
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.map = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self.file = opener.open(self.journal, "w")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside main opener reach.
        #   use 'location' value as a key in a vfsmap to find the right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = "%s.backupfiles" % journal
        self._backupsfile = opener.open(self._backupjournal, 'w')
        self._backupsfile.write('%d\n' % version)

        if createmode is not None:
            opener.chmod(self.journal, createmode & 0666)
            opener.chmod(self._backupjournal, createmode & 0666)

        # holds file generators to be run on commit: {genid -> entry}
        self._filegenerators = {}
        # holds callbacks to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callbacks to call when writing the transaction
        self._finalizecallback = {}
        # holds callbacks for after the transaction has closed
        self._postclosecallback = {}

    def __del__(self):
        # a journal still being set means the transaction was never
        # explicitly closed, so roll it back
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """delay registration of file entries

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entries.

        This is used by strip to delay vision of strip offset. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o, data in q:
            self._addentry(f, o, data)

    @active
    def add(self, file, offset, data=None):
        """record the state of an append-only file before update"""
        if file in self.map or file in self._backupmap:
            return
        if self._queue:
            # inside a group: defer registration until endgroup()
            self._queue[-1].append((file, offset, data))
            return

        self._addentry(file, offset, data)

    def _addentry(self, file, offset, data):
        """add an append-only entry to memory and on-disk state"""
        if file in self.map or file in self._backupmap:
            return
        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, location=''):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """
        if self._queue:
            # backups cannot be deferred like append-only entries
            msg = 'cannot use transaction.addbackup inside "group"'
            raise RuntimeError(msg)

        if file in self.map or file in self._backupmap:
            return
        dirname, filename = os.path.split(file)
        backupfilename = "%s.backup.%s" % (self.journal, filename)
        backupfile = os.path.join(dirname, backupfilename)
        vfs = self._vfsmap[location]
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = vfs.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # empty backup path records "file did not exist at backup time"
            backupfile = ''

        self._addbackupentry((location, file, backupfile, False))

    def _addbackupentry(self, entry):
        """register a new backup entry and write it to disk"""
        self._backupentries.append(entry)
        # NOTE(review): 'file' here is the builtin, not the path from
        # 'entry' -- this looks like a bug (all entries map to one key);
        # verify against later upstream fixes before relying on _backupmap
        self._backupmap[file] = len(self._backupentries) - 1
        self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
        self._backupsfile.flush()

    @active
    def registertmp(self, tmpfile):
        """register a temporary transaction file

        Such a file will be deleted when the transaction exits (on both
        failure and success).
        """
        self._addbackupentry(('', '', tmpfile, False))

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0,
                         location=''):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content for each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such files.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which
        multiple generators will be executed.

        The `location` argument may be used to indicate the files are located
        outside of the standard directory for transactions. It should match
        one of the keys of the `transaction.vfsmap` dictionary.
        """
        # For now, we are unable to do proper backup and restore of custom
        # vfs, except for bookmarks which are handled outside this mechanism.
        self._filegenerators[genid] = (order, filenames, genfunc, location)

    def _generatefiles(self):
        # write files registered for generation, in `order` (entries sort by
        # their first tuple element)
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, location = entry
            vfs = self._vfsmap[location]
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name, location=location)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                # always close, even if genfunc raised
                for f in files:
                    f.close()

    @active
    def find(self, file):
        # return the journal entry for `file`, the backup entry as a
        # fallback, or None when the file is not part of this transaction
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self._backupmap:
            return self._backupentries[self._backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        # re-journal the new truncation offset
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        # enter a nested transaction scope; close()/release() must be
        # called once per nest() before the transaction really commits
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending files to temporary versions

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        return self._anypending

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks
        with newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add a callback to be called after the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1:
            # outermost scope: generate files and run finalizers before
            # the journal is discarded
            self._generatefiles()
            categories = sorted(self._finalizecallback)
            for cat in categories:
                self._finalizecallback[cat](self)
            if self.onclose is not None:
                self.onclose()

        self.count -= 1
        if self.count != 0:
            # nested scope closed; the outer scope still owns the journal
            return
        self.file.close()
        self._backupsfile.close()
        # cleanup temporary files (entries with an empty 'path')
        # NOTE(review): the "couldn't remote" messages below presumably
        # mean "couldn't remove" -- confirm before touching user-facing text
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self.report("couldn't remote %s: unknown cache location %s\n"
                            % (b, l))
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort), inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remote %s: %s\n"
                                % (vfs.join(b), inst))
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self._backupjournal):
            self.opener.unlink(self._backupjournal)
        # remove any remaining backup files
        for _l, _f, b, c in self._backupentries:
            # NOTE(review): this loop iterates as (_l, _f) but tests and
            # indexes with 'l' left over from the previous loop -- looks
            # like a bug (stale location, NameError on empty first loop);
            # verify against later upstream fixes
            if l not in self._vfsmap and c:
                self.report("couldn't remote %s: unknown cache location"
                            "%s\n" % (b, l))
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, util.Abort), inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self.report("couldn't remote %s: %s\n"
                                % (vfs.join(b), inst))
        self._backupentries = []
        self.journal = None
        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self._backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self._backupentries:
                # nothing was journalled: just discard the journal files
                if self.journal:
                    self.opener.unlink(self.journal)
                if self._backupjournal:
                    self.opener.unlink(self._backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                # replay the journal without unlinking zero-offset files
                _playback(self.journal, self.report, self.opener,
                          self._vfsmap, self.entries, self._backupentries,
                          False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
459
462
def rollback(opener, vfsmap, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
      to truncate each file. The file should contain a list of
      file\0offset pairs, delimited by newlines. The corresponding
      '*.backupfiles' file should contain a list of file\0backupfile
      pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # close the handle as soon as the content is read; the original
        # code leaked it
        fp.close()
        if lines:
            # first line is the on-disk format version
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split('\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(_("journal was created by a different version of "
                         "Mercurial"))

    _playback(file, report, opener, vfsmap, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now