transaction: change list of journal entries into a dictionary...
Joerg Sonnenberger
r46483:a985c4fb default
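
The change is easiest to see side by side. Previously the transaction tracked journaled files in `self._entries`, a list of `(file, offset)` pairs, with a parallel `self._map` dict mapping each file to its list index; this commit collapses both into a single `self._offsetmap` dict and, for callers that need the full entry list (such as strip), re-reads the on-disk journal through the new `readjournal()` method. A minimal sketch of the lookup before and after (standalone code with made-up file names, not the real transaction class):

```python
# Before: a list of (file, offset) entries plus an index map.
entries = [(b'00changelog.i', 512), (b'00manifest.i', 128)]
entrymap = {f: i for i, (f, o) in enumerate(entries)}

offset = entries[entrymap[b'00manifest.i']][1]  # two-step lookup -> 128

# After: a single dict from each journaled file to its truncation offset.
offsetmap = {b'00changelog.i': 512, b'00manifest.i': 128}

offset = offsetmap.get(b'00manifest.i')  # direct lookup -> 128
```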
--- a/mercurial/repair.py
+++ b/mercurial/repair.py
@@ -1,541 +1,544 @@
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 
 from .i18n import _
 from .node import (
     hex,
     short,
 )
 from . import (
     bundle2,
     changegroup,
     discovery,
     error,
     exchange,
     obsolete,
     obsutil,
     pathutil,
     phases,
     pycompat,
     requirements,
     scmutil,
     util,
 )
 from .utils import (
     hashutil,
     stringutil,
 )
 
 
 def backupbundle(
     repo, bases, heads, node, suffix, compress=True, obsolescence=True
 ):
     """create a bundle with the specified revisions as a backup"""
 
     backupdir = b"strip-backup"
     vfs = repo.vfs
     if not vfs.isdir(backupdir):
         vfs.mkdir(backupdir)
 
     # Include a hash of all the nodes in the filename for uniqueness
     allcommits = repo.set(b'%ln::%ln', bases, heads)
     allhashes = sorted(c.hex() for c in allcommits)
     totalhash = hashutil.sha1(b''.join(allhashes)).digest()
     name = b"%s/%s-%s-%s.hg" % (
         backupdir,
         short(node),
         hex(totalhash[:4]),
         suffix,
     )
 
     cgversion = changegroup.localversion(repo)
     comp = None
     if cgversion != b'01':
         bundletype = b"HG20"
         if compress:
             comp = b'BZ'
     elif compress:
         bundletype = b"HG10BZ"
     else:
         bundletype = b"HG10UN"
 
     outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
     contentopts = {
         b'cg.version': cgversion,
         b'obsolescence': obsolescence,
         b'phases': True,
     }
     return bundle2.writenewbundle(
         repo.ui,
         repo,
         b'strip',
         name,
         bundletype,
         outgoing,
         contentopts,
         vfs,
         compression=comp,
     )
 
 
 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()
 
     for x in pycompat.xrange(striprev, len(repo)):
         files.update(repo[x].files())
 
     return sorted(files)
 
 
 def _collectrevlog(revlog, striprev):
     _, brokenset = revlog.getstrippoint(striprev)
     return [revlog.linkrev(r) for r in brokenset]
 
 
 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()
 
     for revlog in manifestrevlogs(repo):
         s.update(_collectrevlog(revlog, striprev))
     for fname in files:
         s.update(_collectrevlog(repo.file(fname), striprev))
 
     return s
 
 
 def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
     # This function requires the caller to lock the repo, but it operates
     # within a transaction of its own, and thus requires there to be no current
     # transaction when it is called.
     if repo.currenttransaction() is not None:
         raise error.ProgrammingError(b'cannot strip from inside a transaction')
 
     # Simple way to maintain backwards compatibility for this
     # argument.
     if backup in [b'none', b'strip']:
         backup = False
 
     repo = repo.unfiltered()
     repo.destroying()
     vfs = repo.vfs
     # load bookmarks before changelog to avoid side effects from outdated
     # changelog (see repo._refreshchangelog)
     repo._bookmarks
     cl = repo.changelog
 
     # TODO handle undo of merge sets
     if isinstance(nodelist, bytes):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)
 
     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)
 
     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     saveheads = set(saverevs)
     for r in cl.revs(start=striprev + 1):
         if any(p in tostrip for p in cl.parentrevs(r)):
             tostrip.add(r)
 
         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]
 
     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]
 
     stripobsidx = obsmarkers = ()
     if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
         obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
     if obsmarkers:
         stripobsidx = [
             i for i, m in enumerate(repo.obsstore) if m in obsmarkers
         ]
 
     newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
 
     backupfile = None
     node = nodelist[-1]
     if backup:
         backupfile = _createstripbackup(repo, stripbases, node, topic)
     # create a changegroup for all the branches we need to keep
     tmpbundlefile = None
     if saveheads:
         # do not compress temporary bundle if we remove it from disk later
         #
         # We do not include obsolescence, it might re-introduce prune markers
         # we are trying to strip. This is harmless since the stripped markers
         # are already backed up and we did not touch the markers for the
         # saved changesets.
         tmpbundlefile = backupbundle(
             repo,
             savebases,
             saveheads,
             node,
             b'temp',
             compress=False,
             obsolescence=False,
         )
 
     with ui.uninterruptible():
         try:
             with repo.transaction(b"strip") as tr:
                 # TODO this code violates the interface abstraction of the
                 # transaction and makes assumptions that file storage is
                 # using append-only files. We'll need some kind of storage
                 # API to handle stripping for us.
-                offset = len(tr._entries)
+                oldfiles = set(tr._offsetmap.keys())
 
                 tr.startgroup()
                 cl.strip(striprev, tr)
                 stripmanifest(repo, striprev, tr, files)
 
                 for fn in files:
                     repo.file(fn).strip(striprev, tr)
                 tr.endgroup()
 
-                for i in pycompat.xrange(offset, len(tr._entries)):
-                    file, troffset = tr._entries[i]
+                entries = tr.readjournal()
+
+                for file, troffset in entries:
+                    if file in oldfiles:
+                        continue
                     with repo.svfs(file, b'a', checkambig=True) as fp:
                         fp.truncate(troffset)
                     if troffset == 0:
                         repo.store.markremoved(file)
 
                 deleteobsmarkers(repo.obsstore, stripobsidx)
                 del repo.obsstore
                 repo.invalidatevolatilesets()
                 repo._phasecache.filterunknown(repo)
 
             if tmpbundlefile:
                 ui.note(_(b"adding branch\n"))
                 f = vfs.open(tmpbundlefile, b"rb")
                 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                 if not repo.ui.verbose:
                     # silence internal shuffling chatter
                     repo.ui.pushbuffer()
                 tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
                 txnname = b'strip'
                 if not isinstance(gen, bundle2.unbundle20):
                     txnname = b"strip\n%s" % util.hidepassword(tmpbundleurl)
                 with repo.transaction(txnname) as tr:
                     bundle2.applybundle(
                         repo, gen, tr, source=b'strip', url=tmpbundleurl
                     )
                 if not repo.ui.verbose:
                     repo.ui.popbuffer()
                 f.close()
 
             with repo.transaction(b'repair') as tr:
                 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                 repo._bookmarks.applychanges(repo, tr, bmchanges)
 
             # remove undo files
             for undovfs, undofile in repo.undofiles():
                 try:
                     undovfs.unlink(undofile)
                 except OSError as e:
                     if e.errno != errno.ENOENT:
                         ui.warn(
                             _(b'error removing %s: %s\n')
                             % (
                                 undovfs.join(undofile),
                                 stringutil.forcebytestr(e),
                             )
                         )
 
         except:  # re-raises
             if backupfile:
                 ui.warn(
                     _(b"strip failed, backup bundle stored in '%s'\n")
                     % vfs.join(backupfile)
                 )
             if tmpbundlefile:
                 ui.warn(
                     _(b"strip failed, unrecovered changes stored in '%s'\n")
                     % vfs.join(tmpbundlefile)
                 )
                 ui.warn(
                     _(
                         b"(fix the problem, then recover the changesets with "
                         b"\"hg unbundle '%s'\")\n"
                     )
                     % vfs.join(tmpbundlefile)
                 )
             raise
         else:
             if tmpbundlefile:
                 # Remove temporary bundle only if there were no exceptions
                 vfs.unlink(tmpbundlefile)
 
     repo.destroyed()
     # return the backup file path (or None if 'backup' was False) so
     # extensions can use it
     return backupfile
 
 
 def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
     """perform a "soft" strip using the archived phase"""
     tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
     if not tostrip:
         return None
 
     newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
     if backup:
         node = tostrip[0]
         backupfile = _createstripbackup(repo, tostrip, node, topic)
 
     with repo.transaction(b'strip') as tr:
         phases.retractboundary(repo, tr, phases.archived, tostrip)
         bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
         repo._bookmarks.applychanges(repo, tr, bmchanges)
     return backupfile
 
 
 def _bookmarkmovements(repo, tostrip):
     # compute necessary bookmark movement
     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)
     newbmtarget = None
     # If we need to move bookmarks, compute bookmark
     # targets. Otherwise we can skip doing this logic.
     if updatebm:
         # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
         # but is much faster
         newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
         if newbmtarget:
             newbmtarget = repo[newbmtarget.first()].node()
         else:
             newbmtarget = b'.'
     return newbmtarget, updatebm
 
 
 def _createstripbackup(repo, stripbases, node, topic):
     # backup the changeset we are about to strip
     vfs = repo.vfs
     cl = repo.changelog
     backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
     repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile))
     repo.ui.log(
         b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile)
     )
     return backupfile
 
 
 def safestriproots(ui, repo, nodes):
     """return list of roots of nodes where descendants are covered by nodes"""
     torev = repo.unfiltered().changelog.rev
     revs = {torev(n) for n in nodes}
     # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
     # orphaned = affected - wanted
     # affected = descendants(roots(wanted))
     # wanted = revs
     revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
     tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
     notstrip = revs - tostrip
     if notstrip:
         nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip))
         ui.warn(
             _(b'warning: orphaned descendants detected, not stripping %s\n')
             % nodestr
         )
     return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]
 
 
 class stripcallback(object):
     """used as a transaction postclose callback"""
 
     def __init__(self, ui, repo, backup, topic):
         self.ui = ui
         self.repo = repo
         self.backup = backup
         self.topic = topic or b'backup'
         self.nodelist = []
 
     def addnodes(self, nodes):
         self.nodelist.extend(nodes)
 
     def __call__(self, tr):
         roots = safestriproots(self.ui, self.repo, self.nodelist)
         if roots:
             strip(self.ui, self.repo, roots, self.backup, self.topic)
 
 
 def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
     """like strip, but works inside transaction and won't strip irrelevant revs
 
     nodelist must explicitly contain all descendants. Otherwise a warning will
     be printed that some nodes are not stripped.
 
     Will do a backup if `backup` is True. The last non-None "topic" will be
     used as the backup topic name. The default backup topic name is "backup".
     """
     tr = repo.currenttransaction()
     if not tr:
         nodes = safestriproots(ui, repo, nodelist)
         return strip(ui, repo, nodes, backup=backup, topic=topic)
     # transaction postclose callbacks are called in alphabetical order.
     # use '\xff' as prefix so we are likely to be called last.
     callback = tr.getpostclose(b'\xffstrip')
     if callback is None:
         callback = stripcallback(ui, repo, backup=backup, topic=topic)
         tr.addpostclose(b'\xffstrip', callback)
     if topic:
         callback.topic = topic
     callback.addnodes(nodelist)
 
 
 def stripmanifest(repo, striprev, tr, files):
     for revlog in manifestrevlogs(repo):
         revlog.strip(striprev, tr)
 
 
 def manifestrevlogs(repo):
     yield repo.manifestlog.getstorage(b'')
     if scmutil.istreemanifest(repo):
         # This logic is safe if treemanifest isn't enabled, but also
         # pointless, so we skip it if treemanifest isn't enabled.
         for unencoded, encoded, size in repo.store.datafiles():
             if unencoded.startswith(b'meta/') and unencoded.endswith(
                 b'00manifest.i'
             ):
                 dir = unencoded[5:-12]
                 yield repo.manifestlog.getstorage(dir)
 
 
 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.
 
     Missing entries will be added. Extra entries will be removed.
     """
     repo = repo.unfiltered()
 
     if b'fncache' not in repo.requirements:
         ui.warn(
             _(
                 b'(not rebuilding fncache because repository does not '
                 b'support fncache)\n'
             )
         )
         return
 
     with repo.lock():
         fnc = repo.store.fncache
         fnc.ensureloaded(warn=ui.warn)
 
         oldentries = set(fnc.entries)
         newentries = set()
         seenfiles = set()
 
         progress = ui.makeprogress(
             _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
         )
         for rev in repo:
             progress.update(rev)
 
             ctx = repo[rev]
             for f in ctx.files():
                 # This is to minimize I/O.
                 if f in seenfiles:
                     continue
                 seenfiles.add(f)
 
                 i = b'data/%s.i' % f
                 d = b'data/%s.d' % f
 
                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)
 
         progress.complete()
 
         if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
             # This logic is safe if treemanifest isn't enabled, but also
             # pointless, so we skip it if treemanifest isn't enabled.
             for dir in pathutil.dirs(seenfiles):
                 i = b'meta/%s/00manifest.i' % dir
                 d = b'meta/%s/00manifest.d' % dir
 
                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)
 
         addcount = len(newentries - oldentries)
         removecount = len(oldentries - newentries)
         for p in sorted(oldentries - newentries):
             ui.write(_(b'removing %s\n') % p)
         for p in sorted(newentries - oldentries):
             ui.write(_(b'adding %s\n') % p)
 
         if addcount or removecount:
             ui.write(
                 _(b'%d items added, %d removed from fncache\n')
                 % (addcount, removecount)
             )
             fnc.entries = newentries
             fnc._dirty = True
 
             with repo.transaction(b'fncache') as tr:
                 fnc.write(tr)
         else:
             ui.write(_(b'fncache already up to date\n'))
 
 
 def deleteobsmarkers(obsstore, indices):
     """Delete some obsmarkers from obsstore and return how many were deleted
 
     'indices' is a list of ints which are the indices
     of the markers to be deleted.
 
     Every invocation of this function completely rewrites the obsstore file,
     skipping the markers we want to be removed. The new temporary file is
     created, remaining markers are written there and on .close() this file
     gets atomically renamed to obsstore, thus guaranteeing consistency."""
     if not indices:
         # we don't want to rewrite the obsstore with the same content
         return
 
     left = []
     current = obsstore._all
     n = 0
     for i, m in enumerate(current):
         if i in indices:
             n += 1
             continue
         left.append(m)
 
     newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
     for bytes in obsolete.encodemarkers(left, True, obsstore._version):
         newobsstorefile.write(bytes)
     newobsstorefile.close()
     return n
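
Strip used to remember `offset = len(tr._entries)` before truncating and then walk `tr._entries[offset:]` to find the journal entries added during the strip transaction. With a dictionary there is no stable slice to take, so the new code instead snapshots the file names journaled before the strip and filters them out after re-reading the journal. A condensed sketch of the new loop (paraphrasing the hunk above; `tr` is the active transaction):

```python
# Files already journaled before the strip began.
oldfiles = set(tr._offsetmap.keys())

# ... changelog/manifest/filelog strips run here, adding journal entries ...

# Re-read the journal and truncate only files first touched by the strip.
for file, troffset in tr.readjournal():
    if file in oldfiles:
        continue
    with repo.svfs(file, b'a', checkambig=True) as fp:
        fp.truncate(troffset)
    if troffset == 0:
        repo.store.markremoved(file)
```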
--- a/mercurial/transaction.py
+++ b/mercurial/transaction.py
@@ -1,732 +1,737 @@
 # transaction.py - simple journaling scheme for mercurial
 #
 # This transaction scheme is intended to gracefully handle program
 # errors and interruptions. More serious failures like system crashes
 # can be recovered with an fsck-like tool. As the whole repository is
 # effectively log-structured, this should amount to simply truncating
 # anything that isn't referenced in the changelog.
 #
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 
 from .i18n import _
 from . import (
     error,
     pycompat,
     util,
 )
 from .utils import stringutil
 
 version = 2
 
 # These are the file generators that should only be executed after the
 # finalizers are done, since they rely on the output of the finalizers (like
 # the changelog having been written).
 postfinalizegenerators = {b'bookmarks', b'dirstate'}
 
 GEN_GROUP_ALL = b'all'
 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
 GEN_GROUP_POST_FINALIZE = b'postfinalize'
 
 
 def active(func):
     def _active(self, *args, **kwds):
         if self._count == 0:
             raise error.ProgrammingError(
                 b'cannot use transaction when it is already committed/aborted'
             )
         return func(self, *args, **kwds)
 
     return _active
 
 
 def _playback(
     journal,
     report,
     opener,
     vfsmap,
     entries,
     backupentries,
     unlink=True,
     checkambigfiles=None,
 ):
     for f, o in entries:
         if o or not unlink:
             checkambig = checkambigfiles and (f, b'') in checkambigfiles
             try:
                 fp = opener(f, b'a', checkambig=checkambig)
                 if fp.tell() < o:
                     raise error.Abort(
                         _(
                             b"attempted to truncate %s to %d bytes, but it was "
                             b"already %d bytes\n"
                         )
                         % (f, o, fp.tell())
                     )
                 fp.truncate(o)
                 fp.close()
             except IOError:
                 report(_(b"failed to truncate %s\n") % f)
                 raise
         else:
             try:
                 opener.unlink(f)
             except (IOError, OSError) as inst:
                 if inst.errno != errno.ENOENT:
                     raise
 
     backupfiles = []
     for l, f, b, c in backupentries:
         if l not in vfsmap and c:
             report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
         vfs = vfsmap[l]
         try:
             if f and b:
                 filepath = vfs.join(f)
                 backuppath = vfs.join(b)
                 checkambig = checkambigfiles and (f, l) in checkambigfiles
                 try:
                     util.copyfile(backuppath, filepath, checkambig=checkambig)
                     backupfiles.append(b)
                 except IOError:
                     report(_(b"failed to recover %s\n") % f)
             else:
                 target = f or b
                 try:
                     vfs.unlink(target)
                 except (IOError, OSError) as inst:
                     if inst.errno != errno.ENOENT:
                         raise
         except (IOError, OSError, error.Abort):
             if not c:
                 raise
 
     backuppath = b"%s.backupfiles" % journal
     if opener.exists(backuppath):
         opener.unlink(backuppath)
     opener.unlink(journal)
     try:
         for f in backupfiles:
             if opener.exists(f):
                 opener.unlink(f)
     except (IOError, OSError, error.Abort):
         # only the pure backup file remains, it is safe to ignore any error
         pass
 
 
 class transaction(util.transactional):
     def __init__(
         self,
         report,
         opener,
         vfsmap,
         journalname,
         undoname=None,
         after=None,
         createmode=None,
         validator=None,
         releasefn=None,
         checkambigfiles=None,
         name='<unnamed>',
     ):
         """Begin a new transaction
 
         Begins a new transaction that allows rolling back writes in the event of
         an exception.
 
         * `after`: called after the transaction has been committed
         * `createmode`: the mode of the journal file that will be created
         * `releasefn`: called after releasing (with transaction and result)
 
         `checkambigfiles` is a set of (path, vfs-location) tuples,
         which determine whether file stat ambiguity should be avoided
         for corresponding files.
         """
         self._count = 1
         self._usages = 1
         self._report = report
         # a vfs to the store content
         self._opener = opener
         # a map to access file in various {location -> vfs}
         vfsmap = vfsmap.copy()
         vfsmap[b''] = opener  # set default value
         self._vfsmap = vfsmap
         self._after = after
-        self._entries = []
-        self._map = {}
+        self._offsetmap = {}
         self._journal = journalname
         self._undoname = undoname
         self._queue = []
         # A callback to do something just after releasing transaction.
         if releasefn is None:
             releasefn = lambda tr, success: None
         self._releasefn = releasefn
 
         self._checkambigfiles = set()
         if checkambigfiles:
             self._checkambigfiles.update(checkambigfiles)
 
         self._names = [name]
 
         # A dict dedicated to precisely tracking the changes introduced in the
         # transaction.
         self.changes = {}
 
         # a dict of arguments to be passed to hooks
         self.hookargs = {}
-        self._file = opener.open(self._journal, b"w")
+        self._file = opener.open(self._journal, b"w+")
 
         # a list of ('location', 'path', 'backuppath', cache) entries.
         # - if 'backuppath' is empty, no file existed at backup time
         # - if 'path' is empty, this is a temporary transaction file
         # - if 'location' is not empty, the path is outside main opener reach.
         #   use 'location' value as a key in a vfsmap to find the right 'vfs'
         # (cache is currently unused)
         self._backupentries = []
         self._backupmap = {}
         self._backupjournal = b"%s.backupfiles" % self._journal
         self._backupsfile = opener.open(self._backupjournal, b'w')
         self._backupsfile.write(b'%d\n' % version)
 
         if createmode is not None:
             opener.chmod(self._journal, createmode & 0o666)
             opener.chmod(self._backupjournal, createmode & 0o666)
 
         # hold file generations to be performed on commit
         self._filegenerators = {}
         # hold callback to write pending data for hooks
         self._pendingcallback = {}
         # True if any pending data has ever been written
         self._anypending = False
         # holds callback to call when writing the transaction
         self._finalizecallback = {}
         # holds callback to call when validating the transaction
         # should raise exception if anything is wrong
         self._validatecallback = {}
         if validator is not None:
             self._validatecallback[b'001-userhooks'] = validator
         # hold callback for post transaction close
         self._postclosecallback = {}
         # holds callbacks to call during abort
         self._abortcallback = {}
 
     def __repr__(self):
         name = '/'.join(self._names)
         return '<transaction name=%s, count=%d, usages=%d>' % (
             name,
             self._count,
             self._usages,
         )
 
     def __del__(self):
         if self._journal:
             self._abort()
 
     @active
     def startgroup(self):
         """delay registration of file entry
 
         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         self._queue.append([])
 
     @active
     def endgroup(self):
         """apply delayed registration of file entry.
 
         This is used by strip to delay vision of strip offset. The transaction
         sees either none or all of the strip actions to be done."""
         q = self._queue.pop()
         for f, o in q:
             self._addentry(f, o)
 
     @active
     def add(self, file, offset):
         """record the state of an append-only file before update"""
-        if file in self._map or file in self._backupmap:
+        if file in self._offsetmap or file in self._backupmap:
             return
         if self._queue:
             self._queue[-1].append((file, offset))
             return
 
         self._addentry(file, offset)
 
     def _addentry(self, file, offset):
         """add an append-only entry to memory and on-disk state"""
-        if file in self._map or file in self._backupmap:
+        if file in self._offsetmap or file in self._backupmap:
             return
-        self._entries.append((file, offset))
-        self._map[file] = len(self._entries) - 1
+        self._offsetmap[file] = offset
         # add enough data to the journal to do the truncate
         self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()
 
     @active
     def addbackup(self, file, hardlink=True, location=b''):
         """Adds a backup of the file to the transaction
 
         Calling addbackup() creates a hardlink backup of the specified file
         that is used to recover the file in the event of the transaction
         aborting.
 
         * `file`: the file path, relative to .hg/store
         * `hardlink`: use a hardlink to quickly create the backup
         """
         if self._queue:
             msg = b'cannot use transaction.addbackup inside "group"'
             raise error.ProgrammingError(msg)
 
-        if file in self._map or file in self._backupmap:
+        if file in self._offsetmap or file in self._backupmap:
             return
         vfs = self._vfsmap[location]
         dirname, filename = vfs.split(file)
         backupfilename = b"%s.backup.%s" % (self._journal, filename)
         backupfile = vfs.reljoin(dirname, backupfilename)
         if vfs.exists(file):
             filepath = vfs.join(file)
             backuppath = vfs.join(backupfile)
             util.copyfile(filepath, backuppath, hardlink=hardlink)
         else:
             backupfile = b''
 
         self._addbackupentry((location, file, backupfile, False))
 
     def _addbackupentry(self, entry):
         """register a new backup entry and write it to disk"""
         self._backupentries.append(entry)
         self._backupmap[entry[1]] = len(self._backupentries) - 1
         self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
         self._backupsfile.flush()
 
     @active
     def registertmp(self, tmpfile, location=b''):
         """register a temporary transaction file
 
         Such files will be deleted when the transaction exits (on both
         failure and success).
         """
         self._addbackupentry((location, b'', tmpfile, False))
 
     @active
     def addfilegenerator(
         self, genid, filenames, genfunc, order=0, location=b''
     ):
         """add a function to generate some files at transaction commit
 
         The `genfunc` argument is a function capable of generating proper
         content for each entry in the `filenames` tuple.
 
         At transaction close time, `genfunc` will be called with one file
         object argument per entry in `filenames`.
 
         The transaction itself is responsible for the backup, creation and
         final write of such files.
 
         The `genid` argument is used to ensure the same set of files is only
         generated once. A call to `addfilegenerator` for a `genid` already
         present will overwrite the old entry.
 
         The `order` argument may be used to control the order in which multiple
         generators will be executed.
 
         The `location` argument may be used to indicate the files are located
         outside of the standard directory for transaction. It should match
         one of the keys of the `transaction.vfsmap` dictionary.
         """
         # For now, we are unable to do proper backup and restore of custom vfs
         # but for bookmarks that are handled outside this mechanism.
         self._filegenerators[genid] = (order, filenames, genfunc, location)
 
     @active
     def removefilegenerator(self, genid):
         """reverse of addfilegenerator, remove a file generator function"""
         if genid in self._filegenerators:
             del self._filegenerators[genid]
 
     def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
         # write files registered for generation
         any = False
 
         if group == GEN_GROUP_ALL:
             skip_post = skip_pre = False
         else:
             skip_pre = group == GEN_GROUP_POST_FINALIZE
             skip_post = group == GEN_GROUP_PRE_FINALIZE
 
         for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
             any = True
             order, filenames, genfunc, location = entry
 
             # for generation at closing, check if it's before or after finalize
             is_post = id in postfinalizegenerators
             if skip_post and is_post:
                 continue
             elif skip_pre and not is_post:
                 continue
 
             vfs = self._vfsmap[location]
             files = []
             try:
                 for name in filenames:
                     name += suffix
                     if suffix:
                         self.registertmp(name, location=location)
                         checkambig = False
                     else:
                         self.addbackup(name, location=location)
                         checkambig = (name, location) in self._checkambigfiles
                     files.append(
                         vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                     )
                 genfunc(*files)
                 for f in files:
                     f.close()
                 # skip discard() loop since we're sure no open file remains
                 del files[:]
             finally:
                 for f in files:
                     f.discard()
         return any
 
     @active
     def findoffset(self, file):
-        if file in self._map:
-            return self._entries[self._map[file]][1]
-        return None
+        return self._offsetmap.get(file)
+
+    @active
+    def readjournal(self):
+        self._file.seek(0)
+        entries = []
+        for l in self._file:
+            file, troffset = l.split(b'\0')
+            entries.append((file, int(troffset)))
+        return entries
402
407
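The journal that the new readjournal() re-parses is one b"<path>\0<offset>\n" record per touched file, the same format described in the rollback() docstring further down. A standalone sketch of the round trip, with io.BytesIO standing in for the journal file:

import io

journal = io.BytesIO()
for name, offset in ((b'data/foo.i', 1024), (b'data/bar.i', 0)):
    journal.write(b"%s\0%d\n" % (name, offset))

journal.seek(0)
entries = []
for l in journal:
    name, troffset = l.split(b'\0')
    entries.append((name, int(troffset)))  # int() tolerates b'1024\n'

assert entries == [(b'data/foo.i', 1024), (b'data/bar.i', 0)]
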
    @active
    def replace(self, file, offset):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

-        if file not in self._map:
+        if file not in self._offsetmap:
            raise KeyError(file)
-        index = self._map[file]
-        self._entries[index] = (file, offset)
+        self._offsetmap[file] = offset
        self._file.write(b"%s\0%d\n" % (file, offset))
        self._file.flush()

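This hunk is the heart of the commit: the old code kept a list of (file, offset) entries plus a separate name-to-index map and had to update both in lockstep, while the new code keeps a single offset dictionary. A sketch of the two shapes, with hypothetical data:

# old shape: two structures that must stay in sync
entries = [(b'data/foo.i', 1024)]
index_map = {b'data/foo.i': 0}
entries[index_map[b'data/foo.i']] = (b'data/foo.i', 2048)

# new shape: one dict, updated in a single O(1) step
offsetmap = {b'data/foo.i': 1024}
offsetmap[b'data/foo.i'] = 2048

assert offsetmap[b'data/foo.i'] == entries[0][1]
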
    @active
    def nest(self, name='<unnamed>'):
        self._count += 1
        self._usages += 1
        self._names.append(name)
        return self

    def release(self):
        if self._count > 0:
            self._usages -= 1
        if self._names:
            self._names.pop()
        # if the transaction scopes are left without being closed, fail
        if self._count > 0 and self._usages == 0:
            self._abort()

    def running(self):
        return self._count > 0

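nest() does not create a child transaction; it hands back the same object with both counters bumped, and release() only aborts when a scope is dropped without an intervening close(). A standalone model of that accounting (not Mercurial code):

class TxnCounter:
    def __init__(self):
        self._count = 1    # open scopes that still owe a close()
        self._usages = 1   # scopes that have not been released yet
        self.aborted = False

    def nest(self):
        self._count += 1
        self._usages += 1
        return self        # same object, just refcounted

    def close(self):
        self._count -= 1

    def release(self):
        if self._count > 0:
            self._usages -= 1
        if self._count > 0 and self._usages == 0:
            self.aborted = True  # stands in for self._abort()

tr = TxnCounter()
inner = tr.nest()   # inner is tr itself
inner.release()     # inner scope ends without close()
tr.release()        # outer scope ends without close(): rollback
assert tr.aborted
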
    def addpending(self, category, callback):
        """add a callback to be called when the transaction is pending

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._pendingcallback[category] = callback

    @active
    def writepending(self):
        '''write pending files to temporary versions

        This is used to allow hooks to view a transaction before commit'''
        categories = sorted(self._pendingcallback)
        for cat in categories:
            # remove callback since the data will have been flushed
            any = self._pendingcallback.pop(cat)(self)
            self._anypending = self._anypending or any
        self._anypending |= self._generatefiles(suffix=b'.pending')
        return self._anypending

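writepending() exists so a pre-close hook can inspect data that is not committed yet: each category callback reports whether it wrote anything, and the results are folded into a single "is there pending data?" flag. A standalone sketch of that folding, with made-up categories:

pendingcallback = {
    b'changelog': lambda tr: True,    # pretend this wrote a '.pending' file
    b'bookmarks': lambda tr: False,   # nothing to expose
}

anypending = False
for cat in sorted(pendingcallback):
    # pop, as above: once the data is flushed, the callback is spent
    anypending = pendingcallback.pop(cat)(None) or anypending

assert anypending and not pendingcallback
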
    @active
    def hasfinalize(self, category):
        """check if a callback already exists for a category"""
        return category in self._finalizecallback

    @active
    def addfinalize(self, category, callback):
        """add a callback to be called when the transaction is closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting old callbacks with
        newer callbacks.
        """
        self._finalizecallback[category] = callback

    @active
    def addpostclose(self, category, callback):
        """add or replace a callback to be called after the transaction closed

        The transaction will be given as callback's first argument.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._postclosecallback[category] = callback

    @active
    def getpostclose(self, category):
        """return a postclose callback added before, or None"""
        return self._postclosecallback.get(category, None)

    @active
    def addabort(self, category, callback):
        """add a callback to be called when the transaction is aborted.

        The transaction will be given as the first argument to the callback.

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._abortcallback[category] = callback

    @active
    def addvalidator(self, category, callback):
        """adds a callback to be called when validating the transaction.

        The transaction will be given as the first argument to the callback.

        The callback should raise an exception to abort the transaction."""
        self._validatecallback[category] = callback

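Validators are the veto point: close() below runs every registered validator before any finalization, and a single raise aborts the whole transaction. A standalone sketch (the category name and the size rule are made up):

validatecallback = {}

def addvalidator(category, callback):
    validatecallback[category] = callback

def refuse_large(tr):
    if tr['bytes_added'] > 10 ** 9:
        raise ValueError('transaction too large')

addvalidator(b'size-guard', refuse_large)

txn = {'bytes_added': 2 * 10 ** 9}
try:
    for category in sorted(validatecallback):
        validatecallback[category](txn)
except ValueError:
    print('validator refused the transaction; abort and roll back')
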
    @active
    def close(self):
        '''commit the transaction'''
        if self._count == 1:
            for category in sorted(self._validatecallback):
                self._validatecallback[category](self)
            self._validatecallback = None # Help prevent cycles.
            self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
            while self._finalizecallback:
                callbacks = self._finalizecallback
                self._finalizecallback = {}
                categories = sorted(callbacks)
                for cat in categories:
                    callbacks[cat](self)
            # Prevent double usage and help clear cycles.
            self._finalizecallback = None
            self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

        self._count -= 1
        if self._count != 0:
            return
        self._file.close()
        self._backupsfile.close()
        # cleanup temporary files
        for l, f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if not f and b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
-        self._entries = []
+        self._offsetmap = {}
        self._writeundo()
        if self._after:
            self._after()
            self._after = None # Help prevent cycles.
        if self._opener.isfile(self._backupjournal):
            self._opener.unlink(self._backupjournal)
        if self._opener.isfile(self._journal):
            self._opener.unlink(self._journal)
        for l, _f, b, c in self._backupentries:
            if l not in self._vfsmap and c:
                self._report(
                    b"couldn't remove %s: unknown cache location "
                    b"%s\n" % (b, l)
                )
                continue
            vfs = self._vfsmap[l]
            if b and vfs.exists(b):
                try:
                    vfs.unlink(b)
                except (IOError, OSError, error.Abort) as inst:
                    if not c:
                        raise
                    # Abort may be raised by a read-only opener
                    self._report(
                        b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                    )
        self._backupentries = []
        self._journal = None

        self._releasefn(self, True) # notify success of closing transaction
        self._releasefn = None # Help prevent cycles.

        # run post close action
        categories = sorted(self._postclosecallback)
        for cat in categories:
            self._postclosecallback[cat](self)
        # Prevent double usage and help clear cycles.
        self._postclosecallback = None

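Note the `while self._finalizecallback` drain loop in close(): a finalizer may register further finalizers, and each wave is re-sorted and run until the table is empty. A standalone model of that behaviour:

finalizecallback = {}
ran = []

def late(tr):
    ran.append(b'late')

def early(tr):
    ran.append(b'early')
    finalizecallback[b'zz-late'] = late  # registered mid-finalize

finalizecallback[b'aa-early'] = early

while finalizecallback:
    # swap in a fresh dict so new registrations land in the next wave
    callbacks, finalizecallback = finalizecallback, {}
    for cat in sorted(callbacks):
        callbacks[cat](None)

assert ran == [b'early', b'late']
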
    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _writeundo(self):
        """write transaction data for possible future undo call"""
        if self._undoname is None:
            return
        undobackupfile = self._opener.open(
            b"%s.backupfiles" % self._undoname, b'w'
        )
        undobackupfile.write(b'%d\n' % version)
        for l, f, b, c in self._backupentries:
            if not f:  # temporary file
                continue
            if not b:
                u = b''
            else:
                if l not in self._vfsmap and c:
                    self._report(
                        b"couldn't remove %s: unknown cache location "
                        b"%s\n" % (b, l)
                    )
                    continue
                vfs = self._vfsmap[l]
                base, name = vfs.split(b)
                assert name.startswith(self._journal), name
                uname = name.replace(self._journal, self._undoname, 1)
                u = vfs.reljoin(base, uname)
                util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
            undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
        undobackupfile.close()

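The undo description written here uses the same record layout as the backup journal: a version line followed by one b"<location>\0<file>\0<undo-backup>\0<cache-flag>\n" record per entry. A standalone sketch of writing and re-parsing one record, assuming the module-level `version` constant is 2 (the file names are made up):

version = 2  # assumed value of the module-level constant

record = b'\0'.join([b'', b'00changelog.i', b'undo.00changelog.i', b'0'])
payload = b'%d\n' % version + record + b'\n'

lines = payload.splitlines()
assert lines[0] == b'%d' % version
for line in lines[1:]:
    location, name, backup, cache = line.split(b'\0')
    # an empty location means the default vfs
    print(location or b'<default vfs>', name, backup, cache)
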
    def _abort(self):
+        entries = self.readjournal()
        self._count = 0
        self._usages = 0
        self._file.close()
        self._backupsfile.close()

        try:
-            if not self._entries and not self._backupentries:
+            if not self._offsetmap and not self._backupentries:
                if self._backupjournal:
                    self._opener.unlink(self._backupjournal)
                if self._journal:
                    self._opener.unlink(self._journal)
                return

            self._report(_(b"transaction abort!\n"))

            try:
                for cat in sorted(self._abortcallback):
                    self._abortcallback[cat](self)
                # Prevent double usage and help clear cycles.
                self._abortcallback = None
                _playback(
                    self._journal,
                    self._report,
                    self._opener,
                    self._vfsmap,
-                    self._entries,
+                    entries,
                    self._backupentries,
                    False,
                    checkambigfiles=self._checkambigfiles,
                )
                self._report(_(b"rollback completed\n"))
            except BaseException as exc:
                self._report(_(b"rollback failed - please run hg recover\n"))
                self._report(
                    _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                )
        finally:
            self._journal = None
            self._releasefn(self, False) # notify failure of transaction
            self._releasefn = None # Help prevent cycles.


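During abort, _playback() (defined earlier in this file) uses the journal entries read above to truncate each file back to its recorded pre-transaction length. The core action, sketched on an in-memory file:

import io

f = io.BytesIO(b'old-bytes+new-bytes')
recorded_offset = 9  # len(b'old-bytes'): the size before the transaction

f.truncate(recorded_offset)
assert f.getvalue() == b'old-bytes'
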
def rollback(opener, vfsmap, file, report, checkambigfiles=None):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
      to truncate each file. The file should contain a list of
      file\0offset pairs, delimited by newlines. The corresponding
      '*.backupfiles' file should contain a list of file\0backupfile
      pairs, delimited by \0.

    `checkambigfiles` is a set of (path, vfs-location) tuples,
    which determine whether file stat ambiguity should be avoided
    when restoring the corresponding files.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split(b'\0')
            entries.append((f, int(o)))
        except ValueError:
            report(
                _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
            )

    backupjournal = b"%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        if lines:
            ver = lines[0][:-1]
            if ver == (b'%d' % version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        l, f, b, c = line.split(b'\0')
                        backupentries.append((l, f, b, bool(c)))
            else:
                report(
                    _(
                        b"journal was created by a different version of "
                        b"Mercurial\n"
                    )
                )

    _playback(
        file,
        report,
        opener,
        vfsmap,
        entries,
        backupentries,
        checkambigfiles=checkambigfiles,
    )