transaction: split new files into a separate set...
Joerg Sonnenberger
r46476:fae02ffc default draft
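
The idea behind the change: a journal entry with offset 0 describes a file that did not exist before the transaction, so rollback can simply unlink it, whereas a non-zero offset describes a pre-existing append-only file that must be truncated back to that offset. The patch therefore keeps newly created files in a separate set (`_newfiles`) instead of storing zero offsets in `_offsetmap`. The following minimal, self-contained sketch (hypothetical class and names chosen for illustration, not Mercurial's actual transaction API) shows that bookkeeping split:

    # Minimal sketch of the bookkeeping this patch introduces (hypothetical
    # stand-alone class, not Mercurial's transaction object).
    class JournalSketch(object):
        def __init__(self):
            self.offsetmap = {}    # pre-existing files -> offset to truncate to
            self.newfiles = set()  # files created inside the transaction

        def add(self, file, offset):
            """record the state of an append-only file before update"""
            if file in self.newfiles or file in self.offsetmap:
                return  # first registration wins
            if offset:
                self.offsetmap[file] = offset  # truncate back here on abort
            else:
                self.newfiles.add(file)        # unlink entirely on abort

        def findoffset(self, file):
            if file in self.newfiles:
                return 0
            return self.offsetmap.get(file)


    journal = JournalSketch()
    journal.add(b'data/new.i', 0)     # created by this transaction
    journal.add(b'data/old.i', 4096)  # existing file, appended to
    assert journal.findoffset(b'data/new.i') == 0
    assert journal.findoffset(b'data/old.i') == 4096
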
diff --git a/mercurial/repair.py b/mercurial/repair.py
--- a/mercurial/repair.py
+++ b/mercurial/repair.py
@@ -1,544 +1,545 @@
 # repair.py - functions for repository repair for mercurial
 #
 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
 # Copyright 2007 Matt Mackall
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 from __future__ import absolute_import
 
 import errno
 
 from .i18n import _
 from .node import (
     hex,
     short,
 )
 from . import (
     bundle2,
     changegroup,
     discovery,
     error,
     exchange,
     obsolete,
     obsutil,
     pathutil,
     phases,
     pycompat,
     requirements,
     scmutil,
     util,
 )
 from .utils import (
     hashutil,
     stringutil,
 )
 
 
 def backupbundle(
     repo, bases, heads, node, suffix, compress=True, obsolescence=True
 ):
     """create a bundle with the specified revisions as a backup"""
 
     backupdir = b"strip-backup"
     vfs = repo.vfs
     if not vfs.isdir(backupdir):
         vfs.mkdir(backupdir)
 
     # Include a hash of all the nodes in the filename for uniqueness
     allcommits = repo.set(b'%ln::%ln', bases, heads)
     allhashes = sorted(c.hex() for c in allcommits)
     totalhash = hashutil.sha1(b''.join(allhashes)).digest()
     name = b"%s/%s-%s-%s.hg" % (
         backupdir,
         short(node),
         hex(totalhash[:4]),
         suffix,
     )
 
     cgversion = changegroup.localversion(repo)
     comp = None
     if cgversion != b'01':
         bundletype = b"HG20"
         if compress:
             comp = b'BZ'
     elif compress:
         bundletype = b"HG10BZ"
     else:
         bundletype = b"HG10UN"
 
     outgoing = discovery.outgoing(repo, missingroots=bases, ancestorsof=heads)
     contentopts = {
         b'cg.version': cgversion,
         b'obsolescence': obsolescence,
         b'phases': True,
     }
     return bundle2.writenewbundle(
         repo.ui,
         repo,
         b'strip',
         name,
         bundletype,
         outgoing,
         contentopts,
         vfs,
         compression=comp,
     )
 
 
 def _collectfiles(repo, striprev):
     """find out the filelogs affected by the strip"""
     files = set()
 
     for x in pycompat.xrange(striprev, len(repo)):
         files.update(repo[x].files())
 
     return sorted(files)
 
 
 def _collectrevlog(revlog, striprev):
     _, brokenset = revlog.getstrippoint(striprev)
     return [revlog.linkrev(r) for r in brokenset]
 
 
 def _collectbrokencsets(repo, files, striprev):
     """return the changesets which will be broken by the truncation"""
     s = set()
 
     for revlog in manifestrevlogs(repo):
         s.update(_collectrevlog(revlog, striprev))
     for fname in files:
         s.update(_collectrevlog(repo.file(fname), striprev))
 
     return s
 
 
 def strip(ui, repo, nodelist, backup=True, topic=b'backup'):
     # This function requires the caller to lock the repo, but it operates
     # within a transaction of its own, and thus requires there to be no current
     # transaction when it is called.
     if repo.currenttransaction() is not None:
         raise error.ProgrammingError(b'cannot strip from inside a transaction')
 
     # Simple way to maintain backwards compatibility for this
     # argument.
     if backup in [b'none', b'strip']:
         backup = False
 
     repo = repo.unfiltered()
     repo.destroying()
     vfs = repo.vfs
     # load bookmarks before changelog to avoid side effects from an outdated
     # changelog (see repo._refreshchangelog)
     repo._bookmarks
     cl = repo.changelog
 
     # TODO handle undo of merge sets
     if isinstance(nodelist, bytes):
         nodelist = [nodelist]
     striplist = [cl.rev(node) for node in nodelist]
     striprev = min(striplist)
 
     files = _collectfiles(repo, striprev)
     saverevs = _collectbrokencsets(repo, files, striprev)
 
     # Some revisions with rev > striprev may not be descendants of striprev.
     # We have to find these revisions and put them in a bundle, so that
     # we can restore them after the truncations.
     # To create the bundle we use repo.changegroupsubset which requires
     # the list of heads and bases of the set of interesting revisions.
     # (head = revision in the set that has no descendant in the set;
     #  base = revision in the set that has no ancestor in the set)
     tostrip = set(striplist)
     saveheads = set(saverevs)
     for r in cl.revs(start=striprev + 1):
         if any(p in tostrip for p in cl.parentrevs(r)):
             tostrip.add(r)
 
         if r not in tostrip:
             saverevs.add(r)
             saveheads.difference_update(cl.parentrevs(r))
             saveheads.add(r)
     saveheads = [cl.node(r) for r in saveheads]
 
     # compute base nodes
     if saverevs:
         descendants = set(cl.descendants(saverevs))
         saverevs.difference_update(descendants)
     savebases = [cl.node(r) for r in saverevs]
     stripbases = [cl.node(r) for r in tostrip]
 
     stripobsidx = obsmarkers = ()
     if repo.ui.configbool(b'devel', b'strip-obsmarkers'):
         obsmarkers = obsutil.exclusivemarkers(repo, stripbases)
     if obsmarkers:
         stripobsidx = [
             i for i, m in enumerate(repo.obsstore) if m in obsmarkers
         ]
 
     newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
 
     backupfile = None
     node = nodelist[-1]
     if backup:
         backupfile = _createstripbackup(repo, stripbases, node, topic)
     # create a changegroup for all the branches we need to keep
     tmpbundlefile = None
     if saveheads:
         # do not compress temporary bundle if we remove it from disk later
         #
         # We do not include obsolescence, it might re-introduce prune markers
         # we are trying to strip. This is harmless since the stripped markers
         # are already backed up and we did not touch the markers for the
         # saved changesets.
         tmpbundlefile = backupbundle(
             repo,
             savebases,
             saveheads,
             node,
             b'temp',
             compress=False,
             obsolescence=False,
         )
 
     with ui.uninterruptible():
         try:
             with repo.transaction(b"strip") as tr:
                 # TODO this code violates the interface abstraction of the
                 # transaction and makes assumptions that file storage is
                 # using append-only files. We'll need some kind of storage
                 # API to handle stripping for us.
                 oldfiles = set(tr._offsetmap.keys())
+                oldfiles.update(tr._newfiles)
 
                 tr.startgroup()
                 cl.strip(striprev, tr)
                 stripmanifest(repo, striprev, tr, files)
 
                 for fn in files:
                     repo.file(fn).strip(striprev, tr)
                 tr.endgroup()
 
                 entries = tr.readjournal()
 
                 for file, troffset in entries:
                     if file in oldfiles:
                         continue
                     with repo.svfs(file, b'a', checkambig=True) as fp:
                         fp.truncate(troffset)
                     if troffset == 0:
                         repo.store.markremoved(file)
 
                 deleteobsmarkers(repo.obsstore, stripobsidx)
                 del repo.obsstore
                 repo.invalidatevolatilesets()
                 repo._phasecache.filterunknown(repo)
 
             if tmpbundlefile:
                 ui.note(_(b"adding branch\n"))
                 f = vfs.open(tmpbundlefile, b"rb")
                 gen = exchange.readbundle(ui, f, tmpbundlefile, vfs)
                 if not repo.ui.verbose:
                     # silence internal shuffling chatter
                     repo.ui.pushbuffer()
                 tmpbundleurl = b'bundle:' + vfs.join(tmpbundlefile)
                 txnname = b'strip'
                 if not isinstance(gen, bundle2.unbundle20):
                     txnname = b"strip\n%s" % util.hidepassword(tmpbundleurl)
                 with repo.transaction(txnname) as tr:
                     bundle2.applybundle(
                         repo, gen, tr, source=b'strip', url=tmpbundleurl
                     )
                 if not repo.ui.verbose:
                     repo.ui.popbuffer()
                 f.close()
 
             with repo.transaction(b'repair') as tr:
                 bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
                 repo._bookmarks.applychanges(repo, tr, bmchanges)
 
             # remove undo files
             for undovfs, undofile in repo.undofiles():
                 try:
                     undovfs.unlink(undofile)
                 except OSError as e:
                     if e.errno != errno.ENOENT:
                         ui.warn(
                             _(b'error removing %s: %s\n')
                             % (
                                 undovfs.join(undofile),
                                 stringutil.forcebytestr(e),
                             )
                         )
 
         except:  # re-raises
             if backupfile:
                 ui.warn(
                     _(b"strip failed, backup bundle stored in '%s'\n")
                     % vfs.join(backupfile)
                 )
             if tmpbundlefile:
                 ui.warn(
                     _(b"strip failed, unrecovered changes stored in '%s'\n")
                     % vfs.join(tmpbundlefile)
                 )
                 ui.warn(
                     _(
                         b"(fix the problem, then recover the changesets with "
                         b"\"hg unbundle '%s'\")\n"
                     )
                     % vfs.join(tmpbundlefile)
                 )
             raise
         else:
             if tmpbundlefile:
                 # Remove temporary bundle only if there were no exceptions
                 vfs.unlink(tmpbundlefile)
 
     repo.destroyed()
     # return the backup file path (or None if 'backup' was False) so
     # extensions can use it
     return backupfile
 
 
 def softstrip(ui, repo, nodelist, backup=True, topic=b'backup'):
     """perform a "soft" strip using the archived phase"""
     tostrip = [c.node() for c in repo.set(b'sort(%ln::)', nodelist)]
     if not tostrip:
         return None
 
     newbmtarget, updatebm = _bookmarkmovements(repo, tostrip)
     if backup:
         node = tostrip[0]
         backupfile = _createstripbackup(repo, tostrip, node, topic)
 
     with repo.transaction(b'strip') as tr:
         phases.retractboundary(repo, tr, phases.archived, tostrip)
         bmchanges = [(m, repo[newbmtarget].node()) for m in updatebm]
         repo._bookmarks.applychanges(repo, tr, bmchanges)
     return backupfile
 
 
 def _bookmarkmovements(repo, tostrip):
     # compute necessary bookmark movement
     bm = repo._bookmarks
     updatebm = []
     for m in bm:
         rev = repo[bm[m]].rev()
         if rev in tostrip:
             updatebm.append(m)
     newbmtarget = None
     # If we need to move bookmarks, compute bookmark
     # targets. Otherwise we can skip doing this logic.
     if updatebm:
         # For a set s, max(parents(s) - s) is the same as max(heads(::s - s)),
         # but is much faster
         newbmtarget = repo.revs(b'max(parents(%ld) - (%ld))', tostrip, tostrip)
         if newbmtarget:
             newbmtarget = repo[newbmtarget.first()].node()
         else:
             newbmtarget = b'.'
     return newbmtarget, updatebm
 
 
 def _createstripbackup(repo, stripbases, node, topic):
     # back up the changesets we are about to strip
     vfs = repo.vfs
     cl = repo.changelog
     backupfile = backupbundle(repo, stripbases, cl.heads(), node, topic)
     repo.ui.status(_(b"saved backup bundle to %s\n") % vfs.join(backupfile))
     repo.ui.log(
         b"backupbundle", b"saved backup bundle to %s\n", vfs.join(backupfile)
     )
     return backupfile
 
 
 def safestriproots(ui, repo, nodes):
     """return list of roots of nodes where descendants are covered by nodes"""
     torev = repo.unfiltered().changelog.rev
     revs = {torev(n) for n in nodes}
     # tostrip = wanted - unsafe = wanted - ancestors(orphaned)
     # orphaned = affected - wanted
     # affected = descendants(roots(wanted))
     # wanted = revs
     revset = b'%ld - ( ::( (roots(%ld):: and not _phase(%s)) -%ld) )'
     tostrip = set(repo.revs(revset, revs, revs, phases.internal, revs))
     notstrip = revs - tostrip
     if notstrip:
         nodestr = b', '.join(sorted(short(repo[n].node()) for n in notstrip))
         ui.warn(
             _(b'warning: orphaned descendants detected, not stripping %s\n')
             % nodestr
         )
     return [c.node() for c in repo.set(b'roots(%ld)', tostrip)]
 
 
 class stripcallback(object):
     """used as a transaction postclose callback"""
 
     def __init__(self, ui, repo, backup, topic):
         self.ui = ui
         self.repo = repo
         self.backup = backup
         self.topic = topic or b'backup'
         self.nodelist = []
 
     def addnodes(self, nodes):
         self.nodelist.extend(nodes)
 
     def __call__(self, tr):
         roots = safestriproots(self.ui, self.repo, self.nodelist)
         if roots:
             strip(self.ui, self.repo, roots, self.backup, self.topic)
 
 
 def delayedstrip(ui, repo, nodelist, topic=None, backup=True):
     """like strip, but works inside a transaction and won't strip irrelevant revs
 
     nodelist must explicitly contain all descendants. Otherwise a warning will
     be printed that some nodes are not stripped.
 
     Will do a backup if `backup` is True. The last non-None "topic" will be
     used as the backup topic name. The default backup topic name is "backup".
     """
     tr = repo.currenttransaction()
     if not tr:
         nodes = safestriproots(ui, repo, nodelist)
         return strip(ui, repo, nodes, backup=backup, topic=topic)
     # transaction postclose callbacks are called in alphabetical order.
     # use '\xff' as a prefix so we are likely to be called last.
     callback = tr.getpostclose(b'\xffstrip')
     if callback is None:
         callback = stripcallback(ui, repo, backup=backup, topic=topic)
         tr.addpostclose(b'\xffstrip', callback)
     if topic:
         callback.topic = topic
     callback.addnodes(nodelist)
 
 
 def stripmanifest(repo, striprev, tr, files):
     for revlog in manifestrevlogs(repo):
         revlog.strip(striprev, tr)
 
 
 def manifestrevlogs(repo):
     yield repo.manifestlog.getstorage(b'')
     if scmutil.istreemanifest(repo):
         # This logic is safe if treemanifest isn't enabled, but also
         # pointless, so we skip it if treemanifest isn't enabled.
         for unencoded, encoded, size in repo.store.datafiles():
             if unencoded.startswith(b'meta/') and unencoded.endswith(
                 b'00manifest.i'
             ):
                 dir = unencoded[5:-12]
                 yield repo.manifestlog.getstorage(dir)
 
 
 def rebuildfncache(ui, repo):
     """Rebuilds the fncache file from repo history.
 
     Missing entries will be added. Extra entries will be removed.
     """
     repo = repo.unfiltered()
 
     if b'fncache' not in repo.requirements:
         ui.warn(
             _(
                 b'(not rebuilding fncache because repository does not '
                 b'support fncache)\n'
             )
         )
         return
 
     with repo.lock():
         fnc = repo.store.fncache
         fnc.ensureloaded(warn=ui.warn)
 
         oldentries = set(fnc.entries)
         newentries = set()
         seenfiles = set()
 
         progress = ui.makeprogress(
             _(b'rebuilding'), unit=_(b'changesets'), total=len(repo)
         )
         for rev in repo:
             progress.update(rev)
 
             ctx = repo[rev]
             for f in ctx.files():
                 # This is to minimize I/O.
                 if f in seenfiles:
                     continue
                 seenfiles.add(f)
 
                 i = b'data/%s.i' % f
                 d = b'data/%s.d' % f
 
                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)
 
         progress.complete()
 
         if requirements.TREEMANIFEST_REQUIREMENT in repo.requirements:
             # This logic is safe if treemanifest isn't enabled, but also
             # pointless, so we skip it if treemanifest isn't enabled.
             for dir in pathutil.dirs(seenfiles):
                 i = b'meta/%s/00manifest.i' % dir
                 d = b'meta/%s/00manifest.d' % dir
 
                 if repo.store._exists(i):
                     newentries.add(i)
                 if repo.store._exists(d):
                     newentries.add(d)
 
         addcount = len(newentries - oldentries)
         removecount = len(oldentries - newentries)
         for p in sorted(oldentries - newentries):
             ui.write(_(b'removing %s\n') % p)
         for p in sorted(newentries - oldentries):
             ui.write(_(b'adding %s\n') % p)
 
         if addcount or removecount:
             ui.write(
                 _(b'%d items added, %d removed from fncache\n')
                 % (addcount, removecount)
             )
             fnc.entries = newentries
             fnc._dirty = True
 
             with repo.transaction(b'fncache') as tr:
                 fnc.write(tr)
         else:
             ui.write(_(b'fncache already up to date\n'))
 
 
 def deleteobsmarkers(obsstore, indices):
     """Delete some obsmarkers from obsstore and return how many were deleted
 
     'indices' is a list of ints which are the indices
     of the markers to be deleted.
 
     Every invocation of this function completely rewrites the obsstore file,
     skipping the markers we want removed. A new temporary file is created,
     the remaining markers are written there, and on .close() this file gets
     atomically renamed to obsstore, thus guaranteeing consistency."""
     if not indices:
         # we don't want to rewrite the obsstore with the same content
         return
 
     left = []
     current = obsstore._all
     n = 0
     for i, m in enumerate(current):
         if i in indices:
             n += 1
             continue
         left.append(m)
 
     newobsstorefile = obsstore.svfs(b'obsstore', b'w', atomictemp=True)
     for bytes in obsolete.encodemarkers(left, True, obsstore._version):
         newobsstorefile.write(bytes)
     newobsstorefile.close()
     return n
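
The repair.py hunk above exists because strip() reaches into the transaction's private bookkeeping: it snapshots every file the transaction already knows about, then truncates only the journal entries added by the strip group itself. Since files can now live in either `tr._offsetmap` or `tr._newfiles`, the snapshot must union both; otherwise a file created earlier in the same transaction would no longer be recognized and would be truncated as if the strip had registered it. A sketch of that snapshot-then-skip pattern (a standalone helper written here for illustration; `_files_to_truncate` is not a real Mercurial function):

    # Hypothetical illustration of the pattern used by strip(); 'tr' stands
    # in for a live transaction exposing the two fields shown in the diff.
    def _files_to_truncate(tr):
        # union of pre-existing append-only files and freshly created ones
        oldfiles = set(tr._offsetmap.keys())
        oldfiles.update(tr._newfiles)

        for file, troffset in tr.readjournal():
            if file in oldfiles:
                # registered before the strip group ran; leave it alone
                continue
            yield file, troffset
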
diff --git a/mercurial/transaction.py b/mercurial/transaction.py
--- a/mercurial/transaction.py
+++ b/mercurial/transaction.py
@@ -1,737 +1,765 @@
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import errno
16 import errno
17
17
18 from .i18n import _
18 from .i18n import _
19 from . import (
19 from . import (
20 error,
20 error,
21 pycompat,
21 pycompat,
22 util,
22 util,
23 )
23 )
24 from .utils import stringutil
24 from .utils import stringutil
25
25
26 version = 2
26 version = 2
27
27
28 # These are the file generators that should only be executed after the
28 # These are the file generators that should only be executed after the
29 # finalizers are done, since they rely on the output of the finalizers (like
29 # finalizers are done, since they rely on the output of the finalizers (like
30 # the changelog having been written).
30 # the changelog having been written).
31 postfinalizegenerators = {b'bookmarks', b'dirstate'}
31 postfinalizegenerators = {b'bookmarks', b'dirstate'}
32
32
33 GEN_GROUP_ALL = b'all'
33 GEN_GROUP_ALL = b'all'
34 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
34 GEN_GROUP_PRE_FINALIZE = b'prefinalize'
35 GEN_GROUP_POST_FINALIZE = b'postfinalize'
35 GEN_GROUP_POST_FINALIZE = b'postfinalize'
36
36
37
37
38 def active(func):
38 def active(func):
39 def _active(self, *args, **kwds):
39 def _active(self, *args, **kwds):
40 if self._count == 0:
40 if self._count == 0:
41 raise error.ProgrammingError(
41 raise error.ProgrammingError(
42 b'cannot use transaction when it is already committed/aborted'
42 b'cannot use transaction when it is already committed/aborted'
43 )
43 )
44 return func(self, *args, **kwds)
44 return func(self, *args, **kwds)
45
45
46 return _active
46 return _active
47
47
48
48
49 def _playback(
49 def _playback(
50 journal,
50 journal,
51 report,
51 report,
52 opener,
52 opener,
53 vfsmap,
53 vfsmap,
54 entries,
54 entries,
55 backupentries,
55 backupentries,
56 unlink=True,
56 unlink=True,
57 checkambigfiles=None,
57 checkambigfiles=None,
58 ):
58 ):
59 for f, o in entries:
59 for f, o in entries:
60 if o or not unlink:
60 if o or not unlink:
61 checkambig = checkambigfiles and (f, b'') in checkambigfiles
61 checkambig = checkambigfiles and (f, b'') in checkambigfiles
62 try:
62 try:
63 fp = opener(f, b'a', checkambig=checkambig)
63 fp = opener(f, b'a', checkambig=checkambig)
64 if fp.tell() < o:
64 if fp.tell() < o:
65 raise error.Abort(
65 raise error.Abort(
66 _(
66 _(
67 b"attempted to truncate %s to %d bytes, but it was "
67 b"attempted to truncate %s to %d bytes, but it was "
68 b"already %d bytes\n"
68 b"already %d bytes\n"
69 )
69 )
70 % (f, o, fp.tell())
70 % (f, o, fp.tell())
71 )
71 )
72 fp.truncate(o)
72 fp.truncate(o)
73 fp.close()
73 fp.close()
74 except IOError:
74 except IOError:
75 report(_(b"failed to truncate %s\n") % f)
75 report(_(b"failed to truncate %s\n") % f)
76 raise
76 raise
77 else:
77 else:
78 try:
78 try:
79 opener.unlink(f)
79 opener.unlink(f)
80 except (IOError, OSError) as inst:
80 except (IOError, OSError) as inst:
81 if inst.errno != errno.ENOENT:
81 if inst.errno != errno.ENOENT:
82 raise
82 raise
83
83
84 backupfiles = []
84 backupfiles = []
85 for l, f, b, c in backupentries:
85 for l, f, b, c in backupentries:
86 if l not in vfsmap and c:
86 if l not in vfsmap and c:
87 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
87 report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
88 vfs = vfsmap[l]
88 vfs = vfsmap[l]
89 try:
89 try:
90 if f and b:
90 if f and b:
91 filepath = vfs.join(f)
91 filepath = vfs.join(f)
92 backuppath = vfs.join(b)
92 backuppath = vfs.join(b)
93 checkambig = checkambigfiles and (f, l) in checkambigfiles
93 checkambig = checkambigfiles and (f, l) in checkambigfiles
94 try:
94 try:
95 util.copyfile(backuppath, filepath, checkambig=checkambig)
95 util.copyfile(backuppath, filepath, checkambig=checkambig)
96 backupfiles.append(b)
96 backupfiles.append(b)
97 except IOError:
97 except IOError:
98 report(_(b"failed to recover %s\n") % f)
98 report(_(b"failed to recover %s\n") % f)
99 else:
99 else:
100 target = f or b
100 target = f or b
101 try:
101 try:
102 vfs.unlink(target)
102 vfs.unlink(target)
103 except (IOError, OSError) as inst:
103 except (IOError, OSError) as inst:
104 if inst.errno != errno.ENOENT:
104 if inst.errno != errno.ENOENT:
105 raise
105 raise
106 except (IOError, OSError, error.Abort):
106 except (IOError, OSError, error.Abort):
107 if not c:
107 if not c:
108 raise
108 raise
109
109
110 backuppath = b"%s.backupfiles" % journal
110 backuppath = b"%s.backupfiles" % journal
111 if opener.exists(backuppath):
111 if opener.exists(backuppath):
112 opener.unlink(backuppath)
112 opener.unlink(backuppath)
113 opener.unlink(journal)
113 opener.unlink(journal)
114 try:
114 try:
115 for f in backupfiles:
115 for f in backupfiles:
116 if opener.exists(f):
116 if opener.exists(f):
117 opener.unlink(f)
117 opener.unlink(f)
118 except (IOError, OSError, error.Abort):
118 except (IOError, OSError, error.Abort):
119 # only pure backup file remains, it is sage to ignore any error
119 # only pure backup file remains, it is sage to ignore any error
120 pass
120 pass
121
121
122
122
123 class transaction(util.transactional):
123 class transaction(util.transactional):
124 def __init__(
124 def __init__(
125 self,
125 self,
126 report,
126 report,
127 opener,
127 opener,
128 vfsmap,
128 vfsmap,
129 journalname,
129 journalname,
130 undoname=None,
130 undoname=None,
131 after=None,
131 after=None,
132 createmode=None,
132 createmode=None,
133 validator=None,
133 validator=None,
134 releasefn=None,
134 releasefn=None,
135 checkambigfiles=None,
135 checkambigfiles=None,
136 name='<unnamed>',
136 name='<unnamed>',
137 ):
137 ):
138 """Begin a new transaction
138 """Begin a new transaction
139
139
140 Begins a new transaction that allows rolling back writes in the event of
140 Begins a new transaction that allows rolling back writes in the event of
141 an exception.
141 an exception.
142
142
143 * `after`: called after the transaction has been committed
143 * `after`: called after the transaction has been committed
144 * `createmode`: the mode of the journal file that will be created
144 * `createmode`: the mode of the journal file that will be created
145 * `releasefn`: called after releasing (with transaction and result)
145 * `releasefn`: called after releasing (with transaction and result)
146
146
147 `checkambigfiles` is a set of (path, vfs-location) tuples,
147 `checkambigfiles` is a set of (path, vfs-location) tuples,
148 which determine whether file stat ambiguity should be avoided
148 which determine whether file stat ambiguity should be avoided
149 for corresponded files.
149 for corresponded files.
150 """
150 """
151 self._count = 1
151 self._count = 1
152 self._usages = 1
152 self._usages = 1
153 self._report = report
153 self._report = report
154 # a vfs to the store content
154 # a vfs to the store content
155 self._opener = opener
155 self._opener = opener
156 # a map to access file in various {location -> vfs}
156 # a map to access file in various {location -> vfs}
157 vfsmap = vfsmap.copy()
157 vfsmap = vfsmap.copy()
158 vfsmap[b''] = opener # set default value
158 vfsmap[b''] = opener # set default value
159 self._vfsmap = vfsmap
159 self._vfsmap = vfsmap
160 self._after = after
160 self._after = after
161 self._offsetmap = {}
161 self._offsetmap = {}
162 self._newfiles = set()
162 self._journal = journalname
163 self._journal = journalname
163 self._undoname = undoname
164 self._undoname = undoname
164 self._queue = []
165 self._queue = []
165 # A callback to do something just after releasing transaction.
166 # A callback to do something just after releasing transaction.
166 if releasefn is None:
167 if releasefn is None:
167 releasefn = lambda tr, success: None
168 releasefn = lambda tr, success: None
168 self._releasefn = releasefn
169 self._releasefn = releasefn
169
170
170 self._checkambigfiles = set()
171 self._checkambigfiles = set()
171 if checkambigfiles:
172 if checkambigfiles:
172 self._checkambigfiles.update(checkambigfiles)
173 self._checkambigfiles.update(checkambigfiles)
173
174
174 self._names = [name]
175 self._names = [name]
175
176
176 # A dict dedicated to precisely tracking the changes introduced in the
177 # A dict dedicated to precisely tracking the changes introduced in the
177 # transaction.
178 # transaction.
178 self.changes = {}
179 self.changes = {}
179
180
180 # a dict of arguments to be passed to hooks
181 # a dict of arguments to be passed to hooks
181 self.hookargs = {}
182 self.hookargs = {}
182 self._file = opener.open(self._journal, b"w+")
183 self._file = opener.open(self._journal, b"w+")
183
184
184 # a list of ('location', 'path', 'backuppath', cache) entries.
185 # a list of ('location', 'path', 'backuppath', cache) entries.
185 # - if 'backuppath' is empty, no file existed at backup time
186 # - if 'backuppath' is empty, no file existed at backup time
186 # - if 'path' is empty, this is a temporary transaction file
187 # - if 'path' is empty, this is a temporary transaction file
187 # - if 'location' is not empty, the path is outside main opener reach.
188 # - if 'location' is not empty, the path is outside main opener reach.
188 # use 'location' value as a key in a vfsmap to find the right 'vfs'
189 # use 'location' value as a key in a vfsmap to find the right 'vfs'
189 # (cache is currently unused)
190 # (cache is currently unused)
190 self._backupentries = []
191 self._backupentries = []
191 self._backupmap = {}
192 self._backupmap = {}
192 self._backupjournal = b"%s.backupfiles" % self._journal
193 self._backupjournal = b"%s.backupfiles" % self._journal
193 self._backupsfile = opener.open(self._backupjournal, b'w')
194 self._backupsfile = opener.open(self._backupjournal, b'w')
194 self._backupsfile.write(b'%d\n' % version)
195 self._backupsfile.write(b'%d\n' % version)
195
196
196 if createmode is not None:
197 if createmode is not None:
197 opener.chmod(self._journal, createmode & 0o666)
198 opener.chmod(self._journal, createmode & 0o666)
198 opener.chmod(self._backupjournal, createmode & 0o666)
199 opener.chmod(self._backupjournal, createmode & 0o666)
199
200
200 # hold file generations to be performed on commit
201 # hold file generations to be performed on commit
201 self._filegenerators = {}
202 self._filegenerators = {}
202 # hold callback to write pending data for hooks
203 # hold callback to write pending data for hooks
203 self._pendingcallback = {}
204 self._pendingcallback = {}
204 # True is any pending data have been written ever
205 # True is any pending data have been written ever
205 self._anypending = False
206 self._anypending = False
206 # holds callback to call when writing the transaction
207 # holds callback to call when writing the transaction
207 self._finalizecallback = {}
208 self._finalizecallback = {}
208 # holds callback to call when validating the transaction
209 # holds callback to call when validating the transaction
209 # should raise exception if anything is wrong
210 # should raise exception if anything is wrong
210 self._validatecallback = {}
211 self._validatecallback = {}
211 if validator is not None:
212 if validator is not None:
212 self._validatecallback[b'001-userhooks'] = validator
213 self._validatecallback[b'001-userhooks'] = validator
213 # hold callback for post transaction close
214 # hold callback for post transaction close
214 self._postclosecallback = {}
215 self._postclosecallback = {}
215 # holds callbacks to call during abort
216 # holds callbacks to call during abort
216 self._abortcallback = {}
217 self._abortcallback = {}
217
218
218 def __repr__(self):
219 def __repr__(self):
219 name = '/'.join(self._names)
220 name = '/'.join(self._names)
220 return '<transaction name=%s, count=%d, usages=%d>' % (
221 return '<transaction name=%s, count=%d, usages=%d>' % (
221 name,
222 name,
222 self._count,
223 self._count,
223 self._usages,
224 self._usages,
224 )
225 )
225
226
226 def __del__(self):
227 def __del__(self):
227 if self._journal:
228 if self._journal:
228 self._abort()
229 self._abort()
229
230
230 @active
231 @active
231 def startgroup(self):
232 def startgroup(self):
232 """delay registration of file entry
233 """delay registration of file entry
233
234
234 This is used by strip to delay vision of strip offset. The transaction
235 This is used by strip to delay vision of strip offset. The transaction
235 sees either none or all of the strip actions to be done."""
236 sees either none or all of the strip actions to be done."""
236 self._queue.append([])
237 self._queue.append([])
237
238
238 @active
239 @active
239 def endgroup(self):
240 def endgroup(self):
240 """apply delayed registration of file entry.
241 """apply delayed registration of file entry.
241
242
242 This is used by strip to delay vision of strip offset. The transaction
243 This is used by strip to delay vision of strip offset. The transaction
243 sees either none or all of the strip actions to be done."""
244 sees either none or all of the strip actions to be done."""
244 q = self._queue.pop()
245 q = self._queue.pop()
245 for f, o in q:
246 for f, o in q:
246 self._addentry(f, o)
247 self._addentry(f, o)
247
248
248 @active
249 @active
249 def add(self, file, offset):
250 def add(self, file, offset):
250 """record the state of an append-only file before update"""
251 """record the state of an append-only file before update"""
251 if file in self._offsetmap or file in self._backupmap:
252 if (
253 file in self._newfiles
254 or file in self._offsetmap
255 or file in self._backupmap
256 ):
252 return
257 return
253 if self._queue:
258 if self._queue:
254 self._queue[-1].append((file, offset))
259 self._queue[-1].append((file, offset))
255 return
260 return
256
261
257 self._addentry(file, offset)
262 self._addentry(file, offset)
258
263
259 def _addentry(self, file, offset):
264 def _addentry(self, file, offset):
260 """add a append-only entry to memory and on-disk state"""
265 """add a append-only entry to memory and on-disk state"""
261 if file in self._offsetmap or file in self._backupmap:
266 if (
267 file in self._newfiles
268 or file in self._offsetmap
269 or file in self._backupmap
270 ):
262 return
271 return
263 self._offsetmap[file] = offset
272 if offset:
273 self._offsetmap[file] = offset
274 else:
275 self._newfiles.add(file)
264 # add enough data to the journal to do the truncate
276 # add enough data to the journal to do the truncate
265 self._file.write(b"%s\0%d\n" % (file, offset))
277 self._file.write(b"%s\0%d\n" % (file, offset))
266 self._file.flush()
278 self._file.flush()
267
279
268 @active
280 @active
269 def addbackup(self, file, hardlink=True, location=b''):
281 def addbackup(self, file, hardlink=True, location=b''):
270 """Adds a backup of the file to the transaction
282 """Adds a backup of the file to the transaction
271
283
272 Calling addbackup() creates a hardlink backup of the specified file
284 Calling addbackup() creates a hardlink backup of the specified file
273 that is used to recover the file in the event of the transaction
285 that is used to recover the file in the event of the transaction
274 aborting.
286 aborting.
275
287
276 * `file`: the file path, relative to .hg/store
288 * `file`: the file path, relative to .hg/store
277 * `hardlink`: use a hardlink to quickly create the backup
289 * `hardlink`: use a hardlink to quickly create the backup
278 """
290 """
279 if self._queue:
291 if self._queue:
280 msg = b'cannot use transaction.addbackup inside "group"'
292 msg = b'cannot use transaction.addbackup inside "group"'
281 raise error.ProgrammingError(msg)
293 raise error.ProgrammingError(msg)
282
294
283 if file in self._offsetmap or file in self._backupmap:
295 if (
296 file in self._newfiles
297 or file in self._offsetmap
298 or file in self._backupmap
299 ):
284 return
300 return
285 vfs = self._vfsmap[location]
301 vfs = self._vfsmap[location]
286 dirname, filename = vfs.split(file)
302 dirname, filename = vfs.split(file)
287 backupfilename = b"%s.backup.%s" % (self._journal, filename)
303 backupfilename = b"%s.backup.%s" % (self._journal, filename)
288 backupfile = vfs.reljoin(dirname, backupfilename)
304 backupfile = vfs.reljoin(dirname, backupfilename)
289 if vfs.exists(file):
305 if vfs.exists(file):
290 filepath = vfs.join(file)
306 filepath = vfs.join(file)
291 backuppath = vfs.join(backupfile)
307 backuppath = vfs.join(backupfile)
292 util.copyfile(filepath, backuppath, hardlink=hardlink)
308 util.copyfile(filepath, backuppath, hardlink=hardlink)
293 else:
309 else:
294 backupfile = b''
310 backupfile = b''
295
311
296 self._addbackupentry((location, file, backupfile, False))
312 self._addbackupentry((location, file, backupfile, False))
297
313
298 def _addbackupentry(self, entry):
314 def _addbackupentry(self, entry):
299 """register a new backup entry and write it to disk"""
315 """register a new backup entry and write it to disk"""
300 self._backupentries.append(entry)
316 self._backupentries.append(entry)
301 self._backupmap[entry[1]] = len(self._backupentries) - 1
317 self._backupmap[entry[1]] = len(self._backupentries) - 1
302 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
318 self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
303 self._backupsfile.flush()
319 self._backupsfile.flush()
304
320
305 @active
321 @active
306 def registertmp(self, tmpfile, location=b''):
322 def registertmp(self, tmpfile, location=b''):
307 """register a temporary transaction file
323 """register a temporary transaction file
308
324
309 Such files will be deleted when the transaction exits (on both
325 Such files will be deleted when the transaction exits (on both
310 failure and success).
326 failure and success).
311 """
327 """
312 self._addbackupentry((location, b'', tmpfile, False))
328 self._addbackupentry((location, b'', tmpfile, False))
313
329
314 @active
330 @active
315 def addfilegenerator(
331 def addfilegenerator(
316 self, genid, filenames, genfunc, order=0, location=b''
332 self, genid, filenames, genfunc, order=0, location=b''
317 ):
333 ):
318 """add a function to generates some files at transaction commit
334 """add a function to generates some files at transaction commit
319
335
320 The `genfunc` argument is a function capable of generating proper
336 The `genfunc` argument is a function capable of generating proper
321 content of each entry in the `filename` tuple.
337 content of each entry in the `filename` tuple.
322
338
323 At transaction close time, `genfunc` will be called with one file
339 At transaction close time, `genfunc` will be called with one file
324 object argument per entries in `filenames`.
340 object argument per entries in `filenames`.
325
341
326 The transaction itself is responsible for the backup, creation and
342 The transaction itself is responsible for the backup, creation and
327 final write of such file.
343 final write of such file.
328
344
329 The `genid` argument is used to ensure the same set of file is only
345 The `genid` argument is used to ensure the same set of file is only
330 generated once. Call to `addfilegenerator` for a `genid` already
346 generated once. Call to `addfilegenerator` for a `genid` already
331 present will overwrite the old entry.
347 present will overwrite the old entry.
332
348
333 The `order` argument may be used to control the order in which multiple
349 The `order` argument may be used to control the order in which multiple
334 generator will be executed.
350 generator will be executed.
335
351
336 The `location` arguments may be used to indicate the files are located
352 The `location` arguments may be used to indicate the files are located
337 outside of the the standard directory for transaction. It should match
353 outside of the the standard directory for transaction. It should match
338 one of the key of the `transaction.vfsmap` dictionary.
354 one of the key of the `transaction.vfsmap` dictionary.
339 """
355 """
340 # For now, we are unable to do proper backup and restore of custom vfs
356 # For now, we are unable to do proper backup and restore of custom vfs
341 # but for bookmarks that are handled outside this mechanism.
357 # but for bookmarks that are handled outside this mechanism.
342 self._filegenerators[genid] = (order, filenames, genfunc, location)
358 self._filegenerators[genid] = (order, filenames, genfunc, location)
343
359
344 @active
360 @active
345 def removefilegenerator(self, genid):
361 def removefilegenerator(self, genid):
346 """reverse of addfilegenerator, remove a file generator function"""
362 """reverse of addfilegenerator, remove a file generator function"""
347 if genid in self._filegenerators:
363 if genid in self._filegenerators:
348 del self._filegenerators[genid]
364 del self._filegenerators[genid]
349
365
350 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
366 def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
351 # write files registered for generation
367 # write files registered for generation
352 any = False
368 any = False
353
369
354 if group == GEN_GROUP_ALL:
370 if group == GEN_GROUP_ALL:
355 skip_post = skip_pre = False
371 skip_post = skip_pre = False
356 else:
372 else:
357 skip_pre = group == GEN_GROUP_POST_FINALIZE
373 skip_pre = group == GEN_GROUP_POST_FINALIZE
358 skip_post = group == GEN_GROUP_PRE_FINALIZE
374 skip_post = group == GEN_GROUP_PRE_FINALIZE
359
375
360 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
376 for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
361 any = True
377 any = True
362 order, filenames, genfunc, location = entry
378 order, filenames, genfunc, location = entry
363
379
364 # for generation at closing, check if it's before or after finalize
380 # for generation at closing, check if it's before or after finalize
365 is_post = id in postfinalizegenerators
381 is_post = id in postfinalizegenerators
366 if skip_post and is_post:
382 if skip_post and is_post:
367 continue
383 continue
368 elif skip_pre and not is_post:
384 elif skip_pre and not is_post:
369 continue
385 continue
370
386
371 vfs = self._vfsmap[location]
387 vfs = self._vfsmap[location]
372 files = []
388 files = []
373 try:
389 try:
374 for name in filenames:
390 for name in filenames:
375 name += suffix
391 name += suffix
376 if suffix:
392 if suffix:
377 self.registertmp(name, location=location)
393 self.registertmp(name, location=location)
378 checkambig = False
394 checkambig = False
379 else:
395 else:
380 self.addbackup(name, location=location)
396 self.addbackup(name, location=location)
381 checkambig = (name, location) in self._checkambigfiles
397 checkambig = (name, location) in self._checkambigfiles
382 files.append(
398 files.append(
383 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
399 vfs(name, b'w', atomictemp=True, checkambig=checkambig)
384 )
400 )
385 genfunc(*files)
401 genfunc(*files)
386 for f in files:
402 for f in files:
387 f.close()
403 f.close()
388 # skip discard() loop since we're sure no open file remains
404 # skip discard() loop since we're sure no open file remains
389 del files[:]
405 del files[:]
390 finally:
406 finally:
391 for f in files:
407 for f in files:
392 f.discard()
408 f.discard()
393 return any
409 return any
394
410
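# How `group` collapses into the two skip flags used by the loop above,
# shown in isolation; the GEN_GROUP_* values are stand-ins here (the real
# constants are defined earlier in this module).
GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'

def skip_flags(group):
    if group == GEN_GROUP_ALL:
        return False, False                     # run both generator groups
    # otherwise exactly one side is skipped, never both
    return (group == GEN_GROUP_POST_FINALIZE,   # skip_pre
            group == GEN_GROUP_PRE_FINALIZE)    # skip_post

assert skip_flags(GEN_GROUP_ALL) == (False, False)
assert skip_flags(GEN_GROUP_PRE_FINALIZE) == (False, True)
assert skip_flags(GEN_GROUP_POST_FINALIZE) == (True, False)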
395 @active
411 @active
396 def findoffset(self, file):
412 def findoffset(self, file):
413 if file in self._newfiles:
414 return 0
397 return self._offsetmap.get(file)
415 return self._offsetmap.get(file)
398
416
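# The three possible answers from findoffset() after this change, sketched
# with plain containers: 0 for files created by this transaction (now
# tracked in the separate _newfiles set), the recorded truncation offset
# for preexisting files, and None for files the transaction never touched.
_newfiles = {b'data/new.i'}
_offsetmap = {b'00changelog.i': 1024}

def findoffset(f):
    if f in _newfiles:
        return 0
    return _offsetmap.get(f)

assert findoffset(b'data/new.i') == 0
assert findoffset(b'00changelog.i') == 1024
assert findoffset(b'unrelated') is None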
399 @active
417 @active
400 def readjournal(self):
418 def readjournal(self):
401 self._file.seek(0)
419 self._file.seek(0)
402 entries = []
420 entries = []
403 for l in self._file:
421 for l in self._file:
404 file, troffset = l.split(b'\0')
422 file, troffset = l.split(b'\0')
405 entries.append((file, int(troffset)))
423 entries.append((file, int(troffset)))
406 return entries
424 return entries
407
425
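# Journal parsing in isolation, assuming the b"%s\0%d\n" records this class
# writes: each line names a file and the offset to truncate it back to on
# rollback (0 meaning the file did not exist before the transaction).
import io

journal = io.BytesIO(b'00changelog.i\x001024\ndata/new.i\x000\n')
entries = []
for l in journal:
    file, troffset = l.split(b'\0')
    entries.append((file, int(troffset)))   # int() tolerates the trailing \n
assert entries == [(b'00changelog.i', 1024), (b'data/new.i', 0)]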
408 @active
426 @active
409 def replace(self, file, offset):
427 def replace(self, file, offset):
410 '''
428 '''
411 replace can only replace already committed entries
429 replace can only replace already committed entries
412 that are not pending in the queue
430 that are not pending in the queue
413 '''
431 '''
414
432 if file in self._newfiles:
415 if file not in self._offsetmap:
433 if not offset:
434 return
435 self._newfiles.remove(file)
436 self._offsetmap[file] = offset
437 elif file in self._offsetmap:
438 if not offset:
439 del self._offsetmap[file]
440 self._newfiles.add(file)
441 else:
442 self._offsetmap[file] = offset
443 else:
416 raise KeyError(file)
444 raise KeyError(file)
417 self._offsetmap[file] = offset
418 self._file.write(b"%s\0%d\n" % (file, offset))
445 self._file.write(b"%s\0%d\n" % (file, offset))
419 self._file.flush()
446 self._file.flush()
420
447
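# The state transitions replace() performs under the new split, modeled with
# bare containers: an offset of 0 now means "new file", so entries migrate
# between _newfiles and _offsetmap instead of zeros being stored in the map.
_newfiles, _offsetmap = set(), {}

def replace(file, offset):
    if file in _newfiles:
        if not offset:
            return                      # already recorded as new
        _newfiles.remove(file)
        _offsetmap[file] = offset       # new -> preexisting at offset
    elif file in _offsetmap:
        if not offset:
            del _offsetmap[file]
            _newfiles.add(file)         # preexisting -> new
        else:
            _offsetmap[file] = offset   # move the truncation point
    else:
        raise KeyError(file)

_newfiles.add(b'f')
replace(b'f', 512)
assert _offsetmap == {b'f': 512} and not _newfiles
replace(b'f', 0)
assert _newfiles == {b'f'} and not _offsetmap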
421 @active
448 @active
422 def nest(self, name='<unnamed>'):
449 def nest(self, name='<unnamed>'):
423 self._count += 1
450 self._count += 1
424 self._usages += 1
451 self._usages += 1
425 self._names.append(name)
452 self._names.append(name)
426 return self
453 return self
427
454
428 def release(self):
455 def release(self):
429 if self._count > 0:
456 if self._count > 0:
430 self._usages -= 1
457 self._usages -= 1
431 if self._names:
458 if self._names:
432 self._names.pop()
459 self._names.pop()
433 # if the transaction scopes are left without being closed, fail
460 # if the transaction scopes are left without being closed, fail
434 if self._count > 0 and self._usages == 0:
461 if self._count > 0 and self._usages == 0:
435 self._abort()
462 self._abort()
436
463
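# Reference-counting sketch: nest() hands the same transaction to nested
# scopes, and release() only triggers an abort when every usage was released
# without close() ever running.
class _CountingTr(object):
    def __init__(self):
        self._count = self._usages = 1
        self.aborted = False

    def nest(self):
        self._count += 1
        self._usages += 1
        return self                     # same object, one level deeper

    def release(self):
        if self._count > 0:
            self._usages -= 1
        if self._count > 0 and self._usages == 0:
            self.aborted = True         # stand-in for self._abort()

tr = _CountingTr()
inner = tr.nest()
inner.release()                         # inner scope done, tr still live
assert not tr.aborted
tr.release()                            # last usage gone without a close()
assert tr.aborted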
437 def running(self):
464 def running(self):
438 return self._count > 0
465 return self._count > 0
439
466
440 def addpending(self, category, callback):
467 def addpending(self, category, callback):
441 """add a callback to be called when the transaction is pending
468 """add a callback to be called when the transaction is pending
442
469
443 The transaction will be given as the callback's first argument.
470 The transaction will be given as the callback's first argument.
444
471
445 Category is a unique identifier to allow overwriting an old callback
472 Category is a unique identifier to allow overwriting an old callback
446 with a newer callback.
473 with a newer callback.
447 """
474 """
448 self._pendingcallback[category] = callback
475 self._pendingcallback[category] = callback
449
476
450 @active
477 @active
451 def writepending(self):
478 def writepending(self):
452 '''write pending files to their temporary versions
479 '''write pending files to their temporary versions
453
480
454 This is used to allow hooks to view a transaction before commit'''
481 This is used to allow hooks to view a transaction before commit'''
455 categories = sorted(self._pendingcallback)
482 categories = sorted(self._pendingcallback)
456 for cat in categories:
483 for cat in categories:
457 # remove callback since the data will have been flushed
484 # remove callback since the data will have been flushed
458 any = self._pendingcallback.pop(cat)(self)
485 any = self._pendingcallback.pop(cat)(self)
459 self._anypending = self._anypending or any
486 self._anypending = self._anypending or any
460 self._anypending |= self._generatefiles(suffix=b'.pending')
487 self._anypending |= self._generatefiles(suffix=b'.pending')
461 return self._anypending
488 return self._anypending
462
489
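# Pending-callback sketch: each callback runs once and is popped so it
# cannot fire twice, and its truthy result is folded into _anypending so
# callers know whether any '.pending' data exists for hooks to inspect.
_pendingcallback = {b'bookmarks': lambda tr: True,
                    b'noop': lambda tr: False}
_anypending = False
for cat in sorted(_pendingcallback):
    _anypending = _anypending or _pendingcallback.pop(cat)(None)
assert _anypending
assert not _pendingcallback             # all callbacks consumed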
463 @active
490 @active
464 def hasfinalize(self, category):
491 def hasfinalize(self, category):
465 """check is a callback already exist for a category
492 """check is a callback already exist for a category
466 """
493 """
467 return category in self._finalizecallback
494 return category in self._finalizecallback
468
495
469 @active
496 @active
470 def addfinalize(self, category, callback):
497 def addfinalize(self, category, callback):
471 """add a callback to be called when the transaction is closed
498 """add a callback to be called when the transaction is closed
472
499
473 The transaction will be given as the callback's first argument.
500 The transaction will be given as the callback's first argument.
474
501
475 Category is a unique identifier to allow overwriting old callbacks with
502 Category is a unique identifier to allow overwriting old callbacks with
476 newer callbacks.
503 newer callbacks.
477 """
504 """
478 self._finalizecallback[category] = callback
505 self._finalizecallback[category] = callback
479
506
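# Category semantics in miniature: registering twice under one category
# replaces rather than appends, which is what hasfinalize() lets callers
# check before deciding whether to register at all.
_finalizecallback = {}

def addfinalize(category, callback):
    _finalizecallback[category] = callback

def hasfinalize(category):
    return category in _finalizecallback

addfinalize(b'flush-caches', lambda tr: None)
assert hasfinalize(b'flush-caches')
addfinalize(b'flush-caches', lambda tr: tr)     # replaces, no duplicate
assert len(_finalizecallback) == 1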
480 @active
507 @active
481 def addpostclose(self, category, callback):
508 def addpostclose(self, category, callback):
482 """add or replace a callback to be called after the transaction closed
509 """add or replace a callback to be called after the transaction closed
483
510
484 The transaction will be given as the callback's first argument.
511 The transaction will be given as the callback's first argument.
485
512
486 Category is a unique identifier to allow overwriting an old callback
513 Category is a unique identifier to allow overwriting an old callback
487 with a newer callback.
514 with a newer callback.
488 """
515 """
489 self._postclosecallback[category] = callback
516 self._postclosecallback[category] = callback
490
517
491 @active
518 @active
492 def getpostclose(self, category):
519 def getpostclose(self, category):
493 """return a postclose callback added before, or None"""
520 """return a postclose callback added before, or None"""
494 return self._postclosecallback.get(category, None)
521 return self._postclosecallback.get(category, None)
495
522
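# getpostclose() retrieval sketch; post-close callbacks run after the
# journal is gone, so by then it is safe to read the committed state but
# too late to register anything else with the transaction.
_postclosecallback = {b'refresh-caches': lambda tr: None}

def getpostclose(category):
    return _postclosecallback.get(category, None)

assert getpostclose(b'refresh-caches') is not None
assert getpostclose(b'missing') is None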
496 @active
523 @active
497 def addabort(self, category, callback):
524 def addabort(self, category, callback):
498 """add a callback to be called when the transaction is aborted.
525 """add a callback to be called when the transaction is aborted.
499
526
500 The transaction will be given as the first argument to the callback.
527 The transaction will be given as the first argument to the callback.
501
528
502 Category is a unique identifier to allow overwriting an old callback
529 Category is a unique identifier to allow overwriting an old callback
503 with a newer callback.
530 with a newer callback.
504 """
531 """
505 self._abortcallback[category] = callback
532 self._abortcallback[category] = callback
506
533
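# Abort-callback ordering sketch: _abort() runs these sorted by category
# before playing back the journal, giving extensions a chance to undo side
# effects that live outside the journaled files.
calls = []
_abortcallback = {b'b-cleanup': lambda tr: calls.append(b'b-cleanup'),
                  b'a-cleanup': lambda tr: calls.append(b'a-cleanup')}
for cat in sorted(_abortcallback):
    _abortcallback[cat](None)
assert calls == [b'a-cleanup', b'b-cleanup']    # deterministic order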
507 @active
534 @active
508 def addvalidator(self, category, callback):
535 def addvalidator(self, category, callback):
509 """ adds a callback to be called when validating the transaction.
536 """ adds a callback to be called when validating the transaction.
510
537
511 The transaction will be given as the first argument to the callback.
538 The transaction will be given as the first argument to the callback.
512
539
513 The callback should raise an exception to abort the transaction. """
540 The callback should raise an exception to abort the transaction. """
514 self._validatecallback[category] = callback
541 self._validatecallback[category] = callback
515
542
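# Validator sketch: close() runs every validator before any finalization,
# so raising here vetoes the commit; the attribute name `created_new_head`
# is purely illustrative, and real callers would raise error.Abort.
def forbid_new_heads(tr):
    if getattr(tr, 'created_new_head', False):
        raise RuntimeError('new head forbidden')

class _FakeTr(object):
    created_new_head = True

try:
    forbid_new_heads(_FakeTr())
except RuntimeError:
    pass                                # the transaction would abort here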
516 @active
543 @active
517 def close(self):
544 def close(self):
518 '''commit the transaction'''
545 '''commit the transaction'''
519 if self._count == 1:
546 if self._count == 1:
520 for category in sorted(self._validatecallback):
547 for category in sorted(self._validatecallback):
521 self._validatecallback[category](self)
548 self._validatecallback[category](self)
522 self._validatecallback = None # Help prevent cycles.
549 self._validatecallback = None # Help prevent cycles.
523 self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
550 self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
524 while self._finalizecallback:
551 while self._finalizecallback:
525 callbacks = self._finalizecallback
552 callbacks = self._finalizecallback
526 self._finalizecallback = {}
553 self._finalizecallback = {}
527 categories = sorted(callbacks)
554 categories = sorted(callbacks)
528 for cat in categories:
555 for cat in categories:
529 callbacks[cat](self)
556 callbacks[cat](self)
530 # Prevent double usage and help clear cycles.
557 # Prevent double usage and help clear cycles.
531 self._finalizecallback = None
558 self._finalizecallback = None
532 self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
559 self._generatefiles(group=GEN_GROUP_POST_FINALIZE)
533
560
534 self._count -= 1
561 self._count -= 1
535 if self._count != 0:
562 if self._count != 0:
536 return
563 return
537 self._file.close()
564 self._file.close()
538 self._backupsfile.close()
565 self._backupsfile.close()
539 # cleanup temporary files
566 # cleanup temporary files
540 for l, f, b, c in self._backupentries:
567 for l, f, b, c in self._backupentries:
541 if l not in self._vfsmap and c:
568 if l not in self._vfsmap and c:
542 self._report(
569 self._report(
543 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
570 b"couldn't remove %s: unknown cache location %s\n" % (b, l)
544 )
571 )
545 continue
572 continue
546 vfs = self._vfsmap[l]
573 vfs = self._vfsmap[l]
547 if not f and b and vfs.exists(b):
574 if not f and b and vfs.exists(b):
548 try:
575 try:
549 vfs.unlink(b)
576 vfs.unlink(b)
550 except (IOError, OSError, error.Abort) as inst:
577 except (IOError, OSError, error.Abort) as inst:
551 if not c:
578 if not c:
552 raise
579 raise
553 # Abort may be raised by a read-only opener
580 # Abort may be raised by a read-only opener
554 self._report(
581 self._report(
555 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
582 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
556 )
583 )
557 self._offsetmap = {}
584 self._offsetmap = {}
585 self._newfiles = set()
558 self._writeundo()
586 self._writeundo()
559 if self._after:
587 if self._after:
560 self._after()
588 self._after()
561 self._after = None # Help prevent cycles.
589 self._after = None # Help prevent cycles.
562 if self._opener.isfile(self._backupjournal):
590 if self._opener.isfile(self._backupjournal):
563 self._opener.unlink(self._backupjournal)
591 self._opener.unlink(self._backupjournal)
564 if self._opener.isfile(self._journal):
592 if self._opener.isfile(self._journal):
565 self._opener.unlink(self._journal)
593 self._opener.unlink(self._journal)
566 for l, _f, b, c in self._backupentries:
594 for l, _f, b, c in self._backupentries:
567 if l not in self._vfsmap and c:
595 if l not in self._vfsmap and c:
568 self._report(
596 self._report(
569 b"couldn't remove %s: unknown cache location"
597 b"couldn't remove %s: unknown cache location"
570 b"%s\n" % (b, l)
598 b"%s\n" % (b, l)
571 )
599 )
572 continue
600 continue
573 vfs = self._vfsmap[l]
601 vfs = self._vfsmap[l]
574 if b and vfs.exists(b):
602 if b and vfs.exists(b):
575 try:
603 try:
576 vfs.unlink(b)
604 vfs.unlink(b)
577 except (IOError, OSError, error.Abort) as inst:
605 except (IOError, OSError, error.Abort) as inst:
578 if not c:
606 if not c:
579 raise
607 raise
580 # Abort may be raised by a read-only opener
608 # Abort may be raised by a read-only opener
581 self._report(
609 self._report(
582 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
610 b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
583 )
611 )
584 self._backupentries = []
612 self._backupentries = []
585 self._journal = None
613 self._journal = None
586
614
587 self._releasefn(self, True) # notify success of closing transaction
615 self._releasefn(self, True) # notify success of closing transaction
588 self._releasefn = None # Help prevent cycles.
616 self._releasefn = None # Help prevent cycles.
589
617
590 # run post close action
618 # run post close action
591 categories = sorted(self._postclosecallback)
619 categories = sorted(self._postclosecallback)
592 for cat in categories:
620 for cat in categories:
593 self._postclosecallback[cat](self)
621 self._postclosecallback[cat](self)
594 # Prevent double usage and help clear cycles.
622 # Prevent double usage and help clear cycles.
595 self._postclosecallback = None
623 self._postclosecallback = None
596
624
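# The finalize drain loop in miniature: a callback registered *during*
# finalization (for example by another finalizer) is still run, because
# close() swaps the dict out before each pass and loops until it is empty.
ran = []
_finalizecallback = {}

def late(tr):
    ran.append(b'late')

def early(tr):
    ran.append(b'early')
    _finalizecallback[b'late'] = late   # added mid-finalize, still runs

_finalizecallback[b'early'] = early
while _finalizecallback:
    callbacks, _finalizecallback = _finalizecallback, {}
    for cat in sorted(callbacks):
        callbacks[cat](None)
assert ran == [b'early', b'late']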
597 @active
625 @active
598 def abort(self):
626 def abort(self):
599 '''abort the transaction (generally called on error, or when the
627 '''abort the transaction (generally called on error, or when the
600 transaction is not explicitly committed before going out of
628 transaction is not explicitly committed before going out of
601 scope)'''
629 scope)'''
602 self._abort()
630 self._abort()
603
631
604 def _writeundo(self):
632 def _writeundo(self):
605 """write transaction data for possible future undo call"""
633 """write transaction data for possible future undo call"""
606 if self._undoname is None:
634 if self._undoname is None:
607 return
635 return
608 undobackupfile = self._opener.open(
636 undobackupfile = self._opener.open(
609 b"%s.backupfiles" % self._undoname, b'w'
637 b"%s.backupfiles" % self._undoname, b'w'
610 )
638 )
611 undobackupfile.write(b'%d\n' % version)
639 undobackupfile.write(b'%d\n' % version)
612 for l, f, b, c in self._backupentries:
640 for l, f, b, c in self._backupentries:
613 if not f: # temporary file
641 if not f: # temporary file
614 continue
642 continue
615 if not b:
643 if not b:
616 u = b''
644 u = b''
617 else:
645 else:
618 if l not in self._vfsmap and c:
646 if l not in self._vfsmap and c:
619 self._report(
647 self._report(
620 b"couldn't remove %s: unknown cache location"
648 b"couldn't remove %s: unknown cache location"
621 b"%s\n" % (b, l)
649 b"%s\n" % (b, l)
622 )
650 )
623 continue
651 continue
624 vfs = self._vfsmap[l]
652 vfs = self._vfsmap[l]
625 base, name = vfs.split(b)
653 base, name = vfs.split(b)
626 assert name.startswith(self._journal), name
654 assert name.startswith(self._journal), name
627 uname = name.replace(self._journal, self._undoname, 1)
655 uname = name.replace(self._journal, self._undoname, 1)
628 u = vfs.reljoin(base, uname)
656 u = vfs.reljoin(base, uname)
629 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
657 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
630 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
658 undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
631 undobackupfile.close()
659 undobackupfile.close()
632
660
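# Undo-name derivation sketch: backup file names start with the journal
# name, and only that first occurrence is rewritten, so a backup recorded
# for 'journal' is re-homed under 'undo' for a later `hg rollback`.
name = b'journal.backup.00changelog.i'
journal, undoname = b'journal', b'undo'
assert name.startswith(journal)
uname = name.replace(journal, undoname, 1)
assert uname == b'undo.backup.00changelog.i'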
633 def _abort(self):
661 def _abort(self):
634 entries = self.readjournal()
662 entries = self.readjournal()
635 self._count = 0
663 self._count = 0
636 self._usages = 0
664 self._usages = 0
637 self._file.close()
665 self._file.close()
638 self._backupsfile.close()
666 self._backupsfile.close()
639
667
640 try:
668 try:
641 if not self._offsetmap and not self._backupentries:
669 if not entries and not self._backupentries:
642 if self._backupjournal:
670 if self._backupjournal:
643 self._opener.unlink(self._backupjournal)
671 self._opener.unlink(self._backupjournal)
644 if self._journal:
672 if self._journal:
645 self._opener.unlink(self._journal)
673 self._opener.unlink(self._journal)
646 return
674 return
647
675
648 self._report(_(b"transaction abort!\n"))
676 self._report(_(b"transaction abort!\n"))
649
677
650 try:
678 try:
651 for cat in sorted(self._abortcallback):
679 for cat in sorted(self._abortcallback):
652 self._abortcallback[cat](self)
680 self._abortcallback[cat](self)
653 # Prevent double usage and help clear cycles.
681 # Prevent double usage and help clear cycles.
654 self._abortcallback = None
682 self._abortcallback = None
655 _playback(
683 _playback(
656 self._journal,
684 self._journal,
657 self._report,
685 self._report,
658 self._opener,
686 self._opener,
659 self._vfsmap,
687 self._vfsmap,
660 entries,
688 entries,
661 self._backupentries,
689 self._backupentries,
662 False,
690 False,
663 checkambigfiles=self._checkambigfiles,
691 checkambigfiles=self._checkambigfiles,
664 )
692 )
665 self._report(_(b"rollback completed\n"))
693 self._report(_(b"rollback completed\n"))
666 except BaseException as exc:
694 except BaseException as exc:
667 self._report(_(b"rollback failed - please run hg recover\n"))
695 self._report(_(b"rollback failed - please run hg recover\n"))
668 self._report(
696 self._report(
669 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
697 _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
670 )
698 )
671 finally:
699 finally:
672 self._journal = None
700 self._journal = None
673 self._releasefn(self, False) # notify failure of transaction
701 self._releasefn(self, False) # notify failure of transaction
674 self._releasefn = None # Help prevent cycles.
702 self._releasefn = None # Help prevent cycles.
675
703
676
704
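# Why the fast-path test above now uses `entries` instead of _offsetmap:
# with new files split out of the offset map, a transaction that only
# created new files would leave _offsetmap empty, but the journal entries
# read back still record those files (at offset 0), so playback still runs.
entries = [(b'data/new.i', 0)]          # a new file recorded at offset 0
backupentries = []
needs_playback = bool(entries or backupentries)
assert needs_playback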
677 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
705 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
678 """Rolls back the transaction contained in the given file
706 """Rolls back the transaction contained in the given file
679
707
680 Reads the entries in the specified file, and the corresponding
708 Reads the entries in the specified file, and the corresponding
681 '*.backupfiles' file, to recover from an incomplete transaction.
709 '*.backupfiles' file, to recover from an incomplete transaction.
682
710
683 * `file`: a file containing a list of entries, specifying where
711 * `file`: a file containing a list of entries, specifying where
684 to truncate each file. The file should contain a list of
712 to truncate each file. The file should contain a list of
685 file\0offset pairs, delimited by newlines. The corresponding
713 file\0offset pairs, delimited by newlines. The corresponding
686 '*.backupfiles' file should contain a list of
714 '*.backupfiles' file should contain a list of
687 location\0file\0backupfile\0cache entries, delimited by newlines.
715 location\0file\0backupfile\0cache entries, delimited by newlines.
688
716
689 `checkambigfiles` is a set of (path, vfs-location) tuples,
717 `checkambigfiles` is a set of (path, vfs-location) tuples,
690 which determine whether file stat ambiguity should be avoided when
718 which determine whether file stat ambiguity should be avoided when
691 restoring the corresponding files.
719 restoring the corresponding files.
692 """
720 """
693 entries = []
721 entries = []
694 backupentries = []
722 backupentries = []
695
723
696 fp = opener.open(file)
724 fp = opener.open(file)
697 lines = fp.readlines()
725 lines = fp.readlines()
698 fp.close()
726 fp.close()
699 for l in lines:
727 for l in lines:
700 try:
728 try:
701 f, o = l.split(b'\0')
729 f, o = l.split(b'\0')
702 entries.append((f, int(o)))
730 entries.append((f, int(o)))
703 except ValueError:
731 except ValueError:
704 report(
732 report(
705 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
733 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
706 )
734 )
707
735
708 backupjournal = b"%s.backupfiles" % file
736 backupjournal = b"%s.backupfiles" % file
709 if opener.exists(backupjournal):
737 if opener.exists(backupjournal):
710 fp = opener.open(backupjournal)
738 fp = opener.open(backupjournal)
711 lines = fp.readlines()
739 lines = fp.readlines()
712 if lines:
740 if lines:
713 ver = lines[0][:-1]
741 ver = lines[0][:-1]
714 if ver == (b'%d' % version):
742 if ver == (b'%d' % version):
715 for line in lines[1:]:
743 for line in lines[1:]:
716 if line:
744 if line:
717 # Shave off the trailing newline
745 # Shave off the trailing newline
718 line = line[:-1]
746 line = line[:-1]
719 l, f, b, c = line.split(b'\0')
747 l, f, b, c = line.split(b'\0')
720 backupentries.append((l, f, b, bool(c)))
748 backupentries.append((l, f, b, bool(c)))
721 else:
749 else:
722 report(
750 report(
723 _(
751 _(
724 b"journal was created by a different version of "
752 b"journal was created by a different version of "
725 b"Mercurial\n"
753 b"Mercurial\n"
726 )
754 )
727 )
755 )
728
756
729 _playback(
757 _playback(
730 file,
758 file,
731 report,
759 report,
732 opener,
760 opener,
733 vfsmap,
761 vfsmap,
734 entries,
762 entries,
735 backupentries,
763 backupentries,
736 checkambigfiles=checkambigfiles,
764 checkambigfiles=checkambigfiles,
737 )
765 )
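# End-to-end parse sketch for the two on-disk formats rollback() reads,
# using in-memory bytes instead of the opener: the journal holds
# newline-separated file\0offset records, and the backup journal a version
# line followed by location\0file\0backup\0cache records; the version
# number 2 is an assumption for this sketch.
journal = b'00changelog.i\x001024\n'
backup = b'2\nplain\x00bookmarks\x00journal.backup.bookmarks\x00\n'

entries = []
for l in journal.splitlines(True):
    f, o = l.split(b'\0')
    entries.append((f, int(o)))

backupentries = []
lines = backup.splitlines(True)
if lines and lines[0][:-1] == b'2':
    for line in lines[1:]:
        if line:
            l, f, b, c = line[:-1].split(b'\0')     # shave the newline
            backupentries.append((l, f, b, bool(c)))

assert entries == [(b'00changelog.i', 1024)]
assert backupentries == [(b'plain', b'bookmarks',
                          b'journal.backup.bookmarks', False)]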