transaction: do not rely on a global variable to post_finalize file...
marmoute
r49534:21ac6aed default
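
This commit touches the single call site in mercurial/bookmarks.py that registers the bookmarks file generator on a transaction. Judging from the commit title, the transaction previously decided which file generators had to run after finalization by consulting a global, name-keyed set inside transaction.py (presumably the postfinalizegenerators set; naming it here is an inference from the title, not something shown in this hunk). The new keyword argument makes the caller state that requirement explicitly. A minimal before/after sketch of the call site, using only names visible in the diff below:

    # Before: post-finalization ordering was inferred from a global set in
    # transaction.py, keyed on the generator id (b'bookmarks').
    tr.addfilegenerator(
        b'bookmarks', (b'bookmarks',), self._write, location=location
    )

    # After: the ordering requirement travels with the registration itself.
    tr.addfilegenerator(
        b'bookmarks',
        (b'bookmarks',),
        self._write,
        location=location,
        post_finalize=True,
    )

Keeping the "write this file after the transaction is finalized" knowledge at the registration site means transaction.py no longer needs a hard-coded list of special generator names.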
@@ -1,1118 +1,1122 @@
# Mercurial bookmark support code
#
# Copyright 2008 David Soria Parra <dsp@php.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import struct

from .i18n import _
from .node import (
    bin,
    hex,
    short,
)
from .pycompat import getattr
from . import (
    encoding,
    error,
    obsutil,
    pycompat,
    requirements,
    scmutil,
    txnutil,
    util,
)
from .utils import (
    urlutil,
)

# label constants
# until 3.5, bookmarks.current was the advertised name, not
# bookmarks.active, so we must use both to avoid breaking old
# custom styles
activebookmarklabel = b'bookmarks.active bookmarks.current'


def bookmarksinstore(repo):
    return requirements.BOOKMARKS_IN_STORE_REQUIREMENT in repo.requirements


def bookmarksvfs(repo):
    return repo.svfs if bookmarksinstore(repo) else repo.vfs


def _getbkfile(repo):
    """Hook so that extensions that mess with the store can hook bm storage.

    For core, this just handles whether we should see pending
    bookmarks or the committed ones. Other extensions (like share)
    may need to tweak this behavior further.
    """
    fp, pending = txnutil.trypending(
        repo.root, bookmarksvfs(repo), b'bookmarks'
    )
    return fp


class bmstore(object):
    r"""Storage for bookmarks.

    This object should do all bookmark-related reads and writes, so
    that it's fairly simple to replace the storage underlying
    bookmarks without having to clone the logic surrounding
    bookmarks. This type also should manage the active bookmark, if
    any.

    This particular bmstore implementation stores bookmarks as
    {hash}\s{name}\n (the same format as localtags) in
    .hg/bookmarks. The mapping is stored as {name: nodeid}.
    """

    def __init__(self, repo):
        self._repo = repo
        self._refmap = refmap = {}  # refspec: node
        self._nodemap = nodemap = {}  # node: sorted([refspec, ...])
        self._clean = True
        self._aclean = True
        has_node = repo.changelog.index.has_node
        tonode = bin  # force local lookup
        try:
            with _getbkfile(repo) as bkfile:
                for line in bkfile:
                    line = line.strip()
                    if not line:
                        continue
                    try:
                        sha, refspec = line.split(b' ', 1)
                        node = tonode(sha)
                        if has_node(node):
                            refspec = encoding.tolocal(refspec)
                            refmap[refspec] = node
                            nrefs = nodemap.get(node)
                            if nrefs is None:
                                nodemap[node] = [refspec]
                            else:
                                nrefs.append(refspec)
                                if nrefs[-2] > refspec:
                                    # bookmarks weren't sorted before 4.5
                                    nrefs.sort()
                    except (TypeError, ValueError):
                        # TypeError:
                        # - bin(...)
                        # ValueError:
                        # - node in nm, for non-20-bytes entry
                        # - split(...), for string without ' '
                        bookmarkspath = b'.hg/bookmarks'
                        if bookmarksinstore(repo):
                            bookmarkspath = b'.hg/store/bookmarks'
                        repo.ui.warn(
                            _(b'malformed line in %s: %r\n')
                            % (bookmarkspath, pycompat.bytestr(line))
                        )
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
        self._active = _readactive(repo, self)

    @property
    def active(self):
        return self._active

    @active.setter
    def active(self, mark):
        if mark is not None and mark not in self._refmap:
            raise AssertionError(b'bookmark %s does not exist!' % mark)

        self._active = mark
        self._aclean = False

    def __len__(self):
        return len(self._refmap)

    def __iter__(self):
        return iter(self._refmap)

    def iteritems(self):
        return pycompat.iteritems(self._refmap)

    def items(self):
        return self._refmap.items()

    # TODO: maybe rename to allnames()?
    def keys(self):
        return self._refmap.keys()

    # TODO: maybe rename to allnodes()? but nodes would have to be deduplicated
    # could be self._nodemap.keys()
    def values(self):
        return self._refmap.values()

    def __contains__(self, mark):
        return mark in self._refmap

    def __getitem__(self, mark):
        return self._refmap[mark]

    def get(self, mark, default=None):
        return self._refmap.get(mark, default)

    def _set(self, mark, node):
        self._clean = False
        if mark in self._refmap:
            self._del(mark)
        self._refmap[mark] = node
        nrefs = self._nodemap.get(node)
        if nrefs is None:
            self._nodemap[node] = [mark]
        else:
            nrefs.append(mark)
            nrefs.sort()

    def _del(self, mark):
        if mark not in self._refmap:
            return
        self._clean = False
        node = self._refmap.pop(mark)
        nrefs = self._nodemap[node]
        if len(nrefs) == 1:
            assert nrefs[0] == mark
            del self._nodemap[node]
        else:
            nrefs.remove(mark)

    def names(self, node):
        """Return a sorted list of bookmarks pointing to the specified node"""
        return self._nodemap.get(node, [])

    def applychanges(self, repo, tr, changes):
        """Apply a list of changes to bookmarks"""
        bmchanges = tr.changes.get(b'bookmarks')
        for name, node in changes:
            old = self._refmap.get(name)
            if node is None:
                self._del(name)
            else:
                self._set(name, node)
            if bmchanges is not None:
                # if a previous value exists, preserve the "initial" value
                previous = bmchanges.get(name)
                if previous is not None:
                    old = previous[0]
                bmchanges[name] = (old, node)
        self._recordchange(tr)

    def _recordchange(self, tr):
        """record that bookmarks have been changed in a transaction

        The transaction is then responsible for updating the file content."""
        location = b'' if bookmarksinstore(self._repo) else b'plain'
        tr.addfilegenerator(
-            b'bookmarks', (b'bookmarks',), self._write, location=location
+            b'bookmarks',
+            (b'bookmarks',),
+            self._write,
+            location=location,
+            post_finalize=True,
        )
        tr.hookargs[b'bookmark_moved'] = b'1'

    def _writerepo(self, repo):
        """Factored out for extensibility"""
        rbm = repo._bookmarks
        if rbm.active not in self._refmap:
            rbm.active = None
            rbm._writeactive()

        if bookmarksinstore(repo):
            vfs = repo.svfs
            lock = repo.lock()
        else:
            vfs = repo.vfs
            lock = repo.wlock()
        with lock:
            with vfs(b'bookmarks', b'w', atomictemp=True, checkambig=True) as f:
                self._write(f)

    def _writeactive(self):
        if self._aclean:
            return
        with self._repo.wlock():
            if self._active is not None:
                with self._repo.vfs(
                    b'bookmarks.current', b'w', atomictemp=True, checkambig=True
                ) as f:
                    f.write(encoding.fromlocal(self._active))
            else:
                self._repo.vfs.tryunlink(b'bookmarks.current')
        self._aclean = True

    def _write(self, fp):
        for name, node in sorted(pycompat.iteritems(self._refmap)):
            fp.write(b"%s %s\n" % (hex(node), encoding.fromlocal(name)))
        self._clean = True
        self._repo.invalidatevolatilesets()

    def expandname(self, bname):
        if bname == b'.':
            if self.active:
                return self.active
            else:
                raise error.RepoLookupError(_(b"no active bookmark"))
        return bname

    def checkconflict(self, mark, force=False, target=None):
        """check repo for a potential clash of mark with an existing bookmark,
        branch, or hash

        If target is supplied, then check that we are moving the bookmark
        forward.

        If force is supplied, then forcibly move the bookmark to a new commit
        regardless of whether it is a move forward.

        If divergent bookmarks are to be deleted, they will be returned as a
        list.
274 """
278 """
275 cur = self._repo[b'.'].node()
279 cur = self._repo[b'.'].node()
276 if mark in self._refmap and not force:
280 if mark in self._refmap and not force:
277 if target:
281 if target:
278 if self._refmap[mark] == target and target == cur:
282 if self._refmap[mark] == target and target == cur:
279 # re-activating a bookmark
283 # re-activating a bookmark
280 return []
284 return []
281 rev = self._repo[target].rev()
285 rev = self._repo[target].rev()
282 anc = self._repo.changelog.ancestors([rev])
286 anc = self._repo.changelog.ancestors([rev])
283 bmctx = self._repo[self[mark]]
287 bmctx = self._repo[self[mark]]
284 divs = [
288 divs = [
285 self._refmap[b]
289 self._refmap[b]
286 for b in self._refmap
290 for b in self._refmap
287 if b.split(b'@', 1)[0] == mark.split(b'@', 1)[0]
291 if b.split(b'@', 1)[0] == mark.split(b'@', 1)[0]
288 ]
292 ]
289
293
290 # allow resolving a single divergent bookmark even if moving
294 # allow resolving a single divergent bookmark even if moving
291 # the bookmark across branches when a revision is specified
295 # the bookmark across branches when a revision is specified
292 # that contains a divergent bookmark
296 # that contains a divergent bookmark
293 if bmctx.rev() not in anc and target in divs:
297 if bmctx.rev() not in anc and target in divs:
294 return divergent2delete(self._repo, [target], mark)
298 return divergent2delete(self._repo, [target], mark)
295
299
296 deletefrom = [
300 deletefrom = [
297 b for b in divs if self._repo[b].rev() in anc or b == target
301 b for b in divs if self._repo[b].rev() in anc or b == target
298 ]
302 ]
299 delbms = divergent2delete(self._repo, deletefrom, mark)
303 delbms = divergent2delete(self._repo, deletefrom, mark)
300 if validdest(self._repo, bmctx, self._repo[target]):
304 if validdest(self._repo, bmctx, self._repo[target]):
301 self._repo.ui.status(
305 self._repo.ui.status(
302 _(b"moving bookmark '%s' forward from %s\n")
306 _(b"moving bookmark '%s' forward from %s\n")
303 % (mark, short(bmctx.node()))
307 % (mark, short(bmctx.node()))
304 )
308 )
305 return delbms
309 return delbms
306 raise error.Abort(
310 raise error.Abort(
307 _(b"bookmark '%s' already exists (use -f to force)") % mark
311 _(b"bookmark '%s' already exists (use -f to force)") % mark
308 )
312 )
309 if (
313 if (
310 mark in self._repo.branchmap()
314 mark in self._repo.branchmap()
311 or mark == self._repo.dirstate.branch()
315 or mark == self._repo.dirstate.branch()
312 ) and not force:
316 ) and not force:
313 raise error.Abort(
317 raise error.Abort(
314 _(b"a bookmark cannot have the name of an existing branch")
318 _(b"a bookmark cannot have the name of an existing branch")
315 )
319 )
316 if len(mark) > 3 and not force:
320 if len(mark) > 3 and not force:
317 try:
321 try:
318 shadowhash = scmutil.isrevsymbol(self._repo, mark)
322 shadowhash = scmutil.isrevsymbol(self._repo, mark)
319 except error.LookupError: # ambiguous identifier
323 except error.LookupError: # ambiguous identifier
320 shadowhash = False
324 shadowhash = False
321 if shadowhash:
325 if shadowhash:
322 self._repo.ui.warn(
326 self._repo.ui.warn(
323 _(
327 _(
324 b"bookmark %s matches a changeset hash\n"
328 b"bookmark %s matches a changeset hash\n"
325 b"(did you leave a -r out of an 'hg bookmark' "
329 b"(did you leave a -r out of an 'hg bookmark' "
326 b"command?)\n"
330 b"command?)\n"
327 )
331 )
328 % mark
332 % mark
329 )
333 )
330 return []
334 return []
331
335
332
336
333 def _readactive(repo, marks):
337 def _readactive(repo, marks):
334 """
338 """
335 Get the active bookmark. We can have an active bookmark that updates
339 Get the active bookmark. We can have an active bookmark that updates
336 itself as we commit. This function returns the name of that bookmark.
340 itself as we commit. This function returns the name of that bookmark.
337 It is stored in .hg/bookmarks.current
341 It is stored in .hg/bookmarks.current
338 """
342 """
339 # No readline() in osutil.posixfile, reading everything is
343 # No readline() in osutil.posixfile, reading everything is
340 # cheap.
344 # cheap.
341 content = repo.vfs.tryread(b'bookmarks.current')
345 content = repo.vfs.tryread(b'bookmarks.current')
342 mark = encoding.tolocal((content.splitlines() or [b''])[0])
346 mark = encoding.tolocal((content.splitlines() or [b''])[0])
343 if mark == b'' or mark not in marks:
347 if mark == b'' or mark not in marks:
344 mark = None
348 mark = None
345 return mark
349 return mark
346
350
347
351
348 def activate(repo, mark):
352 def activate(repo, mark):
349 """
353 """
350 Set the given bookmark to be 'active', meaning that this bookmark will
354 Set the given bookmark to be 'active', meaning that this bookmark will
351 follow new commits that are made.
355 follow new commits that are made.
352 The name is recorded in .hg/bookmarks.current
356 The name is recorded in .hg/bookmarks.current
353 """
357 """
354 repo._bookmarks.active = mark
358 repo._bookmarks.active = mark
355 repo._bookmarks._writeactive()
359 repo._bookmarks._writeactive()
356
360
357
361
358 def deactivate(repo):
362 def deactivate(repo):
359 """
363 """
360 Unset the active bookmark in this repository.
364 Unset the active bookmark in this repository.
361 """
365 """
362 repo._bookmarks.active = None
366 repo._bookmarks.active = None
363 repo._bookmarks._writeactive()
367 repo._bookmarks._writeactive()
364
368
365
369
366 def isactivewdirparent(repo):
370 def isactivewdirparent(repo):
367 """
371 """
368 Tell whether the 'active' bookmark (the one that follows new commits)
372 Tell whether the 'active' bookmark (the one that follows new commits)
369 points to one of the parents of the current working directory (wdir).
373 points to one of the parents of the current working directory (wdir).
370
374
371 While this is normally the case, it can on occasion be false; for example,
375 While this is normally the case, it can on occasion be false; for example,
372 immediately after a pull, the active bookmark can be moved to point
376 immediately after a pull, the active bookmark can be moved to point
373 to a place different than the wdir. This is solved by running `hg update`.
377 to a place different than the wdir. This is solved by running `hg update`.
374 """
378 """
375 mark = repo._activebookmark
379 mark = repo._activebookmark
376 marks = repo._bookmarks
380 marks = repo._bookmarks
377 parents = [p.node() for p in repo[None].parents()]
381 parents = [p.node() for p in repo[None].parents()]
378 return mark in marks and marks[mark] in parents
382 return mark in marks and marks[mark] in parents
379
383
380
384
381 def divergent2delete(repo, deletefrom, bm):
385 def divergent2delete(repo, deletefrom, bm):
382 """find divergent versions of bm on nodes in deletefrom.
386 """find divergent versions of bm on nodes in deletefrom.
383
387
384 the list of bookmark to delete."""
388 the list of bookmark to delete."""
    todelete = []
    marks = repo._bookmarks
    divergent = [
        b for b in marks if b.split(b'@', 1)[0] == bm.split(b'@', 1)[0]
    ]
    for mark in divergent:
        if mark == b'@' or b'@' not in mark:
            # can't be divergent by definition
            continue
        if mark and marks[mark] in deletefrom:
            if mark != bm:
                todelete.append(mark)
    return todelete


def headsforactive(repo):
    """Given a repo with an active bookmark, return divergent bookmark nodes.

    Args:
      repo: A repository with an active bookmark.

    Returns:
      A list of binary node ids that is the full list of other
      revisions with bookmarks divergent from the active bookmark. If
      there were no divergent bookmarks, then this list will contain
      only one entry.
    """
    if not repo._activebookmark:
        raise ValueError(
            b'headsforactive() only makes sense with an active bookmark'
        )
    name = repo._activebookmark.split(b'@', 1)[0]
    heads = []
    for mark, n in pycompat.iteritems(repo._bookmarks):
        if mark.split(b'@', 1)[0] == name:
            heads.append(n)
    return heads


def calculateupdate(ui, repo):
    """Return a tuple (activemark, movemarkfrom) indicating the active bookmark
    and where to move the active bookmark from, if needed."""
    checkout, movemarkfrom = None, None
    activemark = repo._activebookmark
    if isactivewdirparent(repo):
        movemarkfrom = repo[b'.'].node()
    elif activemark:
        ui.status(_(b"updating to active bookmark %s\n") % activemark)
        checkout = activemark
    return (checkout, movemarkfrom)


def update(repo, parents, node):
    deletefrom = parents
    marks = repo._bookmarks
    active = marks.active
    if not active:
        return False

    bmchanges = []
    if marks[active] in parents:
        new = repo[node]
        divs = [
            repo[marks[b]]
            for b in marks
            if b.split(b'@', 1)[0] == active.split(b'@', 1)[0]
        ]
        anc = repo.changelog.ancestors([new.rev()])
        deletefrom = [b.node() for b in divs if b.rev() in anc or b == new]
        if validdest(repo, repo[marks[active]], new):
            bmchanges.append((active, new.node()))

    for bm in divergent2delete(repo, deletefrom, active):
        bmchanges.append((bm, None))

    if bmchanges:
        with repo.lock(), repo.transaction(b'bookmark') as tr:
            marks.applychanges(repo, tr, bmchanges)
    return bool(bmchanges)


def isdivergent(b):
    return b'@' in b and not b.endswith(b'@')


def listbinbookmarks(repo):
    # We may try to list bookmarks on a repo type that does not
    # support it (e.g., statichttprepository).
    marks = getattr(repo, '_bookmarks', {})

    hasnode = repo.changelog.hasnode
    for k, v in pycompat.iteritems(marks):
        # don't expose local divergent bookmarks
        if hasnode(v) and not isdivergent(k):
            yield k, v


def listbookmarks(repo):
    d = {}
    for book, node in listbinbookmarks(repo):
        d[book] = hex(node)
    return d


def pushbookmark(repo, key, old, new):
    if isdivergent(key):
        return False
    if bookmarksinstore(repo):
        wlock = util.nullcontextmanager()
    else:
        wlock = repo.wlock()
    with wlock, repo.lock(), repo.transaction(b'bookmarks') as tr:
        marks = repo._bookmarks
        existing = hex(marks.get(key, b''))
        if existing != old and existing != new:
            return False
        if new == b'':
            changes = [(key, None)]
        else:
            if new not in repo:
                return False
            changes = [(key, repo[new].node())]
        marks.applychanges(repo, tr, changes)
    return True


def comparebookmarks(repo, srcmarks, dstmarks, targets=None):
    """Compare bookmarks between srcmarks and dstmarks

    This returns a tuple "(addsrc, adddst, advsrc, advdst, diverge,
    differ, invalid, same)"; each is a list of bookmarks as below:

    :addsrc: added on src side (removed on dst side, perhaps)
    :adddst: added on dst side (removed on src side, perhaps)
    :advsrc: advanced on src side
    :advdst: advanced on dst side
    :diverge: diverge
    :differ: changed, but changeset referred on src is unknown on dst
    :invalid: unknown on both sides
    :same: same on both sides

    Each element of the lists in the result tuple is a tuple "(bookmark name,
    changeset ID on source side, changeset ID on destination
    side)". Each changeset ID is a binary node or None.

    Changeset IDs of tuples in the "addsrc", "adddst", "differ" or
    "invalid" lists may be unknown to the repo.

    If "targets" is specified, only bookmarks listed in it are
    examined.
    """

    if targets:
        bset = set(targets)
    else:
        srcmarkset = set(srcmarks)
        dstmarkset = set(dstmarks)
        bset = srcmarkset | dstmarkset

    results = ([], [], [], [], [], [], [], [])
    addsrc = results[0].append
    adddst = results[1].append
    advsrc = results[2].append
    advdst = results[3].append
    diverge = results[4].append
    differ = results[5].append
    invalid = results[6].append
    same = results[7].append

    for b in sorted(bset):
        if b not in srcmarks:
            if b in dstmarks:
                adddst((b, None, dstmarks[b]))
            else:
                invalid((b, None, None))
        elif b not in dstmarks:
            addsrc((b, srcmarks[b], None))
        else:
            scid = srcmarks[b]
            dcid = dstmarks[b]
            if scid == dcid:
                same((b, scid, dcid))
            elif scid in repo and dcid in repo:
                sctx = repo[scid]
                dctx = repo[dcid]
                if sctx.rev() < dctx.rev():
                    if validdest(repo, sctx, dctx):
                        advdst((b, scid, dcid))
                    else:
                        diverge((b, scid, dcid))
                else:
                    if validdest(repo, dctx, sctx):
                        advsrc((b, scid, dcid))
                    else:
                        diverge((b, scid, dcid))
            else:
                # it is too expensive to examine in detail, in this case
                differ((b, scid, dcid))

    return results


def _diverge(ui, b, path, localmarks, remotenode):
    """Return appropriate diverged bookmark for specified ``path``

    This returns None if it fails to assign any divergent
    bookmark name.

    This reuses an already existing one with an "@number" suffix, if it
    refers to ``remotenode``.
595 """
599 """
596 if b == b'@':
600 if b == b'@':
597 b = b''
601 b = b''
598 # try to use an @pathalias suffix
602 # try to use an @pathalias suffix
599 # if an @pathalias already exists, we overwrite (update) it
603 # if an @pathalias already exists, we overwrite (update) it
600 if path.startswith(b"file:"):
604 if path.startswith(b"file:"):
601 path = urlutil.url(path).path
605 path = urlutil.url(path).path
602 for name, p in urlutil.list_paths(ui):
606 for name, p in urlutil.list_paths(ui):
603 loc = p.rawloc
607 loc = p.rawloc
604 if loc.startswith(b"file:"):
608 if loc.startswith(b"file:"):
605 loc = urlutil.url(loc).path
609 loc = urlutil.url(loc).path
606 if path == loc:
610 if path == loc:
607 return b'%s@%s' % (b, name)
611 return b'%s@%s' % (b, name)
608
612
609 # assign a unique "@number" suffix newly
613 # assign a unique "@number" suffix newly
610 for x in range(1, 100):
614 for x in range(1, 100):
611 n = b'%s@%d' % (b, x)
615 n = b'%s@%d' % (b, x)
612 if n not in localmarks or localmarks[n] == remotenode:
616 if n not in localmarks or localmarks[n] == remotenode:
613 return n
617 return n
614
618
615 return None
619 return None
616
620
617
621
618 def unhexlifybookmarks(marks):
622 def unhexlifybookmarks(marks):
619 binremotemarks = {}
623 binremotemarks = {}
620 for name, node in marks.items():
624 for name, node in marks.items():
621 binremotemarks[name] = bin(node)
625 binremotemarks[name] = bin(node)
622 return binremotemarks
626 return binremotemarks
623
627
624
628
625 _binaryentry = struct.Struct(b'>20sH')
629 _binaryentry = struct.Struct(b'>20sH')
626
630
627
631
628 def binaryencode(repo, bookmarks):
632 def binaryencode(repo, bookmarks):
629 """encode a '(bookmark, node)' iterable into a binary stream
633 """encode a '(bookmark, node)' iterable into a binary stream
630
634
631 the binary format is:
635 the binary format is:
632
636
633 <node><bookmark-length><bookmark-name>
637 <node><bookmark-length><bookmark-name>
634
638
635 :node: is a 20 bytes binary node,
639 :node: is a 20 bytes binary node,
636 :bookmark-length: an unsigned short,
640 :bookmark-length: an unsigned short,
637 :bookmark-name: the name of the bookmark (of length <bookmark-length>)
641 :bookmark-name: the name of the bookmark (of length <bookmark-length>)
638
642
639 wdirid (all bits set) will be used as a special value for "missing"
643 wdirid (all bits set) will be used as a special value for "missing"
640 """
644 """
641 binarydata = []
645 binarydata = []
642 for book, node in bookmarks:
646 for book, node in bookmarks:
643 if not node: # None or ''
647 if not node: # None or ''
644 node = repo.nodeconstants.wdirid
648 node = repo.nodeconstants.wdirid
645 binarydata.append(_binaryentry.pack(node, len(book)))
649 binarydata.append(_binaryentry.pack(node, len(book)))
646 binarydata.append(book)
650 binarydata.append(book)
647 return b''.join(binarydata)
651 return b''.join(binarydata)
648
652
649
653
650 def binarydecode(repo, stream):
654 def binarydecode(repo, stream):
651 """decode a binary stream into an '(bookmark, node)' iterable
655 """decode a binary stream into an '(bookmark, node)' iterable

    the binary format is:

    <node><bookmark-length><bookmark-name>

    :node: is a 20 bytes binary node,
    :bookmark-length: an unsigned short,
    :bookmark-name: the name of the bookmark (of length <bookmark-length>)

    wdirid (all bits set) will be used as a special value for "missing"
    """
    entrysize = _binaryentry.size
    books = []
    while True:
        entry = stream.read(entrysize)
        if len(entry) < entrysize:
            if entry:
                raise error.Abort(_(b'bad bookmark stream'))
            break
        node, length = _binaryentry.unpack(entry)
        bookmark = stream.read(length)
        if len(bookmark) < length:
            if entry:
                raise error.Abort(_(b'bad bookmark stream'))
        if node == repo.nodeconstants.wdirid:
            node = None
        books.append((bookmark, node))
    return books


def mirroring_remote(ui, repo, remotemarks):
    """computes the bookmark changes that set the local bookmarks to
    remotemarks"""
    changed = []
    localmarks = repo._bookmarks
    for (b, id) in pycompat.iteritems(remotemarks):
        if id != localmarks.get(b, None) and id in repo:
            changed.append((b, id, ui.debug, _(b"updating bookmark %s\n") % b))
    for b in localmarks:
        if b not in remotemarks:
            changed.append(
                (b, None, ui.debug, _(b"removing bookmark %s\n") % b)
            )
    return changed


def merging_from_remote(ui, repo, remotemarks, path, explicit=()):
    """computes the bookmark changes that merge remote bookmarks into the
    local bookmarks, based on comparebookmarks"""
    localmarks = repo._bookmarks
    (
        addsrc,
        adddst,
        advsrc,
        advdst,
        diverge,
        differ,
        invalid,
        same,
    ) = comparebookmarks(repo, remotemarks, localmarks)

    status = ui.status
    warn = ui.warn
    if ui.configbool(b'ui', b'quietbookmarkmove'):
        status = warn = ui.debug

    explicit = set(explicit)
    changed = []
    for b, scid, dcid in addsrc:
        if scid in repo:  # add remote bookmarks for changes we already have
            changed.append(
                (b, scid, status, _(b"adding remote bookmark %s\n") % b)
            )
        elif b in explicit:
            explicit.remove(b)
            ui.warn(
                _(b"remote bookmark %s points to locally missing %s\n")
                % (b, hex(scid)[:12])
            )

    for b, scid, dcid in advsrc:
        changed.append((b, scid, status, _(b"updating bookmark %s\n") % b))
    # remove normal movement from explicit set
    explicit.difference_update(d[0] for d in changed)

    for b, scid, dcid in diverge:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, scid, status, _(b"importing bookmark %s\n") % b))
        else:
            db = _diverge(ui, b, path, localmarks, scid)
            if db:
                changed.append(
                    (
                        db,
                        scid,
                        warn,
                        _(b"divergent bookmark %s stored as %s\n") % (b, db),
                    )
                )
            else:
                warn(
                    _(
                        b"warning: failed to assign numbered name "
                        b"to divergent bookmark %s\n"
                    )
                    % b
                )
    for b, scid, dcid in adddst + advdst:
        if b in explicit:
            explicit.discard(b)
            changed.append((b, scid, status, _(b"importing bookmark %s\n") % b))
    for b, scid, dcid in differ:
        if b in explicit:
            explicit.remove(b)
            ui.warn(
                _(b"remote bookmark %s points to locally missing %s\n")
                % (b, hex(scid)[:12])
            )
    return changed


def updatefromremote(
    ui, repo, remotemarks, path, trfunc, explicit=(), mode=None
):
    if mode == b'ignore':
        # This should move to a higher level to avoid fetching bookmarks at all
        return
    ui.debug(b"checking for updated bookmarks\n")
    if mode == b'mirror':
        changed = mirroring_remote(ui, repo, remotemarks)
    else:
        changed = merging_from_remote(ui, repo, remotemarks, path, explicit)

    if changed:
        tr = trfunc()
        changes = []
        key = lambda t: (t[0], t[1] or b'')
        for b, node, writer, msg in sorted(changed, key=key):
            changes.append((b, node))
            writer(msg)
        repo._bookmarks.applychanges(repo, tr, changes)


def incoming(ui, repo, peer, mode=None):
    """Show bookmarks incoming from other to repo"""
    if mode == b'ignore':
        ui.status(_(b"bookmarks exchange disabled with this path\n"))
        return 0
    ui.status(_(b"searching for changed bookmarks\n"))

    with peer.commandexecutor() as e:
        remotemarks = unhexlifybookmarks(
            e.callcommand(
                b'listkeys',
                {
                    b'namespace': b'bookmarks',
                },
            ).result()
        )

    incomings = []
    if ui.debugflag:
        getid = lambda id: id
    else:
        getid = lambda id: id[:12]
    if ui.verbose:

        def add(b, id, st):
            incomings.append(b"   %-25s %s %s\n" % (b, getid(id), st))

    else:

        def add(b, id, st):
            incomings.append(b"   %-25s %s\n" % (b, getid(id)))

    if mode == b'mirror':
        localmarks = repo._bookmarks
        allmarks = set(remotemarks.keys()) | set(localmarks.keys())
        for b in sorted(allmarks):
            loc = localmarks.get(b)
            rem = remotemarks.get(b)
            if loc == rem:
                continue
            elif loc is None:
                add(b, hex(rem), _(b'added'))
            elif rem is None:
                add(b, hex(repo.nullid), _(b'removed'))
            else:
                add(b, hex(rem), _(b'changed'))
    else:
        r = comparebookmarks(repo, remotemarks, repo._bookmarks)
        addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r

        for b, scid, dcid in addsrc:
            # i18n: "added" refers to a bookmark
            add(b, hex(scid), _(b'added'))
        for b, scid, dcid in advsrc:
            # i18n: "advanced" refers to a bookmark
            add(b, hex(scid), _(b'advanced'))
        for b, scid, dcid in diverge:
            # i18n: "diverged" refers to a bookmark
            add(b, hex(scid), _(b'diverged'))
        for b, scid, dcid in differ:
            # i18n: "changed" refers to a bookmark
            add(b, hex(scid), _(b'changed'))

    if not incomings:
        ui.status(_(b"no changed bookmarks found\n"))
        return 1

    for s in sorted(incomings):
        ui.write(s)

    return 0


def outgoing(ui, repo, other):
    """Show bookmarks outgoing from repo to other"""
    ui.status(_(b"searching for changed bookmarks\n"))

    remotemarks = unhexlifybookmarks(other.listkeys(b'bookmarks'))
    r = comparebookmarks(repo, repo._bookmarks, remotemarks)
    addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r

    outgoings = []
    if ui.debugflag:
        getid = lambda id: id
    else:
        getid = lambda id: id[:12]
    if ui.verbose:

        def add(b, id, st):
            outgoings.append(b"   %-25s %s %s\n" % (b, getid(id), st))

    else:

        def add(b, id, st):
            outgoings.append(b"   %-25s %s\n" % (b, getid(id)))

    for b, scid, dcid in addsrc:
        # i18n: "added" refers to a bookmark
        add(b, hex(scid), _(b'added'))
    for b, scid, dcid in adddst:
        # i18n: "deleted" refers to a bookmark
        add(b, b' ' * 40, _(b'deleted'))
    for b, scid, dcid in advsrc:
        # i18n: "advanced" refers to a bookmark
        add(b, hex(scid), _(b'advanced'))
    for b, scid, dcid in diverge:
        # i18n: "diverged" refers to a bookmark
        add(b, hex(scid), _(b'diverged'))
    for b, scid, dcid in differ:
        # i18n: "changed" refers to a bookmark
        add(b, hex(scid), _(b'changed'))

    if not outgoings:
        ui.status(_(b"no changed bookmarks found\n"))
        return 1

    for s in sorted(outgoings):
        ui.write(s)

    return 0


def summary(repo, peer):
    """Compare bookmarks between repo and other for "hg summary" output

    This returns a "(# of incoming, # of outgoing)" tuple.
922 """
926 """
923 with peer.commandexecutor() as e:
927 with peer.commandexecutor() as e:
924 remotemarks = unhexlifybookmarks(
928 remotemarks = unhexlifybookmarks(
925 e.callcommand(
929 e.callcommand(
926 b'listkeys',
930 b'listkeys',
927 {
931 {
928 b'namespace': b'bookmarks',
932 b'namespace': b'bookmarks',
929 },
933 },
930 ).result()
934 ).result()
931 )
935 )
932
936
933 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
937 r = comparebookmarks(repo, remotemarks, repo._bookmarks)
934 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
938 addsrc, adddst, advsrc, advdst, diverge, differ, invalid, same = r
935 return (len(addsrc), len(adddst))
939 return (len(addsrc), len(adddst))
936
940
937
941
938 def validdest(repo, old, new):
942 def validdest(repo, old, new):
939 """Is the new bookmark destination a valid update from the old one"""
943 """Is the new bookmark destination a valid update from the old one"""
940 repo = repo.unfiltered()
944 repo = repo.unfiltered()
941 if old == new:
945 if old == new:
942 # Old == new -> nothing to update.
946 # Old == new -> nothing to update.
943 return False
947 return False
944 elif not old:
948 elif not old:
945 # old is nullrev, anything is valid.
949 # old is nullrev, anything is valid.
946 # (new != nullrev has been excluded by the previous check)
950 # (new != nullrev has been excluded by the previous check)
947 return True
951 return True
948 elif repo.obsstore:
952 elif repo.obsstore:
949 return new.node() in obsutil.foreground(repo, [old.node()])
953 return new.node() in obsutil.foreground(repo, [old.node()])
950 else:
954 else:
951 # still an independent clause as it is lazier (and therefore faster)
955 # still an independent clause as it is lazier (and therefore faster)
952 return old.isancestorof(new)
956 return old.isancestorof(new)
953
957
954
958
955 def checkformat(repo, mark):
959 def checkformat(repo, mark):
956 """return a valid version of a potential bookmark name
960 """return a valid version of a potential bookmark name
957
961
958 Raises an abort error if the bookmark name is not valid.
962 Raises an abort error if the bookmark name is not valid.
959 """
963 """
960 mark = mark.strip()
964 mark = mark.strip()
961 if not mark:
965 if not mark:
962 raise error.InputError(
966 raise error.InputError(
963 _(b"bookmark names cannot consist entirely of whitespace")
967 _(b"bookmark names cannot consist entirely of whitespace")
964 )
968 )
965 scmutil.checknewlabel(repo, mark, b'bookmark')
969 scmutil.checknewlabel(repo, mark, b'bookmark')
966 return mark
970 return mark
967
971
968
972
969 def delete(repo, tr, names):
973 def delete(repo, tr, names):
970 """remove a mark from the bookmark store
974 """remove a mark from the bookmark store
971
975
972 Raises an abort error if mark does not exist.
976 Raises an abort error if mark does not exist.
973 """
977 """
974 marks = repo._bookmarks
978 marks = repo._bookmarks
975 changes = []
979 changes = []
976 for mark in names:
980 for mark in names:
977 if mark not in marks:
981 if mark not in marks:
978 raise error.InputError(_(b"bookmark '%s' does not exist") % mark)
982 raise error.InputError(_(b"bookmark '%s' does not exist") % mark)
979 if mark == repo._activebookmark:
983 if mark == repo._activebookmark:
980 deactivate(repo)
984 deactivate(repo)
981 changes.append((mark, None))
985 changes.append((mark, None))
982 marks.applychanges(repo, tr, changes)
986 marks.applychanges(repo, tr, changes)
983
987
984
988
985 def rename(repo, tr, old, new, force=False, inactive=False):
989 def rename(repo, tr, old, new, force=False, inactive=False):
986 """rename a bookmark from old to new
990 """rename a bookmark from old to new
987
991
988 If force is specified, then the new name can overwrite an existing
992 If force is specified, then the new name can overwrite an existing
989 bookmark.
993 bookmark.
990
994
991 If inactive is specified, then do not activate the new bookmark.
995 If inactive is specified, then do not activate the new bookmark.
992
996
993 Raises an abort error if old is not in the bookmark store.
997 Raises an abort error if old is not in the bookmark store.
994 """
998 """
995 marks = repo._bookmarks
999 marks = repo._bookmarks
996 mark = checkformat(repo, new)
1000 mark = checkformat(repo, new)
997 if old not in marks:
1001 if old not in marks:
998 raise error.InputError(_(b"bookmark '%s' does not exist") % old)
1002 raise error.InputError(_(b"bookmark '%s' does not exist") % old)
999 changes = []
1003 changes = []
1000 for bm in marks.checkconflict(mark, force):
1004 for bm in marks.checkconflict(mark, force):
1001 changes.append((bm, None))
1005 changes.append((bm, None))
1002 changes.extend([(mark, marks[old]), (old, None)])
1006 changes.extend([(mark, marks[old]), (old, None)])
1003 marks.applychanges(repo, tr, changes)
1007 marks.applychanges(repo, tr, changes)
1004 if repo._activebookmark == old and not inactive:
1008 if repo._activebookmark == old and not inactive:
1005 activate(repo, mark)
1009 activate(repo, mark)
1006
1010
1007
1011
1008 def addbookmarks(repo, tr, names, rev=None, force=False, inactive=False):
1012 def addbookmarks(repo, tr, names, rev=None, force=False, inactive=False):
1009 """add a list of bookmarks
1013 """add a list of bookmarks
1010
1014
1011 If force is specified, then the new name can overwrite an existing
1015 If force is specified, then the new name can overwrite an existing
1012 bookmark.
1016 bookmark.
1013
1017
1014 If inactive is specified, then do not activate any bookmark. Otherwise, the
1018 If inactive is specified, then do not activate any bookmark. Otherwise, the
1015 first bookmark is activated.
1019 first bookmark is activated.
1016
1020
1017 Raises an abort error if a bookmark name is invalid or conflicts.
1021 Raises an abort error if a bookmark name is invalid or conflicts.
1018 """
1022 """
1019 marks = repo._bookmarks
1023 marks = repo._bookmarks
1020 cur = repo[b'.'].node()
1024 cur = repo[b'.'].node()
1021 newact = None
1025 newact = None
1022 changes = []
1026 changes = []
1023
1027
1024 # unhide revs if any
1028 # unhide revs if any
1025 if rev:
1029 if rev:
1026 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
1030 repo = scmutil.unhidehashlikerevs(repo, [rev], b'nowarn')
1027
1031
1028 ctx = scmutil.revsingle(repo, rev, None)
1032 ctx = scmutil.revsingle(repo, rev, None)
1029 # bookmarking wdir means creating a bookmark on p1 and activating it
1033 # bookmarking wdir means creating a bookmark on p1 and activating it
1030 activatenew = not inactive and ctx.rev() is None
1034 activatenew = not inactive and ctx.rev() is None
1031 if ctx.node() is None:
1035 if ctx.node() is None:
1032 ctx = ctx.p1()
1036 ctx = ctx.p1()
1033 tgt = ctx.node()
1037 tgt = ctx.node()
1034 assert tgt
1038 assert tgt
1035
1039
1036 for mark in names:
1040 for mark in names:
1037 mark = checkformat(repo, mark)
1041 mark = checkformat(repo, mark)
1038 if newact is None:
1042 if newact is None:
1039 newact = mark
1043 newact = mark
1040 if inactive and mark == repo._activebookmark:
1044 if inactive and mark == repo._activebookmark:
1041 deactivate(repo)
1045 deactivate(repo)
1042 continue
1046 continue
1043 for bm in marks.checkconflict(mark, force, tgt):
1047 for bm in marks.checkconflict(mark, force, tgt):
1044 changes.append((bm, None))
1048 changes.append((bm, None))
1045 changes.append((mark, tgt))
1049 changes.append((mark, tgt))
1046
1050
1047 # nothing changed but for the one deactivated above
1051 # nothing changed but for the one deactivated above
1048 if not changes:
1052 if not changes:
1049 return
1053 return
1050
1054
1051 if ctx.hidden():
1055 if ctx.hidden():
1052 repo.ui.warn(_(b"bookmarking hidden changeset %s\n") % ctx.hex()[:12])
1056 repo.ui.warn(_(b"bookmarking hidden changeset %s\n") % ctx.hex()[:12])
1053
1057
1054 if ctx.obsolete():
1058 if ctx.obsolete():
1055 msg = obsutil._getfilteredreason(repo, ctx.hex()[:12], ctx)
1059 msg = obsutil._getfilteredreason(repo, ctx.hex()[:12], ctx)
1056 repo.ui.warn(b"(%s)\n" % msg)
1060 repo.ui.warn(b"(%s)\n" % msg)
1057
1061
1058 marks.applychanges(repo, tr, changes)
1062 marks.applychanges(repo, tr, changes)
1059 if activatenew and cur == marks[newact]:
1063 if activatenew and cur == marks[newact]:
1060 activate(repo, newact)
1064 activate(repo, newact)
1061 elif cur != tgt and newact == repo._activebookmark:
1065 elif cur != tgt and newact == repo._activebookmark:
1062 deactivate(repo)
1066 deactivate(repo)
1063
1067
1064
1068
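# Illustrative sketch (not part of Mercurial): the activation decision at
# the end of addbookmarks(), reduced to plain values. `decide` is a
# hypothetical helper for this example only; it returns b'activate',
# b'deactivate', or None from the same inputs the code above consults.

def decide(activatenew, cur, tgt, newact, active):
    if activatenew and cur == tgt:
        return b'activate'      # bookmarked the wdir parent: activate it
    elif cur != tgt and newact == active:
        return b'deactivate'    # moved the active bookmark elsewhere
    return None

# bookmarking the working-copy parent activates the first new bookmark
assert decide(True, b'p1', b'p1', b'book', None) == b'activate'
# re-pointing the active bookmark at another revision deactivates it
assert decide(False, b'p1', b'other', b'book', b'book') == b'deactivate'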
1065 def _printbookmarks(ui, repo, fm, bmarks):
1069 def _printbookmarks(ui, repo, fm, bmarks):
1066 """private method to print bookmarks
1070 """private method to print bookmarks
1067
1071
1068 Provides a way for extensions to control how bookmarks are printed (e.g.
1072 Provides a way for extensions to control how bookmarks are printed (e.g.
1069 prepend or append names)
1073 prepend or append names)
1070 """
1074 """
1071 hexfn = fm.hexfunc
1075 hexfn = fm.hexfunc
1072 if len(bmarks) == 0 and fm.isplain():
1076 if len(bmarks) == 0 and fm.isplain():
1073 ui.status(_(b"no bookmarks set\n"))
1077 ui.status(_(b"no bookmarks set\n"))
1074 for bmark, (n, prefix, label) in sorted(pycompat.iteritems(bmarks)):
1078 for bmark, (n, prefix, label) in sorted(pycompat.iteritems(bmarks)):
1075 fm.startitem()
1079 fm.startitem()
1076 fm.context(repo=repo)
1080 fm.context(repo=repo)
1077 if not ui.quiet:
1081 if not ui.quiet:
1078 fm.plain(b' %s ' % prefix, label=label)
1082 fm.plain(b' %s ' % prefix, label=label)
1079 fm.write(b'bookmark', b'%s', bmark, label=label)
1083 fm.write(b'bookmark', b'%s', bmark, label=label)
1080 pad = b" " * (25 - encoding.colwidth(bmark))
1084 pad = b" " * (25 - encoding.colwidth(bmark))
1081 fm.condwrite(
1085 fm.condwrite(
1082 not ui.quiet,
1086 not ui.quiet,
1083 b'rev node',
1087 b'rev node',
1084 pad + b' %d:%s',
1088 pad + b' %d:%s',
1085 repo.changelog.rev(n),
1089 repo.changelog.rev(n),
1086 hexfn(n),
1090 hexfn(n),
1087 label=label,
1091 label=label,
1088 )
1092 )
1089 fm.data(active=(activebookmarklabel in label))
1093 fm.data(active=(activebookmarklabel in label))
1090 fm.plain(b'\n')
1094 fm.plain(b'\n')
1091
1095
1092
1096
1093 def printbookmarks(ui, repo, fm, names=None):
1097 def printbookmarks(ui, repo, fm, names=None):
1094 """print bookmarks by the given formatter
1098 """print bookmarks by the given formatter
1095
1099
1096 Provides a way for extensions to control how bookmarks are printed.
1100 Provides a way for extensions to control how bookmarks are printed.
1097 """
1101 """
1098 marks = repo._bookmarks
1102 marks = repo._bookmarks
1099 bmarks = {}
1103 bmarks = {}
1100 for bmark in names or marks:
1104 for bmark in names or marks:
1101 if bmark not in marks:
1105 if bmark not in marks:
1102 raise error.InputError(_(b"bookmark '%s' does not exist") % bmark)
1106 raise error.InputError(_(b"bookmark '%s' does not exist") % bmark)
1103 active = repo._activebookmark
1107 active = repo._activebookmark
1104 if bmark == active:
1108 if bmark == active:
1105 prefix, label = b'*', activebookmarklabel
1109 prefix, label = b'*', activebookmarklabel
1106 else:
1110 else:
1107 prefix, label = b' ', b''
1111 prefix, label = b' ', b''
1108
1112
1109 bmarks[bmark] = (marks[bmark], prefix, label)
1113 bmarks[bmark] = (marks[bmark], prefix, label)
1110 _printbookmarks(ui, repo, fm, bmarks)
1114 _printbookmarks(ui, repo, fm, bmarks)
1111
1115
1112
1116
1113 def preparehookargs(name, old, new):
1117 def preparehookargs(name, old, new):
1114 if new is None:
1118 if new is None:
1115 new = b''
1119 new = b''
1116 if old is None:
1120 if old is None:
1117 old = b''
1121 old = b''
1118 return {b'bookmark': name, b'node': hex(new), b'oldnode': hex(old)}
1122 return {b'bookmark': name, b'node': hex(new), b'oldnode': hex(old)}
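# Illustrative sketch (not part of Mercurial): what a hook sees after
# preparehookargs() normalizes missing nodes to the empty string and
# hex-encodes the rest. `toy_preparehookargs` is a hypothetical stand-in
# using binascii.hexlify in place of Mercurial's hex().

import binascii

def toy_preparehookargs(name, old, new):
    new = b'' if new is None else binascii.hexlify(new)
    old = b'' if old is None else binascii.hexlify(old)
    return {b'bookmark': name, b'node': new, b'oldnode': old}

args = toy_preparehookargs(b'dev', None, b'\x12\x34')
assert args == {b'bookmark': b'dev', b'node': b'1234', b'oldnode': b''}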
@@ -1,1477 +1,1481 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15 import uuid
15 import uuid
16
16
17 from .i18n import _
17 from .i18n import _
18 from .pycompat import delattr
18 from .pycompat import delattr
19
19
20 from hgdemandimport import tracing
20 from hgdemandimport import tracing
21
21
22 from . import (
22 from . import (
23 dirstatemap,
23 dirstatemap,
24 encoding,
24 encoding,
25 error,
25 error,
26 match as matchmod,
26 match as matchmod,
27 node,
27 node,
28 pathutil,
28 pathutil,
29 policy,
29 policy,
30 pycompat,
30 pycompat,
31 scmutil,
31 scmutil,
32 sparse,
32 sparse,
33 util,
33 util,
34 )
34 )
35
35
36 from .dirstateutils import (
36 from .dirstateutils import (
37 timestamp,
37 timestamp,
38 )
38 )
39
39
40 from .interfaces import (
40 from .interfaces import (
41 dirstate as intdirstate,
41 dirstate as intdirstate,
42 util as interfaceutil,
42 util as interfaceutil,
43 )
43 )
44
44
45 parsers = policy.importmod('parsers')
45 parsers = policy.importmod('parsers')
46 rustmod = policy.importrust('dirstate')
46 rustmod = policy.importrust('dirstate')
47
47
48 HAS_FAST_DIRSTATE_V2 = rustmod is not None
48 HAS_FAST_DIRSTATE_V2 = rustmod is not None
49
49
50 propertycache = util.propertycache
50 propertycache = util.propertycache
51 filecache = scmutil.filecache
51 filecache = scmutil.filecache
52 _rangemask = dirstatemap.rangemask
52 _rangemask = dirstatemap.rangemask
53
53
54 DirstateItem = dirstatemap.DirstateItem
54 DirstateItem = dirstatemap.DirstateItem
55
55
56
56
57 class repocache(filecache):
57 class repocache(filecache):
58 """filecache for files in .hg/"""
58 """filecache for files in .hg/"""
59
59
60 def join(self, obj, fname):
60 def join(self, obj, fname):
61 return obj._opener.join(fname)
61 return obj._opener.join(fname)
62
62
63
63
64 class rootcache(filecache):
64 class rootcache(filecache):
65 """filecache for files in the repository root"""
65 """filecache for files in the repository root"""
66
66
67 def join(self, obj, fname):
67 def join(self, obj, fname):
68 return obj._join(fname)
68 return obj._join(fname)
69
69
70
70
71 def requires_parents_change(func):
71 def requires_parents_change(func):
72 def wrap(self, *args, **kwargs):
72 def wrap(self, *args, **kwargs):
73 if not self.pendingparentchange():
73 if not self.pendingparentchange():
74 msg = 'calling `%s` outside of a parentchange context'
74 msg = 'calling `%s` outside of a parentchange context'
75 msg %= func.__name__
75 msg %= func.__name__
76 raise error.ProgrammingError(msg)
76 raise error.ProgrammingError(msg)
77 return func(self, *args, **kwargs)
77 return func(self, *args, **kwargs)
78
78
79 return wrap
79 return wrap
80
80
81
81
82 def requires_no_parents_change(func):
82 def requires_no_parents_change(func):
83 def wrap(self, *args, **kwargs):
83 def wrap(self, *args, **kwargs):
84 if self.pendingparentchange():
84 if self.pendingparentchange():
85 msg = 'calling `%s` inside of a parentchange context'
85 msg = 'calling `%s` inside of a parentchange context'
86 msg %= func.__name__
86 msg %= func.__name__
87 raise error.ProgrammingError(msg)
87 raise error.ProgrammingError(msg)
88 return func(self, *args, **kwargs)
88 return func(self, *args, **kwargs)
89
89
90 return wrap
90 return wrap
91
91
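# Illustrative sketch (not part of Mercurial): the two decorators above
# share one pattern -- check a predicate on `self`, raise if it fails,
# then delegate to the wrapped method. A generic, hypothetical version
# for this example only:

def guarded(predicate, msg):
    def decorator(func):
        def wrap(self, *args, **kwargs):
            if not predicate(self):
                raise RuntimeError(msg % func.__name__)
            return func(self, *args, **kwargs)
        return wrap
    return decorator

class Demo:
    ready = False

    @guarded(lambda self: self.ready, 'calling `%s` before setup')
    def run(self):
        return 'ok'

d = Demo()
try:
    d.run()                 # guard fires: predicate is False
except RuntimeError:
    pass
d.ready = True
assert d.run() == 'ok'      # guard passes: call goes through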
92
92
93 @interfaceutil.implementer(intdirstate.idirstate)
93 @interfaceutil.implementer(intdirstate.idirstate)
94 class dirstate(object):
94 class dirstate(object):
95 def __init__(
95 def __init__(
96 self,
96 self,
97 opener,
97 opener,
98 ui,
98 ui,
99 root,
99 root,
100 validate,
100 validate,
101 sparsematchfn,
101 sparsematchfn,
102 nodeconstants,
102 nodeconstants,
103 use_dirstate_v2,
103 use_dirstate_v2,
104 use_tracked_key=False,
104 use_tracked_key=False,
105 ):
105 ):
106 """Create a new dirstate object.
106 """Create a new dirstate object.
107
107
108 opener is an open()-like callable that can be used to open the
108 opener is an open()-like callable that can be used to open the
109 dirstate file; root is the root of the directory tracked by
109 dirstate file; root is the root of the directory tracked by
110 the dirstate.
110 the dirstate.
111 """
111 """
112 self._use_dirstate_v2 = use_dirstate_v2
112 self._use_dirstate_v2 = use_dirstate_v2
113 self._use_tracked_key = use_tracked_key
113 self._use_tracked_key = use_tracked_key
114 self._nodeconstants = nodeconstants
114 self._nodeconstants = nodeconstants
115 self._opener = opener
115 self._opener = opener
116 self._validate = validate
116 self._validate = validate
117 self._root = root
117 self._root = root
118 self._sparsematchfn = sparsematchfn
118 self._sparsematchfn = sparsematchfn
119 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
119 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
120 # UNC path pointing to root share (issue4557)
120 # UNC path pointing to root share (issue4557)
121 self._rootdir = pathutil.normasprefix(root)
121 self._rootdir = pathutil.normasprefix(root)
122 # True if any internal state may be different
122 # True if any internal state may be different
123 self._dirty = False
123 self._dirty = False
124 # True if the set of tracked files may be different
124 # True if the set of tracked files may be different
125 self._dirty_tracked_set = False
125 self._dirty_tracked_set = False
126 self._ui = ui
126 self._ui = ui
127 self._filecache = {}
127 self._filecache = {}
128 self._parentwriters = 0
128 self._parentwriters = 0
129 self._filename = b'dirstate'
129 self._filename = b'dirstate'
130 self._filename_tk = b'dirstate-tracked-key'
130 self._filename_tk = b'dirstate-tracked-key'
131 self._pendingfilename = b'%s.pending' % self._filename
131 self._pendingfilename = b'%s.pending' % self._filename
132 self._plchangecallbacks = {}
132 self._plchangecallbacks = {}
133 self._origpl = None
133 self._origpl = None
134 self._mapcls = dirstatemap.dirstatemap
134 self._mapcls = dirstatemap.dirstatemap
135 # Access and cache cwd early, so we don't access it for the first time
135 # Access and cache cwd early, so we don't access it for the first time
136 # after a working-copy update caused it to not exist (accessing it then
136 # after a working-copy update caused it to not exist (accessing it then
137 # raises an exception).
137 # raises an exception).
138 self._cwd
138 self._cwd
139
139
140 def prefetch_parents(self):
140 def prefetch_parents(self):
141 """make sure the parents are loaded
141 """make sure the parents are loaded
142
142
143 Used to avoid a race condition.
143 Used to avoid a race condition.
144 """
144 """
145 self._pl
145 self._pl
146
146
147 @contextlib.contextmanager
147 @contextlib.contextmanager
148 def parentchange(self):
148 def parentchange(self):
149 """Context manager for handling dirstate parents.
149 """Context manager for handling dirstate parents.
150
150
151 If an exception occurs in the scope of the context manager,
151 If an exception occurs in the scope of the context manager,
152 the incoherent dirstate won't be written when wlock is
152 the incoherent dirstate won't be written when wlock is
153 released.
153 released.
154 """
154 """
155 self._parentwriters += 1
155 self._parentwriters += 1
156 yield
156 yield
157 # Typically we want the "undo" step of a context manager in a
157 # Typically we want the "undo" step of a context manager in a
158 # finally block so it happens even when an exception
158 # finally block so it happens even when an exception
159 # occurs. In this case, however, we only want to decrement
159 # occurs. In this case, however, we only want to decrement
160 # parentwriters if the code in the with statement exits
160 # parentwriters if the code in the with statement exits
161 # normally, so we don't have a try/finally here on purpose.
161 # normally, so we don't have a try/finally here on purpose.
162 self._parentwriters -= 1
162 self._parentwriters -= 1
163
163
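# Illustrative sketch (not part of Mercurial): why the decrement above is
# deliberately *not* in a finally block. If the body raises, the counter
# stays elevated, so pendingparentchange() keeps reporting an unfinished
# parent change and the incoherent state is not written out.
# `ToyDirstate` is hypothetical, for this example only.

import contextlib

class ToyDirstate:
    def __init__(self):
        self._parentwriters = 0

    @contextlib.contextmanager
    def parentchange(self):
        self._parentwriters += 1
        yield
        self._parentwriters -= 1   # skipped on exception, on purpose

    def pendingparentchange(self):
        return self._parentwriters > 0

ds = ToyDirstate()
try:
    with ds.parentchange():
        raise ValueError('boom')
except ValueError:
    pass
assert ds.pendingparentchange()   # still "pending" after the failure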
164 def pendingparentchange(self):
164 def pendingparentchange(self):
165 """Returns true if the dirstate is in the middle of a set of changes
165 """Returns true if the dirstate is in the middle of a set of changes
166 that modify the dirstate parent.
166 that modify the dirstate parent.
167 """
167 """
168 return self._parentwriters > 0
168 return self._parentwriters > 0
169
169
170 @propertycache
170 @propertycache
171 def _map(self):
171 def _map(self):
172 """Return the dirstate contents (see documentation for dirstatemap)."""
172 """Return the dirstate contents (see documentation for dirstatemap)."""
173 self._map = self._mapcls(
173 self._map = self._mapcls(
174 self._ui,
174 self._ui,
175 self._opener,
175 self._opener,
176 self._root,
176 self._root,
177 self._nodeconstants,
177 self._nodeconstants,
178 self._use_dirstate_v2,
178 self._use_dirstate_v2,
179 )
179 )
180 return self._map
180 return self._map
181
181
182 @property
182 @property
183 def _sparsematcher(self):
183 def _sparsematcher(self):
184 """The matcher for the sparse checkout.
184 """The matcher for the sparse checkout.
185
185
186 The working directory may not include every file from a manifest. The
186 The working directory may not include every file from a manifest. The
187 matcher obtained by this property will match a path if it is to be
187 matcher obtained by this property will match a path if it is to be
188 included in the working directory.
188 included in the working directory.
189 """
189 """
190 # TODO there is potential to cache this property. For now, the matcher
190 # TODO there is potential to cache this property. For now, the matcher
191 # is resolved on every access. (But the called function does use a
191 # is resolved on every access. (But the called function does use a
192 # cache to keep the lookup fast.)
192 # cache to keep the lookup fast.)
193 return self._sparsematchfn()
193 return self._sparsematchfn()
194
194
195 @repocache(b'branch')
195 @repocache(b'branch')
196 def _branch(self):
196 def _branch(self):
197 try:
197 try:
198 return self._opener.read(b"branch").strip() or b"default"
198 return self._opener.read(b"branch").strip() or b"default"
199 except IOError as inst:
199 except IOError as inst:
200 if inst.errno != errno.ENOENT:
200 if inst.errno != errno.ENOENT:
201 raise
201 raise
202 return b"default"
202 return b"default"
203
203
204 @property
204 @property
205 def _pl(self):
205 def _pl(self):
206 return self._map.parents()
206 return self._map.parents()
207
207
208 def hasdir(self, d):
208 def hasdir(self, d):
209 return self._map.hastrackeddir(d)
209 return self._map.hastrackeddir(d)
210
210
211 @rootcache(b'.hgignore')
211 @rootcache(b'.hgignore')
212 def _ignore(self):
212 def _ignore(self):
213 files = self._ignorefiles()
213 files = self._ignorefiles()
214 if not files:
214 if not files:
215 return matchmod.never()
215 return matchmod.never()
216
216
217 pats = [b'include:%s' % f for f in files]
217 pats = [b'include:%s' % f for f in files]
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
218 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
219
219
220 @propertycache
220 @propertycache
221 def _slash(self):
221 def _slash(self):
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
222 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
223
223
224 @propertycache
224 @propertycache
225 def _checklink(self):
225 def _checklink(self):
226 return util.checklink(self._root)
226 return util.checklink(self._root)
227
227
228 @propertycache
228 @propertycache
229 def _checkexec(self):
229 def _checkexec(self):
230 return bool(util.checkexec(self._root))
230 return bool(util.checkexec(self._root))
231
231
232 @propertycache
232 @propertycache
233 def _checkcase(self):
233 def _checkcase(self):
234 return not util.fscasesensitive(self._join(b'.hg'))
234 return not util.fscasesensitive(self._join(b'.hg'))
235
235
236 def _join(self, f):
236 def _join(self, f):
237 # much faster than os.path.join()
237 # much faster than os.path.join()
238 # it's safe because f is always a relative path
238 # it's safe because f is always a relative path
239 return self._rootdir + f
239 return self._rootdir + f
240
240
241 def flagfunc(self, buildfallback):
241 def flagfunc(self, buildfallback):
242 """build a callable that returns flags associated with a filename
242 """build a callable that returns flags associated with a filename
243
243
244 The information is extracted from three possible layers:
244 The information is extracted from three possible layers:
245 1. the file system if it supports the information
245 1. the file system if it supports the information
246 2. the "fallback" information stored in the dirstate if any
246 2. the "fallback" information stored in the dirstate if any
247 3. a more expensive mechanism inferring the flags from the parents.
247 3. a more expensive mechanism inferring the flags from the parents.
248 """
248 """
249
249
250 # small hack to cache the result of buildfallback()
250 # small hack to cache the result of buildfallback()
251 fallback_func = []
251 fallback_func = []
252
252
253 def get_flags(x):
253 def get_flags(x):
254 entry = None
254 entry = None
255 fallback_value = None
255 fallback_value = None
256 try:
256 try:
257 st = os.lstat(self._join(x))
257 st = os.lstat(self._join(x))
258 except OSError:
258 except OSError:
259 return b''
259 return b''
260
260
261 if self._checklink:
261 if self._checklink:
262 if util.statislink(st):
262 if util.statislink(st):
263 return b'l'
263 return b'l'
264 else:
264 else:
265 entry = self.get_entry(x)
265 entry = self.get_entry(x)
266 if entry.has_fallback_symlink:
266 if entry.has_fallback_symlink:
267 if entry.fallback_symlink:
267 if entry.fallback_symlink:
268 return b'l'
268 return b'l'
269 else:
269 else:
270 if not fallback_func:
270 if not fallback_func:
271 fallback_func.append(buildfallback())
271 fallback_func.append(buildfallback())
272 fallback_value = fallback_func[0](x)
272 fallback_value = fallback_func[0](x)
273 if b'l' in fallback_value:
273 if b'l' in fallback_value:
274 return b'l'
274 return b'l'
275
275
276 if self._checkexec:
276 if self._checkexec:
277 if util.statisexec(st):
277 if util.statisexec(st):
278 return b'x'
278 return b'x'
279 else:
279 else:
280 if entry is None:
280 if entry is None:
281 entry = self.get_entry(x)
281 entry = self.get_entry(x)
282 if entry.has_fallback_exec:
282 if entry.has_fallback_exec:
283 if entry.fallback_exec:
283 if entry.fallback_exec:
284 return b'x'
284 return b'x'
285 else:
285 else:
286 if fallback_value is None:
286 if fallback_value is None:
287 if not fallback_func:
287 if not fallback_func:
288 fallback_func.append(buildfallback())
288 fallback_func.append(buildfallback())
289 fallback_value = fallback_func[0](x)
289 fallback_value = fallback_func[0](x)
290 if b'x' in fallback_value:
290 if b'x' in fallback_value:
291 return b'x'
291 return b'x'
292 return b''
292 return b''
293
293
294 return get_flags
294 return get_flags
295
295
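# Illustrative sketch (not part of Mercurial): the layered lookup of
# get_flags(), reduced to the filesystem layer plus one collapsed
# fallback mapping. `toy_flags` is a hypothetical helper for this
# example only, and the exec-bit check assumes a POSIX filesystem.

import os
import stat
import tempfile

def toy_flags(path, fallback):
    try:
        st = os.lstat(path)
    except OSError:
        return b''                       # vanished file: no flags at all
    if stat.S_ISLNK(st.st_mode):
        return b'l'                      # layer 1: ask the filesystem
    if st.st_mode & stat.S_IXUSR:
        return b'x'
    return fallback.get(path, b'')       # layers 2 and 3, collapsed here

assert toy_flags('no-such-file', {}) == b''
with tempfile.NamedTemporaryFile() as f:
    # a plain, non-executable file falls through to the fallback layer
    assert toy_flags(f.name, {f.name: b''}) == b''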
296 @propertycache
296 @propertycache
297 def _cwd(self):
297 def _cwd(self):
298 # internal config: ui.forcecwd
298 # internal config: ui.forcecwd
299 forcecwd = self._ui.config(b'ui', b'forcecwd')
299 forcecwd = self._ui.config(b'ui', b'forcecwd')
300 if forcecwd:
300 if forcecwd:
301 return forcecwd
301 return forcecwd
302 return encoding.getcwd()
302 return encoding.getcwd()
303
303
304 def getcwd(self):
304 def getcwd(self):
305 """Return the path from which a canonical path is calculated.
305 """Return the path from which a canonical path is calculated.
306
306
307 This path should be used to resolve file patterns or to convert
307 This path should be used to resolve file patterns or to convert
308 canonical paths back to file paths for display. It shouldn't be
308 canonical paths back to file paths for display. It shouldn't be
309 used to get real file paths. Use vfs functions instead.
309 used to get real file paths. Use vfs functions instead.
310 """
310 """
311 cwd = self._cwd
311 cwd = self._cwd
312 if cwd == self._root:
312 if cwd == self._root:
313 return b''
313 return b''
314 # self._root ends with a path separator if self._root is '/' or 'C:\'
314 # self._root ends with a path separator if self._root is '/' or 'C:\'
315 rootsep = self._root
315 rootsep = self._root
316 if not util.endswithsep(rootsep):
316 if not util.endswithsep(rootsep):
317 rootsep += pycompat.ossep
317 rootsep += pycompat.ossep
318 if cwd.startswith(rootsep):
318 if cwd.startswith(rootsep):
319 return cwd[len(rootsep) :]
319 return cwd[len(rootsep) :]
320 else:
320 else:
321 # we're outside the repo. return an absolute path.
321 # we're outside the repo. return an absolute path.
322 return cwd
322 return cwd
323
323
324 def pathto(self, f, cwd=None):
324 def pathto(self, f, cwd=None):
325 if cwd is None:
325 if cwd is None:
326 cwd = self.getcwd()
326 cwd = self.getcwd()
327 path = util.pathto(self._root, cwd, f)
327 path = util.pathto(self._root, cwd, f)
328 if self._slash:
328 if self._slash:
329 return util.pconvert(path)
329 return util.pconvert(path)
330 return path
330 return path
331
331
332 def get_entry(self, path):
332 def get_entry(self, path):
333 """return a DirstateItem for the associated path"""
333 """return a DirstateItem for the associated path"""
334 entry = self._map.get(path)
334 entry = self._map.get(path)
335 if entry is None:
335 if entry is None:
336 return DirstateItem()
336 return DirstateItem()
337 return entry
337 return entry
338
338
339 def __contains__(self, key):
339 def __contains__(self, key):
340 return key in self._map
340 return key in self._map
341
341
342 def __iter__(self):
342 def __iter__(self):
343 return iter(sorted(self._map))
343 return iter(sorted(self._map))
344
344
345 def items(self):
345 def items(self):
346 return pycompat.iteritems(self._map)
346 return pycompat.iteritems(self._map)
347
347
348 iteritems = items
348 iteritems = items
349
349
350 def parents(self):
350 def parents(self):
351 return [self._validate(p) for p in self._pl]
351 return [self._validate(p) for p in self._pl]
352
352
353 def p1(self):
353 def p1(self):
354 return self._validate(self._pl[0])
354 return self._validate(self._pl[0])
355
355
356 def p2(self):
356 def p2(self):
357 return self._validate(self._pl[1])
357 return self._validate(self._pl[1])
358
358
359 @property
359 @property
360 def in_merge(self):
360 def in_merge(self):
361 """True if a merge is in progress"""
361 """True if a merge is in progress"""
362 return self._pl[1] != self._nodeconstants.nullid
362 return self._pl[1] != self._nodeconstants.nullid
363
363
364 def branch(self):
364 def branch(self):
365 return encoding.tolocal(self._branch)
365 return encoding.tolocal(self._branch)
366
366
367 def setparents(self, p1, p2=None):
367 def setparents(self, p1, p2=None):
368 """Set dirstate parents to p1 and p2.
368 """Set dirstate parents to p1 and p2.
369
369
370 When moving from two parents to one, "merged" entries are
370 When moving from two parents to one, "merged" entries are
371 adjusted to normal and previous copy records are discarded and
371 adjusted to normal and previous copy records are discarded and
372 returned by the call.
372 returned by the call.
373
373
374 See localrepo.setparents()
374 See localrepo.setparents()
375 """
375 """
376 if p2 is None:
376 if p2 is None:
377 p2 = self._nodeconstants.nullid
377 p2 = self._nodeconstants.nullid
378 if self._parentwriters == 0:
378 if self._parentwriters == 0:
379 raise ValueError(
379 raise ValueError(
380 b"cannot set dirstate parent outside of "
380 b"cannot set dirstate parent outside of "
381 b"dirstate.parentchange context manager"
381 b"dirstate.parentchange context manager"
382 )
382 )
383
383
384 self._dirty = True
384 self._dirty = True
385 oldp2 = self._pl[1]
385 oldp2 = self._pl[1]
386 if self._origpl is None:
386 if self._origpl is None:
387 self._origpl = self._pl
387 self._origpl = self._pl
388 nullid = self._nodeconstants.nullid
388 nullid = self._nodeconstants.nullid
389 # True if we need to fold p2 related state back to a linear case
389 # True if we need to fold p2 related state back to a linear case
390 fold_p2 = oldp2 != nullid and p2 == nullid
390 fold_p2 = oldp2 != nullid and p2 == nullid
391 return self._map.setparents(p1, p2, fold_p2=fold_p2)
391 return self._map.setparents(p1, p2, fold_p2=fold_p2)
392
392
393 def setbranch(self, branch):
393 def setbranch(self, branch):
394 self.__class__._branch.set(self, encoding.fromlocal(branch))
394 self.__class__._branch.set(self, encoding.fromlocal(branch))
395 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
395 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
396 try:
396 try:
397 f.write(self._branch + b'\n')
397 f.write(self._branch + b'\n')
398 f.close()
398 f.close()
399
399
400 # make sure filecache has the correct stat info for _branch after
400 # make sure filecache has the correct stat info for _branch after
401 # replacing the underlying file
401 # replacing the underlying file
402 ce = self._filecache[b'_branch']
402 ce = self._filecache[b'_branch']
403 if ce:
403 if ce:
404 ce.refresh()
404 ce.refresh()
405 except: # re-raises
405 except: # re-raises
406 f.discard()
406 f.discard()
407 raise
407 raise
408
408
409 def invalidate(self):
409 def invalidate(self):
410 """Causes the next access to reread the dirstate.
410 """Causes the next access to reread the dirstate.
411
411
412 This is different from localrepo.invalidatedirstate() because it always
412 This is different from localrepo.invalidatedirstate() because it always
413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
413 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
414 check whether the dirstate has changed before rereading it."""
414 check whether the dirstate has changed before rereading it."""
415
415
416 for a in ("_map", "_branch", "_ignore"):
416 for a in ("_map", "_branch", "_ignore"):
417 if a in self.__dict__:
417 if a in self.__dict__:
418 delattr(self, a)
418 delattr(self, a)
419 self._dirty = False
419 self._dirty = False
420 self._dirty_tracked_set = False
420 self._dirty_tracked_set = False
421 self._parentwriters = 0
421 self._parentwriters = 0
422 self._origpl = None
422 self._origpl = None
423
423
424 def copy(self, source, dest):
424 def copy(self, source, dest):
425 """Mark dest as a copy of source. Unmark dest if source is None."""
425 """Mark dest as a copy of source. Unmark dest if source is None."""
426 if source == dest:
426 if source == dest:
427 return
427 return
428 self._dirty = True
428 self._dirty = True
429 if source is not None:
429 if source is not None:
430 self._map.copymap[dest] = source
430 self._map.copymap[dest] = source
431 else:
431 else:
432 self._map.copymap.pop(dest, None)
432 self._map.copymap.pop(dest, None)
433
433
434 def copied(self, file):
434 def copied(self, file):
435 return self._map.copymap.get(file, None)
435 return self._map.copymap.get(file, None)
436
436
437 def copies(self):
437 def copies(self):
438 return self._map.copymap
438 return self._map.copymap
439
439
440 @requires_no_parents_change
440 @requires_no_parents_change
441 def set_tracked(self, filename, reset_copy=False):
441 def set_tracked(self, filename, reset_copy=False):
442 """a "public" method for generic code to mark a file as tracked
442 """a "public" method for generic code to mark a file as tracked
443
443
444 This function is to be called outside of the "update/merge" case. For
444 This function is to be called outside of the "update/merge" case. For
445 example by a command like `hg add X`.
445 example by a command like `hg add X`.
446
446
447 if reset_copy is set, any existing copy information will be dropped.
447 if reset_copy is set, any existing copy information will be dropped.
448
448
449 return True if the file was previously untracked, False otherwise.
449 return True if the file was previously untracked, False otherwise.
450 """
450 """
451 self._dirty = True
451 self._dirty = True
452 entry = self._map.get(filename)
452 entry = self._map.get(filename)
453 if entry is None or not entry.tracked:
453 if entry is None or not entry.tracked:
454 self._check_new_tracked_filename(filename)
454 self._check_new_tracked_filename(filename)
455 pre_tracked = self._map.set_tracked(filename)
455 pre_tracked = self._map.set_tracked(filename)
456 if reset_copy:
456 if reset_copy:
457 self._map.copymap.pop(filename, None)
457 self._map.copymap.pop(filename, None)
458 if pre_tracked:
458 if pre_tracked:
459 self._dirty_tracked_set = True
459 self._dirty_tracked_set = True
460 return pre_tracked
460 return pre_tracked
461
461
462 @requires_no_parents_change
462 @requires_no_parents_change
463 def set_untracked(self, filename):
463 def set_untracked(self, filename):
464 """a "public" method for generic code to mark a file as untracked
464 """a "public" method for generic code to mark a file as untracked
465
465
466 This function is to be called outside of the "update/merge" case. For
466 This function is to be called outside of the "update/merge" case. For
467 example by a command like `hg remove X`.
467 example by a command like `hg remove X`.
468
468
469 return True if the file was previously tracked, False otherwise.
469 return True if the file was previously tracked, False otherwise.
470 """
470 """
471 ret = self._map.set_untracked(filename)
471 ret = self._map.set_untracked(filename)
472 if ret:
472 if ret:
473 self._dirty = True
473 self._dirty = True
474 self._dirty_tracked_set = True
474 self._dirty_tracked_set = True
475 return ret
475 return ret
476
476
477 @requires_no_parents_change
477 @requires_no_parents_change
478 def set_clean(self, filename, parentfiledata):
478 def set_clean(self, filename, parentfiledata):
479 """record that the current state of the file on disk is known to be clean"""
479 """record that the current state of the file on disk is known to be clean"""
480 self._dirty = True
480 self._dirty = True
481 if not self._map[filename].tracked:
481 if not self._map[filename].tracked:
482 self._check_new_tracked_filename(filename)
482 self._check_new_tracked_filename(filename)
483 (mode, size, mtime) = parentfiledata
483 (mode, size, mtime) = parentfiledata
484 self._map.set_clean(filename, mode, size, mtime)
484 self._map.set_clean(filename, mode, size, mtime)
485
485
486 @requires_no_parents_change
486 @requires_no_parents_change
487 def set_possibly_dirty(self, filename):
487 def set_possibly_dirty(self, filename):
488 """record that the current state of the file on disk is unknown"""
488 """record that the current state of the file on disk is unknown"""
489 self._dirty = True
489 self._dirty = True
490 self._map.set_possibly_dirty(filename)
490 self._map.set_possibly_dirty(filename)
491
491
492 @requires_parents_change
492 @requires_parents_change
493 def update_file_p1(
493 def update_file_p1(
494 self,
494 self,
495 filename,
495 filename,
496 p1_tracked,
496 p1_tracked,
497 ):
497 ):
498 """Set a file as tracked in the parent (or not)
498 """Set a file as tracked in the parent (or not)
499
499
500 This is to be called when adjusting the dirstate to a new parent after a history
500 This is to be called when adjusting the dirstate to a new parent after a history
501 rewriting operation.
501 rewriting operation.
502
502
503 It should not be called during a merge (p2 != nullid), and should only
503 It should not be called during a merge (p2 != nullid), and should only
504 be used within a `with dirstate.parentchange():` context.
504 be used within a `with dirstate.parentchange():` context.
505 """
505 """
506 if self.in_merge:
506 if self.in_merge:
507 msg = b'update_file_reference should not be called when merging'
507 msg = b'update_file_reference should not be called when merging'
508 raise error.ProgrammingError(msg)
508 raise error.ProgrammingError(msg)
509 entry = self._map.get(filename)
509 entry = self._map.get(filename)
510 if entry is None:
510 if entry is None:
511 wc_tracked = False
511 wc_tracked = False
512 else:
512 else:
513 wc_tracked = entry.tracked
513 wc_tracked = entry.tracked
514 if not (p1_tracked or wc_tracked):
514 if not (p1_tracked or wc_tracked):
515 # the file is no longer relevant to anyone
515 # the file is no longer relevant to anyone
516 if self._map.get(filename) is not None:
516 if self._map.get(filename) is not None:
517 self._map.reset_state(filename)
517 self._map.reset_state(filename)
518 self._dirty = True
518 self._dirty = True
519 elif (not p1_tracked) and wc_tracked:
519 elif (not p1_tracked) and wc_tracked:
520 if entry is not None and entry.added:
520 if entry is not None and entry.added:
521 return # avoid dropping copy information (maybe?)
521 return # avoid dropping copy information (maybe?)
522
522
523 self._map.reset_state(
523 self._map.reset_state(
524 filename,
524 filename,
525 wc_tracked,
525 wc_tracked,
526 p1_tracked,
526 p1_tracked,
527 # the underlying reference might have changed, we will have to
527 # the underlying reference might have changed, we will have to
528 # check it.
528 # check it.
529 has_meaningful_mtime=False,
529 has_meaningful_mtime=False,
530 )
530 )
531
531
532 @requires_parents_change
532 @requires_parents_change
533 def update_file(
533 def update_file(
534 self,
534 self,
535 filename,
535 filename,
536 wc_tracked,
536 wc_tracked,
537 p1_tracked,
537 p1_tracked,
538 p2_info=False,
538 p2_info=False,
539 possibly_dirty=False,
539 possibly_dirty=False,
540 parentfiledata=None,
540 parentfiledata=None,
541 ):
541 ):
542 """update the information about a file in the dirstate
542 """update the information about a file in the dirstate
543
543
544 This is to be called when the dirstate's parents change, to keep track
544 This is to be called when the dirstate's parents change, to keep track
545 of the file's situation with regard to the working copy and its parent.
545 of the file's situation with regard to the working copy and its parent.
546
546
547 This function must be called within a `dirstate.parentchange` context.
547 This function must be called within a `dirstate.parentchange` context.
548
548
549 note: the API is at an early stage and we might need to adjust it
549 note: the API is at an early stage and we might need to adjust it
550 depending of what information ends up being relevant and useful to
550 depending of what information ends up being relevant and useful to
551 other processing.
551 other processing.
552 """
552 """
553
553
554 # note: I do not think we need to double check name clash here since we
554 # note: I do not think we need to double check name clash here since we
555 # are in an update/merge case that should already have taken care of
555 # are in an update/merge case that should already have taken care of
556 # this. The test agrees
556 # this. The test agrees
557
557
558 self._dirty = True
558 self._dirty = True
559 old_entry = self._map.get(filename)
559 old_entry = self._map.get(filename)
560 if old_entry is None:
560 if old_entry is None:
561 prev_tracked = False
561 prev_tracked = False
562 else:
562 else:
563 prev_tracked = old_entry.tracked
563 prev_tracked = old_entry.tracked
564 if prev_tracked != wc_tracked:
564 if prev_tracked != wc_tracked:
565 self._dirty_tracked_set = True
565 self._dirty_tracked_set = True
566
566
567 self._map.reset_state(
567 self._map.reset_state(
568 filename,
568 filename,
569 wc_tracked,
569 wc_tracked,
570 p1_tracked,
570 p1_tracked,
571 p2_info=p2_info,
571 p2_info=p2_info,
572 has_meaningful_mtime=not possibly_dirty,
572 has_meaningful_mtime=not possibly_dirty,
573 parentfiledata=parentfiledata,
573 parentfiledata=parentfiledata,
574 )
574 )
575
575
576 def _check_new_tracked_filename(self, filename):
576 def _check_new_tracked_filename(self, filename):
577 scmutil.checkfilename(filename)
577 scmutil.checkfilename(filename)
578 if self._map.hastrackeddir(filename):
578 if self._map.hastrackeddir(filename):
579 msg = _(b'directory %r already in dirstate')
579 msg = _(b'directory %r already in dirstate')
580 msg %= pycompat.bytestr(filename)
580 msg %= pycompat.bytestr(filename)
581 raise error.Abort(msg)
581 raise error.Abort(msg)
582 # shadows
582 # shadows
583 for d in pathutil.finddirs(filename):
583 for d in pathutil.finddirs(filename):
584 if self._map.hastrackeddir(d):
584 if self._map.hastrackeddir(d):
585 break
585 break
586 entry = self._map.get(d)
586 entry = self._map.get(d)
587 if entry is not None and not entry.removed:
587 if entry is not None and not entry.removed:
588 msg = _(b'file %r in dirstate clashes with %r')
588 msg = _(b'file %r in dirstate clashes with %r')
589 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
589 msg %= (pycompat.bytestr(d), pycompat.bytestr(filename))
590 raise error.Abort(msg)
590 raise error.Abort(msg)
591
591
592 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
592 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
593 if exists is None:
593 if exists is None:
594 exists = os.path.lexists(os.path.join(self._root, path))
594 exists = os.path.lexists(os.path.join(self._root, path))
595 if not exists:
595 if not exists:
596 # Maybe a path component exists
596 # Maybe a path component exists
597 if not ignoremissing and b'/' in path:
597 if not ignoremissing and b'/' in path:
598 d, f = path.rsplit(b'/', 1)
598 d, f = path.rsplit(b'/', 1)
599 d = self._normalize(d, False, ignoremissing, None)
599 d = self._normalize(d, False, ignoremissing, None)
600 folded = d + b"/" + f
600 folded = d + b"/" + f
601 else:
601 else:
602 # No path components, preserve original case
602 # No path components, preserve original case
603 folded = path
603 folded = path
604 else:
604 else:
605 # recursively normalize leading directory components
605 # recursively normalize leading directory components
606 # against dirstate
606 # against dirstate
607 if b'/' in normed:
607 if b'/' in normed:
608 d, f = normed.rsplit(b'/', 1)
608 d, f = normed.rsplit(b'/', 1)
609 d = self._normalize(d, False, ignoremissing, True)
609 d = self._normalize(d, False, ignoremissing, True)
610 r = self._root + b"/" + d
610 r = self._root + b"/" + d
611 folded = d + b"/" + util.fspath(f, r)
611 folded = d + b"/" + util.fspath(f, r)
612 else:
612 else:
613 folded = util.fspath(normed, self._root)
613 folded = util.fspath(normed, self._root)
614 storemap[normed] = folded
614 storemap[normed] = folded
615
615
616 return folded
616 return folded
617
617
618 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
618 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
619 normed = util.normcase(path)
619 normed = util.normcase(path)
620 folded = self._map.filefoldmap.get(normed, None)
620 folded = self._map.filefoldmap.get(normed, None)
621 if folded is None:
621 if folded is None:
622 if isknown:
622 if isknown:
623 folded = path
623 folded = path
624 else:
624 else:
625 folded = self._discoverpath(
625 folded = self._discoverpath(
626 path, normed, ignoremissing, exists, self._map.filefoldmap
626 path, normed, ignoremissing, exists, self._map.filefoldmap
627 )
627 )
628 return folded
628 return folded
629
629
630 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
630 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
631 normed = util.normcase(path)
631 normed = util.normcase(path)
632 folded = self._map.filefoldmap.get(normed, None)
632 folded = self._map.filefoldmap.get(normed, None)
633 if folded is None:
633 if folded is None:
634 folded = self._map.dirfoldmap.get(normed, None)
634 folded = self._map.dirfoldmap.get(normed, None)
635 if folded is None:
635 if folded is None:
636 if isknown:
636 if isknown:
637 folded = path
637 folded = path
638 else:
638 else:
639 # store discovered result in dirfoldmap so that future
639 # store discovered result in dirfoldmap so that future
640 # normalizefile calls don't start matching directories
640 # normalizefile calls don't start matching directories
641 folded = self._discoverpath(
641 folded = self._discoverpath(
642 path, normed, ignoremissing, exists, self._map.dirfoldmap
642 path, normed, ignoremissing, exists, self._map.dirfoldmap
643 )
643 )
644 return folded
644 return folded
645
645
646 def normalize(self, path, isknown=False, ignoremissing=False):
646 def normalize(self, path, isknown=False, ignoremissing=False):
647 """
647 """
648 normalize the case of a pathname when on a casefolding filesystem
648 normalize the case of a pathname when on a casefolding filesystem
649
649
650 isknown specifies whether the filename came from walking the
650 isknown specifies whether the filename came from walking the
651 disk, to avoid extra filesystem access.
651 disk, to avoid extra filesystem access.
652
652
653 If ignoremissing is True, missing paths are returned
653 If ignoremissing is True, missing paths are returned
654 unchanged. Otherwise, we try harder to normalize possibly
654 unchanged. Otherwise, we try harder to normalize possibly
655 existing path components.
655 existing path components.
656
656
657 The normalized case is determined based on the following precedence:
657 The normalized case is determined based on the following precedence:
658
658
659 - version of name already stored in the dirstate
659 - version of name already stored in the dirstate
660 - version of name stored on disk
660 - version of name stored on disk
661 - version provided via command arguments
661 - version provided via command arguments
662 """
662 """
663
663
664 if self._checkcase:
664 if self._checkcase:
665 return self._normalize(path, isknown, ignoremissing)
665 return self._normalize(path, isknown, ignoremissing)
666 return path
666 return path
667
667
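# Illustrative sketch (not part of Mercurial): the precedence documented
# above -- the dirstate-stored spelling first, then the on-disk spelling,
# then the spelling the user typed. `toy_normalize` and its arguments are
# hypothetical, for this example only.

def toy_normalize(path, foldmap, on_disk):
    normed = path.lower()                # stand-in for util.normcase()
    if normed in foldmap:
        return foldmap[normed]           # 1. name already in the dirstate
    if normed in on_disk:
        return on_disk[normed]           # 2. name as stored on disk
    return path                          # 3. name as provided by the user

foldmap = {b'readme.txt': b'README.txt'}
assert toy_normalize(b'ReadMe.TXT', foldmap, {}) == b'README.txt'
assert toy_normalize(b'new.txt', foldmap, {}) == b'new.txt'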
668 def clear(self):
668 def clear(self):
669 self._map.clear()
669 self._map.clear()
670 self._dirty = True
670 self._dirty = True
671
671
672 def rebuild(self, parent, allfiles, changedfiles=None):
672 def rebuild(self, parent, allfiles, changedfiles=None):
673 if changedfiles is None:
673 if changedfiles is None:
674 # Rebuild entire dirstate
674 # Rebuild entire dirstate
675 to_lookup = allfiles
675 to_lookup = allfiles
676 to_drop = []
676 to_drop = []
677 self.clear()
677 self.clear()
678 elif len(changedfiles) < 10:
678 elif len(changedfiles) < 10:
679 # Avoid turning allfiles into a set, which can be expensive if it's
679 # Avoid turning allfiles into a set, which can be expensive if it's
680 # large.
680 # large.
681 to_lookup = []
681 to_lookup = []
682 to_drop = []
682 to_drop = []
683 for f in changedfiles:
683 for f in changedfiles:
684 if f in allfiles:
684 if f in allfiles:
685 to_lookup.append(f)
685 to_lookup.append(f)
686 else:
686 else:
687 to_drop.append(f)
687 to_drop.append(f)
688 else:
688 else:
689 changedfilesset = set(changedfiles)
689 changedfilesset = set(changedfiles)
690 to_lookup = changedfilesset & set(allfiles)
690 to_lookup = changedfilesset & set(allfiles)
691 to_drop = changedfilesset - to_lookup
691 to_drop = changedfilesset - to_lookup
692
692
693 if self._origpl is None:
693 if self._origpl is None:
694 self._origpl = self._pl
694 self._origpl = self._pl
695 self._map.setparents(parent, self._nodeconstants.nullid)
695 self._map.setparents(parent, self._nodeconstants.nullid)
696
696
697 for f in to_lookup:
697 for f in to_lookup:
698
698
699 if self.in_merge:
699 if self.in_merge:
700 self.set_tracked(f)
700 self.set_tracked(f)
701 else:
701 else:
702 self._map.reset_state(
702 self._map.reset_state(
703 f,
703 f,
704 wc_tracked=True,
704 wc_tracked=True,
705 p1_tracked=True,
705 p1_tracked=True,
706 )
706 )
707 for f in to_drop:
707 for f in to_drop:
708 self._map.reset_state(f)
708 self._map.reset_state(f)
709
709
710 self._dirty = True
710 self._dirty = True
711
711
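# Illustrative sketch (not part of Mercurial): the to_lookup/to_drop
# split in rebuild() above. For a handful of changed files a linear scan
# avoids building a set out of a potentially huge `allfiles`; past that
# threshold the set arithmetic wins. `partition` is a hypothetical helper
# for this example only.

def partition(allfiles, changedfiles, threshold=10):
    if len(changedfiles) < threshold:
        to_lookup, to_drop = [], []
        for f in changedfiles:
            # membership test on the list: no set is ever built
            (to_lookup if f in allfiles else to_drop).append(f)
        return to_lookup, to_drop
    changed = set(changedfiles)
    to_lookup = changed & set(allfiles)
    return to_lookup, changed - to_lookup

lookup, drop = partition([b'a', b'b'], [b'b', b'gone'])
assert lookup == [b'b'] and drop == [b'gone']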
712 def identity(self):
712 def identity(self):
713 """Return identity of dirstate itself to detect changing in storage
713 """Return identity of dirstate itself to detect changing in storage
714
714
715 If the identity of the previous dirstate is equal to this one, writing
715 If the identity of the previous dirstate is equal to this one, writing
716 changes out based on the former dirstate can preserve consistency.
716 changes out based on the former dirstate can preserve consistency.
717 """
717 """
718 return self._map.identity
718 return self._map.identity
719
719
720 def write(self, tr):
720 def write(self, tr):
721 if not self._dirty:
721 if not self._dirty:
722 return
722 return
723
723
724 write_key = self._use_tracked_key and self._dirty_tracked_set
724 write_key = self._use_tracked_key and self._dirty_tracked_set
725 if tr:
725 if tr:
726 # delay writing in-memory changes out
726 # delay writing in-memory changes out
727 if write_key:
727 if write_key:
728 tr.addfilegenerator(
728 tr.addfilegenerator(
729 b'dirstate-0-key-pre',
729 b'dirstate-0-key-pre',
730 (self._filename_tk,),
730 (self._filename_tk,),
731 lambda f: self._write_tracked_key(tr, f),
731 lambda f: self._write_tracked_key(tr, f),
732 location=b'plain',
732 location=b'plain',
733 post_finalize=True,
733 )
734 )
734 tr.addfilegenerator(
735 tr.addfilegenerator(
735 b'dirstate-1-main',
736 b'dirstate-1-main',
736 (self._filename,),
737 (self._filename,),
737 lambda f: self._writedirstate(tr, f),
738 lambda f: self._writedirstate(tr, f),
738 location=b'plain',
739 location=b'plain',
740 post_finalize=True,
739 )
741 )
740 if write_key:
742 if write_key:
741 tr.addfilegenerator(
743 tr.addfilegenerator(
742 b'dirstate-2-key-post',
744 b'dirstate-2-key-post',
743 (self._filename_tk,),
745 (self._filename_tk,),
744 lambda f: self._write_tracked_key(tr, f),
746 lambda f: self._write_tracked_key(tr, f),
745 location=b'plain',
747 location=b'plain',
748 post_finalize=True,
746 )
749 )
747 return
750 return
748
751
749 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
752 file = lambda f: self._opener(f, b"w", atomictemp=True, checkambig=True)
750 if write_key:
753 if write_key:
751 # we change the key-file before changing the dirstate to make sure
754 # we change the key-file before changing the dirstate to make sure
752 # readers invalidate their cache before we start writing
755 # readers invalidate their cache before we start writing
753 with file(self._filename_tk) as f:
756 with file(self._filename_tk) as f:
754 self._write_tracked_key(tr, f)
757 self._write_tracked_key(tr, f)
755 with file(self._filename) as f:
758 with file(self._filename) as f:
756 self._writedirstate(tr, f)
759 self._writedirstate(tr, f)
757 if write_key:
760 if write_key:
758 # we update the key-file after writing to make sure readers have a
761 # we update the key-file after writing to make sure readers have a
759 # key that matches the newly written content
762 # key that matches the newly written content
760 with file(self._filename_tk) as f:
763 with file(self._filename_tk) as f:
761 self._write_tracked_key(tr, f)
764 self._write_tracked_key(tr, f)
762
765
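# Illustrative sketch (not part of Mercurial): the ordering that
# post_finalize=True requests in write() above. A toy transaction runs
# ordinary file generators before finalize and keeps flagged ones for
# afterwards, which is what this changeset encodes per generator instead
# of relying on a global variable. `ToyTransaction` is hypothetical, for
# this example only.

class ToyTransaction:
    def __init__(self):
        self._gens = []

    def addfilegenerator(self, genid, genfunc, post_finalize=False):
        # record the generator together with its scheduling flag
        self._gens.append((genid, genfunc, post_finalize))

    def close(self):
        log = []
        for genid, genfunc, post in self._gens:
            if not post:
                genfunc()
                log.append(genid)
        log.append(b'finalize')
        for genid, genfunc, post in self._gens:
            if post:
                genfunc()
                log.append(genid)
        return log

tr = ToyTransaction()
tr.addfilegenerator(b'dirstate-1-main', lambda: None, post_finalize=True)
tr.addfilegenerator(b'other', lambda: None)
assert tr.close() == [b'other', b'finalize', b'dirstate-1-main']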
763 def addparentchangecallback(self, category, callback):
766 def addparentchangecallback(self, category, callback):
764 """add a callback to be called when the wd parents are changed
767 """add a callback to be called when the wd parents are changed
765
768
766 Callback will be called with the following arguments:
769 Callback will be called with the following arguments:
767 dirstate, (oldp1, oldp2), (newp1, newp2)
770 dirstate, (oldp1, oldp2), (newp1, newp2)
768
771
769 Category is a unique identifier to allow overwriting an old callback
772 Category is a unique identifier to allow overwriting an old callback
770 with a newer callback.
773 with a newer callback.
771 """
774 """
772 self._plchangecallbacks[category] = callback
775 self._plchangecallbacks[category] = callback
773
776
774 def _writedirstate(self, tr, st):
777 def _writedirstate(self, tr, st):
775 # notify callbacks about parents change
778 # notify callbacks about parents change
776 if self._origpl is not None and self._origpl != self._pl:
779 if self._origpl is not None and self._origpl != self._pl:
777 for c, callback in sorted(
780 for c, callback in sorted(
778 pycompat.iteritems(self._plchangecallbacks)
781 pycompat.iteritems(self._plchangecallbacks)
779 ):
782 ):
780 callback(self, self._origpl, self._pl)
783 callback(self, self._origpl, self._pl)
781 self._origpl = None
784 self._origpl = None
782 self._map.write(tr, st)
785 self._map.write(tr, st)
783 self._dirty = False
786 self._dirty = False
784 self._dirty_tracked_set = False
787 self._dirty_tracked_set = False
785
788
786 def _write_tracked_key(self, tr, f):
789 def _write_tracked_key(self, tr, f):
787 key = node.hex(uuid.uuid4().bytes)
790 key = node.hex(uuid.uuid4().bytes)
788 f.write(b"1\n%s\n" % key) # 1 is the format version
791 f.write(b"1\n%s\n" % key) # 1 is the format version
789
792
790 def _dirignore(self, f):
793 def _dirignore(self, f):
791 if self._ignore(f):
794 if self._ignore(f):
792 return True
795 return True
793 for p in pathutil.finddirs(f):
796 for p in pathutil.finddirs(f):
794 if self._ignore(p):
797 if self._ignore(p):
795 return True
798 return True
796 return False
799 return False
797
800
798 def _ignorefiles(self):
801 def _ignorefiles(self):
799 files = []
802 files = []
800 if os.path.exists(self._join(b'.hgignore')):
803 if os.path.exists(self._join(b'.hgignore')):
801 files.append(self._join(b'.hgignore'))
804 files.append(self._join(b'.hgignore'))
802 for name, path in self._ui.configitems(b"ui"):
805 for name, path in self._ui.configitems(b"ui"):
803 if name == b'ignore' or name.startswith(b'ignore.'):
806 if name == b'ignore' or name.startswith(b'ignore.'):
804 # we need to use os.path.join here rather than self._join
807 # we need to use os.path.join here rather than self._join
805 # because path is arbitrary and user-specified
808 # because path is arbitrary and user-specified
806 files.append(os.path.join(self._rootdir, util.expandpath(path)))
809 files.append(os.path.join(self._rootdir, util.expandpath(path)))
807 return files
810 return files
808
811
809 def _ignorefileandline(self, f):
812 def _ignorefileandline(self, f):
810 files = collections.deque(self._ignorefiles())
813 files = collections.deque(self._ignorefiles())
811 visited = set()
814 visited = set()
812 while files:
815 while files:
813 i = files.popleft()
816 i = files.popleft()
814 patterns = matchmod.readpatternfile(
817 patterns = matchmod.readpatternfile(
815 i, self._ui.warn, sourceinfo=True
818 i, self._ui.warn, sourceinfo=True
816 )
819 )
817 for pattern, lineno, line in patterns:
820 for pattern, lineno, line in patterns:
818 kind, p = matchmod._patsplit(pattern, b'glob')
821 kind, p = matchmod._patsplit(pattern, b'glob')
819 if kind == b"subinclude":
822 if kind == b"subinclude":
820 if p not in visited:
823 if p not in visited:
821 files.append(p)
824 files.append(p)
822 continue
825 continue
823 m = matchmod.match(
826 m = matchmod.match(
824 self._root, b'', [], [pattern], warn=self._ui.warn
827 self._root, b'', [], [pattern], warn=self._ui.warn
825 )
828 )
826 if m(f):
829 if m(f):
827 return (i, lineno, line)
830 return (i, lineno, line)
828 visited.add(i)
831 visited.add(i)
829 return (None, -1, b"")
832 return (None, -1, b"")
830
833
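# Illustrative sketch (not part of Mercurial): the traversal shape of
# _ignorefileandline() above -- a queue of pattern files, with a visited
# set so mutually-including files cannot loop forever. `walk_includes`
# and the in-memory `sources` mapping are hypothetical, for this example
# only.

import collections

def walk_includes(sources, start):
    files = collections.deque([start])
    visited, seen_patterns = set(), []
    while files:
        i = files.popleft()
        if i in visited:
            continue
        visited.add(i)
        for kind, p in sources.get(i, []):
            if kind == b'subinclude':
                files.append(p)          # queue the nested pattern file
            else:
                seen_patterns.append(p)
    return seen_patterns

sources = {
    b'.hgignore': [(b'subinclude', b'sub/.hgignore'), (b'glob', b'*.o')],
    b'sub/.hgignore': [(b'subinclude', b'.hgignore'), (b'glob', b'*.tmp')],
}
assert walk_includes(sources, b'.hgignore') == [b'*.o', b'*.tmp']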
    def _walkexplicit(self, match, subrepos):
        """Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found."""

        def badtype(mode):
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

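The sorted two-pointer loop near the top of _walkexplicit drops explicit files that live inside a subrepo, so the parent repository never walks into them. The same technique in isolation, as a verifiable toy with plain strings:

def drop_subrepo_files(files, subrepos):
    """Remove entries of `files` that fall under any path in `subrepos`.

    Mirrors the merge-style scan above: both lists are sorted, and the
    two indices advance monotonically, so the scan is linear overall.
    """
    files = sorted(files)
    subrepos = sorted(subrepos)
    i, j = 0, 0
    while i < len(files) and j < len(subrepos):
        subpath = subrepos[j] + "/"
        if files[i] < subpath:
            i += 1
            continue
        while i < len(files) and files[i].startswith(subpath):
            del files[i]
        j += 1
    return files

# e.g. drop_subrepo_files(["a", "sub/x", "sub/y", "z"], ["sub"]) == ["a", "z"]
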
    def walk(self, match, subrepos, unknown, ignored, full=True):
        """
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).
        """
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)
        if matchtdir:
            for d in work:
                matchtdir(d[0])
            for d in dirsnotfound:
                matchtdir(d)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(nf):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matching matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if normalizefile and normalizefile(nf, True, True) in results:
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory,
                        # which we in this case report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results

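A hedged usage sketch of walk(), assuming a local checkout and a modern Mercurial where match.always() takes no required arguments (this is internal API and its exact shape varies across releases):

from mercurial import hg, match as matchmod, ui as uimod

ui = uimod.ui.load()
repo = hg.repository(ui, b'.')
m = matchmod.always()  # matcher accepting every path
with repo.wlock():
    stats = repo.dirstate.walk(m, subrepos=[], unknown=True, ignored=False)
for fname, st in sorted(stats.items()):
    # a None value means the file is known to the dirstate but absent on disk
    print(fname.decode(), 'missing' if st is None else 'present')
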
    def _rust_status(self, matcher, list_clean, list_ignored, list_unknown):
        # Force Rayon (the Rust parallelism library) to respect the number of
        # workers. This is a temporary workaround until the Rust code knows
        # how to read the config file.
        numcpus = self._ui.configint(b"worker", b"numcpus")
        if numcpus is not None:
            encoding.environ.setdefault(b'RAYON_NUM_THREADS', b'%d' % numcpus)

        workers_enabled = self._ui.configbool(b"worker", b"enabled", True)
        if not workers_enabled:
            encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

        (
            lookup,
            modified,
            added,
            removed,
            deleted,
            clean,
            ignored,
            unknown,
            warnings,
            bad,
            traversed,
            dirty,
        ) = rustmod.status(
            self._map._map,
            matcher,
            self._rootdir,
            self._ignorefiles(),
            self._checkexec,
            bool(list_clean),
            bool(list_ignored),
            bool(list_unknown),
            bool(matcher.traversedir),
        )

        self._dirty |= dirty

        if matcher.traversedir:
            for dir in traversed:
                matcher.traversedir(dir)

        if self._ui.warn:
            for item in warnings:
                if isinstance(item, tuple):
                    file_path, syntax = item
                    msg = _(b"%s: ignoring invalid syntax '%s'\n") % (
                        file_path,
                        syntax,
                    )
                    self._ui.warn(msg)
                else:
                    msg = _(b"skipping unreadable pattern file '%s': %s\n")
                    self._ui.warn(
                        msg
                        % (
                            pathutil.canonpath(
                                self._rootdir, self._rootdir, item
                            ),
                            b"No such file or directory",
                        )
                    )

        for (fn, message) in bad:
            matcher.bad(fn, encoding.strtolocal(message))

        status = scmutil.status(
            modified=modified,
            added=added,
            removed=removed,
            deleted=deleted,
            unknown=unknown,
            ignored=ignored,
            clean=clean,
        )
        return (lookup, status)

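The setdefault/override pair above gives RAYON_NUM_THREADS a specific precedence: an explicit variable in the environment wins over worker.numcpus, while worker.enabled=False forces a single thread unconditionally. A small self-contained restatement of that precedence, useful for reasoning about it (names are illustrative):

def effective_rayon_threads(environ, numcpus_config, workers_enabled):
    """Mirror the precedence used by _rust_status above."""
    if numcpus_config is not None:
        # config only fills the gap; a pre-set variable is left alone
        environ.setdefault('RAYON_NUM_THREADS', str(numcpus_config))
    if not workers_enabled:
        # disabling workers is a hard override
        environ['RAYON_NUM_THREADS'] = '1'
    return environ.get('RAYON_NUM_THREADS')

# effective_rayon_threads({}, 4, True) == '4'
# effective_rayon_threads({'RAYON_NUM_THREADS': '8'}, 4, True) == '8'
# effective_rayon_threads({'RAYON_NUM_THREADS': '8'}, 4, False) == '1'
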
    def status(self, match, subrepos, ignored, clean, unknown):
        """Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        """
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True

        allowed_matchers = (
            matchmod.alwaysmatcher,
            matchmod.exactmatcher,
            matchmod.includematcher,
        )

        if rustmod is None:
            use_rust = False
        elif self._checkcase:
            # Case-insensitive filesystems are not handled yet
            use_rust = False
        elif subrepos:
            use_rust = False
        elif sparse.enabled:
            use_rust = False
        elif not isinstance(match, allowed_matchers):
            # Some matchers have yet to be implemented
            use_rust = False

        # Get the time from the filesystem so we can disambiguate files that
        # appear modified in the present or future.
        try:
            mtime_boundary = timestamp.get_fs_now(self._opener)
        except OSError:
            # In largefiles or readonly context
            mtime_boundary = None

        if use_rust:
            try:
                res = self._rust_status(
                    match, listclean, listignored, listunknown
                )
                return res + (mtime_boundary,)
            except rustmod.FallbackError:
                pass

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        checklink = self._checklink
        copymap = self._map.copymap

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            t = dget(fn)
            mode = t.mode
            size = t.size

            if not st and t.tracked:
                dadd(fn)
            elif t.p2_info:
                madd(fn)
            elif t.added:
                aadd(fn)
            elif t.removed:
                radd(fn)
            elif t.tracked:
                if not checklink and t.has_fallback_symlink:
                    # If the file system does not support symlinks, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif not checkexec and t.has_fallback_exec:
                    # If the file system does not support exec bits, the mode
                    # might not be correctly stored in the dirstate, so do not
                    # trust it.
                    ladd(fn)
                elif (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or fn in copymap
                ):
                    if stat.S_ISLNK(st.st_mode) and size != st.st_size:
                        # issue6456: the size returned may be longer due to
                        # encryption on EXT-4 fscrypt; undecided.
                        ladd(fn)
                    else:
                        madd(fn)
                elif not t.mtime_likely_equal_to(timestamp.mtime_of(st)):
                    # There might be a change in the future if for example the
                    # internal clock is off, but this is a case where the issues
                    # the user would face would be a lot worse and there is
                    # nothing we can really do.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
        status = scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
        return (lookup, status, mtime_boundary)

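A hedged usage sketch of status(), assuming a local checkout (internal API, exact return shape varies across Mercurial releases; in this version it is the triple documented above):

from mercurial import hg, match as matchmod, ui as uimod

ui = uimod.ui.load()
repo = hg.repository(ui, b'.')
m = matchmod.always()
lookup, st, mtime_boundary = repo.dirstate.status(
    m, subrepos=[], ignored=False, clean=False, unknown=True
)
print('definitely modified:', st.modified)
print('unsure, content must be compared:', lookup)
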
    def matches(self, match):
        """
        return files in the dirstate (in whatever state) filtered by match
        """
        dmap = self._map
        if rustmod is not None:
            dmap = self._map._map

        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files
            # is much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just
            # return that
            return list(files)
        return [f for f in dmap if match(f)]

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to make sure changes are
        # written out, because the latter skips writing while a transaction
        # is running. The output file will be used to create a backup of the
        # dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                tr,
                self._opener(filename, b"w", atomictemp=True, checkambig=True),
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate-1-main',
                (self._filename,),
                lambda f: self._writedirstate(tr, f),
                location=b'plain',
+               post_finalize=True,
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )

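The `+` line above is the dirstate side of this change: the generator now declares itself post-finalize at registration time instead of being looked up in a global name list. The same pattern for a hypothetical extension-owned file (`myext-file`, `myextfile`, and write_myext are illustrative, not real Mercurial names):

def register_myext_writer(tr):
    # content callback: receives one open file object per name in the tuple
    def write_myext(fp):
        fp.write(b'content derived from the finalized transaction\n')

    tr.addfilegenerator(
        b'myext-file',      # genid: re-registering the same id replaces it
        (b'myextfile',),    # filenames, relative to the chosen vfs
        write_myext,
        location=b'plain',  # key into transaction.vfsmap (.hg/ rather than store)
        post_finalize=True, # run only after finalizers, e.g. changelog write
    )
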
    def restorebackup(self, tr, backupname):
        '''Restore dirstate from backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)

    def verify(self, m1, m2):
        """check the dirstate content against the parent manifest and yield errors"""
        missing_from_p1 = b"%s in state %s, but not in manifest1\n"
        unexpected_in_p1 = b"%s in state %s, but also in manifest1\n"
        missing_from_ps = b"%s in state %s, but not in either manifest\n"
        missing_from_ds = b"%s in manifest1, but listed as state %s\n"
        for f, entry in self.items():
            state = entry.state
            if state in b"nr" and f not in m1:
                yield (missing_from_p1, f, state)
            if state in b"a" and f in m1:
                yield (unexpected_in_p1, f, state)
            if state in b"m" and f not in m1 and f not in m2:
                yield (missing_from_ps, f, state)
        for f in m1:
            state = self.get_entry(f).state
            if state not in b"nrm":
                yield (missing_from_ds, f, state)
@@ -1,773 +1,772 @@
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno

from .i18n import _
from . import (
    error,
    pycompat,
    util,
)
from .utils import stringutil

version = 2

-# These are the file generators that should only be executed after the
-# finalizers are done, since they rely on the output of the finalizers (like
-# the changelog having been written).
-postfinalizegenerators = {
-    b'bookmarks',
-    b'dirstate-0-key-pre',
-    b'dirstate-1-main',
-    b'dirstate-2-key-post',
-}
-
GEN_GROUP_ALL = b'all'
GEN_GROUP_PRE_FINALIZE = b'prefinalize'
GEN_GROUP_POST_FINALIZE = b'postfinalize'


def active(func):
    def _active(self, *args, **kwds):
        if self._count == 0:
            raise error.ProgrammingError(
                b'cannot use transaction when it is already committed/aborted'
            )
        return func(self, *args, **kwds)

    return _active


def _playback(
    journal,
    report,
    opener,
    vfsmap,
    entries,
    backupentries,
    unlink=True,
    checkambigfiles=None,
):
    for f, o in sorted(dict(entries).items()):
        if o or not unlink:
            checkambig = checkambigfiles and (f, b'') in checkambigfiles
            try:
                fp = opener(f, b'a', checkambig=checkambig)
                if fp.tell() < o:
                    raise error.Abort(
                        _(
                            b"attempted to truncate %s to %d bytes, but it was "
                            b"already %d bytes\n"
                        )
                        % (f, o, fp.tell())
                    )
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_(b"failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for l, f, b, c in backupentries:
        if l not in vfsmap and c:
            report(b"couldn't handle %s: unknown cache location %s\n" % (b, l))
        vfs = vfsmap[l]
        try:
            if f and b:
                filepath = vfs.join(f)
                backuppath = vfs.join(b)
                checkambig = checkambigfiles and (f, l) in checkambigfiles
                try:
                    util.copyfile(backuppath, filepath, checkambig=checkambig)
                    backupfiles.append(b)
                except IOError as exc:
                    e_msg = stringutil.forcebytestr(exc)
                    report(_(b"failed to recover %s (%s)\n") % (f, e_msg))
            else:
                target = f or b
                try:
                    vfs.unlink(target)
                except (IOError, OSError) as inst:
                    if inst.errno != errno.ENOENT:
                        raise
        except (IOError, OSError, error.Abort):
            if not c:
                raise

    backuppath = b"%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    opener.unlink(journal)
    try:
        for f in backupfiles:
            if opener.exists(f):
                opener.unlink(f)
    except (IOError, OSError, error.Abort):
        # only pure backup files remain; it is safe to ignore any error
        pass

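_playback undoes a transaction from the journal: append-only files are truncated back to their recorded offsets, and files recorded with offset 0 (newly created) are removed. The core idea in a self-contained sketch, assuming plain filesystem paths and unlink=True (the real code goes through vfs objects and also restores backup copies):

import os

def playback_offsets(entries):
    """Roll files back to their journaled sizes."""
    for path, offset in sorted(dict(entries).items()):
        if offset:
            with open(path, 'ab') as fp:  # 'ab' positions at end of file
                if fp.tell() < offset:
                    raise RuntimeError(
                        '%s is already shorter than the journaled %d bytes'
                        % (path, offset)
                    )
                fp.truncate(offset)  # drop everything written after the journal entry
        else:
            try:
                os.unlink(path)  # the file did not exist before the transaction
            except FileNotFoundError:
                pass
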
class transaction(util.transactional):
    def __init__(
        self,
        report,
        opener,
        vfsmap,
        journalname,
        undoname=None,
        after=None,
        createmode=None,
        validator=None,
        releasefn=None,
        checkambigfiles=None,
        name='<unnamed>',
    ):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event
        of an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `releasefn`: called after releasing (with transaction and result)

        `checkambigfiles` is a set of (path, vfs-location) tuples,
        which determines whether file stat ambiguity should be avoided
        for the corresponding files.
        """
        self._count = 1
        self._usages = 1
        self._report = report
        # a vfs to the store content
        self._opener = opener
        # a map to access files in various locations {location -> vfs}
        vfsmap = vfsmap.copy()
        vfsmap[b''] = opener  # set default value
        self._vfsmap = vfsmap
        self._after = after
        self._offsetmap = {}
        self._newfiles = set()
        self._journal = journalname
        self._undoname = undoname
        self._queue = []
        # A callback to do something just after releasing transaction.
        if releasefn is None:
            releasefn = lambda tr, success: None
        self._releasefn = releasefn

        self._checkambigfiles = set()
        if checkambigfiles:
            self._checkambigfiles.update(checkambigfiles)

        self._names = [name]

        # A dict dedicated to precisely tracking the changes introduced in the
        # transaction.
        self.changes = {}

        # a dict of arguments to be passed to hooks
        self.hookargs = {}
        self._file = opener.open(self._journal, b"w+")

        # a list of ('location', 'path', 'backuppath', cache) entries.
        # - if 'backuppath' is empty, no file existed at backup time
        # - if 'path' is empty, this is a temporary transaction file
        # - if 'location' is not empty, the path is outside the main opener's
        #   reach; use the 'location' value as a key in vfsmap to find the
        #   right 'vfs'
        # (cache is currently unused)
        self._backupentries = []
        self._backupmap = {}
        self._backupjournal = b"%s.backupfiles" % self._journal
        self._backupsfile = opener.open(self._backupjournal, b'w')
        self._backupsfile.write(b'%d\n' % version)

        if createmode is not None:
            opener.chmod(self._journal, createmode & 0o666)
            opener.chmod(self._backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}
        # hold callbacks to write pending data for hooks
        self._pendingcallback = {}
        # True if any pending data has ever been written
        self._anypending = False
        # holds callbacks to call when writing the transaction
        self._finalizecallback = {}
        # holds callbacks to call when validating the transaction;
        # these should raise an exception if anything is wrong
        self._validatecallback = {}
        if validator is not None:
            self._validatecallback[b'001-userhooks'] = validator
        # holds callbacks for post transaction close
        self._postclosecallback = {}
        # holds callbacks to call during abort
        self._abortcallback = {}

    def __repr__(self):
        name = '/'.join(self._names)
        return '<transaction name=%s, count=%d, usages=%d>' % (
            name,
            self._count,
            self._usages,
        )

    def __del__(self):
        if self._journal:
            self._abort()

    @property
    def finalized(self):
        return self._finalizecallback is None

    @active
    def startgroup(self):
        """delay registration of file entries

        This is used by strip to delay vision of strip offsets. The transaction
        sees either none or all of the strip actions to be done."""
        self._queue.append([])

    @active
    def endgroup(self):
        """apply delayed registration of file entries.

        This is used by strip to delay vision of strip offsets. The transaction
        sees either none or all of the strip actions to be done."""
        q = self._queue.pop()
        for f, o in q:
            self._addentry(f, o)

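startgroup/endgroup make a batch of journal registrations atomic from the transaction's point of view: entries are parked on a queue and only become visible once the group ends. The pattern in a self-contained toy (names are illustrative):

class GroupedJournal:
    def __init__(self):
        self.entries = []  # visible, journaled entries
        self.queue = []    # stack of pending groups

    def startgroup(self):
        self.queue.append([])

    def add(self, name, offset):
        if self.queue:
            self.queue[-1].append((name, offset))  # delayed until endgroup
        else:
            self.entries.append((name, offset))    # registered immediately

    def endgroup(self):
        # the whole group becomes visible at once, or not at all
        for name, offset in self.queue.pop():
            self.entries.append((name, offset))
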
259 @active
249 @active
260 def add(self, file, offset):
250 def add(self, file, offset):
261 """record the state of an append-only file before update"""
251 """record the state of an append-only file before update"""
262 if (
252 if (
263 file in self._newfiles
253 file in self._newfiles
264 or file in self._offsetmap
254 or file in self._offsetmap
265 or file in self._backupmap
             or file in self._backupmap
         ):
             return
         if self._queue:
             self._queue[-1].append((file, offset))
             return

         self._addentry(file, offset)

     def _addentry(self, file, offset):
         """add an append-only entry to memory and on-disk state"""
         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         if offset:
             self._offsetmap[file] = offset
         else:
             self._newfiles.add(file)
         # add enough data to the journal to do the truncate
         self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()

     @active
     def addbackup(self, file, hardlink=True, location=b''):
         """Adds a backup of the file to the transaction

         Calling addbackup() creates a hardlink backup of the specified file
         that is used to recover the file in the event of the transaction
         aborting.

         * `file`: the file path, relative to .hg/store
         * `hardlink`: use a hardlink to quickly create the backup
         """
         if self._queue:
             msg = b'cannot use transaction.addbackup inside "group"'
             raise error.ProgrammingError(msg)

         if (
             file in self._newfiles
             or file in self._offsetmap
             or file in self._backupmap
         ):
             return
         vfs = self._vfsmap[location]
         dirname, filename = vfs.split(file)
         backupfilename = b"%s.backup.%s" % (self._journal, filename)
         backupfile = vfs.reljoin(dirname, backupfilename)
         if vfs.exists(file):
             filepath = vfs.join(file)
             backuppath = vfs.join(backupfile)
             util.copyfile(filepath, backuppath, hardlink=hardlink)
         else:
             backupfile = b''

         self._addbackupentry((location, file, backupfile, False))

     def _addbackupentry(self, entry):
         """register a new backup entry and write it to disk"""
         self._backupentries.append(entry)
         self._backupmap[entry[1]] = len(self._backupentries) - 1
         self._backupsfile.write(b"%s\0%s\0%s\0%d\n" % entry)
         self._backupsfile.flush()

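To make the backup contract concrete, here is a minimal usage sketch in the spirit of the API above. It is not part of this changeset; the store path b'my-file' and the helper name are hypothetical. The hardlinked copy registered here is what _playback() restores if the transaction aborts.

def rewrite_with_backup(repo, tr):
    # hypothetical helper: register a backup, then rewrite the file in
    # place; on abort, the <journal>.backup.my-file copy is restored
    tr.addbackup(b'my-file')
    with repo.svfs(b'my-file', b'w') as fp:
        fp.write(b'new content\n')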
     @active
     def registertmp(self, tmpfile, location=b''):
         """register a temporary transaction file

         Such files will be deleted when the transaction exits (on both
         failure and success).
         """
         self._addbackupentry((location, b'', tmpfile, False))

     @active
     def addfilegenerator(
-        self, genid, filenames, genfunc, order=0, location=b''
+        self,
+        genid,
+        filenames,
+        genfunc,
+        order=0,
+        location=b'',
+        post_finalize=False,
     ):
         """add a function to generate some files at transaction commit

         The `genfunc` argument is a function capable of generating proper
         content for each entry in the `filenames` tuple.

         At transaction close time, `genfunc` will be called with one file
         object argument per entry in `filenames`.

         The transaction itself is responsible for the backup, creation and
         final write of such files.

         The `genid` argument is used to ensure the same set of files is only
         generated once. A call to `addfilegenerator` for a `genid` already
         present will overwrite the old entry.

         The `order` argument may be used to control the order in which
         multiple generators will be executed.

         The `location` argument may be used to indicate the files are located
         outside of the standard directory for transactions. It should match
         one of the keys of the `transaction.vfsmap` dictionary.
+
+        The `post_finalize` argument can be set to `True` for file generation
+        that must be run after the transaction has been finalized.
         """
         # For now, we are unable to do proper backup and restore of custom vfs
         # but for bookmarks that are handled outside this mechanism.
-        self._filegenerators[genid] = (order, filenames, genfunc, location)
+        entry = (order, filenames, genfunc, location, post_finalize)
+        self._filegenerators[genid] = entry

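To see the new keyword end to end, here is a condensed sketch modeled on the test extension updated at the bottom of this patch. The generator id and file name b'my-marker' are hypothetical, and `tr` is assumed to be an open transaction.

def write_marker(fp):
    # called at close time with one open file object per name in
    # `filenames`; the transaction handles backup and the final write
    fp.write(b'transaction finished\n')

tr.addfilegenerator(
    b'my-marker',
    [b'my-marker'],
    write_marker,
    post_finalize=True,  # replaces the old postfinalizegenerators global
)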
     @active
     def removefilegenerator(self, genid):
         """reverse of addfilegenerator, remove a file generator function"""
         if genid in self._filegenerators:
             del self._filegenerators[genid]

     def _generatefiles(self, suffix=b'', group=GEN_GROUP_ALL):
         # write files registered for generation
         any = False

         if group == GEN_GROUP_ALL:
             skip_post = skip_pre = False
         else:
             skip_pre = group == GEN_GROUP_POST_FINALIZE
             skip_post = group == GEN_GROUP_PRE_FINALIZE

         for id, entry in sorted(pycompat.iteritems(self._filegenerators)):
             any = True
-            order, filenames, genfunc, location = entry
+            order, filenames, genfunc, location, post_finalize = entry

             # for generation at closing, check if it's before or after finalize
-            is_post = id in postfinalizegenerators
-            if skip_post and is_post:
+            if skip_post and post_finalize:
                 continue
-            elif skip_pre and not is_post:
+            elif skip_pre and not post_finalize:
                 continue

             vfs = self._vfsmap[location]
             files = []
             try:
                 for name in filenames:
                     name += suffix
                     if suffix:
                         self.registertmp(name, location=location)
                         checkambig = False
                     else:
                         self.addbackup(name, location=location)
                         checkambig = (name, location) in self._checkambigfiles
                     files.append(
                         vfs(name, b'w', atomictemp=True, checkambig=checkambig)
                     )
                 genfunc(*files)
                 for f in files:
                     f.close()
                 # skip discard() loop since we're sure no open file remains
                 del files[:]
             finally:
                 for f in files:
                     f.discard()
         return any

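The two GEN_GROUP_* constants only matter at close time. As a sketch (condensed from close() further down, not verbatim code), the enforced ordering is:

def _close_generation_order(tr):
    # pre-finalize generators run first (post_finalize=False)
    tr._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
    # finalize callbacks run next; they may register more callbacks
    while tr._finalizecallback:
        callbacks = tr._finalizecallback
        tr._finalizecallback = {}
        for cat in sorted(callbacks):
            callbacks[cat](tr)
    # post-finalize generators run last (post_finalize=True)
    tr._generatefiles(group=GEN_GROUP_POST_FINALIZE)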
     @active
     def findoffset(self, file):
         if file in self._newfiles:
             return 0
         return self._offsetmap.get(file)

     @active
     def readjournal(self):
         self._file.seek(0)
         entries = []
         for l in self._file.readlines():
             file, troffset = l.split(b'\0')
             entries.append((file, int(troffset)))
         return entries

     @active
     def replace(self, file, offset):
         """
         replace can only replace already committed entries
         that are not pending in the queue
         """
         if file in self._newfiles:
             if not offset:
                 return
             self._newfiles.remove(file)
             self._offsetmap[file] = offset
         elif file in self._offsetmap:
             if not offset:
                 del self._offsetmap[file]
                 self._newfiles.add(file)
             else:
                 self._offsetmap[file] = offset
         else:
             raise KeyError(file)
         self._file.write(b"%s\0%d\n" % (file, offset))
         self._file.flush()

     @active
     def nest(self, name='<unnamed>'):
         self._count += 1
         self._usages += 1
         self._names.append(name)
         return self

     def release(self):
         if self._count > 0:
             self._usages -= 1
         if self._names:
             self._names.pop()
         # if the transaction scopes are left without being closed, fail
         if self._count > 0 and self._usages == 0:
             self._abort()

     def running(self):
         return self._count > 0

     def addpending(self, category, callback):
         """add a callback to be called when the transaction is pending

         The transaction will be given as callback's first argument.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._pendingcallback[category] = callback

     @active
     def writepending(self):
         """write pending file to temporary version

         This is used to allow hooks to view a transaction before commit"""
         categories = sorted(self._pendingcallback)
         for cat in categories:
             # remove callback since the data will have been flushed
             any = self._pendingcallback.pop(cat)(self)
             self._anypending = self._anypending or any
         self._anypending |= self._generatefiles(suffix=b'.pending')
         return self._anypending

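For context, a hypothetical use of the pending mechanism (the category name is invented). writepending() pops each callback, so it runs at most once per transaction, and a truthy return value records that '.pending' data exists for hooks to read.

def _expose_pending_state(tr):
    # a real callback would write the state a pretxn* hook should see
    # into a '<file>.pending' variant; returning True reports that
    # pending data was produced
    return True

tr.addpending(b'my-state', _expose_pending_state)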
     @active
     def hasfinalize(self, category):
         """check if a callback already exists for a category"""
         return category in self._finalizecallback

     @active
     def addfinalize(self, category, callback):
         """add a callback to be called when the transaction is closed

         The transaction will be given as callback's first argument.

         Category is a unique identifier to allow overwriting old callbacks with
         newer callbacks.
         """
         self._finalizecallback[category] = callback

     @active
     def addpostclose(self, category, callback):
         """add or replace a callback to be called after the transaction closed

         The transaction will be given as callback's first argument.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._postclosecallback[category] = callback

     @active
     def getpostclose(self, category):
         """return a postclose callback added before, or None"""
         return self._postclosecallback.get(category, None)

     @active
     def addabort(self, category, callback):
         """add a callback to be called when the transaction is aborted.

         The transaction will be given as the first argument to the callback.

         Category is a unique identifier to allow overwriting an old callback
         with a newer callback.
         """
         self._abortcallback[category] = callback

     @active
     def addvalidator(self, category, callback):
         """adds a callback to be called when validating the transaction.

         The transaction will be given as the first argument to the callback.

         The callback should raise an exception to abort the transaction."""
         self._validatecallback[category] = callback

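Together with addpending() above, these registration methods cover the distinct points of the transaction life cycle. A hypothetical registration of each (category names invented, no-op bodies), listed in the order they fire for a successful transaction; abort callbacks run only on failure:

def check(tr):
    pass  # addvalidator: raise here to veto the whole transaction

def flush(tr):
    pass  # addfinalize: runs while closing, before post_finalize generators

def cleanup(tr):
    pass  # addpostclose: runs after a successful close

def undo(tr):
    pass  # addabort: runs only if the transaction aborts

tr.addvalidator(b'my-check', check)
tr.addfinalize(b'my-flush', flush)
tr.addpostclose(b'my-cleanup', cleanup)
tr.addabort(b'my-undo', undo)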
     @active
     def close(self):
         '''commit the transaction'''
         if self._count == 1:
             for category in sorted(self._validatecallback):
                 self._validatecallback[category](self)
             self._validatecallback = None  # Help prevent cycles.
             self._generatefiles(group=GEN_GROUP_PRE_FINALIZE)
             while self._finalizecallback:
                 callbacks = self._finalizecallback
                 self._finalizecallback = {}
                 categories = sorted(callbacks)
                 for cat in categories:
                     callbacks[cat](self)
             # Prevent double usage and help clear cycles.
             self._finalizecallback = None
             self._generatefiles(group=GEN_GROUP_POST_FINALIZE)

         self._count -= 1
         if self._count != 0:
             return
         self._file.close()
         self._backupsfile.close()
         # cleanup temporary files
         for l, f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
                 self._report(
                     b"couldn't remove %s: unknown cache location %s\n" % (b, l)
                 )
                 continue
             vfs = self._vfsmap[l]
             if not f and b and vfs.exists(b):
                 try:
                     vfs.unlink(b)
                 except (IOError, OSError, error.Abort) as inst:
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
                     self._report(
                         b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                     )
         self._offsetmap = {}
         self._newfiles = set()
         self._writeundo()
         if self._after:
             self._after()
             self._after = None  # Help prevent cycles.
         if self._opener.isfile(self._backupjournal):
             self._opener.unlink(self._backupjournal)
         if self._opener.isfile(self._journal):
             self._opener.unlink(self._journal)
         for l, _f, b, c in self._backupentries:
             if l not in self._vfsmap and c:
                 self._report(
                     b"couldn't remove %s: unknown cache location"
                     b"%s\n" % (b, l)
                 )
                 continue
             vfs = self._vfsmap[l]
             if b and vfs.exists(b):
                 try:
                     vfs.unlink(b)
                 except (IOError, OSError, error.Abort) as inst:
                     if not c:
                         raise
                     # Abort may be raised by a read-only opener
                     self._report(
                         b"couldn't remove %s: %s\n" % (vfs.join(b), inst)
                     )
         self._backupentries = []
         self._journal = None

         self._releasefn(self, True)  # notify success of closing transaction
         self._releasefn = None  # Help prevent cycles.

         # run post close action
         categories = sorted(self._postclosecallback)
         for cat in categories:
             self._postclosecallback[cat](self)
         # Prevent double usage and help clear cycles.
         self._postclosecallback = None

     @active
     def abort(self):
         """abort the transaction (generally called on error, or when the
         transaction is not explicitly committed before going out of
         scope)"""
         self._abort()

     def _writeundo(self):
         """write transaction data for possible future undo call"""
         if self._undoname is None:
             return

         undo_backup_path = b"%s.backupfiles" % self._undoname
         undobackupfile = self._opener.open(undo_backup_path, b'w')
         undobackupfile.write(b'%d\n' % version)
         for l, f, b, c in self._backupentries:
             if not f:  # temporary file
                 continue
             if not b:
                 u = b''
             else:
                 if l not in self._vfsmap and c:
                     self._report(
                         b"couldn't remove %s: unknown cache location"
                         b"%s\n" % (b, l)
                     )
                     continue
                 vfs = self._vfsmap[l]
                 base, name = vfs.split(b)
                 assert name.startswith(self._journal), name
                 uname = name.replace(self._journal, self._undoname, 1)
                 u = vfs.reljoin(base, uname)
                 util.copyfile(vfs.join(b), vfs.join(u), hardlink=True)
             undobackupfile.write(b"%s\0%s\0%s\0%d\n" % (l, f, u, c))
         undobackupfile.close()

     def _abort(self):
         entries = self.readjournal()
         self._count = 0
         self._usages = 0
         self._file.close()
         self._backupsfile.close()

         try:
             if not entries and not self._backupentries:
                 if self._backupjournal:
                     self._opener.unlink(self._backupjournal)
                 if self._journal:
                     self._opener.unlink(self._journal)
                 return

             self._report(_(b"transaction abort!\n"))

             try:
                 for cat in sorted(self._abortcallback):
                     self._abortcallback[cat](self)
                 # Prevent double usage and help clear cycles.
                 self._abortcallback = None
                 _playback(
                     self._journal,
                     self._report,
                     self._opener,
                     self._vfsmap,
                     entries,
                     self._backupentries,
                     False,
                     checkambigfiles=self._checkambigfiles,
                 )
                 self._report(_(b"rollback completed\n"))
             except BaseException as exc:
                 self._report(_(b"rollback failed - please run hg recover\n"))
                 self._report(
                     _(b"(failure reason: %s)\n") % stringutil.forcebytestr(exc)
                 )
         finally:
             self._journal = None
             self._releasefn(self, False)  # notify failure of transaction
             self._releasefn = None  # Help prevent cycles.


 BAD_VERSION_MSG = _(
     b"journal was created by a different version of Mercurial\n"
 )


 def rollback(opener, vfsmap, file, report, checkambigfiles=None):
     """Rolls back the transaction contained in the given file

     Reads the entries in the specified file, and the corresponding
     '*.backupfiles' file, to recover from an incomplete transaction.

     * `file`: a file containing a list of entries, specifying where
       to truncate each file. The file should contain a list of
       file\0offset pairs, delimited by newlines. The corresponding
       '*.backupfiles' file should contain a list of file\0backupfile
       pairs, delimited by \0.

     `checkambigfiles` is a set of (path, vfs-location) tuples,
     which determine whether file stat ambiguity should be avoided
     when restoring the corresponding files.
     """
     entries = []
     backupentries = []

     with opener.open(file) as fp:
         lines = fp.readlines()
     for l in lines:
         try:
             f, o = l.split(b'\0')
             entries.append((f, int(o)))
         except ValueError:
             report(
                 _(b"couldn't read journal entry %r!\n") % pycompat.bytestr(l)
             )

     backupjournal = b"%s.backupfiles" % file
     if opener.exists(backupjournal):
         fp = opener.open(backupjournal)
         lines = fp.readlines()
         if lines:
             ver = lines[0][:-1]
             if ver != (b'%d' % version):
                 report(BAD_VERSION_MSG)
             else:
                 for line in lines[1:]:
                     if line:
                         # Shave off the trailing newline
                         line = line[:-1]
                         l, f, b, c = line.split(b'\0')
                         backupentries.append((l, f, b, bool(c)))

     _playback(
         file,
         report,
         opener,
         vfsmap,
         entries,
         backupentries,
         checkambigfiles=checkambigfiles,
     )
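As a concrete illustration of the journal format described in the docstring above (the file names and offsets are hypothetical), the parsing loop boils down to:

# each journal line is <store-relative file>\0<truncation offset>;
# offset 0 marks a file that did not exist before the transaction
journal = b"data/foo.i\x00812\ndata/foo.d\x000\n"
entries = []
for line in journal.splitlines():
    f, o = line.split(b'\0')
    entries.append((f, int(o)))
assert entries == [(b'data/foo.i', 812), (b'data/foo.d', 0)]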
@@ -1,32 +1,32 @@
 # tiny extension to abort a transaction very late during test
 #
 # Copyright 2020 Pierre-Yves David <pierre-yves.david@octobus.net>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 from __future__ import absolute_import

 from mercurial import (
     error,
-    transaction,
 )


 def abort(fp):
     raise error.Abort(b"This is a late abort")


 def reposetup(ui, repo):
-
-    transaction.postfinalizegenerators.add(b'late-abort')
-
     class LateAbortRepo(repo.__class__):
         def transaction(self, *args, **kwargs):
             tr = super(LateAbortRepo, self).transaction(*args, **kwargs)
             tr.addfilegenerator(
-                b'late-abort', [b'late-abort'], abort, order=9999999
+                b'late-abort',
+                [b'late-abort'],
+                abort,
+                order=9999999,
+                post_finalize=True,
             )
             return tr

     repo.__class__ = LateAbortRepo