utils: move finddirs() to pathutil...
Martin von Zweigbergk
r44032:0b773371 default

The requested changes are too big and content was truncated.
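A sketch of what this move means for callers (assuming the usual finddirs() semantics of yielding each parent directory of a path, ending with b''; the function itself is unchanged, only its home module changes):

    # before this change
    from mercurial import util
    list(util.finddirs(b'a/b/c'))      # [b'a/b', b'a', b'']

    # after this change
    from mercurial import pathutil
    list(pathutil.finddirs(b'a/b/c'))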

@@ -1,2986 +1,2986 b''
# context.py - changeset and file context objects for mercurial
#
# Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import filecmp
import os
import stat

from .i18n import _
from .node import (
    addednodeid,
    hex,
    modifiednodeid,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirhex,
)
from .pycompat import (
    getattr,
    open,
)
from . import (
    copies,
    dagop,
    encoding,
    error,
    fileset,
    match as matchmod,
    obsolete as obsmod,
    patch,
    pathutil,
    phases,
    pycompat,
    repoview,
    scmutil,
    sparse,
    subrepo,
    subrepoutil,
    util,
)
from .utils import (
    dateutil,
    stringutil,
)

propertycache = util.propertycache


class basectx(object):
    """A basectx object represents the common logic for its children:
    changectx: read-only context that is already present in the repo,
    workingctx: a context that represents the working directory and can
                be committed,
    memctx: a context that represents changes in-memory and can also
            be committed."""

    def __init__(self, repo):
        self._repo = repo

    def __bytes__(self):
        return short(self.node())

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __eq__(self, other):
        try:
            return type(self) == type(other) and self._rev == other._rev
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __contains__(self, key):
        return key in self._manifest

    def __getitem__(self, key):
        return self.filectx(key)

    def __iter__(self):
        return iter(self._manifest)

    def _buildstatusmanifest(self, status):
        """Builds a manifest that includes the given status results, if this is
        a working copy context. For non-working copy contexts, it just returns
        the normal manifest."""
        return self.manifest()

    def _matchstatus(self, other, match):
        """This internal method provides a way for child objects to override the
        match operator.
        """
        return match

    def _buildstatus(
        self, other, s, match, listignored, listclean, listunknown
    ):
        """build a status with respect to another context"""
        # Load earliest manifest first for caching reasons. More specifically,
        # if you have revisions 1000 and 1001, 1001 is probably stored as a
        # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
        # 1000 and cache it so that when you read 1001, we just need to apply a
        # delta to what's in the cache. So that's one full reconstruction + one
        # delta application.
        mf2 = None
        if self.rev() is not None and self.rev() < other.rev():
            mf2 = self._buildstatusmanifest(s)
        mf1 = other._buildstatusmanifest(s)
        if mf2 is None:
            mf2 = self._buildstatusmanifest(s)

        modified, added = [], []
        removed = []
        clean = []
        deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
        deletedset = set(deleted)
        d = mf1.diff(mf2, match=match, clean=listclean)
        for fn, value in pycompat.iteritems(d):
            if fn in deletedset:
                continue
            if value is None:
                clean.append(fn)
                continue
            (node1, flag1), (node2, flag2) = value
            if node1 is None:
                added.append(fn)
            elif node2 is None:
                removed.append(fn)
            elif flag1 != flag2:
                modified.append(fn)
            elif node2 not in wdirfilenodeids:
                # When comparing files between two commits, we save time by
                # not comparing the file contents when the nodeids differ.
                # Note that this means we incorrectly report a reverted change
                # to a file as a modification.
                modified.append(fn)
            elif self[fn].cmp(other[fn]):
                modified.append(fn)
            else:
                clean.append(fn)

        if removed:
            # need to filter files if they are already reported as removed
            unknown = [
                fn
                for fn in unknown
                if fn not in mf1 and (not match or match(fn))
            ]
            ignored = [
                fn
                for fn in ignored
                if fn not in mf1 and (not match or match(fn))
            ]
            # if they're deleted, don't report them as removed
            removed = [fn for fn in removed if fn not in deletedset]

        return scmutil.status(
            modified, added, removed, deleted, unknown, ignored, clean
        )
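
    # As an illustration of the shape consumed by the loop in _buildstatus()
    # above (filenames and nodeids are made up; the structure is inferred
    # from the unpacking the loop performs):
    #
    #   d = mf1.diff(mf2, clean=True)
    #   d == {
    #       b'added.txt':   (None, (node2, b'')),         # added in mf2
    #       b'removed.txt': ((node1, b''), None),         # removed in mf2
    #       b'chmod.sh':    ((node, b''), (node, b'x')),  # only flags differ
    #       b'same.txt':    None,                         # clean entry
    #   }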

    @propertycache
    def substate(self):
        return subrepoutil.state(self, self._repo.ui)

    def subrev(self, subpath):
        return self.substate[subpath][1]

    def rev(self):
        return self._rev

    def node(self):
        return self._node

    def hex(self):
        return hex(self.node())

    def manifest(self):
        return self._manifest

    def manifestctx(self):
        return self._manifestctx

    def repo(self):
        return self._repo

    def phasestr(self):
        return phases.phasenames[self.phase()]

    def mutable(self):
        return self.phase() > phases.public

    def matchfileset(self, expr, badfn=None):
        return fileset.match(self, expr, badfn=badfn)

    def obsolete(self):
        """True if the changeset is obsolete"""
        return self.rev() in obsmod.getrevs(self._repo, b'obsolete')

    def extinct(self):
        """True if the changeset is extinct"""
        return self.rev() in obsmod.getrevs(self._repo, b'extinct')

    def orphan(self):
        """True if the changeset is not obsolete, but its ancestor is"""
        return self.rev() in obsmod.getrevs(self._repo, b'orphan')

    def phasedivergent(self):
        """True if the changeset tries to be a successor of a public changeset

        Only non-public and non-obsolete changesets may be phase-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'phasedivergent')

    def contentdivergent(self):
        """Is a successor of a changeset with multiple possible successor sets

        Only non-public and non-obsolete changesets may be content-divergent.
        """
        return self.rev() in obsmod.getrevs(self._repo, b'contentdivergent')

    def isunstable(self):
        """True if the changeset is either orphan, phase-divergent or
        content-divergent"""
        return self.orphan() or self.phasedivergent() or self.contentdivergent()

    def instabilities(self):
        """return the list of instabilities affecting this changeset.

        Instabilities are returned as strings. possible values are:
        - orphan,
        - phase-divergent,
        - content-divergent.
        """
        instabilities = []
        if self.orphan():
            instabilities.append(b'orphan')
        if self.phasedivergent():
            instabilities.append(b'phase-divergent')
        if self.contentdivergent():
            instabilities.append(b'content-divergent')
        return instabilities

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def p1(self):
        return self._parents[0]

    def p2(self):
        parents = self._parents
        if len(parents) == 2:
            return parents[1]
        return self._repo[nullrev]

    def _fileinfo(self, path):
        if '_manifest' in self.__dict__:
            try:
                return self._manifest[path], self._manifest.flags(path)
            except KeyError:
                raise error.ManifestLookupError(
                    self._node, path, _(b'not found in manifest')
                )
        if '_manifestdelta' in self.__dict__ or path in self.files():
            if path in self._manifestdelta:
                return (
                    self._manifestdelta[path],
                    self._manifestdelta.flags(path),
                )
        mfl = self._repo.manifestlog
        try:
            node, flag = mfl[self._changeset.manifest].find(path)
        except KeyError:
            raise error.ManifestLookupError(
                self._node, path, _(b'not found in manifest')
            )

        return node, flag

    def filenode(self, path):
        return self._fileinfo(path)[0]

    def flags(self, path):
        try:
            return self._fileinfo(path)[1]
        except error.LookupError:
            return b''

    @propertycache
    def _copies(self):
        return copies.computechangesetcopies(self)

    def p1copies(self):
        return self._copies[0]

    def p2copies(self):
        return self._copies[1]

    def sub(self, path, allowcreate=True):
        '''return a subrepo for the stored revision of path, never wdir()'''
        return subrepo.subrepo(self, path, allowcreate=allowcreate)

    def nullsub(self, path, pctx):
        return subrepo.nullsubrepo(self, path, pctx)

    def workingsub(self, path):
        '''return a subrepo for the stored revision, or wdir if this is a wdir
        context.
        '''
        return subrepo.subrepo(self, path, allowwdir=True)

    def match(
        self,
        pats=None,
        include=None,
        exclude=None,
        default=b'glob',
        listsubrepos=False,
        badfn=None,
    ):
        r = self._repo
        return matchmod.match(
            r.root,
            r.getcwd(),
            pats,
            include,
            exclude,
            default,
            auditor=r.nofsauditor,
            ctx=self,
            listsubrepos=listsubrepos,
            badfn=badfn,
        )

    def diff(
        self,
        ctx2=None,
        match=None,
        changes=None,
        opts=None,
        losedatafn=None,
        pathfn=None,
        copy=None,
        copysourcematch=None,
        hunksfilterfn=None,
    ):
        """Returns a diff generator for the given contexts and matcher"""
        if ctx2 is None:
            ctx2 = self.p1()
        if ctx2 is not None:
            ctx2 = self._repo[ctx2]
        return patch.diff(
            self._repo,
            ctx2,
            self,
            match=match,
            changes=changes,
            opts=opts,
            losedatafn=losedatafn,
            pathfn=pathfn,
            copy=copy,
            copysourcematch=copysourcematch,
            hunksfilterfn=hunksfilterfn,
        )

    def dirs(self):
        return self._manifest.dirs()

    def hasdir(self, dir):
        return self._manifest.hasdir(dir)

    def status(
        self,
        other=None,
        match=None,
        listignored=False,
        listclean=False,
        listunknown=False,
        listsubrepos=False,
    ):
        """return status of files between two nodes or node and working
        directory.

        If other is None, compare this node with working directory.

        returns (modified, added, removed, deleted, unknown, ignored, clean)
        """

        ctx1 = self
        ctx2 = self._repo[other]

        # This next code block is, admittedly, fragile logic that tests for
        # reversing the contexts and wouldn't need to exist if it weren't for
        # the fast (and common) code path of comparing the working directory
        # with its first parent.
        #
        # What we're aiming for here is the ability to call:
        #
        # workingctx.status(parentctx)
        #
        # If we always built the manifest for each context and compared those,
        # then we'd be done. But the special case of the above call means we
        # just copy the manifest of the parent.
        reversed = False
        if not isinstance(ctx1, changectx) and isinstance(ctx2, changectx):
            reversed = True
            ctx1, ctx2 = ctx2, ctx1

        match = self._repo.narrowmatch(match)
        match = ctx2._matchstatus(ctx1, match)
        r = scmutil.status([], [], [], [], [], [], [])
        r = ctx2._buildstatus(
            ctx1, r, match, listignored, listclean, listunknown
        )

        if reversed:
            # Reverse added and removed. Clear deleted, unknown and ignored as
            # these make no sense to reverse.
            r = scmutil.status(
                r.modified, r.removed, r.added, [], [], [], r.clean
            )

        if listsubrepos:
            for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
                try:
                    rev2 = ctx2.subrev(subpath)
                except KeyError:
                    # A subrepo that existed in node1 was deleted between
                    # node1 and node2 (inclusive). Thus, ctx2's substate
                    # won't contain that subpath. The best we can do is
                    # ignore it.
                    rev2 = None
                submatch = matchmod.subdirmatcher(subpath, match)
                s = sub.status(
                    rev2,
                    match=submatch,
                    ignored=listignored,
                    clean=listclean,
                    unknown=listunknown,
                    listsubrepos=True,
                )
                for rfiles, sfiles in zip(r, s):
                    rfiles.extend(b"%s/%s" % (subpath, f) for f in sfiles)

        for l in r:
            l.sort()

        return r
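
    # A usage sketch (assumed localrepo instance `repo`, not from this file):
    # comparing a changeset with the working directory, i.e. the
    # `other is None` case documented above:
    #
    #   st = repo[b'.'].status()    # '.' against the working directory
    #   st.modified, st.added, st.removed
    #
    # The result is a scmutil.status tuple; each of its seven fields is a
    # sorted list of filenames.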


class changectx(basectx):
    """A changecontext object makes access to data related to a particular
    changeset convenient. It represents a read-only context already present in
    the repo."""

    def __init__(self, repo, rev, node):
        super(changectx, self).__init__(repo)
        self._rev = rev
        self._node = node

    def __hash__(self):
        try:
            return hash(self._rev)
        except AttributeError:
            return id(self)

    def __nonzero__(self):
        return self._rev != nullrev

    __bool__ = __nonzero__

    @propertycache
    def _changeset(self):
        return self._repo.changelog.changelogrevision(self.rev())

    @propertycache
    def _manifest(self):
        return self._manifestctx.read()

    @property
    def _manifestctx(self):
        return self._repo.manifestlog[self._changeset.manifest]

    @propertycache
    def _manifestdelta(self):
        return self._manifestctx.readdelta()

    @propertycache
    def _parents(self):
        repo = self._repo
        p1, p2 = repo.changelog.parentrevs(self._rev)
        if p2 == nullrev:
            return [repo[p1]]
        return [repo[p1], repo[p2]]

    def changeset(self):
        c = self._changeset
        return (
            c.manifest,
            c.user,
            c.date,
            c.files,
            c.description,
            c.extra,
        )

    def manifestnode(self):
        return self._changeset.manifest

    def user(self):
        return self._changeset.user

    def date(self):
        return self._changeset.date

    def files(self):
        return self._changeset.files

    def filesmodified(self):
        modified = set(self.files())
        modified.difference_update(self.filesadded())
        modified.difference_update(self.filesremoved())
        return sorted(modified)

    def filesadded(self):
        filesadded = self._changeset.filesadded
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesadded = None
        if filesadded is None:
            if compute_on_none:
                filesadded = copies.computechangesetfilesadded(self)
            else:
                filesadded = []
        return filesadded

    def filesremoved(self):
        filesremoved = self._changeset.filesremoved
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                filesremoved = None
        if filesremoved is None:
            if compute_on_none:
                filesremoved = copies.computechangesetfilesremoved(self)
            else:
                filesremoved = []
        return filesremoved

    @propertycache
    def _copies(self):
        p1copies = self._changeset.p1copies
        p2copies = self._changeset.p2copies
        compute_on_none = True
        if self._repo.filecopiesmode == b'changeset-sidedata':
            compute_on_none = False
        else:
            source = self._repo.ui.config(b'experimental', b'copies.read-from')
            # If config says to get copy metadata only from changeset, then
            # return that, defaulting to {} if there was no copy metadata. In
            # compatibility mode, we return copy data from the changeset if it
            # was recorded there, and otherwise we fall back to getting it from
            # the filelogs (below).
            #
            # If we are in compatibility mode and there is no data in the
            # changeset, we get the copy metadata from the filelogs.
            #
            # Otherwise, when the config says to read only from the filelog,
            # we get the copy metadata from the filelogs.
            if source == b'changeset-only':
                compute_on_none = False
            elif source != b'compatibility':
                # filelog mode, ignore any changelog content
                p1copies = p2copies = None
        if p1copies is None:
            if compute_on_none:
                p1copies, p2copies = super(changectx, self)._copies
            else:
                if p1copies is None:
                    p1copies = {}
                if p2copies is None:
                    p2copies = {}
        return p1copies, p2copies
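
    # As a configuration sketch (hgrc syntax) of the `copies.read-from`
    # handling above -- the `filelog-only` spelling is an assumption; any
    # value other than the two literals checked falls through to the
    # filelog branch:
    #
    #   [experimental]
    #   copies.read-from = changeset-only   # changeset data only, {} default
    #   copies.read-from = compatibility    # changeset first, filelog fallback
    #   copies.read-from = filelog-only     # ignore changeset copy metadata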

    def description(self):
        return self._changeset.description

    def branch(self):
        return encoding.tolocal(self._changeset.extra.get(b"branch"))

    def closesbranch(self):
        return b'close' in self._changeset.extra

    def extra(self):
        """Return a dict of extra information."""
        return self._changeset.extra

    def tags(self):
        """Return a list of byte tag names"""
        return self._repo.nodetags(self._node)

    def bookmarks(self):
        """Return a list of byte bookmark names."""
        return self._repo.nodebookmarks(self._node)

    def phase(self):
        return self._repo._phasecache.phase(self._repo, self._rev)

    def hidden(self):
        return self._rev in repoview.filterrevs(self._repo, b'visible')

    def isinmemory(self):
        return False

    def children(self):
        """return list of changectx contexts for each child changeset.

        This returns only the immediate child changesets. Use descendants() to
        recursively walk children.
        """
        c = self._repo.changelog.children(self._node)
        return [self._repo[x] for x in c]

    def ancestors(self):
        for a in self._repo.changelog.ancestors([self._rev]):
            yield self._repo[a]

    def descendants(self):
        """Recursively yield all children of the changeset.

        For just the immediate children, use children()
        """
        for d in self._repo.changelog.descendants([self._rev]):
            yield self._repo[d]

    def filectx(self, path, fileid=None, filelog=None):
        """get a file context from this changeset"""
        if fileid is None:
            fileid = self.filenode(path)
        return filectx(
            self._repo, path, fileid=fileid, changectx=self, filelog=filelog
        )

    def ancestor(self, c2, warn=False):
        """return the "best" ancestor context of self and c2

        If there are multiple candidates, it will show a message and check
        merge.preferancestor configuration before falling back to the
        revlog ancestor."""
        # deal with workingctxs
        n2 = c2._node
        if n2 is None:
            n2 = c2._parents[0]._node
        cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
        if not cahs:
            anc = nullid
        elif len(cahs) == 1:
            anc = cahs[0]
        else:
            # experimental config: merge.preferancestor
            for r in self._repo.ui.configlist(b'merge', b'preferancestor'):
                try:
                    ctx = scmutil.revsymbol(self._repo, r)
                except error.RepoLookupError:
                    continue
                anc = ctx.node()
                if anc in cahs:
                    break
            else:
                anc = self._repo.changelog.ancestor(self._node, n2)
            if warn:
                self._repo.ui.status(
                    (
                        _(b"note: using %s as ancestor of %s and %s\n")
                        % (short(anc), short(self._node), short(n2))
                    )
                    + b''.join(
                        _(
                            b" alternatively, use --config "
                            b"merge.preferancestor=%s\n"
                        )
                        % short(n)
                        for n in sorted(cahs)
                        if n != anc
                    )
                )
        return self._repo[anc]
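
    # As a configuration sketch for the loop above: when there are several
    # common ancestor heads, one can be pinned via hgrc (hypothetical
    # revision shown):
    #
    #   [merge]
    #   preferancestor = 1f8b6e3c42ab
    #
    # Each listed revision is resolved with scmutil.revsymbol() and used if
    # it is one of the common ancestor heads; otherwise the code falls back
    # to the revlog ancestor.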

    def isancestorof(self, other):
        """True if this changeset is an ancestor of other"""
        return self._repo.changelog.isancestorrev(self._rev, other._rev)

    def walk(self, match):
        '''Generates matching file names.'''

        # Wrap match.bad method to have message with nodeid
        def bad(fn, msg):
            # The manifest doesn't know about subrepos, so don't complain about
            # paths into valid subrepos.
            if any(fn == s or fn.startswith(s + b'/') for s in self.substate):
                return
            match.bad(fn, _(b'no such file in rev %s') % self)

        m = matchmod.badmatch(self._repo.narrowmatch(match), bad)
        return self._manifest.walk(m)

    def matches(self, match):
        return self.walk(match)


class basefilectx(object):
    """A filecontext object represents the common logic for its children:
    filectx: read-only access to a filerevision that is already present
             in the repo,
    workingfilectx: a filecontext that represents files from the working
                    directory,
    memfilectx: a filecontext that represents files in-memory,
    """

    @propertycache
    def _filelog(self):
        return self._repo.file(self._path)

    @propertycache
    def _changeid(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        elif '_descendantrev' in self.__dict__:
            # this file context was created from a revision with a known
            # descendant, we can (lazily) correct for linkrev aliases
            return self._adjustlinkrev(self._descendantrev)
        else:
            return self._filelog.linkrev(self._filerev)

    @propertycache
    def _filenode(self):
        if '_fileid' in self.__dict__:
            return self._filelog.lookup(self._fileid)
        else:
            return self._changectx.filenode(self._path)

    @propertycache
    def _filerev(self):
        return self._filelog.rev(self._filenode)

    @propertycache
    def _repopath(self):
        return self._path

    def __nonzero__(self):
        try:
            self._filenode
            return True
        except error.LookupError:
            # file is missing
            return False

    __bool__ = __nonzero__

    def __bytes__(self):
        try:
            return b"%s@%s" % (self.path(), self._changectx)
        except error.LookupError:
            return b"%s@???" % self.path()

    __str__ = encoding.strmethod(__bytes__)

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, str(self))

    def __hash__(self):
        try:
            return hash((self._path, self._filenode))
        except AttributeError:
            return id(self)

    def __eq__(self, other):
        try:
            return (
                type(self) == type(other)
                and self._path == other._path
                and self._filenode == other._filenode
            )
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def filerev(self):
        return self._filerev

    def filenode(self):
        return self._filenode

    @propertycache
    def _flags(self):
        return self._changectx.flags(self._path)

    def flags(self):
        return self._flags

    def filelog(self):
        return self._filelog

    def rev(self):
        return self._changeid

    def linkrev(self):
        return self._filelog.linkrev(self._filerev)

    def node(self):
        return self._changectx.node()

    def hex(self):
        return self._changectx.hex()

    def user(self):
        return self._changectx.user()

    def date(self):
        return self._changectx.date()

    def files(self):
        return self._changectx.files()

    def description(self):
        return self._changectx.description()

    def branch(self):
        return self._changectx.branch()

    def extra(self):
        return self._changectx.extra()

    def phase(self):
        return self._changectx.phase()

    def phasestr(self):
        return self._changectx.phasestr()

    def obsolete(self):
        return self._changectx.obsolete()

    def instabilities(self):
        return self._changectx.instabilities()

    def manifest(self):
        return self._changectx.manifest()

    def changectx(self):
        return self._changectx

    def renamed(self):
        return self._copied

    def copysource(self):
        return self._copied and self._copied[0]

    def repo(self):
        return self._repo

    def size(self):
        return len(self.data())

    def path(self):
        return self._path

    def isbinary(self):
        try:
            return stringutil.binary(self.data())
        except IOError:
            return False

    def isexec(self):
        return b'x' in self.flags()

    def islink(self):
        return b'l' in self.flags()

    def isabsent(self):
        """whether this filectx represents a file not in self._changectx

        This is mainly for merge code to detect change/delete conflicts. This is
        expected to be True for all subclasses of basectx."""
        return False

    _customcmp = False

    def cmp(self, fctx):
        """compare with other file context

        returns True if different from fctx.
        """
        if fctx._customcmp:
            return fctx.cmp(self)

        if self._filenode is None:
            raise error.ProgrammingError(
                b'filectx.cmp() must be reimplemented if not backed by revlog'
            )

        if fctx._filenode is None:
            if self._repo._encodefilterpats:
                # can't rely on size() because wdir content may be decoded
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() - 4 == fctx.size():
                # size() can match:
                # if file data starts with '\1\n', empty metadata block is
                # prepended, which adds 4 bytes to filelog.size().
                return self._filelog.cmp(self._filenode, fctx.data())
            if self.size() == fctx.size():
                # size() matches: need to compare content
                return self._filelog.cmp(self._filenode, fctx.data())

        # size() differs
        return True
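
    # The `size() - 4` check above works because filelog data may be framed
    # by a metadata block delimited with b'\x01\n' markers; an empty block is
    # exactly the two 2-byte markers. A toy illustration of the arithmetic
    # (plain bytes, not Mercurial API):
    #
    #   text = b'hello\n'
    #   stored = b'\x01\n' + b'\x01\n' + text  # empty metadata block prepended
    #   assert len(stored) == len(text) + 4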

    def _adjustlinkrev(self, srcrev, inclusive=False, stoprev=None):
        """return the first ancestor of <srcrev> introducing <fnode>

        If the linkrev of the file revision does not point to an ancestor of
        srcrev, we'll walk down the ancestors until we find one introducing
        this file revision.

        :srcrev: the changeset revision we search ancestors from
        :inclusive: if true, the src revision will also be checked
        :stoprev: an optional revision to stop the walk at. If no introduction
                  of this file content could be found before this floor
                  revision, the function returns "None" and stops its
                  iteration.
        """
        repo = self._repo
        cl = repo.unfiltered().changelog
        mfl = repo.manifestlog
        # fetch the linkrev
        lkr = self.linkrev()
        if srcrev == lkr:
            return lkr
        # hack to reuse ancestor computation when searching for renames
        memberanc = getattr(self, '_ancestrycontext', None)
        iteranc = None
        if srcrev is None:
            # wctx case, used by workingfilectx during mergecopy
            revs = [p.rev() for p in self._repo[None].parents()]
            inclusive = True  # we skipped the real (revless) source
        else:
            revs = [srcrev]
        if memberanc is None:
            memberanc = iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
        # check if this linkrev is an ancestor of srcrev
        if lkr not in memberanc:
            if iteranc is None:
                iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
            fnode = self._filenode
            path = self._path
            for a in iteranc:
                if stoprev is not None and a < stoprev:
                    return None
                ac = cl.read(a)  # get changeset data (we avoid object creation)
                if path in ac[3]:  # checking the 'files' field.
                    # The file has been touched, check if the content is
                    # similar to the one we search for.
                    if fnode == mfl[ac[0]].readfast().get(path):
                        return a
            # In theory, we should never get out of that loop without a result.
            # But if the manifest uses a buggy file revision (not a child of
            # the one it replaces), we could. Such a buggy situation will
            # likely crash somewhere else at some point.
        return lkr

    def isintroducedafter(self, changelogrev):
        """True if a filectx has been introduced after a given floor revision
        """
        if self.linkrev() >= changelogrev:
            return True
        introrev = self._introrev(stoprev=changelogrev)
        if introrev is None:
            return False
        return introrev >= changelogrev

    def introrev(self):
        """return the rev of the changeset which introduced this file revision

        This method is different from linkrev because it takes into account the
        changeset the filectx was created from. It ensures the returned
        revision is one of its ancestors. This prevents bugs from
        'linkrev-shadowing' when a file revision is used by multiple
        changesets.
        """
        return self._introrev()

    def _introrev(self, stoprev=None):
        """
        Same as `introrev`, but with an extra argument to limit the changelog
        iteration range in some internal use cases.

        If `stoprev` is set, the `introrev` will not be searched past that
        `stoprev` revision and "None" might be returned. This is useful to
        limit the iteration range.
        """
        toprev = None
        attrs = vars(self)
        if '_changeid' in attrs:
            # We have a cached value already
            toprev = self._changeid
        elif '_changectx' in attrs:
            # We know which changelog entry we are coming from
            toprev = self._changectx.rev()

        if toprev is not None:
            return self._adjustlinkrev(toprev, inclusive=True, stoprev=stoprev)
        elif '_descendantrev' in attrs:
            introrev = self._adjustlinkrev(self._descendantrev, stoprev=stoprev)
            # be nice and cache the result of the computation
            if introrev is not None:
                self._changeid = introrev
            return introrev
        else:
            return self.linkrev()
1044
1044
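# Illustrative note (editorial addition, not part of the original source):
# stoprev bounds the changelog walk, so callers such as isintroducedafter()
# can stop scanning once the answer can no longer change, instead of walking
# all the way down to the true introduction revision.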
1045 def introfilectx(self):
1046 """Return filectx having identical contents, but pointing to the
1047 changeset revision where this filectx was introduced"""
1048 introrev = self.introrev()
1049 if self.rev() == introrev:
1050 return self
1051 return self.filectx(self.filenode(), changeid=introrev)
1052
1053 def _parentfilectx(self, path, fileid, filelog):
1054 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
1055 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
1056 if '_changeid' in vars(self) or '_changectx' in vars(self):
1057 # If self is associated with a changeset (probably explicitly
1058 # fed), ensure the created filectx is associated with a
1059 # changeset that is an ancestor of self.changectx.
1060 # This lets us later use _adjustlinkrev to get a correct link.
1061 fctx._descendantrev = self.rev()
1062 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1063 elif '_descendantrev' in vars(self):
1064 # Otherwise propagate _descendantrev if we have one associated.
1065 fctx._descendantrev = self._descendantrev
1066 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
1067 return fctx
1068
1069 def parents(self):
1070 _path = self._path
1071 fl = self._filelog
1072 parents = self._filelog.parents(self._filenode)
1073 pl = [(_path, node, fl) for node in parents if node != nullid]
1074
1075 r = fl.renamed(self._filenode)
1076 if r:
1077 # - In the simple rename case, both parents are nullid, pl is empty.
1078 # - In case of merge, only one of the parents is nullid and should
1079 # be replaced with the rename information. This parent is -always-
1080 # the first one.
1081 #
1082 # As nullid parents have always been filtered out by the previous
1083 # list comprehension, inserting at 0 will always result in
1084 # replacing the first nullid parent with the rename information.
1085 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
1086
1087 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
1088
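# Illustrative sketch (editorial addition, not part of the original source):
# because parents() folds rename information in, repeatedly taking the first
# parent traces a file's history across copies. The file name is
# hypothetical.
#
#     fctx = repo[b'tip'][b'renamed.txt']
#     while fctx.parents():
#         fctx = fctx.parents()[0]
#         print(fctx.path(), fctx.rev())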
1089 def p1(self):
1090 return self.parents()[0]
1091
1092 def p2(self):
1093 p = self.parents()
1094 if len(p) == 2:
1095 return p[1]
1096 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
1097
1098 def annotate(self, follow=False, skiprevs=None, diffopts=None):
1099 """Returns a list of annotateline objects for each line in the file
1100
1101 - line.fctx is the filectx of the node where that line was last changed
1102 - line.lineno is the line number at the first appearance in the managed
1103 file
1104 - line.text is the data on that line (including newline character)
1105 """
1106 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
1107
1108 def parents(f):
1109 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
1110 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
1111 # from the topmost introrev (= srcrev) down to p.linkrev() if it
1112 # isn't an ancestor of the srcrev.
1113 f._changeid
1114 pl = f.parents()
1115
1116 # Don't return renamed parents if we aren't following.
1117 if not follow:
1118 pl = [p for p in pl if p.path() == f.path()]
1119
1120 # renamed filectx won't have a filelog yet, so set it
1121 # from the cache to save time
1122 for p in pl:
1123 if not '_filelog' in p.__dict__:
1124 p._filelog = getlog(p.path())
1125
1126 return pl
1127
1128 # use linkrev to find the first changeset where self appeared
1129 base = self.introfilectx()
1130 if getattr(base, '_ancestrycontext', None) is None:
1131 cl = self._repo.changelog
1132 if base.rev() is None:
1133 # wctx is not inclusive, but works because _ancestrycontext
1134 # is used to test filelog revisions
1135 ac = cl.ancestors(
1136 [p.rev() for p in base.parents()], inclusive=True
1137 )
1138 else:
1139 ac = cl.ancestors([base.rev()], inclusive=True)
1140 base._ancestrycontext = ac
1141
1142 return dagop.annotate(
1143 base, parents, skiprevs=skiprevs, diffopts=diffopts
1144 )
1145
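# Illustrative sketch (editorial addition, not part of the original source):
# typical use of annotate() to print blame information. The file name is
# hypothetical; line.text is bytes.
#
#     for line in repo[b'tip'][b'README'].annotate(follow=True):
#         print(line.fctx.rev(), line.lineno, line.text.rstrip())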
1146 def ancestors(self, followfirst=False):
1147 visit = {}
1148 c = self
1149 if followfirst:
1150 cut = 1
1151 else:
1152 cut = None
1153
1154 while True:
1155 for parent in c.parents()[:cut]:
1156 visit[(parent.linkrev(), parent.filenode())] = parent
1157 if not visit:
1158 break
1159 c = visit.pop(max(visit))
1160 yield c
1161
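# Illustrative note (editorial addition, not part of the original source):
# the visit dict above acts as a small priority queue keyed by
# (linkrev, filenode); popping max(visit) yields ancestors in roughly
# newest-first linkrev order.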
1162 def decodeddata(self):
1163 """Returns `data()` after running repository decoding filters.
1164
1165 This is often equivalent to how the data would be expressed on disk.
1166 """
1167 return self._repo.wwritedata(self.path(), self.data())
1168
1169
1170 class filectx(basefilectx):
1171 """A filecontext object makes access to data related to a particular
1172 filerevision convenient."""
1173
1174 def __init__(
1175 self,
1176 repo,
1177 path,
1178 changeid=None,
1179 fileid=None,
1180 filelog=None,
1181 changectx=None,
1182 ):
1183 """changeid must be a revision number, if specified.
1184 fileid can be a file revision or node."""
1185 self._repo = repo
1186 self._path = path
1187
1188 assert (
1189 changeid is not None or fileid is not None or changectx is not None
1190 ), (
1191 b"bad args: changeid=%r, fileid=%r, changectx=%r"
1192 % (changeid, fileid, changectx,)
1193 )
1194
1195 if filelog is not None:
1196 self._filelog = filelog
1197
1198 if changeid is not None:
1199 self._changeid = changeid
1200 if changectx is not None:
1201 self._changectx = changectx
1202 if fileid is not None:
1203 self._fileid = fileid
1204
1205 @propertycache
1206 def _changectx(self):
1207 try:
1208 return self._repo[self._changeid]
1209 except error.FilteredRepoLookupError:
1210 # Linkrev may point to any revision in the repository. When the
1211 # repository is filtered this may lead to `filectx` trying to build
1212 # `changectx` for a filtered revision. In such cases we fall back
1213 # to creating `changectx` on the unfiltered version of the repository.
1214 # This fallback should not be an issue because `changectx` from
1215 # `filectx` are not used in complex operations that care about
1216 # filtering.
1217 #
1218 # This fallback is a cheap and dirty fix that prevents several
1219 # crashes. It does not ensure the behavior is correct. However the
1220 # behavior was not correct before filtering either, and "incorrect
1221 # behavior" is seen as better than a "crash".
1222 #
1223 # Linkrevs have several serious troubles with filtering that are
1224 # complicated to solve. Proper handling of the issue here should be
1225 # considered when solutions to the linkrev issues are on the table.
1226 return self._repo.unfiltered()[self._changeid]
1227
1228 def filectx(self, fileid, changeid=None):
1229 '''opens an arbitrary revision of the file without
1230 opening a new filelog'''
1231 return filectx(
1232 self._repo,
1233 self._path,
1234 fileid=fileid,
1235 filelog=self._filelog,
1236 changeid=changeid,
1237 )
1238
1239 def rawdata(self):
1240 return self._filelog.rawdata(self._filenode)
1241
1242 def rawflags(self):
1243 """low-level revlog flags"""
1244 return self._filelog.flags(self._filerev)
1245
1246 def data(self):
1247 try:
1248 return self._filelog.read(self._filenode)
1249 except error.CensoredNodeError:
1250 if self._repo.ui.config(b"censor", b"policy") == b"ignore":
1251 return b""
1252 raise error.Abort(
1253 _(b"censored node: %s") % short(self._filenode),
1254 hint=_(b"set censor.policy to ignore errors"),
1255 )
1256
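# Illustrative note (editorial addition, not part of the original source):
# as the except branch above shows, reads of censored file nodes can be
# downgraded from an abort to empty content via repository configuration,
# e.g. in hgrc:
#
#     [censor]
#     policy = ignore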
1257 def size(self):
1258 return self._filelog.size(self._filerev)
1259
1260 @propertycache
1261 def _copied(self):
1262 """check if file was actually renamed in this changeset revision
1263
1264 If a rename is logged in the file revision, we report the copy for
1265 the changeset only if the file revision's linkrev points back to the
1266 changeset in question or both changeset parents contain different
1267 file revisions.
1268 """
1269
1270 renamed = self._filelog.renamed(self._filenode)
1271 if not renamed:
1272 return None
1273
1274 if self.rev() == self.linkrev():
1275 return renamed
1276
1277 name = self.path()
1278 fnode = self._filenode
1279 for p in self._changectx.parents():
1280 try:
1281 if fnode == p.filenode(name):
1282 return None
1283 except error.LookupError:
1284 pass
1285 return renamed
1286 def children(self):
1287 # hard for renames
1288 c = self._filelog.children(self._filenode)
1289 return [
1290 filectx(self._repo, self._path, fileid=x, filelog=self._filelog)
1291 for x in c
1292 ]
1293
1294
1295 class committablectx(basectx):
1296 """A committablectx object provides common functionality for a context that
1297 wants the ability to commit, e.g. workingctx or memctx."""
1298
1299 def __init__(
1300 self,
1301 repo,
1302 text=b"",
1303 user=None,
1304 date=None,
1305 extra=None,
1306 changes=None,
1307 branch=None,
1308 ):
1309 super(committablectx, self).__init__(repo)
1310 self._rev = None
1311 self._node = None
1312 self._text = text
1313 if date:
1314 self._date = dateutil.parsedate(date)
1315 if user:
1316 self._user = user
1317 if changes:
1318 self._status = changes
1319
1320 self._extra = {}
1321 if extra:
1322 self._extra = extra.copy()
1323 if branch is not None:
1324 self._extra[b'branch'] = encoding.fromlocal(branch)
1325 if not self._extra.get(b'branch'):
1326 self._extra[b'branch'] = b'default'
1327
1328 def __bytes__(self):
1329 return bytes(self._parents[0]) + b"+"
1330
1331 __str__ = encoding.strmethod(__bytes__)
1332
1333 def __nonzero__(self):
1334 return True
1335
1336 __bool__ = __nonzero__
1337
1338 @propertycache
1339 def _status(self):
1340 return self._repo.status()
1341
1342 @propertycache
1343 def _user(self):
1344 return self._repo.ui.username()
1345
1346 @propertycache
1347 def _date(self):
1348 ui = self._repo.ui
1349 date = ui.configdate(b'devel', b'default-date')
1350 if date is None:
1351 date = dateutil.makedate()
1352 return date
1353
1354 def subrev(self, subpath):
1355 return None
1356
1357 def manifestnode(self):
1358 return None
1359
1360 def user(self):
1361 return self._user or self._repo.ui.username()
1362
1363 def date(self):
1364 return self._date
1365
1366 def description(self):
1367 return self._text
1368
1369 def files(self):
1370 return sorted(
1371 self._status.modified + self._status.added + self._status.removed
1372 )
1373
1374 def modified(self):
1375 return self._status.modified
1376
1377 def added(self):
1378 return self._status.added
1379
1380 def removed(self):
1381 return self._status.removed
1382
1383 def deleted(self):
1384 return self._status.deleted
1385
1386 filesmodified = modified
1387 filesadded = added
1388 filesremoved = removed
1389
1390 def branch(self):
1391 return encoding.tolocal(self._extra[b'branch'])
1392
1393 def closesbranch(self):
1394 return b'close' in self._extra
1395
1396 def extra(self):
1397 return self._extra
1398
1399 def isinmemory(self):
1400 return False
1401
1402 def tags(self):
1403 return []
1404
1405 def bookmarks(self):
1406 b = []
1407 for p in self.parents():
1408 b.extend(p.bookmarks())
1409 return b
1410
1411 def phase(self):
1412 phase = phases.draft # default phase to draft
1413 for p in self.parents():
1414 phase = max(phase, p.phase())
1415 return phase
1416
1417 def hidden(self):
1418 return False
1419
1420 def children(self):
1421 return []
1422
1423 def ancestor(self, c2):
1424 """return the "best" ancestor context of self and c2"""
1425 return self._parents[0].ancestor(c2) # punt on two parents for now
1426
1427 def ancestors(self):
1428 for p in self._parents:
1429 yield p
1430 for a in self._repo.changelog.ancestors(
1431 [p.rev() for p in self._parents]
1432 ):
1433 yield self._repo[a]
1434
1435 def markcommitted(self, node):
1436 """Perform post-commit cleanup necessary after committing this ctx
1437
1438 Specifically, this updates backing stores this working context
1439 wraps to reflect the fact that the changes reflected by this
1440 workingctx have been committed. For example, it marks
1441 modified and added files as normal in the dirstate.
1442
1443 """
1444
1445 def dirty(self, missing=False, merge=True, branch=True):
1446 return False
1447
1448
1449 class workingctx(committablectx):
1450 """A workingctx object makes access to data related to
1451 the current working directory convenient.
1452 date - any valid date string or (unixtime, offset), or None.
1453 user - username string, or None.
1454 extra - a dictionary of extra values, or None.
1455 changes - a list of file lists as returned by localrepo.status()
1456 or None to use the repository status.
1457 """
1458
1459 def __init__(
1460 self, repo, text=b"", user=None, date=None, extra=None, changes=None
1461 ):
1462 branch = None
1463 if not extra or b'branch' not in extra:
1464 try:
1465 branch = repo.dirstate.branch()
1466 except UnicodeDecodeError:
1467 raise error.Abort(_(b'branch name not in UTF-8!'))
1468 super(workingctx, self).__init__(
1469 repo, text, user, date, extra, changes, branch=branch
1470 )
1471
1472 def __iter__(self):
1473 d = self._repo.dirstate
1474 for f in d:
1475 if d[f] != b'r':
1476 yield f
1477
1478 def __contains__(self, key):
1479 return self._repo.dirstate[key] not in b"?r"
1480
1481 def hex(self):
1482 return wdirhex
1483
1484 @propertycache
1485 def _parents(self):
1486 p = self._repo.dirstate.parents()
1487 if p[1] == nullid:
1488 p = p[:-1]
1489 # use unfiltered repo to delay/avoid loading obsmarkers
1490 unfi = self._repo.unfiltered()
1491 return [changectx(self._repo, unfi.changelog.rev(n), n) for n in p]
1492
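# Illustrative sketch (editorial addition, not part of the original source):
# a workingctx is normally obtained through repo[None].
#
#     wctx = repo[None]
#     print(wctx.branch(), wctx.dirty())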
1493 def _fileinfo(self, path):
1494 # populate __dict__['_manifest'] as workingctx has no _manifestdelta
1495 self._manifest
1496 return super(workingctx, self)._fileinfo(path)
1497
1498 def _buildflagfunc(self):
1499 # Create a fallback function for getting file flags when the
1500 # filesystem doesn't support them
1501
1502 copiesget = self._repo.dirstate.copies().get
1503 parents = self.parents()
1504 if len(parents) < 2:
1505 # when we have one parent, it's easy: copy from parent
1506 man = parents[0].manifest()
1507
1508 def func(f):
1509 f = copiesget(f, f)
1510 return man.flags(f)
1511
1512 else:
1513 # merges are tricky: we try to reconstruct the unstored
1514 # result from the merge (issue1802)
1515 p1, p2 = parents
1516 pa = p1.ancestor(p2)
1517 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1518
1519 def func(f):
1520 f = copiesget(f, f) # may be wrong for merges with copies
1521 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1522 if fl1 == fl2:
1523 return fl1
1524 if fl1 == fla:
1525 return fl2
1526 if fl2 == fla:
1527 return fl1
1528 return b'' # punt for conflicts
1529
1530 return func
1531
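# Illustrative note (editorial addition, not part of the original source):
# the 3-way resolution above keeps a flag changed on exactly one side of the
# merge. For example, with fl1 = b'x', fl2 = b'', fla = b'', fl2 == fla
# means only p1 changed the flag, so b'x' wins; if both sides disagree with
# each other and the ancestor, it punts with b''.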
1532 @propertycache
1533 def _flagfunc(self):
1534 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1535
1536 def flags(self, path):
1537 if '_manifest' in self.__dict__:
1538 try:
1539 return self._manifest.flags(path)
1540 except KeyError:
1541 return b''
1542
1543 try:
1544 return self._flagfunc(path)
1545 except OSError:
1546 return b''
1547
1548 def filectx(self, path, filelog=None):
1549 """get a file context from the working directory"""
1550 return workingfilectx(
1551 self._repo, path, workingctx=self, filelog=filelog
1552 )
1553
1554 def dirty(self, missing=False, merge=True, branch=True):
1555 b"check whether a working directory is modified"
1556 # check subrepos first
1557 for s in sorted(self.substate):
1558 if self.sub(s).dirty(missing=missing):
1559 return True
1560 # check current working dir
1561 return (
1562 (merge and self.p2())
1563 or (branch and self.branch() != self.p1().branch())
1564 or self.modified()
1565 or self.added()
1566 or self.removed()
1567 or (missing and self.deleted())
1568 )
1569
1570 def add(self, list, prefix=b""):
1571 with self._repo.wlock():
1572 ui, ds = self._repo.ui, self._repo.dirstate
1573 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1574 rejected = []
1575 lstat = self._repo.wvfs.lstat
1576 for f in list:
1577 # ds.pathto() returns an absolute file when this is invoked from
1578 # the keyword extension. That gets flagged as non-portable on
1579 # Windows, since it contains the drive letter and colon.
1580 scmutil.checkportable(ui, os.path.join(prefix, f))
1581 try:
1582 st = lstat(f)
1583 except OSError:
1584 ui.warn(_(b"%s does not exist!\n") % uipath(f))
1585 rejected.append(f)
1586 continue
1587 limit = ui.configbytes(b'ui', b'large-file-limit')
1588 if limit != 0 and st.st_size > limit:
1589 ui.warn(
1590 _(
1591 b"%s: up to %d MB of RAM may be required "
1592 b"to manage this file\n"
1593 b"(use 'hg revert %s' to cancel the "
1594 b"pending addition)\n"
1595 )
1596 % (f, 3 * st.st_size // 1000000, uipath(f))
1597 )
1598 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1599 ui.warn(
1600 _(
1601 b"%s not added: only files and symlinks "
1602 b"supported currently\n"
1603 )
1604 % uipath(f)
1605 )
1606 rejected.append(f)
1607 elif ds[f] in b'amn':
1608 ui.warn(_(b"%s already tracked!\n") % uipath(f))
1609 elif ds[f] == b'r':
1610 ds.normallookup(f)
1611 else:
1612 ds.add(f)
1613 return rejected
1614
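# Illustrative sketch (editorial addition, not part of the original source):
# scheduling a new file for commit through the working context. The file
# name is hypothetical; add() returns the subset of paths it rejected.
#
#     wctx = repo[None]
#     rejected = wctx.add([b'newfile.txt'])
#     assert not rejected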
1615 def forget(self, files, prefix=b""):
1616 with self._repo.wlock():
1617 ds = self._repo.dirstate
1618 uipath = lambda f: ds.pathto(pathutil.join(prefix, f))
1619 rejected = []
1620 for f in files:
1621 if f not in ds:
1622 self._repo.ui.warn(_(b"%s not tracked!\n") % uipath(f))
1623 rejected.append(f)
1624 elif ds[f] != b'a':
1625 ds.remove(f)
1626 else:
1627 ds.drop(f)
1628 return rejected
1629
1630 def copy(self, source, dest):
1631 try:
1632 st = self._repo.wvfs.lstat(dest)
1633 except OSError as err:
1634 if err.errno != errno.ENOENT:
1635 raise
1636 self._repo.ui.warn(
1637 _(b"%s does not exist!\n") % self._repo.dirstate.pathto(dest)
1638 )
1639 return
1640 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1641 self._repo.ui.warn(
1642 _(b"copy failed: %s is not a file or a symbolic link\n")
1643 % self._repo.dirstate.pathto(dest)
1644 )
1645 else:
1646 with self._repo.wlock():
1647 ds = self._repo.dirstate
1648 if ds[dest] in b'?':
1649 ds.add(dest)
1650 elif ds[dest] in b'r':
1651 ds.normallookup(dest)
1652 ds.copy(source, dest)
1653
1654 def match(
1655 self,
1656 pats=None,
1657 include=None,
1658 exclude=None,
1659 default=b'glob',
1660 listsubrepos=False,
1661 badfn=None,
1662 ):
1663 r = self._repo
1664
1665 # Only a case insensitive filesystem needs magic to translate user input
1666 # to actual case in the filesystem.
1667 icasefs = not util.fscasesensitive(r.root)
1668 return matchmod.match(
1669 r.root,
1670 r.getcwd(),
1671 pats,
1672 include,
1673 exclude,
1674 default,
1675 auditor=r.auditor,
1676 ctx=self,
1677 listsubrepos=listsubrepos,
1678 badfn=badfn,
1679 icasefs=icasefs,
1680 )
1681
1682 def _filtersuspectsymlink(self, files):
1683 if not files or self._repo.dirstate._checklink:
1684 return files
1685
1686 # Symlink placeholders may get non-symlink-like contents
1687 # via user error or dereferencing by NFS or Samba servers,
1688 # so we filter out any placeholders that don't look like a
1689 # symlink
1690 sane = []
1691 for f in files:
1692 if self.flags(f) == b'l':
1693 d = self[f].data()
1694 if (
1695 d == b''
1696 or len(d) >= 1024
1697 or b'\n' in d
1698 or stringutil.binary(d)
1699 ):
1700 self._repo.ui.debug(
1701 b'ignoring suspect symlink placeholder "%s"\n' % f
1702 )
1703 continue
1704 sane.append(f)
1705 return sane
1706
1707 def _checklookup(self, files):
1708 # check for any possibly clean files
1709 if not files:
1710 return [], [], []
1711
1712 modified = []
1713 deleted = []
1714 fixup = []
1715 pctx = self._parents[0]
1716 # do a full compare of any files that might have changed
1717 for f in sorted(files):
1718 try:
1719 # This will return True for a file that got replaced by a
1720 # directory in the interim, but fixing that is pretty hard.
1721 if (
1722 f not in pctx
1723 or self.flags(f) != pctx.flags(f)
1724 or pctx[f].cmp(self[f])
1725 ):
1726 modified.append(f)
1727 else:
1728 fixup.append(f)
1729 except (IOError, OSError):
1730 # A file became inaccessible in between? Mark it as deleted,
1731 # matching dirstate behavior (issue5584).
1732 # The dirstate has more complex behavior around whether a
1733 # missing file matches a directory, etc, but we don't need to
1734 # bother with that: if f has made it to this point, we're sure
1735 # it's in the dirstate.
1736 deleted.append(f)
1737
1738 return modified, deleted, fixup
1739
1740 def _poststatusfixup(self, status, fixup):
1741 """update dirstate for files that are actually clean"""
1742 poststatus = self._repo.postdsstatus()
1743 if fixup or poststatus:
1744 try:
1745 oldid = self._repo.dirstate.identity()
1746
1747 # updating the dirstate is optional
1748 # so we don't wait on the lock
1749 # wlock can invalidate the dirstate, so cache normal _after_
1750 # taking the lock
1751 with self._repo.wlock(False):
1752 if self._repo.dirstate.identity() == oldid:
1753 if fixup:
1754 normal = self._repo.dirstate.normal
1755 for f in fixup:
1756 normal(f)
1757 # write changes out explicitly, because nesting
1758 # wlock at runtime may prevent 'wlock.release()'
1759 # after this block from doing so for subsequent
1760 # changing files
1761 tr = self._repo.currenttransaction()
1762 self._repo.dirstate.write(tr)
1763
1764 if poststatus:
1765 for ps in poststatus:
1766 ps(self, status)
1767 else:
1768 # in this case, writing changes out breaks
1769 # consistency, because .hg/dirstate was
1770 # already changed simultaneously after last
1771 # caching (see also issue5584 for detail)
1772 self._repo.ui.debug(
1773 b'skip updating dirstate: identity mismatch\n'
1774 )
1775 except error.LockError:
1776 pass
1777 finally:
1778 # Even if the wlock couldn't be grabbed, clear out the list.
1779 self._repo.clearpostdsstatus()
1780
1781 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
1782 '''Gets the status from the dirstate -- internal use only.'''
1783 subrepos = []
1784 if b'.hgsub' in self:
1785 subrepos = sorted(self.substate)
1786 cmp, s = self._repo.dirstate.status(
1787 match, subrepos, ignored=ignored, clean=clean, unknown=unknown
1788 )
1789
1790 # check for any possibly clean files
1791 fixup = []
1792 if cmp:
1793 modified2, deleted2, fixup = self._checklookup(cmp)
1794 s.modified.extend(modified2)
1795 s.deleted.extend(deleted2)
1796
1797 if fixup and clean:
1798 s.clean.extend(fixup)
1799
1800 self._poststatusfixup(s, fixup)
1801
1802 if match.always():
1803 # cache for performance
1804 if s.unknown or s.ignored or s.clean:
1805 # "_status" is cached with list*=False in the normal route
1806 self._status = scmutil.status(
1807 s.modified, s.added, s.removed, s.deleted, [], [], []
1808 )
1809 else:
1810 self._status = s
1811
1812 return s
1813
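# Illustrative sketch (editorial addition, not part of the original source):
# status information ultimately reaches callers through repo.status(), which
# consults this working-context machinery when comparing against the
# dirstate.
#
#     st = repo.status(unknown=True)
#     print(sorted(st.modified), sorted(st.added), sorted(st.unknown))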
1814 @propertycache
1815 def _copies(self):
1816 p1copies = {}
1817 p2copies = {}
1818 parents = self._repo.dirstate.parents()
1819 p1manifest = self._repo[parents[0]].manifest()
1820 p2manifest = self._repo[parents[1]].manifest()
1821 changedset = set(self.added()) | set(self.modified())
1822 narrowmatch = self._repo.narrowmatch()
1823 for dst, src in self._repo.dirstate.copies().items():
1824 if dst not in changedset or not narrowmatch(dst):
1825 continue
1826 if src in p1manifest:
1827 p1copies[dst] = src
1828 elif src in p2manifest:
1829 p2copies[dst] = src
1830 return p1copies, p2copies
1831
1832 @propertycache
1833 def _manifest(self):
1834 """generate a manifest corresponding to the values in self._status
1835
1836 This reuses the file nodeids from the parent, but uses special node
1837 identifiers for added and modified files. This is used by manifest
1838 merge to see that files are different and by the update logic to
1839 avoid deleting newly added files.
1840 """
1841 return self._buildstatusmanifest(self._status)
1842
1843 def _buildstatusmanifest(self, status):
1844 """Builds a manifest that includes the given status results."""
1845 parents = self.parents()
1846
1847 man = parents[0].manifest().copy()
1848
1849 ff = self._flagfunc
1850 for i, l in (
1851 (addednodeid, status.added),
1852 (modifiednodeid, status.modified),
1853 ):
1854 for f in l:
1855 man[f] = i
1856 try:
1857 man.setflag(f, ff(f))
1858 except OSError:
1859 pass
1860
1861 for f in status.deleted + status.removed:
1862 if f in man:
1863 del man[f]
1864
1865 return man
1866
1867 def _buildstatus(
1868 self, other, s, match, listignored, listclean, listunknown
1869 ):
1870 """build a status with respect to another context
1871
1872 This includes logic for maintaining the fast path of status when
1873 comparing the working directory against its parent, which is to skip
1874 building a new manifest if self (working directory) is not comparing
1875 against its parent (repo['.']).
1876 """
1877 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1878 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1879 # might have accidentally ended up with the entire contents of the file
1880 # they are supposed to be linking to.
1881 s.modified[:] = self._filtersuspectsymlink(s.modified)
1882 if other != self._repo[b'.']:
1883 s = super(workingctx, self)._buildstatus(
1884 other, s, match, listignored, listclean, listunknown
1885 )
1886 return s
1887
1888 def _matchstatus(self, other, match):
1889 """override the match method with a filter for directory patterns
1890
1891 We use inheritance to customize the match.bad method only in cases of
1892 workingctx since it belongs only to the working directory when
1893 comparing against the parent changeset.
1894
1895 If we aren't comparing against the working directory's parent, then we
1896 just use the default match object sent to us.
1897 """
1898 if other != self._repo[b'.']:
1899
1900 def bad(f, msg):
1901 # 'f' may be a directory pattern from 'match.files()',
1902 # so 'f not in ctx1' is not enough
1903 if f not in other and not other.hasdir(f):
1904 self._repo.ui.warn(
1905 b'%s: %s\n' % (self._repo.dirstate.pathto(f), msg)
1906 )
1907
1908 match.bad = bad
1909 return match
1910
1911 def walk(self, match):
1912 '''Generates matching file names.'''
1913 return sorted(
1914 self._repo.dirstate.walk(
1915 self._repo.narrowmatch(match),
1916 subrepos=sorted(self.substate),
1917 unknown=True,
1918 ignored=False,
1919 )
1920 )
1921
1922 def matches(self, match):
1923 match = self._repo.narrowmatch(match)
1924 ds = self._repo.dirstate
1925 return sorted(f for f in ds.matches(match) if ds[f] != b'r')
1926
1927 def markcommitted(self, node):
1928 with self._repo.dirstate.parentchange():
1929 for f in self.modified() + self.added():
1930 self._repo.dirstate.normal(f)
1931 for f in self.removed():
1932 self._repo.dirstate.drop(f)
1933 self._repo.dirstate.setparents(node)
1934
1935 # write changes out explicitly, because nesting wlock at
1936 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1937 # from immediately doing so for subsequent changing files
1938 self._repo.dirstate.write(self._repo.currenttransaction())
1939
1940 sparse.aftercommit(self._repo, node)
1941
1942
1943 class committablefilectx(basefilectx):
1944 """A committablefilectx provides common functionality for a file context
1945 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1946
1947 def __init__(self, repo, path, filelog=None, ctx=None):
1948 self._repo = repo
1949 self._path = path
1950 self._changeid = None
1951 self._filerev = self._filenode = None
1952
1953 if filelog is not None:
1954 self._filelog = filelog
1955 if ctx:
1956 self._changectx = ctx
1957
1958 def __nonzero__(self):
1959 return True
1960
1961 __bool__ = __nonzero__
1962
1963 def linkrev(self):
1964 # linked to self._changectx no matter if file is modified or not
1965 return self.rev()
1966
1967 def renamed(self):
1968 path = self.copysource()
1969 if not path:
1970 return None
1971 return path, self._changectx._parents[0]._manifest.get(path, nullid)
1972
1973 def parents(self):
1974 '''return parent filectxs, following copies if necessary'''
1975
1976 def filenode(ctx, path):
1977 return ctx._manifest.get(path, nullid)
1978
1979 path = self._path
1980 fl = self._filelog
1981 pcl = self._changectx._parents
1982 renamed = self.renamed()
1983
1984 if renamed:
1985 pl = [renamed + (None,)]
1986 else:
1987 pl = [(path, filenode(pcl[0], path), fl)]
1988
1989 for pc in pcl[1:]:
1990 pl.append((path, filenode(pc, path), fl))
1991
1992 return [
1993 self._parentfilectx(p, fileid=n, filelog=l)
1994 for p, n, l in pl
1995 if n != nullid
1996 ]
1997
1998 def children(self):
1999 return []
2000
2001
2002 class workingfilectx(committablefilectx):
2002 class workingfilectx(committablefilectx):
2003 """A workingfilectx object makes access to data related to a particular
2003 """A workingfilectx object makes access to data related to a particular
2004 file in the working directory convenient."""
2004 file in the working directory convenient."""
2005
2005
2006 def __init__(self, repo, path, filelog=None, workingctx=None):
2006 def __init__(self, repo, path, filelog=None, workingctx=None):
2007 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2007 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
2008
2008
2009 @propertycache
2009 @propertycache
2010 def _changectx(self):
2010 def _changectx(self):
2011 return workingctx(self._repo)
2011 return workingctx(self._repo)
2012
2012
2013 def data(self):
2013 def data(self):
2014 return self._repo.wread(self._path)
2014 return self._repo.wread(self._path)
2015
2015
2016 def copysource(self):
2016 def copysource(self):
2017 return self._repo.dirstate.copied(self._path)
2017 return self._repo.dirstate.copied(self._path)
2018
2018
2019 def size(self):
2019 def size(self):
2020 return self._repo.wvfs.lstat(self._path).st_size
2020 return self._repo.wvfs.lstat(self._path).st_size
2021
2021
2022 def lstat(self):
2022 def lstat(self):
2023 return self._repo.wvfs.lstat(self._path)
2023 return self._repo.wvfs.lstat(self._path)
2024
2024
2025 def date(self):
2025 def date(self):
2026 t, tz = self._changectx.date()
2026 t, tz = self._changectx.date()
2027 try:
2027 try:
2028 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2028 return (self._repo.wvfs.lstat(self._path)[stat.ST_MTIME], tz)
2029 except OSError as err:
2029 except OSError as err:
2030 if err.errno != errno.ENOENT:
2030 if err.errno != errno.ENOENT:
2031 raise
2031 raise
2032 return (t, tz)
2032 return (t, tz)
2033
2033
2034 def exists(self):
2034 def exists(self):
2035 return self._repo.wvfs.exists(self._path)
2035 return self._repo.wvfs.exists(self._path)
2036
2036
2037 def lexists(self):
2037 def lexists(self):
2038 return self._repo.wvfs.lexists(self._path)
2038 return self._repo.wvfs.lexists(self._path)
2039
2039
2040 def audit(self):
2040 def audit(self):
2041 return self._repo.wvfs.audit(self._path)
2041 return self._repo.wvfs.audit(self._path)
2042
2042
2043 def cmp(self, fctx):
2043 def cmp(self, fctx):
2044 """compare with other file context
2044 """compare with other file context
2045
2045
2046 returns True if different from fctx.
2046 returns True if different from fctx.
2047 """
2047 """
2048 # fctx should be a filectx (not a workingfilectx)
2048 # fctx should be a filectx (not a workingfilectx)
2049 # invert comparison to reuse the same code path
2049 # invert comparison to reuse the same code path
2050 return fctx.cmp(self)
2050 return fctx.cmp(self)
2051
2051
2052 def remove(self, ignoremissing=False):
2052 def remove(self, ignoremissing=False):
2053 """wraps unlink for a repo's working directory"""
2053 """wraps unlink for a repo's working directory"""
2054 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2054 rmdir = self._repo.ui.configbool(b'experimental', b'removeemptydirs')
2055 self._repo.wvfs.unlinkpath(
2055 self._repo.wvfs.unlinkpath(
2056 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2056 self._path, ignoremissing=ignoremissing, rmdir=rmdir
2057 )
2057 )
2058
2058
2059 def write(self, data, flags, backgroundclose=False, **kwargs):
2059 def write(self, data, flags, backgroundclose=False, **kwargs):
2060 """wraps repo.wwrite"""
2060 """wraps repo.wwrite"""
2061 return self._repo.wwrite(
2061 return self._repo.wwrite(
2062 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2062 self._path, data, flags, backgroundclose=backgroundclose, **kwargs
2063 )
2063 )
2064
2064
2065 def markcopied(self, src):
2065 def markcopied(self, src):
2066 """marks this file a copy of `src`"""
2066 """marks this file a copy of `src`"""
2067 self._repo.dirstate.copy(src, self._path)
2067 self._repo.dirstate.copy(src, self._path)
2068
2068
2069 def clearunknown(self):
2069 def clearunknown(self):
2070 """Removes conflicting items in the working directory so that
2070 """Removes conflicting items in the working directory so that
2071 ``write()`` can be called successfully.
2071 ``write()`` can be called successfully.
2072 """
2072 """
2073 wvfs = self._repo.wvfs
2073 wvfs = self._repo.wvfs
2074 f = self._path
2074 f = self._path
2075 wvfs.audit(f)
2075 wvfs.audit(f)
2076 if self._repo.ui.configbool(
2076 if self._repo.ui.configbool(
2077 b'experimental', b'merge.checkpathconflicts'
2077 b'experimental', b'merge.checkpathconflicts'
2078 ):
2078 ):
2079 # remove files under the directory as they should already be
2079 # remove files under the directory as they should already be
2080 # warned and backed up
2080 # warned and backed up
2081 if wvfs.isdir(f) and not wvfs.islink(f):
2081 if wvfs.isdir(f) and not wvfs.islink(f):
2082 wvfs.rmtree(f, forcibly=True)
2082 wvfs.rmtree(f, forcibly=True)
2083 for p in reversed(list(util.finddirs(f))):
2083 for p in reversed(list(pathutil.finddirs(f))):
2084 if wvfs.isfileorlink(p):
2084 if wvfs.isfileorlink(p):
2085 wvfs.unlink(p)
2085 wvfs.unlink(p)
2086 break
2086 break
2087 else:
2087 else:
2088 # don't remove files if path conflicts are not processed
2088 # don't remove files if path conflicts are not processed
2089 if wvfs.isdir(f) and not wvfs.islink(f):
2089 if wvfs.isdir(f) and not wvfs.islink(f):
2090 wvfs.removedirs(f)
2090 wvfs.removedirs(f)
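# For reference (an illustration, not part of the original module):
# pathutil.finddirs() yields the ancestor directories of a path from
# deepest to shallowest, ending with b'' for the repository root, so the
# reversed() walk above checks ancestors root-first, e.g.:
#
#     list(pathutil.finddirs(b'a/b/c'))  # -> [b'a/b', b'a', b'']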
2091
2091
2092 def setflags(self, l, x):
2092 def setflags(self, l, x):
2093 self._repo.wvfs.setflags(self._path, l, x)
2093 self._repo.wvfs.setflags(self._path, l, x)
2094
2094
2095
2095
2096 class overlayworkingctx(committablectx):
2096 class overlayworkingctx(committablectx):
2097 """Wraps another mutable context with a write-back cache that can be
2097 """Wraps another mutable context with a write-back cache that can be
2098 converted into a commit context.
2098 converted into a commit context.
2099
2099
2100 self._cache[path] maps to a dict with keys: {
2100 self._cache[path] maps to a dict with keys: {
2101 'exists': bool?
2101 'exists': bool?
2102 'date': date?
2102 'date': date?
2103 'data': str?
2103 'data': str?
2104 'flags': str?
2104 'flags': str?
2105 'copied': str? (path or None)
2105 'copied': str? (path or None)
2106 }
2106 }
2107 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2107 If `exists` is True, `flags` must be non-None and 'date' is non-None. If it
2108 is `False`, the file was deleted.
2108 is `False`, the file was deleted.
2109 """
2109 """
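# Illustrative sketch (not part of the original module): a plausible
# cache entry for a file written via write(); the literal path and data
# are made up:
#
#     self._cache[b'foo.txt'] = {
#         b'exists': True,
#         b'data': b'new contents\n',
#         b'date': dateutil.makedate(),
#         b'flags': b'',
#         b'copied': None,
#     }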
2110
2110
2111 def __init__(self, repo):
2111 def __init__(self, repo):
2112 super(overlayworkingctx, self).__init__(repo)
2112 super(overlayworkingctx, self).__init__(repo)
2113 self.clean()
2113 self.clean()
2114
2114
2115 def setbase(self, wrappedctx):
2115 def setbase(self, wrappedctx):
2116 self._wrappedctx = wrappedctx
2116 self._wrappedctx = wrappedctx
2117 self._parents = [wrappedctx]
2117 self._parents = [wrappedctx]
2118 # Drop old manifest cache as it is now out of date.
2118 # Drop old manifest cache as it is now out of date.
2119 # This is necessary when, e.g., rebasing several nodes with one
2119 # This is necessary when, e.g., rebasing several nodes with one
2120 # ``overlayworkingctx`` (e.g. with --collapse).
2120 # ``overlayworkingctx`` (e.g. with --collapse).
2121 util.clearcachedproperty(self, b'_manifest')
2121 util.clearcachedproperty(self, b'_manifest')
2122
2122
2123 def data(self, path):
2123 def data(self, path):
2124 if self.isdirty(path):
2124 if self.isdirty(path):
2125 if self._cache[path][b'exists']:
2125 if self._cache[path][b'exists']:
2126 if self._cache[path][b'data'] is not None:
2126 if self._cache[path][b'data'] is not None:
2127 return self._cache[path][b'data']
2127 return self._cache[path][b'data']
2128 else:
2128 else:
2129 # Must fallback here, too, because we only set flags.
2129 # Must fallback here, too, because we only set flags.
2130 return self._wrappedctx[path].data()
2130 return self._wrappedctx[path].data()
2131 else:
2131 else:
2132 raise error.ProgrammingError(
2132 raise error.ProgrammingError(
2133 b"No such file or directory: %s" % path
2133 b"No such file or directory: %s" % path
2134 )
2134 )
2135 else:
2135 else:
2136 return self._wrappedctx[path].data()
2136 return self._wrappedctx[path].data()
2137
2137
2138 @propertycache
2138 @propertycache
2139 def _manifest(self):
2139 def _manifest(self):
2140 parents = self.parents()
2140 parents = self.parents()
2141 man = parents[0].manifest().copy()
2141 man = parents[0].manifest().copy()
2142
2142
2143 flag = self._flagfunc
2143 flag = self._flagfunc
2144 for path in self.added():
2144 for path in self.added():
2145 man[path] = addednodeid
2145 man[path] = addednodeid
2146 man.setflag(path, flag(path))
2146 man.setflag(path, flag(path))
2147 for path in self.modified():
2147 for path in self.modified():
2148 man[path] = modifiednodeid
2148 man[path] = modifiednodeid
2149 man.setflag(path, flag(path))
2149 man.setflag(path, flag(path))
2150 for path in self.removed():
2150 for path in self.removed():
2151 del man[path]
2151 del man[path]
2152 return man
2152 return man
2153
2153
2154 @propertycache
2154 @propertycache
2155 def _flagfunc(self):
2155 def _flagfunc(self):
2156 def f(path):
2156 def f(path):
2157 return self._cache[path][b'flags']
2157 return self._cache[path][b'flags']
2158
2158
2159 return f
2159 return f
2160
2160
2161 def files(self):
2161 def files(self):
2162 return sorted(self.added() + self.modified() + self.removed())
2162 return sorted(self.added() + self.modified() + self.removed())
2163
2163
2164 def modified(self):
2164 def modified(self):
2165 return [
2165 return [
2166 f
2166 f
2167 for f in self._cache.keys()
2167 for f in self._cache.keys()
2168 if self._cache[f][b'exists'] and self._existsinparent(f)
2168 if self._cache[f][b'exists'] and self._existsinparent(f)
2169 ]
2169 ]
2170
2170
2171 def added(self):
2171 def added(self):
2172 return [
2172 return [
2173 f
2173 f
2174 for f in self._cache.keys()
2174 for f in self._cache.keys()
2175 if self._cache[f][b'exists'] and not self._existsinparent(f)
2175 if self._cache[f][b'exists'] and not self._existsinparent(f)
2176 ]
2176 ]
2177
2177
2178 def removed(self):
2178 def removed(self):
2179 return [
2179 return [
2180 f
2180 f
2181 for f in self._cache.keys()
2181 for f in self._cache.keys()
2182 if not self._cache[f][b'exists'] and self._existsinparent(f)
2182 if not self._cache[f][b'exists'] and self._existsinparent(f)
2183 ]
2183 ]
2184
2184
2185 def p1copies(self):
2185 def p1copies(self):
2186 copies = self._wrappedctx.p1copies().copy()
2186 copies = self._wrappedctx.p1copies().copy()
2187 narrowmatch = self._repo.narrowmatch()
2187 narrowmatch = self._repo.narrowmatch()
2188 for f in self._cache.keys():
2188 for f in self._cache.keys():
2189 if not narrowmatch(f):
2189 if not narrowmatch(f):
2190 continue
2190 continue
2191 copies.pop(f, None) # delete if it exists
2191 copies.pop(f, None) # delete if it exists
2192 source = self._cache[f][b'copied']
2192 source = self._cache[f][b'copied']
2193 if source:
2193 if source:
2194 copies[f] = source
2194 copies[f] = source
2195 return copies
2195 return copies
2196
2196
2197 def p2copies(self):
2197 def p2copies(self):
2198 copies = self._wrappedctx.p2copies().copy()
2198 copies = self._wrappedctx.p2copies().copy()
2199 narrowmatch = self._repo.narrowmatch()
2199 narrowmatch = self._repo.narrowmatch()
2200 for f in self._cache.keys():
2200 for f in self._cache.keys():
2201 if not narrowmatch(f):
2201 if not narrowmatch(f):
2202 continue
2202 continue
2203 copies.pop(f, None) # delete if it exists
2203 copies.pop(f, None) # delete if it exists
2204 source = self._cache[f][b'copied']
2204 source = self._cache[f][b'copied']
2205 if source:
2205 if source:
2206 copies[f] = source
2206 copies[f] = source
2207 return copies
2207 return copies
2208
2208
2209 def isinmemory(self):
2209 def isinmemory(self):
2210 return True
2210 return True
2211
2211
2212 def filedate(self, path):
2212 def filedate(self, path):
2213 if self.isdirty(path):
2213 if self.isdirty(path):
2214 return self._cache[path][b'date']
2214 return self._cache[path][b'date']
2215 else:
2215 else:
2216 return self._wrappedctx[path].date()
2216 return self._wrappedctx[path].date()
2217
2217
2218 def markcopied(self, path, origin):
2218 def markcopied(self, path, origin):
2219 self._markdirty(
2219 self._markdirty(
2220 path,
2220 path,
2221 exists=True,
2221 exists=True,
2222 date=self.filedate(path),
2222 date=self.filedate(path),
2223 flags=self.flags(path),
2223 flags=self.flags(path),
2224 copied=origin,
2224 copied=origin,
2225 )
2225 )
2226
2226
2227 def copydata(self, path):
2227 def copydata(self, path):
2228 if self.isdirty(path):
2228 if self.isdirty(path):
2229 return self._cache[path][b'copied']
2229 return self._cache[path][b'copied']
2230 else:
2230 else:
2231 return None
2231 return None
2232
2232
2233 def flags(self, path):
2233 def flags(self, path):
2234 if self.isdirty(path):
2234 if self.isdirty(path):
2235 if self._cache[path][b'exists']:
2235 if self._cache[path][b'exists']:
2236 return self._cache[path][b'flags']
2236 return self._cache[path][b'flags']
2237 else:
2237 else:
2238 raise error.ProgrammingError(
2238 raise error.ProgrammingError(
2239 b"No such file or directory: %s" % path
2239 b"No such file or directory: %s" % path
2240 )
2240 )
2241 else:
2241 else:
2242 return self._wrappedctx[path].flags()
2242 return self._wrappedctx[path].flags()
2243
2243
2244 def __contains__(self, key):
2244 def __contains__(self, key):
2245 if key in self._cache:
2245 if key in self._cache:
2246 return self._cache[key][b'exists']
2246 return self._cache[key][b'exists']
2247 return key in self.p1()
2247 return key in self.p1()
2248
2248
2249 def _existsinparent(self, path):
2249 def _existsinparent(self, path):
2250 try:
2250 try:
2251 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
2251 # ``commitctx`` raises a ``ManifestLookupError`` if a path does not
2252 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2252 # exist, unlike ``workingctx``, which returns a ``workingfilectx``
2253 # with an ``exists()`` function.
2253 # with an ``exists()`` function.
2254 self._wrappedctx[path]
2254 self._wrappedctx[path]
2255 return True
2255 return True
2256 except error.ManifestLookupError:
2256 except error.ManifestLookupError:
2257 return False
2257 return False
2258
2258
2259 def _auditconflicts(self, path):
2259 def _auditconflicts(self, path):
2260 """Replicates conflict checks done by wvfs.write().
2260 """Replicates conflict checks done by wvfs.write().
2261
2261
2262 Since we never write to the filesystem and never call `applyupdates` in
2262 Since we never write to the filesystem and never call `applyupdates` in
2263 IMM, we'll never check that a path is actually writable -- e.g., because
2263 IMM, we'll never check that a path is actually writable -- e.g., because
2264 it adds `a/foo`, but `a` is actually a file in the other commit.
2264 it adds `a/foo`, but `a` is actually a file in the other commit.
2265 """
2265 """
2266
2266
2267 def fail(path, component):
2267 def fail(path, component):
2268 # p1() is the base and we're receiving "writes" for p2()'s
2268 # p1() is the base and we're receiving "writes" for p2()'s
2269 # files.
2269 # files.
2270 if b'l' in self.p1()[component].flags():
2270 if b'l' in self.p1()[component].flags():
2271 raise error.Abort(
2271 raise error.Abort(
2272 b"error: %s conflicts with symlink %s "
2272 b"error: %s conflicts with symlink %s "
2273 b"in %d." % (path, component, self.p1().rev())
2273 b"in %d." % (path, component, self.p1().rev())
2274 )
2274 )
2275 else:
2275 else:
2276 raise error.Abort(
2276 raise error.Abort(
2277 b"error: '%s' conflicts with file '%s' in "
2277 b"error: '%s' conflicts with file '%s' in "
2278 b"%d." % (path, component, self.p1().rev())
2278 b"%d." % (path, component, self.p1().rev())
2279 )
2279 )
2280
2280
2281 # Test that each new directory to be created to write this path from p2
2281 # Test that each new directory to be created to write this path from p2
2282 # is not a file in p1.
2282 # is not a file in p1.
2283 components = path.split(b'/')
2283 components = path.split(b'/')
2284 for i in pycompat.xrange(len(components)):
2284 for i in pycompat.xrange(len(components)):
2285 component = b"/".join(components[0:i])
2285 component = b"/".join(components[0:i])
2286 if component in self:
2286 if component in self:
2287 fail(path, component)
2287 fail(path, component)
2288
2288
2289 # Test the other direction -- that this path from p2 isn't a directory
2289 # Test the other direction -- that this path from p2 isn't a directory
2290 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2290 # in p1 (test that p1 doesn't have any paths matching `path/*`).
2291 match = self.match([path], default=b'path')
2291 match = self.match([path], default=b'path')
2292 matches = self.p1().manifest().matches(match)
2292 matches = self.p1().manifest().matches(match)
2293 mfiles = matches.keys()
2293 mfiles = matches.keys()
2294 if len(mfiles) > 0:
2294 if len(mfiles) > 0:
2295 if len(mfiles) == 1 and mfiles[0] == path:
2295 if len(mfiles) == 1 and mfiles[0] == path:
2296 return
2296 return
2297 # omit the files which are deleted in current IMM wctx
2297 # omit the files which are deleted in current IMM wctx
2298 mfiles = [m for m in mfiles if m in self]
2298 mfiles = [m for m in mfiles if m in self]
2299 if not mfiles:
2299 if not mfiles:
2300 return
2300 return
2301 raise error.Abort(
2301 raise error.Abort(
2302 b"error: file '%s' cannot be written because "
2302 b"error: file '%s' cannot be written because "
2303 b" '%s/' is a directory in %s (containing %d "
2303 b" '%s/' is a directory in %s (containing %d "
2304 b"entries: %s)"
2304 b"entries: %s)"
2305 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2305 % (path, path, self.p1(), len(mfiles), b', '.join(mfiles))
2306 )
2306 )
2307
2307
2308 def write(self, path, data, flags=b'', **kwargs):
2308 def write(self, path, data, flags=b'', **kwargs):
2309 if data is None:
2309 if data is None:
2310 raise error.ProgrammingError(b"data must be non-None")
2310 raise error.ProgrammingError(b"data must be non-None")
2311 self._auditconflicts(path)
2311 self._auditconflicts(path)
2312 self._markdirty(
2312 self._markdirty(
2313 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2313 path, exists=True, data=data, date=dateutil.makedate(), flags=flags
2314 )
2314 )
2315
2315
2316 def setflags(self, path, l, x):
2316 def setflags(self, path, l, x):
2317 flag = b''
2317 flag = b''
2318 if l:
2318 if l:
2319 flag = b'l'
2319 flag = b'l'
2320 elif x:
2320 elif x:
2321 flag = b'x'
2321 flag = b'x'
2322 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2322 self._markdirty(path, exists=True, date=dateutil.makedate(), flags=flag)
2323
2323
2324 def remove(self, path):
2324 def remove(self, path):
2325 self._markdirty(path, exists=False)
2325 self._markdirty(path, exists=False)
2326
2326
2327 def exists(self, path):
2327 def exists(self, path):
2328 """exists behaves like `lexists`, but needs to follow symlinks and
2328 """exists behaves like `lexists`, but needs to follow symlinks and
2329 return False if they are broken.
2329 return False if they are broken.
2330 """
2330 """
2331 if self.isdirty(path):
2331 if self.isdirty(path):
2332 # If this path exists and is a symlink, "follow" it by calling
2332 # If this path exists and is a symlink, "follow" it by calling
2333 # exists on the destination path.
2333 # exists on the destination path.
2334 if (
2334 if (
2335 self._cache[path][b'exists']
2335 self._cache[path][b'exists']
2336 and b'l' in self._cache[path][b'flags']
2336 and b'l' in self._cache[path][b'flags']
2337 ):
2337 ):
2338 return self.exists(self._cache[path][b'data'].strip())
2338 return self.exists(self._cache[path][b'data'].strip())
2339 else:
2339 else:
2340 return self._cache[path][b'exists']
2340 return self._cache[path][b'exists']
2341
2341
2342 return self._existsinparent(path)
2342 return self._existsinparent(path)
2343
2343
2344 def lexists(self, path):
2344 def lexists(self, path):
2345 """lexists returns True if the path exists"""
2345 """lexists returns True if the path exists"""
2346 if self.isdirty(path):
2346 if self.isdirty(path):
2347 return self._cache[path][b'exists']
2347 return self._cache[path][b'exists']
2348
2348
2349 return self._existsinparent(path)
2349 return self._existsinparent(path)
2350
2350
2351 def size(self, path):
2351 def size(self, path):
2352 if self.isdirty(path):
2352 if self.isdirty(path):
2353 if self._cache[path][b'exists']:
2353 if self._cache[path][b'exists']:
2354 return len(self._cache[path][b'data'])
2354 return len(self._cache[path][b'data'])
2355 else:
2355 else:
2356 raise error.ProgrammingError(
2356 raise error.ProgrammingError(
2357 b"No such file or directory: %s" % path
2357 b"No such file or directory: %s" % path
2358 )
2358 )
2359 return self._wrappedctx[path].size()
2359 return self._wrappedctx[path].size()
2360
2360
2361 def tomemctx(
2361 def tomemctx(
2362 self,
2362 self,
2363 text,
2363 text,
2364 branch=None,
2364 branch=None,
2365 extra=None,
2365 extra=None,
2366 date=None,
2366 date=None,
2367 parents=None,
2367 parents=None,
2368 user=None,
2368 user=None,
2369 editor=None,
2369 editor=None,
2370 ):
2370 ):
2371 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2371 """Converts this ``overlayworkingctx`` into a ``memctx`` ready to be
2372 committed.
2372 committed.
2373
2373
2374 ``text`` is the commit message.
2374 ``text`` is the commit message.
2375 ``parents`` (optional) are rev numbers.
2375 ``parents`` (optional) are rev numbers.
2376 """
2376 """
2377 # Default parents to the wrapped contexts' if not passed.
2377 # Default parents to the wrapped contexts' if not passed.
2378 if parents is None:
2378 if parents is None:
2379 parents = self._wrappedctx.parents()
2379 parents = self._wrappedctx.parents()
2380 if len(parents) == 1:
2380 if len(parents) == 1:
2381 parents = (parents[0], None)
2381 parents = (parents[0], None)
2382
2382
2383 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2383 # ``parents`` is passed as rev numbers; convert to ``commitctxs``.
2384 if parents[1] is None:
2384 if parents[1] is None:
2385 parents = (self._repo[parents[0]], None)
2385 parents = (self._repo[parents[0]], None)
2386 else:
2386 else:
2387 parents = (self._repo[parents[0]], self._repo[parents[1]])
2387 parents = (self._repo[parents[0]], self._repo[parents[1]])
2388
2388
2389 files = self.files()
2389 files = self.files()
2390
2390
2391 def getfile(repo, memctx, path):
2391 def getfile(repo, memctx, path):
2392 if self._cache[path][b'exists']:
2392 if self._cache[path][b'exists']:
2393 return memfilectx(
2393 return memfilectx(
2394 repo,
2394 repo,
2395 memctx,
2395 memctx,
2396 path,
2396 path,
2397 self._cache[path][b'data'],
2397 self._cache[path][b'data'],
2398 b'l' in self._cache[path][b'flags'],
2398 b'l' in self._cache[path][b'flags'],
2399 b'x' in self._cache[path][b'flags'],
2399 b'x' in self._cache[path][b'flags'],
2400 self._cache[path][b'copied'],
2400 self._cache[path][b'copied'],
2401 )
2401 )
2402 else:
2402 else:
2403 # Returning None, but including the path in `files`, is
2403 # Returning None, but including the path in `files`, is
2404 # necessary for memctx to register a deletion.
2404 # necessary for memctx to register a deletion.
2405 return None
2405 return None
2406
2406
2407 return memctx(
2407 return memctx(
2408 self._repo,
2408 self._repo,
2409 parents,
2409 parents,
2410 text,
2410 text,
2411 files,
2411 files,
2412 getfile,
2412 getfile,
2413 date=date,
2413 date=date,
2414 extra=extra,
2414 extra=extra,
2415 user=user,
2415 user=user,
2416 branch=branch,
2416 branch=branch,
2417 editor=editor,
2417 editor=editor,
2418 )
2418 )
2419
2419
2420 def isdirty(self, path):
2420 def isdirty(self, path):
2421 return path in self._cache
2421 return path in self._cache
2422
2422
2423 def isempty(self):
2423 def isempty(self):
2424 # We need to discard any keys that are actually clean before the empty
2424 # We need to discard any keys that are actually clean before the empty
2425 # commit check.
2425 # commit check.
2426 self._compact()
2426 self._compact()
2427 return len(self._cache) == 0
2427 return len(self._cache) == 0
2428
2428
2429 def clean(self):
2429 def clean(self):
2430 self._cache = {}
2430 self._cache = {}
2431
2431
2432 def _compact(self):
2432 def _compact(self):
2433 """Removes keys from the cache that are actually clean, by comparing
2433 """Removes keys from the cache that are actually clean, by comparing
2434 them with the underlying context.
2434 them with the underlying context.
2435
2435
2436 This can occur during the merge process, e.g. by passing --tool :local
2436 This can occur during the merge process, e.g. by passing --tool :local
2437 to resolve a conflict.
2437 to resolve a conflict.
2438 """
2438 """
2439 keys = []
2439 keys = []
2440 # This won't be perfect, but can help performance significantly when
2440 # This won't be perfect, but can help performance significantly when
2441 # using things like remotefilelog.
2441 # using things like remotefilelog.
2442 scmutil.prefetchfiles(
2442 scmutil.prefetchfiles(
2443 self.repo(),
2443 self.repo(),
2444 [self.p1().rev()],
2444 [self.p1().rev()],
2445 scmutil.matchfiles(self.repo(), self._cache.keys()),
2445 scmutil.matchfiles(self.repo(), self._cache.keys()),
2446 )
2446 )
2447
2447
2448 for path in self._cache.keys():
2448 for path in self._cache.keys():
2449 cache = self._cache[path]
2449 cache = self._cache[path]
2450 try:
2450 try:
2451 underlying = self._wrappedctx[path]
2451 underlying = self._wrappedctx[path]
2452 if (
2452 if (
2453 underlying.data() == cache[b'data']
2453 underlying.data() == cache[b'data']
2454 and underlying.flags() == cache[b'flags']
2454 and underlying.flags() == cache[b'flags']
2455 ):
2455 ):
2456 keys.append(path)
2456 keys.append(path)
2457 except error.ManifestLookupError:
2457 except error.ManifestLookupError:
2458 # Path not in the underlying manifest (created).
2458 # Path not in the underlying manifest (created).
2459 continue
2459 continue
2460
2460
2461 for path in keys:
2461 for path in keys:
2462 del self._cache[path]
2462 del self._cache[path]
2463 return keys
2463 return keys
2464
2464
2465 def _markdirty(
2465 def _markdirty(
2466 self, path, exists, data=None, date=None, flags=b'', copied=None
2466 self, path, exists, data=None, date=None, flags=b'', copied=None
2467 ):
2467 ):
2468 # data not provided, let's see if we already have some; if not, let's
2468 # data not provided, let's see if we already have some; if not, let's
2469 # grab it from our underlying context, so that we always have data if
2469 # grab it from our underlying context, so that we always have data if
2470 # the file is marked as existing.
2470 # the file is marked as existing.
2471 if exists and data is None:
2471 if exists and data is None:
2472 oldentry = self._cache.get(path) or {}
2472 oldentry = self._cache.get(path) or {}
2473 data = oldentry.get(b'data')
2473 data = oldentry.get(b'data')
2474 if data is None:
2474 if data is None:
2475 data = self._wrappedctx[path].data()
2475 data = self._wrappedctx[path].data()
2476
2476
2477 self._cache[path] = {
2477 self._cache[path] = {
2478 b'exists': exists,
2478 b'exists': exists,
2479 b'data': data,
2479 b'data': data,
2480 b'date': date,
2480 b'date': date,
2481 b'flags': flags,
2481 b'flags': flags,
2482 b'copied': copied,
2482 b'copied': copied,
2483 }
2483 }
2484
2484
2485 def filectx(self, path, filelog=None):
2485 def filectx(self, path, filelog=None):
2486 return overlayworkingfilectx(
2486 return overlayworkingfilectx(
2487 self._repo, path, parent=self, filelog=filelog
2487 self._repo, path, parent=self, filelog=filelog
2488 )
2488 )
2489
2489
2490
2490
2491 class overlayworkingfilectx(committablefilectx):
2491 class overlayworkingfilectx(committablefilectx):
2492 """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
2492 """Wraps a ``workingfilectx`` but intercepts all writes into an in-memory
2493 cache, which can be flushed through later by calling ``flush()``."""
2493 cache, which can be flushed through later by calling ``flush()``."""
2494
2494
2495 def __init__(self, repo, path, filelog=None, parent=None):
2495 def __init__(self, repo, path, filelog=None, parent=None):
2496 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2496 super(overlayworkingfilectx, self).__init__(repo, path, filelog, parent)
2497 self._repo = repo
2497 self._repo = repo
2498 self._parent = parent
2498 self._parent = parent
2499 self._path = path
2499 self._path = path
2500
2500
2501 def cmp(self, fctx):
2501 def cmp(self, fctx):
2502 return self.data() != fctx.data()
2502 return self.data() != fctx.data()
2503
2503
2504 def changectx(self):
2504 def changectx(self):
2505 return self._parent
2505 return self._parent
2506
2506
2507 def data(self):
2507 def data(self):
2508 return self._parent.data(self._path)
2508 return self._parent.data(self._path)
2509
2509
2510 def date(self):
2510 def date(self):
2511 return self._parent.filedate(self._path)
2511 return self._parent.filedate(self._path)
2512
2512
2513 def exists(self):
2513 def exists(self):
2514 return self.lexists()
2514 return self.lexists()
2515
2515
2516 def lexists(self):
2516 def lexists(self):
2517 return self._parent.exists(self._path)
2517 return self._parent.exists(self._path)
2518
2518
2519 def copysource(self):
2519 def copysource(self):
2520 return self._parent.copydata(self._path)
2520 return self._parent.copydata(self._path)
2521
2521
2522 def size(self):
2522 def size(self):
2523 return self._parent.size(self._path)
2523 return self._parent.size(self._path)
2524
2524
2525 def markcopied(self, origin):
2525 def markcopied(self, origin):
2526 self._parent.markcopied(self._path, origin)
2526 self._parent.markcopied(self._path, origin)
2527
2527
2528 def audit(self):
2528 def audit(self):
2529 pass
2529 pass
2530
2530
2531 def flags(self):
2531 def flags(self):
2532 return self._parent.flags(self._path)
2532 return self._parent.flags(self._path)
2533
2533
2534 def setflags(self, islink, isexec):
2534 def setflags(self, islink, isexec):
2535 return self._parent.setflags(self._path, islink, isexec)
2535 return self._parent.setflags(self._path, islink, isexec)
2536
2536
2537 def write(self, data, flags, backgroundclose=False, **kwargs):
2537 def write(self, data, flags, backgroundclose=False, **kwargs):
2538 return self._parent.write(self._path, data, flags, **kwargs)
2538 return self._parent.write(self._path, data, flags, **kwargs)
2539
2539
2540 def remove(self, ignoremissing=False):
2540 def remove(self, ignoremissing=False):
2541 return self._parent.remove(self._path)
2541 return self._parent.remove(self._path)
2542
2542
2543 def clearunknown(self):
2543 def clearunknown(self):
2544 pass
2544 pass
2545
2545
2546
2546
2547 class workingcommitctx(workingctx):
2547 class workingcommitctx(workingctx):
2548 """A workingcommitctx object makes access to data related to
2548 """A workingcommitctx object makes access to data related to
2549 the revision being committed convenient.
2549 the revision being committed convenient.
2550
2550
2551 This hides changes in the working directory, if they aren't
2551 This hides changes in the working directory, if they aren't
2552 committed in this context.
2552 committed in this context.
2553 """
2553 """
2554
2554
2555 def __init__(
2555 def __init__(
2556 self, repo, changes, text=b"", user=None, date=None, extra=None
2556 self, repo, changes, text=b"", user=None, date=None, extra=None
2557 ):
2557 ):
2558 super(workingcommitctx, self).__init__(
2558 super(workingcommitctx, self).__init__(
2559 repo, text, user, date, extra, changes
2559 repo, text, user, date, extra, changes
2560 )
2560 )
2561
2561
2562 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2562 def _dirstatestatus(self, match, ignored=False, clean=False, unknown=False):
2563 """Return matched files only in ``self._status``
2563 """Return matched files only in ``self._status``
2564
2564
2565 Uncommitted files appear "clean" via this context, even if
2565 Uncommitted files appear "clean" via this context, even if
2566 they aren't actually so in the working directory.
2566 they aren't actually so in the working directory.
2567 """
2567 """
2568 if clean:
2568 if clean:
2569 clean = [f for f in self._manifest if f not in self._changedset]
2569 clean = [f for f in self._manifest if f not in self._changedset]
2570 else:
2570 else:
2571 clean = []
2571 clean = []
2572 return scmutil.status(
2572 return scmutil.status(
2573 [f for f in self._status.modified if match(f)],
2573 [f for f in self._status.modified if match(f)],
2574 [f for f in self._status.added if match(f)],
2574 [f for f in self._status.added if match(f)],
2575 [f for f in self._status.removed if match(f)],
2575 [f for f in self._status.removed if match(f)],
2576 [],
2576 [],
2577 [],
2577 [],
2578 [],
2578 [],
2579 clean,
2579 clean,
2580 )
2580 )
2581
2581
2582 @propertycache
2582 @propertycache
2583 def _changedset(self):
2583 def _changedset(self):
2584 """Return the set of files changed in this context
2584 """Return the set of files changed in this context
2585 """
2585 """
2586 changed = set(self._status.modified)
2586 changed = set(self._status.modified)
2587 changed.update(self._status.added)
2587 changed.update(self._status.added)
2588 changed.update(self._status.removed)
2588 changed.update(self._status.removed)
2589 return changed
2589 return changed
2590
2590
2591
2591
2592 def makecachingfilectxfn(func):
2592 def makecachingfilectxfn(func):
2593 """Create a filectxfn that caches based on the path.
2593 """Create a filectxfn that caches based on the path.
2594
2594
2595 We can't use util.cachefunc because it uses all arguments as the cache
2595 We can't use util.cachefunc because it uses all arguments as the cache
2596 key and this creates a cycle since the arguments include the repo and
2596 key and this creates a cycle since the arguments include the repo and
2597 memctx.
2597 memctx.
2598 """
2598 """
2599 cache = {}
2599 cache = {}
2600
2600
2601 def getfilectx(repo, memctx, path):
2601 def getfilectx(repo, memctx, path):
2602 if path not in cache:
2602 if path not in cache:
2603 cache[path] = func(repo, memctx, path)
2603 cache[path] = func(repo, memctx, path)
2604 return cache[path]
2604 return cache[path]
2605
2605
2606 return getfilectx
2606 return getfilectx
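# Illustrative usage sketch (not part of the original module); the names
# ``expensivefn`` and ``slowstore`` are hypothetical:
#
#     def expensivefn(repo, memctx, path):
#         # e.g. fetch the file from a slow store; called once per path
#         return memfilectx(repo, memctx, path, slowstore.read(path))
#
#     filectxfn = makecachingfilectxfn(expensivefn)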
2607
2607
2608
2608
2609 def memfilefromctx(ctx):
2609 def memfilefromctx(ctx):
2610 """Given a context return a memfilectx for ctx[path]
2610 """Given a context return a memfilectx for ctx[path]
2611
2611
2612 This is a convenience method for building a memctx based on another
2612 This is a convenience method for building a memctx based on another
2613 context.
2613 context.
2614 """
2614 """
2615
2615
2616 def getfilectx(repo, memctx, path):
2616 def getfilectx(repo, memctx, path):
2617 fctx = ctx[path]
2617 fctx = ctx[path]
2618 copysource = fctx.copysource()
2618 copysource = fctx.copysource()
2619 return memfilectx(
2619 return memfilectx(
2620 repo,
2620 repo,
2621 memctx,
2621 memctx,
2622 path,
2622 path,
2623 fctx.data(),
2623 fctx.data(),
2624 islink=fctx.islink(),
2624 islink=fctx.islink(),
2625 isexec=fctx.isexec(),
2625 isexec=fctx.isexec(),
2626 copysource=copysource,
2626 copysource=copysource,
2627 )
2627 )
2628
2628
2629 return getfilectx
2629 return getfilectx
2630
2630
2631
2631
2632 def memfilefrompatch(patchstore):
2632 def memfilefrompatch(patchstore):
2633 """Given a patch (e.g. patchstore object) return a memfilectx
2633 """Given a patch (e.g. patchstore object) return a memfilectx
2634
2634
2635 This is a convenience method for building a memctx based on a patchstore.
2635 This is a convenience method for building a memctx based on a patchstore.
2636 """
2636 """
2637
2637
2638 def getfilectx(repo, memctx, path):
2638 def getfilectx(repo, memctx, path):
2639 data, mode, copysource = patchstore.getfile(path)
2639 data, mode, copysource = patchstore.getfile(path)
2640 if data is None:
2640 if data is None:
2641 return None
2641 return None
2642 islink, isexec = mode
2642 islink, isexec = mode
2643 return memfilectx(
2643 return memfilectx(
2644 repo,
2644 repo,
2645 memctx,
2645 memctx,
2646 path,
2646 path,
2647 data,
2647 data,
2648 islink=islink,
2648 islink=islink,
2649 isexec=isexec,
2649 isexec=isexec,
2650 copysource=copysource,
2650 copysource=copysource,
2651 )
2651 )
2652
2652
2653 return getfilectx
2653 return getfilectx
2654
2654
2655
2655
2656 class memctx(committablectx):
2656 class memctx(committablectx):
2657 """Use memctx to perform in-memory commits via localrepo.commitctx().
2657 """Use memctx to perform in-memory commits via localrepo.commitctx().
2658
2658
2659 Revision information is supplied at initialization time, while
2659 Revision information is supplied at initialization time, while
2660 related file data is made available through a callback
2660 related file data is made available through a callback
2661 mechanism. 'repo' is the current localrepo, 'parents' is a
2661 mechanism. 'repo' is the current localrepo, 'parents' is a
2662 sequence of two parent revision identifiers (pass None for every
2662 sequence of two parent revision identifiers (pass None for every
2663 missing parent), 'text' is the commit message and 'files' lists
2663 missing parent), 'text' is the commit message and 'files' lists
2664 names of files touched by the revision (normalized and relative to
2664 names of files touched by the revision (normalized and relative to
2665 repository root).
2665 repository root).
2666
2666
2667 filectxfn(repo, memctx, path) is a callable receiving the
2667 filectxfn(repo, memctx, path) is a callable receiving the
2668 repository, the current memctx object and the normalized path of
2668 repository, the current memctx object and the normalized path of
2669 requested file, relative to repository root. It is fired by the
2669 requested file, relative to repository root. It is fired by the
2670 commit function for every file in 'files', but calls order is
2670 commit function for every file in 'files', but calls order is
2671 undefined. If the file is available in the revision being
2671 undefined. If the file is available in the revision being
2672 committed (updated or added), filectxfn returns a memfilectx
2672 committed (updated or added), filectxfn returns a memfilectx
2673 object. If the file was removed, filectxfn returns None for recent
2673 object. If the file was removed, filectxfn returns None for recent
2674 Mercurial. Moved files are represented by marking the source file
2674 Mercurial. Moved files are represented by marking the source file
2675 removed and the new file added with copy information (see
2675 removed and the new file added with copy information (see
2676 memfilectx).
2676 memfilectx).
2677
2677
2678 user receives the committer name and defaults to current
2678 user receives the committer name and defaults to current
2679 repository username, date is the commit date in any format
2679 repository username, date is the commit date in any format
2680 supported by dateutil.parsedate() and defaults to current date, extra
2680 supported by dateutil.parsedate() and defaults to current date, extra
2681 is a dictionary of metadata or is left empty.
2681 is a dictionary of metadata or is left empty.
2682 """
2682 """
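# Illustrative sketch of the filectxfn protocol described above (not
# part of the original module); ``myfilectxfn``, ``p1node`` and the file
# names are hypothetical:
#
#     def myfilectxfn(repo, memctx, path):
#         if path == b'removed.txt':
#             return None  # registers a deletion
#         return memfilectx(repo, memctx, path, b'contents of %s\n' % path)
#
#     ctx = memctx(repo, (p1node, None), b'commit message',
#                  [b'added.txt', b'removed.txt'], myfilectxfn)
#     newnode = ctx.commit()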
2683
2683
2684 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2684 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2685 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2685 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2686 # this field to determine what to do in filectxfn.
2686 # this field to determine what to do in filectxfn.
2687 _returnnoneformissingfiles = True
2687 _returnnoneformissingfiles = True
2688
2688
2689 def __init__(
2689 def __init__(
2690 self,
2690 self,
2691 repo,
2691 repo,
2692 parents,
2692 parents,
2693 text,
2693 text,
2694 files,
2694 files,
2695 filectxfn,
2695 filectxfn,
2696 user=None,
2696 user=None,
2697 date=None,
2697 date=None,
2698 extra=None,
2698 extra=None,
2699 branch=None,
2699 branch=None,
2700 editor=False,
2700 editor=False,
2701 ):
2701 ):
2702 super(memctx, self).__init__(
2702 super(memctx, self).__init__(
2703 repo, text, user, date, extra, branch=branch
2703 repo, text, user, date, extra, branch=branch
2704 )
2704 )
2705 self._rev = None
2705 self._rev = None
2706 self._node = None
2706 self._node = None
2707 parents = [(p or nullid) for p in parents]
2707 parents = [(p or nullid) for p in parents]
2708 p1, p2 = parents
2708 p1, p2 = parents
2709 self._parents = [self._repo[p] for p in (p1, p2)]
2709 self._parents = [self._repo[p] for p in (p1, p2)]
2710 files = sorted(set(files))
2710 files = sorted(set(files))
2711 self._files = files
2711 self._files = files
2712 self.substate = {}
2712 self.substate = {}
2713
2713
2714 if isinstance(filectxfn, patch.filestore):
2714 if isinstance(filectxfn, patch.filestore):
2715 filectxfn = memfilefrompatch(filectxfn)
2715 filectxfn = memfilefrompatch(filectxfn)
2716 elif not callable(filectxfn):
2716 elif not callable(filectxfn):
2717 # if store is not callable, wrap it in a function
2717 # if store is not callable, wrap it in a function
2718 filectxfn = memfilefromctx(filectxfn)
2718 filectxfn = memfilefromctx(filectxfn)
2719
2719
2720 # memoizing increases performance for e.g. vcs convert scenarios.
2720 # memoizing increases performance for e.g. vcs convert scenarios.
2721 self._filectxfn = makecachingfilectxfn(filectxfn)
2721 self._filectxfn = makecachingfilectxfn(filectxfn)
2722
2722
2723 if editor:
2723 if editor:
2724 self._text = editor(self._repo, self, [])
2724 self._text = editor(self._repo, self, [])
2725 self._repo.savecommitmessage(self._text)
2725 self._repo.savecommitmessage(self._text)
2726
2726
2727 def filectx(self, path, filelog=None):
2727 def filectx(self, path, filelog=None):
2728 """get a file context from the working directory
2728 """get a file context from the working directory
2729
2729
2730 Returns None if file doesn't exist and should be removed."""
2730 Returns None if file doesn't exist and should be removed."""
2731 return self._filectxfn(self._repo, self, path)
2731 return self._filectxfn(self._repo, self, path)
2732
2732
2733 def commit(self):
2733 def commit(self):
2734 """commit context to the repo"""
2734 """commit context to the repo"""
2735 return self._repo.commitctx(self)
2735 return self._repo.commitctx(self)
2736
2736
2737 @propertycache
2737 @propertycache
2738 def _manifest(self):
2738 def _manifest(self):
2739 """generate a manifest based on the return values of filectxfn"""
2739 """generate a manifest based on the return values of filectxfn"""
2740
2740
2741 # keep this simple for now; just worry about p1
2741 # keep this simple for now; just worry about p1
2742 pctx = self._parents[0]
2742 pctx = self._parents[0]
2743 man = pctx.manifest().copy()
2743 man = pctx.manifest().copy()
2744
2744
2745 for f in self._status.modified:
2745 for f in self._status.modified:
2746 man[f] = modifiednodeid
2746 man[f] = modifiednodeid
2747
2747
2748 for f in self._status.added:
2748 for f in self._status.added:
2749 man[f] = addednodeid
2749 man[f] = addednodeid
2750
2750
2751 for f in self._status.removed:
2751 for f in self._status.removed:
2752 if f in man:
2752 if f in man:
2753 del man[f]
2753 del man[f]
2754
2754
2755 return man
2755 return man
2756
2756
2757 @propertycache
2757 @propertycache
2758 def _status(self):
2758 def _status(self):
2759 """Calculate exact status from ``files`` specified at construction
2759 """Calculate exact status from ``files`` specified at construction
2760 """
2760 """
2761 man1 = self.p1().manifest()
2761 man1 = self.p1().manifest()
2762 p2 = self._parents[1]
2762 p2 = self._parents[1]
2763 # "1 < len(self._parents)" can't be used for checking
2763 # "1 < len(self._parents)" can't be used for checking
2764 # existence of the 2nd parent, because "memctx._parents" is
2764 # existence of the 2nd parent, because "memctx._parents" is
2765 # explicitly initialized by the list, whose length is 2.
2765 # explicitly initialized by the list, whose length is 2.
2766 if p2.node() != nullid:
2766 if p2.node() != nullid:
2767 man2 = p2.manifest()
2767 man2 = p2.manifest()
2768 managing = lambda f: f in man1 or f in man2
2768 managing = lambda f: f in man1 or f in man2
2769 else:
2769 else:
2770 managing = lambda f: f in man1
2770 managing = lambda f: f in man1
2771
2771
2772 modified, added, removed = [], [], []
2772 modified, added, removed = [], [], []
2773 for f in self._files:
2773 for f in self._files:
2774 if not managing(f):
2774 if not managing(f):
2775 added.append(f)
2775 added.append(f)
2776 elif self[f]:
2776 elif self[f]:
2777 modified.append(f)
2777 modified.append(f)
2778 else:
2778 else:
2779 removed.append(f)
2779 removed.append(f)
2780
2780
2781 return scmutil.status(modified, added, removed, [], [], [], [])
2781 return scmutil.status(modified, added, removed, [], [], [], [])
2782
2782
2783
2783
2784 class memfilectx(committablefilectx):
2784 class memfilectx(committablefilectx):
2785 """memfilectx represents an in-memory file to commit.
2785 """memfilectx represents an in-memory file to commit.
2786
2786
2787 See memctx and committablefilectx for more details.
2787 See memctx and committablefilectx for more details.
2788 """
2788 """
2789
2789
2790 def __init__(
2790 def __init__(
2791 self,
2791 self,
2792 repo,
2792 repo,
2793 changectx,
2793 changectx,
2794 path,
2794 path,
2795 data,
2795 data,
2796 islink=False,
2796 islink=False,
2797 isexec=False,
2797 isexec=False,
2798 copysource=None,
2798 copysource=None,
2799 ):
2799 ):
2800 """
2800 """
2801 path is the normalized file path relative to repository root.
2801 path is the normalized file path relative to repository root.
2802 data is the file content as a string.
2802 data is the file content as a string.
2803 islink is True if the file is a symbolic link.
2803 islink is True if the file is a symbolic link.
2804 isexec is True if the file is executable.
2804 isexec is True if the file is executable.
2805 copied is the source file path if current file was copied in the
2805 copied is the source file path if current file was copied in the
2806 revision being committed, or None."""
2806 revision being committed, or None."""
2807 super(memfilectx, self).__init__(repo, path, None, changectx)
2807 super(memfilectx, self).__init__(repo, path, None, changectx)
2808 self._data = data
2808 self._data = data
2809 if islink:
2809 if islink:
2810 self._flags = b'l'
2810 self._flags = b'l'
2811 elif isexec:
2811 elif isexec:
2812 self._flags = b'x'
2812 self._flags = b'x'
2813 else:
2813 else:
2814 self._flags = b''
2814 self._flags = b''
2815 self._copysource = copysource
2815 self._copysource = copysource
2816
2816
2817 def copysource(self):
2817 def copysource(self):
2818 return self._copysource
2818 return self._copysource
2819
2819
2820 def cmp(self, fctx):
2820 def cmp(self, fctx):
2821 return self.data() != fctx.data()
2821 return self.data() != fctx.data()
2822
2822
2823 def data(self):
2823 def data(self):
2824 return self._data
2824 return self._data
2825
2825
2826 def remove(self, ignoremissing=False):
2826 def remove(self, ignoremissing=False):
2827 """wraps unlink for a repo's working directory"""
2827 """wraps unlink for a repo's working directory"""
2828 # need to figure out what to do here
2828 # need to figure out what to do here
2829 del self._changectx[self._path]
2829 del self._changectx[self._path]
2830
2830
2831 def write(self, data, flags, **kwargs):
2831 def write(self, data, flags, **kwargs):
2832 """wraps repo.wwrite"""
2832 """wraps repo.wwrite"""
2833 self._data = data
2833 self._data = data
2834
2834
2835
2835
2836 class metadataonlyctx(committablectx):
2836 class metadataonlyctx(committablectx):
2837 """Like memctx, but reusing the manifest of a different commit.
2837 """Like memctx, but reusing the manifest of a different commit.
2838 Intended to be used by lightweight operations that are creating
2838 Intended to be used by lightweight operations that are creating
2839 metadata-only changes.
2839 metadata-only changes.
2840
2840
2841 Revision information is supplied at initialization time. 'repo' is the
2841 Revision information is supplied at initialization time. 'repo' is the
2842 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2842 current localrepo, 'ctx' is the original revision whose manifest we're reusing,
2843 'parents' is a sequence of two parent revision identifiers (pass None for
2843 'parents' is a sequence of two parent revision identifiers (pass None for
2844 every missing parent), 'text' is the commit.
2844 every missing parent), 'text' is the commit.
2845
2845
2846 user receives the committer name and defaults to current repository
2846 user receives the committer name and defaults to current repository
2847 username, date is the commit date in any format supported by
2847 username, date is the commit date in any format supported by
2848 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2848 dateutil.parsedate() and defaults to current date, extra is a dictionary of
2849 metadata or is left empty.
2849 metadata or is left empty.
2850 """
2850 """
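# Illustrative sketch (not part of the original module): rewriting only
# the description of ``oldctx`` while reusing its manifest; the variable
# names are hypothetical:
#
#     newctx = metadataonlyctx(repo, oldctx, text=b'reworded message')
#     newnode = newctx.commit()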
2851
2851
2852 def __init__(
2852 def __init__(
2853 self,
2853 self,
2854 repo,
2854 repo,
2855 originalctx,
2855 originalctx,
2856 parents=None,
2856 parents=None,
2857 text=None,
2857 text=None,
2858 user=None,
2858 user=None,
2859 date=None,
2859 date=None,
2860 extra=None,
2860 extra=None,
2861 editor=False,
2861 editor=False,
2862 ):
2862 ):
2863 if text is None:
2863 if text is None:
2864 text = originalctx.description()
2864 text = originalctx.description()
2865 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2865 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2866 self._rev = None
2866 self._rev = None
2867 self._node = None
2867 self._node = None
2868 self._originalctx = originalctx
2868 self._originalctx = originalctx
2869 self._manifestnode = originalctx.manifestnode()
2869 self._manifestnode = originalctx.manifestnode()
2870 if parents is None:
2870 if parents is None:
2871 parents = originalctx.parents()
2871 parents = originalctx.parents()
2872 else:
2872 else:
2873 parents = [repo[p] for p in parents if p is not None]
2873 parents = [repo[p] for p in parents if p is not None]
2874 parents = parents[:]
2874 parents = parents[:]
2875 while len(parents) < 2:
2875 while len(parents) < 2:
2876 parents.append(repo[nullid])
2876 parents.append(repo[nullid])
2877 p1, p2 = self._parents = parents
2877 p1, p2 = self._parents = parents
2878
2878
2879 # sanity check to ensure that the reused manifest parents are
2879 # sanity check to ensure that the reused manifest parents are
2880 # manifests of our commit parents
2880 # manifests of our commit parents
2881 mp1, mp2 = self.manifestctx().parents
2881 mp1, mp2 = self.manifestctx().parents
2882 if p1 != nullid and p1.manifestnode() != mp1:
2882 if p1 != nullid and p1.manifestnode() != mp1:
2883 raise RuntimeError(
2883 raise RuntimeError(
2884 r"can't reuse the manifest: its p1 "
2884 r"can't reuse the manifest: its p1 "
2885 r"doesn't match the new ctx p1"
2885 r"doesn't match the new ctx p1"
2886 )
2886 )
2887 if p2 != nullid and p2.manifestnode() != mp2:
2887 if p2 != nullid and p2.manifestnode() != mp2:
2888 raise RuntimeError(
2888 raise RuntimeError(
2889 r"can't reuse the manifest: "
2889 r"can't reuse the manifest: "
2890 r"its p2 doesn't match the new ctx p2"
2890 r"its p2 doesn't match the new ctx p2"
2891 )
2891 )
2892
2892
2893 self._files = originalctx.files()
2893 self._files = originalctx.files()
2894 self.substate = {}
2894 self.substate = {}
2895
2895
2896 if editor:
2896 if editor:
2897 self._text = editor(self._repo, self, [])
2897 self._text = editor(self._repo, self, [])
2898 self._repo.savecommitmessage(self._text)
2898 self._repo.savecommitmessage(self._text)
2899
2899
2900 def manifestnode(self):
2900 def manifestnode(self):
2901 return self._manifestnode
2901 return self._manifestnode
2902
2902
2903 @property
2903 @property
2904 def _manifestctx(self):
2904 def _manifestctx(self):
2905 return self._repo.manifestlog[self._manifestnode]
2905 return self._repo.manifestlog[self._manifestnode]
2906
2906
2907 def filectx(self, path, filelog=None):
2907 def filectx(self, path, filelog=None):
2908 return self._originalctx.filectx(path, filelog=filelog)
2908 return self._originalctx.filectx(path, filelog=filelog)
2909
2909
2910 def commit(self):
2910 def commit(self):
2911 """commit context to the repo"""
2911 """commit context to the repo"""
2912 return self._repo.commitctx(self)
2912 return self._repo.commitctx(self)
2913
2913
2914 @property
2914 @property
2915 def _manifest(self):
2915 def _manifest(self):
2916 return self._originalctx.manifest()
2916 return self._originalctx.manifest()
2917
2917
2918 @propertycache
2918 @propertycache
2919 def _status(self):
2919 def _status(self):
2920 """Calculate exact status from ``files`` specified in the ``origctx``
2920 """Calculate exact status from ``files`` specified in the ``origctx``
2921 and parents manifests.
2921 and parents manifests.
2922 """
2922 """
2923 man1 = self.p1().manifest()
2923 man1 = self.p1().manifest()
2924 p2 = self._parents[1]
2924 p2 = self._parents[1]
2925 # "1 < len(self._parents)" can't be used for checking
2925 # "1 < len(self._parents)" can't be used for checking
2926 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2926 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2927 # explicitly initialized by the list, whose length is 2.
2927 # explicitly initialized by the list, whose length is 2.
2928 if p2.node() != nullid:
2928 if p2.node() != nullid:
2929 man2 = p2.manifest()
2929 man2 = p2.manifest()
2930 managing = lambda f: f in man1 or f in man2
2930 managing = lambda f: f in man1 or f in man2
2931 else:
2931 else:
2932 managing = lambda f: f in man1
2932 managing = lambda f: f in man1
2933
2933
2934 modified, added, removed = [], [], []
2934 modified, added, removed = [], [], []
2935 for f in self._files:
2935 for f in self._files:
2936 if not managing(f):
2936 if not managing(f):
2937 added.append(f)
2937 added.append(f)
2938 elif f in self:
2938 elif f in self:
2939 modified.append(f)
2939 modified.append(f)
2940 else:
2940 else:
2941 removed.append(f)
2941 removed.append(f)
2942
2942
2943 return scmutil.status(modified, added, removed, [], [], [], [])
2943 return scmutil.status(modified, added, removed, [], [], [], [])
2944
2944
2945
2945
2946 class arbitraryfilectx(object):
2946 class arbitraryfilectx(object):
2947 """Allows you to use filectx-like functions on a file in an arbitrary
2947 """Allows you to use filectx-like functions on a file in an arbitrary
2948 location on disk, possibly not in the working directory.
2948 location on disk, possibly not in the working directory.
2949 """
2949 """
2950
2950
2951 def __init__(self, path, repo=None):
2951 def __init__(self, path, repo=None):
2952 # Repo is optional because contrib/simplemerge uses this class.
2952 # Repo is optional because contrib/simplemerge uses this class.
2953 self._repo = repo
2953 self._repo = repo
2954 self._path = path
2954 self._path = path
2955
2955
2956 def cmp(self, fctx):
2956 def cmp(self, fctx):
2957 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2957 # filecmp follows symlinks whereas `cmp` should not, so skip the fast
2958 # path if either side is a symlink.
2958 # path if either side is a symlink.
2959 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
2959 symlinks = b'l' in self.flags() or b'l' in fctx.flags()
2960 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2960 if not symlinks and isinstance(fctx, workingfilectx) and self._repo:
2961 # Add a fast-path for merge if both sides are disk-backed.
2961 # Add a fast-path for merge if both sides are disk-backed.
2962 # Note that filecmp uses the opposite return values (True if same)
2962 # Note that filecmp uses the opposite return values (True if same)
2963 # from our cmp functions (True if different).
2963 # from our cmp functions (True if different).
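# Concretely: filecmp.cmp(a, b) is True for identical contents,
# while this cmp() must return True when they differ, hence ``not``.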
2964 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2964 return not filecmp.cmp(self.path(), self._repo.wjoin(fctx.path()))
2965 return self.data() != fctx.data()
2965 return self.data() != fctx.data()
2966
2966
2967 def path(self):
2967 def path(self):
2968 return self._path
2968 return self._path
2969
2969
2970 def flags(self):
2970 def flags(self):
2971 return b''
2971 return b''
2972
2972
2973 def data(self):
2973 def data(self):
2974 return util.readfile(self._path)
2974 return util.readfile(self._path)
2975
2975
2976 def decodeddata(self):
2976 def decodeddata(self):
2977 with open(self._path, b"rb") as f:
2977 with open(self._path, b"rb") as f:
2978 return f.read()
2978 return f.read()
2979
2979
2980 def remove(self):
2980 def remove(self):
2981 util.unlink(self._path)
2981 util.unlink(self._path)
2982
2982
2983 def write(self, data, flags, **kwargs):
2983 def write(self, data, flags, **kwargs):
2984 assert not flags
2984 assert not flags
2985 with open(self._path, b"wb") as f:
2985 with open(self._path, b"wb") as f:
2986 f.write(data)
2986 f.write(data)
@@ -1,4265 +1,4266 b''
1 # debugcommands.py - command processing for debug* commands
1 # debugcommands.py - command processing for debug* commands
2 #
2 #
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import codecs
10 import codecs
11 import collections
11 import collections
12 import difflib
12 import difflib
13 import errno
13 import errno
14 import operator
14 import operator
15 import os
15 import os
16 import random
16 import random
17 import re
17 import re
18 import socket
18 import socket
19 import ssl
19 import ssl
20 import stat
20 import stat
21 import string
21 import string
22 import subprocess
22 import subprocess
23 import sys
23 import sys
24 import time
24 import time
25
25
26 from .i18n import _
26 from .i18n import _
27 from .node import (
27 from .node import (
28 bin,
28 bin,
29 hex,
29 hex,
30 nullhex,
30 nullhex,
31 nullid,
31 nullid,
32 nullrev,
32 nullrev,
33 short,
33 short,
34 )
34 )
35 from .pycompat import (
35 from .pycompat import (
36 getattr,
36 getattr,
37 open,
37 open,
38 )
38 )
39 from . import (
39 from . import (
40 bundle2,
40 bundle2,
41 changegroup,
41 changegroup,
42 cmdutil,
42 cmdutil,
43 color,
43 color,
44 context,
44 context,
45 copies,
45 copies,
46 dagparser,
46 dagparser,
47 encoding,
47 encoding,
48 error,
48 error,
49 exchange,
49 exchange,
50 extensions,
50 extensions,
51 filemerge,
51 filemerge,
52 filesetlang,
52 filesetlang,
53 formatter,
53 formatter,
54 hg,
54 hg,
55 httppeer,
55 httppeer,
56 localrepo,
56 localrepo,
57 lock as lockmod,
57 lock as lockmod,
58 logcmdutil,
58 logcmdutil,
59 merge as mergemod,
59 merge as mergemod,
60 obsolete,
60 obsolete,
61 obsutil,
61 obsutil,
62 pathutil,
62 phases,
63 phases,
63 policy,
64 policy,
64 pvec,
65 pvec,
65 pycompat,
66 pycompat,
66 registrar,
67 registrar,
67 repair,
68 repair,
68 revlog,
69 revlog,
69 revset,
70 revset,
70 revsetlang,
71 revsetlang,
71 scmutil,
72 scmutil,
72 setdiscovery,
73 setdiscovery,
73 simplemerge,
74 simplemerge,
74 sshpeer,
75 sshpeer,
75 sslutil,
76 sslutil,
76 streamclone,
77 streamclone,
77 templater,
78 templater,
78 treediscovery,
79 treediscovery,
79 upgrade,
80 upgrade,
80 url as urlmod,
81 url as urlmod,
81 util,
82 util,
82 vfs as vfsmod,
83 vfs as vfsmod,
83 wireprotoframing,
84 wireprotoframing,
84 wireprotoserver,
85 wireprotoserver,
85 wireprotov2peer,
86 wireprotov2peer,
86 )
87 )
87 from .utils import (
88 from .utils import (
88 cborutil,
89 cborutil,
89 compression,
90 compression,
90 dateutil,
91 dateutil,
91 procutil,
92 procutil,
92 stringutil,
93 stringutil,
93 )
94 )
94
95
95 from .revlogutils import deltas as deltautil
96 from .revlogutils import deltas as deltautil
96
97
97 release = lockmod.release
98 release = lockmod.release
98
99
99 command = registrar.command()
100 command = registrar.command()
100
101
101
102
102 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
103 @command(b'debugancestor', [], _(b'[INDEX] REV1 REV2'), optionalrepo=True)
103 def debugancestor(ui, repo, *args):
104 def debugancestor(ui, repo, *args):
104 """find the ancestor revision of two revisions in a given index"""
105 """find the ancestor revision of two revisions in a given index"""
105 if len(args) == 3:
106 if len(args) == 3:
106 index, rev1, rev2 = args
107 index, rev1, rev2 = args
107 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
108 r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
108 lookup = r.lookup
109 lookup = r.lookup
109 elif len(args) == 2:
110 elif len(args) == 2:
110 if not repo:
111 if not repo:
111 raise error.Abort(
112 raise error.Abort(
112 _(b'there is no Mercurial repository here (.hg not found)')
113 _(b'there is no Mercurial repository here (.hg not found)')
113 )
114 )
114 rev1, rev2 = args
115 rev1, rev2 = args
115 r = repo.changelog
116 r = repo.changelog
116 lookup = repo.lookup
117 lookup = repo.lookup
117 else:
118 else:
118 raise error.Abort(_(b'either two or three arguments required'))
119 raise error.Abort(_(b'either two or three arguments required'))
119 a = r.ancestor(lookup(rev1), lookup(rev2))
120 a = r.ancestor(lookup(rev1), lookup(rev2))
120 ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
121 ui.write(b'%d:%s\n' % (r.rev(a), hex(a)))
121
122
122
123
123 @command(b'debugapplystreamclonebundle', [], b'FILE')
124 @command(b'debugapplystreamclonebundle', [], b'FILE')
124 def debugapplystreamclonebundle(ui, repo, fname):
125 def debugapplystreamclonebundle(ui, repo, fname):
125 """apply a stream clone bundle file"""
126 """apply a stream clone bundle file"""
126 f = hg.openpath(ui, fname)
127 f = hg.openpath(ui, fname)
127 gen = exchange.readbundle(ui, f, fname)
128 gen = exchange.readbundle(ui, f, fname)
128 gen.apply(repo)
129 gen.apply(repo)
129
130
130
131
131 @command(
132 @command(
132 b'debugbuilddag',
133 b'debugbuilddag',
133 [
134 [
134 (
135 (
135 b'm',
136 b'm',
136 b'mergeable-file',
137 b'mergeable-file',
137 None,
138 None,
138 _(b'add single file mergeable changes'),
139 _(b'add single file mergeable changes'),
139 ),
140 ),
140 (
141 (
141 b'o',
142 b'o',
142 b'overwritten-file',
143 b'overwritten-file',
143 None,
144 None,
144 _(b'add single file all revs overwrite'),
145 _(b'add single file all revs overwrite'),
145 ),
146 ),
146 (b'n', b'new-file', None, _(b'add new file at each rev')),
147 (b'n', b'new-file', None, _(b'add new file at each rev')),
147 ],
148 ],
148 _(b'[OPTION]... [TEXT]'),
149 _(b'[OPTION]... [TEXT]'),
149 )
150 )
150 def debugbuilddag(
151 def debugbuilddag(
151 ui,
152 ui,
152 repo,
153 repo,
153 text=None,
154 text=None,
154 mergeable_file=False,
155 mergeable_file=False,
155 overwritten_file=False,
156 overwritten_file=False,
156 new_file=False,
157 new_file=False,
157 ):
158 ):
158 """builds a repo with a given DAG from scratch in the current empty repo
159 """builds a repo with a given DAG from scratch in the current empty repo
159
160
160 The description of the DAG is read from stdin if not given on the
161 The description of the DAG is read from stdin if not given on the
161 command line.
162 command line.
162
163
163 Elements:
164 Elements:
164
165
165 - "+n" is a linear run of n nodes based on the current default parent
166 - "+n" is a linear run of n nodes based on the current default parent
166 - "." is a single node based on the current default parent
167 - "." is a single node based on the current default parent
167 - "$" resets the default parent to null (implied at the start);
168 - "$" resets the default parent to null (implied at the start);
168 otherwise the default parent is always the last node created
169 otherwise the default parent is always the last node created
169 - "<p" sets the default parent to the backref p
170 - "<p" sets the default parent to the backref p
170 - "*p" is a fork at parent p, which is a backref
171 - "*p" is a fork at parent p, which is a backref
171 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
172 - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
172 - "/p2" is a merge of the preceding node and p2
173 - "/p2" is a merge of the preceding node and p2
173 - ":tag" defines a local tag for the preceding node
174 - ":tag" defines a local tag for the preceding node
174 - "@branch" sets the named branch for subsequent nodes
175 - "@branch" sets the named branch for subsequent nodes
175 - "#...\\n" is a comment up to the end of the line
176 - "#...\\n" is a comment up to the end of the line
176
177
177 Whitespace between the above elements is ignored.
178 Whitespace between the above elements is ignored.
178
179
179 A backref is either
180 A backref is either
180
181
181 - a number n, which references the node curr-n, where curr is the current
182 - a number n, which references the node curr-n, where curr is the current
182 node, or
183 node, or
183 - the name of a local tag you placed earlier using ":tag", or
184 - the name of a local tag you placed earlier using ":tag", or
184 - empty to denote the default parent.
185 - empty to denote the default parent.
185
186
186 All string-valued elements are either strictly alphanumeric, or must
187 All string-valued elements are either strictly alphanumeric, or must
187 be enclosed in double quotes ("..."), with "\\" as escape character.
188 be enclosed in double quotes ("..."), with "\\" as escape character.
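
For example, the text "+3:p1 $ +2:p2 *p1/p2" (an illustrative sample)
builds a three-changeset linear branch whose head is tagged "p1",
resets the default parent to null, builds an unrelated two-changeset
branch whose head is tagged "p2", and finally merges the two tagged
heads.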
188 """
189 """
189
190
190 if text is None:
191 if text is None:
191 ui.status(_(b"reading DAG from stdin\n"))
192 ui.status(_(b"reading DAG from stdin\n"))
192 text = ui.fin.read()
193 text = ui.fin.read()
193
194
194 cl = repo.changelog
195 cl = repo.changelog
195 if len(cl) > 0:
196 if len(cl) > 0:
196 raise error.Abort(_(b'repository is not empty'))
197 raise error.Abort(_(b'repository is not empty'))
197
198
198 # determine number of revs in DAG
199 # determine number of revs in DAG
199 total = 0
200 total = 0
200 for type, data in dagparser.parsedag(text):
201 for type, data in dagparser.parsedag(text):
201 if type == b'n':
202 if type == b'n':
202 total += 1
203 total += 1
203
204
204 if mergeable_file:
205 if mergeable_file:
205 linesperrev = 2
206 linesperrev = 2
206 # make a file with ``linesperrev`` lines per rev
207 # make a file with ``linesperrev`` lines per rev
207 initialmergedlines = [
208 initialmergedlines = [
208 b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
209 b'%d' % i for i in pycompat.xrange(0, total * linesperrev)
209 ]
210 ]
210 initialmergedlines.append(b"")
211 initialmergedlines.append(b"")
211
212
212 tags = []
213 tags = []
213 progress = ui.makeprogress(
214 progress = ui.makeprogress(
214 _(b'building'), unit=_(b'revisions'), total=total
215 _(b'building'), unit=_(b'revisions'), total=total
215 )
216 )
216 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
217 with progress, repo.wlock(), repo.lock(), repo.transaction(b"builddag"):
217 at = -1
218 at = -1
218 atbranch = b'default'
219 atbranch = b'default'
219 nodeids = []
220 nodeids = []
220 id = 0
221 id = 0
221 progress.update(id)
222 progress.update(id)
222 for type, data in dagparser.parsedag(text):
223 for type, data in dagparser.parsedag(text):
223 if type == b'n':
224 if type == b'n':
224 ui.note((b'node %s\n' % pycompat.bytestr(data)))
225 ui.note((b'node %s\n' % pycompat.bytestr(data)))
225 id, ps = data
226 id, ps = data
226
227
227 files = []
228 files = []
228 filecontent = {}
229 filecontent = {}
229
230
230 p2 = None
231 p2 = None
231 if mergeable_file:
232 if mergeable_file:
232 fn = b"mf"
233 fn = b"mf"
233 p1 = repo[ps[0]]
234 p1 = repo[ps[0]]
234 if len(ps) > 1:
235 if len(ps) > 1:
235 p2 = repo[ps[1]]
236 p2 = repo[ps[1]]
236 pa = p1.ancestor(p2)
237 pa = p1.ancestor(p2)
237 base, local, other = [
238 base, local, other = [
238 x[fn].data() for x in (pa, p1, p2)
239 x[fn].data() for x in (pa, p1, p2)
239 ]
240 ]
240 m3 = simplemerge.Merge3Text(base, local, other)
241 m3 = simplemerge.Merge3Text(base, local, other)
241 ml = [l.strip() for l in m3.merge_lines()]
242 ml = [l.strip() for l in m3.merge_lines()]
242 ml.append(b"")
243 ml.append(b"")
243 elif at > 0:
244 elif at > 0:
244 ml = p1[fn].data().split(b"\n")
245 ml = p1[fn].data().split(b"\n")
245 else:
246 else:
246 ml = initialmergedlines
247 ml = initialmergedlines
247 ml[id * linesperrev] += b" r%i" % id
248 ml[id * linesperrev] += b" r%i" % id
248 mergedtext = b"\n".join(ml)
249 mergedtext = b"\n".join(ml)
249 files.append(fn)
250 files.append(fn)
250 filecontent[fn] = mergedtext
251 filecontent[fn] = mergedtext
251
252
252 if overwritten_file:
253 if overwritten_file:
253 fn = b"of"
254 fn = b"of"
254 files.append(fn)
255 files.append(fn)
255 filecontent[fn] = b"r%i\n" % id
256 filecontent[fn] = b"r%i\n" % id
256
257
257 if new_file:
258 if new_file:
258 fn = b"nf%i" % id
259 fn = b"nf%i" % id
259 files.append(fn)
260 files.append(fn)
260 filecontent[fn] = b"r%i\n" % id
261 filecontent[fn] = b"r%i\n" % id
261 if len(ps) > 1:
262 if len(ps) > 1:
262 if not p2:
263 if not p2:
263 p2 = repo[ps[1]]
264 p2 = repo[ps[1]]
264 for fn in p2:
265 for fn in p2:
265 if fn.startswith(b"nf"):
266 if fn.startswith(b"nf"):
266 files.append(fn)
267 files.append(fn)
267 filecontent[fn] = p2[fn].data()
268 filecontent[fn] = p2[fn].data()
268
269
269 def fctxfn(repo, cx, path):
270 def fctxfn(repo, cx, path):
270 if path in filecontent:
271 if path in filecontent:
271 return context.memfilectx(
272 return context.memfilectx(
272 repo, cx, path, filecontent[path]
273 repo, cx, path, filecontent[path]
273 )
274 )
274 return None
275 return None
275
276
276 if len(ps) == 0 or ps[0] < 0:
277 if len(ps) == 0 or ps[0] < 0:
277 pars = [None, None]
278 pars = [None, None]
278 elif len(ps) == 1:
279 elif len(ps) == 1:
279 pars = [nodeids[ps[0]], None]
280 pars = [nodeids[ps[0]], None]
280 else:
281 else:
281 pars = [nodeids[p] for p in ps]
282 pars = [nodeids[p] for p in ps]
282 cx = context.memctx(
283 cx = context.memctx(
283 repo,
284 repo,
284 pars,
285 pars,
285 b"r%i" % id,
286 b"r%i" % id,
286 files,
287 files,
287 fctxfn,
288 fctxfn,
288 date=(id, 0),
289 date=(id, 0),
289 user=b"debugbuilddag",
290 user=b"debugbuilddag",
290 extra={b'branch': atbranch},
291 extra={b'branch': atbranch},
291 )
292 )
292 nodeid = repo.commitctx(cx)
293 nodeid = repo.commitctx(cx)
293 nodeids.append(nodeid)
294 nodeids.append(nodeid)
294 at = id
295 at = id
295 elif type == b'l':
296 elif type == b'l':
296 id, name = data
297 id, name = data
297 ui.note((b'tag %s\n' % name))
298 ui.note((b'tag %s\n' % name))
298 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
299 tags.append(b"%s %s\n" % (hex(repo.changelog.node(id)), name))
299 elif type == b'a':
300 elif type == b'a':
300 ui.note((b'branch %s\n' % data))
301 ui.note((b'branch %s\n' % data))
301 atbranch = data
302 atbranch = data
302 progress.update(id)
303 progress.update(id)
303
304
304 if tags:
305 if tags:
305 repo.vfs.write(b"localtags", b"".join(tags))
306 repo.vfs.write(b"localtags", b"".join(tags))
306
307
307
308
308 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
309 def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
309 indent_string = b' ' * indent
310 indent_string = b' ' * indent
310 if all:
311 if all:
311 ui.writenoi18n(
312 ui.writenoi18n(
312 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
313 b"%sformat: id, p1, p2, cset, delta base, len(delta)\n"
313 % indent_string
314 % indent_string
314 )
315 )
315
316
316 def showchunks(named):
317 def showchunks(named):
317 ui.write(b"\n%s%s\n" % (indent_string, named))
318 ui.write(b"\n%s%s\n" % (indent_string, named))
318 for deltadata in gen.deltaiter():
319 for deltadata in gen.deltaiter():
319 node, p1, p2, cs, deltabase, delta, flags = deltadata
320 node, p1, p2, cs, deltabase, delta, flags = deltadata
320 ui.write(
321 ui.write(
321 b"%s%s %s %s %s %s %d\n"
322 b"%s%s %s %s %s %s %d\n"
322 % (
323 % (
323 indent_string,
324 indent_string,
324 hex(node),
325 hex(node),
325 hex(p1),
326 hex(p1),
326 hex(p2),
327 hex(p2),
327 hex(cs),
328 hex(cs),
328 hex(deltabase),
329 hex(deltabase),
329 len(delta),
330 len(delta),
330 )
331 )
331 )
332 )
332
333
333 chunkdata = gen.changelogheader()
334 chunkdata = gen.changelogheader()
334 showchunks(b"changelog")
335 showchunks(b"changelog")
335 chunkdata = gen.manifestheader()
336 chunkdata = gen.manifestheader()
336 showchunks(b"manifest")
337 showchunks(b"manifest")
337 for chunkdata in iter(gen.filelogheader, {}):
338 for chunkdata in iter(gen.filelogheader, {}):
338 fname = chunkdata[b'filename']
339 fname = chunkdata[b'filename']
339 showchunks(fname)
340 showchunks(fname)
340 else:
341 else:
341 if isinstance(gen, bundle2.unbundle20):
342 if isinstance(gen, bundle2.unbundle20):
342 raise error.Abort(_(b'use debugbundle2 for this file'))
343 raise error.Abort(_(b'use debugbundle2 for this file'))
343 chunkdata = gen.changelogheader()
344 chunkdata = gen.changelogheader()
344 for deltadata in gen.deltaiter():
345 for deltadata in gen.deltaiter():
345 node, p1, p2, cs, deltabase, delta, flags = deltadata
346 node, p1, p2, cs, deltabase, delta, flags = deltadata
346 ui.write(b"%s%s\n" % (indent_string, hex(node)))
347 ui.write(b"%s%s\n" % (indent_string, hex(node)))
347
348
348
349
349 def _debugobsmarkers(ui, part, indent=0, **opts):
350 def _debugobsmarkers(ui, part, indent=0, **opts):
350 """display version and markers contained in 'data'"""
351 """display version and markers contained in 'data'"""
351 opts = pycompat.byteskwargs(opts)
352 opts = pycompat.byteskwargs(opts)
352 data = part.read()
353 data = part.read()
353 indent_string = b' ' * indent
354 indent_string = b' ' * indent
354 try:
355 try:
355 version, markers = obsolete._readmarkers(data)
356 version, markers = obsolete._readmarkers(data)
356 except error.UnknownVersion as exc:
357 except error.UnknownVersion as exc:
357 msg = b"%sunsupported version: %s (%d bytes)\n"
358 msg = b"%sunsupported version: %s (%d bytes)\n"
358 msg %= indent_string, exc.version, len(data)
359 msg %= indent_string, exc.version, len(data)
359 ui.write(msg)
360 ui.write(msg)
360 else:
361 else:
361 msg = b"%sversion: %d (%d bytes)\n"
362 msg = b"%sversion: %d (%d bytes)\n"
362 msg %= indent_string, version, len(data)
363 msg %= indent_string, version, len(data)
363 ui.write(msg)
364 ui.write(msg)
364 fm = ui.formatter(b'debugobsolete', opts)
365 fm = ui.formatter(b'debugobsolete', opts)
365 for rawmarker in sorted(markers):
366 for rawmarker in sorted(markers):
366 m = obsutil.marker(None, rawmarker)
367 m = obsutil.marker(None, rawmarker)
367 fm.startitem()
368 fm.startitem()
368 fm.plain(indent_string)
369 fm.plain(indent_string)
369 cmdutil.showmarker(fm, m)
370 cmdutil.showmarker(fm, m)
370 fm.end()
371 fm.end()
371
372
372
373
373 def _debugphaseheads(ui, data, indent=0):
374 def _debugphaseheads(ui, data, indent=0):
374 """display version and markers contained in 'data'"""
375 """display version and markers contained in 'data'"""
375 indent_string = b' ' * indent
376 indent_string = b' ' * indent
376 headsbyphase = phases.binarydecode(data)
377 headsbyphase = phases.binarydecode(data)
377 for phase in phases.allphases:
378 for phase in phases.allphases:
378 for head in headsbyphase[phase]:
379 for head in headsbyphase[phase]:
379 ui.write(indent_string)
380 ui.write(indent_string)
380 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
381 ui.write(b'%s %s\n' % (hex(head), phases.phasenames[phase]))
381
382
382
383
383 def _quasirepr(thing):
384 def _quasirepr(thing):
384 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
385 if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
385 return b'{%s}' % (
386 return b'{%s}' % (
386 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
387 b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))
387 )
388 )
388 return pycompat.bytestr(repr(thing))
389 return pycompat.bytestr(repr(thing))
389
390
390
391
391 def _debugbundle2(ui, gen, all=None, **opts):
392 def _debugbundle2(ui, gen, all=None, **opts):
392 """lists the contents of a bundle2"""
393 """lists the contents of a bundle2"""
393 if not isinstance(gen, bundle2.unbundle20):
394 if not isinstance(gen, bundle2.unbundle20):
394 raise error.Abort(_(b'not a bundle2 file'))
395 raise error.Abort(_(b'not a bundle2 file'))
395 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
396 ui.write((b'Stream params: %s\n' % _quasirepr(gen.params)))
396 parttypes = opts.get('part_type', [])
397 parttypes = opts.get('part_type', [])
397 for part in gen.iterparts():
398 for part in gen.iterparts():
398 if parttypes and part.type not in parttypes:
399 if parttypes and part.type not in parttypes:
399 continue
400 continue
400 msg = b'%s -- %s (mandatory: %r)\n'
401 msg = b'%s -- %s (mandatory: %r)\n'
401 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
402 ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory)))
402 if part.type == b'changegroup':
403 if part.type == b'changegroup':
403 version = part.params.get(b'version', b'01')
404 version = part.params.get(b'version', b'01')
404 cg = changegroup.getunbundler(version, part, b'UN')
405 cg = changegroup.getunbundler(version, part, b'UN')
405 if not ui.quiet:
406 if not ui.quiet:
406 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
407 _debugchangegroup(ui, cg, all=all, indent=4, **opts)
407 if part.type == b'obsmarkers':
408 if part.type == b'obsmarkers':
408 if not ui.quiet:
409 if not ui.quiet:
409 _debugobsmarkers(ui, part, indent=4, **opts)
410 _debugobsmarkers(ui, part, indent=4, **opts)
410 if part.type == b'phase-heads':
411 if part.type == b'phase-heads':
411 if not ui.quiet:
412 if not ui.quiet:
412 _debugphaseheads(ui, part, indent=4)
413 _debugphaseheads(ui, part, indent=4)
413
414
414
415
415 @command(
416 @command(
416 b'debugbundle',
417 b'debugbundle',
417 [
418 [
418 (b'a', b'all', None, _(b'show all details')),
419 (b'a', b'all', None, _(b'show all details')),
419 (b'', b'part-type', [], _(b'show only the named part type')),
420 (b'', b'part-type', [], _(b'show only the named part type')),
420 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
421 (b'', b'spec', None, _(b'print the bundlespec of the bundle')),
421 ],
422 ],
422 _(b'FILE'),
423 _(b'FILE'),
423 norepo=True,
424 norepo=True,
424 )
425 )
425 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
426 def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
426 """lists the contents of a bundle"""
427 """lists the contents of a bundle"""
427 with hg.openpath(ui, bundlepath) as f:
428 with hg.openpath(ui, bundlepath) as f:
428 if spec:
429 if spec:
429 spec = exchange.getbundlespec(ui, f)
430 spec = exchange.getbundlespec(ui, f)
430 ui.write(b'%s\n' % spec)
431 ui.write(b'%s\n' % spec)
431 return
432 return
432
433
433 gen = exchange.readbundle(ui, f, bundlepath)
434 gen = exchange.readbundle(ui, f, bundlepath)
434 if isinstance(gen, bundle2.unbundle20):
435 if isinstance(gen, bundle2.unbundle20):
435 return _debugbundle2(ui, gen, all=all, **opts)
436 return _debugbundle2(ui, gen, all=all, **opts)
436 _debugchangegroup(ui, gen, all=all, **opts)
437 _debugchangegroup(ui, gen, all=all, **opts)
437
438
438
439
439 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
440 @command(b'debugcapabilities', [], _(b'PATH'), norepo=True)
440 def debugcapabilities(ui, path, **opts):
441 def debugcapabilities(ui, path, **opts):
441 """lists the capabilities of a remote peer"""
442 """lists the capabilities of a remote peer"""
442 opts = pycompat.byteskwargs(opts)
443 opts = pycompat.byteskwargs(opts)
443 peer = hg.peer(ui, opts, path)
444 peer = hg.peer(ui, opts, path)
444 caps = peer.capabilities()
445 caps = peer.capabilities()
445 ui.writenoi18n(b'Main capabilities:\n')
446 ui.writenoi18n(b'Main capabilities:\n')
446 for c in sorted(caps):
447 for c in sorted(caps):
447 ui.write(b' %s\n' % c)
448 ui.write(b' %s\n' % c)
448 b2caps = bundle2.bundle2caps(peer)
449 b2caps = bundle2.bundle2caps(peer)
449 if b2caps:
450 if b2caps:
450 ui.writenoi18n(b'Bundle2 capabilities:\n')
451 ui.writenoi18n(b'Bundle2 capabilities:\n')
451 for key, values in sorted(pycompat.iteritems(b2caps)):
452 for key, values in sorted(pycompat.iteritems(b2caps)):
452 ui.write(b' %s\n' % key)
453 ui.write(b' %s\n' % key)
453 for v in values:
454 for v in values:
454 ui.write(b' %s\n' % v)
455 ui.write(b' %s\n' % v)
455
456
456
457
457 @command(b'debugcheckstate', [], b'')
458 @command(b'debugcheckstate', [], b'')
458 def debugcheckstate(ui, repo):
459 def debugcheckstate(ui, repo):
459 """validate the correctness of the current dirstate"""
460 """validate the correctness of the current dirstate"""
460 parent1, parent2 = repo.dirstate.parents()
461 parent1, parent2 = repo.dirstate.parents()
461 m1 = repo[parent1].manifest()
462 m1 = repo[parent1].manifest()
462 m2 = repo[parent2].manifest()
463 m2 = repo[parent2].manifest()
463 errors = 0
464 errors = 0
464 for f in repo.dirstate:
465 for f in repo.dirstate:
465 state = repo.dirstate[f]
466 state = repo.dirstate[f]
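# dirstate states: 'n' = normal, 'a' = added, 'r' = removed,
# 'm' = merged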
466 if state in b"nr" and f not in m1:
467 if state in b"nr" and f not in m1:
467 ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
468 ui.warn(_(b"%s in state %s, but not in manifest1\n") % (f, state))
468 errors += 1
469 errors += 1
469 if state in b"a" and f in m1:
470 if state in b"a" and f in m1:
470 ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
471 ui.warn(_(b"%s in state %s, but also in manifest1\n") % (f, state))
471 errors += 1
472 errors += 1
472 if state in b"m" and f not in m1 and f not in m2:
473 if state in b"m" and f not in m1 and f not in m2:
473 ui.warn(
474 ui.warn(
474 _(b"%s in state %s, but not in either manifest\n") % (f, state)
475 _(b"%s in state %s, but not in either manifest\n") % (f, state)
475 )
476 )
476 errors += 1
477 errors += 1
477 for f in m1:
478 for f in m1:
478 state = repo.dirstate[f]
479 state = repo.dirstate[f]
479 if state not in b"nrm":
480 if state not in b"nrm":
480 ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
481 ui.warn(_(b"%s in manifest1, but listed as state %s") % (f, state))
481 errors += 1
482 errors += 1
482 if errors:
483 if errors:
483 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
484 errstr = _(b".hg/dirstate inconsistent with current parent's manifest")
484 raise error.Abort(errstr)
485 raise error.Abort(errstr)
485
486
486
487
487 @command(
488 @command(
488 b'debugcolor',
489 b'debugcolor',
489 [(b'', b'style', None, _(b'show all configured styles'))],
490 [(b'', b'style', None, _(b'show all configured styles'))],
490 b'hg debugcolor',
491 b'hg debugcolor',
491 )
492 )
492 def debugcolor(ui, repo, **opts):
493 def debugcolor(ui, repo, **opts):
493 """show available colors, effects, or styles"""
494 """show available colors, effects, or styles"""
494 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
495 ui.writenoi18n(b'color mode: %s\n' % stringutil.pprint(ui._colormode))
495 if opts.get('style'):
496 if opts.get('style'):
496 return _debugdisplaystyle(ui)
497 return _debugdisplaystyle(ui)
497 else:
498 else:
498 return _debugdisplaycolor(ui)
499 return _debugdisplaycolor(ui)
499
500
500
501
501 def _debugdisplaycolor(ui):
502 def _debugdisplaycolor(ui):
502 ui = ui.copy()
503 ui = ui.copy()
503 ui._styles.clear()
504 ui._styles.clear()
504 for effect in color._activeeffects(ui).keys():
505 for effect in color._activeeffects(ui).keys():
505 ui._styles[effect] = effect
506 ui._styles[effect] = effect
506 if ui._terminfoparams:
507 if ui._terminfoparams:
507 for k, v in ui.configitems(b'color'):
508 for k, v in ui.configitems(b'color'):
508 if k.startswith(b'color.'):
509 if k.startswith(b'color.'):
509 ui._styles[k] = k[6:]
510 ui._styles[k] = k[6:]
510 elif k.startswith(b'terminfo.'):
511 elif k.startswith(b'terminfo.'):
511 ui._styles[k] = k[9:]
512 ui._styles[k] = k[9:]
512 ui.write(_(b'available colors:\n'))
513 ui.write(_(b'available colors:\n'))
513 # sort labels containing '_' after the others to group the '_background' entries.
514 # sort labels containing '_' after the others to group the '_background' entries.
514 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
515 items = sorted(ui._styles.items(), key=lambda i: (b'_' in i[0], i[0], i[1]))
515 for colorname, label in items:
516 for colorname, label in items:
516 ui.write(b'%s\n' % colorname, label=label)
517 ui.write(b'%s\n' % colorname, label=label)
517
518
518
519
519 def _debugdisplaystyle(ui):
520 def _debugdisplaystyle(ui):
520 ui.write(_(b'available styles:\n'))
521 ui.write(_(b'available styles:\n'))
521 if not ui._styles:
522 if not ui._styles:
522 return
523 return
523 width = max(len(s) for s in ui._styles)
524 width = max(len(s) for s in ui._styles)
524 for label, effects in sorted(ui._styles.items()):
525 for label, effects in sorted(ui._styles.items()):
525 ui.write(b'%s' % label, label=label)
526 ui.write(b'%s' % label, label=label)
526 if effects:
527 if effects:
527 # 50
528 # 50
528 ui.write(b': ')
529 ui.write(b': ')
529 ui.write(b' ' * (max(0, width - len(label))))
530 ui.write(b' ' * (max(0, width - len(label))))
530 ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
531 ui.write(b', '.join(ui.label(e, e) for e in effects.split()))
531 ui.write(b'\n')
532 ui.write(b'\n')
532
533
533
534
534 @command(b'debugcreatestreamclonebundle', [], b'FILE')
535 @command(b'debugcreatestreamclonebundle', [], b'FILE')
535 def debugcreatestreamclonebundle(ui, repo, fname):
536 def debugcreatestreamclonebundle(ui, repo, fname):
536 """create a stream clone bundle file
537 """create a stream clone bundle file
537
538
538 Stream bundles are special bundles that are essentially archives of
539 Stream bundles are special bundles that are essentially archives of
539 revlog files. They are commonly used for cloning very quickly.
540 revlog files. They are commonly used for cloning very quickly.
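
A typical invocation (illustrative; the output file name is arbitrary)
is ``hg debugcreatestreamclonebundle ../stream-clone.hg``.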
540 """
541 """
541 # TODO we may want to turn this into an abort when this functionality
542 # TODO we may want to turn this into an abort when this functionality
542 # is moved into `hg bundle`.
543 # is moved into `hg bundle`.
543 if phases.hassecret(repo):
544 if phases.hassecret(repo):
544 ui.warn(
545 ui.warn(
545 _(
546 _(
546 b'(warning: stream clone bundle will contain secret '
547 b'(warning: stream clone bundle will contain secret '
547 b'revisions)\n'
548 b'revisions)\n'
548 )
549 )
549 )
550 )
550
551
551 requirements, gen = streamclone.generatebundlev1(repo)
552 requirements, gen = streamclone.generatebundlev1(repo)
552 changegroup.writechunks(ui, gen, fname)
553 changegroup.writechunks(ui, gen, fname)
553
554
554 ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
555 ui.write(_(b'bundle requirements: %s\n') % b', '.join(sorted(requirements)))
555
556
556
557
557 @command(
558 @command(
558 b'debugdag',
559 b'debugdag',
559 [
560 [
560 (b't', b'tags', None, _(b'use tags as labels')),
561 (b't', b'tags', None, _(b'use tags as labels')),
561 (b'b', b'branches', None, _(b'annotate with branch names')),
562 (b'b', b'branches', None, _(b'annotate with branch names')),
562 (b'', b'dots', None, _(b'use dots for runs')),
563 (b'', b'dots', None, _(b'use dots for runs')),
563 (b's', b'spaces', None, _(b'separate elements by spaces')),
564 (b's', b'spaces', None, _(b'separate elements by spaces')),
564 ],
565 ],
565 _(b'[OPTION]... [FILE [REV]...]'),
566 _(b'[OPTION]... [FILE [REV]...]'),
566 optionalrepo=True,
567 optionalrepo=True,
567 )
568 )
568 def debugdag(ui, repo, file_=None, *revs, **opts):
569 def debugdag(ui, repo, file_=None, *revs, **opts):
569 """format the changelog or an index DAG as a concise textual description
570 """format the changelog or an index DAG as a concise textual description
570
571
571 If you pass a revlog index, the revlog's DAG is emitted. If you list
572 If you pass a revlog index, the revlog's DAG is emitted. If you list
572 revision numbers, they get labeled in the output as rN.
573 revision numbers, they get labeled in the output as rN.
573
574
574 Otherwise, the changelog DAG of the current repo is emitted.
575 Otherwise, the changelog DAG of the current repo is emitted.
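
For instance (an illustrative invocation), ``hg debugdag -t -b`` emits
the changelog DAG of the current repo annotated with tags and branch
names.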
575 """
576 """
576 spaces = opts.get('spaces')
577 spaces = opts.get('spaces')
577 dots = opts.get('dots')
578 dots = opts.get('dots')
578 if file_:
579 if file_:
579 rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
580 rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), file_)
580 revs = set(int(r) for r in revs)
581 revs = set(int(r) for r in revs)
581
582
582 def events():
583 def events():
583 for r in rlog:
584 for r in rlog:
584 yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
585 yield b'n', (r, list(p for p in rlog.parentrevs(r) if p != -1))
585 if r in revs:
586 if r in revs:
586 yield b'l', (r, b"r%i" % r)
587 yield b'l', (r, b"r%i" % r)
587
588
588 elif repo:
589 elif repo:
589 cl = repo.changelog
590 cl = repo.changelog
590 tags = opts.get('tags')
591 tags = opts.get('tags')
591 branches = opts.get('branches')
592 branches = opts.get('branches')
592 if tags:
593 if tags:
593 labels = {}
594 labels = {}
594 for l, n in repo.tags().items():
595 for l, n in repo.tags().items():
595 labels.setdefault(cl.rev(n), []).append(l)
596 labels.setdefault(cl.rev(n), []).append(l)
596
597
597 def events():
598 def events():
598 b = b"default"
599 b = b"default"
599 for r in cl:
600 for r in cl:
600 if branches:
601 if branches:
601 newb = cl.read(cl.node(r))[5][b'branch']
602 newb = cl.read(cl.node(r))[5][b'branch']
602 if newb != b:
603 if newb != b:
603 yield b'a', newb
604 yield b'a', newb
604 b = newb
605 b = newb
605 yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
606 yield b'n', (r, list(p for p in cl.parentrevs(r) if p != -1))
606 if tags:
607 if tags:
607 ls = labels.get(r)
608 ls = labels.get(r)
608 if ls:
609 if ls:
609 for l in ls:
610 for l in ls:
610 yield b'l', (r, l)
611 yield b'l', (r, l)
611
612
612 else:
613 else:
613 raise error.Abort(_(b'need repo for changelog dag'))
614 raise error.Abort(_(b'need repo for changelog dag'))
614
615
615 for line in dagparser.dagtextlines(
616 for line in dagparser.dagtextlines(
616 events(),
617 events(),
617 addspaces=spaces,
618 addspaces=spaces,
618 wraplabels=True,
619 wraplabels=True,
619 wrapannotations=True,
620 wrapannotations=True,
620 wrapnonlinear=dots,
621 wrapnonlinear=dots,
621 usedots=dots,
622 usedots=dots,
622 maxlinewidth=70,
623 maxlinewidth=70,
623 ):
624 ):
624 ui.write(line)
625 ui.write(line)
625 ui.write(b"\n")
626 ui.write(b"\n")
626
627
627
628
628 @command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
629 @command(b'debugdata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
629 def debugdata(ui, repo, file_, rev=None, **opts):
630 def debugdata(ui, repo, file_, rev=None, **opts):
630 """dump the contents of a data file revision"""
631 """dump the contents of a data file revision"""
631 opts = pycompat.byteskwargs(opts)
632 opts = pycompat.byteskwargs(opts)
632 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
633 if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
633 if rev is not None:
634 if rev is not None:
634 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
635 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
635 file_, rev = None, file_
636 file_, rev = None, file_
636 elif rev is None:
637 elif rev is None:
637 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
638 raise error.CommandError(b'debugdata', _(b'invalid arguments'))
638 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
639 r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
639 try:
640 try:
640 ui.write(r.rawdata(r.lookup(rev)))
641 ui.write(r.rawdata(r.lookup(rev)))
641 except KeyError:
642 except KeyError:
642 raise error.Abort(_(b'invalid revision identifier %s') % rev)
643 raise error.Abort(_(b'invalid revision identifier %s') % rev)
643
644
644
645
645 @command(
646 @command(
646 b'debugdate',
647 b'debugdate',
647 [(b'e', b'extended', None, _(b'try extended date formats'))],
648 [(b'e', b'extended', None, _(b'try extended date formats'))],
648 _(b'[-e] DATE [RANGE]'),
649 _(b'[-e] DATE [RANGE]'),
649 norepo=True,
650 norepo=True,
650 optionalrepo=True,
651 optionalrepo=True,
651 )
652 )
652 def debugdate(ui, date, range=None, **opts):
653 def debugdate(ui, date, range=None, **opts):
653 """parse and display a date"""
654 """parse and display a date"""
654 if opts["extended"]:
655 if opts["extended"]:
655 d = dateutil.parsedate(date, util.extendeddateformats)
656 d = dateutil.parsedate(date, util.extendeddateformats)
656 else:
657 else:
657 d = dateutil.parsedate(date)
658 d = dateutil.parsedate(date)
658 ui.writenoi18n(b"internal: %d %d\n" % d)
659 ui.writenoi18n(b"internal: %d %d\n" % d)
659 ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
660 ui.writenoi18n(b"standard: %s\n" % dateutil.datestr(d))
660 if range:
661 if range:
661 m = dateutil.matchdate(range)
662 m = dateutil.matchdate(range)
662 ui.writenoi18n(b"match: %s\n" % m(d[0]))
663 ui.writenoi18n(b"match: %s\n" % m(d[0]))
663
664
664
665
665 @command(
666 @command(
666 b'debugdeltachain',
667 b'debugdeltachain',
667 cmdutil.debugrevlogopts + cmdutil.formatteropts,
668 cmdutil.debugrevlogopts + cmdutil.formatteropts,
668 _(b'-c|-m|FILE'),
669 _(b'-c|-m|FILE'),
669 optionalrepo=True,
670 optionalrepo=True,
670 )
671 )
671 def debugdeltachain(ui, repo, file_=None, **opts):
672 def debugdeltachain(ui, repo, file_=None, **opts):
672 """dump information about delta chains in a revlog
673 """dump information about delta chains in a revlog
673
674
674 Output can be templatized. Available template keywords are:
675 Output can be templatized. Available template keywords are:
675
676
676 :``rev``: revision number
677 :``rev``: revision number
677 :``chainid``: delta chain identifier (numbered by unique base)
678 :``chainid``: delta chain identifier (numbered by unique base)
678 :``chainlen``: delta chain length to this revision
679 :``chainlen``: delta chain length to this revision
679 :``prevrev``: previous revision in delta chain
680 :``prevrev``: previous revision in delta chain
680 :``deltatype``: role of delta / how it was computed
681 :``deltatype``: role of delta / how it was computed
681 :``compsize``: compressed size of revision
682 :``compsize``: compressed size of revision
682 :``uncompsize``: uncompressed size of revision
683 :``uncompsize``: uncompressed size of revision
683 :``chainsize``: total size of compressed revisions in chain
684 :``chainsize``: total size of compressed revisions in chain
684 :``chainratio``: total chain size divided by uncompressed revision size
685 :``chainratio``: total chain size divided by uncompressed revision size
685 (new delta chains typically start at ratio 2.00)
686 (new delta chains typically start at ratio 2.00)
686 :``lindist``: linear distance from base revision in delta chain to end
687 :``lindist``: linear distance from base revision in delta chain to end
687 of this revision
688 of this revision
688 :``extradist``: total size of revisions not part of this delta chain from
689 :``extradist``: total size of revisions not part of this delta chain from
689 base of delta chain to end of this revision; a measurement
690 base of delta chain to end of this revision; a measurement
690 of how much extra data we need to read/seek across to read
691 of how much extra data we need to read/seek across to read
691 the delta chain for this revision
692 the delta chain for this revision
692 :``extraratio``: extradist divided by chainsize; another representation of
693 :``extraratio``: extradist divided by chainsize; another representation of
693 how much unrelated data is needed to load this delta chain
694 how much unrelated data is needed to load this delta chain
694
695
695 If the repository is configured to use the sparse read, additional keywords
696 If the repository is configured to use the sparse read, additional keywords
696 are available:
697 are available:
697
698
698 :``readsize``: total size of data read from the disk for a revision
699 :``readsize``: total size of data read from the disk for a revision
699 (sum of the sizes of all the blocks)
700 (sum of the sizes of all the blocks)
700 :``largestblock``: size of the largest block of data read from the disk
701 :``largestblock``: size of the largest block of data read from the disk
701 :``readdensity``: density of useful bytes in the data read from the disk
702 :``readdensity``: density of useful bytes in the data read from the disk
702 :``srchunks``: in how many data hunks the whole revision would be read
703 :``srchunks``: in how many data hunks the whole revision would be read
703
704
704 The sparse read can be enabled with experimental.sparse-read = True
705 The sparse read can be enabled with experimental.sparse-read = True
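
For instance (an illustrative invocation), a compact per-revision
report can be requested with::

hg debugdeltachain -m -T '{rev} {chainid} {chainlen} {chainratio}\n'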
705 """
706 """
706 opts = pycompat.byteskwargs(opts)
707 opts = pycompat.byteskwargs(opts)
707 r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
708 r = cmdutil.openrevlog(repo, b'debugdeltachain', file_, opts)
708 index = r.index
709 index = r.index
709 start = r.start
710 start = r.start
710 length = r.length
711 length = r.length
711 generaldelta = r.version & revlog.FLAG_GENERALDELTA
712 generaldelta = r.version & revlog.FLAG_GENERALDELTA
712 withsparseread = getattr(r, '_withsparseread', False)
713 withsparseread = getattr(r, '_withsparseread', False)
713
714
714 def revinfo(rev):
715 def revinfo(rev):
715 e = index[rev]
716 e = index[rev]
716 compsize = e[1]
717 compsize = e[1]
717 uncompsize = e[2]
718 uncompsize = e[2]
718 chainsize = 0
719 chainsize = 0
719
720
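# A revlog index entry is (offset/flags, comp. size, uncomp. size,
# delta base rev, link rev, p1 rev, p2 rev, node); e[3] below is the
# delta base and e[5]/e[6] are the parents.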
720 if generaldelta:
721 if generaldelta:
721 if e[3] == e[5]:
722 if e[3] == e[5]:
722 deltatype = b'p1'
723 deltatype = b'p1'
723 elif e[3] == e[6]:
724 elif e[3] == e[6]:
724 deltatype = b'p2'
725 deltatype = b'p2'
725 elif e[3] == rev - 1:
726 elif e[3] == rev - 1:
726 deltatype = b'prev'
727 deltatype = b'prev'
727 elif e[3] == rev:
728 elif e[3] == rev:
728 deltatype = b'base'
729 deltatype = b'base'
729 else:
730 else:
730 deltatype = b'other'
731 deltatype = b'other'
731 else:
732 else:
732 if e[3] == rev:
733 if e[3] == rev:
733 deltatype = b'base'
734 deltatype = b'base'
734 else:
735 else:
735 deltatype = b'prev'
736 deltatype = b'prev'
736
737
737 chain = r._deltachain(rev)[0]
738 chain = r._deltachain(rev)[0]
738 for iterrev in chain:
739 for iterrev in chain:
739 e = index[iterrev]
740 e = index[iterrev]
740 chainsize += e[1]
741 chainsize += e[1]
741
742
742 return compsize, uncompsize, deltatype, chain, chainsize
743 return compsize, uncompsize, deltatype, chain, chainsize
743
744
744 fm = ui.formatter(b'debugdeltachain', opts)
745 fm = ui.formatter(b'debugdeltachain', opts)
745
746
746 fm.plain(
747 fm.plain(
747 b' rev chain# chainlen prev delta '
748 b' rev chain# chainlen prev delta '
748 b'size rawsize chainsize ratio lindist extradist '
749 b'size rawsize chainsize ratio lindist extradist '
749 b'extraratio'
750 b'extraratio'
750 )
751 )
751 if withsparseread:
752 if withsparseread:
752 fm.plain(b' readsize largestblk rddensity srchunks')
753 fm.plain(b' readsize largestblk rddensity srchunks')
753 fm.plain(b'\n')
754 fm.plain(b'\n')
754
755
755 chainbases = {}
756 chainbases = {}
756 for rev in r:
757 for rev in r:
757 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
758 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
758 chainbase = chain[0]
759 chainbase = chain[0]
759 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
760 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
760 basestart = start(chainbase)
761 basestart = start(chainbase)
761 revstart = start(rev)
762 revstart = start(rev)
762 lineardist = revstart + comp - basestart
763 lineardist = revstart + comp - basestart
763 extradist = lineardist - chainsize
764 extradist = lineardist - chainsize
764 try:
765 try:
765 prevrev = chain[-2]
766 prevrev = chain[-2]
766 except IndexError:
767 except IndexError:
767 prevrev = -1
768 prevrev = -1
768
769
769 if uncomp != 0:
770 if uncomp != 0:
770 chainratio = float(chainsize) / float(uncomp)
771 chainratio = float(chainsize) / float(uncomp)
771 else:
772 else:
772 chainratio = chainsize
773 chainratio = chainsize
773
774
774 if chainsize != 0:
775 if chainsize != 0:
775 extraratio = float(extradist) / float(chainsize)
776 extraratio = float(extradist) / float(chainsize)
776 else:
777 else:
777 extraratio = extradist
778 extraratio = extradist
778
779
779 fm.startitem()
780 fm.startitem()
780 fm.write(
781 fm.write(
781 b'rev chainid chainlen prevrev deltatype compsize '
782 b'rev chainid chainlen prevrev deltatype compsize '
782 b'uncompsize chainsize chainratio lindist extradist '
783 b'uncompsize chainsize chainratio lindist extradist '
783 b'extraratio',
784 b'extraratio',
784 b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
785 b'%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
785 rev,
786 rev,
786 chainid,
787 chainid,
787 len(chain),
788 len(chain),
788 prevrev,
789 prevrev,
789 deltatype,
790 deltatype,
790 comp,
791 comp,
791 uncomp,
792 uncomp,
792 chainsize,
793 chainsize,
793 chainratio,
794 chainratio,
794 lineardist,
795 lineardist,
795 extradist,
796 extradist,
796 extraratio,
797 extraratio,
797 rev=rev,
798 rev=rev,
798 chainid=chainid,
799 chainid=chainid,
799 chainlen=len(chain),
800 chainlen=len(chain),
800 prevrev=prevrev,
801 prevrev=prevrev,
801 deltatype=deltatype,
802 deltatype=deltatype,
802 compsize=comp,
803 compsize=comp,
803 uncompsize=uncomp,
804 uncompsize=uncomp,
804 chainsize=chainsize,
805 chainsize=chainsize,
805 chainratio=chainratio,
806 chainratio=chainratio,
806 lindist=lineardist,
807 lindist=lineardist,
807 extradist=extradist,
808 extradist=extradist,
808 extraratio=extraratio,
809 extraratio=extraratio,
809 )
810 )
810 if withsparseread:
811 if withsparseread:
811 readsize = 0
812 readsize = 0
812 largestblock = 0
813 largestblock = 0
813 srchunks = 0
814 srchunks = 0
814
815
815 for revschunk in deltautil.slicechunk(r, chain):
816 for revschunk in deltautil.slicechunk(r, chain):
816 srchunks += 1
817 srchunks += 1
817 blkend = start(revschunk[-1]) + length(revschunk[-1])
818 blkend = start(revschunk[-1]) + length(revschunk[-1])
818 blksize = blkend - start(revschunk[0])
819 blksize = blkend - start(revschunk[0])
819
820
820 readsize += blksize
821 readsize += blksize
821 if largestblock < blksize:
822 if largestblock < blksize:
822 largestblock = blksize
823 largestblock = blksize
823
824
824 if readsize:
825 if readsize:
825 readdensity = float(chainsize) / float(readsize)
826 readdensity = float(chainsize) / float(readsize)
826 else:
827 else:
827 readdensity = 1
828 readdensity = 1
828
829
829 fm.write(
830 fm.write(
830 b'readsize largestblock readdensity srchunks',
831 b'readsize largestblock readdensity srchunks',
831 b' %10d %10d %9.5f %8d',
832 b' %10d %10d %9.5f %8d',
832 readsize,
833 readsize,
833 largestblock,
834 largestblock,
834 readdensity,
835 readdensity,
835 srchunks,
836 srchunks,
836 readsize=readsize,
837 readsize=readsize,
837 largestblock=largestblock,
838 largestblock=largestblock,
838 readdensity=readdensity,
839 readdensity=readdensity,
839 srchunks=srchunks,
840 srchunks=srchunks,
840 )
841 )
841
842
842 fm.plain(b'\n')
843 fm.plain(b'\n')
843
844
844 fm.end()
845 fm.end()
845
846
846
847
847 @command(
848 @command(
848 b'debugdirstate|debugstate',
849 b'debugdirstate|debugstate',
849 [
850 [
850 (
851 (
851 b'',
852 b'',
852 b'nodates',
853 b'nodates',
853 None,
854 None,
854 _(b'do not display the saved mtime (DEPRECATED)'),
855 _(b'do not display the saved mtime (DEPRECATED)'),
855 ),
856 ),
856 (b'', b'dates', True, _(b'display the saved mtime')),
857 (b'', b'dates', True, _(b'display the saved mtime')),
857 (b'', b'datesort', None, _(b'sort by saved mtime')),
858 (b'', b'datesort', None, _(b'sort by saved mtime')),
858 ],
859 ],
859 _(b'[OPTION]...'),
860 _(b'[OPTION]...'),
860 )
861 )
861 def debugstate(ui, repo, **opts):
862 def debugstate(ui, repo, **opts):
862 """show the contents of the current dirstate"""
863 """show the contents of the current dirstate"""
863
864
864 nodates = not opts['dates']
865 nodates = not opts['dates']
865 if opts.get('nodates') is not None:
866 if opts.get('nodates') is not None:
866 nodates = True
867 nodates = True
867 datesort = opts.get('datesort')
868 datesort = opts.get('datesort')
868
869
869 if datesort:
870 if datesort:
870 keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
871 keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
871 else:
872 else:
872 keyfunc = None # sort by filename
873 keyfunc = None # sort by filename
873 for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
874 for file_, ent in sorted(pycompat.iteritems(repo.dirstate), key=keyfunc):
874 if ent[3] == -1:
875 if ent[3] == -1:
875 timestr = b'unset '
876 timestr = b'unset '
876 elif nodates:
877 elif nodates:
877 timestr = b'set '
878 timestr = b'set '
878 else:
879 else:
879 timestr = time.strftime(
880 timestr = time.strftime(
880 "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
881 "%Y-%m-%d %H:%M:%S ", time.localtime(ent[3])
881 )
882 )
882 timestr = encoding.strtolocal(timestr)
883 timestr = encoding.strtolocal(timestr)
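# 0o20000 is the bit that distinguishes S_IFLNK (0o120000) from
# S_IFREG (0o100000) in the recorded st_mode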
883 if ent[1] & 0o20000:
884 if ent[1] & 0o20000:
884 mode = b'lnk'
885 mode = b'lnk'
885 else:
886 else:
886 mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
887 mode = b'%3o' % (ent[1] & 0o777 & ~util.umask)
887 ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
888 ui.write(b"%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
888 for f in repo.dirstate.copies():
889 for f in repo.dirstate.copies():
889 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
890 ui.write(_(b"copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
890
891
891
892
892 @command(
893 @command(
893 b'debugdiscovery',
894 b'debugdiscovery',
894 [
895 [
895 (b'', b'old', None, _(b'use old-style discovery')),
896 (b'', b'old', None, _(b'use old-style discovery')),
896 (
897 (
897 b'',
898 b'',
898 b'nonheads',
899 b'nonheads',
899 None,
900 None,
900 _(b'use old-style discovery with non-heads included'),
901 _(b'use old-style discovery with non-heads included'),
901 ),
902 ),
902 (b'', b'rev', [], _(b'restrict discovery to this set of revs')),
903 (b'', b'rev', [], _(b'restrict discovery to this set of revs')),
903 (b'', b'seed', b'12323', _(b'specify the random seed used for discovery')),
904 (b'', b'seed', b'12323', _(b'specify the random seed used for discovery')),
904 ]
905 ]
905 + cmdutil.remoteopts,
906 + cmdutil.remoteopts,
906 _(b'[--rev REV] [OTHER]'),
907 _(b'[--rev REV] [OTHER]'),
907 )
908 )
908 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
909 def debugdiscovery(ui, repo, remoteurl=b"default", **opts):
909 """runs the changeset discovery protocol in isolation"""
910 """runs the changeset discovery protocol in isolation"""
910 opts = pycompat.byteskwargs(opts)
911 opts = pycompat.byteskwargs(opts)
911 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
912 remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
912 remote = hg.peer(repo, opts, remoteurl)
913 remote = hg.peer(repo, opts, remoteurl)
913 ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
914 ui.status(_(b'comparing with %s\n') % util.hidepassword(remoteurl))
914
915
915 # make sure tests are repeatable
916 # make sure tests are repeatable
916 random.seed(int(opts[b'seed']))
917 random.seed(int(opts[b'seed']))
917
918
918 if opts.get(b'old'):
919 if opts.get(b'old'):
919
920
920 def doit(pushedrevs, remoteheads, remote=remote):
921 def doit(pushedrevs, remoteheads, remote=remote):
921 if not util.safehasattr(remote, b'branches'):
922 if not util.safehasattr(remote, b'branches'):
922 # enable in-client legacy support
923 # enable in-client legacy support
923 remote = localrepo.locallegacypeer(remote.local())
924 remote = localrepo.locallegacypeer(remote.local())
924 common, _in, hds = treediscovery.findcommonincoming(
925 common, _in, hds = treediscovery.findcommonincoming(
925 repo, remote, force=True
926 repo, remote, force=True
926 )
927 )
927 common = set(common)
928 common = set(common)
928 if not opts.get(b'nonheads'):
929 if not opts.get(b'nonheads'):
929 ui.writenoi18n(
930 ui.writenoi18n(
930 b"unpruned common: %s\n"
931 b"unpruned common: %s\n"
931 % b" ".join(sorted(short(n) for n in common))
932 % b" ".join(sorted(short(n) for n in common))
932 )
933 )
933
934
934 clnode = repo.changelog.node
935 clnode = repo.changelog.node
935 common = repo.revs(b'heads(::%ln)', common)
936 common = repo.revs(b'heads(::%ln)', common)
936 common = {clnode(r) for r in common}
937 common = {clnode(r) for r in common}
937 return common, hds
938 return common, hds
938
939
939 else:
940 else:
940
941
941 def doit(pushedrevs, remoteheads, remote=remote):
942 def doit(pushedrevs, remoteheads, remote=remote):
942 nodes = None
943 nodes = None
943 if pushedrevs:
944 if pushedrevs:
944 revs = scmutil.revrange(repo, pushedrevs)
945 revs = scmutil.revrange(repo, pushedrevs)
945 nodes = [repo[r].node() for r in revs]
946 nodes = [repo[r].node() for r in revs]
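# findcommonheads() returns (common nodes, anyincoming flag,
# remote heads); the flag is not needed here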
946 common, any, hds = setdiscovery.findcommonheads(
947 common, any, hds = setdiscovery.findcommonheads(
947 ui, repo, remote, ancestorsof=nodes
948 ui, repo, remote, ancestorsof=nodes
948 )
949 )
949 return common, hds
950 return common, hds
950
951
951 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
952 remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
952 localrevs = opts[b'rev']
953 localrevs = opts[b'rev']
953 with util.timedcm('debug-discovery') as t:
954 with util.timedcm('debug-discovery') as t:
954 common, hds = doit(localrevs, remoterevs)
955 common, hds = doit(localrevs, remoterevs)
955
956
956 # compute all statistics
957 # compute all statistics
957 common = set(common)
958 common = set(common)
958 rheads = set(hds)
959 rheads = set(hds)
959 lheads = set(repo.heads())
960 lheads = set(repo.heads())
960
961
961 data = {}
962 data = {}
962 data[b'elapsed'] = t.elapsed
963 data[b'elapsed'] = t.elapsed
963 data[b'nb-common'] = len(common)
964 data[b'nb-common'] = len(common)
964 data[b'nb-common-local'] = len(common & lheads)
965 data[b'nb-common-local'] = len(common & lheads)
965 data[b'nb-common-remote'] = len(common & rheads)
966 data[b'nb-common-remote'] = len(common & rheads)
966 data[b'nb-common-both'] = len(common & rheads & lheads)
967 data[b'nb-common-both'] = len(common & rheads & lheads)
967 data[b'nb-local'] = len(lheads)
968 data[b'nb-local'] = len(lheads)
968 data[b'nb-local-missing'] = data[b'nb-local'] - data[b'nb-common-local']
969 data[b'nb-local-missing'] = data[b'nb-local'] - data[b'nb-common-local']
969 data[b'nb-remote'] = len(rheads)
970 data[b'nb-remote'] = len(rheads)
970 data[b'nb-remote-unknown'] = data[b'nb-remote'] - data[b'nb-common-remote']
971 data[b'nb-remote-unknown'] = data[b'nb-remote'] - data[b'nb-common-remote']
971 data[b'nb-revs'] = len(repo.revs(b'all()'))
972 data[b'nb-revs'] = len(repo.revs(b'all()'))
972 data[b'nb-revs-common'] = len(repo.revs(b'::%ln', common))
973 data[b'nb-revs-common'] = len(repo.revs(b'::%ln', common))
973 data[b'nb-revs-missing'] = data[b'nb-revs'] - data[b'nb-revs-common']
974 data[b'nb-revs-missing'] = data[b'nb-revs'] - data[b'nb-revs-common']
974
975
975 # display discovery summary
976 # display discovery summary
976 ui.writenoi18n(b"elapsed time: %(elapsed)f seconds\n" % data)
977 ui.writenoi18n(b"elapsed time: %(elapsed)f seconds\n" % data)
977 ui.writenoi18n(b"heads summary:\n")
978 ui.writenoi18n(b"heads summary:\n")
978 ui.writenoi18n(b" total common heads: %(nb-common)9d\n" % data)
979 ui.writenoi18n(b" total common heads: %(nb-common)9d\n" % data)
979 ui.writenoi18n(b" also local heads: %(nb-common-local)9d\n" % data)
980 ui.writenoi18n(b" also local heads: %(nb-common-local)9d\n" % data)
980 ui.writenoi18n(b" also remote heads: %(nb-common-remote)9d\n" % data)
981 ui.writenoi18n(b" also remote heads: %(nb-common-remote)9d\n" % data)
981 ui.writenoi18n(b" both: %(nb-common-both)9d\n" % data)
982 ui.writenoi18n(b" both: %(nb-common-both)9d\n" % data)
982 ui.writenoi18n(b" local heads: %(nb-local)9d\n" % data)
983 ui.writenoi18n(b" local heads: %(nb-local)9d\n" % data)
983 ui.writenoi18n(b" common: %(nb-common-local)9d\n" % data)
984 ui.writenoi18n(b" common: %(nb-common-local)9d\n" % data)
984 ui.writenoi18n(b" missing: %(nb-local-missing)9d\n" % data)
985 ui.writenoi18n(b" missing: %(nb-local-missing)9d\n" % data)
985 ui.writenoi18n(b" remote heads: %(nb-remote)9d\n" % data)
986 ui.writenoi18n(b" remote heads: %(nb-remote)9d\n" % data)
986 ui.writenoi18n(b" common: %(nb-common-remote)9d\n" % data)
987 ui.writenoi18n(b" common: %(nb-common-remote)9d\n" % data)
987 ui.writenoi18n(b" unknown: %(nb-remote-unknown)9d\n" % data)
988 ui.writenoi18n(b" unknown: %(nb-remote-unknown)9d\n" % data)
988 ui.writenoi18n(b"local changesets: %(nb-revs)9d\n" % data)
989 ui.writenoi18n(b"local changesets: %(nb-revs)9d\n" % data)
989 ui.writenoi18n(b" common: %(nb-revs-common)9d\n" % data)
990 ui.writenoi18n(b" common: %(nb-revs-common)9d\n" % data)
990 ui.writenoi18n(b" missing: %(nb-revs-missing)9d\n" % data)
991 ui.writenoi18n(b" missing: %(nb-revs-missing)9d\n" % data)
991
992
992 if ui.verbose:
993 if ui.verbose:
993 ui.writenoi18n(
994 ui.writenoi18n(
994 b"common heads: %s\n" % b" ".join(sorted(short(n) for n in common))
995 b"common heads: %s\n" % b" ".join(sorted(short(n) for n in common))
995 )
996 )
996
997
997
998
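# 4 KiB (4 << 10) read/write chunk size for the download helper below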
_chunksize = 4 << 10


@command(
    b'debugdownload', [(b'o', b'output', b'', _(b'path')),], optionalrepo=True
)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config
    """
    fh = urlmod.open(ui, url, output)

    dest = ui
    if output:
        dest = open(output, b"wb", _chunksize)
    try:
        data = fh.read(_chunksize)
        while data:
            dest.write(data)
            data = fh.read(_chunksize)
    finally:
        if output:
            dest.close()


@command(b'debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions'''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter(b'debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', b'').split()
        extbuglink = getattr(extmod, 'buglink', None)

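        # annotate each extension name with how its tested-with metadata
        # relates to the running Mercurial version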
        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write(b'name', b'%s\n', extname)
        else:
            fm.write(b'name', b'%s', extname)
            if isinternal or hgver in exttestedwith:
                fm.plain(b'\n')
            elif not exttestedwith:
                fm.plain(_(b' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(b' (%s!)\n' % lasttestedversion)

        fm.condwrite(
            ui.verbose and extsource,
            b'source',
            _(b'  location: %s\n'),
            extsource or b"",
        )

        if ui.verbose:
            fm.plain(_(b'  bundled: %s\n') % [b'no', b'yes'][isinternal])
        fm.data(bundled=isinternal)

        fm.condwrite(
            ui.verbose and exttestedwith,
            b'testedwith',
            _(b'  tested with: %s\n'),
            fm.formatlist(exttestedwith, name=b'ver'),
        )

        fm.condwrite(
            ui.verbose and extbuglink,
            b'buglink',
            _(b'  bug reporting: %s\n'),
            extbuglink or b"",
        )

    fm.end()


@command(
    b'debugfileset',
    [
        (
            b'r',
            b'rev',
            b'',
            _(b'apply the filespec on this revision'),
            _(b'REV'),
        ),
        (
            b'',
            b'all-files',
            False,
            _(b'test files from all revisions and working directory'),
        ),
        (
            b's',
            b'show-matcher',
            None,
            _(b'print internal representation of matcher'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
    ],
    _(b'[-r REV] [--all-files] [OPTION]... FILESPEC'),
)
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification'''
    from . import fileset

    fileset.symbols  # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), None)

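    # the expression passes through up to three stages; --show-stage can
    # dump the intermediate tree after any of them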
    stages = [
        (b'parsed', pycompat.identity),
        (b'analyzed', filesetlang.analyze),
        (b'optimized', filesetlang.optimize),
    ]
    stagenames = set(n for n, f in stages)

    showalways = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(filesetlang.prettyformat(tree), b"\n")

    files = set()
    if opts[b'all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts[b'all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(
            repo.dirstate.walk(
                scmutil.matchall(repo),
                subrepos=list(wctx.substate),
                unknown=True,
                ignored=True,
            )
        )
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(expr)
    if opts[b'show_matcher'] or (opts[b'show_matcher'] is None and ui.verbose):
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write(b"%s\n" % f)


@command(b'debugformat', [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len(b'format-variant'), maxvariantlength)

    def makeformatname(name):
        return b'%s:' + (b' ' * (maxvariantlength - len(name)))

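    # plain (human-readable) output renders booleans as yes/no; templated
    # output keeps the raw values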
    fm = ui.formatter(b'debugformat', opts)
    if fm.isplain():

        def formatvalue(value):
            if util.safehasattr(value, b'startswith'):
                return value
            if value:
                return b'yes'
            else:
                return b'no'

    else:
        formatvalue = pycompat.identity

    fm.plain(b'format-variant')
    fm.plain(b' ' * (maxvariantlength - len(b'format-variant')))
    fm.plain(b' repo')
    if ui.verbose:
        fm.plain(b' config default')
    fm.plain(b'\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        if repovalue != configvalue:
            namelabel = b'formatvariant.name.mismatchconfig'
            repolabel = b'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = b'formatvariant.name.mismatchdefault'
            repolabel = b'formatvariant.repo.mismatchdefault'
        else:
            namelabel = b'formatvariant.name.uptodate'
            repolabel = b'formatvariant.repo.uptodate'

        fm.write(b'name', makeformatname(fv.name), fv.name, label=namelabel)
        fm.write(b'repo', b' %3s', formatvalue(repovalue), label=repolabel)
        if fv.default != configvalue:
            configlabel = b'formatvariant.config.special'
        else:
            configlabel = b'formatvariant.config.default'
        fm.condwrite(
            ui.verbose,
            b'config',
            b' %6s',
            formatvalue(configvalue),
            label=configlabel,
        )
        fm.condwrite(
            ui.verbose,
            b'default',
            b' %7s',
            formatvalue(fv.default),
            label=b'formatvariant.default',
        )
        fm.plain(b'\n')
    fm.end()


@command(b'debugfsinfo', [], _(b'[PATH]'), norepo=True)
def debugfsinfo(ui, path=b"."):
    """show information detected about current filesystem"""
    ui.writenoi18n(b'path: %s\n' % path)
    ui.writenoi18n(
        b'mounted on: %s\n' % (util.getfsmountpoint(path) or b'(unknown)')
    )
    ui.writenoi18n(b'exec: %s\n' % (util.checkexec(path) and b'yes' or b'no'))
    ui.writenoi18n(b'fstype: %s\n' % (util.getfstype(path) or b'(unknown)'))
    ui.writenoi18n(
        b'symlink: %s\n' % (util.checklink(path) and b'yes' or b'no')
    )
    ui.writenoi18n(
        b'hardlink: %s\n' % (util.checknlink(path) and b'yes' or b'no')
    )
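    # probe case sensitivity with a throwaway file; fscasesensitive()
    # checks whether a case-altered spelling of the name still resolves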
    casesensitive = b'(unknown)'
    try:
        with pycompat.namedtempfile(prefix=b'.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and b'yes' or b'no'
    except OSError:
        pass
    ui.writenoi18n(b'case-sensitive: %s\n' % casesensitive)


@command(
    b'debuggetbundle',
    [
        (b'H', b'head', [], _(b'id of head node'), _(b'ID')),
        (b'C', b'common', [], _(b'id of common node'), _(b'ID')),
        (
            b't',
            b'type',
            b'bzip2',
            _(b'bundle compression type to use'),
            _(b'TYPE'),
        ),
    ],
    _(b'REPO FILE [-H|-C ID]...'),
    norepo=True,
)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'getbundle'):
        raise error.Abort(b"getbundle() not supported by target repository")
    args = {}
    if common:
        args['common'] = [bin(s) for s in common]
    if head:
        args['heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args['bundlecaps'] = None
    bundle = repo.getbundle(b'debug', **args)

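    # map the user-facing compression names to internal bundle type ids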
    bundletype = opts.get(b'type', b'bzip2').lower()
    btypes = {
        b'none': b'HG10UN',
        b'bzip2': b'HG10BZ',
        b'gzip': b'HG10GZ',
        b'bundle2': b'HG20',
    }
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_(b'unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)


@command(b'debugignore', [], b'[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write(b"%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != b'.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
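                    # the file itself is not ignored, so walk its parent
                    # directories looking for a matching rule (finddirs()
                    # lives in pathutil, having moved there from util)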
                    for p in pathutil.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_(b"%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(
                        _(
                            b"%s is ignored because of "
                            b"containing directory %s\n"
                        )
                        % (uipathfn(f), ignored)
                    )
                ignorefile, lineno, line = ignoredata
                ui.write(
                    _(b"(ignore rule in %s, line %d: '%s')\n")
                    % (ignorefile, lineno, line)
                )
            else:
                ui.write(_(b"%s is not ignored\n") % uipathfn(f))


@command(
    b'debugindex',
    cmdutil.debugrevlogopts + cmdutil.formatteropts,
    _(b'-c|-m|FILE'),
)
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, b'debugindex', file_, opts)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

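    # probe the first revision to learn how wide the rendered node ids are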
    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

    fm = ui.formatter(b'debugindex', opts)
    fm.plain(
        b'   rev linkrev %s %s p2\n'
        % (b'nodeid'.ljust(idlen), b'p1'.ljust(idlen))
    )

    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', b'%7d ', store.linkrev(rev))
        fm.write(b'node', b'%s ', shortfn(node))
        fm.write(b'p1', b'%s ', shortfn(parents[0]))
        fm.write(b'p2', b'%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()


@command(
    b'debugindexdot',
    cmdutil.debugrevlogopts,
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openstorage(repo, b'debugindexdot', file_, opts)
    ui.writenoi18n(b"digraph G {\n")
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write(b"\t%d -> %d\n" % (r.rev(pp[0]), i))
        if pp[1] != nullid:
            ui.write(b"\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write(b"}\n")


@command(b'debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, b'stats'):
        raise error.Abort(_(b'debugindexstats only works with native code'))
    for k, v in sorted(index.stats().items()):
        ui.write(b'%s: %d\n' % (k, v))


@command(b'debuginstall', [] + cmdutil.formatteropts, b'', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    problems = 0

    fm = ui.formatter(b'debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write(b'encoding', _(b"checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(
        err,
        b'encodingerror',
        _(b" %s\n (check that your locale is properly set)\n"),
        err,
    )

    # Python
    fm.write(
        b'pythonexe',
        _(b"checking Python executable (%s)\n"),
        pycompat.sysexecutable or _(b"unknown"),
    )
    fm.write(
        b'pythonver',
        _(b"checking Python version (%s)\n"),
        (b"%d.%d.%d" % sys.version_info[:3]),
    )
    fm.write(
        b'pythonlib',
        _(b"checking Python lib (%s)...\n"),
        os.path.dirname(pycompat.fsencode(os.__file__)),
    )

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add(b'sni')

    fm.write(
        b'pythonsecurity',
        _(b"checking Python security support (%s)\n"),
        fm.formatlist(sorted(security), name=b'protocol', fmt=b'%s', sep=b','),
    )

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if b'tls1.2' not in security:
        fm.plain(
            _(
                b'  TLS 1.2 not supported by Python install; '
                b'network connections lack modern security\n'
            )
        )
    if b'sni' not in security:
        fm.plain(
            _(
                b'  SNI not supported by Python install; may have '
                b'connectivity issues with some servers\n'
            )
        )

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write(
        b'hgver', _(b"checking Mercurial version (%s)\n"), hgver.split(b'+')[0]
    )
    fm.write(
        b'hgverextra',
        _(b"checking Mercurial custom build (%s)\n"),
        b'+'.join(hgver.split(b'+')[1:]),
    )

    # compiled modules
    fm.write(
        b'hgmodulepolicy', _(b"checking module policy (%s)\n"), policy.policy
    )
    fm.write(
        b'hgmodules',
        _(b"checking installed modules (%s)...\n"),
        os.path.dirname(pycompat.fsencode(__file__)),
    )

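    # the module policy names which implementations may be loaded:
    # pure Python, the C extensions, and/or the Rust extensions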
    rustandc = policy.policy in (b'rust+c', b'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in (b'c', b'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        err = None
        try:
            if cext:
                from .cext import (
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )

                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (
                    ancestor,
                    dirstate,
                )

                dir(ancestor), dir(dirstate)  # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, b'extensionserror', b" %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write(
        b'compengines',
        _(b'checking registered compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    fm.write(
        b'compenginesavail',
        _(b'checking available compression engines (%s)\n'),
        fm.formatlist(
            sorted(e.name() for e in compengines if e.available()),
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE
    )
    fm.write(
        b'compenginesserver',
        _(
            b'checking available compression engines '
            b'for wire protocol (%s)\n'
        ),
        fm.formatlist(
            [e.name() for e in wirecompengines if e.wireprotosupport()],
            name=b'compengine',
            fmt=b'%s',
            sep=b', ',
        ),
    )
    re2 = b'missing'
    if util._re2:
        re2 = b'available'
    fm.plain(_(b'checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write(b'templatedirs', b'checking templates (%s)...\n', b' '.join(p))
    fm.condwrite(not p, b'', _(b" no template directories found\n"))
    if p:
        m = templater.templatepath(b"map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, b'defaulttemplateerror', b" %s\n", err)
        else:
            p = None
        fm.condwrite(
            p, b'defaulttemplate', _(b"checking default template (%s)\n"), m
        )
        fm.condwrite(
            not m,
            b'defaulttemplatenotfound',
            _(b" template '%s' not found\n"),
            b"default",
        )
    if not p:
        problems += 1
    fm.condwrite(
        not p, b'', _(b" (templates seem to have been installed incorrectly)\n")
    )

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write(b'editor', _(b"checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    fm.condwrite(
        not cmdpath and editor == b'vi',
        b'vinotfound',
        _(
            b" No commit editor set and can't find %s in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editor == b'vi' and editorbin,
    )
    fm.condwrite(
        not cmdpath and editor != b'vi',
        b'editornotfound',
        _(
            b" Can't find editor '%s' in PATH\n"
            b" (specify a commit editor in your configuration"
            b" file)\n"
        ),
        not cmdpath and editorbin,
    )
    if not cmdpath and editor != b'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = stringutil.forcebytestr(e)
        problems += 1

    fm.condwrite(
        username, b'username', _(b"checking username (%s)\n"), username
    )
    fm.condwrite(
        err,
        b'usernameerror',
        _(
            b"checking username...\n %s\n"
            b" (specify a username in your configuration file)\n"
        ),
        err,
    )

    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, b'', _(b"no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(
        problems,
        b'problems',
        _(b"%d problems detected, please check your install!\n"),
        problems,
    )
    fm.end()

    return problems


@command(b'debugknown', [], _(b'REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable(b'known'):
        raise error.Abort(b"known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write(b"%s\n" % (b"".join([f and b"1" or b"0" for f in flags])))


@command(b'debuglabelcomplete', [], _(b'LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)


@command(
    b'debuglocks',
    [
        (b'L', b'force-lock', None, _(b'free the store lock (DANGEROUS)')),
        (
            b'W',
            b'force-wlock',
            None,
            _(b'free the working state lock (DANGEROUS)'),
        ),
        (b's', b'set-lock', None, _(b'set the store lock until stopped')),
        (
            b'S',
            b'set-wlock',
            None,
            _(b'set the working state lock until stopped'),
        ),
    ],
    _(b'[OPTION]...'),
)
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    if opts.get('force_lock'):
        repo.svfs.unlink(b'lock')
    if opts.get('force_wlock'):
        repo.vfs.unlink(b'wlock')
    if opts.get('force_lock') or opts.get('force_wlock'):
        return 0

    locks = []
    try:
        if opts.get('set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_(b'wlock is already held'))
        if opts.get('set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_(b'lock is already held'))
        if len(locks):
            ui.promptchoice(_(b"ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
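                # a lock file records its owner as "host:pid"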
1821 if b":" in locker:
1822 if b":" in locker:
1822 host, pid = locker.split(b':')
1823 host, pid = locker.split(b':')
1823 if host == socket.gethostname():
1824 if host == socket.gethostname():
1824 locker = b'user %s, process %s' % (user or b'None', pid)
1825 locker = b'user %s, process %s' % (user or b'None', pid)
1825 else:
1826 else:
1826 locker = b'user %s, process %s, host %s' % (
1827 locker = b'user %s, process %s, host %s' % (
1827 user or b'None',
1828 user or b'None',
1828 pid,
1829 pid,
1829 host,
1830 host,
1830 )
1831 )
1831 ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
1832 ui.writenoi18n(b"%-6s %s (%ds)\n" % (name + b":", locker, age))
1832 return 1
1833 return 1
1833 except OSError as e:
1834 except OSError as e:
1834 if e.errno != errno.ENOENT:
1835 if e.errno != errno.ENOENT:
1835 raise
1836 raise
1836
1837
1837 ui.writenoi18n(b"%-6s free\n" % (name + b":"))
1838 ui.writenoi18n(b"%-6s free\n" % (name + b":"))
1838 return 0
1839 return 0
1839
1840
1840 held += report(repo.svfs, b"lock", repo.lock)
1841 held += report(repo.svfs, b"lock", repo.lock)
1841 held += report(repo.vfs, b"wlock", repo.wlock)
1842 held += report(repo.vfs, b"wlock", repo.wlock)
1842
1843
1843 return held
1844 return held
1844
1845
1845
1846
1846 @command(
1847 @command(
1847 b'debugmanifestfulltextcache',
1848 b'debugmanifestfulltextcache',
1848 [
1849 [
1849 (b'', b'clear', False, _(b'clear the cache')),
1850 (b'', b'clear', False, _(b'clear the cache')),
1850 (
1851 (
1851 b'a',
1852 b'a',
1852 b'add',
1853 b'add',
1853 [],
1854 [],
1854 _(b'add the given manifest nodes to the cache'),
1855 _(b'add the given manifest nodes to the cache'),
1855 _(b'NODE'),
1856 _(b'NODE'),
1856 ),
1857 ),
1857 ],
1858 ],
1858 b'',
1859 b'',
1859 )
1860 )
1860 def debugmanifestfulltextcache(ui, repo, add=(), **opts):
1861 def debugmanifestfulltextcache(ui, repo, add=(), **opts):
1861 """show, clear or amend the contents of the manifest fulltext cache"""
1862 """show, clear or amend the contents of the manifest fulltext cache"""
1862
1863
1863 def getcache():
1864 def getcache():
1864 r = repo.manifestlog.getstorage(b'')
1865 r = repo.manifestlog.getstorage(b'')
1865 try:
1866 try:
1866 return r._fulltextcache
1867 return r._fulltextcache
1867 except AttributeError:
1868 except AttributeError:
1868 msg = _(
1869 msg = _(
1869 b"Current revlog implementation doesn't appear to have a "
1870 b"Current revlog implementation doesn't appear to have a "
1870 b"manifest fulltext cache\n"
1871 b"manifest fulltext cache\n"
1871 )
1872 )
1872 raise error.Abort(msg)
1873 raise error.Abort(msg)
1873
1874
1874 if opts.get('clear'):
1875 if opts.get('clear'):
1875 with repo.wlock():
1876 with repo.wlock():
1876 cache = getcache()
1877 cache = getcache()
1877 cache.clear(clear_persisted_data=True)
1878 cache.clear(clear_persisted_data=True)
1878 return
1879 return
1879
1880
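    # reading a manifest populates the fulltext cache as a side effect, so
    # --add only needs to look up and read each requested node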
    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(e, hint=b"Check your manifest node id")
                manifest.read()  # stores revision in cache too
            return

    cache = getcache()
    if not len(cache):
        ui.write(_(b'cache empty\n'))
    else:
        ui.write(
            _(
                b'cache contains %d manifest entries, in order of most to '
                b'least recent:\n'
            )
            % (len(cache),)
        )
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24  # 20 bytes nodeid, 4 bytes size
            ui.write(
                _(b'id: %s, size %s\n') % (hex(nodeid), util.bytecount(size))
            )
        ondisk = cache._opener.stat(b'manifestfulltextcache').st_size
        ui.write(
            _(b'total cache data size %s, on-disk %s\n')
            % (util.bytecount(totalsize), util.bytecount(ondisk))
        )


@command(b'debugmergestate', [], b'')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""

    def _hashornull(h):
        if h == nullhex:
            return b'null'
        else:
            return h

    def printrecords(version):
        ui.writenoi18n(b'* version %d records\n' % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

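        # record types seen below: L=local node, O=other node, m=merge
        # driver, F/D/C=per-file merge records, f=file extras, l=labels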
1939 for rtype, record in records:
1940 for rtype, record in records:
1940 # pretty print some record types
1941 # pretty print some record types
1941 if rtype == b'L':
1942 if rtype == b'L':
1942 ui.writenoi18n(b'local: %s\n' % record)
1943 ui.writenoi18n(b'local: %s\n' % record)
1943 elif rtype == b'O':
1944 elif rtype == b'O':
1944 ui.writenoi18n(b'other: %s\n' % record)
1945 ui.writenoi18n(b'other: %s\n' % record)
1945 elif rtype == b'm':
1946 elif rtype == b'm':
1946 driver, mdstate = record.split(b'\0', 1)
1947 driver, mdstate = record.split(b'\0', 1)
1947 ui.writenoi18n(
1948 ui.writenoi18n(
1948 b'merge driver: %s (state "%s")\n' % (driver, mdstate)
1949 b'merge driver: %s (state "%s")\n' % (driver, mdstate)
1949 )
1950 )
1950 elif rtype in b'FDC':
1951 elif rtype in b'FDC':
1951 r = record.split(b'\0')
1952 r = record.split(b'\0')
1952 f, state, hash, lfile, afile, anode, ofile = r[0:7]
1953 f, state, hash, lfile, afile, anode, ofile = r[0:7]
1953 if version == 1:
1954 if version == 1:
1954 onode = b'not stored in v1 format'
1955 onode = b'not stored in v1 format'
1955 flags = r[7]
1956 flags = r[7]
1956 else:
1957 else:
1957 onode, flags = r[7:9]
1958 onode, flags = r[7:9]
1958 ui.writenoi18n(
1959 ui.writenoi18n(
1959 b'file: %s (record type "%s", state "%s", hash %s)\n'
1960 b'file: %s (record type "%s", state "%s", hash %s)\n'
1960 % (f, rtype, state, _hashornull(hash))
1961 % (f, rtype, state, _hashornull(hash))
1961 )
1962 )
1962 ui.writenoi18n(
1963 ui.writenoi18n(
1963 b' local path: %s (flags "%s")\n' % (lfile, flags)
1964 b' local path: %s (flags "%s")\n' % (lfile, flags)
1964 )
1965 )
1965 ui.writenoi18n(
1966 ui.writenoi18n(
1966 b' ancestor path: %s (node %s)\n'
1967 b' ancestor path: %s (node %s)\n'
1967 % (afile, _hashornull(anode))
1968 % (afile, _hashornull(anode))
1968 )
1969 )
1969 ui.writenoi18n(
1970 ui.writenoi18n(
1970 b' other path: %s (node %s)\n'
1971 b' other path: %s (node %s)\n'
1971 % (ofile, _hashornull(onode))
1972 % (ofile, _hashornull(onode))
1972 )
1973 )
1973 elif rtype == b'f':
1974 elif rtype == b'f':
1974 filename, rawextras = record.split(b'\0', 1)
1975 filename, rawextras = record.split(b'\0', 1)
1975 extras = rawextras.split(b'\0')
1976 extras = rawextras.split(b'\0')
1976 i = 0
1977 i = 0
1977 extrastrings = []
1978 extrastrings = []
1978 while i < len(extras):
1979 while i < len(extras):
1979 extrastrings.append(b'%s = %s' % (extras[i], extras[i + 1]))
1980 extrastrings.append(b'%s = %s' % (extras[i], extras[i + 1]))
1980 i += 2
1981 i += 2
1981
1982
1982 ui.writenoi18n(
1983 ui.writenoi18n(
1983 b'file extras: %s (%s)\n'
1984 b'file extras: %s (%s)\n'
1984 % (filename, b', '.join(extrastrings))
1985 % (filename, b', '.join(extrastrings))
1985 )
1986 )
1986 elif rtype == b'l':
1987 elif rtype == b'l':
1987 labels = record.split(b'\0', 2)
1988 labels = record.split(b'\0', 2)
1988 labels = [l for l in labels if len(l) > 0]
1989 labels = [l for l in labels if len(l) > 0]
1989 ui.writenoi18n(b'labels:\n')
1990 ui.writenoi18n(b'labels:\n')
1990 ui.write((b' local: %s\n' % labels[0]))
1991 ui.write((b' local: %s\n' % labels[0]))
1991 ui.write((b' other: %s\n' % labels[1]))
1992 ui.write((b' other: %s\n' % labels[1]))
1992 if len(labels) > 2:
1993 if len(labels) > 2:
1993 ui.write((b' base: %s\n' % labels[2]))
1994 ui.write((b' base: %s\n' % labels[2]))
1994 else:
1995 else:
1995 ui.writenoi18n(
1996 ui.writenoi18n(
1996 b'unrecognized entry: %s\t%s\n'
1997 b'unrecognized entry: %s\t%s\n'
1997 % (rtype, record.replace(b'\0', b'\t'))
1998 % (rtype, record.replace(b'\0', b'\t'))
1998 )
1999 )
1999
2000
    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since
    # this command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = b'LOml'

    def key(r):
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
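
    # Worked example of the sort key (illustrative records): with
    # order = b'LOml',
    #   key((b'L', b'<node>')) -> (0, 0)
    #   key((b'm', b'<data>')) -> (0, 3)
    #   key((b'X', b'<data>')) -> (1, b'<data>')  # unknown types sort last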

    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.writenoi18n(b'no merge state found\n')
    elif not v2records:
        ui.notenoi18n(b'no version 2 merge state\n')
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.notenoi18n(b'v1 and v2 states match: using v2\n')
        printrecords(2)
    else:
        ui.notenoi18n(b'v1 and v2 states mismatch: using v1\n')
        printrecords(1)
        if ui.verbose:
            printrecords(2)


@command(b'debugnamecomplete', [], _(b'NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in pycompat.iteritems(repo.names):
        if name != b'branches':
            names.update(ns.listnames(repo))
    names.update(
        tag
        for (tag, heads, tip, closed) in repo.branchmap().iterbranches()
        if not closed
    )
    completions = set()
    if not args:
        args = [b'']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write(b'\n'.join(sorted(completions)))
    ui.write(b'\n')


@command(
    b'debugobsolete',
    [
        (b'', b'flags', 0, _(b'markers flag')),
        (
            b'',
            b'record-parents',
            False,
            _(b'record parent information for the precursor'),
        ),
        (b'r', b'rev', [], _(b'display markers relevant to REV')),
        (
            b'',
            b'exclusive',
            False,
            _(b'restrict display to markers only relevant to REV'),
        ),
        (b'', b'index', False, _(b'display index of the marker')),
        (b'', b'delete', [], _(b'delete markers specified by indices')),
    ]
    + cmdutil.commitopts2
    + cmdutil.formatteropts,
    _(b'[OBSOLETED [REPLACEMENT ...]]'),
)
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort(
                b'changeset references must be full hexadecimal '
                b'node identifiers'
            )
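
    # Note: parsenodeid() only accepts a full 40-digit hex nodeid -- bin(s)
    # must decode to exactly len(nullid) == 20 bytes -- so short prefixes
    # that revsingle() would happily resolve are rejected here.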

    if opts.get(b'delete'):
        indices = []
        for v in opts.get(b'delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(
                    _(b'invalid index value: %r') % v,
                    hint=_(b'use integers for indices'),
                )

        if repo.currenttransaction():
            raise error.Abort(
                _(b'cannot delete obsmarkers in the middle of a transaction')
            )

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_(b'deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        if opts[b'rev']:
            raise error.Abort(b'cannot select revision when creating marker')
        metadata = {}
        metadata[b'user'] = encoding.fromlocal(opts[b'user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction(b'debugobsolete')
            try:
                date = opts.get(b'date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts[b'record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort(
                            b'cannot use --record-parents on '
                            b'unknown changesets'
                        )
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(
                    tr,
                    prec,
                    succs,
                    opts[b'flags'],
                    parents=parents,
                    date=date,
                    metadata=metadata,
                    ui=ui,
                )
                tr.close()
            except ValueError as exc:
                raise error.Abort(
                    _(b'bad obsmarker input: %s') % pycompat.bytestr(exc)
                )
            finally:
                tr.release()
        finally:
            l.release()
    else:
        if opts[b'rev']:
            revs = scmutil.revrange(repo, opts[b'rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(
                obsutil.getmarkers(
                    repo, nodes=nodes, exclusive=opts[b'exclusive']
                )
            )
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get(b'rev') and opts.get(b'index'):
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter(b'debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # A marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set of
                # markers we want to display (markers). This can happen if
                # both --index and --rev options are provided, because then
                # we need to iterate over all of the markers to get the
                # correct indices, but we only display the ones that are
                # relevant to the --rev value.
                continue
            fm.startitem()
            ind = i if opts.get(b'index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()


@command(
    b'debugp1copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p1copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


@command(
    b'debugp2copies',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV]'),
)
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write(b'%s -> %s\n' % (src, dst))


@command(
    b'debugpathcomplete',
    [
        (b'f', b'full', None, _(b'complete an entire path')),
        (b'n', b'normal', None, _(b'show only normal files')),
        (b'a', b'added', None, _(b'show only added files')),
        (b'r', b'removed', None, _(b'show only removed files')),
    ],
    _(b'FILESPEC...'),
)
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            return [], []
        if os.path.isdir(spec):
            spec += b'/'
        spec = spec[len(rootdir) :]
        fixpaths = pycompat.ossep != b'/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, b'/')
        speclen = len(spec)
        fullpaths = opts['full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in pycompat.iteritems(dirstate):
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace(b'/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs
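
    # Worked example (hypothetical dirstate): completing b'li' against
    # tracked files {b'lib/a.py', b'lib/b.py', b'list.txt'} returns
    # dirs == {b'lib'} and files == {b'list.txt'}; with --full, all three
    # complete paths land in 'files' instead, since matches are not split
    # at the next path separator.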

    acceptable = b''
    if opts['normal']:
        acceptable += b'nm'
    if opts['added']:
        acceptable += b'a'
    if opts['removed']:
        acceptable += b'r'
    cwd = repo.getcwd()
    if not specs:
        specs = [b'.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or b'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write(b'\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write(b'\n')


@command(
    b'debugpathcopies',
    cmdutil.walkopts,
    b'hg debugpathcopies REV1 REV2 [FILE]',
    inferrepo=True,
)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    ctx1 = scmutil.revsingle(repo, rev1)
    ctx2 = scmutil.revsingle(repo, rev2)
    m = scmutil.match(ctx1, pats, opts)
    for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
        ui.write(b'%s -> %s\n' % (src, dst))


@command(b'debugpeer', [], _(b'PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging; it still requires --debug to be
    # displayed, though.
    overrides = {
        (b'devel', b'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        local = peer.local() is not None
        canpush = peer.canpush()

        ui.write(_(b'url: %s\n') % peer.url())
        ui.write(_(b'local: %s\n') % (_(b'yes') if local else _(b'no')))
        ui.write(_(b'pushable: %s\n') % (_(b'yes') if canpush else _(b'no')))


@command(
    b'debugpickmergetool',
    [
        (b'r', b'rev', b'', _(b'check for files in this revision'), _(b'REV')),
        (b'', b'changedelete', None, _(b'emulate merging change and delete')),
    ]
    + cmdutil.walkopts
    + cmdutil.mergetoolopts,
    _(b'[PATTERN]...'),
    inferrepo=True,
)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for the specified file

    As described in :hg:`help merge-tools`, Mercurial examines the
    configurations below in this order to decide which merge tool is
    chosen for the specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reasons only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out the examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actually updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases the amount of output per file
    according to the configurations in hgrc.

    With -v/--verbose, this command shows the configurations below
    first (only if they are specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If a merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such a case, the information
    above is useful for knowing why a merge tool was chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts[b'tool']:
        overrides[(b'ui', b'forcemerge')] = opts[b'tool']
        ui.notenoi18n(b'with --tool %r\n' % (pycompat.bytestr(opts[b'tool'])))

    with ui.configoverride(overrides, b'debugmergepatterns'):
        hgmerge = encoding.environ.get(b"HGMERGE")
        if hgmerge is not None:
            ui.notenoi18n(b'with HGMERGE=%r\n' % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config(b"ui", b"merge")
        if uimerge:
            ui.notenoi18n(b'with ui.merge=%r\n' % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get(b'rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts[b'changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(
                    repo,
                    ui,
                    path,
                    fctx.isbinary(),
                    b'l' in fctx.flags(),
                    changedelete,
                )
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(b'%s = %s\n' % (path, tool))


@command(b'debugpushkey', [], _(b'REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if keyinfo:
        key, old, new = keyinfo
        with target.commandexecutor() as e:
            r = e.callcommand(
                b'pushkey',
                {
                    b'namespace': namespace,
                    b'key': key,
                    b'old': old,
                    b'new': new,
                },
            ).result()

        ui.status(pycompat.bytestr(r) + b'\n')
        return not r
    else:
        for k, v in sorted(pycompat.iteritems(target.listkeys(namespace))):
            ui.write(
                b"%s\t%s\n" % (stringutil.escapestr(k), stringutil.escapestr(v))
            )

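
# Illustrative invocations (the repository path and namespace names here
# are examples, not taken from this file): 'hg debugpushkey /path/to/repo
# namespaces' would list the pushkey namespaces a peer exposes, while
# 'hg debugpushkey . bookmarks' would print each bookmark key with its node.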

@command(b'debugpvec', [], _(b'A B'))
def debugpvec(ui, repo, a, b=None):
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = b"="
    elif pa > pb:
        rel = b">"
    elif pa < pb:
        rel = b"<"
    elif pa | pb:
        rel = b"|"
    ui.write(_(b"a: %s\n") % pa)
    ui.write(_(b"b: %s\n") % pb)
    ui.write(_(b"depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(
        _(b"delta: %d hdist: %d distance: %d relation: %s\n")
        % (
            abs(pa._depth - pb._depth),
            pvec._hamming(pa._vec, pb._vec),
            pa.distance(pb),
            rel,
        )
    )


@command(
    b'debugrebuilddirstate|debugrebuildstate',
    [
        (b'r', b'rev', b'', _(b'revision to rebuild to'), _(b'REV')),
        (
            b'',
            b'minimal',
            None,
            _(
                b'only rebuild files that are inconsistent with '
                b'the working copy parent'
            ),
        ),
    ],
    _(b'[-r REV]'),
)
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim
    to be tracked but are not in the parent manifest, or that exist in the
    parent manifest but are not in the dirstate. It will not change adds,
    removes, or modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get('minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            dsnotadded = set(f for f in dsonly if dirstate[f] != b'a')
            changedfiles = manifestonly | dsnotadded
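
            # Worked example (illustrative file names): with manifest files
            # {b'a', b'b'} and dirstate files {b'b', b'c', b'd'} where b'd'
            # is in state 'a' (added), manifestonly == {b'a'},
            # dsonly == {b'c', b'd'} and dsnotadded == {b'c'}, so only
            # {b'a', b'c'} are rebuilt and the pending add of b'd' survives.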

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)


@command(b'debugrebuildfncache', [], b'')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    repair.rebuildfncache(ui, repo)


@command(
    b'debugrename',
    [(b'r', b'rev', b'', _(b'revision to debug'), _(b'REV'))],
    _(b'[-r REV] [FILE]...'),
)
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get(b'rev'))
    m = scmutil.match(ctx, pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        o = fctx.filelog().renamed(fctx.filenode())
        rel = repo.pathto(abs)
        if o:
            ui.write(_(b"%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_(b"%s not renamed\n") % rel)


@command(
    b'debugrevlog',
    cmdutil.debugrevlogopts + [(b'd', b'dump', False, _(b'dump index data'))],
    _(b'-c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlog', file_, opts)

    if opts.get(b"dump"):
        numrevs = len(r)
        ui.write(
            (
                b"# rev p1rev p2rev start   end deltastart base   p1   p2"
                b" rawsize totalsize compression heads chainlen\n"
            )
        )
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write(
                b"%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                b"%11d %5d %8d\n"
                % (
                    rev,
                    p1,
                    p2,
                    r.start(rev),
                    r.end(rev),
                    r.start(dbase),
                    r.start(cbase),
                    r.start(p1),
                    r.start(p2),
                    rs,
                    ts,
                    compression,
                    len(heads),
                    clen,
                )
            )
        return 0

    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append(b'inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append(b'generaldelta')
    if not flags:
        flags = [b'(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks the ways the "delta" is built
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # deltas against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about the delta chain of each rev
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size
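
    # Illustrative note: addsize() maintains a running [min, max, total]
    # triple, e.g. feeding sizes 5, 2 and 9 into l = [None, 0, 0] leaves
    # l == [2, 9, 16].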

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1
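
        # Worked classification example (illustrative): a delta built
        # against rev - 1 counts in numprev, and additionally in nump1prev
        # or nump2prev when that previous rev is also p1 or p2; deltas
        # built directly against p1 or p2 (but not prev) land in nump1 or
        # nump2, and everything else in numother.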

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, b'_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            chunktype = bytes(segment[0:1])
        else:
            chunktype = b'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    if numfull == 0:
        fullsize[2] = 0
    else:
        fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
        for depth in snapsizedepth:
            snaptotal[depth] = snapsizedepth[depth][2]
            snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    basedfmtstr = b'%%%dd\n'
    basepcfmtstr = b'%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        return basedfmtstr % len(str(max))

    def pcfmtstr(max, padding=0):
        return basepcfmtstr % (len(str(max)), b' ' * padding)

    def pcfmt(value, total):
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0
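
    # Worked example (illustrative sizes): with totalsize == 917,
    # dfmtstr(917) is b'%3d\n' and pcfmtstr(917) is b'%3d (%5.2f%%)\n',
    # so pcfmt(5, 917) == (5, 0.545...) renders as '  5 ( 0.55%)'.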

    ui.writenoi18n(b'format : %d\n' % format)
    ui.writenoi18n(b'flags  : %s\n' % b', '.join(flags))

    ui.write(b'\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
    ui.writenoi18n(b'    merges    : ' + fmt % pcfmt(nummerges, numrevs))
    ui.writenoi18n(
        b'    normal    : ' + fmt % pcfmt(numrevs - nummerges, numrevs)
    )
    ui.writenoi18n(b'revisions     : ' + fmt2 % numrevs)
    ui.writenoi18n(b'    empty     : ' + fmt % pcfmt(numempty, numrevs))
    ui.writenoi18n(
        b'                   text  : '
        + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b'                   delta : '
        + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)
    )
    ui.writenoi18n(
        b'    snapshot  : ' + fmt % pcfmt(numfull + numsemi, numrevs)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b'      lvl-%-3d : ' % depth)
            + fmt % pcfmt(numsnapdepth[depth], numrevs)
        )
    ui.writenoi18n(b'    deltas    : ' + fmt % pcfmt(numdeltas, numrevs))
    ui.writenoi18n(b'revision size : ' + fmt2 % totalsize)
    ui.writenoi18n(
        b'    snapshot  : ' + fmt % pcfmt(fulltotal + semitotal, totalsize)
    )
    for depth in sorted(numsnapdepth):
        ui.write(
            (b'      lvl-%-3d : ' % depth)
            + fmt % pcfmt(snaptotal[depth], totalsize)
        )
    ui.writenoi18n(b'    deltas    : ' + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        if chunktype == b'empty':
            return b'    %s     : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return b'    0x%s (%s)  : ' % (hex(chunktype), chunktype)
        else:
            return b'    0x%s      : ' % hex(chunktype)
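
    # Illustrative note: a chunk type is the first byte of a raw revlog
    # chunk, so fmtchunktype(b'u') labels uncompressed chunks '0x75 (u)'
    # and fmtchunktype(b'x') labels zlib-compressed chunks '0x78 (x)'.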

    ui.write(b'\n')
    ui.writenoi18n(b'chunks        : ' + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.writenoi18n(b'chunks size   : ' + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write(b'\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.writenoi18n(b'avg chain length  : ' + fmt % avgchainlen)
    ui.writenoi18n(b'max chain length  : ' + fmt % maxchainlen)
    ui.writenoi18n(b'max chain reach   : ' + fmt % maxchainspan)
    ui.writenoi18n(b'compression ratio : ' + fmt % compratio)

    if format > 0:
        ui.write(b'\n')
        ui.writenoi18n(
            b'uncompressed data size (min/max/avg) : %d / %d / %d\n'
            % tuple(datasize)
        )
        ui.writenoi18n(
            b'full revision size (min/max/avg)     : %d / %d / %d\n'
            % tuple(fullsize)
        )
        ui.writenoi18n(
            b'inter-snapshot size (min/max/avg)    : %d / %d / %d\n'
            % tuple(semisize)
        )
        for depth in sorted(snapsizedepth):
            if depth == 0:
                continue
            ui.writenoi18n(
                b'    level-%-3d (min/max/avg)          : %d / %d / %d\n'
                % ((depth,) + tuple(snapsizedepth[depth]))
            )
        ui.writenoi18n(
            b'delta size (min/max/avg)             : %d / %d / %d\n'
            % tuple(deltasize)
        )

    if numdeltas > 0:
        ui.write(b'\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.writenoi18n(
            b'deltas against prev  : ' + fmt % pcfmt(numprev, numdeltas)
        )
        if numprev > 0:
            ui.writenoi18n(
                b'    where prev = p1  : ' + fmt2 % pcfmt(nump1prev, numprev)
            )
            ui.writenoi18n(
                b'    where prev = p2  : ' + fmt2 % pcfmt(nump2prev, numprev)
            )
            ui.writenoi18n(
                b'    other            : ' + fmt2 % pcfmt(numoprev, numprev)
            )
        if gdelta:
            ui.writenoi18n(
                b'deltas against p1    : ' + fmt % pcfmt(nump1, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against p2    : ' + fmt % pcfmt(nump2, numdeltas)
            )
            ui.writenoi18n(
                b'deltas against other : ' + fmt % pcfmt(numother, numdeltas)
            )


@command(
    b'debugrevlogindex',
    cmdutil.debugrevlogopts
    + [(b'f', b'format', 0, _(b'revlog format'), _(b'FORMAT'))],
    _(b'[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True,
)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, b'debugrevlogindex', file_, opts)
    format = opts.get(b'format', 0)
    if format not in (0, 1):
        raise error.Abort(_(b"unknown format %d") % format)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        if ui.verbose:
            ui.writenoi18n(
                b"   rev    offset  length linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
        else:
            ui.writenoi18n(
                b"   rev linkrev %s %s p2\n"
                % (b"nodeid".ljust(idlen), b"p1".ljust(idlen))
            )
    elif format == 1:
        if ui.verbose:
            ui.writenoi18n(
                (
                    b"   rev flag   offset   length     size   link     p1"
                    b"     p2 %s\n"
                )
                % b"nodeid".rjust(idlen)
            )
        else:
            ui.writenoi18n(
                b"   rev flag     size   link     p1     p2 %s\n"
                % b"nodeid".rjust(idlen)
            )

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write(
                    b"% 6d % 9d % 7d % 7d %s %s %s\n"
                    % (
                        i,
                        r.start(i),
                        r.length(i),
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
            else:
                ui.write(
                    b"% 6d % 7d %s %s %s\n"
                    % (
                        i,
                        r.linkrev(i),
                        shortfn(node),
                        shortfn(pp[0]),
                        shortfn(pp[1]),
                    )
                )
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write(
                    b"% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.start(i),
                        r.length(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )
            else:
                ui.write(
                    b"% 6d %04x % 8d % 6d % 6d % 6d %s\n"
                    % (
                        i,
                        r.flags(i),
                        r.rawsize(i),
                        r.linkrev(i),
                        pr[0],
                        pr[1],
                        shortfn(node),
                    )
                )


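# Illustrative usage sketch for debugrevlogindex (assumed invocation; the
# exact columns come from the format strings above, and widths track the
# node-id length):
#
#   $ hg debugrevlogindex --format 1 -m
#      rev flag     size   link     p1     p2 nodeid
#        0 0000      187      0     -1     -1 8b89697eba2c
#
# With the default --format 0, the offset and length columns appear only
# when --verbose is also given.

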
@command(
    b'debugrevspec',
    [
        (
            b'',
            b'optimize',
            None,
            _(b'print parsed tree after optimizing (DEPRECATED)'),
        ),
        (
            b'',
            b'show-revs',
            True,
            _(b'print list of result revisions (default)'),
        ),
        (
            b's',
            b'show-set',
            None,
            _(b'print internal representation of result set'),
        ),
        (
            b'p',
            b'show-stage',
            [],
            _(b'print parsed tree at the given stage'),
            _(b'NAME'),
        ),
        (b'', b'no-optimized', False, _(b'evaluate tree without optimization')),
        (b'', b'verify-optimized', False, _(b'verify optimized result')),
    ],
    b'REVSPEC',
)
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems(b'revsetalias')
    stages = [
        (b'parsed', lambda tree: tree),
        (
            b'expanded',
            lambda tree: revsetlang.expandaliases(tree, aliases, ui.warn),
        ),
        (b'concatenated', revsetlang.foldconcat),
        (b'analyzed', revsetlang.analyze),
        (b'optimized', revsetlang.optimize),
    ]
    if opts[b'no_optimized']:
        stages = stages[:-1]
    if opts[b'verify_optimized'] and opts[b'no_optimized']:
        raise error.Abort(
            _(b'cannot use --verify-optimized with --no-optimized')
        )
    stagenames = set(n for n, f in stages)

    showalways = set()
    showchanged = set()
    if ui.verbose and not opts[b'show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add(b'parsed')
        showchanged.update([b'expanded', b'concatenated'])
    if opts[b'optimize']:
        showalways.add(b'optimized')
    if opts[b'show_stage'] and opts[b'optimize']:
        raise error.Abort(_(b'cannot use --optimize with --show-stage'))
    if opts[b'show_stage'] == [b'all']:
        showalways.update(stagenames)
    else:
        for n in opts[b'show_stage']:
            if n not in stagenames:
                raise error.Abort(_(b'invalid stage name: %s') % n)
        showalways.update(opts[b'show_stage'])

    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts[b'show_stage'] or n != b'parsed':
                ui.write(b"* %s:\n" % n)
            ui.write(revsetlang.prettyformat(tree), b"\n")
            printedtree = tree

    if opts[b'verify_optimized']:
        arevs = revset.makematcher(treebystage[b'analyzed'])(repo)
        brevs = revset.makematcher(treebystage[b'optimized'])(repo)
        if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
            ui.writenoi18n(
                b"* analyzed set:\n", stringutil.prettyrepr(arevs), b"\n"
            )
            ui.writenoi18n(
                b"* optimized set:\n", stringutil.prettyrepr(brevs), b"\n"
            )
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        ui.writenoi18n(b'--- analyzed\n', label=b'diff.file_a')
        ui.writenoi18n(b'+++ optimized\n', label=b'diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in ('delete', 'replace'):
                for c in arevs[alo:ahi]:
                    ui.write(b'-%d\n' % c, label=b'diff.deleted')
            if tag in ('insert', 'replace'):
                for c in brevs[blo:bhi]:
                    ui.write(b'+%d\n' % c, label=b'diff.inserted')
            if tag == 'equal':
                for c in arevs[alo:ahi]:
                    ui.write(b' %d\n' % c)
        return 1

    func = revset.makematcher(tree)
    revs = func(repo)
    if opts[b'show_set'] or (opts[b'show_set'] is None and ui.verbose):
        ui.writenoi18n(b"* set:\n", stringutil.prettyrepr(revs), b"\n")
    if not opts[b'show_revs']:
        return
    for c in revs:
        ui.write(b"%d\n" % c)


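# Illustrative usage sketch for debugrevspec: print the tree at every parser
# stage plus the resulting revisions. The trees are rendered by
# revsetlang.prettyformat(), so their exact shape follows that helper; the
# output below is only a sketch:
#
#   $ hg debugrevspec -p all 'heads(public())'
#   * parsed:
#   (func
#     (symbol 'heads')
#     (func
#       (symbol 'public')
#       None))
#   ...
#   0

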
@command(
    b'debugserve',
    [
        (
            b'',
            b'sshstdio',
            False,
            _(b'run an SSH server bound to process handles'),
        ),
        (b'', b'logiofd', b'', _(b'file descriptor to log server I/O to')),
        (b'', b'logiofile', b'', _(b'file to log server I/O to')),
    ],
    b'',
)
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts[b'sshstdio']:
        raise error.Abort(_(b'only --sshstdio is currently supported'))

    logfh = None

    if opts[b'logiofd'] and opts[b'logiofile']:
        raise error.Abort(_(b'cannot use both --logiofd and --logiofile'))

    if opts[b'logiofd']:
        # Line buffered because output is line based.
        try:
            logfh = os.fdopen(int(opts[b'logiofd']), 'ab', 1)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(int(opts[b'logiofd']), 'wb', 1)
    elif opts[b'logiofile']:
        logfh = open(opts[b'logiofile'], b'ab', 1)

    s = wireprotoserver.sshserver(ui, repo, logfh=logfh)
    s.serve_forever()


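# Illustrative usage sketch for debugserve (flags as defined above): serve
# the current repository over stdio while logging the wire I/O to a
# hypothetical path:
#
#   $ hg debugserve --sshstdio --logiofile /tmp/hg-server-io.log

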
@command(b'debugsetparents', [], _(b'REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """

    node1 = scmutil.revsingle(repo, rev1).node()
    node2 = scmutil.revsingle(repo, rev2, b'null').node()

    with repo.wlock():
        repo.setparents(node1, node2)


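# Illustrative usage sketch for debugsetparents; REV2 falls back to 'null'
# when omitted, so the second form drops any second parent. The revision
# names here are hypothetical:
#
#   $ hg debugsetparents tip somebranchhead
#   $ hg debugsetparents tip

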
@command(b'debugsidedata', cmdutil.debugrevlogopts, _(b'-c|-m|FILE REV'))
def debugsidedata(ui, repo, file_, rev=None, **opts):
    """dump the side data for a cl/manifest/file revision

    Use --verbose to dump the sidedata content."""
    opts = pycompat.byteskwargs(opts)
    if opts.get(b'changelog') or opts.get(b'manifest') or opts.get(b'dir'):
        if rev is not None:
            raise error.CommandError(b'debugdata', _(b'invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError(b'debugdata', _(b'invalid arguments'))
    r = cmdutil.openstorage(repo, b'debugdata', file_, opts)
    r = getattr(r, '_revlog', r)
    try:
        sidedata = r.sidedata(r.lookup(rev))
    except KeyError:
        raise error.Abort(_(b'invalid revision identifier %s') % rev)
    if sidedata:
        sidedata = list(sidedata.items())
        sidedata.sort()
        ui.writenoi18n(b'%d sidedata entries\n' % len(sidedata))
        for key, value in sidedata:
            ui.writenoi18n(b' entry-%04o size %d\n' % (key, len(value)))
            if ui.verbose:
                ui.writenoi18n(b'  %s\n' % stringutil.pprint(value))


@command(b'debugssl', [], b'[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    '''test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    '''
    if not pycompat.iswindows:
        raise error.Abort(
            _(b'certificate chain building is only possible on Windows')
        )

    if not source:
        if not repo:
            raise error.Abort(
                _(
                    b"there is no Mercurial repository here, and no "
                    b"server specified"
                )
            )
        source = b"default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)

    defaultport = {b'https': 443, b'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_(b"malformed port number in URL"))
    else:
        raise error.Abort(_(b"only https and ssh connections are supported"))

    from . import win32

    s = ssl.wrap_socket(
        socket.socket(),
        ssl_version=ssl.PROTOCOL_TLS,
        cert_reqs=ssl.CERT_NONE,
        ca_certs=None,
    )

    try:
        s.connect(addr)
        cert = s.getpeercert(True)

        ui.status(_(b'checking the certificate chain for %s\n') % url.host)

        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_(b'certificate chain is incomplete, updating... '))

            if not win32.checkcertificatechain(cert):
                ui.status(_(b'failed.\n'))
            else:
                ui.status(_(b'done.\n'))
        else:
            ui.status(_(b'full certificate chain is available\n'))
    finally:
        s.close()


@command(
    b'debugsub',
    [(b'r', b'rev', b'', _(b'revision to check'), _(b'REV'))],
    _(b'[-r REV] [REV]'),
)
def debugsub(ui, repo, rev=None):
    ctx = scmutil.revsingle(repo, rev, None)
    for k, v in sorted(ctx.substate.items()):
        ui.writenoi18n(b'path %s\n' % k)
        ui.writenoi18n(b' source   %s\n' % v[0])
        ui.writenoi18n(b' revision %s\n' % v[1])


@command(
    b'debugsuccessorssets',
    [(b'', b'closest', False, _(b'return closest successors sets only'))],
    _(b'[REV]'),
)
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only, unless the closest
    successors sets are requested (--closest).

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors is called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # passed to successorssets caching computation from one call to another
    cache = {}
    ctx2str = bytes
    node2str = short
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write(b'%s\n' % ctx2str(ctx))
        for succsset in obsutil.successorssets(
            repo, ctx.node(), closest=opts['closest'], cache=cache
        ):
            if succsset:
                ui.write(b'    ')
                ui.write(node2str(succsset[0]))
                for node in succsset[1:]:
                    ui.write(b' ')
                    ui.write(node2str(node))
            ui.write(b'\n')


@command(
    b'debugtemplate',
    [
        (b'r', b'rev', [], _(b'apply template on changesets'), _(b'REV')),
        (b'D', b'define', [], _(b'define template keyword'), _(b'KEY=VALUE')),
    ],
    _(b'[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True,
)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts['rev']:
        if repo is None:
            raise error.RepoError(
                _(b'there is no Mercurial repository here (.hg not found)')
            )
        revs = scmutil.revrange(repo, opts['rev'])

    props = {}
    for d in opts['define']:
        try:
            k, v = (e.strip() for e in d.split(b'=', 1))
            if not k or k == b'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_(b'malformed keyword definition: %s') % d)

    if ui.verbose:
        aliases = ui.configitems(b'templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), b'\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.notenoi18n(
                b"* expanded:\n", templater.prettyformat(newtree), b'\n'
            )

    if revs is None:
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.writenoi18n(b"* keywords: %s\n" % b', '.join(sorted(kwds)))
            ui.writenoi18n(b"* functions: %s\n" % b', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()


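# Illustrative usage sketch for debugtemplate: apply a log template with a
# user-defined keyword. 'greeting' is an arbitrary name injected via -D,
# not a built-in keyword:
#
#   $ hg debugtemplate -r . -D greeting=hello '{greeting}: {rev}:{node|short}\n'

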
@command(
    b'debuguigetpass',
    [(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguigetpass(ui, prompt=b''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    ui.writenoi18n(b'response: %s\n' % r)


@command(
    b'debuguiprompt',
    [(b'p', b'prompt', b'', _(b'prompt text'), _(b'TEXT')),],
    _(b'[-p TEXT]'),
    norepo=True,
)
def debuguiprompt(ui, prompt=b''):
    """show plain prompt"""
    r = ui.prompt(prompt)
    ui.writenoi18n(b'response: %s\n' % r)


@command(b'debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    with repo.wlock(), repo.lock():
        repo.updatecaches(full=True)


@command(
    b'debugupgraderepo',
    [
        (
            b'o',
            b'optimize',
            [],
            _(b'extra optimization to perform'),
            _(b'NAME'),
        ),
        (b'', b'run', False, _(b'performs an upgrade')),
        (b'', b'backup', True, _(b'keep the old repository content around')),
        (b'', b'changelog', None, _(b'select the changelog for upgrade')),
        (b'', b'manifest', None, _(b'select the manifest for upgrade')),
    ],
)
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines,
    this should complete almost instantaneously and the chances of a consumer
    being unable to access the repository should be low.

    By default, all revlogs will be upgraded. You can restrict this using
    flags such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlogs but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    """
    return upgrade.upgraderepo(
        ui, repo, run=run, optimize=optimize, backup=backup, **opts
    )


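# Illustrative usage sketch for debugupgraderepo, mirroring the docstring
# above: evaluate first, then restrict an actual run to the filelogs
# (boolean flags grow a --no- form automatically):
#
#   $ hg debugupgraderepo
#   $ hg debugupgraderepo --run --no-changelog --no-manifest

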
@command(
    b'debugwalk', cmdutil.walkopts, _(b'[OPTION]... [FILE]...'), inferrepo=True
)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    m = scmutil.match(repo[None], pats, opts)
    if ui.verbose:
        ui.writenoi18n(b'* matcher:\n', stringutil.prettyrepr(m), b'\n')
    items = list(repo[None].walk(m))
    if not items:
        return
    f = lambda fn: fn
    if ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/':
        f = lambda fn: util.normpath(fn)
    fmt = b'f  %%-%ds  %%-%ds  %%s' % (
        max([len(abs) for abs in items]),
        max([len(repo.pathto(abs)) for abs in items]),
    )
    for abs in items:
        line = fmt % (
            abs,
            f(repo.pathto(abs)),
            m.exact(abs) and b'exact' or b'',
        )
        ui.write(b"%s\n" % line.rstrip())


@command(b'debugwhyunstable', [], _(b'REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        dnodes = b''
        if entry.get(b'divergentnodes'):
            dnodes = (
                b' '.join(
                    b'%s (%s)' % (ctx.hex(), ctx.phasestr())
                    for ctx in entry[b'divergentnodes']
                )
                + b' '
            )
        ui.write(
            b'%s: %s%s %s\n'
            % (entry[b'instability'], dnodes, entry[b'reason'], entry[b'node'])
        )


@command(
    b'debugwireargs',
    [
        (b'', b'three', b'', b'three'),
        (b'', b'four', b'', b'four'),
        (b'', b'five', b'', b'five'),
    ]
    + cmdutil.remoteopts,
    _(b'REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True,
)
def debugwireargs(ui, repopath, *vals, **opts):
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    for opt in cmdutil.remoteopts:
        del opts[opt[1]]
    args = {}
    for k, v in pycompat.iteritems(opts):
        if v:
            args[k] = v
    args = pycompat.strkwargs(args)
    # run twice to check that we don't mess up the stream for the next command
    res1 = repo.debugwireargs(*vals, **args)
    res2 = repo.debugwireargs(*vals, **args)
    ui.write(b"%s\n" % res1)
    if res1 != res2:
        ui.warn(b"%s\n" % res2)


def _parsewirelangblocks(fh):
    activeaction = None
    blocklines = []
    lastindent = 0

    for line in fh:
        line = line.rstrip()
        if not line:
            continue

        if line.startswith(b'#'):
            continue

        if not line.startswith(b' '):
            # New block. Flush previous one.
            if activeaction:
                yield activeaction, blocklines

            activeaction = line
            blocklines = []
            lastindent = 0
            continue

        # Else we start with an indent.

        if not activeaction:
            raise error.Abort(_(b'indented line outside of block'))

        indent = len(line) - len(line.lstrip())

        # If this line is indented more than the last line, concatenate it.
        if indent > lastindent and blocklines:
            blocklines[-1] += line.lstrip()
        else:
            blocklines.append(line)
            lastindent = indent

    # Flush last block.
    if activeaction:
        yield activeaction, blocklines


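# Illustrative trace of _parsewirelangblocks() on a hypothetical two-line
# input block:
#
#   command listkeys
#       namespace bookmarks
#
# yields a single (b'command listkeys', [b'    namespace bookmarks']) tuple.
# A line indented deeper than its predecessor would instead be lstripped and
# concatenated onto the previous block line rather than appended.

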
3667 @command(
3668 @command(
3668 b'debugwireproto',
3669 b'debugwireproto',
3669 [
3670 [
3670 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
3671 (b'', b'localssh', False, _(b'start an SSH server for this repo')),
3671 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
3672 (b'', b'peer', b'', _(b'construct a specific version of the peer')),
3672 (
3673 (
3673 b'',
3674 b'',
3674 b'noreadstderr',
3675 b'noreadstderr',
3675 False,
3676 False,
3676 _(b'do not read from stderr of the remote'),
3677 _(b'do not read from stderr of the remote'),
3677 ),
3678 ),
3678 (
3679 (
3679 b'',
3680 b'',
3680 b'nologhandshake',
3681 b'nologhandshake',
3681 False,
3682 False,
3682 _(b'do not log I/O related to the peer handshake'),
3683 _(b'do not log I/O related to the peer handshake'),
3683 ),
3684 ),
3684 ]
3685 ]
3685 + cmdutil.remoteopts,
3686 + cmdutil.remoteopts,
3686 _(b'[PATH]'),
3687 _(b'[PATH]'),
3687 optionalrepo=True,
3688 optionalrepo=True,
3688 )
3689 )
3689 def debugwireproto(ui, repo, path=None, **opts):
3690 def debugwireproto(ui, repo, path=None, **opts):
3690 """send wire protocol commands to a server
3691 """send wire protocol commands to a server
3691
3692
3692 This command can be used to issue wire protocol commands to remote
3693 This command can be used to issue wire protocol commands to remote
3693 peers and to debug the raw data being exchanged.
3694 peers and to debug the raw data being exchanged.
3694
3695
3695 ``--localssh`` will start an SSH server against the current repository
3696 ``--localssh`` will start an SSH server against the current repository
3696 and connect to that. By default, the connection will perform a handshake
3697 and connect to that. By default, the connection will perform a handshake
3697 and establish an appropriate peer instance.
3698 and establish an appropriate peer instance.
3698
3699
3699 ``--peer`` can be used to bypass the handshake protocol and construct a
3700 ``--peer`` can be used to bypass the handshake protocol and construct a
3700 peer instance using the specified class type. Valid values are ``raw``,
3701 peer instance using the specified class type. Valid values are ``raw``,
3701 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
3702 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
3702 raw data payloads and don't support higher-level command actions.
3703 raw data payloads and don't support higher-level command actions.
3703
3704
3704 ``--noreadstderr`` can be used to disable automatic reading from stderr
3705 ``--noreadstderr`` can be used to disable automatic reading from stderr
3705 of the peer (for SSH connections only). Disabling automatic reading of
3706 of the peer (for SSH connections only). Disabling automatic reading of
3706 stderr is useful for making output more deterministic.
3707 stderr is useful for making output more deterministic.
3707
3708
3708 Commands are issued via a mini language which is specified via stdin.
3709 Commands are issued via a mini language which is specified via stdin.
3709 The language consists of individual actions to perform. An action is
3710 The language consists of individual actions to perform. An action is
3710 defined by a block. A block is defined as a line with no leading
3711 defined by a block. A block is defined as a line with no leading
3711 space followed by 0 or more lines with leading space. Blocks are
3712 space followed by 0 or more lines with leading space. Blocks are
3712 effectively a high-level command with additional metadata.
3713 effectively a high-level command with additional metadata.
3713
3714
3714 Lines beginning with ``#`` are ignored.
3715 Lines beginning with ``#`` are ignored.
3715
3716
3716 The following sections denote available actions.
3717 The following sections denote available actions.
3717
3718
3718 raw
3719 raw
3719 ---
3720 ---
3720
3721
3721 Send raw data to the server.
3722 Send raw data to the server.
3722
3723
3723 The block payload contains the raw data to send as one atomic send
3724 The block payload contains the raw data to send as one atomic send
3724 operation. The data may not actually be delivered in a single system
3725 operation. The data may not actually be delivered in a single system
3725 call: it depends on the abilities of the transport being used.
3726 call: it depends on the abilities of the transport being used.
3726
3727
3727 Each line in the block is de-indented and concatenated. Then, that
3728 Each line in the block is de-indented and concatenated. Then, that
3728 value is evaluated as a Python b'' literal. This allows the use of
3729 value is evaluated as a Python b'' literal. This allows the use of
3729 backslash escaping, etc.
3730 backslash escaping, etc.
3730
3731
3731 raw+
3732 raw+
3732 ----
3733 ----
3733
3734
3734 Behaves like ``raw`` except flushes output afterwards.
3735 Behaves like ``raw`` except flushes output afterwards.
3735
3736
3736 command <X>
3737 command <X>
3737 -----------
3738 -----------
3738
3739
3739 Send a request to run a named command, whose name follows the ``command``
3740 Send a request to run a named command, whose name follows the ``command``
3740 string.
3741 string.
3741
3742
3742 Arguments to the command are defined as lines in this block. The format of
3743 Arguments to the command are defined as lines in this block. The format of
3743 each line is ``<key> <value>``. e.g.::
3744 each line is ``<key> <value>``. e.g.::
3744
3745
3745 command listkeys
3746 command listkeys
3746 namespace bookmarks
3747 namespace bookmarks
3747
3748
3748 If the value begins with ``eval:``, it will be interpreted as a Python
3749 If the value begins with ``eval:``, it will be interpreted as a Python
3749 literal expression. Otherwise values are interpreted as Python b'' literals.
3750 literal expression. Otherwise values are interpreted as Python b'' literals.
3750 This allows sending complex types and encoding special byte sequences via
3751 This allows sending complex types and encoding special byte sequences via
3751 backslash escaping.
3752 backslash escaping.
3752
3753
3753 The following arguments have special meaning:
3754 The following arguments have special meaning:
3754
3755
3755 ``PUSHFILE``
3756 ``PUSHFILE``
3756 When defined, the *push* mechanism of the peer will be used instead
3757 When defined, the *push* mechanism of the peer will be used instead
3757 of the static request-response mechanism and the content of the
3758 of the static request-response mechanism and the content of the
3758 file specified in the value of this argument will be sent as the
3759 file specified in the value of this argument will be sent as the
3759 command payload.
3760 command payload.
3760
3761
3761 This can be used to submit a local bundle file to the remote.
3762 This can be used to submit a local bundle file to the remote.
3762
3763
3763 batchbegin
3764 batchbegin
3764 ----------
3765 ----------
3765
3766
3766 Instruct the peer to begin a batched send.
3767 Instruct the peer to begin a batched send.
3767
3768
3768 All ``command`` blocks are queued for execution until the next
3769 All ``command`` blocks are queued for execution until the next
3769 ``batchsubmit`` block.
3770 ``batchsubmit`` block.
3770
3771
3771 batchsubmit
3772 batchsubmit
3772 -----------
3773 -----------
3773
3774
3774 Submit previously queued ``command`` blocks as a batch request.
3775 Submit previously queued ``command`` blocks as a batch request.
3775
3776
3776 This action MUST be paired with a ``batchbegin`` action.
3777 This action MUST be paired with a ``batchbegin`` action.
3777
3778
3778 httprequest <method> <path>
3779 httprequest <method> <path>
3779 ---------------------------
3780 ---------------------------
3780
3781
3781 (HTTP peer only)
3782 (HTTP peer only)
3782
3783
3783 Send an HTTP request to the peer.
3784 Send an HTTP request to the peer.
3784
3785
3785 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
3786 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
3786
3787
3787 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
3788 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
3788 headers to add to the request. e.g. ``Accept: foo``.
3789 headers to add to the request. e.g. ``Accept: foo``.
3789
3790
3790 The following arguments are special:
3791 The following arguments are special:
3791
3792
3792 ``BODYFILE``
3793 ``BODYFILE``
3793 The content of the file defined as the value to this argument will be
3794 The content of the file defined as the value to this argument will be
3794 transferred verbatim as the HTTP request body.
3795 transferred verbatim as the HTTP request body.
3795
3796
3796 ``frame <type> <flags> <payload>``
3797 ``frame <type> <flags> <payload>``
3797 Send a unified protocol frame as part of the request body.
3798 Send a unified protocol frame as part of the request body.
3798
3799
3799 All frames will be collected and sent as the body to the HTTP
3800 All frames will be collected and sent as the body to the HTTP
3800 request.
3801 request.
3801
3802
3802 close
3803 close
3803 -----
3804 -----
3804
3805
3805 Close the connection to the server.
3806 Close the connection to the server.
3806
3807
3807 flush
3808 flush
3808 -----
3809 -----
3809
3810
3810 Flush data written to the server.
3811 Flush data written to the server.
3811
3812
3812 readavailable
3813 readavailable
3813 -------------
3814 -------------
3814
3815
3815 Close the write end of the connection and read all available data from
3816 Close the write end of the connection and read all available data from
3816 the server.
3817 the server.
3817
3818
3818 If the connection to the server encompasses multiple pipes, we poll both
3819 If the connection to the server encompasses multiple pipes, we poll both
3819 pipes and read available data.
3820 pipes and read available data.
3820
3821
3821 readline
3822 readline
3822 --------
3823 --------
3823
3824
3824 Read a line of output from the server. If there are multiple output
3825 Read a line of output from the server. If there are multiple output
3825 pipes, reads only the main pipe.
3826 pipes, reads only the main pipe.
3826
3827
3827 ereadline
3828 ereadline
3828 ---------
3829 ---------
3829
3830
3830 Like ``readline``, but read from the stderr pipe, if available.
3831 Like ``readline``, but read from the stderr pipe, if available.
3831
3832
3832 read <X>
3833 read <X>
3833 --------
3834 --------
3834
3835
3835 ``read()`` N bytes from the server's main output pipe.
3836 ``read()`` N bytes from the server's main output pipe.
3836
3837
3837 eread <X>
3838 eread <X>
3838 ---------
3839 ---------
3839
3840
3840 ``read()`` N bytes from the server's stderr pipe, if available.
3841 ``read()`` N bytes from the server's stderr pipe, if available.
3841
3842
3842 Specifying Unified Frame-Based Protocol Frames
3843 Specifying Unified Frame-Based Protocol Frames
3843 ----------------------------------------------
3844 ----------------------------------------------
3844
3845
3845 It is possible to emit a *Unified Frame-Based Protocol* by using special
3846 It is possible to emit a *Unified Frame-Based Protocol* by using special
3846 syntax.
3847 syntax.
3847
3848
3848 A frame is composed as a type, flags, and payload. These can be parsed
3849 A frame is composed as a type, flags, and payload. These can be parsed
3849 from a string of the form:
3850 from a string of the form:
3850
3851
3851 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
3852 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
3852
3853
3853 ``request-id`` and ``stream-id`` are integers defining the request and
3854 ``request-id`` and ``stream-id`` are integers defining the request and
3854 stream identifiers.
3855 stream identifiers.
3855
3856
3856 ``type`` can be an integer value for the frame type or the string name
3857 ``type`` can be an integer value for the frame type or the string name
3857 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
3858 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
3858 ``command-name``.
3859 ``command-name``.
3859
3860
3860 ``stream-flags`` and ``flags`` are ``|``-delimited lists of flag
3861 ``stream-flags`` and ``flags`` are ``|``-delimited lists of flag
3861 components. Each component (and there can be just one) can be an integer
3862 components. Each component (and there can be just one) can be an integer
3862 or a flag name for stream flags or frame flags, respectively. Values are
3863 or a flag name for stream flags or frame flags, respectively. Values are
3863 resolved to integers and then bitwise OR'd together.
3864 resolved to integers and then bitwise OR'd together.
3864
3865
3865 ``payload`` represents the raw frame payload. If it begins with
3866 ``payload`` represents the raw frame payload. If it begins with
3866 ``cbor:``, the following string is evaluated as Python code and the
3867 ``cbor:``, the following string is evaluated as Python code and the
3867 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
3868 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
3868 as a Python byte string literal.
3869 as a Python byte string literal.
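For instance, a spec along the following lines (a sketch; the type and
flag names are those defined in ``wireprotoframing.py``) would send a
command request frame on stream 1:

    frame 1 1 stream-begin command-request new cbor:{b'name': b'heartbeat'}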
3869 """
3870 """
3870 opts = pycompat.byteskwargs(opts)
3871 opts = pycompat.byteskwargs(opts)
3871
3872
3872 if opts[b'localssh'] and not repo:
3873 if opts[b'localssh'] and not repo:
3873 raise error.Abort(_(b'--localssh requires a repository'))
3874 raise error.Abort(_(b'--localssh requires a repository'))
3874
3875
3875 if opts[b'peer'] and opts[b'peer'] not in (
3876 if opts[b'peer'] and opts[b'peer'] not in (
3876 b'raw',
3877 b'raw',
3877 b'http2',
3878 b'http2',
3878 b'ssh1',
3879 b'ssh1',
3879 b'ssh2',
3880 b'ssh2',
3880 ):
3881 ):
3881 raise error.Abort(
3882 raise error.Abort(
3882 _(b'invalid value for --peer'),
3883 _(b'invalid value for --peer'),
3883 hint=_(b'valid values are "raw", "http2", "ssh1", and "ssh2"'),
3884 hint=_(b'valid values are "raw", "http2", "ssh1", and "ssh2"'),
3884 )
3885 )
3885
3886
3886 if path and opts[b'localssh']:
3887 if path and opts[b'localssh']:
3887 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
3888 raise error.Abort(_(b'cannot specify --localssh with an explicit path'))
3888
3889
3889 if ui.interactive():
3890 if ui.interactive():
3890 ui.write(_(b'(waiting for commands on stdin)\n'))
3891 ui.write(_(b'(waiting for commands on stdin)\n'))
3891
3892
3892 blocks = list(_parsewirelangblocks(ui.fin))
3893 blocks = list(_parsewirelangblocks(ui.fin))
3893
3894
3894 proc = None
3895 proc = None
3895 stdin = None
3896 stdin = None
3896 stdout = None
3897 stdout = None
3897 stderr = None
3898 stderr = None
3898 opener = None
3899 opener = None
3899
3900
3900 if opts[b'localssh']:
3901 if opts[b'localssh']:
3901 # We start the SSH server in its own process so there is process
3902 # We start the SSH server in its own process so there is process
3902 # separation. This prevents a whole class of potential bugs around
3903 # separation. This prevents a whole class of potential bugs around
3903 # shared state from interfering with server operation.
3904 # shared state from interfering with server operation.
3904 args = procutil.hgcmd() + [
3905 args = procutil.hgcmd() + [
3905 b'-R',
3906 b'-R',
3906 repo.root,
3907 repo.root,
3907 b'debugserve',
3908 b'debugserve',
3908 b'--sshstdio',
3909 b'--sshstdio',
3909 ]
3910 ]
3910 proc = subprocess.Popen(
3911 proc = subprocess.Popen(
3911 pycompat.rapply(procutil.tonativestr, args),
3912 pycompat.rapply(procutil.tonativestr, args),
3912 stdin=subprocess.PIPE,
3913 stdin=subprocess.PIPE,
3913 stdout=subprocess.PIPE,
3914 stdout=subprocess.PIPE,
3914 stderr=subprocess.PIPE,
3915 stderr=subprocess.PIPE,
3915 bufsize=0,
3916 bufsize=0,
3916 )
3917 )
3917
3918
3918 stdin = proc.stdin
3919 stdin = proc.stdin
3919 stdout = proc.stdout
3920 stdout = proc.stdout
3920 stderr = proc.stderr
3921 stderr = proc.stderr
3921
3922
3922 # We turn the pipes into observers so we can log I/O.
3923 # We turn the pipes into observers so we can log I/O.
3923 if ui.verbose or opts[b'peer'] == b'raw':
3924 if ui.verbose or opts[b'peer'] == b'raw':
3924 stdin = util.makeloggingfileobject(
3925 stdin = util.makeloggingfileobject(
3925 ui, proc.stdin, b'i', logdata=True
3926 ui, proc.stdin, b'i', logdata=True
3926 )
3927 )
3927 stdout = util.makeloggingfileobject(
3928 stdout = util.makeloggingfileobject(
3928 ui, proc.stdout, b'o', logdata=True
3929 ui, proc.stdout, b'o', logdata=True
3929 )
3930 )
3930 stderr = util.makeloggingfileobject(
3931 stderr = util.makeloggingfileobject(
3931 ui, proc.stderr, b'e', logdata=True
3932 ui, proc.stderr, b'e', logdata=True
3932 )
3933 )
3933
3934
3934 # --localssh also implies the peer connection settings.
3935 # --localssh also implies the peer connection settings.
3935
3936
3936 url = b'ssh://localserver'
3937 url = b'ssh://localserver'
3937 autoreadstderr = not opts[b'noreadstderr']
3938 autoreadstderr = not opts[b'noreadstderr']
3938
3939
3939 if opts[b'peer'] == b'ssh1':
3940 if opts[b'peer'] == b'ssh1':
3940 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
3941 ui.write(_(b'creating ssh peer for wire protocol version 1\n'))
3941 peer = sshpeer.sshv1peer(
3942 peer = sshpeer.sshv1peer(
3942 ui,
3943 ui,
3943 url,
3944 url,
3944 proc,
3945 proc,
3945 stdin,
3946 stdin,
3946 stdout,
3947 stdout,
3947 stderr,
3948 stderr,
3948 None,
3949 None,
3949 autoreadstderr=autoreadstderr,
3950 autoreadstderr=autoreadstderr,
3950 )
3951 )
3951 elif opts[b'peer'] == b'ssh2':
3952 elif opts[b'peer'] == b'ssh2':
3952 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
3953 ui.write(_(b'creating ssh peer for wire protocol version 2\n'))
3953 peer = sshpeer.sshv2peer(
3954 peer = sshpeer.sshv2peer(
3954 ui,
3955 ui,
3955 url,
3956 url,
3956 proc,
3957 proc,
3957 stdin,
3958 stdin,
3958 stdout,
3959 stdout,
3959 stderr,
3960 stderr,
3960 None,
3961 None,
3961 autoreadstderr=autoreadstderr,
3962 autoreadstderr=autoreadstderr,
3962 )
3963 )
3963 elif opts[b'peer'] == b'raw':
3964 elif opts[b'peer'] == b'raw':
3964 ui.write(_(b'using raw connection to peer\n'))
3965 ui.write(_(b'using raw connection to peer\n'))
3965 peer = None
3966 peer = None
3966 else:
3967 else:
3967 ui.write(_(b'creating ssh peer from handshake results\n'))
3968 ui.write(_(b'creating ssh peer from handshake results\n'))
3968 peer = sshpeer.makepeer(
3969 peer = sshpeer.makepeer(
3969 ui,
3970 ui,
3970 url,
3971 url,
3971 proc,
3972 proc,
3972 stdin,
3973 stdin,
3973 stdout,
3974 stdout,
3974 stderr,
3975 stderr,
3975 autoreadstderr=autoreadstderr,
3976 autoreadstderr=autoreadstderr,
3976 )
3977 )
3977
3978
3978 elif path:
3979 elif path:
3979 # We bypass hg.peer() so we can proxy the sockets.
3980 # We bypass hg.peer() so we can proxy the sockets.
3980 # TODO consider not doing this because we skip
3981 # TODO consider not doing this because we skip
3981 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
3982 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
3982 u = util.url(path)
3983 u = util.url(path)
3983 if u.scheme != b'http':
3984 if u.scheme != b'http':
3984 raise error.Abort(_(b'only http:// paths are currently supported'))
3985 raise error.Abort(_(b'only http:// paths are currently supported'))
3985
3986
3986 url, authinfo = u.authinfo()
3987 url, authinfo = u.authinfo()
3987 openerargs = {
3988 openerargs = {
3988 'useragent': b'Mercurial debugwireproto',
3989 'useragent': b'Mercurial debugwireproto',
3989 }
3990 }
3990
3991
3991 # Turn pipes/sockets into observers so we can log I/O.
3992 # Turn pipes/sockets into observers so we can log I/O.
3992 if ui.verbose:
3993 if ui.verbose:
3993 openerargs.update(
3994 openerargs.update(
3994 {
3995 {
3995 'loggingfh': ui,
3996 'loggingfh': ui,
3996 'loggingname': b's',
3997 'loggingname': b's',
3997 'loggingopts': {'logdata': True, 'logdataapis': False,},
3998 'loggingopts': {'logdata': True, 'logdataapis': False,},
3998 }
3999 }
3999 )
4000 )
4000
4001
4001 if ui.debugflag:
4002 if ui.debugflag:
4002 openerargs['loggingopts']['logdataapis'] = True
4003 openerargs['loggingopts']['logdataapis'] = True
4003
4004
4004 # Don't send default headers when in raw mode. This allows us to
4005 # Don't send default headers when in raw mode. This allows us to
4005 # bypass most of the behavior of our URL handling code so we can
4006 # bypass most of the behavior of our URL handling code so we can
4006 # have near complete control over what's sent on the wire.
4007 # have near complete control over what's sent on the wire.
4007 if opts[b'peer'] == b'raw':
4008 if opts[b'peer'] == b'raw':
4008 openerargs['sendaccept'] = False
4009 openerargs['sendaccept'] = False
4009
4010
4010 opener = urlmod.opener(ui, authinfo, **openerargs)
4011 opener = urlmod.opener(ui, authinfo, **openerargs)
4011
4012
4012 if opts[b'peer'] == b'http2':
4013 if opts[b'peer'] == b'http2':
4013 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4014 ui.write(_(b'creating http peer for wire protocol version 2\n'))
4014 # We go through makepeer() because we need an API descriptor for
4015 # We go through makepeer() because we need an API descriptor for
4015 # the peer instance to be useful.
4016 # the peer instance to be useful.
4016 with ui.configoverride(
4017 with ui.configoverride(
4017 {(b'experimental', b'httppeer.advertise-v2'): True}
4018 {(b'experimental', b'httppeer.advertise-v2'): True}
4018 ):
4019 ):
4019 if opts[b'nologhandshake']:
4020 if opts[b'nologhandshake']:
4020 ui.pushbuffer()
4021 ui.pushbuffer()
4021
4022
4022 peer = httppeer.makepeer(ui, path, opener=opener)
4023 peer = httppeer.makepeer(ui, path, opener=opener)
4023
4024
4024 if opts[b'nologhandshake']:
4025 if opts[b'nologhandshake']:
4025 ui.popbuffer()
4026 ui.popbuffer()
4026
4027
4027 if not isinstance(peer, httppeer.httpv2peer):
4028 if not isinstance(peer, httppeer.httpv2peer):
4028 raise error.Abort(
4029 raise error.Abort(
4029 _(
4030 _(
4030 b'could not instantiate HTTP peer for '
4031 b'could not instantiate HTTP peer for '
4031 b'wire protocol version 2'
4032 b'wire protocol version 2'
4032 ),
4033 ),
4033 hint=_(
4034 hint=_(
4034 b'the server may not have the feature '
4035 b'the server may not have the feature '
4035 b'enabled or is not allowing this '
4036 b'enabled or is not allowing this '
4036 b'client version'
4037 b'client version'
4037 ),
4038 ),
4038 )
4039 )
4039
4040
4040 elif opts[b'peer'] == b'raw':
4041 elif opts[b'peer'] == b'raw':
4041 ui.write(_(b'using raw connection to peer\n'))
4042 ui.write(_(b'using raw connection to peer\n'))
4042 peer = None
4043 peer = None
4043 elif opts[b'peer']:
4044 elif opts[b'peer']:
4044 raise error.Abort(
4045 raise error.Abort(
4045 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4046 _(b'--peer %s not supported with HTTP peers') % opts[b'peer']
4046 )
4047 )
4047 else:
4048 else:
4048 peer = httppeer.makepeer(ui, path, opener=opener)
4049 peer = httppeer.makepeer(ui, path, opener=opener)
4049
4050
4050 # We /could/ populate stdin/stdout with sock.makefile()...
4051 # We /could/ populate stdin/stdout with sock.makefile()...
4051 else:
4052 else:
4052 raise error.Abort(_(b'unsupported connection configuration'))
4053 raise error.Abort(_(b'unsupported connection configuration'))
4053
4054
4054 batchedcommands = None
4055 batchedcommands = None
4055
4056
4056 # Now perform actions based on the parsed wire language instructions.
4057 # Now perform actions based on the parsed wire language instructions.
4057 for action, lines in blocks:
4058 for action, lines in blocks:
4058 if action in (b'raw', b'raw+'):
4059 if action in (b'raw', b'raw+'):
4059 if not stdin:
4060 if not stdin:
4060 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4061 raise error.Abort(_(b'cannot call raw/raw+ on this peer'))
4061
4062
4062 # Concatenate the data together.
4063 # Concatenate the data together.
4063 data = b''.join(l.lstrip() for l in lines)
4064 data = b''.join(l.lstrip() for l in lines)
4064 data = stringutil.unescapestr(data)
4065 data = stringutil.unescapestr(data)
4065 stdin.write(data)
4066 stdin.write(data)
4066
4067
4067 if action == b'raw+':
4068 if action == b'raw+':
4068 stdin.flush()
4069 stdin.flush()
4069 elif action == b'flush':
4070 elif action == b'flush':
4070 if not stdin:
4071 if not stdin:
4071 raise error.Abort(_(b'cannot call flush on this peer'))
4072 raise error.Abort(_(b'cannot call flush on this peer'))
4072 stdin.flush()
4073 stdin.flush()
4073 elif action.startswith(b'command'):
4074 elif action.startswith(b'command'):
4074 if not peer:
4075 if not peer:
4075 raise error.Abort(
4076 raise error.Abort(
4076 _(
4077 _(
4077 b'cannot send commands unless peer instance '
4078 b'cannot send commands unless peer instance '
4078 b'is available'
4079 b'is available'
4079 )
4080 )
4080 )
4081 )
4081
4082
4082 command = action.split(b' ', 1)[1]
4083 command = action.split(b' ', 1)[1]
4083
4084
4084 args = {}
4085 args = {}
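# Each argument line is "<key> <value>"; a value prefixed with
# "eval:" is evaluated as a Python literal, anything else is
# treated as an escaped byte string. Empty values are allowed.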
4085 for line in lines:
4086 for line in lines:
4086 # We need to allow empty values.
4087 # We need to allow empty values.
4087 fields = line.lstrip().split(b' ', 1)
4088 fields = line.lstrip().split(b' ', 1)
4088 if len(fields) == 1:
4089 if len(fields) == 1:
4089 key = fields[0]
4090 key = fields[0]
4090 value = b''
4091 value = b''
4091 else:
4092 else:
4092 key, value = fields
4093 key, value = fields
4093
4094
4094 if value.startswith(b'eval:'):
4095 if value.startswith(b'eval:'):
4095 value = stringutil.evalpythonliteral(value[5:])
4096 value = stringutil.evalpythonliteral(value[5:])
4096 else:
4097 else:
4097 value = stringutil.unescapestr(value)
4098 value = stringutil.unescapestr(value)
4098
4099
4099 args[key] = value
4100 args[key] = value
4100
4101
4101 if batchedcommands is not None:
4102 if batchedcommands is not None:
4102 batchedcommands.append((command, args))
4103 batchedcommands.append((command, args))
4103 continue
4104 continue
4104
4105
4105 ui.status(_(b'sending %s command\n') % command)
4106 ui.status(_(b'sending %s command\n') % command)
4106
4107
4107 if b'PUSHFILE' in args:
4108 if b'PUSHFILE' in args:
4108 with open(args[b'PUSHFILE'], 'rb') as fh:
4109 with open(args[b'PUSHFILE'], 'rb') as fh:
4109 del args[b'PUSHFILE']
4110 del args[b'PUSHFILE']
4110 res, output = peer._callpush(
4111 res, output = peer._callpush(
4111 command, fh, **pycompat.strkwargs(args)
4112 command, fh, **pycompat.strkwargs(args)
4112 )
4113 )
4113 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4114 ui.status(_(b'result: %s\n') % stringutil.escapestr(res))
4114 ui.status(
4115 ui.status(
4115 _(b'remote output: %s\n') % stringutil.escapestr(output)
4116 _(b'remote output: %s\n') % stringutil.escapestr(output)
4116 )
4117 )
4117 else:
4118 else:
4118 with peer.commandexecutor() as e:
4119 with peer.commandexecutor() as e:
4119 res = e.callcommand(command, args).result()
4120 res = e.callcommand(command, args).result()
4120
4121
4121 if isinstance(res, wireprotov2peer.commandresponse):
4122 if isinstance(res, wireprotov2peer.commandresponse):
4122 val = res.objects()
4123 val = res.objects()
4123 ui.status(
4124 ui.status(
4124 _(b'response: %s\n')
4125 _(b'response: %s\n')
4125 % stringutil.pprint(val, bprefix=True, indent=2)
4126 % stringutil.pprint(val, bprefix=True, indent=2)
4126 )
4127 )
4127 else:
4128 else:
4128 ui.status(
4129 ui.status(
4129 _(b'response: %s\n')
4130 _(b'response: %s\n')
4130 % stringutil.pprint(res, bprefix=True, indent=2)
4131 % stringutil.pprint(res, bprefix=True, indent=2)
4131 )
4132 )
4132
4133
4133 elif action == b'batchbegin':
4134 elif action == b'batchbegin':
4134 if batchedcommands is not None:
4135 if batchedcommands is not None:
4135 raise error.Abort(_(b'nested batchbegin not allowed'))
4136 raise error.Abort(_(b'nested batchbegin not allowed'))
4136
4137
4137 batchedcommands = []
4138 batchedcommands = []
4138 elif action == b'batchsubmit':
4139 elif action == b'batchsubmit':
4139 # There is a batching API we could go through. But it would be
4140 # There is a batching API we could go through. But it would be
4140 # difficult to normalize requests into function calls. It is easier
4141 # difficult to normalize requests into function calls. It is easier
4141 # to bypass this layer and normalize to commands + args.
4142 # to bypass this layer and normalize to commands + args.
4142 ui.status(
4143 ui.status(
4143 _(b'sending batch with %d sub-commands\n')
4144 _(b'sending batch with %d sub-commands\n')
4144 % len(batchedcommands)
4145 % len(batchedcommands)
4145 )
4146 )
4146 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4147 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
4147 ui.status(
4148 ui.status(
4148 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4149 _(b'response #%d: %s\n') % (i, stringutil.escapestr(chunk))
4149 )
4150 )
4150
4151
4151 batchedcommands = None
4152 batchedcommands = None
4152
4153
4153 elif action.startswith(b'httprequest '):
4154 elif action.startswith(b'httprequest '):
4154 if not opener:
4155 if not opener:
4155 raise error.Abort(
4156 raise error.Abort(
4156 _(b'cannot use httprequest without an HTTP peer')
4157 _(b'cannot use httprequest without an HTTP peer')
4157 )
4158 )
4158
4159
4159 request = action.split(b' ', 2)
4160 request = action.split(b' ', 2)
4160 if len(request) != 3:
4161 if len(request) != 3:
4161 raise error.Abort(
4162 raise error.Abort(
4162 _(
4163 _(
4163 b'invalid httprequest: expected format is '
4164 b'invalid httprequest: expected format is '
4164 b'"httprequest <method> <path>'
4165 b'"httprequest <method> <path>'
4165 )
4166 )
4166 )
4167 )
4167
4168
4168 method, httppath = request[1:]
4169 method, httppath = request[1:]
4169 headers = {}
4170 headers = {}
4170 body = None
4171 body = None
4171 frames = []
4172 frames = []
4172 for line in lines:
4173 for line in lines:
4173 line = line.lstrip()
4174 line = line.lstrip()
4174 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4175 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
4175 if m:
4176 if m:
4176 # Headers need to use native strings.
4177 # Headers need to use native strings.
4177 key = pycompat.strurl(m.group(1))
4178 key = pycompat.strurl(m.group(1))
4178 value = pycompat.strurl(m.group(2))
4179 value = pycompat.strurl(m.group(2))
4179 headers[key] = value
4180 headers[key] = value
4180 continue
4181 continue
4181
4182
4182 if line.startswith(b'BODYFILE '):
4183 if line.startswith(b'BODYFILE '):
4183 with open(line.split(b' ', 1)[1], 'rb') as fh:
4184 with open(line.split(b' ', 1)[1], 'rb') as fh:
4184 body = fh.read()
4185 body = fh.read()
4185 elif line.startswith(b'frame '):
4186 elif line.startswith(b'frame '):
4186 frame = wireprotoframing.makeframefromhumanstring(
4187 frame = wireprotoframing.makeframefromhumanstring(
4187 line[len(b'frame ') :]
4188 line[len(b'frame ') :]
4188 )
4189 )
4189
4190
4190 frames.append(frame)
4191 frames.append(frame)
4191 else:
4192 else:
4192 raise error.Abort(
4193 raise error.Abort(
4193 _(b'unknown argument to httprequest: %s') % line
4194 _(b'unknown argument to httprequest: %s') % line
4194 )
4195 )
4195
4196
4196 url = path + httppath
4197 url = path + httppath
4197
4198
4198 if frames:
4199 if frames:
4199 body = b''.join(bytes(f) for f in frames)
4200 body = b''.join(bytes(f) for f in frames)
4200
4201
4201 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4202 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
4202
4203
4203 # urllib.Request insists on using has_data() as a proxy for
4204 # urllib.Request insists on using has_data() as a proxy for
4204 # determining the request method. Override that to use our
4205 # determining the request method. Override that to use our
4205 # explicitly requested method.
4206 # explicitly requested method.
4206 req.get_method = lambda: pycompat.sysstr(method)
4207 req.get_method = lambda: pycompat.sysstr(method)
4207
4208
4208 try:
4209 try:
4209 res = opener.open(req)
4210 res = opener.open(req)
4210 body = res.read()
4211 body = res.read()
4211 except util.urlerr.urlerror as e:
4212 except util.urlerr.urlerror as e:
4212 # read() method must be called, but only exists in Python 2
4213 # read() method must be called, but only exists in Python 2
4213 getattr(e, 'read', lambda: None)()
4214 getattr(e, 'read', lambda: None)()
4214 continue
4215 continue
4215
4216
4216 ct = res.headers.get('Content-Type')
4217 ct = res.headers.get('Content-Type')
4217 if ct == 'application/mercurial-cbor':
4218 if ct == 'application/mercurial-cbor':
4218 ui.write(
4219 ui.write(
4219 _(b'cbor> %s\n')
4220 _(b'cbor> %s\n')
4220 % stringutil.pprint(
4221 % stringutil.pprint(
4221 cborutil.decodeall(body), bprefix=True, indent=2
4222 cborutil.decodeall(body), bprefix=True, indent=2
4222 )
4223 )
4223 )
4224 )
4224
4225
4225 elif action == b'close':
4226 elif action == b'close':
4226 peer.close()
4227 peer.close()
4227 elif action == b'readavailable':
4228 elif action == b'readavailable':
4228 if not stdout or not stderr:
4229 if not stdout or not stderr:
4229 raise error.Abort(
4230 raise error.Abort(
4230 _(b'readavailable not available on this peer')
4231 _(b'readavailable not available on this peer')
4231 )
4232 )
4232
4233
4233 stdin.close()
4234 stdin.close()
4234 stdout.read()
4235 stdout.read()
4235 stderr.read()
4236 stderr.read()
4236
4237
4237 elif action == b'readline':
4238 elif action == b'readline':
4238 if not stdout:
4239 if not stdout:
4239 raise error.Abort(_(b'readline not available on this peer'))
4240 raise error.Abort(_(b'readline not available on this peer'))
4240 stdout.readline()
4241 stdout.readline()
4241 elif action == b'ereadline':
4242 elif action == b'ereadline':
4242 if not stderr:
4243 if not stderr:
4243 raise error.Abort(_(b'ereadline not available on this peer'))
4244 raise error.Abort(_(b'ereadline not available on this peer'))
4244 stderr.readline()
4245 stderr.readline()
4245 elif action.startswith(b'read '):
4246 elif action.startswith(b'read '):
4246 count = int(action.split(b' ', 1)[1])
4247 count = int(action.split(b' ', 1)[1])
4247 if not stdout:
4248 if not stdout:
4248 raise error.Abort(_(b'read not available on this peer'))
4249 raise error.Abort(_(b'read not available on this peer'))
4249 stdout.read(count)
4250 stdout.read(count)
4250 elif action.startswith(b'eread '):
4251 elif action.startswith(b'eread '):
4251 count = int(action.split(b' ', 1)[1])
4252 count = int(action.split(b' ', 1)[1])
4252 if not stderr:
4253 if not stderr:
4253 raise error.Abort(_(b'eread not available on this peer'))
4254 raise error.Abort(_(b'eread not available on this peer'))
4254 stderr.read(count)
4255 stderr.read(count)
4255 else:
4256 else:
4256 raise error.Abort(_(b'unknown action: %s') % action)
4257 raise error.Abort(_(b'unknown action: %s') % action)
4257
4258
4258 if batchedcommands is not None:
4259 if batchedcommands is not None:
4259 raise error.Abort(_(b'unclosed "batchbegin" request'))
4260 raise error.Abort(_(b'unclosed "batchbegin" request'))
4260
4261
4261 if peer:
4262 if peer:
4262 peer.close()
4263 peer.close()
4263
4264
4264 if proc:
4265 if proc:
4265 proc.kill()
4266 proc.kill()
@@ -1,1843 +1,1843 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import collections
10 import collections
11 import contextlib
11 import contextlib
12 import errno
12 import errno
13 import os
13 import os
14 import stat
14 import stat
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import nullid
17 from .node import nullid
18 from .pycompat import delattr
18 from .pycompat import delattr
19
19
20 from hgdemandimport import tracing
20 from hgdemandimport import tracing
21
21
22 from . import (
22 from . import (
23 encoding,
23 encoding,
24 error,
24 error,
25 match as matchmod,
25 match as matchmod,
26 pathutil,
26 pathutil,
27 policy,
27 policy,
28 pycompat,
28 pycompat,
29 scmutil,
29 scmutil,
30 txnutil,
30 txnutil,
31 util,
31 util,
32 )
32 )
33
33
34 from .interfaces import (
34 from .interfaces import (
35 dirstate as intdirstate,
35 dirstate as intdirstate,
36 util as interfaceutil,
36 util as interfaceutil,
37 )
37 )
38
38
39 parsers = policy.importmod('parsers')
39 parsers = policy.importmod('parsers')
40 rustmod = policy.importrust('dirstate')
40 rustmod = policy.importrust('dirstate')
41
41
42 propertycache = util.propertycache
42 propertycache = util.propertycache
43 filecache = scmutil.filecache
43 filecache = scmutil.filecache
44 _rangemask = 0x7FFFFFFF
44 _rangemask = 0x7FFFFFFF
45
45
46 dirstatetuple = parsers.dirstatetuple
46 dirstatetuple = parsers.dirstatetuple
47
47
48
48
49 class repocache(filecache):
49 class repocache(filecache):
50 """filecache for files in .hg/"""
50 """filecache for files in .hg/"""
51
51
52 def join(self, obj, fname):
52 def join(self, obj, fname):
53 return obj._opener.join(fname)
53 return obj._opener.join(fname)
54
54
55
55
56 class rootcache(filecache):
56 class rootcache(filecache):
57 """filecache for files in the repository root"""
57 """filecache for files in the repository root"""
58
58
59 def join(self, obj, fname):
59 def join(self, obj, fname):
60 return obj._join(fname)
60 return obj._join(fname)
61
61
62
62
63 def _getfsnow(vfs):
63 def _getfsnow(vfs):
64 '''Get "now" timestamp on filesystem'''
64 '''Get "now" timestamp on filesystem'''
65 tmpfd, tmpname = vfs.mkstemp()
65 tmpfd, tmpname = vfs.mkstemp()
66 try:
66 try:
67 return os.fstat(tmpfd)[stat.ST_MTIME]
67 return os.fstat(tmpfd)[stat.ST_MTIME]
68 finally:
68 finally:
69 os.close(tmpfd)
69 os.close(tmpfd)
70 vfs.unlink(tmpname)
70 vfs.unlink(tmpname)
71
71
72
72
73 @interfaceutil.implementer(intdirstate.idirstate)
73 @interfaceutil.implementer(intdirstate.idirstate)
74 class dirstate(object):
74 class dirstate(object):
75 def __init__(self, opener, ui, root, validate, sparsematchfn):
75 def __init__(self, opener, ui, root, validate, sparsematchfn):
76 '''Create a new dirstate object.
76 '''Create a new dirstate object.
77
77
78 opener is an open()-like callable that can be used to open the
78 opener is an open()-like callable that can be used to open the
79 dirstate file; root is the root of the directory tracked by
79 dirstate file; root is the root of the directory tracked by
80 the dirstate.
80 the dirstate.
81 '''
81 '''
82 self._opener = opener
82 self._opener = opener
83 self._validate = validate
83 self._validate = validate
84 self._root = root
84 self._root = root
85 self._sparsematchfn = sparsematchfn
85 self._sparsematchfn = sparsematchfn
86 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
86 # ntpath.join(root, '') of Python 2.7.9 does not add sep if root is
87 # UNC path pointing to root share (issue4557)
87 # UNC path pointing to root share (issue4557)
88 self._rootdir = pathutil.normasprefix(root)
88 self._rootdir = pathutil.normasprefix(root)
89 self._dirty = False
89 self._dirty = False
90 self._lastnormaltime = 0
90 self._lastnormaltime = 0
91 self._ui = ui
91 self._ui = ui
92 self._filecache = {}
92 self._filecache = {}
93 self._parentwriters = 0
93 self._parentwriters = 0
94 self._filename = b'dirstate'
94 self._filename = b'dirstate'
95 self._pendingfilename = b'%s.pending' % self._filename
95 self._pendingfilename = b'%s.pending' % self._filename
96 self._plchangecallbacks = {}
96 self._plchangecallbacks = {}
97 self._origpl = None
97 self._origpl = None
98 self._updatedfiles = set()
98 self._updatedfiles = set()
99 self._mapcls = dirstatemap
99 self._mapcls = dirstatemap
100 # Access and cache cwd early, so we don't access it for the first time
100 # Access and cache cwd early, so we don't access it for the first time
101 # after a working-copy update caused it to not exist (accessing it then
101 # after a working-copy update caused it to not exist (accessing it then
102 # raises an exception).
102 # raises an exception).
103 self._cwd
103 self._cwd
104
104
105 @contextlib.contextmanager
105 @contextlib.contextmanager
106 def parentchange(self):
106 def parentchange(self):
107 '''Context manager for handling dirstate parents.
107 '''Context manager for handling dirstate parents.
108
108
109 If an exception occurs in the scope of the context manager,
109 If an exception occurs in the scope of the context manager,
110 the incoherent dirstate won't be written when wlock is
110 the incoherent dirstate won't be written when wlock is
111 released.
111 released.
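
A minimal usage sketch (``newnode`` is illustrative):

    with dirstate.parentchange():
        dirstate.setparents(newnode)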
112 '''
112 '''
113 self._parentwriters += 1
113 self._parentwriters += 1
114 yield
114 yield
115 # Typically we want the "undo" step of a context manager in a
115 # Typically we want the "undo" step of a context manager in a
116 # finally block so it happens even when an exception
116 # finally block so it happens even when an exception
117 # occurs. In this case, however, we only want to decrement
117 # occurs. In this case, however, we only want to decrement
118 # parentwriters if the code in the with statement exits
118 # parentwriters if the code in the with statement exits
119 # normally, so we don't have a try/finally here on purpose.
119 # normally, so we don't have a try/finally here on purpose.
120 self._parentwriters -= 1
120 self._parentwriters -= 1
121
121
122 def pendingparentchange(self):
122 def pendingparentchange(self):
123 '''Returns true if the dirstate is in the middle of a set of changes
123 '''Returns true if the dirstate is in the middle of a set of changes
124 that modify the dirstate parent.
124 that modify the dirstate parent.
125 '''
125 '''
126 return self._parentwriters > 0
126 return self._parentwriters > 0
127
127
128 @propertycache
128 @propertycache
129 def _map(self):
129 def _map(self):
130 """Return the dirstate contents (see documentation for dirstatemap)."""
130 """Return the dirstate contents (see documentation for dirstatemap)."""
131 self._map = self._mapcls(self._ui, self._opener, self._root)
131 self._map = self._mapcls(self._ui, self._opener, self._root)
132 return self._map
132 return self._map
133
133
134 @property
134 @property
135 def _sparsematcher(self):
135 def _sparsematcher(self):
136 """The matcher for the sparse checkout.
136 """The matcher for the sparse checkout.
137
137
138 The working directory may not include every file from a manifest. The
138 The working directory may not include every file from a manifest. The
139 matcher obtained by this property will match a path if it is to be
139 matcher obtained by this property will match a path if it is to be
140 included in the working directory.
140 included in the working directory.
141 """
141 """
142 # TODO there is potential to cache this property. For now, the matcher
142 # TODO there is potential to cache this property. For now, the matcher
143 # is resolved on every access. (But the called function does use a
143 # is resolved on every access. (But the called function does use a
144 # cache to keep the lookup fast.)
144 # cache to keep the lookup fast.)
145 return self._sparsematchfn()
145 return self._sparsematchfn()
146
146
147 @repocache(b'branch')
147 @repocache(b'branch')
148 def _branch(self):
148 def _branch(self):
149 try:
149 try:
150 return self._opener.read(b"branch").strip() or b"default"
150 return self._opener.read(b"branch").strip() or b"default"
151 except IOError as inst:
151 except IOError as inst:
152 if inst.errno != errno.ENOENT:
152 if inst.errno != errno.ENOENT:
153 raise
153 raise
154 return b"default"
154 return b"default"
155
155
156 @property
156 @property
157 def _pl(self):
157 def _pl(self):
158 return self._map.parents()
158 return self._map.parents()
159
159
160 def hasdir(self, d):
160 def hasdir(self, d):
161 return self._map.hastrackeddir(d)
161 return self._map.hastrackeddir(d)
162
162
163 @rootcache(b'.hgignore')
163 @rootcache(b'.hgignore')
164 def _ignore(self):
164 def _ignore(self):
165 files = self._ignorefiles()
165 files = self._ignorefiles()
166 if not files:
166 if not files:
167 return matchmod.never()
167 return matchmod.never()
168
168
169 pats = [b'include:%s' % f for f in files]
169 pats = [b'include:%s' % f for f in files]
170 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
170 return matchmod.match(self._root, b'', [], pats, warn=self._ui.warn)
171
171
172 @propertycache
172 @propertycache
173 def _slash(self):
173 def _slash(self):
174 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
174 return self._ui.configbool(b'ui', b'slash') and pycompat.ossep != b'/'
175
175
176 @propertycache
176 @propertycache
177 def _checklink(self):
177 def _checklink(self):
178 return util.checklink(self._root)
178 return util.checklink(self._root)
179
179
180 @propertycache
180 @propertycache
181 def _checkexec(self):
181 def _checkexec(self):
182 return util.checkexec(self._root)
182 return util.checkexec(self._root)
183
183
184 @propertycache
184 @propertycache
185 def _checkcase(self):
185 def _checkcase(self):
186 return not util.fscasesensitive(self._join(b'.hg'))
186 return not util.fscasesensitive(self._join(b'.hg'))
187
187
188 def _join(self, f):
188 def _join(self, f):
189 # much faster than os.path.join()
189 # much faster than os.path.join()
190 # it's safe because f is always a relative path
190 # it's safe because f is always a relative path
191 return self._rootdir + f
191 return self._rootdir + f
192
192
193 def flagfunc(self, buildfallback):
193 def flagfunc(self, buildfallback):
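# Return a function mapping a path to its flags: b'l' for a symlink,
# b'x' for an executable file, b'' otherwise. The filesystem is used
# where it supports the relevant bits; buildfallback() supplies flags
# from other data where it does not.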
194 if self._checklink and self._checkexec:
194 if self._checklink and self._checkexec:
195
195
196 def f(x):
196 def f(x):
197 try:
197 try:
198 st = os.lstat(self._join(x))
198 st = os.lstat(self._join(x))
199 if util.statislink(st):
199 if util.statislink(st):
200 return b'l'
200 return b'l'
201 if util.statisexec(st):
201 if util.statisexec(st):
202 return b'x'
202 return b'x'
203 except OSError:
203 except OSError:
204 pass
204 pass
205 return b''
205 return b''
206
206
207 return f
207 return f
208
208
209 fallback = buildfallback()
209 fallback = buildfallback()
210 if self._checklink:
210 if self._checklink:
211
211
212 def f(x):
212 def f(x):
213 if os.path.islink(self._join(x)):
213 if os.path.islink(self._join(x)):
214 return b'l'
214 return b'l'
215 if b'x' in fallback(x):
215 if b'x' in fallback(x):
216 return b'x'
216 return b'x'
217 return b''
217 return b''
218
218
219 return f
219 return f
220 if self._checkexec:
220 if self._checkexec:
221
221
222 def f(x):
222 def f(x):
223 if b'l' in fallback(x):
223 if b'l' in fallback(x):
224 return b'l'
224 return b'l'
225 if util.isexec(self._join(x)):
225 if util.isexec(self._join(x)):
226 return b'x'
226 return b'x'
227 return b''
227 return b''
228
228
229 return f
229 return f
230 else:
230 else:
231 return fallback
231 return fallback
232
232
233 @propertycache
233 @propertycache
234 def _cwd(self):
234 def _cwd(self):
235 # internal config: ui.forcecwd
235 # internal config: ui.forcecwd
236 forcecwd = self._ui.config(b'ui', b'forcecwd')
236 forcecwd = self._ui.config(b'ui', b'forcecwd')
237 if forcecwd:
237 if forcecwd:
238 return forcecwd
238 return forcecwd
239 return encoding.getcwd()
239 return encoding.getcwd()
240
240
241 def getcwd(self):
241 def getcwd(self):
242 '''Return the path from which a canonical path is calculated.
242 '''Return the path from which a canonical path is calculated.
243
243
244 This path should be used to resolve file patterns or to convert
244 This path should be used to resolve file patterns or to convert
245 canonical paths back to file paths for display. It shouldn't be
245 canonical paths back to file paths for display. It shouldn't be
246 used to get real file paths. Use vfs functions instead.
246 used to get real file paths. Use vfs functions instead.
247 '''
247 '''
248 cwd = self._cwd
248 cwd = self._cwd
249 if cwd == self._root:
249 if cwd == self._root:
250 return b''
250 return b''
251 # self._root ends with a path separator if self._root is '/' or 'C:\'
251 # self._root ends with a path separator if self._root is '/' or 'C:\'
252 rootsep = self._root
252 rootsep = self._root
253 if not util.endswithsep(rootsep):
253 if not util.endswithsep(rootsep):
254 rootsep += pycompat.ossep
254 rootsep += pycompat.ossep
255 if cwd.startswith(rootsep):
255 if cwd.startswith(rootsep):
256 return cwd[len(rootsep) :]
256 return cwd[len(rootsep) :]
257 else:
257 else:
258 # we're outside the repo. return an absolute path.
258 # we're outside the repo. return an absolute path.
259 return cwd
259 return cwd
260
260
261 def pathto(self, f, cwd=None):
261 def pathto(self, f, cwd=None):
262 if cwd is None:
262 if cwd is None:
263 cwd = self.getcwd()
263 cwd = self.getcwd()
264 path = util.pathto(self._root, cwd, f)
264 path = util.pathto(self._root, cwd, f)
265 if self._slash:
265 if self._slash:
266 return util.pconvert(path)
266 return util.pconvert(path)
267 return path
267 return path
268
268
269 def __getitem__(self, key):
269 def __getitem__(self, key):
270 '''Return the current state of key (a filename) in the dirstate.
270 '''Return the current state of key (a filename) in the dirstate.
271
271
272 States are:
272 States are:
273 n normal
273 n normal
274 m needs merging
274 m needs merging
275 r marked for removal
275 r marked for removal
276 a marked for addition
276 a marked for addition
277 ? not tracked
277 ? not tracked
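
For example (hypothetical filename):

    state = dirstate[b'foo/bar']  # one of b'n', b'm', b'r', b'a', b'?'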
278 '''
278 '''
279 return self._map.get(key, (b"?",))[0]
279 return self._map.get(key, (b"?",))[0]
280
280
281 def __contains__(self, key):
281 def __contains__(self, key):
282 return key in self._map
282 return key in self._map
283
283
284 def __iter__(self):
284 def __iter__(self):
285 return iter(sorted(self._map))
285 return iter(sorted(self._map))
286
286
287 def items(self):
287 def items(self):
288 return pycompat.iteritems(self._map)
288 return pycompat.iteritems(self._map)
289
289
290 iteritems = items
290 iteritems = items
291
291
292 def parents(self):
292 def parents(self):
293 return [self._validate(p) for p in self._pl]
293 return [self._validate(p) for p in self._pl]
294
294
295 def p1(self):
295 def p1(self):
296 return self._validate(self._pl[0])
296 return self._validate(self._pl[0])
297
297
298 def p2(self):
298 def p2(self):
299 return self._validate(self._pl[1])
299 return self._validate(self._pl[1])
300
300
301 def branch(self):
301 def branch(self):
302 return encoding.tolocal(self._branch)
302 return encoding.tolocal(self._branch)
303
303
304 def setparents(self, p1, p2=nullid):
304 def setparents(self, p1, p2=nullid):
305 """Set dirstate parents to p1 and p2.
305 """Set dirstate parents to p1 and p2.
306
306
307 When moving from two parents to one, 'm' merged entries are
307 When moving from two parents to one, 'm' merged entries are
308 adjusted to normal and previous copy records discarded and
308 adjusted to normal and previous copy records discarded and
309 returned by the call.
309 returned by the call.
310
310
311 See localrepo.setparents()
311 See localrepo.setparents()
312 """
312 """
313 if self._parentwriters == 0:
313 if self._parentwriters == 0:
314 raise ValueError(
314 raise ValueError(
315 b"cannot set dirstate parent outside of "
315 b"cannot set dirstate parent outside of "
316 b"dirstate.parentchange context manager"
316 b"dirstate.parentchange context manager"
317 )
317 )
318
318
319 self._dirty = True
319 self._dirty = True
320 oldp2 = self._pl[1]
320 oldp2 = self._pl[1]
321 if self._origpl is None:
321 if self._origpl is None:
322 self._origpl = self._pl
322 self._origpl = self._pl
323 self._map.setparents(p1, p2)
323 self._map.setparents(p1, p2)
324 copies = {}
324 copies = {}
325 if oldp2 != nullid and p2 == nullid:
325 if oldp2 != nullid and p2 == nullid:
326 candidatefiles = self._map.nonnormalset.union(
326 candidatefiles = self._map.nonnormalset.union(
327 self._map.otherparentset
327 self._map.otherparentset
328 )
328 )
329 for f in candidatefiles:
329 for f in candidatefiles:
330 s = self._map.get(f)
330 s = self._map.get(f)
331 if s is None:
331 if s is None:
332 continue
332 continue
333
333
334 # Discard 'm' markers when moving away from a merge state
334 # Discard 'm' markers when moving away from a merge state
335 if s[0] == b'm':
335 if s[0] == b'm':
336 source = self._map.copymap.get(f)
336 source = self._map.copymap.get(f)
337 if source:
337 if source:
338 copies[f] = source
338 copies[f] = source
339 self.normallookup(f)
339 self.normallookup(f)
340 # Also fix up otherparent markers
340 # Also fix up otherparent markers
341 elif s[0] == b'n' and s[2] == -2:
341 elif s[0] == b'n' and s[2] == -2:
342 source = self._map.copymap.get(f)
342 source = self._map.copymap.get(f)
343 if source:
343 if source:
344 copies[f] = source
344 copies[f] = source
345 self.add(f)
345 self.add(f)
346 return copies
346 return copies
347
347
348 def setbranch(self, branch):
348 def setbranch(self, branch):
349 self.__class__._branch.set(self, encoding.fromlocal(branch))
349 self.__class__._branch.set(self, encoding.fromlocal(branch))
350 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
350 f = self._opener(b'branch', b'w', atomictemp=True, checkambig=True)
351 try:
351 try:
352 f.write(self._branch + b'\n')
352 f.write(self._branch + b'\n')
353 f.close()
353 f.close()
354
354
355 # make sure filecache has the correct stat info for _branch after
355 # make sure filecache has the correct stat info for _branch after
356 # replacing the underlying file
356 # replacing the underlying file
357 ce = self._filecache[b'_branch']
357 ce = self._filecache[b'_branch']
358 if ce:
358 if ce:
359 ce.refresh()
359 ce.refresh()
360 except: # re-raises
360 except: # re-raises
361 f.discard()
361 f.discard()
362 raise
362 raise
363
363
364 def invalidate(self):
364 def invalidate(self):
365 '''Causes the next access to reread the dirstate.
365 '''Causes the next access to reread the dirstate.
366
366
367 This is different from localrepo.invalidatedirstate() because it always
367 This is different from localrepo.invalidatedirstate() because it always
368 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
368 rereads the dirstate. Use localrepo.invalidatedirstate() if you want to
369 check whether the dirstate has changed before rereading it.'''
369 check whether the dirstate has changed before rereading it.'''
370
370
371 for a in ("_map", "_branch", "_ignore"):
371 for a in ("_map", "_branch", "_ignore"):
372 if a in self.__dict__:
372 if a in self.__dict__:
373 delattr(self, a)
373 delattr(self, a)
374 self._lastnormaltime = 0
374 self._lastnormaltime = 0
375 self._dirty = False
375 self._dirty = False
376 self._updatedfiles.clear()
376 self._updatedfiles.clear()
377 self._parentwriters = 0
377 self._parentwriters = 0
378 self._origpl = None
378 self._origpl = None
379
379
380 def copy(self, source, dest):
380 def copy(self, source, dest):
381 """Mark dest as a copy of source. Unmark dest if source is None."""
381 """Mark dest as a copy of source. Unmark dest if source is None."""
382 if source == dest:
382 if source == dest:
383 return
383 return
384 self._dirty = True
384 self._dirty = True
385 if source is not None:
385 if source is not None:
386 self._map.copymap[dest] = source
386 self._map.copymap[dest] = source
387 self._updatedfiles.add(source)
387 self._updatedfiles.add(source)
388 self._updatedfiles.add(dest)
388 self._updatedfiles.add(dest)
389 elif self._map.copymap.pop(dest, None):
389 elif self._map.copymap.pop(dest, None):
390 self._updatedfiles.add(dest)
390 self._updatedfiles.add(dest)
391
391
392 def copied(self, file):
392 def copied(self, file):
393 return self._map.copymap.get(file, None)
393 return self._map.copymap.get(file, None)
394
394
395 def copies(self):
395 def copies(self):
396 return self._map.copymap
396 return self._map.copymap
397
397
398 def _addpath(self, f, state, mode, size, mtime):
398 def _addpath(self, f, state, mode, size, mtime):
399 oldstate = self[f]
399 oldstate = self[f]
400 if state == b'a' or oldstate == b'r':
400 if state == b'a' or oldstate == b'r':
401 scmutil.checkfilename(f)
401 scmutil.checkfilename(f)
402 if self._map.hastrackeddir(f):
402 if self._map.hastrackeddir(f):
403 raise error.Abort(
403 raise error.Abort(
404 _(b'directory %r already in dirstate') % pycompat.bytestr(f)
404 _(b'directory %r already in dirstate') % pycompat.bytestr(f)
405 )
405 )
406 # shadows
406 # shadows
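# pathutil.finddirs(f) yields each ancestor directory of f, e.g.
# b'a/b/c' -> b'a/b', b'a', b'' (a sketch of the helper's behavior; the
# trailing empty string stands for the repository root)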
407 for d in util.finddirs(f):
407 for d in pathutil.finddirs(f):
408 if self._map.hastrackeddir(d):
408 if self._map.hastrackeddir(d):
409 break
409 break
410 entry = self._map.get(d)
410 entry = self._map.get(d)
411 if entry is not None and entry[0] != b'r':
411 if entry is not None and entry[0] != b'r':
412 raise error.Abort(
412 raise error.Abort(
413 _(b'file %r in dirstate clashes with %r')
413 _(b'file %r in dirstate clashes with %r')
414 % (pycompat.bytestr(d), pycompat.bytestr(f))
414 % (pycompat.bytestr(d), pycompat.bytestr(f))
415 )
415 )
416 self._dirty = True
416 self._dirty = True
417 self._updatedfiles.add(f)
417 self._updatedfiles.add(f)
418 self._map.addfile(f, oldstate, state, mode, size, mtime)
418 self._map.addfile(f, oldstate, state, mode, size, mtime)
419
419
420 def normal(self, f, parentfiledata=None):
420 def normal(self, f, parentfiledata=None):
421 '''Mark a file normal and clean.
421 '''Mark a file normal and clean.
422
422
423 parentfiledata: (mode, size, mtime) of the clean file
423 parentfiledata: (mode, size, mtime) of the clean file
424
424
425 parentfiledata should be computed from memory (for mode,
425 parentfiledata should be computed from memory (for mode,
426 size), at or as close as possible to the point where we
426 size), at or as close as possible to the point where we
427 determined the file was clean, to limit the risk of the
427 determined the file was clean, to limit the risk of the
428 file having been changed by an external process between the
428 file having been changed by an external process between the
429 moment where the file was determined to be clean and now.'''
429 moment where the file was determined to be clean and now.'''
430 if parentfiledata:
430 if parentfiledata:
431 (mode, size, mtime) = parentfiledata
431 (mode, size, mtime) = parentfiledata
432 else:
432 else:
433 s = os.lstat(self._join(f))
433 s = os.lstat(self._join(f))
434 mode = s.st_mode
434 mode = s.st_mode
435 size = s.st_size
435 size = s.st_size
436 mtime = s[stat.ST_MTIME]
436 mtime = s[stat.ST_MTIME]
437 self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
437 self._addpath(f, b'n', mode, size & _rangemask, mtime & _rangemask)
438 self._map.copymap.pop(f, None)
438 self._map.copymap.pop(f, None)
439 if f in self._map.nonnormalset:
439 if f in self._map.nonnormalset:
440 self._map.nonnormalset.remove(f)
440 self._map.nonnormalset.remove(f)
441 if mtime > self._lastnormaltime:
441 if mtime > self._lastnormaltime:
442 # Remember the most recent modification timeslot for status(),
442 # Remember the most recent modification timeslot for status(),
443 # to make sure we won't miss future size-preserving file content
443 # to make sure we won't miss future size-preserving file content
444 # modifications that happen within the same timeslot.
444 # modifications that happen within the same timeslot.
445 self._lastnormaltime = mtime
445 self._lastnormaltime = mtime
446
446
447 def normallookup(self, f):
447 def normallookup(self, f):
448 '''Mark a file normal, but possibly dirty.'''
448 '''Mark a file normal, but possibly dirty.'''
449 if self._pl[1] != nullid:
449 if self._pl[1] != nullid:
450 # if there is a merge going on and the file was either
450 # if there is a merge going on and the file was either
451 # in state 'm' (-1) or coming from other parent (-2) before
451 # in state 'm' (-1) or coming from other parent (-2) before
452 # being removed, restore that state.
452 # being removed, restore that state.
453 entry = self._map.get(f)
453 entry = self._map.get(f)
454 if entry is not None:
454 if entry is not None:
455 if entry[0] == b'r' and entry[2] in (-1, -2):
455 if entry[0] == b'r' and entry[2] in (-1, -2):
456 source = self._map.copymap.get(f)
456 source = self._map.copymap.get(f)
457 if entry[2] == -1:
457 if entry[2] == -1:
458 self.merge(f)
458 self.merge(f)
459 elif entry[2] == -2:
459 elif entry[2] == -2:
460 self.otherparent(f)
460 self.otherparent(f)
461 if source:
461 if source:
462 self.copy(source, f)
462 self.copy(source, f)
463 return
463 return
464 if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
464 if entry[0] == b'm' or entry[0] == b'n' and entry[2] == -2:
465 return
465 return
466 self._addpath(f, b'n', 0, -1, -1)
466 self._addpath(f, b'n', 0, -1, -1)
467 self._map.copymap.pop(f, None)
467 self._map.copymap.pop(f, None)
468
468
469 def otherparent(self, f):
469 def otherparent(self, f):
470 '''Mark as coming from the other parent, always dirty.'''
470 '''Mark as coming from the other parent, always dirty.'''
471 if self._pl[1] == nullid:
471 if self._pl[1] == nullid:
472 raise error.Abort(
472 raise error.Abort(
473 _(b"setting %r to other parent only allowed in merges") % f
473 _(b"setting %r to other parent only allowed in merges") % f
474 )
474 )
475 if f in self and self[f] == b'n':
475 if f in self and self[f] == b'n':
476 # merge-like
476 # merge-like
477 self._addpath(f, b'm', 0, -2, -1)
477 self._addpath(f, b'm', 0, -2, -1)
478 else:
478 else:
479 # add-like
479 # add-like
480 self._addpath(f, b'n', 0, -2, -1)
480 self._addpath(f, b'n', 0, -2, -1)
481 self._map.copymap.pop(f, None)
481 self._map.copymap.pop(f, None)
482
482
483 def add(self, f):
483 def add(self, f):
484 '''Mark a file added.'''
484 '''Mark a file added.'''
485 self._addpath(f, b'a', 0, -1, -1)
485 self._addpath(f, b'a', 0, -1, -1)
486 self._map.copymap.pop(f, None)
486 self._map.copymap.pop(f, None)
487
487
488 def remove(self, f):
488 def remove(self, f):
489 '''Mark a file removed.'''
489 '''Mark a file removed.'''
490 self._dirty = True
490 self._dirty = True
491 oldstate = self[f]
491 oldstate = self[f]
492 size = 0
492 size = 0
493 if self._pl[1] != nullid:
493 if self._pl[1] != nullid:
494 entry = self._map.get(f)
494 entry = self._map.get(f)
495 if entry is not None:
495 if entry is not None:
496 # backup the previous state
496 # backup the previous state
497 if entry[0] == b'm': # merge
497 if entry[0] == b'm': # merge
498 size = -1
498 size = -1
499 elif entry[0] == b'n' and entry[2] == -2: # other parent
499 elif entry[0] == b'n' and entry[2] == -2: # other parent
500 size = -2
500 size = -2
501 self._map.otherparentset.add(f)
501 self._map.otherparentset.add(f)
502 self._updatedfiles.add(f)
502 self._updatedfiles.add(f)
503 self._map.removefile(f, oldstate, size)
503 self._map.removefile(f, oldstate, size)
504 if size == 0:
504 if size == 0:
505 self._map.copymap.pop(f, None)
505 self._map.copymap.pop(f, None)
506
506
507 def merge(self, f):
507 def merge(self, f):
508 '''Mark a file merged.'''
508 '''Mark a file merged.'''
509 if self._pl[1] == nullid:
509 if self._pl[1] == nullid:
510 return self.normallookup(f)
510 return self.normallookup(f)
511 return self.otherparent(f)
511 return self.otherparent(f)
512
512
513 def drop(self, f):
513 def drop(self, f):
514 '''Drop a file from the dirstate'''
514 '''Drop a file from the dirstate'''
515 oldstate = self[f]
515 oldstate = self[f]
516 if self._map.dropfile(f, oldstate):
516 if self._map.dropfile(f, oldstate):
517 self._dirty = True
517 self._dirty = True
518 self._updatedfiles.add(f)
518 self._updatedfiles.add(f)
519 self._map.copymap.pop(f, None)
519 self._map.copymap.pop(f, None)
520
520
521 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
521 def _discoverpath(self, path, normed, ignoremissing, exists, storemap):
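# Resolve the on-disk case of `path`: normalize the leading directory
# components against the dirstate, ask the filesystem (util.fspath)
# for the casefolded remainder, and cache the result in `storemap`.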
522 if exists is None:
522 if exists is None:
523 exists = os.path.lexists(os.path.join(self._root, path))
523 exists = os.path.lexists(os.path.join(self._root, path))
524 if not exists:
524 if not exists:
525 # Maybe a path component exists
525 # Maybe a path component exists
526 if not ignoremissing and b'/' in path:
526 if not ignoremissing and b'/' in path:
527 d, f = path.rsplit(b'/', 1)
527 d, f = path.rsplit(b'/', 1)
528 d = self._normalize(d, False, ignoremissing, None)
528 d = self._normalize(d, False, ignoremissing, None)
529 folded = d + b"/" + f
529 folded = d + b"/" + f
530 else:
530 else:
531 # No path components, preserve original case
531 # No path components, preserve original case
532 folded = path
532 folded = path
533 else:
533 else:
534 # recursively normalize leading directory components
534 # recursively normalize leading directory components
535 # against dirstate
535 # against dirstate
536 if b'/' in normed:
536 if b'/' in normed:
537 d, f = normed.rsplit(b'/', 1)
537 d, f = normed.rsplit(b'/', 1)
538 d = self._normalize(d, False, ignoremissing, True)
538 d = self._normalize(d, False, ignoremissing, True)
539 r = self._root + b"/" + d
539 r = self._root + b"/" + d
540 folded = d + b"/" + util.fspath(f, r)
540 folded = d + b"/" + util.fspath(f, r)
541 else:
541 else:
542 folded = util.fspath(normed, self._root)
542 folded = util.fspath(normed, self._root)
543 storemap[normed] = folded
543 storemap[normed] = folded
544
544
545 return folded
545 return folded
546
546
547 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
547 def _normalizefile(self, path, isknown, ignoremissing=False, exists=None):
548 normed = util.normcase(path)
548 normed = util.normcase(path)
549 folded = self._map.filefoldmap.get(normed, None)
549 folded = self._map.filefoldmap.get(normed, None)
550 if folded is None:
550 if folded is None:
551 if isknown:
551 if isknown:
552 folded = path
552 folded = path
553 else:
553 else:
554 folded = self._discoverpath(
554 folded = self._discoverpath(
555 path, normed, ignoremissing, exists, self._map.filefoldmap
555 path, normed, ignoremissing, exists, self._map.filefoldmap
556 )
556 )
557 return folded
557 return folded
558
558
559 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
559 def _normalize(self, path, isknown, ignoremissing=False, exists=None):
560 normed = util.normcase(path)
560 normed = util.normcase(path)
561 folded = self._map.filefoldmap.get(normed, None)
561 folded = self._map.filefoldmap.get(normed, None)
562 if folded is None:
562 if folded is None:
563 folded = self._map.dirfoldmap.get(normed, None)
563 folded = self._map.dirfoldmap.get(normed, None)
564 if folded is None:
564 if folded is None:
565 if isknown:
565 if isknown:
566 folded = path
566 folded = path
567 else:
567 else:
568 # store discovered result in dirfoldmap so that future
568 # store discovered result in dirfoldmap so that future
569 # normalizefile calls don't start matching directories
569 # normalizefile calls don't start matching directories
570 folded = self._discoverpath(
570 folded = self._discoverpath(
571 path, normed, ignoremissing, exists, self._map.dirfoldmap
571 path, normed, ignoremissing, exists, self._map.dirfoldmap
572 )
572 )
573 return folded
573 return folded
574
574
575 def normalize(self, path, isknown=False, ignoremissing=False):
575 def normalize(self, path, isknown=False, ignoremissing=False):
576 '''
576 '''
577 normalize the case of a pathname when on a casefolding filesystem
577 normalize the case of a pathname when on a casefolding filesystem
578
578
579 isknown specifies whether the filename came from walking the
579 isknown specifies whether the filename came from walking the
580 disk, to avoid extra filesystem access.
580 disk, to avoid extra filesystem access.
581
581
582 If ignoremissing is True, missing paths are returned
582 If ignoremissing is True, missing paths are returned
583 unchanged. Otherwise, we try harder to normalize possibly
583 unchanged. Otherwise, we try harder to normalize possibly
584 existing path components.
584 existing path components.
585
585
586 The normalized case is determined based on the following precedence:
586 The normalized case is determined based on the following precedence:
587
587
588 - version of name already stored in the dirstate
588 - version of name already stored in the dirstate
589 - version of name stored on disk
589 - version of name stored on disk
590 - version provided via command arguments
590 - version provided via command arguments
591 '''
591 '''
592
592
593 if self._checkcase:
593 if self._checkcase:
594 return self._normalize(path, isknown, ignoremissing)
594 return self._normalize(path, isknown, ignoremissing)
595 return path
595 return path
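
    # Illustrative sketch (hypothetical paths, not from the original source):
    # on a case-insensitive filesystem with b'Foo/Bar.txt' tracked, a call
    # like
    #     dirstate.normalize(b'foo/bar.txt')
    # would return b'Foo/Bar.txt' (the dirstate spelling wins); for an
    # untracked file that exists on disk as b'Baz.TXT', the on-disk spelling
    # is used instead, and only as a last resort the argument as given.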

    def clear(self):
        self._map.clear()
        self._lastnormaltime = 0
        self._updatedfiles.clear()
        self._dirty = True

    def rebuild(self, parent, allfiles, changedfiles=None):
        if changedfiles is None:
            # Rebuild entire dirstate
            changedfiles = allfiles
        lastnormaltime = self._lastnormaltime
        self.clear()
        self._lastnormaltime = lastnormaltime

        if self._origpl is None:
            self._origpl = self._pl
        self._map.setparents(parent, nullid)
        for f in changedfiles:
            if f in allfiles:
                self.normallookup(f)
            else:
                self.drop(f)

        self._dirty = True

    def identity(self):
        '''Return the identity of the dirstate itself, to detect changes
        in storage.

        If the identity of the previous dirstate is equal to this one,
        writing out changes based on the former dirstate preserves
        consistency.
        '''
        return self._map.identity

    def write(self, tr):
        if not self._dirty:
            return

        filename = self._filename
        if tr:
            # 'dirstate.write()' is not only for writing in-memory
            # changes out, but also for dropping ambiguous timestamps.
            # Delayed writing would re-raise the "ambiguous timestamp issue".
            # See also the wiki page below for details:
            # https://www.mercurial-scm.org/wiki/DirstateTransactionPlan

            # emulate dropping timestamp in 'parsers.pack_dirstate'
            now = _getfsnow(self._opener)
            self._map.clearambiguoustimes(self._updatedfiles, now)

            # emulate that all 'dirstate.normal' results are written out
            self._lastnormaltime = 0
            self._updatedfiles.clear()

            # delay writing in-memory changes out
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )
            return

        st = self._opener(filename, b"w", atomictemp=True, checkambig=True)
        self._writedirstate(st)
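
    # Hedged illustration (an assumption about the failure mode, not from the
    # original source): the "ambiguous timestamp issue" arises when a file is
    # changed twice within the same second without changing size -- a
    # size+mtime comparison alone cannot see the second change, so the
    # recorded timestamp is dropped to force a content comparison on the
    # next status run.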

    def addparentchangecallback(self, category, callback):
        """add a callback to be called when the working directory parents
        change

        Callback will be called with the following arguments:
            dirstate, (oldp1, oldp2), (newp1, newp2)

        Category is a unique identifier to allow overwriting an old callback
        with a newer callback.
        """
        self._plchangecallbacks[category] = callback

    def _writedirstate(self, st):
        # notify callbacks about parents change
        if self._origpl is not None and self._origpl != self._pl:
            for c, callback in sorted(
                pycompat.iteritems(self._plchangecallbacks)
            ):
                callback(self, self._origpl, self._pl)
            self._origpl = None
        # use the modification time of the newly created temporary file as the
        # filesystem's notion of 'now'
        now = util.fstat(st)[stat.ST_MTIME] & _rangemask

        # a large enough 'delaywrite' prevents 'pack_dirstate' from dropping
        # the timestamp of each entry in the dirstate, because of 'now > mtime'
        delaywrite = self._ui.configint(b'debug', b'dirstate.delaywrite')
        if delaywrite > 0:
            # do we have any files to delay for?
            for f, e in pycompat.iteritems(self._map):
                if e[0] == b'n' and e[3] == now:
                    import time  # to avoid useless import

                    # rather than sleep n seconds, sleep until the next
                    # multiple of n seconds
                    clock = time.time()
                    start = int(clock) - (int(clock) % delaywrite)
                    end = start + delaywrite
                    time.sleep(end - clock)
                    now = end  # trust our estimate that the end is near now
                    break

        self._map.write(st, now)
        self._lastnormaltime = 0
        self._dirty = False
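
    # Worked example (illustrative values): with delaywrite = 2 and
    # clock = 103.4, start = 103 - (103 % 2) = 102 and end = 104, so we sleep
    # 0.6 seconds -- i.e. until the next multiple of 2 seconds -- which
    # guarantees 'now' has moved past the mtime recorded for the
    # just-written entries.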

    def _dirignore(self, f):
        if self._ignore(f):
            return True
        for p in pathutil.finddirs(f):  # changed in this revision from util.finddirs(f)
            if self._ignore(p):
                return True
        return False
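
    # Illustrative sketch (hypothetical path): for f = b'build/out/log.txt',
    # pathutil.finddirs() yields the ancestor directories, so _dirignore()
    # checks the file itself and then each enclosing directory
    # (b'build/out', then b'build', ...) -- the file counts as ignored if
    # any of those matches an ignore pattern.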

    def _ignorefiles(self):
        files = []
        if os.path.exists(self._join(b'.hgignore')):
            files.append(self._join(b'.hgignore'))
        for name, path in self._ui.configitems(b"ui"):
            if name == b'ignore' or name.startswith(b'ignore.'):
                # we need to use os.path.join here rather than self._join
                # because path is arbitrary and user-specified
                files.append(os.path.join(self._rootdir, util.expandpath(path)))
        return files
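
    # Config sketch (assumed hgrc layout for illustration): entries like
    #     [ui]
    #     ignore = ~/.hgignore_global
    #     ignore.work = ~/work/.hgignore
    # each add one more ignore file to the list returned above.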

    def _ignorefileandline(self, f):
        files = collections.deque(self._ignorefiles())
        visited = set()
        while files:
            i = files.popleft()
            patterns = matchmod.readpatternfile(
                i, self._ui.warn, sourceinfo=True
            )
            for pattern, lineno, line in patterns:
                kind, p = matchmod._patsplit(pattern, b'glob')
                if kind == b"subinclude":
                    if p not in visited:
                        files.append(p)
                    continue
                m = matchmod.match(
                    self._root, b'', [], [pattern], warn=self._ui.warn
                )
                if m(f):
                    return (i, lineno, line)
            visited.add(i)
        return (None, -1, b"")

    def _walkexplicit(self, match, subrepos):
        '''Get stat data about the files explicitly specified by match.

        Return a triple (results, dirsfound, dirsnotfound).
        - results is a mapping from filename to stat result. It also contains
          listings mapping subrepos and .hg to None.
        - dirsfound is a list of files found to be directories.
        - dirsnotfound is a list of files that the dirstate thinks are
          directories and that were not found.'''

        def badtype(mode):
            kind = _(b'unknown')
            if stat.S_ISCHR(mode):
                kind = _(b'character device')
            elif stat.S_ISBLK(mode):
                kind = _(b'block device')
            elif stat.S_ISFIFO(mode):
                kind = _(b'fifo')
            elif stat.S_ISSOCK(mode):
                kind = _(b'socket')
            elif stat.S_ISDIR(mode):
                kind = _(b'directory')
            return _(b'unsupported file type (type is %s)') % kind

        matchedir = match.explicitdir
        badfn = match.bad
        dmap = self._map
        lstat = os.lstat
        getkind = stat.S_IFMT
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join
        dirsfound = []
        foundadd = dirsfound.append
        dirsnotfound = []
        notfoundadd = dirsnotfound.append

        if not match.isexact() and self._checkcase:
            normalize = self._normalize
        else:
            normalize = None

        files = sorted(match.files())
        subrepos.sort()
        i, j = 0, 0
        while i < len(files) and j < len(subrepos):
            subpath = subrepos[j] + b"/"
            if files[i] < subpath:
                i += 1
                continue
            while i < len(files) and files[i].startswith(subpath):
                del files[i]
            j += 1

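        # Worked example (illustrative values): with files = [b'a', b'sub/x',
        # b'sub/y', b'z'] and subrepos = [b'sub'], the merge-style scan above
        # deletes b'sub/x' and b'sub/y', leaving [b'a', b'z'] -- paths inside
        # a subrepo are handled by that subrepo, not walked here.
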
        if not files or b'' in files:
            files = [b'']
            # constructing the foldmap is expensive, so don't do it for the
            # common case where files is ['']
            normalize = None
        results = dict.fromkeys(subrepos)
        results[b'.hg'] = None

        for ff in files:
            if normalize:
                nf = normalize(ff, False, True)
            else:
                nf = ff
            if nf in results:
                continue

            try:
                st = lstat(join(nf))
                kind = getkind(st.st_mode)
                if kind == dirkind:
                    if nf in dmap:
                        # file replaced by dir on disk but still in dirstate
                        results[nf] = None
                    if matchedir:
                        matchedir(nf)
                    foundadd((nf, ff))
                elif kind == regkind or kind == lnkkind:
                    results[nf] = st
                else:
                    badfn(ff, badtype(kind))
                    if nf in dmap:
                        results[nf] = None
            except OSError as inst:  # nf not found on disk - it is dirstate only
                if nf in dmap:  # does it exactly match a missing file?
                    results[nf] = None
                else:  # does it match a missing directory?
                    if self._map.hasdir(nf):
                        if matchedir:
                            matchedir(nf)
                        notfoundadd(nf)
                    else:
                        badfn(ff, encoding.strtolocal(inst.strerror))

        # match.files() may contain explicitly-specified paths that shouldn't
        # be taken; drop them from the list of files found. dirsfound/notfound
        # aren't filtered here because they will be tested later.
        if match.anypats():
            for f in list(results):
                if f == b'.hg' or f in subrepos:
                    # keep sentinel to disable further out-of-repo walks
                    continue
                if not match(f):
                    del results[f]

        # Case insensitive filesystems cannot rely on lstat() failing to detect
        # a case-only rename. Prune the stat object for any file that does not
        # match the case in the filesystem, if there are multiple files that
        # normalize to the same path.
        if match.isexact() and self._checkcase:
            normed = {}

            for f, st in pycompat.iteritems(results):
                if st is None:
                    continue

                nc = util.normcase(f)
                paths = normed.get(nc)

                if paths is None:
                    paths = set()
                    normed[nc] = paths

                paths.add(f)

            for norm, paths in pycompat.iteritems(normed):
                if len(paths) > 1:
                    for path in paths:
                        folded = self._discoverpath(
                            path, norm, True, None, self._map.dirfoldmap
                        )
                        if path != folded:
                            results[path] = None

        return results, dirsfound, dirsnotfound

    def walk(self, match, subrepos, unknown, ignored, full=True):
        '''
        Walk recursively through the directory tree, finding all files
        matched by match.

        If full is False, maybe skip some known-clean files.

        Return a dict mapping filename to stat-like object (either
        mercurial.osutil.stat instance or return value of os.stat()).
        '''
        # full is a flag that extensions that hook into walk can use -- this
        # implementation doesn't use it at all. This satisfies the contract
        # because we only guarantee a "maybe".

        if ignored:
            ignore = util.never
            dirignore = util.never
        elif unknown:
            ignore = self._ignore
            dirignore = self._dirignore
        else:
            # if not unknown and not ignored, drop dir recursion and step 2
            ignore = util.always
            dirignore = util.always

        matchfn = match.matchfn
        matchalways = match.always()
        matchtdir = match.traversedir
        dmap = self._map
        listdir = util.listdir
        lstat = os.lstat
        dirkind = stat.S_IFDIR
        regkind = stat.S_IFREG
        lnkkind = stat.S_IFLNK
        join = self._join

        exact = skipstep3 = False
        if match.isexact():  # match.exact
            exact = True
            dirignore = util.always  # skip step 2
        elif match.prefix():  # match.match, no patterns
            skipstep3 = True

        if not exact and self._checkcase:
            normalize = self._normalize
            normalizefile = self._normalizefile
            skipstep3 = False
        else:
            normalize = self._normalize
            normalizefile = None

        # step 1: find all explicit files
        results, work, dirsnotfound = self._walkexplicit(match, subrepos)

        skipstep3 = skipstep3 and not (work or dirsnotfound)
        work = [d for d in work if not dirignore(d[0])]

        # step 2: visit subdirectories
        def traverse(work, alreadynormed):
            wadd = work.append
            while work:
                tracing.counter('dirstate.walk work', len(work))
                nd = work.pop()
                visitentries = match.visitchildrenset(nd)
                if not visitentries:
                    continue
                if visitentries == b'this' or visitentries == b'all':
                    visitentries = None
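                # Hedged illustration (hypothetical matcher): for a pattern
                # rooted at b'src/lib', visitchildrenset(b'src') might return
                # {b'lib'} (visit only that child), while b'this' / b'all'
                # mean "no useful restriction", which we record as None here.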
                skip = None
                if nd != b'':
                    skip = b'.hg'
                try:
                    with tracing.log('dirstate.walk.traverse listdir %s', nd):
                        entries = listdir(join(nd), stat=True, skip=skip)
                except OSError as inst:
                    if inst.errno in (errno.EACCES, errno.ENOENT):
                        match.bad(
                            self.pathto(nd), encoding.strtolocal(inst.strerror)
                        )
                        continue
                    raise
                for f, kind, st in entries:
                    # Some matchers may return files in the visitentries set,
                    # instead of 'this', if the matcher explicitly mentions them
                    # and is not an exactmatcher. This is acceptable; we do not
                    # make any hard assumptions about file-or-directory below
                    # based on the presence of `f` in visitentries. If
                    # visitchildrenset returned a set, we can always skip the
                    # entries *not* in the set it provided regardless of whether
                    # they're actually a file or a directory.
                    if visitentries and f not in visitentries:
                        continue
                    if normalizefile:
                        # even though f might be a directory, we're only
                        # interested in comparing it to files currently in the
                        # dmap -- therefore normalizefile is enough
                        nf = normalizefile(
                            nd and (nd + b"/" + f) or f, True, True
                        )
                    else:
                        nf = nd and (nd + b"/" + f) or f
                    if nf not in results:
                        if kind == dirkind:
                            if not ignore(nf):
                                if matchtdir:
                                    matchtdir(nf)
                                wadd(nf)
                            if nf in dmap and (matchalways or matchfn(nf)):
                                results[nf] = None
                        elif kind == regkind or kind == lnkkind:
                            if nf in dmap:
                                if matchalways or matchfn(nf):
                                    results[nf] = st
                            elif (matchalways or matchfn(nf)) and not ignore(
                                nf
                            ):
                                # unknown file -- normalize if necessary
                                if not alreadynormed:
                                    nf = normalize(nf, False, True)
                                results[nf] = st
                        elif nf in dmap and (matchalways or matchfn(nf)):
                            results[nf] = None

        for nd, d in work:
            # alreadynormed means that processwork doesn't have to do any
            # expensive directory normalization
            alreadynormed = not normalize or nd == d
            traverse([d], alreadynormed)

        for s in subrepos:
            del results[s]
        del results[b'.hg']

        # step 3: visit remaining files from dmap
        if not skipstep3 and not exact:
            # If a dmap file is not in results yet, it was either
            # a) not matched by matchfn, b) ignored, c) missing, or d) under a
            # symlink directory.
            if not results and matchalways:
                visit = [f for f in dmap]
            else:
                visit = [f for f in dmap if f not in results and matchfn(f)]
            visit.sort()

            if unknown:
                # unknown == True means we walked all dirs under the roots
                # that weren't ignored, and everything that matched was stat'ed
                # and is already in results.
                # The rest must thus be ignored or under a symlink.
                audit_path = pathutil.pathauditor(self._root, cached=True)

                for nf in iter(visit):
                    # If a stat for the same file was already added with a
                    # different case, don't add one for this, since that would
                    # make it appear as if the file exists under both names
                    # on disk.
                    if (
                        normalizefile
                        and normalizefile(nf, True, True) in results
                    ):
                        results[nf] = None
                    # Report ignored items in the dmap as long as they are not
                    # under a symlink directory.
                    elif audit_path.check(nf):
                        try:
                            results[nf] = lstat(join(nf))
                            # file was just ignored, no links, and exists
                        except OSError:
                            # file doesn't exist
                            results[nf] = None
                    else:
                        # It's either missing or under a symlink directory,
                        # which in this case we report as missing
                        results[nf] = None
            else:
                # We may not have walked the full directory tree above,
                # so stat and check everything we missed.
                iv = iter(visit)
                for st in util.statfiles([join(i) for i in visit]):
                    results[next(iv)] = st
        return results

    def status(self, match, subrepos, ignored, clean, unknown):
        '''Determine the status of the working copy relative to the
        dirstate and return a pair of (unsure, status), where status is of type
        scmutil.status and:

          unsure:
            files that might have been modified since the dirstate was
            written, but need to be read to be sure (size is the same
            but mtime differs)
          status.modified:
            files that have definitely been modified since the dirstate
            was written (different size or mode)
          status.clean:
            files that have definitely not been modified since the
            dirstate was written
        '''
        listignored, listclean, listunknown = ignored, clean, unknown
        lookup, modified, added, unknown, ignored = [], [], [], [], []
        removed, deleted, clean = [], [], []

        dmap = self._map
        dmap.preload()

        use_rust = True
        if rustmod is None:
            use_rust = False
        elif subrepos:
            use_rust = False
        if bool(listunknown):
            # Pathauditor does not exist yet in Rust, unknown files
            # can't be trusted.
            use_rust = False
        elif self._ignorefiles() and listignored:
            # Rust has no ignore mechanism yet, so don't use Rust for
            # commands that need ignore.
            use_rust = False
        elif not match.always():
            # Matchers have yet to be implemented
            use_rust = False

        if use_rust:
            # Force Rayon (Rust parallelism library) to respect the number of
            # workers. This is a temporary workaround until Rust code knows
            # how to read the config file.
            numcpus = self._ui.configint("worker", "numcpus")
            if numcpus is not None:
                encoding.environ.setdefault(
                    b'RAYON_NUM_THREADS', b'%d' % numcpus
                )

            workers_enabled = self._ui.configbool("worker", "enabled", True)
            if not workers_enabled:
                encoding.environ[b"RAYON_NUM_THREADS"] = b"1"

            (
                lookup,
                modified,
                added,
                removed,
                deleted,
                unknown,
                clean,
            ) = rustmod.status(
                dmap._rustmap,
                self._rootdir,
                bool(listclean),
                self._lastnormaltime,
                self._checkexec,
            )

            status = scmutil.status(
                modified=modified,
                added=added,
                removed=removed,
                deleted=deleted,
                unknown=unknown,
                ignored=ignored,
                clean=clean,
            )
            return (lookup, status)

        def noop(f):
            pass

        dcontains = dmap.__contains__
        dget = dmap.__getitem__
        ladd = lookup.append  # aka "unsure"
        madd = modified.append
        aadd = added.append
        uadd = unknown.append if listunknown else noop
        iadd = ignored.append if listignored else noop
        radd = removed.append
        dadd = deleted.append
        cadd = clean.append if listclean else noop
        mexact = match.exact
        dirignore = self._dirignore
        checkexec = self._checkexec
        copymap = self._map.copymap
        lastnormaltime = self._lastnormaltime

        # We need to do full walks when either
        # - we're listing all clean files, or
        # - match.traversedir does something, because match.traversedir should
        #   be called for every dir in the working dir
        full = listclean or match.traversedir is not None
        for fn, st in pycompat.iteritems(
            self.walk(match, subrepos, listunknown, listignored, full=full)
        ):
            if not dcontains(fn):
                if (listignored or mexact(fn)) and dirignore(fn):
                    if listignored:
                        iadd(fn)
                else:
                    uadd(fn)
                continue

            # This is equivalent to 'state, mode, size, time = dmap[fn]' but not
            # written like that for performance reasons. dmap[fn] is not a
            # Python tuple in compiled builds. The CPython UNPACK_SEQUENCE
            # opcode has fast paths when the value to be unpacked is a tuple or
            # a list, but falls back to creating a full-fledged iterator in
            # general. That is much slower than simply accessing and storing the
            # tuple members one by one.
            t = dget(fn)
            state = t[0]
            mode = t[1]
            size = t[2]
            time = t[3]

            if not st and state in b"nma":
                dadd(fn)
            elif state == b'n':
                if (
                    size >= 0
                    and (
                        (size != st.st_size and size != st.st_size & _rangemask)
                        or ((mode ^ st.st_mode) & 0o100 and checkexec)
                    )
                    or size == -2  # other parent
                    or fn in copymap
                ):
                    madd(fn)
                elif (
                    time != st[stat.ST_MTIME]
                    and time != st[stat.ST_MTIME] & _rangemask
                ):
                    ladd(fn)
                elif st[stat.ST_MTIME] == lastnormaltime:
                    # fn may have just been marked as normal and it may have
                    # changed in the same second without changing its size.
                    # This can happen if we quickly do multiple commits.
                    # Force lookup, so we don't miss such a racy file change.
                    ladd(fn)
                elif listclean:
                    cadd(fn)
            elif state == b'm':
                madd(fn)
            elif state == b'a':
                aadd(fn)
            elif state == b'r':
                radd(fn)

        return (
            lookup,
            scmutil.status(
                modified, added, removed, deleted, unknown, ignored, clean
            ),
        )
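
    # Usage sketch (assumed wiring; matchmod is this module's matcher import):
    #     unsure, st = dirstate.status(
    #         matchmod.always(), [], ignored=False, clean=False, unknown=True
    #     )
    # 'unsure' then lists files whose size matched but whose mtime differed,
    # i.e. candidates that still need a content comparison.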

    def matches(self, match):
        '''
        return files in the dirstate (in whatever state) filtered by match
        '''
        dmap = self._map
        if match.always():
            return dmap.keys()
        files = match.files()
        if match.isexact():
            # fast path -- filter the other way around, since typically files
            # is much smaller than dmap
            return [f for f in files if f in dmap]
        if match.prefix() and all(fn in dmap for fn in files):
            # fast path -- all the values are known to be files, so just
            # return that
            return list(files)
        return [f for f in dmap if match(f)]

    def _actualfilename(self, tr):
        if tr:
            return self._pendingfilename
        else:
            return self._filename

    def savebackup(self, tr, backupname):
        '''Save current dirstate into backup file'''
        filename = self._actualfilename(tr)
        assert backupname != filename

        # use '_writedirstate' instead of 'write' to make certain that changes
        # are written out, because the latter skips writing while a transaction
        # is running. The output file will be used to create a backup of the
        # dirstate at this point.
        if self._dirty or not self._opener.exists(filename):
            self._writedirstate(
                self._opener(filename, b"w", atomictemp=True, checkambig=True)
            )

        if tr:
            # ensure that subsequent tr.writepending returns True for
            # changes written out above, even if dirstate is never
            # changed after this
            tr.addfilegenerator(
                b'dirstate',
                (self._filename,),
                self._writedirstate,
                location=b'plain',
            )

            # ensure that pending file written above is unlinked at
            # failure, even if tr.writepending isn't invoked until the
            # end of this transaction
            tr.registertmp(filename, location=b'plain')

        self._opener.tryunlink(backupname)
        # hardlink backup is okay because _writedirstate is always called
        # with an "atomictemp=True" file.
        util.copyfile(
            self._opener.join(filename),
            self._opener.join(backupname),
            hardlink=True,
        )

    def restorebackup(self, tr, backupname):
        '''Restore the dirstate from a backup file'''
        # this "invalidate()" prevents "wlock.release()" from writing
        # changes of dirstate out after restoring from backup file
        self.invalidate()
        filename = self._actualfilename(tr)
        o = self._opener
        if util.samefile(o.join(backupname), o.join(filename)):
            o.unlink(backupname)
        else:
            o.rename(backupname, filename, checkambig=True)

    def clearbackup(self, tr, backupname):
        '''Clear backup file'''
        self._opener.unlink(backupname)


class dirstatemap(object):
    """Map encapsulating the dirstate's contents.

    The dirstate contains the following state:

    - `identity` is the identity of the dirstate file, which can be used to
      detect when changes have occurred to the dirstate file.

    - `parents` is a pair containing the parents of the working copy. The
      parents are updated by calling `setparents`.

    - the state map maps filenames to tuples of (state, mode, size, mtime),
      where state is a single character representing 'normal', 'added',
      'removed', or 'merged'. It is read by treating the dirstate as a
      dict. File state is updated by calling the `addfile`, `removefile` and
      `dropfile` methods.

    - `copymap` maps destination filenames to their source filename.

    The dirstate also provides the following views onto the state:

    - `nonnormalset` is a set of the filenames that have state other
      than 'normal', or are normal but have an mtime of -1 ('normallookup').

    - `otherparentset` is a set of the filenames that are marked as coming
      from the second parent when the dirstate is currently being merged.

    - `filefoldmap` is a dict mapping normalized filenames to the denormalized
      form that they appear as in the dirstate.

    - `dirfoldmap` is a dict mapping normalized directory names to the
      denormalized form that they appear as in the dirstate.
    """
1352
1352
1353 def __init__(self, ui, opener, root):
1353 def __init__(self, ui, opener, root):
1354 self._ui = ui
1354 self._ui = ui
1355 self._opener = opener
1355 self._opener = opener
1356 self._root = root
1356 self._root = root
1357 self._filename = b'dirstate'
1357 self._filename = b'dirstate'
1358
1358
1359 self._parents = None
1359 self._parents = None
1360 self._dirtyparents = False
1360 self._dirtyparents = False
1361
1361
1362 # for consistent view between _pl() and _read() invocations
1362 # for consistent view between _pl() and _read() invocations
1363 self._pendingmode = None
1363 self._pendingmode = None
1364
1364
1365 @propertycache
1365 @propertycache
1366 def _map(self):
1366 def _map(self):
1367 self._map = {}
1367 self._map = {}
1368 self.read()
1368 self.read()
1369 return self._map
1369 return self._map
1370
1370
1371 @propertycache
1371 @propertycache
1372 def copymap(self):
1372 def copymap(self):
1373 self.copymap = {}
1373 self.copymap = {}
1374 self._map
1374 self._map
1375 return self.copymap
1375 return self.copymap
1376
1376
1377 def clear(self):
1377 def clear(self):
1378 self._map.clear()
1378 self._map.clear()
1379 self.copymap.clear()
1379 self.copymap.clear()
1380 self.setparents(nullid, nullid)
1380 self.setparents(nullid, nullid)
1381 util.clearcachedproperty(self, b"_dirs")
1381 util.clearcachedproperty(self, b"_dirs")
1382 util.clearcachedproperty(self, b"_alldirs")
1382 util.clearcachedproperty(self, b"_alldirs")
1383 util.clearcachedproperty(self, b"filefoldmap")
1383 util.clearcachedproperty(self, b"filefoldmap")
1384 util.clearcachedproperty(self, b"dirfoldmap")
1384 util.clearcachedproperty(self, b"dirfoldmap")
1385 util.clearcachedproperty(self, b"nonnormalset")
1385 util.clearcachedproperty(self, b"nonnormalset")
1386 util.clearcachedproperty(self, b"otherparentset")
1386 util.clearcachedproperty(self, b"otherparentset")
1387
1387
1388 def items(self):
1388 def items(self):
1389 return pycompat.iteritems(self._map)
1389 return pycompat.iteritems(self._map)
1390
1390
1391 # forward for python2,3 compat
1391 # forward for python2,3 compat
1392 iteritems = items
1392 iteritems = items
1393
1393
1394 def __len__(self):
1394 def __len__(self):
1395 return len(self._map)
1395 return len(self._map)
1396
1396
1397 def __iter__(self):
1397 def __iter__(self):
1398 return iter(self._map)
1398 return iter(self._map)
1399
1399
1400 def get(self, key, default=None):
1400 def get(self, key, default=None):
1401 return self._map.get(key, default)
1401 return self._map.get(key, default)
1402
1402
1403 def __contains__(self, key):
1403 def __contains__(self, key):
1404 return key in self._map
1404 return key in self._map
1405
1405
1406 def __getitem__(self, key):
1406 def __getitem__(self, key):
1407 return self._map[key]
1407 return self._map[key]
1408
1408
1409 def keys(self):
1409 def keys(self):
1410 return self._map.keys()
1410 return self._map.keys()
1411
1411
1412 def preload(self):
1412 def preload(self):
1413 """Loads the underlying data, if it's not already loaded"""
1413 """Loads the underlying data, if it's not already loaded"""
1414 self._map
1414 self._map
1415
1415
1416 def addfile(self, f, oldstate, state, mode, size, mtime):
1416 def addfile(self, f, oldstate, state, mode, size, mtime):
1417 """Add a tracked file to the dirstate."""
1417 """Add a tracked file to the dirstate."""
1418 if oldstate in b"?r" and "_dirs" in self.__dict__:
1418 if oldstate in b"?r" and "_dirs" in self.__dict__:
1419 self._dirs.addpath(f)
1419 self._dirs.addpath(f)
1420 if oldstate == b"?" and "_alldirs" in self.__dict__:
1420 if oldstate == b"?" and "_alldirs" in self.__dict__:
1421 self._alldirs.addpath(f)
1421 self._alldirs.addpath(f)
1422 self._map[f] = dirstatetuple(state, mode, size, mtime)
1422 self._map[f] = dirstatetuple(state, mode, size, mtime)
1423 if state != b'n' or mtime == -1:
1423 if state != b'n' or mtime == -1:
1424 self.nonnormalset.add(f)
1424 self.nonnormalset.add(f)
1425 if size == -2:
1425 if size == -2:
1426 self.otherparentset.add(f)
1426 self.otherparentset.add(f)
1427
1427
1428 def removefile(self, f, oldstate, size):
1428 def removefile(self, f, oldstate, size):
1429 """
1429 """
1430 Mark a file as removed in the dirstate.
1430 Mark a file as removed in the dirstate.
1431
1431
1432 The `size` parameter is used to store sentinel values that indicate
1432 The `size` parameter is used to store sentinel values that indicate
1433 the file's previous state. In the future, we should refactor this
1433 the file's previous state. In the future, we should refactor this
1434 to be more explicit about what that state is.
1434 to be more explicit about what that state is.
1435 """
1435 """
1436 if oldstate not in b"?r" and "_dirs" in self.__dict__:
1436 if oldstate not in b"?r" and "_dirs" in self.__dict__:
1437 self._dirs.delpath(f)
1437 self._dirs.delpath(f)
1438 if oldstate == b"?" and "_alldirs" in self.__dict__:
1438 if oldstate == b"?" and "_alldirs" in self.__dict__:
1439 self._alldirs.addpath(f)
1439 self._alldirs.addpath(f)
1440 if "filefoldmap" in self.__dict__:
1440 if "filefoldmap" in self.__dict__:
1441 normed = util.normcase(f)
1441 normed = util.normcase(f)
1442 self.filefoldmap.pop(normed, None)
1442 self.filefoldmap.pop(normed, None)
1443 self._map[f] = dirstatetuple(b'r', 0, size, 0)
1443 self._map[f] = dirstatetuple(b'r', 0, size, 0)
1444 self.nonnormalset.add(f)
1444 self.nonnormalset.add(f)
1445
1445
1446 def dropfile(self, f, oldstate):
1446 def dropfile(self, f, oldstate):
1447 """
1447 """
1448 Remove a file from the dirstate. Returns True if the file was
1448 Remove a file from the dirstate. Returns True if the file was
1449 previously recorded.
1449 previously recorded.
1450 """
1450 """
1451 exists = self._map.pop(f, None) is not None
1451 exists = self._map.pop(f, None) is not None
1452 if exists:
1452 if exists:
1453 if oldstate != b"r" and "_dirs" in self.__dict__:
1453 if oldstate != b"r" and "_dirs" in self.__dict__:
1454 self._dirs.delpath(f)
1454 self._dirs.delpath(f)
1455 if "_alldirs" in self.__dict__:
1455 if "_alldirs" in self.__dict__:
1456 self._alldirs.delpath(f)
1456 self._alldirs.delpath(f)
1457 if "filefoldmap" in self.__dict__:
1457 if "filefoldmap" in self.__dict__:
1458 normed = util.normcase(f)
1458 normed = util.normcase(f)
1459 self.filefoldmap.pop(normed, None)
1459 self.filefoldmap.pop(normed, None)
1460 self.nonnormalset.discard(f)
1460 self.nonnormalset.discard(f)
1461 return exists
1461 return exists
1462
1462
1463 def clearambiguoustimes(self, files, now):
1463 def clearambiguoustimes(self, files, now):
1464 for f in files:
1464 for f in files:
1465 e = self.get(f)
1465 e = self.get(f)
1466 if e is not None and e[0] == b'n' and e[3] == now:
1466 if e is not None and e[0] == b'n' and e[3] == now:
1467 self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
1467 self._map[f] = dirstatetuple(e[0], e[1], e[2], -1)
1468 self.nonnormalset.add(f)
1468 self.nonnormalset.add(f)
1469
1469
1470 def nonnormalentries(self):
1470 def nonnormalentries(self):
1471 '''Compute the nonnormal dirstate entries from the dmap'''
1471 '''Compute the nonnormal dirstate entries from the dmap'''
1472 try:
1472 try:
1473 return parsers.nonnormalotherparententries(self._map)
1473 return parsers.nonnormalotherparententries(self._map)
1474 except AttributeError:
1474 except AttributeError:
1475 nonnorm = set()
1475 nonnorm = set()
1476 otherparent = set()
1476 otherparent = set()
1477 for fname, e in pycompat.iteritems(self._map):
1477 for fname, e in pycompat.iteritems(self._map):
1478 if e[0] != b'n' or e[3] == -1:
1478 if e[0] != b'n' or e[3] == -1:
1479 nonnorm.add(fname)
1479 nonnorm.add(fname)
1480 if e[0] == b'n' and e[2] == -2:
1480 if e[0] == b'n' and e[2] == -2:
1481 otherparent.add(fname)
1481 otherparent.add(fname)
1482 return nonnorm, otherparent
1482 return nonnorm, otherparent
1483
1483
1484 @propertycache
1484 @propertycache
1485 def filefoldmap(self):
1485 def filefoldmap(self):
1486 """Returns a dictionary mapping normalized case paths to their
1486 """Returns a dictionary mapping normalized case paths to their
1487 non-normalized versions.
1487 non-normalized versions.
1488 """
1488 """
1489 try:
1489 try:
1490 makefilefoldmap = parsers.make_file_foldmap
1490 makefilefoldmap = parsers.make_file_foldmap
1491 except AttributeError:
1491 except AttributeError:
1492 pass
1492 pass
1493 else:
1493 else:
1494 return makefilefoldmap(
1494 return makefilefoldmap(
1495 self._map, util.normcasespec, util.normcasefallback
1495 self._map, util.normcasespec, util.normcasefallback
1496 )
1496 )
1497
1497
1498 f = {}
1498 f = {}
1499 normcase = util.normcase
1499 normcase = util.normcase
1500 for name, s in pycompat.iteritems(self._map):
1500 for name, s in pycompat.iteritems(self._map):
1501 if s[0] != b'r':
1501 if s[0] != b'r':
1502 f[normcase(name)] = name
1502 f[normcase(name)] = name
1503 f[b'.'] = b'.' # prevents useless util.fspath() invocation
1503 f[b'.'] = b'.' # prevents useless util.fspath() invocation
1504 return f
1504 return f
1505
1505
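The pure-Python fallback above builds the fold map by case-normalizing every tracked (non-removed) path. A toy version, assuming a simple lowercasing stand-in for util.normcase:

```python
# Removed ('r') entries are skipped; everything else maps its normalized
# spelling to the tracked one, so a user typing b'README.TXT' on a
# case-insensitive filesystem still finds the tracked b'ReadMe.txt'.
tracked = {
    b'ReadMe.txt': (b'n', 0, 0, 0),
    b'src/Main.py': (b'n', 0, 0, 0),
    b'Gone.txt': (b'r', 0, 0, 0),  # removed: must not appear in the map
}

def normcase(p):
    return p.lower()  # stand-in for util.normcase

foldmap = {normcase(name): name
           for name, e in tracked.items() if e[0] != b'r'}
assert foldmap[b'readme.txt'] == b'ReadMe.txt'
assert foldmap[b'src/main.py'] == b'src/Main.py'
assert b'gone.txt' not in foldmap
```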
1506 def hastrackeddir(self, d):
1506 def hastrackeddir(self, d):
1507 """
1507 """
1508 Returns True if the dirstate contains a tracked (not removed) file
1508 Returns True if the dirstate contains a tracked (not removed) file
1509 in this directory.
1509 in this directory.
1510 """
1510 """
1511 return d in self._dirs
1511 return d in self._dirs
1512
1512
1513 def hasdir(self, d):
1513 def hasdir(self, d):
1514 """
1514 """
1515 Returns True if the dirstate contains a file (tracked or removed)
1515 Returns True if the dirstate contains a file (tracked or removed)
1516 in this directory.
1516 in this directory.
1517 """
1517 """
1518 return d in self._alldirs
1518 return d in self._alldirs
1519
1519
1520 @propertycache
1520 @propertycache
1521 def _dirs(self):
1521 def _dirs(self):
1522 return pathutil.dirs(self._map, b'r')
1522 return pathutil.dirs(self._map, b'r')
1523
1523
1524 @propertycache
1524 @propertycache
1525 def _alldirs(self):
1525 def _alldirs(self):
1526 return pathutil.dirs(self._map)
1526 return pathutil.dirs(self._map)
1527
1527
1528 def _opendirstatefile(self):
1528 def _opendirstatefile(self):
1529 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1529 fp, mode = txnutil.trypending(self._root, self._opener, self._filename)
1530 if self._pendingmode is not None and self._pendingmode != mode:
1530 if self._pendingmode is not None and self._pendingmode != mode:
1531 fp.close()
1531 fp.close()
1532 raise error.Abort(
1532 raise error.Abort(
1533 _(b'working directory state may be changed in parallel')
1533 _(b'working directory state may be changed in parallel')
1534 )
1534 )
1535 self._pendingmode = mode
1535 self._pendingmode = mode
1536 return fp
1536 return fp
1537
1537
1538 def parents(self):
1538 def parents(self):
1539 if not self._parents:
1539 if not self._parents:
1540 try:
1540 try:
1541 fp = self._opendirstatefile()
1541 fp = self._opendirstatefile()
1542 st = fp.read(40)
1542 st = fp.read(40)
1543 fp.close()
1543 fp.close()
1544 except IOError as err:
1544 except IOError as err:
1545 if err.errno != errno.ENOENT:
1545 if err.errno != errno.ENOENT:
1546 raise
1546 raise
1547 # File doesn't exist, so the current state is empty
1547 # File doesn't exist, so the current state is empty
1548 st = b''
1548 st = b''
1549
1549
1550 l = len(st)
1550 l = len(st)
1551 if l == 40:
1551 if l == 40:
1552 self._parents = (st[:20], st[20:40])
1552 self._parents = (st[:20], st[20:40])
1553 elif l == 0:
1553 elif l == 0:
1554 self._parents = (nullid, nullid)
1554 self._parents = (nullid, nullid)
1555 else:
1555 else:
1556 raise error.Abort(
1556 raise error.Abort(
1557 _(b'working directory state appears damaged!')
1557 _(b'working directory state appears damaged!')
1558 )
1558 )
1559
1559
1560 return self._parents
1560 return self._parents
1561
1561
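parents() relies on the dirstate file beginning with the two parent node ids, 20 binary bytes each: 40 bytes yields (p1, p2), an empty file means both parents are null, and any other length is corruption. A sketch of just that slicing, where NULLID stands in for node.nullid and the helper name is illustrative:

```python
NULLID = b'\x00' * 20

def parse_parents(st):
    if len(st) == 40:
        return st[:20], st[20:40]
    if len(st) == 0:          # no dirstate file yet: both parents null
        return NULLID, NULLID
    raise ValueError('working directory state appears damaged')

p1, p2 = b'\x11' * 20, b'\x22' * 20
assert parse_parents(p1 + p2) == (p1, p2)
assert parse_parents(b'') == (NULLID, NULLID)
```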
1562 def setparents(self, p1, p2):
1562 def setparents(self, p1, p2):
1563 self._parents = (p1, p2)
1563 self._parents = (p1, p2)
1564 self._dirtyparents = True
1564 self._dirtyparents = True
1565
1565
1566 def read(self):
1566 def read(self):
1567 # ignore HG_PENDING because identity is used only for writing
1567 # ignore HG_PENDING because identity is used only for writing
1568 self.identity = util.filestat.frompath(
1568 self.identity = util.filestat.frompath(
1569 self._opener.join(self._filename)
1569 self._opener.join(self._filename)
1570 )
1570 )
1571
1571
1572 try:
1572 try:
1573 fp = self._opendirstatefile()
1573 fp = self._opendirstatefile()
1574 try:
1574 try:
1575 st = fp.read()
1575 st = fp.read()
1576 finally:
1576 finally:
1577 fp.close()
1577 fp.close()
1578 except IOError as err:
1578 except IOError as err:
1579 if err.errno != errno.ENOENT:
1579 if err.errno != errno.ENOENT:
1580 raise
1580 raise
1581 return
1581 return
1582 if not st:
1582 if not st:
1583 return
1583 return
1584
1584
1585 if util.safehasattr(parsers, b'dict_new_presized'):
1585 if util.safehasattr(parsers, b'dict_new_presized'):
1586 # Make an estimate of the number of files in the dirstate based on
1586 # Make an estimate of the number of files in the dirstate based on
1587 # its size. From a linear regression on a set of real-world repos,
1587 # its size. From a linear regression on a set of real-world repos,
1588 # all over 10,000 files, the size of a dirstate entry is 85
1588 # all over 10,000 files, the size of a dirstate entry is 85
1589 # bytes. The cost of resizing is significantly higher than the cost
1589 # bytes. The cost of resizing is significantly higher than the cost
1590 # of filling in a larger presized dict, so subtract 20% from the
1590 # of filling in a larger presized dict, so subtract 20% from the
1591 # size.
1591 # size.
1592 #
1592 #
1593 # This heuristic is imperfect in many ways, so in a future dirstate
1593 # This heuristic is imperfect in many ways, so in a future dirstate
1594 # format update it makes sense to just record the number of entries
1594 # format update it makes sense to just record the number of entries
1595 # on write.
1595 # on write.
1596 self._map = parsers.dict_new_presized(len(st) // 71)
1596 self._map = parsers.dict_new_presized(len(st) // 71)
1597
1597
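In concrete numbers, the heuristic above works out as follows (plain arithmetic, no Mercurial APIs): at roughly 85 bytes per serialized entry, dividing the file size by 71 instead of 85 asks for a dict about 20% larger than the estimate, so the table never has to grow mid-parse.

```python
size = 1700000              # e.g. a dirstate covering ~20k files
exact = size // 85          # regression estimate: 20000 entries
presized = size // 71       # what the code requests: 23943 entries
assert presized / float(exact) > 1.19
```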
1598 # Python's garbage collector triggers a GC each time a certain number
1598 # Python's garbage collector triggers a GC each time a certain number
1599 # of container objects (the number being defined by
1599 # of container objects (the number being defined by
1600 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
1600 # gc.get_threshold()) are allocated. parse_dirstate creates a tuple
1601 # for each file in the dirstate. The C version then immediately marks
1601 # for each file in the dirstate. The C version then immediately marks
1602 # them as not to be tracked by the collector. However, this has no
1602 # them as not to be tracked by the collector. However, this has no
1603 # effect on when GCs are triggered, only on what objects the GC looks
1603 # effect on when GCs are triggered, only on what objects the GC looks
1604 # into. This means that O(number of files) GCs are unavoidable.
1604 # into. This means that O(number of files) GCs are unavoidable.
1605 # Depending on when in the process's lifetime the dirstate is parsed,
1605 # Depending on when in the process's lifetime the dirstate is parsed,
1606 # this can get very expensive. As a workaround, disable GC while
1606 # this can get very expensive. As a workaround, disable GC while
1607 # parsing the dirstate.
1607 # parsing the dirstate.
1608 #
1608 #
1609 # (we cannot decorate the function directly since it is in a C module)
1609 # (we cannot decorate the function directly since it is in a C module)
1610 parse_dirstate = util.nogc(parsers.parse_dirstate)
1610 parse_dirstate = util.nogc(parsers.parse_dirstate)
1611 p = parse_dirstate(self._map, self.copymap, st)
1611 p = parse_dirstate(self._map, self.copymap, st)
1612 if not self._dirtyparents:
1612 if not self._dirtyparents:
1613 self.setparents(*p)
1613 self.setparents(*p)
1614
1614
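util.nogc itself is not shown in this hunk; a minimal stand-in with the same intent, using only the standard library, would pause the cyclic collector for the duration of one call (the real helper lives in util and may differ in detail):

```python
import functools
import gc

def nogc(func):
    # Disable the cyclic collector around one call so allocating
    # O(files) tuples does not trigger O(files) collections.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        was_enabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if was_enabled:
                gc.enable()
    return wrapper

@nogc
def parse_many():
    return [(b'n', 0, 0, -1) for _ in range(100000)]

parse_many()  # runs with the collector paused, then restores it
```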
1615 # Avoid excess attribute lookups by fast pathing certain checks
1615 # Avoid excess attribute lookups by fast pathing certain checks
1616 self.__contains__ = self._map.__contains__
1616 self.__contains__ = self._map.__contains__
1617 self.__getitem__ = self._map.__getitem__
1617 self.__getitem__ = self._map.__getitem__
1618 self.get = self._map.get
1618 self.get = self._map.get
1619
1619
1620 def write(self, st, now):
1620 def write(self, st, now):
1621 st.write(
1621 st.write(
1622 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
1622 parsers.pack_dirstate(self._map, self.copymap, self.parents(), now)
1623 )
1623 )
1624 st.close()
1624 st.close()
1625 self._dirtyparents = False
1625 self._dirtyparents = False
1626 self.nonnormalset, self.otherparentset = self.nonnormalentries()
1626 self.nonnormalset, self.otherparentset = self.nonnormalentries()
1627
1627
1628 @propertycache
1628 @propertycache
1629 def nonnormalset(self):
1629 def nonnormalset(self):
1630 nonnorm, otherparents = self.nonnormalentries()
1630 nonnorm, otherparents = self.nonnormalentries()
1631 self.otherparentset = otherparents
1631 self.otherparentset = otherparents
1632 return nonnorm
1632 return nonnorm
1633
1633
1634 @propertycache
1634 @propertycache
1635 def otherparentset(self):
1635 def otherparentset(self):
1636 nonnorm, otherparents = self.nonnormalentries()
1636 nonnorm, otherparents = self.nonnormalentries()
1637 self.nonnormalset = nonnorm
1637 self.nonnormalset = nonnorm
1638 return otherparents
1638 return otherparents
1639
1639
1640 @propertycache
1640 @propertycache
1641 def identity(self):
1641 def identity(self):
1642 self._map
1642 self._map
1643 return self.identity
1643 return self.identity
1644
1644
1645 @propertycache
1645 @propertycache
1646 def dirfoldmap(self):
1646 def dirfoldmap(self):
1647 f = {}
1647 f = {}
1648 normcase = util.normcase
1648 normcase = util.normcase
1649 for name in self._dirs:
1649 for name in self._dirs:
1650 f[normcase(name)] = name
1650 f[normcase(name)] = name
1651 return f
1651 return f
1652
1652
1653
1653
1654 if rustmod is not None:
1654 if rustmod is not None:
1655
1655
1656 class dirstatemap(object):
1656 class dirstatemap(object):
1657 def __init__(self, ui, opener, root):
1657 def __init__(self, ui, opener, root):
1658 self._ui = ui
1658 self._ui = ui
1659 self._opener = opener
1659 self._opener = opener
1660 self._root = root
1660 self._root = root
1661 self._filename = b'dirstate'
1661 self._filename = b'dirstate'
1662 self._parents = None
1662 self._parents = None
1663 self._dirtyparents = False
1663 self._dirtyparents = False
1664
1664
1665 # for consistent view between _pl() and _read() invocations
1665 # for consistent view between _pl() and _read() invocations
1666 self._pendingmode = None
1666 self._pendingmode = None
1667
1667
1668 def addfile(self, *args, **kwargs):
1668 def addfile(self, *args, **kwargs):
1669 return self._rustmap.addfile(*args, **kwargs)
1669 return self._rustmap.addfile(*args, **kwargs)
1670
1670
1671 def removefile(self, *args, **kwargs):
1671 def removefile(self, *args, **kwargs):
1672 return self._rustmap.removefile(*args, **kwargs)
1672 return self._rustmap.removefile(*args, **kwargs)
1673
1673
1674 def dropfile(self, *args, **kwargs):
1674 def dropfile(self, *args, **kwargs):
1675 return self._rustmap.dropfile(*args, **kwargs)
1675 return self._rustmap.dropfile(*args, **kwargs)
1676
1676
1677 def clearambiguoustimes(self, *args, **kwargs):
1677 def clearambiguoustimes(self, *args, **kwargs):
1678 return self._rustmap.clearambiguoustimes(*args, **kwargs)
1678 return self._rustmap.clearambiguoustimes(*args, **kwargs)
1679
1679
1680 def nonnormalentries(self):
1680 def nonnormalentries(self):
1681 return self._rustmap.nonnormalentries()
1681 return self._rustmap.nonnormalentries()
1682
1682
1683 def get(self, *args, **kwargs):
1683 def get(self, *args, **kwargs):
1684 return self._rustmap.get(*args, **kwargs)
1684 return self._rustmap.get(*args, **kwargs)
1685
1685
1686 @propertycache
1686 @propertycache
1687 def _rustmap(self):
1687 def _rustmap(self):
1688 self._rustmap = rustmod.DirstateMap(self._root)
1688 self._rustmap = rustmod.DirstateMap(self._root)
1689 self.read()
1689 self.read()
1690 return self._rustmap
1690 return self._rustmap
1691
1691
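Note the pattern in _rustmap above: the propertycache getter assigns self._rustmap before calling self.read(), so re-entrant attribute access during the read hits the instance attribute instead of recursing into the property. A sketch of the caching-descriptor idea (this mirrors util.propertycache's behavior, not its exact code):

```python
class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, objtype=None):
        value = self.func(obj)
        obj.__dict__[self.name] = value  # instance attr now shadows us
        return value

class Demo(object):
    calls = 0

    @propertycache
    def expensive(self):
        Demo.calls += 1
        return 42

d = Demo()
assert d.expensive == 42 and d.expensive == 42
assert Demo.calls == 1  # computed once, cached in d.__dict__
```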
1692 @property
1692 @property
1693 def copymap(self):
1693 def copymap(self):
1694 return self._rustmap.copymap()
1694 return self._rustmap.copymap()
1695
1695
1696 def preload(self):
1696 def preload(self):
1697 self._rustmap
1697 self._rustmap
1698
1698
1699 def clear(self):
1699 def clear(self):
1700 self._rustmap.clear()
1700 self._rustmap.clear()
1701 self.setparents(nullid, nullid)
1701 self.setparents(nullid, nullid)
1702 util.clearcachedproperty(self, b"_dirs")
1702 util.clearcachedproperty(self, b"_dirs")
1703 util.clearcachedproperty(self, b"_alldirs")
1703 util.clearcachedproperty(self, b"_alldirs")
1704 util.clearcachedproperty(self, b"dirfoldmap")
1704 util.clearcachedproperty(self, b"dirfoldmap")
1705
1705
1706 def items(self):
1706 def items(self):
1707 return self._rustmap.items()
1707 return self._rustmap.items()
1708
1708
1709 def keys(self):
1709 def keys(self):
1710 return iter(self._rustmap)
1710 return iter(self._rustmap)
1711
1711
1712 def __contains__(self, key):
1712 def __contains__(self, key):
1713 return key in self._rustmap
1713 return key in self._rustmap
1714
1714
1715 def __getitem__(self, item):
1715 def __getitem__(self, item):
1716 return self._rustmap[item]
1716 return self._rustmap[item]
1717
1717
1718 def __len__(self):
1718 def __len__(self):
1719 return len(self._rustmap)
1719 return len(self._rustmap)
1720
1720
1721 def __iter__(self):
1721 def __iter__(self):
1722 return iter(self._rustmap)
1722 return iter(self._rustmap)
1723
1723
1724 # forward for python2,3 compat
1724 # forward for python2,3 compat
1725 iteritems = items
1725 iteritems = items
1726
1726
1727 def _opendirstatefile(self):
1727 def _opendirstatefile(self):
1728 fp, mode = txnutil.trypending(
1728 fp, mode = txnutil.trypending(
1729 self._root, self._opener, self._filename
1729 self._root, self._opener, self._filename
1730 )
1730 )
1731 if self._pendingmode is not None and self._pendingmode != mode:
1731 if self._pendingmode is not None and self._pendingmode != mode:
1732 fp.close()
1732 fp.close()
1733 raise error.Abort(
1733 raise error.Abort(
1734 _(b'working directory state may be changed in parallel')
1734 _(b'working directory state may be changed in parallel')
1735 )
1735 )
1736 self._pendingmode = mode
1736 self._pendingmode = mode
1737 return fp
1737 return fp
1738
1738
1739 def setparents(self, p1, p2):
1739 def setparents(self, p1, p2):
1740 self._rustmap.setparents(p1, p2)
1740 self._rustmap.setparents(p1, p2)
1741 self._parents = (p1, p2)
1741 self._parents = (p1, p2)
1742 self._dirtyparents = True
1742 self._dirtyparents = True
1743
1743
1744 def parents(self):
1744 def parents(self):
1745 if not self._parents:
1745 if not self._parents:
1746 try:
1746 try:
1747 fp = self._opendirstatefile()
1747 fp = self._opendirstatefile()
1748 st = fp.read(40)
1748 st = fp.read(40)
1749 fp.close()
1749 fp.close()
1750 except IOError as err:
1750 except IOError as err:
1751 if err.errno != errno.ENOENT:
1751 if err.errno != errno.ENOENT:
1752 raise
1752 raise
1753 # File doesn't exist, so the current state is empty
1753 # File doesn't exist, so the current state is empty
1754 st = b''
1754 st = b''
1755
1755
1756 try:
1756 try:
1757 self._parents = self._rustmap.parents(st)
1757 self._parents = self._rustmap.parents(st)
1758 except ValueError:
1758 except ValueError:
1759 raise error.Abort(
1759 raise error.Abort(
1760 _(b'working directory state appears damaged!')
1760 _(b'working directory state appears damaged!')
1761 )
1761 )
1762
1762
1763 return self._parents
1763 return self._parents
1764
1764
1765 def read(self):
1765 def read(self):
1766 # ignore HG_PENDING because identity is used only for writing
1766 # ignore HG_PENDING because identity is used only for writing
1767 self.identity = util.filestat.frompath(
1767 self.identity = util.filestat.frompath(
1768 self._opener.join(self._filename)
1768 self._opener.join(self._filename)
1769 )
1769 )
1770
1770
1771 try:
1771 try:
1772 fp = self._opendirstatefile()
1772 fp = self._opendirstatefile()
1773 try:
1773 try:
1774 st = fp.read()
1774 st = fp.read()
1775 finally:
1775 finally:
1776 fp.close()
1776 fp.close()
1777 except IOError as err:
1777 except IOError as err:
1778 if err.errno != errno.ENOENT:
1778 if err.errno != errno.ENOENT:
1779 raise
1779 raise
1780 return
1780 return
1781 if not st:
1781 if not st:
1782 return
1782 return
1783
1783
1784 parse_dirstate = util.nogc(self._rustmap.read)
1784 parse_dirstate = util.nogc(self._rustmap.read)
1785 parents = parse_dirstate(st)
1785 parents = parse_dirstate(st)
1786 if parents and not self._dirtyparents:
1786 if parents and not self._dirtyparents:
1787 self.setparents(*parents)
1787 self.setparents(*parents)
1788
1788
1789 self.__contains__ = self._rustmap.__contains__
1789 self.__contains__ = self._rustmap.__contains__
1790 self.__getitem__ = self._rustmap.__getitem__
1790 self.__getitem__ = self._rustmap.__getitem__
1791 self.get = self._rustmap.get
1791 self.get = self._rustmap.get
1792
1792
1793 def write(self, st, now):
1793 def write(self, st, now):
1794 parents = self.parents()
1794 parents = self.parents()
1795 st.write(self._rustmap.write(parents[0], parents[1], now))
1795 st.write(self._rustmap.write(parents[0], parents[1], now))
1796 st.close()
1796 st.close()
1797 self._dirtyparents = False
1797 self._dirtyparents = False
1798
1798
1799 @propertycache
1799 @propertycache
1800 def filefoldmap(self):
1800 def filefoldmap(self):
1801 """Returns a dictionary mapping normalized case paths to their
1801 """Returns a dictionary mapping normalized case paths to their
1802 non-normalized versions.
1802 non-normalized versions.
1803 """
1803 """
1804 return self._rustmap.filefoldmapasdict()
1804 return self._rustmap.filefoldmapasdict()
1805
1805
1806 def hastrackeddir(self, d):
1806 def hastrackeddir(self, d):
1807 self._dirs # Trigger Python's propertycache
1807 self._dirs # Trigger Python's propertycache
1808 return self._rustmap.hastrackeddir(d)
1808 return self._rustmap.hastrackeddir(d)
1809
1809
1810 def hasdir(self, d):
1810 def hasdir(self, d):
1811 self._dirs # Trigger Python's propertycache
1811 self._dirs # Trigger Python's propertycache
1812 return self._rustmap.hasdir(d)
1812 return self._rustmap.hasdir(d)
1813
1813
1814 @propertycache
1814 @propertycache
1815 def _dirs(self):
1815 def _dirs(self):
1816 return self._rustmap.getdirs()
1816 return self._rustmap.getdirs()
1817
1817
1818 @propertycache
1818 @propertycache
1819 def _alldirs(self):
1819 def _alldirs(self):
1820 return self._rustmap.getalldirs()
1820 return self._rustmap.getalldirs()
1821
1821
1822 @propertycache
1822 @propertycache
1823 def identity(self):
1823 def identity(self):
1824 self._rustmap
1824 self._rustmap
1825 return self.identity
1825 return self.identity
1826
1826
1827 @property
1827 @property
1828 def nonnormalset(self):
1828 def nonnormalset(self):
1829 nonnorm, otherparents = self._rustmap.nonnormalentries()
1829 nonnorm, otherparents = self._rustmap.nonnormalentries()
1830 return nonnorm
1830 return nonnorm
1831
1831
1832 @property
1832 @property
1833 def otherparentset(self):
1833 def otherparentset(self):
1834 nonnorm, otherparents = self._rustmap.nonnormalentries()
1834 nonnorm, otherparents = self._rustmap.nonnormalentries()
1835 return otherparents
1835 return otherparents
1836
1836
1837 @propertycache
1837 @propertycache
1838 def dirfoldmap(self):
1838 def dirfoldmap(self):
1839 f = {}
1839 f = {}
1840 normcase = util.normcase
1840 normcase = util.normcase
1841 for name in self._dirs:
1841 for name in self._dirs:
1842 f[normcase(name)] = name
1842 f[normcase(name)] = name
1843 return f
1843 return f
@@ -1,578 +1,579 b''
1 # hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
1 # hgweb/hgwebdir_mod.py - Web interface for a directory of repositories.
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 import gc
11 import gc
12 import os
12 import os
13 import time
13 import time
14
14
15 from ..i18n import _
15 from ..i18n import _
16
16
17 from .common import (
17 from .common import (
18 ErrorResponse,
18 ErrorResponse,
19 HTTP_SERVER_ERROR,
19 HTTP_SERVER_ERROR,
20 cspvalues,
20 cspvalues,
21 get_contact,
21 get_contact,
22 get_mtime,
22 get_mtime,
23 ismember,
23 ismember,
24 paritygen,
24 paritygen,
25 staticfile,
25 staticfile,
26 statusmessage,
26 statusmessage,
27 )
27 )
28
28
29 from .. import (
29 from .. import (
30 configitems,
30 configitems,
31 encoding,
31 encoding,
32 error,
32 error,
33 extensions,
33 extensions,
34 hg,
34 hg,
35 pathutil,
35 profiling,
36 profiling,
36 pycompat,
37 pycompat,
37 registrar,
38 registrar,
38 scmutil,
39 scmutil,
39 templater,
40 templater,
40 templateutil,
41 templateutil,
41 ui as uimod,
42 ui as uimod,
42 util,
43 util,
43 )
44 )
44
45
45 from . import (
46 from . import (
46 hgweb_mod,
47 hgweb_mod,
47 request as requestmod,
48 request as requestmod,
48 webutil,
49 webutil,
49 wsgicgi,
50 wsgicgi,
50 )
51 )
51 from ..utils import dateutil
52 from ..utils import dateutil
52
53
53
54
54 def cleannames(items):
55 def cleannames(items):
55 return [(util.pconvert(name).strip(b'/'), path) for name, path in items]
56 return [(util.pconvert(name).strip(b'/'), path) for name, path in items]
56
57
57
58
58 def findrepos(paths):
59 def findrepos(paths):
59 repos = []
60 repos = []
60 for prefix, root in cleannames(paths):
61 for prefix, root in cleannames(paths):
61 roothead, roottail = os.path.split(root)
62 roothead, roottail = os.path.split(root)
62 # "foo = /bar/*" or "foo = /bar/**" lets every repo /bar/N in or below
63 # "foo = /bar/*" or "foo = /bar/**" lets every repo /bar/N in or below
63 # /bar/ be served as foo/N.
64 # /bar/ be served as foo/N.
64 # '*' will not search inside dirs with .hg (except .hg/patches),
65 # '*' will not search inside dirs with .hg (except .hg/patches),
65 # '**' will search inside dirs with .hg (and thus also find subrepos).
66 # '**' will search inside dirs with .hg (and thus also find subrepos).
66 try:
67 try:
67 recurse = {b'*': False, b'**': True}[roottail]
68 recurse = {b'*': False, b'**': True}[roottail]
68 except KeyError:
69 except KeyError:
69 repos.append((prefix, root))
70 repos.append((prefix, root))
70 continue
71 continue
71 roothead = os.path.normpath(os.path.abspath(roothead))
72 roothead = os.path.normpath(os.path.abspath(roothead))
72 paths = scmutil.walkrepos(roothead, followsym=True, recurse=recurse)
73 paths = scmutil.walkrepos(roothead, followsym=True, recurse=recurse)
73 repos.extend(urlrepos(prefix, roothead, paths))
74 repos.extend(urlrepos(prefix, roothead, paths))
74 return repos
75 return repos
75
76
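The roottail dispatch in findrepos() is the whole `*`/`**` mechanism. An illustrative, hypothetical helper showing the same three-way classification:

```python
# A trailing '*' scans one level without descending into repositories,
# '**' also descends (and so finds subrepos), anything else is taken as
# a literal repository path.
import os

def classify(root):
    head, tail = os.path.split(root)
    try:
        recurse = {'*': False, '**': True}[tail]
    except KeyError:
        return ('literal', root, None)
    return ('scan', head, recurse)

assert classify('/srv/repos/proj') == ('literal', '/srv/repos/proj', None)
assert classify('/srv/repos/*') == ('scan', '/srv/repos', False)
assert classify('/srv/repos/**') == ('scan', '/srv/repos', True)
```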
76
77
77 def urlrepos(prefix, roothead, paths):
78 def urlrepos(prefix, roothead, paths):
78 """yield url paths and filesystem paths from a list of repo paths
79 """yield url paths and filesystem paths from a list of repo paths
79
80
80 >>> conv = lambda seq: [(v, util.pconvert(p)) for v,p in seq]
81 >>> conv = lambda seq: [(v, util.pconvert(p)) for v,p in seq]
81 >>> conv(urlrepos(b'hg', b'/opt', [b'/opt/r', b'/opt/r/r', b'/opt']))
82 >>> conv(urlrepos(b'hg', b'/opt', [b'/opt/r', b'/opt/r/r', b'/opt']))
82 [('hg/r', '/opt/r'), ('hg/r/r', '/opt/r/r'), ('hg', '/opt')]
83 [('hg/r', '/opt/r'), ('hg/r/r', '/opt/r/r'), ('hg', '/opt')]
83 >>> conv(urlrepos(b'', b'/opt', [b'/opt/r', b'/opt/r/r', b'/opt']))
84 >>> conv(urlrepos(b'', b'/opt', [b'/opt/r', b'/opt/r/r', b'/opt']))
84 [('r', '/opt/r'), ('r/r', '/opt/r/r'), ('', '/opt')]
85 [('r', '/opt/r'), ('r/r', '/opt/r/r'), ('', '/opt')]
85 """
86 """
86 for path in paths:
87 for path in paths:
87 path = os.path.normpath(path)
88 path = os.path.normpath(path)
88 yield (
89 yield (
89 prefix + b'/' + util.pconvert(path[len(roothead) :]).lstrip(b'/')
90 prefix + b'/' + util.pconvert(path[len(roothead) :]).lstrip(b'/')
90 ).strip(b'/'), path
91 ).strip(b'/'), path
91
92
92
93
93 def readallowed(ui, req):
94 def readallowed(ui, req):
94 """Check allow_read and deny_read config options of a repo's ui object
95 """Check allow_read and deny_read config options of a repo's ui object
95 to determine user permissions. By default, with neither option set (or
96 to determine user permissions. By default, with neither option set (or
96 both empty), allow all users to read the repo. There are two ways a
97 both empty), allow all users to read the repo. There are two ways a
97 user can be denied read access: (1) deny_read is not empty, and the
98 user can be denied read access: (1) deny_read is not empty, and the
98 user is unauthenticated or deny_read contains user (or *), and (2)
99 user is unauthenticated or deny_read contains user (or *), and (2)
99 allow_read is not empty and the user is not in allow_read. Return True
100 allow_read is not empty and the user is not in allow_read. Return True
100 if user is allowed to read the repo, else return False."""
101 if user is allowed to read the repo, else return False."""
101
102
102 user = req.remoteuser
103 user = req.remoteuser
103
104
104 deny_read = ui.configlist(b'web', b'deny_read', untrusted=True)
105 deny_read = ui.configlist(b'web', b'deny_read', untrusted=True)
105 if deny_read and (not user or ismember(ui, user, deny_read)):
106 if deny_read and (not user or ismember(ui, user, deny_read)):
106 return False
107 return False
107
108
108 allow_read = ui.configlist(b'web', b'allow_read', untrusted=True)
109 allow_read = ui.configlist(b'web', b'allow_read', untrusted=True)
109 # by default, allow reading if no allow_read option has been set
110 # by default, allow reading if no allow_read option has been set
110 if not allow_read or ismember(ui, user, allow_read):
111 if not allow_read or ismember(ui, user, allow_read):
111 return True
112 return True
112
113
113 return False
114 return False
114
115
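A self-contained model of that policy, with hypothetical names (user=None means unauthenticated; the real code delegates membership tests, including `*`, to ismember): deny_read wins, then an empty allow_read admits everyone, otherwise membership is required.

```python
def read_allowed(user, deny_read, allow_read):
    if deny_read and (not user or user in deny_read or '*' in deny_read):
        return False
    return not allow_read or user in allow_read or '*' in allow_read

assert read_allowed('alice', deny_read=[], allow_read=[])        # default: open
assert not read_allowed(None, deny_read=['bob'], allow_read=[])  # anon + deny
assert not read_allowed('bob', deny_read=['bob'], allow_read=[])
assert not read_allowed('eve', deny_read=[], allow_read=['alice'])
assert read_allowed('alice', deny_read=[], allow_read=['alice'])
```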
115
116
116 def rawindexentries(ui, repos, req, subdir=b''):
117 def rawindexentries(ui, repos, req, subdir=b''):
117 descend = ui.configbool(b'web', b'descend')
118 descend = ui.configbool(b'web', b'descend')
118 collapse = ui.configbool(b'web', b'collapse')
119 collapse = ui.configbool(b'web', b'collapse')
119 seenrepos = set()
120 seenrepos = set()
120 seendirs = set()
121 seendirs = set()
121 for name, path in repos:
122 for name, path in repos:
122
123
123 if not name.startswith(subdir):
124 if not name.startswith(subdir):
124 continue
125 continue
125 name = name[len(subdir) :]
126 name = name[len(subdir) :]
126 directory = False
127 directory = False
127
128
128 if b'/' in name:
129 if b'/' in name:
129 if not descend:
130 if not descend:
130 continue
131 continue
131
132
132 nameparts = name.split(b'/')
133 nameparts = name.split(b'/')
133 rootname = nameparts[0]
134 rootname = nameparts[0]
134
135
135 if not collapse:
136 if not collapse:
136 pass
137 pass
137 elif rootname in seendirs:
138 elif rootname in seendirs:
138 continue
139 continue
139 elif rootname in seenrepos:
140 elif rootname in seenrepos:
140 pass
141 pass
141 else:
142 else:
142 directory = True
143 directory = True
143 name = rootname
144 name = rootname
144
145
145 # redefine the path to refer to the directory
146 # redefine the path to refer to the directory
146 discarded = b'/'.join(nameparts[1:])
147 discarded = b'/'.join(nameparts[1:])
147
148
148 # remove name parts plus accompanying slash
149 # remove name parts plus accompanying slash
149 path = path[: -len(discarded) - 1]
150 path = path[: -len(discarded) - 1]
150
151
151 try:
152 try:
152 hg.repository(ui, path)
153 hg.repository(ui, path)
153 directory = False
154 directory = False
154 except (IOError, error.RepoError):
155 except (IOError, error.RepoError):
155 pass
156 pass
156
157
157 parts = [
158 parts = [
158 req.apppath.strip(b'/'),
159 req.apppath.strip(b'/'),
159 subdir.strip(b'/'),
160 subdir.strip(b'/'),
160 name.strip(b'/'),
161 name.strip(b'/'),
161 ]
162 ]
162 url = b'/' + b'/'.join(p for p in parts if p) + b'/'
163 url = b'/' + b'/'.join(p for p in parts if p) + b'/'
163
164
164 # show either a directory entry or a repository
165 # show either a directory entry or a repository
165 if directory:
166 if directory:
166 # get the directory's time information
167 # get the directory's time information
167 try:
168 try:
168 d = (get_mtime(path), dateutil.makedate()[1])
169 d = (get_mtime(path), dateutil.makedate()[1])
169 except OSError:
170 except OSError:
170 continue
171 continue
171
172
172 # add '/' to the name to make it obvious that
173 # add '/' to the name to make it obvious that
173 # the entry is a directory, not a regular repository
174 # the entry is a directory, not a regular repository
174 row = {
175 row = {
175 b'contact': b"",
176 b'contact': b"",
176 b'contact_sort': b"",
177 b'contact_sort': b"",
177 b'name': name + b'/',
178 b'name': name + b'/',
178 b'name_sort': name,
179 b'name_sort': name,
179 b'url': url,
180 b'url': url,
180 b'description': b"",
181 b'description': b"",
181 b'description_sort': b"",
182 b'description_sort': b"",
182 b'lastchange': d,
183 b'lastchange': d,
183 b'lastchange_sort': d[1] - d[0],
184 b'lastchange_sort': d[1] - d[0],
184 b'archives': templateutil.mappinglist([]),
185 b'archives': templateutil.mappinglist([]),
185 b'isdirectory': True,
186 b'isdirectory': True,
186 b'labels': templateutil.hybridlist([], name=b'label'),
187 b'labels': templateutil.hybridlist([], name=b'label'),
187 }
188 }
188
189
189 seendirs.add(name)
190 seendirs.add(name)
190 yield row
191 yield row
191 continue
192 continue
192
193
193 u = ui.copy()
194 u = ui.copy()
194 try:
195 try:
195 u.readconfig(os.path.join(path, b'.hg', b'hgrc'))
196 u.readconfig(os.path.join(path, b'.hg', b'hgrc'))
196 except Exception as e:
197 except Exception as e:
197 u.warn(_(b'error reading %s/.hg/hgrc: %s\n') % (path, e))
198 u.warn(_(b'error reading %s/.hg/hgrc: %s\n') % (path, e))
198 continue
199 continue
199
200
200 def get(section, name, default=uimod._unset):
201 def get(section, name, default=uimod._unset):
201 return u.config(section, name, default, untrusted=True)
202 return u.config(section, name, default, untrusted=True)
202
203
203 if u.configbool(b"web", b"hidden", untrusted=True):
204 if u.configbool(b"web", b"hidden", untrusted=True):
204 continue
205 continue
205
206
206 if not readallowed(u, req):
207 if not readallowed(u, req):
207 continue
208 continue
208
209
209 # update time with local timezone
210 # update time with local timezone
210 try:
211 try:
211 r = hg.repository(ui, path)
212 r = hg.repository(ui, path)
212 except IOError:
213 except IOError:
213 u.warn(_(b'error accessing repository at %s\n') % path)
214 u.warn(_(b'error accessing repository at %s\n') % path)
214 continue
215 continue
215 except error.RepoError:
216 except error.RepoError:
216 u.warn(_(b'error accessing repository at %s\n') % path)
217 u.warn(_(b'error accessing repository at %s\n') % path)
217 continue
218 continue
218 try:
219 try:
219 d = (get_mtime(r.spath), dateutil.makedate()[1])
220 d = (get_mtime(r.spath), dateutil.makedate()[1])
220 except OSError:
221 except OSError:
221 continue
222 continue
222
223
223 contact = get_contact(get)
224 contact = get_contact(get)
224 description = get(b"web", b"description")
225 description = get(b"web", b"description")
225 seenrepos.add(name)
226 seenrepos.add(name)
226 name = get(b"web", b"name", name)
227 name = get(b"web", b"name", name)
227 labels = u.configlist(b'web', b'labels', untrusted=True)
228 labels = u.configlist(b'web', b'labels', untrusted=True)
228 row = {
229 row = {
229 b'contact': contact or b"unknown",
230 b'contact': contact or b"unknown",
230 b'contact_sort': contact.upper() or b"unknown",
231 b'contact_sort': contact.upper() or b"unknown",
231 b'name': name,
232 b'name': name,
232 b'name_sort': name,
233 b'name_sort': name,
233 b'url': url,
234 b'url': url,
234 b'description': description or b"unknown",
235 b'description': description or b"unknown",
235 b'description_sort': description.upper() or b"unknown",
236 b'description_sort': description.upper() or b"unknown",
236 b'lastchange': d,
237 b'lastchange': d,
237 b'lastchange_sort': d[1] - d[0],
238 b'lastchange_sort': d[1] - d[0],
238 b'archives': webutil.archivelist(u, b"tip", url),
239 b'archives': webutil.archivelist(u, b"tip", url),
239 b'isdirectory': None,
240 b'isdirectory': None,
240 b'labels': templateutil.hybridlist(labels, name=b'label'),
241 b'labels': templateutil.hybridlist(labels, name=b'label'),
241 }
242 }
242
243
243 yield row
244 yield row
244
245
245
246
246 def _indexentriesgen(
247 def _indexentriesgen(
247 context, ui, repos, req, stripecount, sortcolumn, descending, subdir
248 context, ui, repos, req, stripecount, sortcolumn, descending, subdir
248 ):
249 ):
249 rows = rawindexentries(ui, repos, req, subdir=subdir)
250 rows = rawindexentries(ui, repos, req, subdir=subdir)
250
251
251 sortdefault = None, False
252 sortdefault = None, False
252
253
253 if sortcolumn and sortdefault != (sortcolumn, descending):
254 if sortcolumn and sortdefault != (sortcolumn, descending):
254 sortkey = b'%s_sort' % sortcolumn
255 sortkey = b'%s_sort' % sortcolumn
255 rows = sorted(rows, key=lambda x: x[sortkey], reverse=descending)
256 rows = sorted(rows, key=lambda x: x[sortkey], reverse=descending)
256
257
257 for row, parity in zip(rows, paritygen(stripecount)):
258 for row, parity in zip(rows, paritygen(stripecount)):
258 row[b'parity'] = parity
259 row[b'parity'] = parity
259 yield row
260 yield row
260
261
261
262
262 def indexentries(
263 def indexentries(
263 ui, repos, req, stripecount, sortcolumn=b'', descending=False, subdir=b''
264 ui, repos, req, stripecount, sortcolumn=b'', descending=False, subdir=b''
264 ):
265 ):
265 args = (ui, repos, req, stripecount, sortcolumn, descending, subdir)
266 args = (ui, repos, req, stripecount, sortcolumn, descending, subdir)
266 return templateutil.mappinggenerator(_indexentriesgen, args=args)
267 return templateutil.mappinggenerator(_indexentriesgen, args=args)
267
268
268
269
269 class hgwebdir(object):
270 class hgwebdir(object):
270 """HTTP server for multiple repositories.
271 """HTTP server for multiple repositories.
271
272
272 Given a configuration, different repositories will be served depending
273 Given a configuration, different repositories will be served depending
273 on the request path.
274 on the request path.
274
275
275 Instances are typically used as WSGI applications.
276 Instances are typically used as WSGI applications.
276 """
277 """
277
278
278 def __init__(self, conf, baseui=None):
279 def __init__(self, conf, baseui=None):
279 self.conf = conf
280 self.conf = conf
280 self.baseui = baseui
281 self.baseui = baseui
281 self.ui = None
282 self.ui = None
282 self.lastrefresh = 0
283 self.lastrefresh = 0
283 self.motd = None
284 self.motd = None
284 self.refresh()
285 self.refresh()
285 if not baseui:
286 if not baseui:
286 # set up environment for new ui
287 # set up environment for new ui
287 extensions.loadall(self.ui)
288 extensions.loadall(self.ui)
288 extensions.populateui(self.ui)
289 extensions.populateui(self.ui)
289
290
290 def refresh(self):
291 def refresh(self):
291 if self.ui:
292 if self.ui:
292 refreshinterval = self.ui.configint(b'web', b'refreshinterval')
293 refreshinterval = self.ui.configint(b'web', b'refreshinterval')
293 else:
294 else:
294 item = configitems.coreitems[b'web'][b'refreshinterval']
295 item = configitems.coreitems[b'web'][b'refreshinterval']
295 refreshinterval = item.default
296 refreshinterval = item.default
296
297
297 # refreshinterval <= 0 means to always refresh.
298 # refreshinterval <= 0 means to always refresh.
298 if (
299 if (
299 refreshinterval > 0
300 refreshinterval > 0
300 and self.lastrefresh + refreshinterval > time.time()
301 and self.lastrefresh + refreshinterval > time.time()
301 ):
302 ):
302 return
303 return
303
304
304 if self.baseui:
305 if self.baseui:
305 u = self.baseui.copy()
306 u = self.baseui.copy()
306 else:
307 else:
307 u = uimod.ui.load()
308 u = uimod.ui.load()
308 u.setconfig(b'ui', b'report_untrusted', b'off', b'hgwebdir')
309 u.setconfig(b'ui', b'report_untrusted', b'off', b'hgwebdir')
309 u.setconfig(b'ui', b'nontty', b'true', b'hgwebdir')
310 u.setconfig(b'ui', b'nontty', b'true', b'hgwebdir')
310 # displaying a bundling progress bar while serving feels wrong and may
311 # displaying a bundling progress bar while serving feels wrong and may
311 # break some wsgi implementations.
312 # break some wsgi implementations.
312 u.setconfig(b'progress', b'disable', b'true', b'hgweb')
313 u.setconfig(b'progress', b'disable', b'true', b'hgweb')
313
314
314 if not isinstance(self.conf, (dict, list, tuple)):
315 if not isinstance(self.conf, (dict, list, tuple)):
315 map = {b'paths': b'hgweb-paths'}
316 map = {b'paths': b'hgweb-paths'}
316 if not os.path.exists(self.conf):
317 if not os.path.exists(self.conf):
317 raise error.Abort(_(b'config file %s not found!') % self.conf)
318 raise error.Abort(_(b'config file %s not found!') % self.conf)
318 u.readconfig(self.conf, remap=map, trust=True)
319 u.readconfig(self.conf, remap=map, trust=True)
319 paths = []
320 paths = []
320 for name, ignored in u.configitems(b'hgweb-paths'):
321 for name, ignored in u.configitems(b'hgweb-paths'):
321 for path in u.configlist(b'hgweb-paths', name):
322 for path in u.configlist(b'hgweb-paths', name):
322 paths.append((name, path))
323 paths.append((name, path))
323 elif isinstance(self.conf, (list, tuple)):
324 elif isinstance(self.conf, (list, tuple)):
324 paths = self.conf
325 paths = self.conf
325 elif isinstance(self.conf, dict):
326 elif isinstance(self.conf, dict):
326 paths = self.conf.items()
327 paths = self.conf.items()
327 extensions.populateui(u)
328 extensions.populateui(u)
328
329
329 repos = findrepos(paths)
330 repos = findrepos(paths)
330 for prefix, root in u.configitems(b'collections'):
331 for prefix, root in u.configitems(b'collections'):
331 prefix = util.pconvert(prefix)
332 prefix = util.pconvert(prefix)
332 for path in scmutil.walkrepos(root, followsym=True):
333 for path in scmutil.walkrepos(root, followsym=True):
333 repo = os.path.normpath(path)
334 repo = os.path.normpath(path)
334 name = util.pconvert(repo)
335 name = util.pconvert(repo)
335 if name.startswith(prefix):
336 if name.startswith(prefix):
336 name = name[len(prefix) :]
337 name = name[len(prefix) :]
337 repos.append((name.lstrip(b'/'), repo))
338 repos.append((name.lstrip(b'/'), repo))
338
339
339 self.repos = repos
340 self.repos = repos
340 self.ui = u
341 self.ui = u
341 encoding.encoding = self.ui.config(b'web', b'encoding')
342 encoding.encoding = self.ui.config(b'web', b'encoding')
342 self.style = self.ui.config(b'web', b'style')
343 self.style = self.ui.config(b'web', b'style')
343 self.templatepath = self.ui.config(
344 self.templatepath = self.ui.config(
344 b'web', b'templates', untrusted=False
345 b'web', b'templates', untrusted=False
345 )
346 )
346 self.stripecount = self.ui.config(b'web', b'stripes')
347 self.stripecount = self.ui.config(b'web', b'stripes')
347 if self.stripecount:
348 if self.stripecount:
348 self.stripecount = int(self.stripecount)
349 self.stripecount = int(self.stripecount)
349 prefix = self.ui.config(b'web', b'prefix')
350 prefix = self.ui.config(b'web', b'prefix')
350 if prefix.startswith(b'/'):
351 if prefix.startswith(b'/'):
351 prefix = prefix[1:]
352 prefix = prefix[1:]
352 if prefix.endswith(b'/'):
353 if prefix.endswith(b'/'):
353 prefix = prefix[:-1]
354 prefix = prefix[:-1]
354 self.prefix = prefix
355 self.prefix = prefix
355 self.lastrefresh = time.time()
356 self.lastrefresh = time.time()
356
357
357 def run(self):
358 def run(self):
358 if not encoding.environ.get(b'GATEWAY_INTERFACE', b'').startswith(
359 if not encoding.environ.get(b'GATEWAY_INTERFACE', b'').startswith(
359 b"CGI/1."
360 b"CGI/1."
360 ):
361 ):
361 raise RuntimeError(
362 raise RuntimeError(
362 b"This function is only intended to be "
363 b"This function is only intended to be "
363 b"called while running as a CGI script."
364 b"called while running as a CGI script."
364 )
365 )
365 wsgicgi.launch(self)
366 wsgicgi.launch(self)
366
367
367 def __call__(self, env, respond):
368 def __call__(self, env, respond):
368 baseurl = self.ui.config(b'web', b'baseurl')
369 baseurl = self.ui.config(b'web', b'baseurl')
369 req = requestmod.parserequestfromenv(env, altbaseurl=baseurl)
370 req = requestmod.parserequestfromenv(env, altbaseurl=baseurl)
370 res = requestmod.wsgiresponse(req, respond)
371 res = requestmod.wsgiresponse(req, respond)
371
372
372 return self.run_wsgi(req, res)
373 return self.run_wsgi(req, res)
373
374
374 def run_wsgi(self, req, res):
375 def run_wsgi(self, req, res):
375 profile = self.ui.configbool(b'profiling', b'enabled')
376 profile = self.ui.configbool(b'profiling', b'enabled')
376 with profiling.profile(self.ui, enabled=profile):
377 with profiling.profile(self.ui, enabled=profile):
377 try:
378 try:
378 for r in self._runwsgi(req, res):
379 for r in self._runwsgi(req, res):
379 yield r
380 yield r
380 finally:
381 finally:
381 # There are known cycles in localrepository that prevent
382 # There are known cycles in localrepository that prevent
382 # those objects (and tons of held references) from being
383 # those objects (and tons of held references) from being
383 # collected through normal refcounting. We mitigate those
384 # collected through normal refcounting. We mitigate those
384 # leaks by performing an explicit GC on every request.
385 # leaks by performing an explicit GC on every request.
385 # TODO remove this once leaks are fixed.
386 # TODO remove this once leaks are fixed.
386 # TODO only run this on requests that create localrepository
387 # TODO only run this on requests that create localrepository
387 # instances instead of every request.
388 # instances instead of every request.
388 gc.collect()
389 gc.collect()
389
390
390 def _runwsgi(self, req, res):
391 def _runwsgi(self, req, res):
391 try:
392 try:
392 self.refresh()
393 self.refresh()
393
394
394 csp, nonce = cspvalues(self.ui)
395 csp, nonce = cspvalues(self.ui)
395 if csp:
396 if csp:
396 res.headers[b'Content-Security-Policy'] = csp
397 res.headers[b'Content-Security-Policy'] = csp
397
398
398 virtual = req.dispatchpath.strip(b'/')
399 virtual = req.dispatchpath.strip(b'/')
399 tmpl = self.templater(req, nonce)
400 tmpl = self.templater(req, nonce)
400 ctype = tmpl.render(b'mimetype', {b'encoding': encoding.encoding})
401 ctype = tmpl.render(b'mimetype', {b'encoding': encoding.encoding})
401
402
402 # Global defaults. These can be overridden by any handler.
403 # Global defaults. These can be overridden by any handler.
403 res.status = b'200 Script output follows'
404 res.status = b'200 Script output follows'
404 res.headers[b'Content-Type'] = ctype
405 res.headers[b'Content-Type'] = ctype
405
406
406 # a static file
407 # a static file
407 if virtual.startswith(b'static/') or b'static' in req.qsparams:
408 if virtual.startswith(b'static/') or b'static' in req.qsparams:
408 if virtual.startswith(b'static/'):
409 if virtual.startswith(b'static/'):
409 fname = virtual[7:]
410 fname = virtual[7:]
410 else:
411 else:
411 fname = req.qsparams[b'static']
412 fname = req.qsparams[b'static']
412 static = self.ui.config(b"web", b"static", untrusted=False)
413 static = self.ui.config(b"web", b"static", untrusted=False)
413 if not static:
414 if not static:
414 tp = self.templatepath or templater.templatepaths()
415 tp = self.templatepath or templater.templatepaths()
415 if isinstance(tp, str):
416 if isinstance(tp, str):
416 tp = [tp]
417 tp = [tp]
417 static = [os.path.join(p, b'static') for p in tp]
418 static = [os.path.join(p, b'static') for p in tp]
418
419
419 staticfile(static, fname, res)
420 staticfile(static, fname, res)
420 return res.sendresponse()
421 return res.sendresponse()
421
422
422 # top-level index
423 # top-level index
423
424
424 repos = dict(self.repos)
425 repos = dict(self.repos)
425
426
426 if (not virtual or virtual == b'index') and virtual not in repos:
427 if (not virtual or virtual == b'index') and virtual not in repos:
427 return self.makeindex(req, res, tmpl)
428 return self.makeindex(req, res, tmpl)
428
429
429 # nested indexes and hgwebs
430 # nested indexes and hgwebs
430
431
431 if virtual.endswith(b'/index') and virtual not in repos:
432 if virtual.endswith(b'/index') and virtual not in repos:
432 subdir = virtual[: -len(b'index')]
433 subdir = virtual[: -len(b'index')]
433 if any(r.startswith(subdir) for r in repos):
434 if any(r.startswith(subdir) for r in repos):
434 return self.makeindex(req, res, tmpl, subdir)
435 return self.makeindex(req, res, tmpl, subdir)
435
436
436 def _virtualdirs():
437 def _virtualdirs():
437 # Check the full virtual path, and each parent
438 # Check the full virtual path, and each parent
438 yield virtual
439 yield virtual
439 for p in util.finddirs(virtual):
440 for p in pathutil.finddirs(virtual):
440 yield p
441 yield p
441
442
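This hunk is the point of the changeset: _virtualdirs now calls finddirs from pathutil instead of util. A behavior-compatible sketch of what finddirs yields (assuming the conventional '/'-separated bytes paths; not quoted from the changeset itself):

```python
def finddirs(path):
    # Yield each ancestor of a '/'-separated bytes path, longest first,
    # ending with the empty root prefix.
    pos = path.rfind(b'/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind(b'/', 0, pos)
    yield b''

assert list(finddirs(b'a/b/c')) == [b'a/b', b'a', b'']
# _virtualdirs therefore probes b'a/b/c', b'a/b', b'a', b'' in turn
# until one of them names a configured repository.
```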
442 for virtualrepo in _virtualdirs():
443 for virtualrepo in _virtualdirs():
443 real = repos.get(virtualrepo)
444 real = repos.get(virtualrepo)
444 if real:
445 if real:
445 # Re-parse the WSGI environment to take into account our
446 # Re-parse the WSGI environment to take into account our
446 # repository path component.
447 # repository path component.
447 uenv = req.rawenv
448 uenv = req.rawenv
448 if pycompat.ispy3:
449 if pycompat.ispy3:
449 uenv = {
450 uenv = {
450 k.decode('latin1'): v
451 k.decode('latin1'): v
451 for k, v in pycompat.iteritems(uenv)
452 for k, v in pycompat.iteritems(uenv)
452 }
453 }
453 req = requestmod.parserequestfromenv(
454 req = requestmod.parserequestfromenv(
454 uenv,
455 uenv,
455 reponame=virtualrepo,
456 reponame=virtualrepo,
456 altbaseurl=self.ui.config(b'web', b'baseurl'),
457 altbaseurl=self.ui.config(b'web', b'baseurl'),
457 # Reuse wrapped body file object otherwise state
458 # Reuse wrapped body file object otherwise state
458 # tracking can get confused.
459 # tracking can get confused.
459 bodyfh=req.bodyfh,
460 bodyfh=req.bodyfh,
460 )
461 )
461 try:
462 try:
462 # ensure caller gets private copy of ui
463 # ensure caller gets private copy of ui
463 repo = hg.repository(self.ui.copy(), real)
464 repo = hg.repository(self.ui.copy(), real)
464 return hgweb_mod.hgweb(repo).run_wsgi(req, res)
465 return hgweb_mod.hgweb(repo).run_wsgi(req, res)
465 except IOError as inst:
466 except IOError as inst:
466 msg = encoding.strtolocal(inst.strerror)
467 msg = encoding.strtolocal(inst.strerror)
467 raise ErrorResponse(HTTP_SERVER_ERROR, msg)
468 raise ErrorResponse(HTTP_SERVER_ERROR, msg)
468 except error.RepoError as inst:
469 except error.RepoError as inst:
469 raise ErrorResponse(HTTP_SERVER_ERROR, bytes(inst))
470 raise ErrorResponse(HTTP_SERVER_ERROR, bytes(inst))
470
471
471 # browse subdirectories
472 # browse subdirectories
472 subdir = virtual + b'/'
473 subdir = virtual + b'/'
473 if [r for r in repos if r.startswith(subdir)]:
474 if [r for r in repos if r.startswith(subdir)]:
474 return self.makeindex(req, res, tmpl, subdir)
475 return self.makeindex(req, res, tmpl, subdir)
475
476
476 # prefixes not found
477 # prefixes not found
477 res.status = b'404 Not Found'
478 res.status = b'404 Not Found'
478 res.setbodygen(tmpl.generate(b'notfound', {b'repo': virtual}))
479 res.setbodygen(tmpl.generate(b'notfound', {b'repo': virtual}))
479 return res.sendresponse()
480 return res.sendresponse()
480
481
481 except ErrorResponse as e:
482 except ErrorResponse as e:
482 res.status = statusmessage(e.code, pycompat.bytestr(e))
483 res.status = statusmessage(e.code, pycompat.bytestr(e))
483 res.setbodygen(
484 res.setbodygen(
484 tmpl.generate(b'error', {b'error': e.message or b''})
485 tmpl.generate(b'error', {b'error': e.message or b''})
485 )
486 )
486 return res.sendresponse()
487 return res.sendresponse()
487 finally:
488 finally:
488 tmpl = None
489 tmpl = None
489
490
490 def makeindex(self, req, res, tmpl, subdir=b""):
491 def makeindex(self, req, res, tmpl, subdir=b""):
491 self.refresh()
492 self.refresh()
492 sortable = [b"name", b"description", b"contact", b"lastchange"]
493 sortable = [b"name", b"description", b"contact", b"lastchange"]
493 sortcolumn, descending = None, False
494 sortcolumn, descending = None, False
494 if b'sort' in req.qsparams:
495 if b'sort' in req.qsparams:
495 sortcolumn = req.qsparams[b'sort']
496 sortcolumn = req.qsparams[b'sort']
496 descending = sortcolumn.startswith(b'-')
497 descending = sortcolumn.startswith(b'-')
497 if descending:
498 if descending:
498 sortcolumn = sortcolumn[1:]
499 sortcolumn = sortcolumn[1:]
499 if sortcolumn not in sortable:
500 if sortcolumn not in sortable:
500 sortcolumn = b""
501 sortcolumn = b""
501
502
502 sort = [
503 sort = [
503 (
504 (
504 b"sort_%s" % column,
505 b"sort_%s" % column,
505 b"%s%s"
506 b"%s%s"
506 % (
507 % (
507 (not descending and column == sortcolumn) and b"-" or b"",
508 (not descending and column == sortcolumn) and b"-" or b"",
508 column,
509 column,
509 ),
510 ),
510 )
511 )
511 for column in sortable
512 for column in sortable
512 ]
513 ]
513
514
514 self.refresh()
515 self.refresh()
515
516
516 entries = indexentries(
517 entries = indexentries(
517 self.ui,
518 self.ui,
518 self.repos,
519 self.repos,
519 req,
520 req,
520 self.stripecount,
521 self.stripecount,
521 sortcolumn=sortcolumn,
522 sortcolumn=sortcolumn,
522 descending=descending,
523 descending=descending,
523 subdir=subdir,
524 subdir=subdir,
524 )
525 )
525
526
526 mapping = {
527 mapping = {
527 b'entries': entries,
528 b'entries': entries,
528 b'subdir': subdir,
529 b'subdir': subdir,
529 b'pathdef': hgweb_mod.makebreadcrumb(b'/' + subdir, self.prefix),
530 b'pathdef': hgweb_mod.makebreadcrumb(b'/' + subdir, self.prefix),
530 b'sortcolumn': sortcolumn,
531 b'sortcolumn': sortcolumn,
531 b'descending': descending,
532 b'descending': descending,
532 }
533 }
533 mapping.update(sort)
534 mapping.update(sort)
534 res.setbodygen(tmpl.generate(b'index', mapping))
535 res.setbodygen(tmpl.generate(b'index', mapping))
535 return res.sendresponse()
536 return res.sendresponse()
536
537
537 def templater(self, req, nonce):
538 def templater(self, req, nonce):
538 def config(section, name, default=uimod._unset, untrusted=True):
539 def config(section, name, default=uimod._unset, untrusted=True):
539 return self.ui.config(section, name, default, untrusted)
540 return self.ui.config(section, name, default, untrusted)
540
541
541 vars = {}
542 vars = {}
542 styles, (style, mapfile) = hgweb_mod.getstyle(
543 styles, (style, mapfile) = hgweb_mod.getstyle(
543 req, config, self.templatepath
544 req, config, self.templatepath
544 )
545 )
545 if style == styles[0]:
546 if style == styles[0]:
546 vars[b'style'] = style
547 vars[b'style'] = style
547
548
548 sessionvars = webutil.sessionvars(vars, b'?')
549 sessionvars = webutil.sessionvars(vars, b'?')
549 logourl = config(b'web', b'logourl')
550 logourl = config(b'web', b'logourl')
550 logoimg = config(b'web', b'logoimg')
551 logoimg = config(b'web', b'logoimg')
551 staticurl = (
552 staticurl = (
552 config(b'web', b'staticurl')
553 config(b'web', b'staticurl')
553 or req.apppath.rstrip(b'/') + b'/static/'
554 or req.apppath.rstrip(b'/') + b'/static/'
554 )
555 )
555 if not staticurl.endswith(b'/'):
556 if not staticurl.endswith(b'/'):
556 staticurl += b'/'
557 staticurl += b'/'
557
558
558 defaults = {
559 defaults = {
559 b"encoding": encoding.encoding,
560 b"encoding": encoding.encoding,
560 b"url": req.apppath + b'/',
561 b"url": req.apppath + b'/',
561 b"logourl": logourl,
562 b"logourl": logourl,
562 b"logoimg": logoimg,
563 b"logoimg": logoimg,
563 b"staticurl": staticurl,
564 b"staticurl": staticurl,
564 b"sessionvars": sessionvars,
565 b"sessionvars": sessionvars,
565 b"style": style,
566 b"style": style,
566 b"nonce": nonce,
567 b"nonce": nonce,
567 }
568 }
568 templatekeyword = registrar.templatekeyword(defaults)
569 templatekeyword = registrar.templatekeyword(defaults)
569
570
570 @templatekeyword(b'motd', requires=())
571 @templatekeyword(b'motd', requires=())
571 def motd(context, mapping):
572 def motd(context, mapping):
572 if self.motd is not None:
573 if self.motd is not None:
573 yield self.motd
574 yield self.motd
574 else:
575 else:
575 yield config(b'web', b'motd')
576 yield config(b'web', b'motd')
576
577
577 tmpl = templater.templater.frommapfile(mapfile, defaults=defaults)
578 tmpl = templater.templater.frommapfile(mapfile, defaults=defaults)
578 return tmpl
579 return tmpl
@@ -1,1621 +1,1625 b''
1 # match.py - filename matching
1 # match.py - filename matching
2 #
2 #
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
3 # Copyright 2008, 2009 Matt Mackall <mpm@selenic.com> and others
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import, print_function
8 from __future__ import absolute_import, print_function
9
9
10 import copy
10 import copy
11 import itertools
11 import itertools
12 import os
12 import os
13 import re
13 import re
14
14
15 from .i18n import _
15 from .i18n import _
16 from .pycompat import open
16 from .pycompat import open
17 from . import (
17 from . import (
18 encoding,
18 encoding,
19 error,
19 error,
20 pathutil,
20 pathutil,
21 pathutil,
22 policy,
21 policy,
23 pycompat,
22 pycompat,
24 util,
23 util,
25 )
24 )
26 from .utils import stringutil
25 from .utils import stringutil
27
26
28 rustmod = policy.importrust('filepatterns')
27 rustmod = policy.importrust('filepatterns')
29
28
30 allpatternkinds = (
29 allpatternkinds = (
31 b're',
30 b're',
32 b'glob',
31 b'glob',
33 b'path',
32 b'path',
34 b'relglob',
33 b'relglob',
35 b'relpath',
34 b'relpath',
36 b'relre',
35 b'relre',
37 b'rootglob',
36 b'rootglob',
38 b'listfile',
37 b'listfile',
39 b'listfile0',
38 b'listfile0',
40 b'set',
39 b'set',
41 b'include',
40 b'include',
42 b'subinclude',
41 b'subinclude',
43 b'rootfilesin',
42 b'rootfilesin',
44 )
43 )
45 cwdrelativepatternkinds = (b'relpath', b'glob')
44 cwdrelativepatternkinds = (b'relpath', b'glob')
46
45
47 propertycache = util.propertycache
46 propertycache = util.propertycache
48
47
49
48
50 def _rematcher(regex):
49 def _rematcher(regex):
51 '''compile the regexp with the best available regexp engine and return a
50 '''compile the regexp with the best available regexp engine and return a
52 matcher function'''
51 matcher function'''
53 m = util.re.compile(regex)
52 m = util.re.compile(regex)
54 try:
53 try:
55 # slightly faster, provided by facebook's re2 bindings
54 # slightly faster, provided by facebook's re2 bindings
56 return m.test_match
55 return m.test_match
57 except AttributeError:
56 except AttributeError:
58 return m.match
57 return m.match
59
58
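Both branches of _rematcher return a callable with the same calling convention, so callers never care which engine compiled the pattern. A usage sketch of the fallback path, using only stdlib re (re2's test_match simply shares this convention):

```python
import re

def rematcher(regex):
    m = re.compile(regex)
    return getattr(m, 'test_match', m.match)  # prefer re2's API if present

match = rematcher(br'foo/.*\.py$')
assert match(b'foo/bar.py')
assert not match(b'baz/bar.py')
```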
60
59
61 def _expandsets(kindpats, ctx=None, listsubrepos=False, badfn=None):
60 def _expandsets(kindpats, ctx=None, listsubrepos=False, badfn=None):
62 '''Returns the kindpats list with the 'set' patterns expanded to matchers'''
61 '''Returns the kindpats list with the 'set' patterns expanded to matchers'''
63 matchers = []
62 matchers = []
64 other = []
63 other = []
65
64
66 for kind, pat, source in kindpats:
65 for kind, pat, source in kindpats:
67 if kind == b'set':
66 if kind == b'set':
68 if ctx is None:
67 if ctx is None:
69 raise error.ProgrammingError(
68 raise error.ProgrammingError(
70 b"fileset expression with no context"
69 b"fileset expression with no context"
71 )
70 )
72 matchers.append(ctx.matchfileset(pat, badfn=badfn))
71 matchers.append(ctx.matchfileset(pat, badfn=badfn))
73
72
74 if listsubrepos:
73 if listsubrepos:
75 for subpath in ctx.substate:
74 for subpath in ctx.substate:
76 sm = ctx.sub(subpath).matchfileset(pat, badfn=badfn)
75 sm = ctx.sub(subpath).matchfileset(pat, badfn=badfn)
77 pm = prefixdirmatcher(subpath, sm, badfn=badfn)
76 pm = prefixdirmatcher(subpath, sm, badfn=badfn)
78 matchers.append(pm)
77 matchers.append(pm)
79
78
80 continue
79 continue
81 other.append((kind, pat, source))
80 other.append((kind, pat, source))
82 return matchers, other
81 return matchers, other
83
82
84
83
85 def _expandsubinclude(kindpats, root):
84 def _expandsubinclude(kindpats, root):
86 '''Returns the list of subinclude matcher args and the kindpats without the
85 '''Returns the list of subinclude matcher args and the kindpats without the
87 subincludes in it.'''
86 subincludes in it.'''
88 relmatchers = []
87 relmatchers = []
89 other = []
88 other = []
90
89
91 for kind, pat, source in kindpats:
90 for kind, pat, source in kindpats:
92 if kind == b'subinclude':
91 if kind == b'subinclude':
93 sourceroot = pathutil.dirname(util.normpath(source))
92 sourceroot = pathutil.dirname(util.normpath(source))
94 pat = util.pconvert(pat)
93 pat = util.pconvert(pat)
95 path = pathutil.join(sourceroot, pat)
94 path = pathutil.join(sourceroot, pat)
96
95
97 newroot = pathutil.dirname(path)
96 newroot = pathutil.dirname(path)
98 matcherargs = (newroot, b'', [], [b'include:%s' % path])
97 matcherargs = (newroot, b'', [], [b'include:%s' % path])
99
98
100 prefix = pathutil.canonpath(root, root, newroot)
99 prefix = pathutil.canonpath(root, root, newroot)
101 if prefix:
100 if prefix:
102 prefix += b'/'
101 prefix += b'/'
103 relmatchers.append((prefix, matcherargs))
102 relmatchers.append((prefix, matcherargs))
104 else:
103 else:
105 other.append((kind, pat, source))
104 other.append((kind, pat, source))
106
105
107 return relmatchers, other
106 return relmatchers, other
108
107
109
108
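Worked example (a sketch; the b'/repo' layout is hypothetical): a subinclude
whose pattern file lives at b'/repo/sub/.hgignore' is rewritten into matcher
args rooted at the pattern file's directory, with a b'sub/' prefix relative
to the outer root:

>>> _expandsubinclude(
...     [(b'subinclude', b'.hgignore', b'/repo/sub/.hgignore')], b'/repo')
([(b'sub/', (b'/repo/sub', b'', [], [b'include:/repo/sub/.hgignore']))], [])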
110 def _kindpatsalwaysmatch(kindpats):
109 def _kindpatsalwaysmatch(kindpats):
111 """"Checks whether the kindspats match everything, as e.g.
110 """"Checks whether the kindspats match everything, as e.g.
112 'relpath:.' does.
111 'relpath:.' does.
113 """
112 """
114 for kind, pat, source in kindpats:
113 for kind, pat, source in kindpats:
115 if pat != b'' or kind not in [b'relpath', b'glob']:
114 if pat != b'' or kind not in [b'relpath', b'glob']:
116 return False
115 return False
117 return True
116 return True
118
117
119
118
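For instance (illustrative only), an empty cwd-relative pattern matches
everything, while any non-empty pattern does not:

>>> _kindpatsalwaysmatch([(b'relpath', b'', b'')])
True
>>> _kindpatsalwaysmatch([(b'glob', b'*.py', b'')])
False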
120 def _buildkindpatsmatcher(
119 def _buildkindpatsmatcher(
121 matchercls, root, kindpats, ctx=None, listsubrepos=False, badfn=None
120 matchercls, root, kindpats, ctx=None, listsubrepos=False, badfn=None
122 ):
121 ):
123 matchers = []
122 matchers = []
124 fms, kindpats = _expandsets(
123 fms, kindpats = _expandsets(
125 kindpats, ctx=ctx, listsubrepos=listsubrepos, badfn=badfn
124 kindpats, ctx=ctx, listsubrepos=listsubrepos, badfn=badfn
126 )
125 )
127 if kindpats:
126 if kindpats:
128 m = matchercls(root, kindpats, badfn=badfn)
127 m = matchercls(root, kindpats, badfn=badfn)
129 matchers.append(m)
128 matchers.append(m)
130 if fms:
129 if fms:
131 matchers.extend(fms)
130 matchers.extend(fms)
132 if not matchers:
131 if not matchers:
133 return nevermatcher(badfn=badfn)
132 return nevermatcher(badfn=badfn)
134 if len(matchers) == 1:
133 if len(matchers) == 1:
135 return matchers[0]
134 return matchers[0]
136 return unionmatcher(matchers)
135 return unionmatcher(matchers)
137
136
138
137
139 def match(
138 def match(
140 root,
139 root,
141 cwd,
140 cwd,
142 patterns=None,
141 patterns=None,
143 include=None,
142 include=None,
144 exclude=None,
143 exclude=None,
145 default=b'glob',
144 default=b'glob',
146 auditor=None,
145 auditor=None,
147 ctx=None,
146 ctx=None,
148 listsubrepos=False,
147 listsubrepos=False,
149 warn=None,
148 warn=None,
150 badfn=None,
149 badfn=None,
151 icasefs=False,
150 icasefs=False,
152 ):
151 ):
153 r"""build an object to match a set of file patterns
152 r"""build an object to match a set of file patterns
154
153
155 arguments:
154 arguments:
156 root - the canonical root of the tree you're matching against
155 root - the canonical root of the tree you're matching against
157 cwd - the current working directory, if relevant
156 cwd - the current working directory, if relevant
158 patterns - patterns to find
157 patterns - patterns to find
159 include - patterns to include (unless they are excluded)
158 include - patterns to include (unless they are excluded)
160 exclude - patterns to exclude (even if they are included)
159 exclude - patterns to exclude (even if they are included)
161 default - if a pattern in patterns has no explicit type, assume this one
160 default - if a pattern in patterns has no explicit type, assume this one
162 auditor - optional path auditor
161 auditor - optional path auditor
163 ctx - optional changecontext
162 ctx - optional changecontext
164 listsubrepos - if True, recurse into subrepositories
163 listsubrepos - if True, recurse into subrepositories
165 warn - optional function used for printing warnings
164 warn - optional function used for printing warnings
166 badfn - optional bad() callback for this matcher instead of the default
165 badfn - optional bad() callback for this matcher instead of the default
167 icasefs - make a matcher for wdir on case insensitive filesystems, which
166 icasefs - make a matcher for wdir on case insensitive filesystems, which
168 normalizes the given patterns to the case in the filesystem
167 normalizes the given patterns to the case in the filesystem
169
168
170 a pattern is one of:
169 a pattern is one of:
171 'glob:<glob>' - a glob relative to cwd
170 'glob:<glob>' - a glob relative to cwd
172 're:<regexp>' - a regular expression
171 're:<regexp>' - a regular expression
173 'path:<path>' - a path relative to repository root, which is matched
172 'path:<path>' - a path relative to repository root, which is matched
174 recursively
173 recursively
175 'rootfilesin:<path>' - a path relative to repository root, which is
174 'rootfilesin:<path>' - a path relative to repository root, which is
176 matched non-recursively (will not match subdirectories)
175 matched non-recursively (will not match subdirectories)
177 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
176 'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
178 'relpath:<path>' - a path relative to cwd
177 'relpath:<path>' - a path relative to cwd
179 'relre:<regexp>' - a regexp that needn't match the start of a name
178 'relre:<regexp>' - a regexp that needn't match the start of a name
180 'set:<fileset>' - a fileset expression
179 'set:<fileset>' - a fileset expression
181 'include:<path>' - a file of patterns to read and include
180 'include:<path>' - a file of patterns to read and include
182 'subinclude:<path>' - a file of patterns to match against files under
181 'subinclude:<path>' - a file of patterns to match against files under
183 the same directory
182 the same directory
184 '<something>' - a pattern of the specified default type
183 '<something>' - a pattern of the specified default type
185
184
186 Usually a patternmatcher is returned:
185 Usually a patternmatcher is returned:
187 >>> match(b'foo', b'.', [b're:.*\.c$', b'path:foo/a', b'*.py'])
186 >>> match(b'foo', b'.', [b're:.*\.c$', b'path:foo/a', b'*.py'])
188 <patternmatcher patterns='.*\\.c$|foo/a(?:/|$)|[^/]*\\.py$'>
187 <patternmatcher patterns='.*\\.c$|foo/a(?:/|$)|[^/]*\\.py$'>
189
188
190 Combining 'patterns' with 'include' (resp. 'exclude') gives an
189 Combining 'patterns' with 'include' (resp. 'exclude') gives an
191 intersectionmatcher (resp. a differencematcher):
190 intersectionmatcher (resp. a differencematcher):
192 >>> type(match(b'foo', b'.', [b're:.*\.c$'], include=[b'path:lib']))
191 >>> type(match(b'foo', b'.', [b're:.*\.c$'], include=[b'path:lib']))
193 <class 'mercurial.match.intersectionmatcher'>
192 <class 'mercurial.match.intersectionmatcher'>
194 >>> type(match(b'foo', b'.', [b're:.*\.c$'], exclude=[b'path:build']))
193 >>> type(match(b'foo', b'.', [b're:.*\.c$'], exclude=[b'path:build']))
195 <class 'mercurial.match.differencematcher'>
194 <class 'mercurial.match.differencematcher'>
196
195
197 Notice that, if 'patterns' is empty, an alwaysmatcher is returned:
196 Notice that, if 'patterns' is empty, an alwaysmatcher is returned:
198 >>> match(b'foo', b'.', [])
197 >>> match(b'foo', b'.', [])
199 <alwaysmatcher>
198 <alwaysmatcher>
200
199
201 The 'default' argument determines which kind of pattern is assumed if a
200 The 'default' argument determines which kind of pattern is assumed if a
202 pattern has no prefix:
201 pattern has no prefix:
203 >>> match(b'foo', b'.', [b'.*\.c$'], default=b're')
202 >>> match(b'foo', b'.', [b'.*\.c$'], default=b're')
204 <patternmatcher patterns='.*\\.c$'>
203 <patternmatcher patterns='.*\\.c$'>
205 >>> match(b'foo', b'.', [b'main.py'], default=b'relpath')
204 >>> match(b'foo', b'.', [b'main.py'], default=b'relpath')
206 <patternmatcher patterns='main\\.py(?:/|$)'>
205 <patternmatcher patterns='main\\.py(?:/|$)'>
207 >>> match(b'foo', b'.', [b'main.py'], default=b're')
206 >>> match(b'foo', b'.', [b'main.py'], default=b're')
208 <patternmatcher patterns='main.py'>
207 <patternmatcher patterns='main.py'>
209
208
210 The primary use of matchers is to check whether a value (usually a file
209 The primary use of matchers is to check whether a value (usually a file
211 name) matches against one of the patterns given at initialization. There
210 name) matches against one of the patterns given at initialization. There
212 are two ways of doing this check.
211 are two ways of doing this check.
213
212
214 >>> m = match(b'foo', b'', [b're:.*\.c$', b'relpath:a'])
213 >>> m = match(b'foo', b'', [b're:.*\.c$', b'relpath:a'])
215
214
216 1. Calling the matcher with a file name returns True if any pattern
215 1. Calling the matcher with a file name returns True if any pattern
217 matches that file name:
216 matches that file name:
218 >>> m(b'a')
217 >>> m(b'a')
219 True
218 True
220 >>> m(b'main.c')
219 >>> m(b'main.c')
221 True
220 True
222 >>> m(b'test.py')
221 >>> m(b'test.py')
223 False
222 False
224
223
225 2. Using the exact() method only returns True if the file name matches one
224 2. Using the exact() method only returns True if the file name matches one
226 of the exact patterns (i.e. not re: or glob: patterns):
225 of the exact patterns (i.e. not re: or glob: patterns):
227 >>> m.exact(b'a')
226 >>> m.exact(b'a')
228 True
227 True
229 >>> m.exact(b'main.c')
228 >>> m.exact(b'main.c')
230 False
229 False
231 """
230 """
232 normalize = _donormalize
231 normalize = _donormalize
233 if icasefs:
232 if icasefs:
234 dirstate = ctx.repo().dirstate
233 dirstate = ctx.repo().dirstate
235 dsnormalize = dirstate.normalize
234 dsnormalize = dirstate.normalize
236
235
237 def normalize(patterns, default, root, cwd, auditor, warn):
236 def normalize(patterns, default, root, cwd, auditor, warn):
238 kp = _donormalize(patterns, default, root, cwd, auditor, warn)
237 kp = _donormalize(patterns, default, root, cwd, auditor, warn)
239 kindpats = []
238 kindpats = []
240 for kind, pats, source in kp:
239 for kind, pats, source in kp:
241 if kind not in (b're', b'relre'): # regex can't be normalized
240 if kind not in (b're', b'relre'): # regex can't be normalized
242 p = pats
241 p = pats
243 pats = dsnormalize(pats)
242 pats = dsnormalize(pats)
244
243
245 # Preserve the original to handle a case only rename.
244 # Preserve the original to handle a case only rename.
246 if p != pats and p in dirstate:
245 if p != pats and p in dirstate:
247 kindpats.append((kind, p, source))
246 kindpats.append((kind, p, source))
248
247
249 kindpats.append((kind, pats, source))
248 kindpats.append((kind, pats, source))
250 return kindpats
249 return kindpats
251
250
252 if patterns:
251 if patterns:
253 kindpats = normalize(patterns, default, root, cwd, auditor, warn)
252 kindpats = normalize(patterns, default, root, cwd, auditor, warn)
254 if _kindpatsalwaysmatch(kindpats):
253 if _kindpatsalwaysmatch(kindpats):
255 m = alwaysmatcher(badfn)
254 m = alwaysmatcher(badfn)
256 else:
255 else:
257 m = _buildkindpatsmatcher(
256 m = _buildkindpatsmatcher(
258 patternmatcher,
257 patternmatcher,
259 root,
258 root,
260 kindpats,
259 kindpats,
261 ctx=ctx,
260 ctx=ctx,
262 listsubrepos=listsubrepos,
261 listsubrepos=listsubrepos,
263 badfn=badfn,
262 badfn=badfn,
264 )
263 )
265 else:
264 else:
266 # It's a little strange that no patterns means to match everything.
265 # It's a little strange that no patterns means to match everything.
267 # Consider changing this to match nothing (probably using nevermatcher).
266 # Consider changing this to match nothing (probably using nevermatcher).
268 m = alwaysmatcher(badfn)
267 m = alwaysmatcher(badfn)
269
268
270 if include:
269 if include:
271 kindpats = normalize(include, b'glob', root, cwd, auditor, warn)
270 kindpats = normalize(include, b'glob', root, cwd, auditor, warn)
272 im = _buildkindpatsmatcher(
271 im = _buildkindpatsmatcher(
273 includematcher,
272 includematcher,
274 root,
273 root,
275 kindpats,
274 kindpats,
276 ctx=ctx,
275 ctx=ctx,
277 listsubrepos=listsubrepos,
276 listsubrepos=listsubrepos,
278 badfn=None,
277 badfn=None,
279 )
278 )
280 m = intersectmatchers(m, im)
279 m = intersectmatchers(m, im)
281 if exclude:
280 if exclude:
282 kindpats = normalize(exclude, b'glob', root, cwd, auditor, warn)
281 kindpats = normalize(exclude, b'glob', root, cwd, auditor, warn)
283 em = _buildkindpatsmatcher(
282 em = _buildkindpatsmatcher(
284 includematcher,
283 includematcher,
285 root,
284 root,
286 kindpats,
285 kindpats,
287 ctx=ctx,
286 ctx=ctx,
288 listsubrepos=listsubrepos,
287 listsubrepos=listsubrepos,
289 badfn=None,
288 badfn=None,
290 )
289 )
291 m = differencematcher(m, em)
290 m = differencematcher(m, em)
292 return m
291 return m
293
292
294
293
295 def exact(files, badfn=None):
294 def exact(files, badfn=None):
296 return exactmatcher(files, badfn=badfn)
295 return exactmatcher(files, badfn=badfn)
297
296
298
297
299 def always(badfn=None):
298 def always(badfn=None):
300 return alwaysmatcher(badfn)
299 return alwaysmatcher(badfn)
301
300
302
301
303 def never(badfn=None):
302 def never(badfn=None):
304 return nevermatcher(badfn)
303 return nevermatcher(badfn)
305
304
306
305
307 def badmatch(match, badfn):
306 def badmatch(match, badfn):
308 """Make a copy of the given matcher, replacing its bad method with the given
307 """Make a copy of the given matcher, replacing its bad method with the given
309 one.
308 one.
310 """
309 """
311 m = copy.copy(match)
310 m = copy.copy(match)
312 m.bad = badfn
311 m.bad = badfn
313 return m
312 return m
314
313
315
314
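A small sketch of the intended use (quietbad is a hypothetical callback
name; any callable taking a file and a message works):

>>> def quietbad(f, msg):
...     pass
>>> m = badmatch(match(b'/repo', b'', [b'glob:*.py']), quietbad)
>>> m.bad is quietbad
True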
316 def _donormalize(patterns, default, root, cwd, auditor=None, warn=None):
315 def _donormalize(patterns, default, root, cwd, auditor=None, warn=None):
317 '''Convert 'kind:pat' from the patterns list to tuples with kind and
316 '''Convert 'kind:pat' from the patterns list to tuples with kind and
318 normalized and rooted patterns and with listfiles expanded.'''
317 normalized and rooted patterns and with listfiles expanded.'''
319 kindpats = []
318 kindpats = []
320 for kind, pat in [_patsplit(p, default) for p in patterns]:
319 for kind, pat in [_patsplit(p, default) for p in patterns]:
321 if kind in cwdrelativepatternkinds:
320 if kind in cwdrelativepatternkinds:
322 pat = pathutil.canonpath(root, cwd, pat, auditor=auditor)
321 pat = pathutil.canonpath(root, cwd, pat, auditor=auditor)
323 elif kind in (b'relglob', b'path', b'rootfilesin', b'rootglob'):
322 elif kind in (b'relglob', b'path', b'rootfilesin', b'rootglob'):
324 pat = util.normpath(pat)
323 pat = util.normpath(pat)
325 elif kind in (b'listfile', b'listfile0'):
324 elif kind in (b'listfile', b'listfile0'):
326 try:
325 try:
327 files = util.readfile(pat)
326 files = util.readfile(pat)
328 if kind == b'listfile0':
327 if kind == b'listfile0':
329 files = files.split(b'\0')
328 files = files.split(b'\0')
330 else:
329 else:
331 files = files.splitlines()
330 files = files.splitlines()
332 files = [f for f in files if f]
331 files = [f for f in files if f]
333 except EnvironmentError:
332 except EnvironmentError:
334 raise error.Abort(_(b"unable to read file list (%s)") % pat)
333 raise error.Abort(_(b"unable to read file list (%s)") % pat)
335 for k, p, source in _donormalize(
334 for k, p, source in _donormalize(
336 files, default, root, cwd, auditor, warn
335 files, default, root, cwd, auditor, warn
337 ):
336 ):
338 kindpats.append((k, p, pat))
337 kindpats.append((k, p, pat))
339 continue
338 continue
340 elif kind == b'include':
339 elif kind == b'include':
341 try:
340 try:
342 fullpath = os.path.join(root, util.localpath(pat))
341 fullpath = os.path.join(root, util.localpath(pat))
343 includepats = readpatternfile(fullpath, warn)
342 includepats = readpatternfile(fullpath, warn)
344 for k, p, source in _donormalize(
343 for k, p, source in _donormalize(
345 includepats, default, root, cwd, auditor, warn
344 includepats, default, root, cwd, auditor, warn
346 ):
345 ):
347 kindpats.append((k, p, source or pat))
346 kindpats.append((k, p, source or pat))
348 except error.Abort as inst:
347 except error.Abort as inst:
349 raise error.Abort(
348 raise error.Abort(
350 b'%s: %s'
349 b'%s: %s'
351 % (pat, inst[0]) # pytype: disable=unsupported-operands
350 % (pat, inst[0]) # pytype: disable=unsupported-operands
352 )
351 )
353 except IOError as inst:
352 except IOError as inst:
354 if warn:
353 if warn:
355 warn(
354 warn(
356 _(b"skipping unreadable pattern file '%s': %s\n")
355 _(b"skipping unreadable pattern file '%s': %s\n")
357 % (pat, stringutil.forcebytestr(inst.strerror))
356 % (pat, stringutil.forcebytestr(inst.strerror))
358 )
357 )
359 continue
358 continue
360 # else: re or relre - which cannot be normalized
359 # else: re or relre - which cannot be normalized
361 kindpats.append((kind, pat, b''))
360 kindpats.append((kind, pat, b''))
362 return kindpats
361 return kindpats
363
362
364
363
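For example (a sketch, with a hypothetical b'/repo' root): plain path kinds
are normalized in place, while re/relre patterns pass through untouched:

>>> _donormalize([b'path:foo//bar', br're:.*\.c$'], b'glob', b'/repo', b'')
[(b'path', b'foo/bar', b''), (b're', b'.*\\.c$', b'')]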
365 class basematcher(object):
364 class basematcher(object):
366 def __init__(self, badfn=None):
365 def __init__(self, badfn=None):
367 if badfn is not None:
366 if badfn is not None:
368 self.bad = badfn
367 self.bad = badfn
369
368
370 def __call__(self, fn):
369 def __call__(self, fn):
371 return self.matchfn(fn)
370 return self.matchfn(fn)
372
371
373 # Callbacks related to how the matcher is used by dirstate.walk.
372 # Callbacks related to how the matcher is used by dirstate.walk.
374 # Subscribers to these events must monkeypatch the matcher object.
373 # Subscribers to these events must monkeypatch the matcher object.
375 def bad(self, f, msg):
374 def bad(self, f, msg):
376 '''Callback from dirstate.walk for each explicit file that can't be
375 '''Callback from dirstate.walk for each explicit file that can't be
377 found/accessed, with an error message.'''
376 found/accessed, with an error message.'''
378
377
379 # If an explicitdir is set, it will be called when an explicitly listed
378 # If an explicitdir is set, it will be called when an explicitly listed
380 # directory is visited.
379 # directory is visited.
381 explicitdir = None
380 explicitdir = None
382
381
383 # If a traversedir is set, it will be called when a directory discovered
382 # If a traversedir is set, it will be called when a directory discovered
384 # by recursive traversal is visited.
383 # by recursive traversal is visited.
385 traversedir = None
384 traversedir = None
386
385
387 @propertycache
386 @propertycache
388 def _files(self):
387 def _files(self):
389 return []
388 return []
390
389
391 def files(self):
390 def files(self):
392 '''Explicitly listed files or patterns or roots:
391 '''Explicitly listed files or patterns or roots:
393 if no patterns or .always(): empty list,
392 if no patterns or .always(): empty list,
394 if exact: list exact files,
393 if exact: list exact files,
395 if not .anypats(): list all files and dirs,
394 if not .anypats(): list all files and dirs,
396 else: optimal roots'''
395 else: optimal roots'''
397 return self._files
396 return self._files
398
397
399 @propertycache
398 @propertycache
400 def _fileset(self):
399 def _fileset(self):
401 return set(self._files)
400 return set(self._files)
402
401
403 def exact(self, f):
402 def exact(self, f):
404 '''Returns True if f is in .files().'''
403 '''Returns True if f is in .files().'''
405 return f in self._fileset
404 return f in self._fileset
406
405
407 def matchfn(self, f):
406 def matchfn(self, f):
408 return False
407 return False
409
408
410 def visitdir(self, dir):
409 def visitdir(self, dir):
411 '''Decides whether a directory should be visited based on whether it
410 '''Decides whether a directory should be visited based on whether it
412 has potential matches in it or one of its subdirectories. This is
411 has potential matches in it or one of its subdirectories. This is
413 based on the match's primary, included, and excluded patterns.
412 based on the match's primary, included, and excluded patterns.
414
413
415 Returns the string 'all' if the given directory and all subdirectories
414 Returns the string 'all' if the given directory and all subdirectories
416 should be visited. Otherwise returns True or False indicating whether
415 should be visited. Otherwise returns True or False indicating whether
417 the given directory should be visited.
416 the given directory should be visited.
418 '''
417 '''
419 return True
418 return True
420
419
421 def visitchildrenset(self, dir):
420 def visitchildrenset(self, dir):
422 '''Decides whether a directory should be visited based on whether it
421 '''Decides whether a directory should be visited based on whether it
423 has potential matches in it or one of its subdirectories, and
422 has potential matches in it or one of its subdirectories, and
424 potentially lists which subdirectories of that directory should be
423 potentially lists which subdirectories of that directory should be
425 visited. This is based on the match's primary, included, and excluded
424 visited. This is based on the match's primary, included, and excluded
426 patterns.
425 patterns.
427
426
428 This function is very similar to 'visitdir', and the following mapping
427 This function is very similar to 'visitdir', and the following mapping
429 can be applied:
428 can be applied:
430
429
431 visitdir | visitchildrenset
430 visitdir | visitchildrenset
432 ----------+-------------------
431 ----------+-------------------
433 False | set()
432 False | set()
434 'all' | 'all'
433 'all' | 'all'
435 True | 'this' OR non-empty set of subdirs -or files- to visit
434 True | 'this' OR non-empty set of subdirs -or files- to visit
436
435
437 Example:
436 Example:
438 Assume matchers ['path:foo/bar', 'rootfilesin:qux'], we would return
437 Assume matchers ['path:foo/bar', 'rootfilesin:qux'], we would return
439 the following values (assuming the implementation of visitchildrenset
438 the following values (assuming the implementation of visitchildrenset
440 is capable of recognizing this; some implementations are not).
439 is capable of recognizing this; some implementations are not).
441
440
442 '' -> {'foo', 'qux'}
441 '' -> {'foo', 'qux'}
443 'baz' -> set()
442 'baz' -> set()
444 'foo' -> {'bar'}
443 'foo' -> {'bar'}
445 # Ideally this would be 'all', but since the prefix nature of matchers
444 # Ideally this would be 'all', but since the prefix nature of matchers
446 # is applied to the entire matcher, we have to downgrade this to
445 # is applied to the entire matcher, we have to downgrade this to
447 # 'this' due to the non-prefix 'rootfilesin'-kind matcher being mixed
446 # 'this' due to the non-prefix 'rootfilesin'-kind matcher being mixed
448 # in.
447 # in.
449 'foo/bar' -> 'this'
448 'foo/bar' -> 'this'
450 'qux' -> 'this'
449 'qux' -> 'this'
451
450
452 Important:
451 Important:
453 Most matchers do not know if they're representing files or
452 Most matchers do not know if they're representing files or
454 directories. They see ['path:dir/f'] and don't know whether 'f' is a
453 directories. They see ['path:dir/f'] and don't know whether 'f' is a
455 file or a directory, so visitchildrenset('dir') for most matchers will
454 file or a directory, so visitchildrenset('dir') for most matchers will
456 return {'f'}, but if the matcher knows it's a file (like exactmatcher
455 return {'f'}, but if the matcher knows it's a file (like exactmatcher
457 does), it may return 'this'. Do not rely on the return being a set
456 does), it may return 'this'. Do not rely on the return being a set
458 indicating that there are no files in this dir to investigate (or
457 indicating that there are no files in this dir to investigate (or
459 equivalently that if there are files to investigate in 'dir' that it
458 equivalently that if there are files to investigate in 'dir' that it
460 will always return 'this').
459 will always return 'this').
461 '''
460 '''
462 return b'this'
461 return b'this'
463
462
464 def always(self):
463 def always(self):
465 '''Matcher will match everything and .files() will be empty --
464 '''Matcher will match everything and .files() will be empty --
466 optimization might be possible.'''
465 optimization might be possible.'''
467 return False
466 return False
468
467
469 def isexact(self):
468 def isexact(self):
470 '''Matcher will match exactly the list of files in .files() --
469 '''Matcher will match exactly the list of files in .files() --
471 optimization might be possible.'''
470 optimization might be possible.'''
472 return False
471 return False
473
472
474 def prefix(self):
473 def prefix(self):
475 '''Matcher will match the paths in .files() recursively --
474 '''Matcher will match the paths in .files() recursively --
476 optimization might be possible.'''
475 optimization might be possible.'''
477 return False
476 return False
478
477
479 def anypats(self):
478 def anypats(self):
480 '''None of .always(), .isexact(), and .prefix() is true --
479 '''None of .always(), .isexact(), and .prefix() is true --
481 optimizations will be difficult.'''
480 optimizations will be difficult.'''
482 return not self.always() and not self.isexact() and not self.prefix()
481 return not self.always() and not self.isexact() and not self.prefix()
483
482
484
483
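To make the visitdir contract concrete (an illustrative session with a
hypothetical b'/repo' root; the bytes result is compared rather than printed
to keep the output stable):

>>> m = match(b'/repo', b'', [b'path:foo/bar'])
>>> m.visitdir(b'foo/bar') == b'all'   # everything below here matches
True
>>> m.visitdir(b'foo')                 # worth descending into
True
>>> m.visitdir(b'baz')                 # nothing to find here
False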
485 class alwaysmatcher(basematcher):
484 class alwaysmatcher(basematcher):
486 '''Matches everything.'''
485 '''Matches everything.'''
487
486
488 def __init__(self, badfn=None):
487 def __init__(self, badfn=None):
489 super(alwaysmatcher, self).__init__(badfn)
488 super(alwaysmatcher, self).__init__(badfn)
490
489
491 def always(self):
490 def always(self):
492 return True
491 return True
493
492
494 def matchfn(self, f):
493 def matchfn(self, f):
495 return True
494 return True
496
495
497 def visitdir(self, dir):
496 def visitdir(self, dir):
498 return b'all'
497 return b'all'
499
498
500 def visitchildrenset(self, dir):
499 def visitchildrenset(self, dir):
501 return b'all'
500 return b'all'
502
501
503 def __repr__(self):
502 def __repr__(self):
504 return r'<alwaysmatcher>'
503 return r'<alwaysmatcher>'
505
504
506
505
507 class nevermatcher(basematcher):
506 class nevermatcher(basematcher):
508 '''Matches nothing.'''
507 '''Matches nothing.'''
509
508
510 def __init__(self, badfn=None):
509 def __init__(self, badfn=None):
511 super(nevermatcher, self).__init__(badfn)
510 super(nevermatcher, self).__init__(badfn)
512
511
513 # It's a little weird to say that the nevermatcher is an exact matcher
512 # It's a little weird to say that the nevermatcher is an exact matcher
514 # or a prefix matcher, but it seems to make sense to let callers take
513 # or a prefix matcher, but it seems to make sense to let callers take
515 # fast paths based on either. There will be no exact matches, nor any
514 # fast paths based on either. There will be no exact matches, nor any
516 # prefixes (files() returns []), so fast paths iterating over them should
515 # prefixes (files() returns []), so fast paths iterating over them should
517 # be efficient (and correct).
516 # be efficient (and correct).
518 def isexact(self):
517 def isexact(self):
519 return True
518 return True
520
519
521 def prefix(self):
520 def prefix(self):
522 return True
521 return True
523
522
524 def visitdir(self, dir):
523 def visitdir(self, dir):
525 return False
524 return False
526
525
527 def visitchildrenset(self, dir):
526 def visitchildrenset(self, dir):
528 return set()
527 return set()
529
528
530 def __repr__(self):
529 def __repr__(self):
531 return r'<nevermatcher>'
530 return r'<nevermatcher>'
532
531
533
532
534 class predicatematcher(basematcher):
533 class predicatematcher(basematcher):
535 """A matcher adapter for a simple boolean function"""
534 """A matcher adapter for a simple boolean function"""
536
535
537 def __init__(self, predfn, predrepr=None, badfn=None):
536 def __init__(self, predfn, predrepr=None, badfn=None):
538 super(predicatematcher, self).__init__(badfn)
537 super(predicatematcher, self).__init__(badfn)
539 self.matchfn = predfn
538 self.matchfn = predfn
540 self._predrepr = predrepr
539 self._predrepr = predrepr
541
540
542 @encoding.strmethod
541 @encoding.strmethod
543 def __repr__(self):
542 def __repr__(self):
544 s = stringutil.buildrepr(self._predrepr) or pycompat.byterepr(
543 s = stringutil.buildrepr(self._predrepr) or pycompat.byterepr(
545 self.matchfn
544 self.matchfn
546 )
545 )
547 return b'<predicatematcher pred=%s>' % s
546 return b'<predicatematcher pred=%s>' % s
548
547
549
548
550 class patternmatcher(basematcher):
549 class patternmatcher(basematcher):
551 """Matches a set of (kind, pat, source) against a 'root' directory.
550 """Matches a set of (kind, pat, source) against a 'root' directory.
552
551
553 >>> kindpats = [
552 >>> kindpats = [
554 ... (b're', br'.*\.c$', b''),
553 ... (b're', br'.*\.c$', b''),
555 ... (b'path', b'foo/a', b''),
554 ... (b'path', b'foo/a', b''),
556 ... (b'relpath', b'b', b''),
555 ... (b'relpath', b'b', b''),
557 ... (b'glob', b'*.h', b''),
556 ... (b'glob', b'*.h', b''),
558 ... ]
557 ... ]
559 >>> m = patternmatcher(b'foo', kindpats)
558 >>> m = patternmatcher(b'foo', kindpats)
560 >>> m(b'main.c') # matches re:.*\.c$
559 >>> m(b'main.c') # matches re:.*\.c$
561 True
560 True
562 >>> m(b'b.txt')
561 >>> m(b'b.txt')
563 False
562 False
564 >>> m(b'foo/a') # matches path:foo/a
563 >>> m(b'foo/a') # matches path:foo/a
565 True
564 True
566 >>> m(b'a') # does not match path:b, since 'root' is 'foo'
565 >>> m(b'a') # does not match path:b, since 'root' is 'foo'
567 False
566 False
568 >>> m(b'b') # matches relpath:b, since 'root' is 'foo'
567 >>> m(b'b') # matches relpath:b, since 'root' is 'foo'
569 True
568 True
570 >>> m(b'lib.h') # matches glob:*.h
569 >>> m(b'lib.h') # matches glob:*.h
571 True
570 True
572
571
573 >>> m.files()
572 >>> m.files()
574 ['', 'foo/a', 'b', '']
573 ['', 'foo/a', 'b', '']
575 >>> m.exact(b'foo/a')
574 >>> m.exact(b'foo/a')
576 True
575 True
577 >>> m.exact(b'b')
576 >>> m.exact(b'b')
578 True
577 True
579 >>> m.exact(b'lib.h') # exact matches are for (rel)path kinds
578 >>> m.exact(b'lib.h') # exact matches are for (rel)path kinds
580 False
579 False
581 """
580 """
582
581
583 def __init__(self, root, kindpats, badfn=None):
582 def __init__(self, root, kindpats, badfn=None):
584 super(patternmatcher, self).__init__(badfn)
583 super(patternmatcher, self).__init__(badfn)
585
584
586 self._files = _explicitfiles(kindpats)
585 self._files = _explicitfiles(kindpats)
587 self._prefix = _prefix(kindpats)
586 self._prefix = _prefix(kindpats)
588 self._pats, self.matchfn = _buildmatch(kindpats, b'$', root)
587 self._pats, self.matchfn = _buildmatch(kindpats, b'$', root)
589
588
590 @propertycache
589 @propertycache
591 def _dirs(self):
590 def _dirs(self):
592 return set(pathutil.dirs(self._fileset))
591 return set(pathutil.dirs(self._fileset))
593
592
594 def visitdir(self, dir):
593 def visitdir(self, dir):
595 if self._prefix and dir in self._fileset:
594 if self._prefix and dir in self._fileset:
596 return b'all'
595 return b'all'
597 return (
596 return (
598 dir in self._fileset
597 dir in self._fileset
599 or dir in self._dirs
598 or dir in self._dirs
600 or any(
599 or any(
601 parentdir in self._fileset for parentdir in util.finddirs(dir)
600 parentdir in self._fileset
601 for parentdir in pathutil.finddirs(dir)
602 )
602 )
603 )
603 )
604
604
605 def visitchildrenset(self, dir):
605 def visitchildrenset(self, dir):
606 ret = self.visitdir(dir)
606 ret = self.visitdir(dir)
607 if ret is True:
607 if ret is True:
608 return b'this'
608 return b'this'
609 elif not ret:
609 elif not ret:
610 return set()
610 return set()
611 assert ret == b'all'
611 assert ret == b'all'
612 return b'all'
612 return b'all'
613
613
614 def prefix(self):
614 def prefix(self):
615 return self._prefix
615 return self._prefix
616
616
617 @encoding.strmethod
617 @encoding.strmethod
618 def __repr__(self):
618 def __repr__(self):
619 return b'<patternmatcher patterns=%r>' % pycompat.bytestr(self._pats)
619 return b'<patternmatcher patterns=%r>' % pycompat.bytestr(self._pats)
620
620
621
621
622 # This is basically a reimplementation of pathutil.dirs that stores the
622 # This is basically a reimplementation of pathutil.dirs that stores the
623 # children instead of just a count of them, plus a small optional optimization
623 # children instead of just a count of them, plus a small optional optimization
624 # to avoid some directories we don't need.
624 # to avoid some directories we don't need.
625 class _dirchildren(object):
625 class _dirchildren(object):
626 def __init__(self, paths, onlyinclude=None):
626 def __init__(self, paths, onlyinclude=None):
627 self._dirs = {}
627 self._dirs = {}
628 self._onlyinclude = onlyinclude or []
628 self._onlyinclude = onlyinclude or []
629 addpath = self.addpath
629 addpath = self.addpath
630 for f in paths:
630 for f in paths:
631 addpath(f)
631 addpath(f)
632
632
633 def addpath(self, path):
633 def addpath(self, path):
634 if path == b'':
634 if path == b'':
635 return
635 return
636 dirs = self._dirs
636 dirs = self._dirs
637 findsplitdirs = _dirchildren._findsplitdirs
637 findsplitdirs = _dirchildren._findsplitdirs
638 for d, b in findsplitdirs(path):
638 for d, b in findsplitdirs(path):
639 if d not in self._onlyinclude:
639 if d not in self._onlyinclude:
640 continue
640 continue
641 dirs.setdefault(d, set()).add(b)
641 dirs.setdefault(d, set()).add(b)
642
642
643 @staticmethod
643 @staticmethod
644 def _findsplitdirs(path):
644 def _findsplitdirs(path):
645 # yields (dirname, basename) tuples, walking back to the root. This is
645 # yields (dirname, basename) tuples, walking back to the root. This is
646 # very similar to util.finddirs, except:
646 # very similar to pathutil.finddirs, except:
647 # - produces a (dirname, basename) tuple, not just 'dirname'
647 # - produces a (dirname, basename) tuple, not just 'dirname'
648 # Unlike manifest._splittopdir, this does not suffix `dirname` with a
648 # Unlike manifest._splittopdir, this does not suffix `dirname` with a
649 # slash.
649 # slash.
650 oldpos = len(path)
650 oldpos = len(path)
651 pos = path.rfind(b'/')
651 pos = path.rfind(b'/')
652 while pos != -1:
652 while pos != -1:
653 yield path[:pos], path[pos + 1 : oldpos]
653 yield path[:pos], path[pos + 1 : oldpos]
654 oldpos = pos
654 oldpos = pos
655 pos = path.rfind(b'/', 0, pos)
655 pos = path.rfind(b'/', 0, pos)
656 yield b'', path[:oldpos]
656 yield b'', path[:oldpos]
657
657
658 def get(self, path):
658 def get(self, path):
659 return self._dirs.get(path, set())
659 return self._dirs.get(path, set())
660
660
661
661
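Sketch of the helper's walk (illustrative): each (dirname, basename) pair is
produced from the deepest directory back to the root:

>>> list(_dirchildren._findsplitdirs(b'a/b/c')) == [
...     (b'a/b', b'c'), (b'a', b'b'), (b'', b'a')]
True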
662 class includematcher(basematcher):
662 class includematcher(basematcher):
663 def __init__(self, root, kindpats, badfn=None):
663 def __init__(self, root, kindpats, badfn=None):
664 super(includematcher, self).__init__(badfn)
664 super(includematcher, self).__init__(badfn)
665
665
666 self._pats, self.matchfn = _buildmatch(kindpats, b'(?:/|$)', root)
666 self._pats, self.matchfn = _buildmatch(kindpats, b'(?:/|$)', root)
667 self._prefix = _prefix(kindpats)
667 self._prefix = _prefix(kindpats)
668 roots, dirs, parents = _rootsdirsandparents(kindpats)
668 roots, dirs, parents = _rootsdirsandparents(kindpats)
669 # roots are directories which are recursively included.
669 # roots are directories which are recursively included.
670 self._roots = set(roots)
670 self._roots = set(roots)
671 # dirs are directories which are non-recursively included.
671 # dirs are directories which are non-recursively included.
672 self._dirs = set(dirs)
672 self._dirs = set(dirs)
673 # parents are directories which are non-recursively included because
673 # parents are directories which are non-recursively included because
674 # they are needed to get to items in _dirs or _roots.
674 # they are needed to get to items in _dirs or _roots.
675 self._parents = parents
675 self._parents = parents
676
676
677 def visitdir(self, dir):
677 def visitdir(self, dir):
678 if self._prefix and dir in self._roots:
678 if self._prefix and dir in self._roots:
679 return b'all'
679 return b'all'
680 return (
680 return (
681 dir in self._roots
681 dir in self._roots
682 or dir in self._dirs
682 or dir in self._dirs
683 or dir in self._parents
683 or dir in self._parents
684 or any(parentdir in self._roots for parentdir in util.finddirs(dir))
684 or any(
685 parentdir in self._roots for parentdir in pathutil.finddirs(dir)
686 )
685 )
687 )
686
688
687 @propertycache
689 @propertycache
688 def _allparentschildren(self):
690 def _allparentschildren(self):
689 # It may seem odd that we add dirs, roots, and parents, and then
691 # It may seem odd that we add dirs, roots, and parents, and then
690 # restrict to only parents. This is to catch the case of:
692 # restrict to only parents. This is to catch the case of:
691 # dirs = ['foo/bar']
693 # dirs = ['foo/bar']
692 # parents = ['foo']
694 # parents = ['foo']
693 # if we asked for the children of 'foo', but had only added
695 # if we asked for the children of 'foo', but had only added
694 # self._parents, we wouldn't be able to respond ['bar'].
696 # self._parents, we wouldn't be able to respond ['bar'].
695 return _dirchildren(
697 return _dirchildren(
696 itertools.chain(self._dirs, self._roots, self._parents),
698 itertools.chain(self._dirs, self._roots, self._parents),
697 onlyinclude=self._parents,
699 onlyinclude=self._parents,
698 )
700 )
699
701
700 def visitchildrenset(self, dir):
702 def visitchildrenset(self, dir):
701 if self._prefix and dir in self._roots:
703 if self._prefix and dir in self._roots:
702 return b'all'
704 return b'all'
703 # Note: this does *not* include the 'dir in self._parents' case from
705 # Note: this does *not* include the 'dir in self._parents' case from
704 # visitdir, that's handled below.
706 # visitdir, that's handled below.
705 if (
707 if (
706 b'' in self._roots
708 b'' in self._roots
707 or dir in self._roots
709 or dir in self._roots
708 or dir in self._dirs
710 or dir in self._dirs
709 or any(parentdir in self._roots for parentdir in util.finddirs(dir))
711 or any(
712 parentdir in self._roots for parentdir in pathutil.finddirs(dir)
713 )
710 ):
714 ):
711 return b'this'
715 return b'this'
712
716
713 if dir in self._parents:
717 if dir in self._parents:
714 return self._allparentschildren.get(dir) or set()
718 return self._allparentschildren.get(dir) or set()
715 return set()
719 return set()
716
720
717 @encoding.strmethod
721 @encoding.strmethod
718 def __repr__(self):
722 def __repr__(self):
719 return b'<includematcher includes=%r>' % pycompat.bytestr(self._pats)
723 return b'<includematcher includes=%r>' % pycompat.bytestr(self._pats)
720
724
721
725
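This mirrors the worked example in basematcher.visitchildrenset (a sketch;
the kindpats are fed in directly, and set results are compared rather than
printed):

>>> im = includematcher(b'/repo', [
...     (b'path', b'foo/bar', b''), (b'rootfilesin', b'qux', b'')])
>>> im.visitchildrenset(b'') == {b'foo', b'qux'}
True
>>> im.visitchildrenset(b'foo') == {b'bar'}
True
>>> im.visitchildrenset(b'foo/bar') == b'this'
True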
722 class exactmatcher(basematcher):
726 class exactmatcher(basematcher):
723 r'''Matches the input files exactly. They are interpreted as paths, not
727 r'''Matches the input files exactly. They are interpreted as paths, not
724 patterns (so no kind-prefixes).
728 patterns (so no kind-prefixes).
725
729
726 >>> m = exactmatcher([b'a.txt', br're:.*\.c$'])
730 >>> m = exactmatcher([b'a.txt', br're:.*\.c$'])
727 >>> m(b'a.txt')
731 >>> m(b'a.txt')
728 True
732 True
729 >>> m(b'b.txt')
733 >>> m(b'b.txt')
730 False
734 False
731
735
732 Input files that would be matched are exactly those returned by .files()
736 Input files that would be matched are exactly those returned by .files()
733 >>> m.files()
737 >>> m.files()
734 ['a.txt', 're:.*\\.c$']
738 ['a.txt', 're:.*\\.c$']
735
739
736 So pattern 're:.*\.c$' is not considered as a regex, but as a file name
740 So pattern 're:.*\.c$' is not considered as a regex, but as a file name
737 >>> m(b'main.c')
741 >>> m(b'main.c')
738 False
742 False
739 >>> m(br're:.*\.c$')
743 >>> m(br're:.*\.c$')
740 True
744 True
741 '''
745 '''
742
746
743 def __init__(self, files, badfn=None):
747 def __init__(self, files, badfn=None):
744 super(exactmatcher, self).__init__(badfn)
748 super(exactmatcher, self).__init__(badfn)
745
749
746 if isinstance(files, list):
750 if isinstance(files, list):
747 self._files = files
751 self._files = files
748 else:
752 else:
749 self._files = list(files)
753 self._files = list(files)
750
754
751 matchfn = basematcher.exact
755 matchfn = basematcher.exact
752
756
753 @propertycache
757 @propertycache
754 def _dirs(self):
758 def _dirs(self):
755 return set(pathutil.dirs(self._fileset))
759 return set(pathutil.dirs(self._fileset))
756
760
757 def visitdir(self, dir):
761 def visitdir(self, dir):
758 return dir in self._dirs
762 return dir in self._dirs
759
763
760 def visitchildrenset(self, dir):
764 def visitchildrenset(self, dir):
761 if not self._fileset or dir not in self._dirs:
765 if not self._fileset or dir not in self._dirs:
762 return set()
766 return set()
763
767
764 candidates = self._fileset | self._dirs - {b''}
768 candidates = self._fileset | self._dirs - {b''}
765 if dir != b'':
769 if dir != b'':
766 d = dir + b'/'
770 d = dir + b'/'
767 candidates = set(c[len(d) :] for c in candidates if c.startswith(d))
771 candidates = set(c[len(d) :] for c in candidates if c.startswith(d))
768 # self._dirs includes all of the directories, recursively, so if
772 # self._dirs includes all of the directories, recursively, so if
769 # we're attempting to match foo/bar/baz.txt, it'll have '', 'foo',
773 # we're attempting to match foo/bar/baz.txt, it'll have '', 'foo',
770 # 'foo/bar' in it. Thus we can safely ignore a candidate that has a
774 # 'foo/bar' in it. Thus we can safely ignore a candidate that has a
771 # '/' in it, indicating it's for a subdir-of-a-subdir; the
775 # '/' in it, indicating it's for a subdir-of-a-subdir; the
772 # immediate subdir will be in there without a slash.
776 # immediate subdir will be in there without a slash.
773 ret = {c for c in candidates if b'/' not in c}
777 ret = {c for c in candidates if b'/' not in c}
774 # We really do not expect ret to be empty, since that would imply that
778 # We really do not expect ret to be empty, since that would imply that
775 # there's something in _dirs that didn't have a file in _fileset.
779 # there's something in _dirs that didn't have a file in _fileset.
776 assert ret
780 assert ret
777 return ret
781 return ret
778
782
779 def isexact(self):
783 def isexact(self):
780 return True
784 return True
781
785
782 @encoding.strmethod
786 @encoding.strmethod
783 def __repr__(self):
787 def __repr__(self):
784 return b'<exactmatcher files=%r>' % self._files
788 return b'<exactmatcher files=%r>' % self._files
785
789
786
790
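A short sketch of the directory pruning described above (illustrative):

>>> m = exactmatcher([b'a/b/c.txt', b'a/d.txt'])
>>> m.visitchildrenset(b'a') == {b'b', b'd.txt'}
True
>>> m.visitdir(b'a/b')
True
>>> m.visitdir(b'x')
False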
787 class differencematcher(basematcher):
791 class differencematcher(basematcher):
788 '''Composes two matchers by matching if the first matches and the second
792 '''Composes two matchers by matching if the first matches and the second
789 does not.
793 does not.
790
794
791 The second matcher's non-matching-attributes (bad, explicitdir,
795 The second matcher's non-matching-attributes (bad, explicitdir,
792 traversedir) are ignored.
796 traversedir) are ignored.
793 '''
797 '''
794
798
795 def __init__(self, m1, m2):
799 def __init__(self, m1, m2):
796 super(differencematcher, self).__init__()
800 super(differencematcher, self).__init__()
797 self._m1 = m1
801 self._m1 = m1
798 self._m2 = m2
802 self._m2 = m2
799 self.bad = m1.bad
803 self.bad = m1.bad
800 self.explicitdir = m1.explicitdir
804 self.explicitdir = m1.explicitdir
801 self.traversedir = m1.traversedir
805 self.traversedir = m1.traversedir
802
806
803 def matchfn(self, f):
807 def matchfn(self, f):
804 return self._m1(f) and not self._m2(f)
808 return self._m1(f) and not self._m2(f)
805
809
806 @propertycache
810 @propertycache
807 def _files(self):
811 def _files(self):
808 if self.isexact():
812 if self.isexact():
809 return [f for f in self._m1.files() if self(f)]
813 return [f for f in self._m1.files() if self(f)]
810 # If m1 is not an exact matcher, we can't easily figure out the set of
814 # If m1 is not an exact matcher, we can't easily figure out the set of
811 # files, because its files() are not always files. For example, if
815 # files, because its files() are not always files. For example, if
812 # m1 is "path:dir" and m2 is "rootfilesin:.", we don't
816 # m1 is "path:dir" and m2 is "rootfilesin:.", we don't
813 # want to remove "dir" from the set even though it would match m2,
817 # want to remove "dir" from the set even though it would match m2,
814 # because the "dir" in m1 may not be a file.
818 # because the "dir" in m1 may not be a file.
815 return self._m1.files()
819 return self._m1.files()
816
820
817 def visitdir(self, dir):
821 def visitdir(self, dir):
818 if self._m2.visitdir(dir) == b'all':
822 if self._m2.visitdir(dir) == b'all':
819 return False
823 return False
820 elif not self._m2.visitdir(dir):
824 elif not self._m2.visitdir(dir):
821 # m2 does not match dir, we can return 'all' here if possible
825 # m2 does not match dir, we can return 'all' here if possible
822 return self._m1.visitdir(dir)
826 return self._m1.visitdir(dir)
823 return bool(self._m1.visitdir(dir))
827 return bool(self._m1.visitdir(dir))
824
828
825 def visitchildrenset(self, dir):
829 def visitchildrenset(self, dir):
826 m2_set = self._m2.visitchildrenset(dir)
830 m2_set = self._m2.visitchildrenset(dir)
827 if m2_set == b'all':
831 if m2_set == b'all':
828 return set()
832 return set()
829 m1_set = self._m1.visitchildrenset(dir)
833 m1_set = self._m1.visitchildrenset(dir)
830 # Possible values for m1: 'all', 'this', set(...), set()
834 # Possible values for m1: 'all', 'this', set(...), set()
831 # Possible values for m2: 'this', set(...), set()
835 # Possible values for m2: 'this', set(...), set()
832 # If m2 has nothing under here that we care about, return m1, even if
836 # If m2 has nothing under here that we care about, return m1, even if
833 # it's 'all'. This is a change in behavior from visitdir, which would
837 # it's 'all'. This is a change in behavior from visitdir, which would
834 # return True, not 'all', for some reason.
838 # return True, not 'all', for some reason.
835 if not m2_set:
839 if not m2_set:
836 return m1_set
840 return m1_set
837 if m1_set in [b'all', b'this']:
841 if m1_set in [b'all', b'this']:
838 # Never return 'all' here if m2_set is any kind of non-empty (either
842 # Never return 'all' here if m2_set is any kind of non-empty (either
839 # 'this' or set(foo)), since m2 might return set() for a
843 # 'this' or set(foo)), since m2 might return set() for a
840 # subdirectory.
844 # subdirectory.
841 return b'this'
845 return b'this'
842 # Possible values for m1: set(...), set()
846 # Possible values for m1: set(...), set()
843 # Possible values for m2: 'this', set(...)
847 # Possible values for m2: 'this', set(...)
844 # We ignore m2's set results. They're possibly incorrect:
848 # We ignore m2's set results. They're possibly incorrect:
845 # m1 = path:dir/subdir, m2=rootfilesin:dir, visitchildrenset(''):
849 # m1 = path:dir/subdir, m2=rootfilesin:dir, visitchildrenset(''):
846 # m1 returns {'dir'}, m2 returns {'dir'}, if we subtracted we'd
850 # m1 returns {'dir'}, m2 returns {'dir'}, if we subtracted we'd
847 # return set(), which is *not* correct, we still need to visit 'dir'!
851 # return set(), which is *not* correct, we still need to visit 'dir'!
848 return m1_set
852 return m1_set
849
853
850 def isexact(self):
854 def isexact(self):
851 return self._m1.isexact()
855 return self._m1.isexact()
852
856
853 @encoding.strmethod
857 @encoding.strmethod
854 def __repr__(self):
858 def __repr__(self):
855 return b'<differencematcher m1=%r, m2=%r>' % (self._m1, self._m2)
859 return b'<differencematcher m1=%r, m2=%r>' % (self._m1, self._m2)
856
860
857
861
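For example (a sketch with a hypothetical b'/repo' root), subtracting a
non-recursive rootfilesin matcher from a recursive path matcher keeps the
deeper files:

>>> m1 = match(b'/repo', b'', [b'path:dir'])
>>> m2 = match(b'/repo', b'', [b'rootfilesin:dir'])
>>> dm = differencematcher(m1, m2)
>>> dm(b'dir/f.txt')        # removed: a file directly inside dir/
False
>>> dm(b'dir/sub/f.txt')    # kept: rootfilesin does not reach this deep
True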
858 def intersectmatchers(m1, m2):
862 def intersectmatchers(m1, m2):
859 '''Composes two matchers by matching if both of them match.
863 '''Composes two matchers by matching if both of them match.
860
864
861 The second matcher's non-matching-attributes (bad, explicitdir,
865 The second matcher's non-matching-attributes (bad, explicitdir,
862 traversedir) are ignored.
866 traversedir) are ignored.
863 '''
867 '''
864 if m1 is None or m2 is None:
868 if m1 is None or m2 is None:
865 return m1 or m2
869 return m1 or m2
866 if m1.always():
870 if m1.always():
867 m = copy.copy(m2)
871 m = copy.copy(m2)
868 # TODO: Consider encapsulating these things in a class so there's only
872 # TODO: Consider encapsulating these things in a class so there's only
869 # one thing to copy from m1.
873 # one thing to copy from m1.
870 m.bad = m1.bad
874 m.bad = m1.bad
871 m.explicitdir = m1.explicitdir
875 m.explicitdir = m1.explicitdir
872 m.traversedir = m1.traversedir
876 m.traversedir = m1.traversedir
873 return m
877 return m
874 if m2.always():
878 if m2.always():
875 m = copy.copy(m1)
879 m = copy.copy(m1)
876 return m
880 return m
877 return intersectionmatcher(m1, m2)
881 return intersectionmatcher(m1, m2)
878
882
879
883
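Sketch of the fast paths (illustrative): a None operand short-circuits, and
an always-matching operand yields a plain copy of the other side:

>>> m1 = match(b'/repo', b'', [b'glob:*.py'])
>>> intersectmatchers(m1, None) is m1
True
>>> m = intersectmatchers(m1, alwaysmatcher())
>>> m(b'setup.py')
True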
880 class intersectionmatcher(basematcher):
884 class intersectionmatcher(basematcher):
881 def __init__(self, m1, m2):
885 def __init__(self, m1, m2):
882 super(intersectionmatcher, self).__init__()
886 super(intersectionmatcher, self).__init__()
883 self._m1 = m1
887 self._m1 = m1
884 self._m2 = m2
888 self._m2 = m2
885 self.bad = m1.bad
889 self.bad = m1.bad
886 self.explicitdir = m1.explicitdir
890 self.explicitdir = m1.explicitdir
887 self.traversedir = m1.traversedir
891 self.traversedir = m1.traversedir
888
892
889 @propertycache
893 @propertycache
890 def _files(self):
894 def _files(self):
891 if self.isexact():
895 if self.isexact():
892 m1, m2 = self._m1, self._m2
896 m1, m2 = self._m1, self._m2
893 if not m1.isexact():
897 if not m1.isexact():
894 m1, m2 = m2, m1
898 m1, m2 = m2, m1
895 return [f for f in m1.files() if m2(f)]
899 return [f for f in m1.files() if m2(f)]
896 # If neither m1 nor m2 is an exact matcher, we can't easily intersect
900 # If neither m1 nor m2 is an exact matcher, we can't easily intersect
897 # the set of files, because their files() are not always files. For
901 # the set of files, because their files() are not always files. For
898 # example, if intersecting a matcher "-I glob:foo.txt" with matcher of
902 # example, if intersecting a matcher "-I glob:foo.txt" with matcher of
899 # "path:dir2", we don't want to remove "dir2" from the set.
903 # "path:dir2", we don't want to remove "dir2" from the set.
900 return self._m1.files() + self._m2.files()
904 return self._m1.files() + self._m2.files()
901
905
902 def matchfn(self, f):
906 def matchfn(self, f):
903 return self._m1(f) and self._m2(f)
907 return self._m1(f) and self._m2(f)
904
908
905 def visitdir(self, dir):
909 def visitdir(self, dir):
906 visit1 = self._m1.visitdir(dir)
910 visit1 = self._m1.visitdir(dir)
907 if visit1 == b'all':
911 if visit1 == b'all':
908 return self._m2.visitdir(dir)
912 return self._m2.visitdir(dir)
909 # bool() because visit1=True + visit2='all' should not be 'all'
913 # bool() because visit1=True + visit2='all' should not be 'all'
910 return bool(visit1 and self._m2.visitdir(dir))
914 return bool(visit1 and self._m2.visitdir(dir))
911
915
912 def visitchildrenset(self, dir):
916 def visitchildrenset(self, dir):
913 m1_set = self._m1.visitchildrenset(dir)
917 m1_set = self._m1.visitchildrenset(dir)
914 if not m1_set:
918 if not m1_set:
915 return set()
919 return set()
916 m2_set = self._m2.visitchildrenset(dir)
920 m2_set = self._m2.visitchildrenset(dir)
917 if not m2_set:
921 if not m2_set:
918 return set()
922 return set()
919
923
920 if m1_set == b'all':
924 if m1_set == b'all':
921 return m2_set
925 return m2_set
922 elif m2_set == b'all':
926 elif m2_set == b'all':
923 return m1_set
927 return m1_set
924
928
925 if m1_set == b'this' or m2_set == b'this':
929 if m1_set == b'this' or m2_set == b'this':
926 return b'this'
930 return b'this'
927
931
928 assert isinstance(m1_set, set) and isinstance(m2_set, set)
932 assert isinstance(m1_set, set) and isinstance(m2_set, set)
929 return m1_set.intersection(m2_set)
933 return m1_set.intersection(m2_set)
930
934
931 def always(self):
935 def always(self):
932 return self._m1.always() and self._m2.always()
936 return self._m1.always() and self._m2.always()
933
937
934 def isexact(self):
938 def isexact(self):
935 return self._m1.isexact() or self._m2.isexact()
939 return self._m1.isexact() or self._m2.isexact()
936
940
937 @encoding.strmethod
941 @encoding.strmethod
938 def __repr__(self):
942 def __repr__(self):
939 return b'<intersectionmatcher m1=%r, m2=%r>' % (self._m1, self._m2)
943 return b'<intersectionmatcher m1=%r, m2=%r>' % (self._m1, self._m2)
940
944
941
945
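Illustrating the bool() downgrade noted in visitdir (a sketch): one side
returning 'all' must not leak through when the other side only says True:

>>> m = intersectionmatcher(
...     match(b'/repo', b'', [b'path:foo']),
...     match(b'/repo', b'', [b'path:foo/bar']))
>>> m.visitdir(b'foo/bar') == b'all'
False
>>> bool(m.visitdir(b'foo/bar'))
True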
942 class subdirmatcher(basematcher):
946 class subdirmatcher(basematcher):
943 """Adapt a matcher to work on a subdirectory only.
947 """Adapt a matcher to work on a subdirectory only.
944
948
945 The paths are remapped to remove/insert the path as needed:
949 The paths are remapped to remove/insert the path as needed:
946
950
947 >>> from . import pycompat
951 >>> from . import pycompat
948 >>> m1 = match(b'root', b'', [b'a.txt', b'sub/b.txt'])
952 >>> m1 = match(b'root', b'', [b'a.txt', b'sub/b.txt'])
949 >>> m2 = subdirmatcher(b'sub', m1)
953 >>> m2 = subdirmatcher(b'sub', m1)
950 >>> m2(b'a.txt')
954 >>> m2(b'a.txt')
951 False
955 False
952 >>> m2(b'b.txt')
956 >>> m2(b'b.txt')
953 True
957 True
954 >>> m2.matchfn(b'a.txt')
958 >>> m2.matchfn(b'a.txt')
955 False
959 False
956 >>> m2.matchfn(b'b.txt')
960 >>> m2.matchfn(b'b.txt')
957 True
961 True
958 >>> m2.files()
962 >>> m2.files()
959 ['b.txt']
963 ['b.txt']
960 >>> m2.exact(b'b.txt')
964 >>> m2.exact(b'b.txt')
961 True
965 True
962 >>> def bad(f, msg):
966 >>> def bad(f, msg):
963 ... print(pycompat.sysstr(b"%s: %s" % (f, msg)))
967 ... print(pycompat.sysstr(b"%s: %s" % (f, msg)))
964 >>> m1.bad = bad
968 >>> m1.bad = bad
965 >>> m2.bad(b'x.txt', b'No such file')
969 >>> m2.bad(b'x.txt', b'No such file')
966 sub/x.txt: No such file
970 sub/x.txt: No such file
967 """
971 """
968
972
969 def __init__(self, path, matcher):
973 def __init__(self, path, matcher):
970 super(subdirmatcher, self).__init__()
974 super(subdirmatcher, self).__init__()
971 self._path = path
975 self._path = path
972 self._matcher = matcher
976 self._matcher = matcher
973 self._always = matcher.always()
977 self._always = matcher.always()
974
978
975 self._files = [
979 self._files = [
976 f[len(path) + 1 :]
980 f[len(path) + 1 :]
977 for f in matcher._files
981 for f in matcher._files
978 if f.startswith(path + b"/")
982 if f.startswith(path + b"/")
979 ]
983 ]
980
984
981 # If the parent repo had a path to this subrepo and the matcher is
985 # If the parent repo had a path to this subrepo and the matcher is
982 # a prefix matcher, this submatcher always matches.
986 # a prefix matcher, this submatcher always matches.
983 if matcher.prefix():
987 if matcher.prefix():
984 self._always = any(f == path for f in matcher._files)
988 self._always = any(f == path for f in matcher._files)
985
989
986 def bad(self, f, msg):
990 def bad(self, f, msg):
987 self._matcher.bad(self._path + b"/" + f, msg)
991 self._matcher.bad(self._path + b"/" + f, msg)
988
992
989 def matchfn(self, f):
993 def matchfn(self, f):
990 # Some information is lost in the superclass's constructor, so we
994 # Some information is lost in the superclass's constructor, so we
991 # can not accurately create the matching function for the subdirectory
995 # can not accurately create the matching function for the subdirectory
992 # from the inputs. Instead, we override matchfn() and visitdir() to
996 # from the inputs. Instead, we override matchfn() and visitdir() to
993 # call the original matcher with the subdirectory path prepended.
997 # call the original matcher with the subdirectory path prepended.
994 return self._matcher.matchfn(self._path + b"/" + f)
998 return self._matcher.matchfn(self._path + b"/" + f)
995
999
996 def visitdir(self, dir):
1000 def visitdir(self, dir):
997 if dir == b'':
1001 if dir == b'':
998 dir = self._path
1002 dir = self._path
999 else:
1003 else:
1000 dir = self._path + b"/" + dir
1004 dir = self._path + b"/" + dir
1001 return self._matcher.visitdir(dir)
1005 return self._matcher.visitdir(dir)
1002
1006
1003 def visitchildrenset(self, dir):
1007 def visitchildrenset(self, dir):
1004 if dir == b'':
1008 if dir == b'':
1005 dir = self._path
1009 dir = self._path
1006 else:
1010 else:
1007 dir = self._path + b"/" + dir
1011 dir = self._path + b"/" + dir
1008 return self._matcher.visitchildrenset(dir)
1012 return self._matcher.visitchildrenset(dir)
1009
1013
1010 def always(self):
1014 def always(self):
1011 return self._always
1015 return self._always
1012
1016
1013 def prefix(self):
1017 def prefix(self):
1014 return self._matcher.prefix() and not self._always
1018 return self._matcher.prefix() and not self._always
1015
1019
1016 @encoding.strmethod
1020 @encoding.strmethod
1017 def __repr__(self):
1021 def __repr__(self):
1018 return b'<subdirmatcher path=%r, matcher=%r>' % (
1022 return b'<subdirmatcher path=%r, matcher=%r>' % (
1019 self._path,
1023 self._path,
1020 self._matcher,
1024 self._matcher,
1021 )
1025 )
1022
1026
1023
1027
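The remapping idea above is small enough to demonstrate in isolation. A minimal sketch, assuming a matcher can be reduced to a plain set of file names (the SubdirMatcher name and set-backed storage are hypothetical, not Mercurial's API):

class SubdirMatcher(object):
    # illustrative stand-in: keep only files under `path`, prefix stripped
    def __init__(self, path, files):
        self._prefix = path + b'/'
        self._files = set(
            f[len(self._prefix):] for f in files if f.startswith(self._prefix)
        )

    def __call__(self, f):
        return f in self._files

m = SubdirMatcher(b'sub', [b'a.txt', b'sub/b.txt'])
assert not m(b'a.txt')
assert m(b'b.txt')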
1024 class prefixdirmatcher(basematcher):
1028 class prefixdirmatcher(basematcher):
1025 """Adapt a matcher to work on a parent directory.
1029 """Adapt a matcher to work on a parent directory.
1026
1030
1027 The matcher's non-matching-attributes (bad, explicitdir, traversedir) are
1031 The matcher's non-matching-attributes (bad, explicitdir, traversedir) are
1028 ignored.
1032 ignored.
1029
1033
1030 The prefix path should usually be the relative path from the root of
1034 The prefix path should usually be the relative path from the root of
1031 this matcher to the root of the wrapped matcher.
1035 this matcher to the root of the wrapped matcher.
1032
1036
1033 >>> m1 = match(util.localpath(b'root/d/e'), b'f', [b'../a.txt', b'b.txt'])
1037 >>> m1 = match(util.localpath(b'root/d/e'), b'f', [b'../a.txt', b'b.txt'])
1034 >>> m2 = prefixdirmatcher(b'd/e', m1)
1038 >>> m2 = prefixdirmatcher(b'd/e', m1)
1035 >>> m2(b'a.txt')
1039 >>> m2(b'a.txt')
1036 False
1040 False
1037 >>> m2(b'd/e/a.txt')
1041 >>> m2(b'd/e/a.txt')
1038 True
1042 True
1039 >>> m2(b'd/e/b.txt')
1043 >>> m2(b'd/e/b.txt')
1040 False
1044 False
1041 >>> m2.files()
1045 >>> m2.files()
1042 ['d/e/a.txt', 'd/e/f/b.txt']
1046 ['d/e/a.txt', 'd/e/f/b.txt']
1043 >>> m2.exact(b'd/e/a.txt')
1047 >>> m2.exact(b'd/e/a.txt')
1044 True
1048 True
1045 >>> m2.visitdir(b'd')
1049 >>> m2.visitdir(b'd')
1046 True
1050 True
1047 >>> m2.visitdir(b'd/e')
1051 >>> m2.visitdir(b'd/e')
1048 True
1052 True
1049 >>> m2.visitdir(b'd/e/f')
1053 >>> m2.visitdir(b'd/e/f')
1050 True
1054 True
1051 >>> m2.visitdir(b'd/e/g')
1055 >>> m2.visitdir(b'd/e/g')
1052 False
1056 False
1053 >>> m2.visitdir(b'd/ef')
1057 >>> m2.visitdir(b'd/ef')
1054 False
1058 False
1055 """
1059 """
1056
1060
1057 def __init__(self, path, matcher, badfn=None):
1061 def __init__(self, path, matcher, badfn=None):
1058 super(prefixdirmatcher, self).__init__(badfn)
1062 super(prefixdirmatcher, self).__init__(badfn)
1059 if not path:
1063 if not path:
1060 raise error.ProgrammingError(b'prefix path must not be empty')
1064 raise error.ProgrammingError(b'prefix path must not be empty')
1061 self._path = path
1065 self._path = path
1062 self._pathprefix = path + b'/'
1066 self._pathprefix = path + b'/'
1063 self._matcher = matcher
1067 self._matcher = matcher
1064
1068
1065 @propertycache
1069 @propertycache
1066 def _files(self):
1070 def _files(self):
1067 return [self._pathprefix + f for f in self._matcher._files]
1071 return [self._pathprefix + f for f in self._matcher._files]
1068
1072
1069 def matchfn(self, f):
1073 def matchfn(self, f):
1070 if not f.startswith(self._pathprefix):
1074 if not f.startswith(self._pathprefix):
1071 return False
1075 return False
1072 return self._matcher.matchfn(f[len(self._pathprefix) :])
1076 return self._matcher.matchfn(f[len(self._pathprefix) :])
1073
1077
1074 @propertycache
1078 @propertycache
1075 def _pathdirs(self):
1079 def _pathdirs(self):
1076 return set(util.finddirs(self._path))
1080 return set(pathutil.finddirs(self._path))
1077
1081
1078 def visitdir(self, dir):
1082 def visitdir(self, dir):
1079 if dir == self._path:
1083 if dir == self._path:
1080 return self._matcher.visitdir(b'')
1084 return self._matcher.visitdir(b'')
1081 if dir.startswith(self._pathprefix):
1085 if dir.startswith(self._pathprefix):
1082 return self._matcher.visitdir(dir[len(self._pathprefix) :])
1086 return self._matcher.visitdir(dir[len(self._pathprefix) :])
1083 return dir in self._pathdirs
1087 return dir in self._pathdirs
1084
1088
1085 def visitchildrenset(self, dir):
1089 def visitchildrenset(self, dir):
1086 if dir == self._path:
1090 if dir == self._path:
1087 return self._matcher.visitchildrenset(b'')
1091 return self._matcher.visitchildrenset(b'')
1088 if dir.startswith(self._pathprefix):
1092 if dir.startswith(self._pathprefix):
1089 return self._matcher.visitchildrenset(dir[len(self._pathprefix) :])
1093 return self._matcher.visitchildrenset(dir[len(self._pathprefix) :])
1090 if dir in self._pathdirs:
1094 if dir in self._pathdirs:
1091 return b'this'
1095 return b'this'
1092 return set()
1096 return set()
1093
1097
1094 def isexact(self):
1098 def isexact(self):
1095 return self._matcher.isexact()
1099 return self._matcher.isexact()
1096
1100
1097 def prefix(self):
1101 def prefix(self):
1098 return self._matcher.prefix()
1102 return self._matcher.prefix()
1099
1103
1100 @encoding.strmethod
1104 @encoding.strmethod
1101 def __repr__(self):
1105 def __repr__(self):
1102 return b'<prefixdirmatcher path=%r, matcher=%r>' % (
1106 return b'<prefixdirmatcher path=%r, matcher=%r>' % (
1103 pycompat.bytestr(self._path),
1107 pycompat.bytestr(self._path),
1104 self._matcher,
1108 self._matcher,
1105 )
1109 )
1106
1110
1107
1111
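The _pathdirs set above relies on pathutil.finddirs() (the call this changeset repoints from util to pathutil) yielding every ancestor directory of a path, ending with b'' for the root; that is what lets visitdir(b'd') return True for a matcher rooted at b'd/e'. A sketch compatible with that documented behaviour, though not necessarily the exact upstream implementation:

def finddirs(path):
    # yield each ancestor directory of `path`, innermost first,
    # finishing with b'' (the repository root)
    pos = path.rfind(b'/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind(b'/', 0, pos)
    yield b''

assert list(finddirs(b'a/b/c')) == [b'a/b', b'a', b'']
assert set(finddirs(b'd/e')) == {b'd', b''}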
1108 class unionmatcher(basematcher):
1112 class unionmatcher(basematcher):
1109 """A matcher that is the union of several matchers.
1113 """A matcher that is the union of several matchers.
1110
1114
1111 The non-matching-attributes (bad, explicitdir, traversedir) are taken from
1115 The non-matching-attributes (bad, explicitdir, traversedir) are taken from
1112 the first matcher.
1116 the first matcher.
1113 """
1117 """
1114
1118
1115 def __init__(self, matchers):
1119 def __init__(self, matchers):
1116 m1 = matchers[0]
1120 m1 = matchers[0]
1117 super(unionmatcher, self).__init__()
1121 super(unionmatcher, self).__init__()
1118 self.explicitdir = m1.explicitdir
1122 self.explicitdir = m1.explicitdir
1119 self.traversedir = m1.traversedir
1123 self.traversedir = m1.traversedir
1120 self._matchers = matchers
1124 self._matchers = matchers
1121
1125
1122 def matchfn(self, f):
1126 def matchfn(self, f):
1123 for match in self._matchers:
1127 for match in self._matchers:
1124 if match(f):
1128 if match(f):
1125 return True
1129 return True
1126 return False
1130 return False
1127
1131
1128 def visitdir(self, dir):
1132 def visitdir(self, dir):
1129 r = False
1133 r = False
1130 for m in self._matchers:
1134 for m in self._matchers:
1131 v = m.visitdir(dir)
1135 v = m.visitdir(dir)
1132 if v == b'all':
1136 if v == b'all':
1133 return v
1137 return v
1134 r |= v
1138 r |= v
1135 return r
1139 return r
1136
1140
1137 def visitchildrenset(self, dir):
1141 def visitchildrenset(self, dir):
1138 r = set()
1142 r = set()
1139 this = False
1143 this = False
1140 for m in self._matchers:
1144 for m in self._matchers:
1141 v = m.visitchildrenset(dir)
1145 v = m.visitchildrenset(dir)
1142 if not v:
1146 if not v:
1143 continue
1147 continue
1144 if v == b'all':
1148 if v == b'all':
1145 return v
1149 return v
1146 if this or v == b'this':
1150 if this or v == b'this':
1147 this = True
1151 this = True
1148 # don't break, we might have an 'all' in here.
1152 # don't break, we might have an 'all' in here.
1149 continue
1153 continue
1150 assert isinstance(v, set)
1154 assert isinstance(v, set)
1151 r = r.union(v)
1155 r = r.union(v)
1152 if this:
1156 if this:
1153 return b'this'
1157 return b'this'
1154 return r
1158 return r
1155
1159
1156 @encoding.strmethod
1160 @encoding.strmethod
1157 def __repr__(self):
1161 def __repr__(self):
1158 return b'<unionmatcher matchers=%r>' % self._matchers
1162 return b'<unionmatcher matchers=%r>' % self._matchers
1159
1163
1160
1164
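The visitchildrenset merging above follows a simple lattice: b'all' dominates everything, b'this' absorbs plain sets, and sets union together. The rule in isolation, as a sketch detached from the matcher classes:

def combine_childrensets(answers):
    # mirror of unionmatcher.visitchildrenset's merging: 'all' wins,
    # 'this' beats plain sets, sets are unioned otherwise
    result = set()
    this = False
    for v in answers:
        if not v:
            continue
        if v == b'all':
            return b'all'
        if v == b'this' or this:
            this = True
            continue
        result |= v
    return b'this' if this else result

assert combine_childrensets([set(), {b'a'}, {b'b'}]) == {b'a', b'b'}
assert combine_childrensets([{b'a'}, b'this']) == b'this'
assert combine_childrensets([b'this', b'all']) == b'all'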
1161 def patkind(pattern, default=None):
1165 def patkind(pattern, default=None):
1162 '''If pattern is 'kind:pat' with a known kind, return kind.
1166 '''If pattern is 'kind:pat' with a known kind, return kind.
1163
1167
1164 >>> patkind(br're:.*\.c$')
1168 >>> patkind(br're:.*\.c$')
1165 're'
1169 're'
1166 >>> patkind(b'glob:*.c')
1170 >>> patkind(b'glob:*.c')
1167 'glob'
1171 'glob'
1168 >>> patkind(b'relpath:test.py')
1172 >>> patkind(b'relpath:test.py')
1169 'relpath'
1173 'relpath'
1170 >>> patkind(b'main.py')
1174 >>> patkind(b'main.py')
1171 >>> patkind(b'main.py', default=b're')
1175 >>> patkind(b'main.py', default=b're')
1172 're'
1176 're'
1173 '''
1177 '''
1174 return _patsplit(pattern, default)[0]
1178 return _patsplit(pattern, default)[0]
1175
1179
1176
1180
1177 def _patsplit(pattern, default):
1181 def _patsplit(pattern, default):
1178 """Split a string into the optional pattern kind prefix and the actual
1182 """Split a string into the optional pattern kind prefix and the actual
1179 pattern."""
1183 pattern."""
1180 if b':' in pattern:
1184 if b':' in pattern:
1181 kind, pat = pattern.split(b':', 1)
1185 kind, pat = pattern.split(b':', 1)
1182 if kind in allpatternkinds:
1186 if kind in allpatternkinds:
1183 return kind, pat
1187 return kind, pat
1184 return default, pattern
1188 return default, pattern
1185
1189
1186
1190
1187 def _globre(pat):
1191 def _globre(pat):
1188 r'''Convert an extended glob string to a regexp string.
1192 r'''Convert an extended glob string to a regexp string.
1189
1193
1190 >>> from . import pycompat
1194 >>> from . import pycompat
1191 >>> def bprint(s):
1195 >>> def bprint(s):
1192 ... print(pycompat.sysstr(s))
1196 ... print(pycompat.sysstr(s))
1193 >>> bprint(_globre(br'?'))
1197 >>> bprint(_globre(br'?'))
1194 .
1198 .
1195 >>> bprint(_globre(br'*'))
1199 >>> bprint(_globre(br'*'))
1196 [^/]*
1200 [^/]*
1197 >>> bprint(_globre(br'**'))
1201 >>> bprint(_globre(br'**'))
1198 .*
1202 .*
1199 >>> bprint(_globre(br'**/a'))
1203 >>> bprint(_globre(br'**/a'))
1200 (?:.*/)?a
1204 (?:.*/)?a
1201 >>> bprint(_globre(br'a/**/b'))
1205 >>> bprint(_globre(br'a/**/b'))
1202 a/(?:.*/)?b
1206 a/(?:.*/)?b
1203 >>> bprint(_globre(br'[a*?!^][^b][!c]'))
1207 >>> bprint(_globre(br'[a*?!^][^b][!c]'))
1204 [a*?!^][\^b][^c]
1208 [a*?!^][\^b][^c]
1205 >>> bprint(_globre(br'{a,b}'))
1209 >>> bprint(_globre(br'{a,b}'))
1206 (?:a|b)
1210 (?:a|b)
1207 >>> bprint(_globre(br'.\*\?'))
1211 >>> bprint(_globre(br'.\*\?'))
1208 \.\*\?
1212 \.\*\?
1209 '''
1213 '''
1210 i, n = 0, len(pat)
1214 i, n = 0, len(pat)
1211 res = b''
1215 res = b''
1212 group = 0
1216 group = 0
1213 escape = util.stringutil.regexbytesescapemap.get
1217 escape = util.stringutil.regexbytesescapemap.get
1214
1218
1215 def peek():
1219 def peek():
1216 return i < n and pat[i : i + 1]
1220 return i < n and pat[i : i + 1]
1217
1221
1218 while i < n:
1222 while i < n:
1219 c = pat[i : i + 1]
1223 c = pat[i : i + 1]
1220 i += 1
1224 i += 1
1221 if c not in b'*?[{},\\':
1225 if c not in b'*?[{},\\':
1222 res += escape(c, c)
1226 res += escape(c, c)
1223 elif c == b'*':
1227 elif c == b'*':
1224 if peek() == b'*':
1228 if peek() == b'*':
1225 i += 1
1229 i += 1
1226 if peek() == b'/':
1230 if peek() == b'/':
1227 i += 1
1231 i += 1
1228 res += b'(?:.*/)?'
1232 res += b'(?:.*/)?'
1229 else:
1233 else:
1230 res += b'.*'
1234 res += b'.*'
1231 else:
1235 else:
1232 res += b'[^/]*'
1236 res += b'[^/]*'
1233 elif c == b'?':
1237 elif c == b'?':
1234 res += b'.'
1238 res += b'.'
1235 elif c == b'[':
1239 elif c == b'[':
1236 j = i
1240 j = i
1237 if j < n and pat[j : j + 1] in b'!]':
1241 if j < n and pat[j : j + 1] in b'!]':
1238 j += 1
1242 j += 1
1239 while j < n and pat[j : j + 1] != b']':
1243 while j < n and pat[j : j + 1] != b']':
1240 j += 1
1244 j += 1
1241 if j >= n:
1245 if j >= n:
1242 res += b'\\['
1246 res += b'\\['
1243 else:
1247 else:
1244 stuff = pat[i:j].replace(b'\\', b'\\\\')
1248 stuff = pat[i:j].replace(b'\\', b'\\\\')
1245 i = j + 1
1249 i = j + 1
1246 if stuff[0:1] == b'!':
1250 if stuff[0:1] == b'!':
1247 stuff = b'^' + stuff[1:]
1251 stuff = b'^' + stuff[1:]
1248 elif stuff[0:1] == b'^':
1252 elif stuff[0:1] == b'^':
1249 stuff = b'\\' + stuff
1253 stuff = b'\\' + stuff
1250 res = b'%s[%s]' % (res, stuff)
1254 res = b'%s[%s]' % (res, stuff)
1251 elif c == b'{':
1255 elif c == b'{':
1252 group += 1
1256 group += 1
1253 res += b'(?:'
1257 res += b'(?:'
1254 elif c == b'}' and group:
1258 elif c == b'}' and group:
1255 res += b')'
1259 res += b')'
1256 group -= 1
1260 group -= 1
1257 elif c == b',' and group:
1261 elif c == b',' and group:
1258 res += b'|'
1262 res += b'|'
1259 elif c == b'\\':
1263 elif c == b'\\':
1260 p = peek()
1264 p = peek()
1261 if p:
1265 if p:
1262 i += 1
1266 i += 1
1263 res += escape(p, p)
1267 res += escape(p, p)
1264 else:
1268 else:
1265 res += escape(c, c)
1269 res += escape(c, c)
1266 else:
1270 else:
1267 res += escape(c, c)
1271 res += escape(c, c)
1268 return res
1272 return res
1269
1273
1270
1274
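The regexps _globre produces can be exercised directly with Python's re module. Using the documented translation of b'a/**/b' from the doctests above, anchored here with b'$' as a stand-in for the glob suffix:

import re

pat = re.compile(b'a/(?:.*/)?b$')
assert pat.match(b'a/b')        # '**' may match nothing at all
assert pat.match(b'a/x/y/b')    # ... or any number of directories
assert not pat.match(b'ab')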
1271 def _regex(kind, pat, globsuffix):
1275 def _regex(kind, pat, globsuffix):
1272 '''Convert a (normalized) pattern of any kind into a
1276 '''Convert a (normalized) pattern of any kind into a
1273 regular expression.
1277 regular expression.
1274 globsuffix is appended to the regexp of globs.'''
1278 globsuffix is appended to the regexp of globs.'''
1275
1279
1276 if rustmod is not None:
1280 if rustmod is not None:
1277 try:
1281 try:
1278 return rustmod.build_single_regex(kind, pat, globsuffix)
1282 return rustmod.build_single_regex(kind, pat, globsuffix)
1279 except rustmod.PatternError:
1283 except rustmod.PatternError:
1280 raise error.ProgrammingError(
1284 raise error.ProgrammingError(
1281 b'not a regex pattern: %s:%s' % (kind, pat)
1285 b'not a regex pattern: %s:%s' % (kind, pat)
1282 )
1286 )
1283
1287
1284 if not pat and kind in (b'glob', b'relpath'):
1288 if not pat and kind in (b'glob', b'relpath'):
1285 return b''
1289 return b''
1286 if kind == b're':
1290 if kind == b're':
1287 return pat
1291 return pat
1288 if kind in (b'path', b'relpath'):
1292 if kind in (b'path', b'relpath'):
1289 if pat == b'.':
1293 if pat == b'.':
1290 return b''
1294 return b''
1291 return util.stringutil.reescape(pat) + b'(?:/|$)'
1295 return util.stringutil.reescape(pat) + b'(?:/|$)'
1292 if kind == b'rootfilesin':
1296 if kind == b'rootfilesin':
1293 if pat == b'.':
1297 if pat == b'.':
1294 escaped = b''
1298 escaped = b''
1295 else:
1299 else:
1296 # Pattern is a directory name.
1300 # Pattern is a directory name.
1297 escaped = util.stringutil.reescape(pat) + b'/'
1301 escaped = util.stringutil.reescape(pat) + b'/'
1298 # Anything after the pattern must be a non-directory.
1302 # Anything after the pattern must be a non-directory.
1299 return escaped + b'[^/]+$'
1303 return escaped + b'[^/]+$'
1300 if kind == b'relglob':
1304 if kind == b'relglob':
1301 globre = _globre(pat)
1305 globre = _globre(pat)
1302 if globre.startswith(b'[^/]*'):
1306 if globre.startswith(b'[^/]*'):
1303 # When pat has the form *XYZ (common), make the returned regex more
1307 # When pat has the form *XYZ (common), make the returned regex more
1304 # legible by returning the regex for **XYZ instead of **/*XYZ.
1308 # legible by returning the regex for **XYZ instead of **/*XYZ.
1305 return b'.*' + globre[len(b'[^/]*') :] + globsuffix
1309 return b'.*' + globre[len(b'[^/]*') :] + globsuffix
1306 return b'(?:|.*/)' + globre + globsuffix
1310 return b'(?:|.*/)' + globre + globsuffix
1307 if kind == b'relre':
1311 if kind == b'relre':
1308 if pat.startswith(b'^'):
1312 if pat.startswith(b'^'):
1309 return pat
1313 return pat
1310 return b'.*' + pat
1314 return b'.*' + pat
1311 if kind in (b'glob', b'rootglob'):
1315 if kind in (b'glob', b'rootglob'):
1312 return _globre(pat) + globsuffix
1316 return _globre(pat) + globsuffix
1313 raise error.ProgrammingError(b'not a regex pattern: %s:%s' % (kind, pat))
1317 raise error.ProgrammingError(b'not a regex pattern: %s:%s' % (kind, pat))
1314
1318
1315
1319
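As a quick sanity check of the b'rootfilesin' branch above: the generated regex accepts files directly inside the named directory and nothing deeper.

import re

# b'rootfilesin:foo' becomes b'foo/[^/]+$' per _regex above
pat = re.compile(b'foo/[^/]+$')
assert pat.match(b'foo/a.txt')
assert not pat.match(b'foo/bar/b.txt')   # nested one level too deep
assert not pat.match(b'foo')             # the directory itself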
1316 def _buildmatch(kindpats, globsuffix, root):
1320 def _buildmatch(kindpats, globsuffix, root):
1317 '''Return regexp string and a matcher function for kindpats.
1321 '''Return regexp string and a matcher function for kindpats.
1318 globsuffix is appended to the regexp of globs.'''
1322 globsuffix is appended to the regexp of globs.'''
1319 matchfuncs = []
1323 matchfuncs = []
1320
1324
1321 subincludes, kindpats = _expandsubinclude(kindpats, root)
1325 subincludes, kindpats = _expandsubinclude(kindpats, root)
1322 if subincludes:
1326 if subincludes:
1323 submatchers = {}
1327 submatchers = {}
1324
1328
1325 def matchsubinclude(f):
1329 def matchsubinclude(f):
1326 for prefix, matcherargs in subincludes:
1330 for prefix, matcherargs in subincludes:
1327 if f.startswith(prefix):
1331 if f.startswith(prefix):
1328 mf = submatchers.get(prefix)
1332 mf = submatchers.get(prefix)
1329 if mf is None:
1333 if mf is None:
1330 mf = match(*matcherargs)
1334 mf = match(*matcherargs)
1331 submatchers[prefix] = mf
1335 submatchers[prefix] = mf
1332
1336
1333 if mf(f[len(prefix) :]):
1337 if mf(f[len(prefix) :]):
1334 return True
1338 return True
1335 return False
1339 return False
1336
1340
1337 matchfuncs.append(matchsubinclude)
1341 matchfuncs.append(matchsubinclude)
1338
1342
1339 regex = b''
1343 regex = b''
1340 if kindpats:
1344 if kindpats:
1341 if all(k == b'rootfilesin' for k, p, s in kindpats):
1345 if all(k == b'rootfilesin' for k, p, s in kindpats):
1342 dirs = {p for k, p, s in kindpats}
1346 dirs = {p for k, p, s in kindpats}
1343
1347
1344 def mf(f):
1348 def mf(f):
1345 i = f.rfind(b'/')
1349 i = f.rfind(b'/')
1346 if i >= 0:
1350 if i >= 0:
1347 dir = f[:i]
1351 dir = f[:i]
1348 else:
1352 else:
1349 dir = b'.'
1353 dir = b'.'
1350 return dir in dirs
1354 return dir in dirs
1351
1355
1352 regex = b'rootfilesin: %s' % stringutil.pprint(list(sorted(dirs)))
1356 regex = b'rootfilesin: %s' % stringutil.pprint(list(sorted(dirs)))
1353 matchfuncs.append(mf)
1357 matchfuncs.append(mf)
1354 else:
1358 else:
1355 regex, mf = _buildregexmatch(kindpats, globsuffix)
1359 regex, mf = _buildregexmatch(kindpats, globsuffix)
1356 matchfuncs.append(mf)
1360 matchfuncs.append(mf)
1357
1361
1358 if len(matchfuncs) == 1:
1362 if len(matchfuncs) == 1:
1359 return regex, matchfuncs[0]
1363 return regex, matchfuncs[0]
1360 else:
1364 else:
1361 return regex, lambda f: any(mf(f) for mf in matchfuncs)
1365 return regex, lambda f: any(mf(f) for mf in matchfuncs)
1362
1366
1363
1367
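matchsubinclude above constructs each submatcher lazily and caches it per prefix, so subincludes that never match cost nothing. The same idiom in isolation; `build` is a hypothetical callback standing in for match(*matcherargs):

def lazy_prefix_dispatch(prefixes, build):
    # cache expensive per-prefix matchers, constructing each on first use
    cache = {}

    def matchfn(f):
        for prefix in prefixes:
            if f.startswith(prefix):
                mf = cache.get(prefix)
                if mf is None:
                    mf = cache[prefix] = build(prefix)
                if mf(f[len(prefix):]):
                    return True
        return False

    return matchfn

m = lazy_prefix_dispatch([b'sub/'], lambda p: (lambda f: f == b'x'))
assert m(b'sub/x') and not m(b'sub/y')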
1364 MAX_RE_SIZE = 20000
1368 MAX_RE_SIZE = 20000
1365
1369
1366
1370
1367 def _joinregexes(regexps):
1371 def _joinregexes(regexps):
1368 """gather multiple regular expressions into a single one"""
1372 """gather multiple regular expressions into a single one"""
1369 return b'|'.join(regexps)
1373 return b'|'.join(regexps)
1370
1374
1371
1375
1372 def _buildregexmatch(kindpats, globsuffix):
1376 def _buildregexmatch(kindpats, globsuffix):
1373 """Build a match function from a list of kinds and kindpats,
1377 """Build a match function from a list of kinds and kindpats,
1374 return regexp string and a matcher function.
1378 return regexp string and a matcher function.
1375
1379
1376 Test too large input
1380 Test too large input
1377 >>> _buildregexmatch([
1381 >>> _buildregexmatch([
1378 ... (b'relglob', b'?' * MAX_RE_SIZE, b'')
1382 ... (b'relglob', b'?' * MAX_RE_SIZE, b'')
1379 ... ], b'$')
1383 ... ], b'$')
1380 Traceback (most recent call last):
1384 Traceback (most recent call last):
1381 ...
1385 ...
1382 Abort: matcher pattern is too long (20009 bytes)
1386 Abort: matcher pattern is too long (20009 bytes)
1383 """
1387 """
1384 try:
1388 try:
1385 allgroups = []
1389 allgroups = []
1386 regexps = [_regex(k, p, globsuffix) for (k, p, s) in kindpats]
1390 regexps = [_regex(k, p, globsuffix) for (k, p, s) in kindpats]
1387 fullregexp = _joinregexes(regexps)
1391 fullregexp = _joinregexes(regexps)
1388
1392
1389 startidx = 0
1393 startidx = 0
1390 groupsize = 0
1394 groupsize = 0
1391 for idx, r in enumerate(regexps):
1395 for idx, r in enumerate(regexps):
1392 piecesize = len(r)
1396 piecesize = len(r)
1393 if piecesize > MAX_RE_SIZE:
1397 if piecesize > MAX_RE_SIZE:
1394 msg = _(b"matcher pattern is too long (%d bytes)") % piecesize
1398 msg = _(b"matcher pattern is too long (%d bytes)") % piecesize
1395 raise error.Abort(msg)
1399 raise error.Abort(msg)
1396 elif (groupsize + piecesize) > MAX_RE_SIZE:
1400 elif (groupsize + piecesize) > MAX_RE_SIZE:
1397 group = regexps[startidx:idx]
1401 group = regexps[startidx:idx]
1398 allgroups.append(_joinregexes(group))
1402 allgroups.append(_joinregexes(group))
1399 startidx = idx
1403 startidx = idx
1400 groupsize = 0
1404 groupsize = 0
1401 groupsize += piecesize + 1
1405 groupsize += piecesize + 1
1402
1406
1403 if startidx == 0:
1407 if startidx == 0:
1404 matcher = _rematcher(fullregexp)
1408 matcher = _rematcher(fullregexp)
1405 func = lambda s: bool(matcher(s))
1409 func = lambda s: bool(matcher(s))
1406 else:
1410 else:
1407 group = regexps[startidx:]
1411 group = regexps[startidx:]
1408 allgroups.append(_joinregexes(group))
1412 allgroups.append(_joinregexes(group))
1409 allmatchers = [_rematcher(g) for g in allgroups]
1413 allmatchers = [_rematcher(g) for g in allgroups]
1410 func = lambda s: any(m(s) for m in allmatchers)
1414 func = lambda s: any(m(s) for m in allmatchers)
1411 return fullregexp, func
1415 return fullregexp, func
1412 except re.error:
1416 except re.error:
1413 for k, p, s in kindpats:
1417 for k, p, s in kindpats:
1414 try:
1418 try:
1415 _rematcher(_regex(k, p, globsuffix))
1419 _rematcher(_regex(k, p, globsuffix))
1416 except re.error:
1420 except re.error:
1417 if s:
1421 if s:
1418 raise error.Abort(
1422 raise error.Abort(
1419 _(b"%s: invalid pattern (%s): %s") % (s, k, p)
1423 _(b"%s: invalid pattern (%s): %s") % (s, k, p)
1420 )
1424 )
1421 else:
1425 else:
1422 raise error.Abort(_(b"invalid pattern (%s): %s") % (k, p))
1426 raise error.Abort(_(b"invalid pattern (%s): %s") % (k, p))
1423 raise error.Abort(_(b"invalid pattern"))
1427 raise error.Abort(_(b"invalid pattern"))
1424
1428
1425
1429
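The startidx/groupsize loop in _buildregexmatch is a first-fit chunking of regexp pieces so that no compiled alternation exceeds MAX_RE_SIZE. The same logic as a standalone sketch, with short byte strings standing in for the regexp pieces:

def chunk_pieces(pieces, limit):
    # first-fit grouping mirroring the loop above; each piece costs
    # len(piece) + 1 to account for the b'|' joining it
    groups = []
    start = 0
    size = 0
    for idx, piece in enumerate(pieces):
        if len(piece) > limit:
            raise ValueError('piece too long (%d bytes)' % len(piece))
        if size + len(piece) > limit:
            groups.append(pieces[start:idx])
            start = idx
            size = 0
        size += len(piece) + 1
    groups.append(pieces[start:])
    return groups

assert chunk_pieces([b'aaaa', b'bbbb', b'cc'], 8) == [[b'aaaa'], [b'bbbb', b'cc']]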
1426 def _patternrootsanddirs(kindpats):
1430 def _patternrootsanddirs(kindpats):
1427 '''Returns roots and directories corresponding to each pattern.
1431 '''Returns roots and directories corresponding to each pattern.
1428
1432
1429 This calculates the roots and directories exactly matching the patterns and
1433 This calculates the roots and directories exactly matching the patterns and
1430 returns a tuple of (roots, dirs) for each. It does not return other
1434 returns a tuple of (roots, dirs) for each. It does not return other
1431 directories which may also need to be considered, like the parent
1435 directories which may also need to be considered, like the parent
1432 directories.
1436 directories.
1433 '''
1437 '''
1434 r = []
1438 r = []
1435 d = []
1439 d = []
1436 for kind, pat, source in kindpats:
1440 for kind, pat, source in kindpats:
1437 if kind in (b'glob', b'rootglob'): # find the non-glob prefix
1441 if kind in (b'glob', b'rootglob'): # find the non-glob prefix
1438 root = []
1442 root = []
1439 for p in pat.split(b'/'):
1443 for p in pat.split(b'/'):
1440 if b'[' in p or b'{' in p or b'*' in p or b'?' in p:
1444 if b'[' in p or b'{' in p or b'*' in p or b'?' in p:
1441 break
1445 break
1442 root.append(p)
1446 root.append(p)
1443 r.append(b'/'.join(root))
1447 r.append(b'/'.join(root))
1444 elif kind in (b'relpath', b'path'):
1448 elif kind in (b'relpath', b'path'):
1445 if pat == b'.':
1449 if pat == b'.':
1446 pat = b''
1450 pat = b''
1447 r.append(pat)
1451 r.append(pat)
1448 elif kind in (b'rootfilesin',):
1452 elif kind in (b'rootfilesin',):
1449 if pat == b'.':
1453 if pat == b'.':
1450 pat = b''
1454 pat = b''
1451 d.append(pat)
1455 d.append(pat)
1452 else: # relglob, re, relre
1456 else: # relglob, re, relre
1453 r.append(b'')
1457 r.append(b'')
1454 return r, d
1458 return r, d
1455
1459
1456
1460
1457 def _roots(kindpats):
1461 def _roots(kindpats):
1458 '''Returns root directories to match recursively from the given patterns.'''
1462 '''Returns root directories to match recursively from the given patterns.'''
1459 roots, dirs = _patternrootsanddirs(kindpats)
1463 roots, dirs = _patternrootsanddirs(kindpats)
1460 return roots
1464 return roots
1461
1465
1462
1466
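The non-glob prefix computation above simply keeps leading path components until one contains a glob metacharacter. Isolated, under a hypothetical globroot name:

def globroot(pat):
    # longest leading run of components with no glob metacharacters,
    # matching the b'glob'/b'rootglob' branch above
    root = []
    for p in pat.split(b'/'):
        if b'[' in p or b'{' in p or b'*' in p or b'?' in p:
            break
        root.append(p)
    return b'/'.join(root)

assert globroot(b'g/h/*') == b'g/h'
assert globroot(b'g*') == b''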
1463 def _rootsdirsandparents(kindpats):
1467 def _rootsdirsandparents(kindpats):
1464 '''Returns roots and exact directories from patterns.
1468 '''Returns roots and exact directories from patterns.
1465
1469
1466 `roots` are directories to match recursively, `dirs` should
1470 `roots` are directories to match recursively, `dirs` should
1467 be matched non-recursively, and `parents` are the implicitly required
1471 be matched non-recursively, and `parents` are the implicitly required
1468 directories to walk to items in either roots or dirs.
1472 directories to walk to items in either roots or dirs.
1469
1473
1470 Returns a tuple of (roots, dirs, parents).
1474 Returns a tuple of (roots, dirs, parents).
1471
1475
1472 >>> r = _rootsdirsandparents(
1476 >>> r = _rootsdirsandparents(
1473 ... [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''),
1477 ... [(b'glob', b'g/h/*', b''), (b'glob', b'g/h', b''),
1474 ... (b'glob', b'g*', b'')])
1478 ... (b'glob', b'g*', b'')])
1475 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1479 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1476 (['g/h', 'g/h', ''], []) ['', 'g']
1480 (['g/h', 'g/h', ''], []) ['', 'g']
1477 >>> r = _rootsdirsandparents(
1481 >>> r = _rootsdirsandparents(
1478 ... [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')])
1482 ... [(b'rootfilesin', b'g/h', b''), (b'rootfilesin', b'', b'')])
1479 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1483 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1480 ([], ['g/h', '']) ['', 'g']
1484 ([], ['g/h', '']) ['', 'g']
1481 >>> r = _rootsdirsandparents(
1485 >>> r = _rootsdirsandparents(
1482 ... [(b'relpath', b'r', b''), (b'path', b'p/p', b''),
1486 ... [(b'relpath', b'r', b''), (b'path', b'p/p', b''),
1483 ... (b'path', b'', b'')])
1487 ... (b'path', b'', b'')])
1484 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1488 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1485 (['r', 'p/p', ''], []) ['', 'p']
1489 (['r', 'p/p', ''], []) ['', 'p']
1486 >>> r = _rootsdirsandparents(
1490 >>> r = _rootsdirsandparents(
1487 ... [(b'relglob', b'rg*', b''), (b're', b're/', b''),
1491 ... [(b'relglob', b'rg*', b''), (b're', b're/', b''),
1488 ... (b'relre', b'rr', b'')])
1492 ... (b'relre', b'rr', b'')])
1489 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1493 >>> print(r[0:2], sorted(r[2])) # the set has an unstable output
1490 (['', '', ''], []) ['']
1494 (['', '', ''], []) ['']
1491 '''
1495 '''
1492 r, d = _patternrootsanddirs(kindpats)
1496 r, d = _patternrootsanddirs(kindpats)
1493
1497
1494 p = set()
1498 p = set()
1495 # Add the parents as non-recursive/exact directories, since they must be
1499 # Add the parents as non-recursive/exact directories, since they must be
1496 # scanned to get to either the roots or the other exact directories.
1500 # scanned to get to either the roots or the other exact directories.
1497 p.update(pathutil.dirs(d))
1501 p.update(pathutil.dirs(d))
1498 p.update(pathutil.dirs(r))
1502 p.update(pathutil.dirs(r))
1499
1503
1500 # FIXME: all uses of this function convert these to sets, do so before
1504 # FIXME: all uses of this function convert these to sets, do so before
1501 # returning.
1505 # returning.
1502 # FIXME: all uses of this function do not need anything in 'roots' and
1506 # FIXME: all uses of this function do not need anything in 'roots' and
1503 # 'dirs' to also be in 'parents', consider removing them before returning.
1507 # 'dirs' to also be in 'parents', consider removing them before returning.
1504 return r, d, p
1508 return r, d, p
1505
1509
1506
1510
1507 def _explicitfiles(kindpats):
1511 def _explicitfiles(kindpats):
1508 '''Returns the potential explicit filenames from the patterns.
1512 '''Returns the potential explicit filenames from the patterns.
1509
1513
1510 >>> _explicitfiles([(b'path', b'foo/bar', b'')])
1514 >>> _explicitfiles([(b'path', b'foo/bar', b'')])
1511 ['foo/bar']
1515 ['foo/bar']
1512 >>> _explicitfiles([(b'rootfilesin', b'foo/bar', b'')])
1516 >>> _explicitfiles([(b'rootfilesin', b'foo/bar', b'')])
1513 []
1517 []
1514 '''
1518 '''
1515 # Keep only the pattern kinds where one can specify filenames (vs only
1519 # Keep only the pattern kinds where one can specify filenames (vs only
1516 # directory names).
1520 # directory names).
1517 filable = [kp for kp in kindpats if kp[0] not in (b'rootfilesin',)]
1521 filable = [kp for kp in kindpats if kp[0] not in (b'rootfilesin',)]
1518 return _roots(filable)
1522 return _roots(filable)
1519
1523
1520
1524
1521 def _prefix(kindpats):
1525 def _prefix(kindpats):
1522 '''Whether all the patterns match a prefix (i.e. recursively)'''
1526 '''Whether all the patterns match a prefix (i.e. recursively)'''
1523 for kind, pat, source in kindpats:
1527 for kind, pat, source in kindpats:
1524 if kind not in (b'path', b'relpath'):
1528 if kind not in (b'path', b'relpath'):
1525 return False
1529 return False
1526 return True
1530 return True
1527
1531
1528
1532
1529 _commentre = None
1533 _commentre = None
1530
1534
1531
1535
1532 def readpatternfile(filepath, warn, sourceinfo=False):
1536 def readpatternfile(filepath, warn, sourceinfo=False):
1533 '''parse a pattern file, returning a list of
1537 '''parse a pattern file, returning a list of
1534 patterns. These patterns should be given to compile()
1538 patterns. These patterns should be given to compile()
1535 to be validated and converted into a match function.
1539 to be validated and converted into a match function.
1536
1540
1537 trailing whitespace is dropped.
1541 trailing whitespace is dropped.
1538 the escape character is backslash.
1542 the escape character is backslash.
1539 comments start with #.
1543 comments start with #.
1540 empty lines are skipped.
1544 empty lines are skipped.
1541
1545
1542 lines can be of the following formats:
1546 lines can be of the following formats:
1543
1547
1544 syntax: regexp # defaults following lines to non-rooted regexps
1548 syntax: regexp # defaults following lines to non-rooted regexps
1545 syntax: glob # defaults following lines to non-rooted globs
1549 syntax: glob # defaults following lines to non-rooted globs
1546 re:pattern # non-rooted regular expression
1550 re:pattern # non-rooted regular expression
1547 glob:pattern # non-rooted glob
1551 glob:pattern # non-rooted glob
1548 rootglob:pat # rooted glob (same root as ^ in regexps)
1552 rootglob:pat # rooted glob (same root as ^ in regexps)
1549 pattern # pattern of the current default type
1553 pattern # pattern of the current default type
1550
1554
1551 if sourceinfo is set, returns a list of tuples:
1555 if sourceinfo is set, returns a list of tuples:
1552 (pattern, lineno, originalline).
1556 (pattern, lineno, originalline).
1553 This is useful to debug ignore patterns.
1557 This is useful to debug ignore patterns.
1554 '''
1558 '''
1555
1559
1556 if rustmod is not None:
1560 if rustmod is not None:
1557 result, warnings = rustmod.read_pattern_file(
1561 result, warnings = rustmod.read_pattern_file(
1558 filepath, bool(warn), sourceinfo,
1562 filepath, bool(warn), sourceinfo,
1559 )
1563 )
1560
1564
1561 for warning_params in warnings:
1565 for warning_params in warnings:
1562 # Can't be easily emitted from Rust, because it would require
1566 # Can't be easily emitted from Rust, because it would require
1563 # a mechanism for both gettext and calling the `warn` function.
1567 # a mechanism for both gettext and calling the `warn` function.
1564 warn(_(b"%s: ignoring invalid syntax '%s'\n") % warning_params)
1568 warn(_(b"%s: ignoring invalid syntax '%s'\n") % warning_params)
1565
1569
1566 return result
1570 return result
1567
1571
1568 syntaxes = {
1572 syntaxes = {
1569 b're': b'relre:',
1573 b're': b'relre:',
1570 b'regexp': b'relre:',
1574 b'regexp': b'relre:',
1571 b'glob': b'relglob:',
1575 b'glob': b'relglob:',
1572 b'rootglob': b'rootglob:',
1576 b'rootglob': b'rootglob:',
1573 b'include': b'include',
1577 b'include': b'include',
1574 b'subinclude': b'subinclude',
1578 b'subinclude': b'subinclude',
1575 }
1579 }
1576 syntax = b'relre:'
1580 syntax = b'relre:'
1577 patterns = []
1581 patterns = []
1578
1582
1579 fp = open(filepath, b'rb')
1583 fp = open(filepath, b'rb')
1580 for lineno, line in enumerate(util.iterfile(fp), start=1):
1584 for lineno, line in enumerate(util.iterfile(fp), start=1):
1581 if b"#" in line:
1585 if b"#" in line:
1582 global _commentre
1586 global _commentre
1583 if not _commentre:
1587 if not _commentre:
1584 _commentre = util.re.compile(br'((?:^|[^\\])(?:\\\\)*)#.*')
1588 _commentre = util.re.compile(br'((?:^|[^\\])(?:\\\\)*)#.*')
1585 # remove comments prefixed by an even number of escapes
1589 # remove comments prefixed by an even number of escapes
1586 m = _commentre.search(line)
1590 m = _commentre.search(line)
1587 if m:
1591 if m:
1588 line = line[: m.end(1)]
1592 line = line[: m.end(1)]
1589 # fixup properly escaped comments that survived the above
1593 # fixup properly escaped comments that survived the above
1590 line = line.replace(b"\\#", b"#")
1594 line = line.replace(b"\\#", b"#")
1591 line = line.rstrip()
1595 line = line.rstrip()
1592 if not line:
1596 if not line:
1593 continue
1597 continue
1594
1598
1595 if line.startswith(b'syntax:'):
1599 if line.startswith(b'syntax:'):
1596 s = line[7:].strip()
1600 s = line[7:].strip()
1597 try:
1601 try:
1598 syntax = syntaxes[s]
1602 syntax = syntaxes[s]
1599 except KeyError:
1603 except KeyError:
1600 if warn:
1604 if warn:
1601 warn(
1605 warn(
1602 _(b"%s: ignoring invalid syntax '%s'\n") % (filepath, s)
1606 _(b"%s: ignoring invalid syntax '%s'\n") % (filepath, s)
1603 )
1607 )
1604 continue
1608 continue
1605
1609
1606 linesyntax = syntax
1610 linesyntax = syntax
1607 for s, rels in pycompat.iteritems(syntaxes):
1611 for s, rels in pycompat.iteritems(syntaxes):
1608 if line.startswith(rels):
1612 if line.startswith(rels):
1609 linesyntax = rels
1613 linesyntax = rels
1610 line = line[len(rels) :]
1614 line = line[len(rels) :]
1611 break
1615 break
1612 elif line.startswith(s + b':'):
1616 elif line.startswith(s + b':'):
1613 linesyntax = rels
1617 linesyntax = rels
1614 line = line[len(s) + 1 :]
1618 line = line[len(s) + 1 :]
1615 break
1619 break
1616 if sourceinfo:
1620 if sourceinfo:
1617 patterns.append((linesyntax + line, lineno, line))
1621 patterns.append((linesyntax + line, lineno, line))
1618 else:
1622 else:
1619 patterns.append(linesyntax + line)
1623 patterns.append(linesyntax + line)
1620 fp.close()
1624 fp.close()
1621 return patterns
1625 return patterns
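The comment regexp used above only recognizes a `#` preceded by an even number of backslashes, so escaped hashes survive the stripping. A quick check mirroring the parsing steps in the loop (standalone, outside the module's caching):

import re

_commentre = re.compile(br'((?:^|[^\\])(?:\\\\)*)#.*')

line = b'foo\\#bar # a comment'
m = _commentre.search(line)
if m:  # a real comment was found: cut it off
    line = line[:m.end(1)]
line = line.replace(b'\\#', b'#').rstrip()
assert line == b'foo#bar'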
@@ -1,2709 +1,2710 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import hashlib
11 import hashlib
12 import shutil
12 import shutil
13 import stat
13 import stat
14 import struct
14 import struct
15
15
16 from .i18n import _
16 from .i18n import _
17 from .node import (
17 from .node import (
18 addednodeid,
18 addednodeid,
19 bin,
19 bin,
20 hex,
20 hex,
21 modifiednodeid,
21 modifiednodeid,
22 nullhex,
22 nullhex,
23 nullid,
23 nullid,
24 nullrev,
24 nullrev,
25 )
25 )
26 from .pycompat import delattr
26 from .pycompat import delattr
27 from .thirdparty import attr
27 from .thirdparty import attr
28 from . import (
28 from . import (
29 copies,
29 copies,
30 encoding,
30 encoding,
31 error,
31 error,
32 filemerge,
32 filemerge,
33 match as matchmod,
33 match as matchmod,
34 obsutil,
34 obsutil,
35 pathutil,
35 pycompat,
36 pycompat,
36 scmutil,
37 scmutil,
37 subrepoutil,
38 subrepoutil,
38 util,
39 util,
39 worker,
40 worker,
40 )
41 )
41
42
42 _pack = struct.pack
43 _pack = struct.pack
43 _unpack = struct.unpack
44 _unpack = struct.unpack
44
45
45
46
46 def _droponode(data):
47 def _droponode(data):
47 # used for compatibility for v1
48 # used for compatibility for v1
48 bits = data.split(b'\0')
49 bits = data.split(b'\0')
49 bits = bits[:-2] + bits[-1:]
50 bits = bits[:-2] + bits[-1:]
50 return b'\0'.join(bits)
51 return b'\0'.join(bits)
51
52
52
53
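_droponode above deletes the second-to-last NUL-separated field, the "other file node" entry that v1 records lack. With hypothetical field values, using _droponode as defined above:

rec = b'\0'.join([b'f.txt', b'u', b'deadbeef', b'cafebabe', b'x'])
assert _droponode(rec) == b'\0'.join([b'f.txt', b'u', b'deadbeef', b'x'])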
53 # Merge state record types. See ``mergestate`` docs for more.
54 # Merge state record types. See ``mergestate`` docs for more.
54 RECORD_LOCAL = b'L'
55 RECORD_LOCAL = b'L'
55 RECORD_OTHER = b'O'
56 RECORD_OTHER = b'O'
56 RECORD_MERGED = b'F'
57 RECORD_MERGED = b'F'
57 RECORD_CHANGEDELETE_CONFLICT = b'C'
58 RECORD_CHANGEDELETE_CONFLICT = b'C'
58 RECORD_MERGE_DRIVER_MERGE = b'D'
59 RECORD_MERGE_DRIVER_MERGE = b'D'
59 RECORD_PATH_CONFLICT = b'P'
60 RECORD_PATH_CONFLICT = b'P'
60 RECORD_MERGE_DRIVER_STATE = b'm'
61 RECORD_MERGE_DRIVER_STATE = b'm'
61 RECORD_FILE_VALUES = b'f'
62 RECORD_FILE_VALUES = b'f'
62 RECORD_LABELS = b'l'
63 RECORD_LABELS = b'l'
63 RECORD_OVERRIDE = b't'
64 RECORD_OVERRIDE = b't'
64 RECORD_UNSUPPORTED_MANDATORY = b'X'
65 RECORD_UNSUPPORTED_MANDATORY = b'X'
65 RECORD_UNSUPPORTED_ADVISORY = b'x'
66 RECORD_UNSUPPORTED_ADVISORY = b'x'
66
67
67 MERGE_DRIVER_STATE_UNMARKED = b'u'
68 MERGE_DRIVER_STATE_UNMARKED = b'u'
68 MERGE_DRIVER_STATE_MARKED = b'm'
69 MERGE_DRIVER_STATE_MARKED = b'm'
69 MERGE_DRIVER_STATE_SUCCESS = b's'
70 MERGE_DRIVER_STATE_SUCCESS = b's'
70
71
71 MERGE_RECORD_UNRESOLVED = b'u'
72 MERGE_RECORD_UNRESOLVED = b'u'
72 MERGE_RECORD_RESOLVED = b'r'
73 MERGE_RECORD_RESOLVED = b'r'
73 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
74 MERGE_RECORD_UNRESOLVED_PATH = b'pu'
74 MERGE_RECORD_RESOLVED_PATH = b'pr'
75 MERGE_RECORD_RESOLVED_PATH = b'pr'
75 MERGE_RECORD_DRIVER_RESOLVED = b'd'
76 MERGE_RECORD_DRIVER_RESOLVED = b'd'
76
77
77 ACTION_FORGET = b'f'
78 ACTION_FORGET = b'f'
78 ACTION_REMOVE = b'r'
79 ACTION_REMOVE = b'r'
79 ACTION_ADD = b'a'
80 ACTION_ADD = b'a'
80 ACTION_GET = b'g'
81 ACTION_GET = b'g'
81 ACTION_PATH_CONFLICT = b'p'
82 ACTION_PATH_CONFLICT = b'p'
82 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
83 ACTION_PATH_CONFLICT_RESOLVE = b'pr'
83 ACTION_ADD_MODIFIED = b'am'
84 ACTION_ADD_MODIFIED = b'am'
84 ACTION_CREATED = b'c'
85 ACTION_CREATED = b'c'
85 ACTION_DELETED_CHANGED = b'dc'
86 ACTION_DELETED_CHANGED = b'dc'
86 ACTION_CHANGED_DELETED = b'cd'
87 ACTION_CHANGED_DELETED = b'cd'
87 ACTION_MERGE = b'm'
88 ACTION_MERGE = b'm'
88 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
89 ACTION_LOCAL_DIR_RENAME_GET = b'dg'
89 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
90 ACTION_DIR_RENAME_MOVE_LOCAL = b'dm'
90 ACTION_KEEP = b'k'
91 ACTION_KEEP = b'k'
91 ACTION_EXEC = b'e'
92 ACTION_EXEC = b'e'
92 ACTION_CREATED_MERGE = b'cm'
93 ACTION_CREATED_MERGE = b'cm'
93
94
94
95
95 class mergestate(object):
96 class mergestate(object):
96 '''track 3-way merge state of individual files
97 '''track 3-way merge state of individual files
97
98
98 The merge state is stored on disk when needed. Two files are used: one with
99 The merge state is stored on disk when needed. Two files are used: one with
99 an old format (version 1), and one with a new format (version 2). Version 2
100 an old format (version 1), and one with a new format (version 2). Version 2
100 stores a superset of the data in version 1, and may gain new kinds of
101 stores a superset of the data in version 1, and may gain new kinds of
101 records in the future. For more about the new format, see the documentation for
102 records in the future. For more about the new format, see the documentation for
102 `_readrecordsv2`.
103 `_readrecordsv2`.
103
104
104 Each record can contain arbitrary content, and has an associated type. This
105 Each record can contain arbitrary content, and has an associated type. This
105 `type` should be a letter. If `type` is uppercase, the record is mandatory:
106 `type` should be a letter. If `type` is uppercase, the record is mandatory:
106 versions of Mercurial that don't support it should abort. If `type` is
107 versions of Mercurial that don't support it should abort. If `type` is
107 lowercase, the record can be safely ignored.
108 lowercase, the record can be safely ignored.
108
109
109 Currently known records:
110 Currently known records:
110
111
111 L: the node of the "local" part of the merge (hexified version)
112 L: the node of the "local" part of the merge (hexified version)
112 O: the node of the "other" part of the merge (hexified version)
113 O: the node of the "other" part of the merge (hexified version)
113 F: a file to be merged entry
114 F: a file to be merged entry
114 C: a change/delete or delete/change conflict
115 C: a change/delete or delete/change conflict
115 D: a file that the external merge driver will merge internally
116 D: a file that the external merge driver will merge internally
116 (experimental)
117 (experimental)
117 P: a path conflict (file vs directory)
118 P: a path conflict (file vs directory)
118 m: the external merge driver defined for this merge plus its run state
119 m: the external merge driver defined for this merge plus its run state
119 (experimental)
120 (experimental)
120 f: a (filename, dictionary) tuple of optional values for a given file
121 f: a (filename, dictionary) tuple of optional values for a given file
121 X: unsupported mandatory record type (used in tests)
122 X: unsupported mandatory record type (used in tests)
122 x: unsupported advisory record type (used in tests)
123 x: unsupported advisory record type (used in tests)
123 l: the labels for the parts of the merge.
124 l: the labels for the parts of the merge.
124
125
125 Merge driver run states (experimental):
126 Merge driver run states (experimental):
126 u: driver-resolved files unmarked -- needs to be run next time we're about
127 u: driver-resolved files unmarked -- needs to be run next time we're about
127 to resolve or commit
128 to resolve or commit
128 m: driver-resolved files marked -- only needs to be run before commit
129 m: driver-resolved files marked -- only needs to be run before commit
129 s: success/skipped -- does not need to be run any more
130 s: success/skipped -- does not need to be run any more
130
131
131 Merge record states (stored in self._state, indexed by filename):
132 Merge record states (stored in self._state, indexed by filename):
132 u: unresolved conflict
133 u: unresolved conflict
133 r: resolved conflict
134 r: resolved conflict
134 pu: unresolved path conflict (file conflicts with directory)
135 pu: unresolved path conflict (file conflicts with directory)
135 pr: resolved path conflict
136 pr: resolved path conflict
136 d: driver-resolved conflict
137 d: driver-resolved conflict
137
138
138 The resolve command transitions between 'u' and 'r' for conflicts and
139 The resolve command transitions between 'u' and 'r' for conflicts and
139 'pu' and 'pr' for path conflicts.
140 'pu' and 'pr' for path conflicts.
140 '''
141 '''
141
142
142 statepathv1 = b'merge/state'
143 statepathv1 = b'merge/state'
143 statepathv2 = b'merge/state2'
144 statepathv2 = b'merge/state2'
144
145
145 @staticmethod
146 @staticmethod
146 def clean(repo, node=None, other=None, labels=None):
147 def clean(repo, node=None, other=None, labels=None):
147 """Initialize a brand new merge state, removing any existing state on
148 """Initialize a brand new merge state, removing any existing state on
148 disk."""
149 disk."""
149 ms = mergestate(repo)
150 ms = mergestate(repo)
150 ms.reset(node, other, labels)
151 ms.reset(node, other, labels)
151 return ms
152 return ms
152
153
153 @staticmethod
154 @staticmethod
154 def read(repo):
155 def read(repo):
155 """Initialize the merge state, reading it from disk."""
156 """Initialize the merge state, reading it from disk."""
156 ms = mergestate(repo)
157 ms = mergestate(repo)
157 ms._read()
158 ms._read()
158 return ms
159 return ms
159
160
160 def __init__(self, repo):
161 def __init__(self, repo):
161 """Initialize the merge state.
162 """Initialize the merge state.
162
163
163 Do not use this directly! Instead call read() or clean()."""
164 Do not use this directly! Instead call read() or clean()."""
164 self._repo = repo
165 self._repo = repo
165 self._dirty = False
166 self._dirty = False
166 self._labels = None
167 self._labels = None
167
168
168 def reset(self, node=None, other=None, labels=None):
169 def reset(self, node=None, other=None, labels=None):
169 self._state = {}
170 self._state = {}
170 self._stateextras = {}
171 self._stateextras = {}
171 self._local = None
172 self._local = None
172 self._other = None
173 self._other = None
173 self._labels = labels
174 self._labels = labels
174 for var in ('localctx', 'otherctx'):
175 for var in ('localctx', 'otherctx'):
175 if var in vars(self):
176 if var in vars(self):
176 delattr(self, var)
177 delattr(self, var)
177 if node:
178 if node:
178 self._local = node
179 self._local = node
179 self._other = other
180 self._other = other
180 self._readmergedriver = None
181 self._readmergedriver = None
181 if self.mergedriver:
182 if self.mergedriver:
182 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
183 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
183 else:
184 else:
184 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
185 self._mdstate = MERGE_DRIVER_STATE_UNMARKED
185 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
186 shutil.rmtree(self._repo.vfs.join(b'merge'), True)
186 self._results = {}
187 self._results = {}
187 self._dirty = False
188 self._dirty = False
188
189
189 def _read(self):
190 def _read(self):
190 """Analyse each record content to restore a serialized state from disk
191 """Analyse each record content to restore a serialized state from disk
191
192
192 This function process "record" entry produced by the de-serialization
193 This function process "record" entry produced by the de-serialization
193 of on disk file.
194 of on disk file.
194 """
195 """
195 self._state = {}
196 self._state = {}
196 self._stateextras = {}
197 self._stateextras = {}
197 self._local = None
198 self._local = None
198 self._other = None
199 self._other = None
199 for var in ('localctx', 'otherctx'):
200 for var in ('localctx', 'otherctx'):
200 if var in vars(self):
201 if var in vars(self):
201 delattr(self, var)
202 delattr(self, var)
202 self._readmergedriver = None
203 self._readmergedriver = None
203 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
204 self._mdstate = MERGE_DRIVER_STATE_SUCCESS
204 unsupported = set()
205 unsupported = set()
205 records = self._readrecords()
206 records = self._readrecords()
206 for rtype, record in records:
207 for rtype, record in records:
207 if rtype == RECORD_LOCAL:
208 if rtype == RECORD_LOCAL:
208 self._local = bin(record)
209 self._local = bin(record)
209 elif rtype == RECORD_OTHER:
210 elif rtype == RECORD_OTHER:
210 self._other = bin(record)
211 self._other = bin(record)
211 elif rtype == RECORD_MERGE_DRIVER_STATE:
212 elif rtype == RECORD_MERGE_DRIVER_STATE:
212 bits = record.split(b'\0', 1)
213 bits = record.split(b'\0', 1)
213 mdstate = bits[1]
214 mdstate = bits[1]
214 if len(mdstate) != 1 or mdstate not in (
215 if len(mdstate) != 1 or mdstate not in (
215 MERGE_DRIVER_STATE_UNMARKED,
216 MERGE_DRIVER_STATE_UNMARKED,
216 MERGE_DRIVER_STATE_MARKED,
217 MERGE_DRIVER_STATE_MARKED,
217 MERGE_DRIVER_STATE_SUCCESS,
218 MERGE_DRIVER_STATE_SUCCESS,
218 ):
219 ):
219 # the merge driver should be idempotent, so just rerun it
220 # the merge driver should be idempotent, so just rerun it
220 mdstate = MERGE_DRIVER_STATE_UNMARKED
221 mdstate = MERGE_DRIVER_STATE_UNMARKED
221
222
222 self._readmergedriver = bits[0]
223 self._readmergedriver = bits[0]
223 self._mdstate = mdstate
224 self._mdstate = mdstate
224 elif rtype in (
225 elif rtype in (
225 RECORD_MERGED,
226 RECORD_MERGED,
226 RECORD_CHANGEDELETE_CONFLICT,
227 RECORD_CHANGEDELETE_CONFLICT,
227 RECORD_PATH_CONFLICT,
228 RECORD_PATH_CONFLICT,
228 RECORD_MERGE_DRIVER_MERGE,
229 RECORD_MERGE_DRIVER_MERGE,
229 ):
230 ):
230 bits = record.split(b'\0')
231 bits = record.split(b'\0')
231 self._state[bits[0]] = bits[1:]
232 self._state[bits[0]] = bits[1:]
232 elif rtype == RECORD_FILE_VALUES:
233 elif rtype == RECORD_FILE_VALUES:
233 filename, rawextras = record.split(b'\0', 1)
234 filename, rawextras = record.split(b'\0', 1)
234 extraparts = rawextras.split(b'\0')
235 extraparts = rawextras.split(b'\0')
235 extras = {}
236 extras = {}
236 i = 0
237 i = 0
237 while i < len(extraparts):
238 while i < len(extraparts):
238 extras[extraparts[i]] = extraparts[i + 1]
239 extras[extraparts[i]] = extraparts[i + 1]
239 i += 2
240 i += 2
240
241
241 self._stateextras[filename] = extras
242 self._stateextras[filename] = extras
242 elif rtype == RECORD_LABELS:
243 elif rtype == RECORD_LABELS:
243 labels = record.split(b'\0', 2)
244 labels = record.split(b'\0', 2)
244 self._labels = [l for l in labels if len(l) > 0]
245 self._labels = [l for l in labels if len(l) > 0]
245 elif not rtype.islower():
246 elif not rtype.islower():
246 unsupported.add(rtype)
247 unsupported.add(rtype)
247 self._results = {}
248 self._results = {}
248 self._dirty = False
249 self._dirty = False
249
250
250 if unsupported:
251 if unsupported:
251 raise error.UnsupportedMergeRecords(unsupported)
252 raise error.UnsupportedMergeRecords(unsupported)
252
253
253 def _readrecords(self):
254 def _readrecords(self):
254 """Read merge state from disk and return a list of record (TYPE, data)
255 """Read merge state from disk and return a list of record (TYPE, data)
255
256
256 We read data from both v1 and v2 files and decide which one to use.
257 We read data from both v1 and v2 files and decide which one to use.
257
258
258 V1 has been used by versions prior to 2.9.1 and contains less data
259 V1 has been used by versions prior to 2.9.1 and contains less data
259 than v2. We read both versions and check whether any data in v2
260 than v2. We read both versions and check whether any data in v2
260 contradicts v1. If there is no contradiction we can safely assume that
261 contradicts v1. If there is no contradiction we can safely assume that
261 both v1 and v2 were written at the same time and use the extra data in
262 both v1 and v2 were written at the same time and use the extra data in
262 v2. If there is a contradiction we ignore the v2 content, as we assume
263 v2. If there is a contradiction we ignore the v2 content, as we assume
263 an old version of Mercurial overwrote the mergestate file and left a
264 an old version of Mercurial overwrote the mergestate file and left a
264 stale v2 file around.
265 stale v2 file around.
265
266
266 returns a list of records [(TYPE, data), ...]"""
267 returns a list of records [(TYPE, data), ...]"""
267 v1records = self._readrecordsv1()
268 v1records = self._readrecordsv1()
268 v2records = self._readrecordsv2()
269 v2records = self._readrecordsv2()
269 if self._v1v2match(v1records, v2records):
270 if self._v1v2match(v1records, v2records):
270 return v2records
271 return v2records
271 else:
272 else:
272 # v1 file is newer than v2 file, use it
273 # v1 file is newer than v2 file, use it
273 # we have to infer the "other" changeset of the merge
274 # we have to infer the "other" changeset of the merge
274 # we cannot do better than that with v1 of the format
275 # we cannot do better than that with v1 of the format
275 mctx = self._repo[None].parents()[-1]
276 mctx = self._repo[None].parents()[-1]
276 v1records.append((RECORD_OTHER, mctx.hex()))
277 v1records.append((RECORD_OTHER, mctx.hex()))
277 # add placeholder "other" file node information
278 # add placeholder "other" file node information
278 # nobody is using it yet so we do not need to fetch the data
279 # nobody is using it yet so we do not need to fetch the data
279 # if mctx was wrong, `mctx[bits[-2]]` may fail.
280 # if mctx was wrong, `mctx[bits[-2]]` may fail.
280 for idx, r in enumerate(v1records):
281 for idx, r in enumerate(v1records):
281 if r[0] == RECORD_MERGED:
282 if r[0] == RECORD_MERGED:
282 bits = r[1].split(b'\0')
283 bits = r[1].split(b'\0')
283 bits.insert(-2, b'')
284 bits.insert(-2, b'')
284 v1records[idx] = (r[0], b'\0'.join(bits))
285 v1records[idx] = (r[0], b'\0'.join(bits))
285 return v1records
286 return v1records
286
287
287 def _v1v2match(self, v1records, v2records):
288 def _v1v2match(self, v1records, v2records):
288 oldv2 = set() # old format version of v2 record
289 oldv2 = set() # old format version of v2 record
289 for rec in v2records:
290 for rec in v2records:
290 if rec[0] == RECORD_LOCAL:
291 if rec[0] == RECORD_LOCAL:
291 oldv2.add(rec)
292 oldv2.add(rec)
292 elif rec[0] == RECORD_MERGED:
293 elif rec[0] == RECORD_MERGED:
293 # drop the onode data (not contained in v1)
294 # drop the onode data (not contained in v1)
294 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
295 oldv2.add((RECORD_MERGED, _droponode(rec[1])))
295 for rec in v1records:
296 for rec in v1records:
296 if rec not in oldv2:
297 if rec not in oldv2:
297 return False
298 return False
298 else:
299 else:
299 return True
300 return True
300
301
301 def _readrecordsv1(self):
302 def _readrecordsv1(self):
302 """read on disk merge state for version 1 file
303 """read on disk merge state for version 1 file
303
304
304 returns a list of records [(TYPE, data), ...]
305 returns a list of records [(TYPE, data), ...]
305
306
306 Note: the "F" data from this file are one entry short
307 Note: the "F" data from this file are one entry short
307 (no "other file node" entry)
308 (no "other file node" entry)
308 """
309 """
309 records = []
310 records = []
310 try:
311 try:
311 f = self._repo.vfs(self.statepathv1)
312 f = self._repo.vfs(self.statepathv1)
312 for i, l in enumerate(f):
313 for i, l in enumerate(f):
313 if i == 0:
314 if i == 0:
314 records.append((RECORD_LOCAL, l[:-1]))
315 records.append((RECORD_LOCAL, l[:-1]))
315 else:
316 else:
316 records.append((RECORD_MERGED, l[:-1]))
317 records.append((RECORD_MERGED, l[:-1]))
317 f.close()
318 f.close()
318 except IOError as err:
319 except IOError as err:
319 if err.errno != errno.ENOENT:
320 if err.errno != errno.ENOENT:
320 raise
321 raise
321 return records
322 return records
322
323
323 def _readrecordsv2(self):
324 def _readrecordsv2(self):
324 """read on disk merge state for version 2 file
325 """read on disk merge state for version 2 file
325
326
326 This format is a list of arbitrary records of the form:
327 This format is a list of arbitrary records of the form:
327
328
328 [type][length][content]
329 [type][length][content]
329
330
330 `type` is a single character, `length` is a 4 byte integer, and
331 `type` is a single character, `length` is a 4 byte integer, and
331 `content` is an arbitrary byte sequence of length `length`.
332 `content` is an arbitrary byte sequence of length `length`.
332
333
333 Mercurial versions prior to 3.7 have a bug where if there are
334 Mercurial versions prior to 3.7 have a bug where if there are
334 unsupported mandatory merge records, attempting to clear out the merge
335 unsupported mandatory merge records, attempting to clear out the merge
335 state with hg update --clean or similar aborts. The 't' record type
336 state with hg update --clean or similar aborts. The 't' record type
336 works around that by writing out what those versions treat as an
337 works around that by writing out what those versions treat as an
337 advisory record, but later versions interpret as special: the first
338 advisory record, but later versions interpret as special: the first
338 character is the 'real' record type and everything onwards is the data.
339 character is the 'real' record type and everything onwards is the data.
339
340
340 Returns list of records [(TYPE, data), ...]."""
341 Returns list of records [(TYPE, data), ...]."""
341 records = []
342 records = []
342 try:
343 try:
343 f = self._repo.vfs(self.statepathv2)
344 f = self._repo.vfs(self.statepathv2)
344 data = f.read()
345 data = f.read()
345 off = 0
346 off = 0
346 end = len(data)
347 end = len(data)
347 while off < end:
348 while off < end:
348 rtype = data[off : off + 1]
349 rtype = data[off : off + 1]
349 off += 1
350 off += 1
350 length = _unpack(b'>I', data[off : (off + 4)])[0]
351 length = _unpack(b'>I', data[off : (off + 4)])[0]
351 off += 4
352 off += 4
352 record = data[off : (off + length)]
353 record = data[off : (off + length)]
353 off += length
354 off += length
354 if rtype == RECORD_OVERRIDE:
355 if rtype == RECORD_OVERRIDE:
355 rtype, record = record[0:1], record[1:]
356 rtype, record = record[0:1], record[1:]
356 records.append((rtype, record))
357 records.append((rtype, record))
357 f.close()
358 f.close()
358 except IOError as err:
359 except IOError as err:
359 if err.errno != errno.ENOENT:
360 if err.errno != errno.ENOENT:
360 raise
361 raise
361 return records
362 return records
362
363
363 @util.propertycache
364 @util.propertycache
364 def mergedriver(self):
365 def mergedriver(self):
365 # protect against the following:
366 # protect against the following:
366 # - A configures a malicious merge driver in their hgrc, then
367 # - A configures a malicious merge driver in their hgrc, then
367 # pauses the merge
368 # pauses the merge
368 # - A edits their hgrc to remove references to the merge driver
369 # - A edits their hgrc to remove references to the merge driver
369 # - A gives a copy of their entire repo, including .hg, to B
370 # - A gives a copy of their entire repo, including .hg, to B
370 # - B inspects .hgrc and finds it to be clean
371 # - B inspects .hgrc and finds it to be clean
371 # - B then continues the merge and the malicious merge driver
372 # - B then continues the merge and the malicious merge driver
372 # gets invoked
373 # gets invoked
373 configmergedriver = self._repo.ui.config(
374 configmergedriver = self._repo.ui.config(
374 b'experimental', b'mergedriver'
375 b'experimental', b'mergedriver'
375 )
376 )
376 if (
377 if (
377 self._readmergedriver is not None
378 self._readmergedriver is not None
378 and self._readmergedriver != configmergedriver
379 and self._readmergedriver != configmergedriver
379 ):
380 ):
380 raise error.ConfigError(
381 raise error.ConfigError(
381 _(b"merge driver changed since merge started"),
382 _(b"merge driver changed since merge started"),
382 hint=_(b"revert merge driver change or abort merge"),
383 hint=_(b"revert merge driver change or abort merge"),
383 )
384 )
384
385
385 return configmergedriver
386 return configmergedriver
386
387
387 @util.propertycache
388 @util.propertycache
388 def localctx(self):
389 def localctx(self):
389 if self._local is None:
390 if self._local is None:
390 msg = b"localctx accessed but self._local isn't set"
391 msg = b"localctx accessed but self._local isn't set"
391 raise error.ProgrammingError(msg)
392 raise error.ProgrammingError(msg)
392 return self._repo[self._local]
393 return self._repo[self._local]
393
394
394 @util.propertycache
395 @util.propertycache
395 def otherctx(self):
396 def otherctx(self):
396 if self._other is None:
397 if self._other is None:
397 msg = b"otherctx accessed but self._other isn't set"
398 msg = b"otherctx accessed but self._other isn't set"
398 raise error.ProgrammingError(msg)
399 raise error.ProgrammingError(msg)
399 return self._repo[self._other]
400 return self._repo[self._other]
400
401
401 def active(self):
402 def active(self):
402 """Whether mergestate is active.
403 """Whether mergestate is active.
403
404
404 Returns True if there appears to be mergestate. This is a rough proxy
405 Returns True if there appears to be mergestate. This is a rough proxy
405 for "is a merge in progress."
406 for "is a merge in progress."
406 """
407 """
407 # Check local variables before looking at filesystem for performance
408 # Check local variables before looking at filesystem for performance
408 # reasons.
409 # reasons.
409 return (
410 return (
410 bool(self._local)
411 bool(self._local)
411 or bool(self._state)
412 or bool(self._state)
412 or self._repo.vfs.exists(self.statepathv1)
413 or self._repo.vfs.exists(self.statepathv1)
413 or self._repo.vfs.exists(self.statepathv2)
414 or self._repo.vfs.exists(self.statepathv2)
414 )
415 )
415
416
416 def commit(self):
417 def commit(self):
417 """Write current state on disk (if necessary)"""
418 """Write current state on disk (if necessary)"""
418 if self._dirty:
419 if self._dirty:
419 records = self._makerecords()
420 records = self._makerecords()
420 self._writerecords(records)
421 self._writerecords(records)
421 self._dirty = False
422 self._dirty = False
422
423
423 def _makerecords(self):
424 def _makerecords(self):
424 records = []
425 records = []
425 records.append((RECORD_LOCAL, hex(self._local)))
426 records.append((RECORD_LOCAL, hex(self._local)))
426 records.append((RECORD_OTHER, hex(self._other)))
427 records.append((RECORD_OTHER, hex(self._other)))
427 if self.mergedriver:
428 if self.mergedriver:
428 records.append(
429 records.append(
429 (
430 (
430 RECORD_MERGE_DRIVER_STATE,
431 RECORD_MERGE_DRIVER_STATE,
431 b'\0'.join([self.mergedriver, self._mdstate]),
432 b'\0'.join([self.mergedriver, self._mdstate]),
432 )
433 )
433 )
434 )
434 # Write out state items. In all cases, the value of the state map entry
435 # Write out state items. In all cases, the value of the state map entry
435 # is written as the contents of the record. The record type depends on
436 # is written as the contents of the record. The record type depends on
436 # the type of state that is stored, and capital-letter records are used
437 # the type of state that is stored, and capital-letter records are used
437 # to prevent older versions of Mercurial that do not support the feature
438 # to prevent older versions of Mercurial that do not support the feature
438 # from loading them.
439 # from loading them.
439 for filename, v in pycompat.iteritems(self._state):
440 for filename, v in pycompat.iteritems(self._state):
440 if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
441 if v[0] == MERGE_RECORD_DRIVER_RESOLVED:
441 # Driver-resolved merge. These are stored in 'D' records.
442 # Driver-resolved merge. These are stored in 'D' records.
442 records.append(
443 records.append(
443 (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
444 (RECORD_MERGE_DRIVER_MERGE, b'\0'.join([filename] + v))
444 )
445 )
445 elif v[0] in (
446 elif v[0] in (
446 MERGE_RECORD_UNRESOLVED_PATH,
447 MERGE_RECORD_UNRESOLVED_PATH,
447 MERGE_RECORD_RESOLVED_PATH,
448 MERGE_RECORD_RESOLVED_PATH,
448 ):
449 ):
449 # Path conflicts. These are stored in 'P' records. The current
450 # Path conflicts. These are stored in 'P' records. The current
450 # resolution state ('pu' or 'pr') is stored within the record.
451 # resolution state ('pu' or 'pr') is stored within the record.
451 records.append(
452 records.append(
452 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
453 (RECORD_PATH_CONFLICT, b'\0'.join([filename] + v))
453 )
454 )
454 elif v[1] == nullhex or v[6] == nullhex:
455 elif v[1] == nullhex or v[6] == nullhex:
455 # Change/Delete or Delete/Change conflicts. These are stored in
456 # Change/Delete or Delete/Change conflicts. These are stored in
456 # 'C' records. v[1] is the local file, and is nullhex when the
457 # 'C' records. v[1] is the local file, and is nullhex when the
457 # file is deleted locally ('dc'). v[6] is the remote file, and
458 # file is deleted locally ('dc'). v[6] is the remote file, and
458 # is nullhex when the file is deleted remotely ('cd').
459 # is nullhex when the file is deleted remotely ('cd').
459 records.append(
460 records.append(
460 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
461 (RECORD_CHANGEDELETE_CONFLICT, b'\0'.join([filename] + v))
461 )
462 )
462 else:
463 else:
463 # Normal files. These are stored in 'F' records.
464 # Normal files. These are stored in 'F' records.
464 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
465 records.append((RECORD_MERGED, b'\0'.join([filename] + v)))
465 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
466 for filename, extras in sorted(pycompat.iteritems(self._stateextras)):
466 rawextras = b'\0'.join(
467 rawextras = b'\0'.join(
467 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
468 b'%s\0%s' % (k, v) for k, v in pycompat.iteritems(extras)
468 )
469 )
469 records.append(
470 records.append(
470 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
471 (RECORD_FILE_VALUES, b'%s\0%s' % (filename, rawextras))
471 )
472 )
472 if self._labels is not None:
473 if self._labels is not None:
473 labels = b'\0'.join(self._labels)
474 labels = b'\0'.join(self._labels)
474 records.append((RECORD_LABELS, labels))
475 records.append((RECORD_LABELS, labels))
475 return records
476 return records
476
477
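# Illustrative shapes of the NUL-joined payloads assembled above (field
# names follow the state list built in add() below; the letters are the
# 'D'/'P'/'C'/'F' record types named in the comments):
#
#   'F' (normal merge):   filename\0state\0localkey\0lfile\0afile\0anode
#                         \0ofile\0onode\0flags
#   'P' (path conflict):  filename\0pu-or-pr\0renamed-to\0origin
#   'C' (change/delete):  same fields as 'F', with v[1] or v[6] == nullhex
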
477 def _writerecords(self, records):
478 def _writerecords(self, records):
478 """Write current state on disk (both v1 and v2)"""
479 """Write current state on disk (both v1 and v2)"""
479 self._writerecordsv1(records)
480 self._writerecordsv1(records)
480 self._writerecordsv2(records)
481 self._writerecordsv2(records)
481
482
482 def _writerecordsv1(self, records):
483 def _writerecordsv1(self, records):
483 """Write current state on disk in a version 1 file"""
484 """Write current state on disk in a version 1 file"""
484 f = self._repo.vfs(self.statepathv1, b'wb')
485 f = self._repo.vfs(self.statepathv1, b'wb')
485 irecords = iter(records)
486 irecords = iter(records)
486 lrecords = next(irecords)
487 lrecords = next(irecords)
487 assert lrecords[0] == RECORD_LOCAL
488 assert lrecords[0] == RECORD_LOCAL
488 f.write(hex(self._local) + b'\n')
489 f.write(hex(self._local) + b'\n')
489 for rtype, data in irecords:
490 for rtype, data in irecords:
490 if rtype == RECORD_MERGED:
491 if rtype == RECORD_MERGED:
491 f.write(b'%s\n' % _droponode(data))
492 f.write(b'%s\n' % _droponode(data))
492 f.close()
493 f.close()
493
494
494 def _writerecordsv2(self, records):
495 def _writerecordsv2(self, records):
495 """Write current state on disk in a version 2 file
496 """Write current state on disk in a version 2 file
496
497
497 See the docstring for _readrecordsv2 for why we use 't'."""
498 See the docstring for _readrecordsv2 for why we use 't'."""
498 # these are the records that all version 2 clients can read
499 # these are the records that all version 2 clients can read
499 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
500 allowlist = (RECORD_LOCAL, RECORD_OTHER, RECORD_MERGED)
500 f = self._repo.vfs(self.statepathv2, b'wb')
501 f = self._repo.vfs(self.statepathv2, b'wb')
501 for key, data in records:
502 for key, data in records:
502 assert len(key) == 1
503 assert len(key) == 1
503 if key not in allowlist:
504 if key not in allowlist:
504 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
505 key, data = RECORD_OVERRIDE, b'%s%s' % (key, data)
505 format = b'>sI%is' % len(data)
506 format = b'>sI%is' % len(data)
506 f.write(_pack(format, key, len(data), data))
507 f.write(_pack(format, key, len(data), data))
507 f.close()
508 f.close()
508
509
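# A minimal sketch of the v2 framing written above, using only the
# standard-library struct module (the b'l' labels key is an assumption
# for illustration; it is outside the allowlist):
#
#   import struct
#
#   def frame(key, data):
#       # [type][length][content]: 1-byte type, big-endian 4-byte length
#       return struct.pack(b'>sI%ds' % len(data), key, len(data), data)
#
#   # non-allowlisted keys are wrapped in a 't' override record, so that
#   # pre-3.7 clients treat the whole record as advisory:
#   framed = frame(RECORD_OVERRIDE, b'l' + b'local\0other')
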
509 @staticmethod
510 @staticmethod
510 def getlocalkey(path):
511 def getlocalkey(path):
511 """hash the path of a local file context for storage in the .hg/merge
512 """hash the path of a local file context for storage in the .hg/merge
512 directory."""
513 directory."""
513
514
514 return hex(hashlib.sha1(path).digest())
515 return hex(hashlib.sha1(path).digest())
515
516
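# For example, the preserved copy of b'dir/file.txt' ends up at
# .hg/merge/<key>, where key is (essentially equivalent to the above,
# modulo bytes vs. str):
#
#   import hashlib
#   key = hashlib.sha1(b'dir/file.txt').hexdigest()  # 40 hex chars
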
516 def add(self, fcl, fco, fca, fd):
517 def add(self, fcl, fco, fca, fd):
517 """add a new (potentially?) conflicting file the merge state
518 """add a new (potentially?) conflicting file the merge state
518 fcl: file context for local,
519 fcl: file context for local,
519 fco: file context for remote,
520 fco: file context for remote,
520 fca: file context for ancestors,
521 fca: file context for ancestors,
521 fd: file path of the resulting merge.
522 fd: file path of the resulting merge.
522
523
523 note: also write the local version to the `.hg/merge` directory.
524 note: also write the local version to the `.hg/merge` directory.
524 """
525 """
525 if fcl.isabsent():
526 if fcl.isabsent():
526 localkey = nullhex
527 localkey = nullhex
527 else:
528 else:
528 localkey = mergestate.getlocalkey(fcl.path())
529 localkey = mergestate.getlocalkey(fcl.path())
529 self._repo.vfs.write(b'merge/' + localkey, fcl.data())
530 self._repo.vfs.write(b'merge/' + localkey, fcl.data())
530 self._state[fd] = [
531 self._state[fd] = [
531 MERGE_RECORD_UNRESOLVED,
532 MERGE_RECORD_UNRESOLVED,
532 localkey,
533 localkey,
533 fcl.path(),
534 fcl.path(),
534 fca.path(),
535 fca.path(),
535 hex(fca.filenode()),
536 hex(fca.filenode()),
536 fco.path(),
537 fco.path(),
537 hex(fco.filenode()),
538 hex(fco.filenode()),
538 fcl.flags(),
539 fcl.flags(),
539 ]
540 ]
540 self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
541 self._stateextras[fd] = {b'ancestorlinknode': hex(fca.node())}
541 self._dirty = True
542 self._dirty = True
542
543
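# Layout of the self._state value recorded by add() above and unpacked
# by _resolve() below (index 0 is what mark() mutates):
#
#   [0] state     e.g. MERGE_RECORD_UNRESOLVED
#   [1] localkey  key of the preserved local file, or nullhex if absent
#   [2] lfile     local path
#   [3] afile     ancestor path
#   [4] anode     ancestor filenode (hex)
#   [5] ofile     other path
#   [6] onode     other filenode (hex)
#   [7] flags     local file flags
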
543 def addpath(self, path, frename, forigin):
544 def addpath(self, path, frename, forigin):
544 """add a new conflicting path to the merge state
545 """add a new conflicting path to the merge state
545 path: the path that conflicts
546 path: the path that conflicts
546 frename: the filename the conflicting file was renamed to
547 frename: the filename the conflicting file was renamed to
547 forigin: origin of the file ('l' or 'r' for local/remote)
548 forigin: origin of the file ('l' or 'r' for local/remote)
548 """
549 """
549 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
550 self._state[path] = [MERGE_RECORD_UNRESOLVED_PATH, frename, forigin]
550 self._dirty = True
551 self._dirty = True
551
552
552 def __contains__(self, dfile):
553 def __contains__(self, dfile):
553 return dfile in self._state
554 return dfile in self._state
554
555
555 def __getitem__(self, dfile):
556 def __getitem__(self, dfile):
556 return self._state[dfile][0]
557 return self._state[dfile][0]
557
558
558 def __iter__(self):
559 def __iter__(self):
559 return iter(sorted(self._state))
560 return iter(sorted(self._state))
560
561
561 def files(self):
562 def files(self):
562 return self._state.keys()
563 return self._state.keys()
563
564
564 def mark(self, dfile, state):
565 def mark(self, dfile, state):
565 self._state[dfile][0] = state
566 self._state[dfile][0] = state
566 self._dirty = True
567 self._dirty = True
567
568
568 def mdstate(self):
569 def mdstate(self):
569 return self._mdstate
570 return self._mdstate
570
571
571 def unresolved(self):
572 def unresolved(self):
572 """Obtain the paths of unresolved files."""
573 """Obtain the paths of unresolved files."""
573
574
574 for f, entry in pycompat.iteritems(self._state):
575 for f, entry in pycompat.iteritems(self._state):
575 if entry[0] in (
576 if entry[0] in (
576 MERGE_RECORD_UNRESOLVED,
577 MERGE_RECORD_UNRESOLVED,
577 MERGE_RECORD_UNRESOLVED_PATH,
578 MERGE_RECORD_UNRESOLVED_PATH,
578 ):
579 ):
579 yield f
580 yield f
580
581
581 def driverresolved(self):
582 def driverresolved(self):
582 """Obtain the paths of driver-resolved files."""
583 """Obtain the paths of driver-resolved files."""
583
584
584 for f, entry in self._state.items():
585 for f, entry in self._state.items():
585 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
586 if entry[0] == MERGE_RECORD_DRIVER_RESOLVED:
586 yield f
587 yield f
587
588
588 def extras(self, filename):
589 def extras(self, filename):
589 return self._stateextras.setdefault(filename, {})
590 return self._stateextras.setdefault(filename, {})
590
591
591 def _resolve(self, preresolve, dfile, wctx):
592 def _resolve(self, preresolve, dfile, wctx):
592 """rerun merge process for file path `dfile`"""
593 """rerun merge process for file path `dfile`"""
593 if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
594 if self[dfile] in (MERGE_RECORD_RESOLVED, MERGE_RECORD_DRIVER_RESOLVED):
594 return True, 0
595 return True, 0
595 stateentry = self._state[dfile]
596 stateentry = self._state[dfile]
596 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
597 state, localkey, lfile, afile, anode, ofile, onode, flags = stateentry
597 octx = self._repo[self._other]
598 octx = self._repo[self._other]
598 extras = self.extras(dfile)
599 extras = self.extras(dfile)
599 anccommitnode = extras.get(b'ancestorlinknode')
600 anccommitnode = extras.get(b'ancestorlinknode')
600 if anccommitnode:
601 if anccommitnode:
601 actx = self._repo[anccommitnode]
602 actx = self._repo[anccommitnode]
602 else:
603 else:
603 actx = None
604 actx = None
604 fcd = self._filectxorabsent(localkey, wctx, dfile)
605 fcd = self._filectxorabsent(localkey, wctx, dfile)
605 fco = self._filectxorabsent(onode, octx, ofile)
606 fco = self._filectxorabsent(onode, octx, ofile)
606 # TODO: move this to filectxorabsent
607 # TODO: move this to filectxorabsent
607 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
608 fca = self._repo.filectx(afile, fileid=anode, changectx=actx)
608 # "premerge" x flags
609 # "premerge" x flags
609 flo = fco.flags()
610 flo = fco.flags()
610 fla = fca.flags()
611 fla = fca.flags()
611 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
612 if b'x' in flags + flo + fla and b'l' not in flags + flo + fla:
612 if fca.node() == nullid and flags != flo:
613 if fca.node() == nullid and flags != flo:
613 if preresolve:
614 if preresolve:
614 self._repo.ui.warn(
615 self._repo.ui.warn(
615 _(
616 _(
616 b'warning: cannot merge flags for %s '
617 b'warning: cannot merge flags for %s '
617 b'without common ancestor - keeping local flags\n'
618 b'without common ancestor - keeping local flags\n'
618 )
619 )
619 % afile
620 % afile
620 )
621 )
621 elif flags == fla:
622 elif flags == fla:
622 flags = flo
623 flags = flo
623 if preresolve:
624 if preresolve:
624 # restore local
625 # restore local
625 if localkey != nullhex:
626 if localkey != nullhex:
626 f = self._repo.vfs(b'merge/' + localkey)
627 f = self._repo.vfs(b'merge/' + localkey)
627 wctx[dfile].write(f.read(), flags)
628 wctx[dfile].write(f.read(), flags)
628 f.close()
629 f.close()
629 else:
630 else:
630 wctx[dfile].remove(ignoremissing=True)
631 wctx[dfile].remove(ignoremissing=True)
631 complete, r, deleted = filemerge.premerge(
632 complete, r, deleted = filemerge.premerge(
632 self._repo,
633 self._repo,
633 wctx,
634 wctx,
634 self._local,
635 self._local,
635 lfile,
636 lfile,
636 fcd,
637 fcd,
637 fco,
638 fco,
638 fca,
639 fca,
639 labels=self._labels,
640 labels=self._labels,
640 )
641 )
641 else:
642 else:
642 complete, r, deleted = filemerge.filemerge(
643 complete, r, deleted = filemerge.filemerge(
643 self._repo,
644 self._repo,
644 wctx,
645 wctx,
645 self._local,
646 self._local,
646 lfile,
647 lfile,
647 fcd,
648 fcd,
648 fco,
649 fco,
649 fca,
650 fca,
650 labels=self._labels,
651 labels=self._labels,
651 )
652 )
652 if r is None:
653 if r is None:
653 # no real conflict
654 # no real conflict
654 del self._state[dfile]
655 del self._state[dfile]
655 self._stateextras.pop(dfile, None)
656 self._stateextras.pop(dfile, None)
656 self._dirty = True
657 self._dirty = True
657 elif not r:
658 elif not r:
658 self.mark(dfile, MERGE_RECORD_RESOLVED)
659 self.mark(dfile, MERGE_RECORD_RESOLVED)
659
660
660 if complete:
661 if complete:
661 action = None
662 action = None
662 if deleted:
663 if deleted:
663 if fcd.isabsent():
664 if fcd.isabsent():
664 # dc: local picked. Need to drop if present, which may
665 # dc: local picked. Need to drop if present, which may
665 # happen on re-resolves.
666 # happen on re-resolves.
666 action = ACTION_FORGET
667 action = ACTION_FORGET
667 else:
668 else:
668 # cd: remote picked (or otherwise deleted)
669 # cd: remote picked (or otherwise deleted)
669 action = ACTION_REMOVE
670 action = ACTION_REMOVE
670 else:
671 else:
671 if fcd.isabsent(): # dc: remote picked
672 if fcd.isabsent(): # dc: remote picked
672 action = ACTION_GET
673 action = ACTION_GET
673 elif fco.isabsent(): # cd: local picked
674 elif fco.isabsent(): # cd: local picked
674 if dfile in self.localctx:
675 if dfile in self.localctx:
675 action = ACTION_ADD_MODIFIED
676 action = ACTION_ADD_MODIFIED
676 else:
677 else:
677 action = ACTION_ADD
678 action = ACTION_ADD
678 # else: regular merges (no action necessary)
679 # else: regular merges (no action necessary)
679 self._results[dfile] = r, action
680 self._results[dfile] = r, action
680
681
681 return complete, r
682 return complete, r
682
683
683 def _filectxorabsent(self, hexnode, ctx, f):
684 def _filectxorabsent(self, hexnode, ctx, f):
684 if hexnode == nullhex:
685 if hexnode == nullhex:
685 return filemerge.absentfilectx(ctx, f)
686 return filemerge.absentfilectx(ctx, f)
686 else:
687 else:
687 return ctx[f]
688 return ctx[f]
688
689
689 def preresolve(self, dfile, wctx):
690 def preresolve(self, dfile, wctx):
690 """run premerge process for dfile
691 """run premerge process for dfile
691
692
692 Returns whether the merge is complete, and the exit code."""
693 Returns whether the merge is complete, and the exit code."""
693 return self._resolve(True, dfile, wctx)
694 return self._resolve(True, dfile, wctx)
694
695
695 def resolve(self, dfile, wctx):
696 def resolve(self, dfile, wctx):
696 """run merge process (assuming premerge was run) for dfile
697 """run merge process (assuming premerge was run) for dfile
697
698
698 Returns the exit code of the merge."""
699 Returns the exit code of the merge."""
699 return self._resolve(False, dfile, wctx)[1]
700 return self._resolve(False, dfile, wctx)[1]
700
701
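# A minimal sketch of how callers are expected to drive the two-phase
# API above (ms, wctx and the surrounding locking are assumed):
#
#   for f in ms.unresolved():
#       complete, r = ms.preresolve(f, wctx)
#       if not complete:
#           r = ms.resolve(f, wctx)
#   ms.commit()
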
701 def counts(self):
702 def counts(self):
702 """return counts for updated, merged and removed files in this
703 """return counts for updated, merged and removed files in this
703 session"""
704 session"""
704 updated, merged, removed = 0, 0, 0
705 updated, merged, removed = 0, 0, 0
705 for r, action in pycompat.itervalues(self._results):
706 for r, action in pycompat.itervalues(self._results):
706 if r is None:
707 if r is None:
707 updated += 1
708 updated += 1
708 elif r == 0:
709 elif r == 0:
709 if action == ACTION_REMOVE:
710 if action == ACTION_REMOVE:
710 removed += 1
711 removed += 1
711 else:
712 else:
712 merged += 1
713 merged += 1
713 return updated, merged, removed
714 return updated, merged, removed
714
715
715 def unresolvedcount(self):
716 def unresolvedcount(self):
716 """get unresolved count for this merge (persistent)"""
717 """get unresolved count for this merge (persistent)"""
717 return len(list(self.unresolved()))
718 return len(list(self.unresolved()))
718
719
719 def actions(self):
720 def actions(self):
720 """return lists of actions to perform on the dirstate"""
721 """return lists of actions to perform on the dirstate"""
721 actions = {
722 actions = {
722 ACTION_REMOVE: [],
723 ACTION_REMOVE: [],
723 ACTION_FORGET: [],
724 ACTION_FORGET: [],
724 ACTION_ADD: [],
725 ACTION_ADD: [],
725 ACTION_ADD_MODIFIED: [],
726 ACTION_ADD_MODIFIED: [],
726 ACTION_GET: [],
727 ACTION_GET: [],
727 }
728 }
728 for f, (r, action) in pycompat.iteritems(self._results):
729 for f, (r, action) in pycompat.iteritems(self._results):
729 if action is not None:
730 if action is not None:
730 actions[action].append((f, None, b"merge result"))
731 actions[action].append((f, None, b"merge result"))
731 return actions
732 return actions
732
733
733 def recordactions(self):
734 def recordactions(self):
734 """record remove/add/get actions in the dirstate"""
735 """record remove/add/get actions in the dirstate"""
735 branchmerge = self._repo.dirstate.p2() != nullid
736 branchmerge = self._repo.dirstate.p2() != nullid
736 recordupdates(self._repo, self.actions(), branchmerge, None)
737 recordupdates(self._repo, self.actions(), branchmerge, None)
737
738
738 def queueremove(self, f):
739 def queueremove(self, f):
739 """queues a file to be removed from the dirstate
740 """queues a file to be removed from the dirstate
740
741
741 Meant for use by custom merge drivers."""
742 Meant for use by custom merge drivers."""
742 self._results[f] = 0, ACTION_REMOVE
743 self._results[f] = 0, ACTION_REMOVE
743
744
744 def queueadd(self, f):
745 def queueadd(self, f):
745 """queues a file to be added to the dirstate
746 """queues a file to be added to the dirstate
746
747
747 Meant for use by custom merge drivers."""
748 Meant for use by custom merge drivers."""
748 self._results[f] = 0, ACTION_ADD
749 self._results[f] = 0, ACTION_ADD
749
750
750 def queueget(self, f):
751 def queueget(self, f):
751 """queues a file to be marked modified in the dirstate
752 """queues a file to be marked modified in the dirstate
752
753
753 Meant for use by custom merge drivers."""
754 Meant for use by custom merge drivers."""
754 self._results[f] = 0, ACTION_GET
755 self._results[f] = 0, ACTION_GET
755
756
756
757
757 def _getcheckunknownconfig(repo, section, name):
758 def _getcheckunknownconfig(repo, section, name):
758 config = repo.ui.config(section, name)
759 config = repo.ui.config(section, name)
759 valid = [b'abort', b'ignore', b'warn']
760 valid = [b'abort', b'ignore', b'warn']
760 if config not in valid:
761 if config not in valid:
761 validstr = b', '.join([b"'" + v + b"'" for v in valid])
762 validstr = b', '.join([b"'" + v + b"'" for v in valid])
762 raise error.ConfigError(
763 raise error.ConfigError(
763 _(b"%s.%s not valid ('%s' is none of %s)")
764 _(b"%s.%s not valid ('%s' is none of %s)")
764 % (section, name, config, validstr)
765 % (section, name, config, validstr)
765 )
766 )
766 return config
767 return config
767
768
768
769
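# Sample hgrc fragment accepted by this validator; 'merge.checkunknown'
# and 'merge.checkignored' are the two names the callers below pass in,
# and the value must be one of the three listed above:
#
#   [merge]
#   checkunknown = warn
#   checkignored = abort
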
769 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
770 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
770 if wctx.isinmemory():
771 if wctx.isinmemory():
771 # Nothing to do in IMM because nothing in the "working copy" can be an
772 # Nothing to do in IMM because nothing in the "working copy" can be an
772 # unknown file.
773 # unknown file.
773 #
774 #
774 # Note that we should bail out here, not in ``_checkunknownfiles()``,
775 # Note that we should bail out here, not in ``_checkunknownfiles()``,
775 # because that function does other useful work.
776 # because that function does other useful work.
776 return False
777 return False
777
778
778 if f2 is None:
779 if f2 is None:
779 f2 = f
780 f2 = f
780 return (
781 return (
781 repo.wvfs.audit.check(f)
782 repo.wvfs.audit.check(f)
782 and repo.wvfs.isfileorlink(f)
783 and repo.wvfs.isfileorlink(f)
783 and repo.dirstate.normalize(f) not in repo.dirstate
784 and repo.dirstate.normalize(f) not in repo.dirstate
784 and mctx[f2].cmp(wctx[f])
785 and mctx[f2].cmp(wctx[f])
785 )
786 )
786
787
787
788
788 class _unknowndirschecker(object):
789 class _unknowndirschecker(object):
789 """
790 """
790 Look for any unknown files or directories that may have a path conflict
791 Look for any unknown files or directories that may have a path conflict
791 with a file. If any path prefix of the file exists as a file or link,
792 with a file. If any path prefix of the file exists as a file or link,
792 then it conflicts. If the file itself is a directory that contains any
793 then it conflicts. If the file itself is a directory that contains any
793 file that is not tracked, then it conflicts.
794 file that is not tracked, then it conflicts.
794
795
795 Returns the shortest path at which a conflict occurs, or None if there is
796 Returns the shortest path at which a conflict occurs, or None if there is
796 no conflict.
797 no conflict.
797 """
798 """
798
799
799 def __init__(self):
800 def __init__(self):
800 # A set of paths known to be good. This prevents repeated checking of
801 # A set of paths known to be good. This prevents repeated checking of
801 # dirs. It will be updated with any new dirs that are checked and found
802 # dirs. It will be updated with any new dirs that are checked and found
802 # to be safe.
803 # to be safe.
803 self._unknowndircache = set()
804 self._unknowndircache = set()
804
805
805 # A set of paths that are known to be absent. This prevents repeated
806 # A set of paths that are known to be absent. This prevents repeated
806 # checking of subdirectories that are known not to exist. It will be
807 # checking of subdirectories that are known not to exist. It will be
807 # updated with any new dirs that are checked and found to be absent.
808 # updated with any new dirs that are checked and found to be absent.
808 self._missingdircache = set()
809 self._missingdircache = set()
809
810
810 def __call__(self, repo, wctx, f):
811 def __call__(self, repo, wctx, f):
811 if wctx.isinmemory():
812 if wctx.isinmemory():
812 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
813 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
813 return False
814 return False
814
815
815 # Check for path prefixes that exist as unknown files.
816 # Check for path prefixes that exist as unknown files.
816 for p in reversed(list(util.finddirs(f))):
817 for p in reversed(list(pathutil.finddirs(f))):
817 if p in self._missingdircache:
818 if p in self._missingdircache:
818 return
819 return
819 if p in self._unknowndircache:
820 if p in self._unknowndircache:
820 continue
821 continue
821 if repo.wvfs.audit.check(p):
822 if repo.wvfs.audit.check(p):
822 if (
823 if (
823 repo.wvfs.isfileorlink(p)
824 repo.wvfs.isfileorlink(p)
824 and repo.dirstate.normalize(p) not in repo.dirstate
825 and repo.dirstate.normalize(p) not in repo.dirstate
825 ):
826 ):
826 return p
827 return p
827 if not repo.wvfs.lexists(p):
828 if not repo.wvfs.lexists(p):
828 self._missingdircache.add(p)
829 self._missingdircache.add(p)
829 return
830 return
830 self._unknowndircache.add(p)
831 self._unknowndircache.add(p)
831
832
832 # Check if the file conflicts with a directory containing unknown files.
833 # Check if the file conflicts with a directory containing unknown files.
833 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
834 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
834 # Does the directory contain any files that are not in the dirstate?
835 # Does the directory contain any files that are not in the dirstate?
835 for p, dirs, files in repo.wvfs.walk(f):
836 for p, dirs, files in repo.wvfs.walk(f):
836 for fn in files:
837 for fn in files:
837 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
838 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
838 relf = repo.dirstate.normalize(relf, isknown=True)
839 relf = repo.dirstate.normalize(relf, isknown=True)
839 if relf not in repo.dirstate:
840 if relf not in repo.dirstate:
840 return f
841 return f
841 return None
842 return None
842
843
843
844
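# pathutil.finddirs() -- the helper this changeset moves out of util --
# yields the ancestor directories of a path. A rough illustrative sketch
# (the real implementation lives in pathutil and may also yield the
# empty root path):
def _finddirs_sketch(path):
    pos = path.rfind(b'/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind(b'/', 0, pos)


# e.g. list(_finddirs_sketch(b'a/b/c')) == [b'a/b', b'a'], so the
# reversed(list(...)) in _unknowndirschecker checks the shortest
# prefixes first.
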
844 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
845 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
845 """
846 """
846 Considers any actions that care about the presence of conflicting unknown
847 Considers any actions that care about the presence of conflicting unknown
847 files. For some actions, the result is to abort; for others, it is to
848 files. For some actions, the result is to abort; for others, it is to
848 choose a different action.
849 choose a different action.
849 """
850 """
850 fileconflicts = set()
851 fileconflicts = set()
851 pathconflicts = set()
852 pathconflicts = set()
852 warnconflicts = set()
853 warnconflicts = set()
853 abortconflicts = set()
854 abortconflicts = set()
854 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
855 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
855 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
856 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
856 pathconfig = repo.ui.configbool(
857 pathconfig = repo.ui.configbool(
857 b'experimental', b'merge.checkpathconflicts'
858 b'experimental', b'merge.checkpathconflicts'
858 )
859 )
859 if not force:
860 if not force:
860
861
861 def collectconflicts(conflicts, config):
862 def collectconflicts(conflicts, config):
862 if config == b'abort':
863 if config == b'abort':
863 abortconflicts.update(conflicts)
864 abortconflicts.update(conflicts)
864 elif config == b'warn':
865 elif config == b'warn':
865 warnconflicts.update(conflicts)
866 warnconflicts.update(conflicts)
866
867
867 checkunknowndirs = _unknowndirschecker()
868 checkunknowndirs = _unknowndirschecker()
868 for f, (m, args, msg) in pycompat.iteritems(actions):
869 for f, (m, args, msg) in pycompat.iteritems(actions):
869 if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
870 if m in (ACTION_CREATED, ACTION_DELETED_CHANGED):
870 if _checkunknownfile(repo, wctx, mctx, f):
871 if _checkunknownfile(repo, wctx, mctx, f):
871 fileconflicts.add(f)
872 fileconflicts.add(f)
872 elif pathconfig and f not in wctx:
873 elif pathconfig and f not in wctx:
873 path = checkunknowndirs(repo, wctx, f)
874 path = checkunknowndirs(repo, wctx, f)
874 if path is not None:
875 if path is not None:
875 pathconflicts.add(path)
876 pathconflicts.add(path)
876 elif m == ACTION_LOCAL_DIR_RENAME_GET:
877 elif m == ACTION_LOCAL_DIR_RENAME_GET:
877 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
878 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
878 fileconflicts.add(f)
879 fileconflicts.add(f)
879
880
880 allconflicts = fileconflicts | pathconflicts
881 allconflicts = fileconflicts | pathconflicts
881 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
882 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
882 unknownconflicts = allconflicts - ignoredconflicts
883 unknownconflicts = allconflicts - ignoredconflicts
883 collectconflicts(ignoredconflicts, ignoredconfig)
884 collectconflicts(ignoredconflicts, ignoredconfig)
884 collectconflicts(unknownconflicts, unknownconfig)
885 collectconflicts(unknownconflicts, unknownconfig)
885 else:
886 else:
886 for f, (m, args, msg) in pycompat.iteritems(actions):
887 for f, (m, args, msg) in pycompat.iteritems(actions):
887 if m == ACTION_CREATED_MERGE:
888 if m == ACTION_CREATED_MERGE:
888 fl2, anc = args
889 fl2, anc = args
889 different = _checkunknownfile(repo, wctx, mctx, f)
890 different = _checkunknownfile(repo, wctx, mctx, f)
890 if repo.dirstate._ignore(f):
891 if repo.dirstate._ignore(f):
891 config = ignoredconfig
892 config = ignoredconfig
892 else:
893 else:
893 config = unknownconfig
894 config = unknownconfig
894
895
895 # The behavior when force is True is described by this table:
896 # The behavior when force is True is described by this table:
896 # config different mergeforce | action backup
897 # config different mergeforce | action backup
897 # * n * | get n
898 # * n * | get n
898 # * y y | merge -
899 # * y y | merge -
899 # abort y n | merge - (1)
900 # abort y n | merge - (1)
900 # warn y n | warn + get y
901 # warn y n | warn + get y
901 # ignore y n | get y
902 # ignore y n | get y
902 #
903 #
903 # (1) this is probably the wrong behavior here -- we should
904 # (1) this is probably the wrong behavior here -- we should
904 # probably abort, but some actions like rebases currently
905 # probably abort, but some actions like rebases currently
905 # don't like an abort happening in the middle of
906 # don't like an abort happening in the middle of
906 # merge.update.
907 # merge.update.
907 if not different:
908 if not different:
908 actions[f] = (ACTION_GET, (fl2, False), b'remote created')
909 actions[f] = (ACTION_GET, (fl2, False), b'remote created')
909 elif mergeforce or config == b'abort':
910 elif mergeforce or config == b'abort':
910 actions[f] = (
911 actions[f] = (
911 ACTION_MERGE,
912 ACTION_MERGE,
912 (f, f, None, False, anc),
913 (f, f, None, False, anc),
913 b'remote differs from untracked local',
914 b'remote differs from untracked local',
914 )
915 )
915 elif config == b'abort':
916 elif config == b'abort':
916 abortconflicts.add(f)
917 abortconflicts.add(f)
917 else:
918 else:
918 if config == b'warn':
919 if config == b'warn':
919 warnconflicts.add(f)
920 warnconflicts.add(f)
920 actions[f] = (ACTION_GET, (fl2, True), b'remote created')
921 actions[f] = (ACTION_GET, (fl2, True), b'remote created')
921
922
922 for f in sorted(abortconflicts):
923 for f in sorted(abortconflicts):
923 warn = repo.ui.warn
924 warn = repo.ui.warn
924 if f in pathconflicts:
925 if f in pathconflicts:
925 if repo.wvfs.isfileorlink(f):
926 if repo.wvfs.isfileorlink(f):
926 warn(_(b"%s: untracked file conflicts with directory\n") % f)
927 warn(_(b"%s: untracked file conflicts with directory\n") % f)
927 else:
928 else:
928 warn(_(b"%s: untracked directory conflicts with file\n") % f)
929 warn(_(b"%s: untracked directory conflicts with file\n") % f)
929 else:
930 else:
930 warn(_(b"%s: untracked file differs\n") % f)
931 warn(_(b"%s: untracked file differs\n") % f)
931 if abortconflicts:
932 if abortconflicts:
932 raise error.Abort(
933 raise error.Abort(
933 _(
934 _(
934 b"untracked files in working directory "
935 b"untracked files in working directory "
935 b"differ from files in requested revision"
936 b"differ from files in requested revision"
936 )
937 )
937 )
938 )
938
939
939 for f in sorted(warnconflicts):
940 for f in sorted(warnconflicts):
940 if repo.wvfs.isfileorlink(f):
941 if repo.wvfs.isfileorlink(f):
941 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
942 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
942 else:
943 else:
943 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
944 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
944
945
945 for f, (m, args, msg) in pycompat.iteritems(actions):
946 for f, (m, args, msg) in pycompat.iteritems(actions):
946 if m == ACTION_CREATED:
947 if m == ACTION_CREATED:
947 backup = (
948 backup = (
948 f in fileconflicts
949 f in fileconflicts
949 or f in pathconflicts
950 or f in pathconflicts
950 or any(p in pathconflicts for p in util.finddirs(f))
951 or any(p in pathconflicts for p in pathutil.finddirs(f))
951 )
952 )
952 (flags,) = args
953 (flags,) = args
953 actions[f] = (ACTION_GET, (flags, backup), msg)
954 actions[f] = (ACTION_GET, (flags, backup), msg)
954
955
955
956
956 def _forgetremoved(wctx, mctx, branchmerge):
957 def _forgetremoved(wctx, mctx, branchmerge):
957 """
958 """
958 Forget removed files
959 Forget removed files
959
960
960 If we're jumping between revisions (as opposed to merging), and if
961 If we're jumping between revisions (as opposed to merging), and if
961 neither the working directory nor the target rev has the file,
962 neither the working directory nor the target rev has the file,
962 then we need to remove it from the dirstate, to prevent the
963 then we need to remove it from the dirstate, to prevent the
963 dirstate from listing the file when it is no longer in the
964 dirstate from listing the file when it is no longer in the
964 manifest.
965 manifest.
965
966
966 If we're merging, and the other revision has removed a file
967 If we're merging, and the other revision has removed a file
967 that is not present in the working directory, we need to mark it
968 that is not present in the working directory, we need to mark it
968 as removed.
969 as removed.
969 """
970 """
970
971
971 actions = {}
972 actions = {}
972 m = ACTION_FORGET
973 m = ACTION_FORGET
973 if branchmerge:
974 if branchmerge:
974 m = ACTION_REMOVE
975 m = ACTION_REMOVE
975 for f in wctx.deleted():
976 for f in wctx.deleted():
976 if f not in mctx:
977 if f not in mctx:
977 actions[f] = m, None, b"forget deleted"
978 actions[f] = m, None, b"forget deleted"
978
979
979 if not branchmerge:
980 if not branchmerge:
980 for f in wctx.removed():
981 for f in wctx.removed():
981 if f not in mctx:
982 if f not in mctx:
982 actions[f] = ACTION_FORGET, None, b"forget removed"
983 actions[f] = ACTION_FORGET, None, b"forget removed"
983
984
984 return actions
985 return actions
985
986
986
987
987 def _checkcollision(repo, wmf, actions):
988 def _checkcollision(repo, wmf, actions):
988 """
989 """
989 Check for case-folding collisions.
990 Check for case-folding collisions.
990 """
991 """
991
992
992 # If the repo is narrowed, filter out files outside the narrowspec.
993 # If the repo is narrowed, filter out files outside the narrowspec.
993 narrowmatch = repo.narrowmatch()
994 narrowmatch = repo.narrowmatch()
994 if not narrowmatch.always():
995 if not narrowmatch.always():
995 wmf = wmf.matches(narrowmatch)
996 wmf = wmf.matches(narrowmatch)
996 if actions:
997 if actions:
997 narrowactions = {}
998 narrowactions = {}
998 for m, actionsfortype in pycompat.iteritems(actions):
999 for m, actionsfortype in pycompat.iteritems(actions):
999 narrowactions[m] = []
1000 narrowactions[m] = []
1000 for (f, args, msg) in actionsfortype:
1001 for (f, args, msg) in actionsfortype:
1001 if narrowmatch(f):
1002 if narrowmatch(f):
1002 narrowactions[m].append((f, args, msg))
1003 narrowactions[m].append((f, args, msg))
1003 actions = narrowactions
1004 actions = narrowactions
1004
1005
1005 # build up the provisional merged manifest
1006 # build up the provisional merged manifest
1006 pmmf = set(wmf)
1007 pmmf = set(wmf)
1007
1008
1008 if actions:
1009 if actions:
1009 # KEEP and EXEC are no-op
1010 # KEEP and EXEC are no-op
1010 for m in (
1011 for m in (
1011 ACTION_ADD,
1012 ACTION_ADD,
1012 ACTION_ADD_MODIFIED,
1013 ACTION_ADD_MODIFIED,
1013 ACTION_FORGET,
1014 ACTION_FORGET,
1014 ACTION_GET,
1015 ACTION_GET,
1015 ACTION_CHANGED_DELETED,
1016 ACTION_CHANGED_DELETED,
1016 ACTION_DELETED_CHANGED,
1017 ACTION_DELETED_CHANGED,
1017 ):
1018 ):
1018 for f, args, msg in actions[m]:
1019 for f, args, msg in actions[m]:
1019 pmmf.add(f)
1020 pmmf.add(f)
1020 for f, args, msg in actions[ACTION_REMOVE]:
1021 for f, args, msg in actions[ACTION_REMOVE]:
1021 pmmf.discard(f)
1022 pmmf.discard(f)
1022 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1023 for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
1023 f2, flags = args
1024 f2, flags = args
1024 pmmf.discard(f2)
1025 pmmf.discard(f2)
1025 pmmf.add(f)
1026 pmmf.add(f)
1026 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1027 for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
1027 pmmf.add(f)
1028 pmmf.add(f)
1028 for f, args, msg in actions[ACTION_MERGE]:
1029 for f, args, msg in actions[ACTION_MERGE]:
1029 f1, f2, fa, move, anc = args
1030 f1, f2, fa, move, anc = args
1030 if move:
1031 if move:
1031 pmmf.discard(f1)
1032 pmmf.discard(f1)
1032 pmmf.add(f)
1033 pmmf.add(f)
1033
1034
1034 # check case-folding collision in provisional merged manifest
1035 # check case-folding collision in provisional merged manifest
1035 foldmap = {}
1036 foldmap = {}
1036 for f in pmmf:
1037 for f in pmmf:
1037 fold = util.normcase(f)
1038 fold = util.normcase(f)
1038 if fold in foldmap:
1039 if fold in foldmap:
1039 raise error.Abort(
1040 raise error.Abort(
1040 _(b"case-folding collision between %s and %s")
1041 _(b"case-folding collision between %s and %s")
1041 % (f, foldmap[fold])
1042 % (f, foldmap[fold])
1042 )
1043 )
1043 foldmap[fold] = f
1044 foldmap[fold] = f
1044
1045
1045 # check case-folding of directories
1046 # check case-folding of directories
1046 foldprefix = unfoldprefix = lastfull = b''
1047 foldprefix = unfoldprefix = lastfull = b''
1047 for fold, f in sorted(foldmap.items()):
1048 for fold, f in sorted(foldmap.items()):
1048 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
1049 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
1049 # the folded prefix matches but actual casing is different
1050 # the folded prefix matches but actual casing is different
1050 raise error.Abort(
1051 raise error.Abort(
1051 _(b"case-folding collision between %s and directory of %s")
1052 _(b"case-folding collision between %s and directory of %s")
1052 % (lastfull, f)
1053 % (lastfull, f)
1053 )
1054 )
1054 foldprefix = fold + b'/'
1055 foldprefix = fold + b'/'
1055 unfoldprefix = f + b'/'
1056 unfoldprefix = f + b'/'
1056 lastfull = f
1057 lastfull = f
1057
1058
1058
1059
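# Worked example for the checks above (hypothetical names): a provisional
# manifest containing both b'README' and b'ReadMe' aborts in the first
# loop, since util.normcase() folds both to b'readme'; a file b'dir'
# alongside b'Dir/x' aborts in the second loop, because the folded
# prefix b'dir/' matches while the actual casing differs.
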
1059 def driverpreprocess(repo, ms, wctx, labels=None):
1060 def driverpreprocess(repo, ms, wctx, labels=None):
1060 """run the preprocess step of the merge driver, if any
1061 """run the preprocess step of the merge driver, if any
1061
1062
1062 This is currently not implemented -- it's an extension point."""
1063 This is currently not implemented -- it's an extension point."""
1063 return True
1064 return True
1064
1065
1065
1066
1066 def driverconclude(repo, ms, wctx, labels=None):
1067 def driverconclude(repo, ms, wctx, labels=None):
1067 """run the conclude step of the merge driver, if any
1068 """run the conclude step of the merge driver, if any
1068
1069
1069 This is currently not implemented -- it's an extension point."""
1070 This is currently not implemented -- it's an extension point."""
1070 return True
1071 return True
1071
1072
1072
1073
1073 def _filesindirs(repo, manifest, dirs):
1074 def _filesindirs(repo, manifest, dirs):
1074 """
1075 """
1075 Generator that yields pairs of all the files in the manifest that are found
1076 Generator that yields pairs of all the files in the manifest that are found
1076 inside the directories listed in dirs, and which directory they are found
1077 inside the directories listed in dirs, and which directory they are found
1077 in.
1078 in.
1078 """
1079 """
1079 for f in manifest:
1080 for f in manifest:
1080 for p in util.finddirs(f):
1081 for p in pathutil.finddirs(f):
1081 if p in dirs:
1082 if p in dirs:
1082 yield f, p
1083 yield f, p
1083 break
1084 break
1084
1085
1085
1086
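# For instance, with dirs == {b'a'} and a manifest of [b'a/x', b'b/y'],
# the generator above yields only (b'a/x', b'a'); b'b/y' has no ancestor
# directory in dirs and is skipped.
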
1086 def checkpathconflicts(repo, wctx, mctx, actions):
1087 def checkpathconflicts(repo, wctx, mctx, actions):
1087 """
1088 """
1088 Check if any actions introduce path conflicts in the repository, updating
1089 Check if any actions introduce path conflicts in the repository, updating
1089 actions to record or handle the path conflict accordingly.
1090 actions to record or handle the path conflict accordingly.
1090 """
1091 """
1091 mf = wctx.manifest()
1092 mf = wctx.manifest()
1092
1093
1093 # The set of local files that conflict with a remote directory.
1094 # The set of local files that conflict with a remote directory.
1094 localconflicts = set()
1095 localconflicts = set()
1095
1096
1096 # The set of directories that conflict with a remote file, and so may cause
1097 # The set of directories that conflict with a remote file, and so may cause
1097 # conflicts if they still contain any files after the merge.
1098 # conflicts if they still contain any files after the merge.
1098 remoteconflicts = set()
1099 remoteconflicts = set()
1099
1100
1100 # The set of directories that appear as both a file and a directory in the
1101 # The set of directories that appear as both a file and a directory in the
1101 # remote manifest. These indicate an invalid remote manifest, which
1102 # remote manifest. These indicate an invalid remote manifest, which
1102 # can't be updated to cleanly.
1103 # can't be updated to cleanly.
1103 invalidconflicts = set()
1104 invalidconflicts = set()
1104
1105
1105 # The set of directories that contain files that are being created.
1106 # The set of directories that contain files that are being created.
1106 createdfiledirs = set()
1107 createdfiledirs = set()
1107
1108
1108 # The set of files deleted by all the actions.
1109 # The set of files deleted by all the actions.
1109 deletedfiles = set()
1110 deletedfiles = set()
1110
1111
1111 for f, (m, args, msg) in actions.items():
1112 for f, (m, args, msg) in actions.items():
1112 if m in (
1113 if m in (
1113 ACTION_CREATED,
1114 ACTION_CREATED,
1114 ACTION_DELETED_CHANGED,
1115 ACTION_DELETED_CHANGED,
1115 ACTION_MERGE,
1116 ACTION_MERGE,
1116 ACTION_CREATED_MERGE,
1117 ACTION_CREATED_MERGE,
1117 ):
1118 ):
1118 # This action may create a new local file.
1119 # This action may create a new local file.
1119 createdfiledirs.update(util.finddirs(f))
1120 createdfiledirs.update(pathutil.finddirs(f))
1120 if mf.hasdir(f):
1121 if mf.hasdir(f):
1121 # The file aliases a local directory. This might be ok if all
1122 # The file aliases a local directory. This might be ok if all
1122 # the files in the local directory are being deleted. This
1123 # the files in the local directory are being deleted. This
1123 # will be checked once we know what all the deleted files are.
1124 # will be checked once we know what all the deleted files are.
1124 remoteconflicts.add(f)
1125 remoteconflicts.add(f)
1125 # Track the names of all deleted files.
1126 # Track the names of all deleted files.
1126 if m == ACTION_REMOVE:
1127 if m == ACTION_REMOVE:
1127 deletedfiles.add(f)
1128 deletedfiles.add(f)
1128 if m == ACTION_MERGE:
1129 if m == ACTION_MERGE:
1129 f1, f2, fa, move, anc = args
1130 f1, f2, fa, move, anc = args
1130 if move:
1131 if move:
1131 deletedfiles.add(f1)
1132 deletedfiles.add(f1)
1132 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1133 if m == ACTION_DIR_RENAME_MOVE_LOCAL:
1133 f2, flags = args
1134 f2, flags = args
1134 deletedfiles.add(f2)
1135 deletedfiles.add(f2)
1135
1136
1136 # Check all directories that contain created files for path conflicts.
1137 # Check all directories that contain created files for path conflicts.
1137 for p in createdfiledirs:
1138 for p in createdfiledirs:
1138 if p in mf:
1139 if p in mf:
1139 if p in mctx:
1140 if p in mctx:
1140 # A file is in a directory which aliases both a local
1141 # A file is in a directory which aliases both a local
1141 # and a remote file. This is an internal inconsistency
1142 # and a remote file. This is an internal inconsistency
1142 # within the remote manifest.
1143 # within the remote manifest.
1143 invalidconflicts.add(p)
1144 invalidconflicts.add(p)
1144 else:
1145 else:
1145 # A file is in a directory which aliases a local file.
1146 # A file is in a directory which aliases a local file.
1146 # We will need to rename the local file.
1147 # We will need to rename the local file.
1147 localconflicts.add(p)
1148 localconflicts.add(p)
1148 if p in actions and actions[p][0] in (
1149 if p in actions and actions[p][0] in (
1149 ACTION_CREATED,
1150 ACTION_CREATED,
1150 ACTION_DELETED_CHANGED,
1151 ACTION_DELETED_CHANGED,
1151 ACTION_MERGE,
1152 ACTION_MERGE,
1152 ACTION_CREATED_MERGE,
1153 ACTION_CREATED_MERGE,
1153 ):
1154 ):
1154 # The file is in a directory which aliases a remote file.
1155 # The file is in a directory which aliases a remote file.
1155 # This is an internal inconsistency within the remote
1156 # This is an internal inconsistency within the remote
1156 # manifest.
1157 # manifest.
1157 invalidconflicts.add(p)
1158 invalidconflicts.add(p)
1158
1159
1159 # Rename all local conflicting files that have not been deleted.
1160 # Rename all local conflicting files that have not been deleted.
1160 for p in localconflicts:
1161 for p in localconflicts:
1161 if p not in deletedfiles:
1162 if p not in deletedfiles:
1162 ctxname = bytes(wctx).rstrip(b'+')
1163 ctxname = bytes(wctx).rstrip(b'+')
1163 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1164 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1164 actions[pnew] = (
1165 actions[pnew] = (
1165 ACTION_PATH_CONFLICT_RESOLVE,
1166 ACTION_PATH_CONFLICT_RESOLVE,
1166 (p,),
1167 (p,),
1167 b'local path conflict',
1168 b'local path conflict',
1168 )
1169 )
1169 actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')
1170 actions[p] = (ACTION_PATH_CONFLICT, (pnew, b'l'), b'path conflict')
1170
1171
1171 if remoteconflicts:
1172 if remoteconflicts:
1172 # Check if all files in the conflicting directories have been removed.
1173 # Check if all files in the conflicting directories have been removed.
1173 ctxname = bytes(mctx).rstrip(b'+')
1174 ctxname = bytes(mctx).rstrip(b'+')
1174 for f, p in _filesindirs(repo, mf, remoteconflicts):
1175 for f, p in _filesindirs(repo, mf, remoteconflicts):
1175 if f not in deletedfiles:
1176 if f not in deletedfiles:
1176 m, args, msg = actions[p]
1177 m, args, msg = actions[p]
1177 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1178 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
1178 if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
1179 if m in (ACTION_DELETED_CHANGED, ACTION_MERGE):
1179 # Action was merge, just update target.
1180 # Action was merge, just update target.
1180 actions[pnew] = (m, args, msg)
1181 actions[pnew] = (m, args, msg)
1181 else:
1182 else:
1182 # Action was create, change to renamed get action.
1183 # Action was create, change to renamed get action.
1183 fl = args[0]
1184 fl = args[0]
1184 actions[pnew] = (
1185 actions[pnew] = (
1185 ACTION_LOCAL_DIR_RENAME_GET,
1186 ACTION_LOCAL_DIR_RENAME_GET,
1186 (p, fl),
1187 (p, fl),
1187 b'remote path conflict',
1188 b'remote path conflict',
1188 )
1189 )
1189 actions[p] = (
1190 actions[p] = (
1190 ACTION_PATH_CONFLICT,
1191 ACTION_PATH_CONFLICT,
1191 (pnew, ACTION_REMOVE),
1192 (pnew, ACTION_REMOVE),
1192 b'path conflict',
1193 b'path conflict',
1193 )
1194 )
1194 remoteconflicts.remove(p)
1195 remoteconflicts.remove(p)
1195 break
1196 break
1196
1197
1197 if invalidconflicts:
1198 if invalidconflicts:
1198 for p in invalidconflicts:
1199 for p in invalidconflicts:
1199 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
1200 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
1200 raise error.Abort(_(b"destination manifest contains path conflicts"))
1201 raise error.Abort(_(b"destination manifest contains path conflicts"))
1201
1202
1202
1203
1203 def _filternarrowactions(narrowmatch, branchmerge, actions):
1204 def _filternarrowactions(narrowmatch, branchmerge, actions):
1204 """
1205 """
1205 Filters out actions that can be ignored because the repo is narrowed.
1206 Filters out actions that can be ignored because the repo is narrowed.
1206
1207
1207 Raise an exception if the merge cannot be completed because the repo is
1208 Raise an exception if the merge cannot be completed because the repo is
1208 narrowed.
1209 narrowed.
1209 """
1210 """
1210 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
1211 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
1211 nonconflicttypes = set(b'a am c cm f g r e'.split())
1212 nonconflicttypes = set(b'a am c cm f g r e'.split())
1212 # We mutate the items in the dict during iteration, so iterate
1213 # We mutate the items in the dict during iteration, so iterate
1213 # over a copy.
1214 # over a copy.
1214 for f, action in list(actions.items()):
1215 for f, action in list(actions.items()):
1215 if narrowmatch(f):
1216 if narrowmatch(f):
1216 pass
1217 pass
1217 elif not branchmerge:
1218 elif not branchmerge:
1218 del actions[f] # just updating, ignore changes outside clone
1219 del actions[f] # just updating, ignore changes outside clone
1219 elif action[0] in nooptypes:
1220 elif action[0] in nooptypes:
1220 del actions[f] # merge does not affect file
1221 del actions[f] # merge does not affect file
1221 elif action[0] in nonconflicttypes:
1222 elif action[0] in nonconflicttypes:
1222 raise error.Abort(
1223 raise error.Abort(
1223 _(
1224 _(
1224 b'merge affects file \'%s\' outside narrow, '
1225 b'merge affects file \'%s\' outside narrow, '
1225 b'which is not yet supported'
1226 b'which is not yet supported'
1226 )
1227 )
1227 % f,
1228 % f,
1228 hint=_(b'merging in the other direction may work'),
1229 hint=_(b'merging in the other direction may work'),
1229 )
1230 )
1230 else:
1231 else:
1231 raise error.Abort(
1232 raise error.Abort(
1232 _(b'conflict in file \'%s\' is outside narrow clone') % f
1233 _(b'conflict in file \'%s\' is outside narrow clone') % f
1233 )
1234 )
1234
1235
1235
1236
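# For reference, the single-letter codes in _filternarrowactions above
# correspond to the ACTION_* constants used throughout this module
# (based on the upstream action table -- worth double-checking):
# 'k' keep, 'a' add, 'am' add-modified, 'c' created, 'cm' created-merge,
# 'f' forget, 'g' get, 'r' remove, 'e' exec.
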
def manifestmerge(
    repo,
    wctx,
    p2,
    pa,
    branchmerge,
    force,
    matcher,
    acceptremote,
    followcopies,
    forcefulldiff=False,
):
    """
    Merge wctx and p2 with ancestor pa and generate merge action list

    branchmerge and force are as passed in to update
    matcher = matcher to filter file lists
    acceptremote = accept the incoming changes without prompting
    """
    if matcher is not None and matcher.always():
        matcher = None

    copy, movewithdir, diverge, renamedelete, dirmove = {}, {}, {}, {}, {}

    # manifests fetched in order are going to be faster, so prime the caches
    [
        x.manifest()
        for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
    ]

    if followcopies:
        ret = copies.mergecopies(repo, wctx, p2, pa)
        copy, movewithdir, diverge, renamedelete, dirmove = ret

    boolbm = pycompat.bytestr(bool(branchmerge))
    boolf = pycompat.bytestr(bool(force))
    boolm = pycompat.bytestr(bool(matcher))
    repo.ui.note(_(b"resolving manifests\n"))
    repo.ui.debug(
        b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
    )
    repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))

    m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
    copied = set(copy.values())
    copied.update(movewithdir.values())

    if b'.hgsubstate' in m1 and wctx.rev() is None:
        # Check whether sub state is modified, and overwrite the manifest
        # to flag the change. If wctx is a committed revision, we shouldn't
        # care for the dirty state of the working directory.
        if any(wctx.sub(s).dirty() for s in wctx.substate):
            m1[b'.hgsubstate'] = modifiednodeid

    # Don't use m2-vs-ma optimization if:
    # - ma is the same as m1 or m2, which we're just going to diff again later
    # - The caller specifically asks for a full diff, which is useful during bid
    #   merge.
    if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
        # Identify which files are relevant to the merge, so we can limit the
        # total m1-vs-m2 diff to just those files. This has significant
        # performance benefits in large repositories.
        relevantfiles = set(ma.diff(m2).keys())

        # For copied and moved files, we need to add the source file too.
        for copykey, copyvalue in pycompat.iteritems(copy):
            if copyvalue in relevantfiles:
                relevantfiles.add(copykey)
        for movedirkey in movewithdir:
            relevantfiles.add(movedirkey)
        filesmatcher = scmutil.matchfiles(repo, relevantfiles)
        matcher = matchmod.intersectmatchers(matcher, filesmatcher)

    diff = m1.diff(m2, match=matcher)

    actions = {}
    for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
        if n1 and n2:  # file exists on both local and remote side
            if f not in ma:
                fa = copy.get(f, None)
                if fa is not None:
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, fa, False, pa.node()),
                        b'both renamed from %s' % fa,
                    )
                else:
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, None, False, pa.node()),
                        b'both created',
                    )
            else:
                a = ma[f]
                fla = ma.flags(f)
                nol = b'l' not in fl1 + fl2 + fla
                if n2 == a and fl2 == fla:
                    actions[f] = (ACTION_KEEP, (), b'remote unchanged')
                elif n1 == a and fl1 == fla:  # local unchanged - use remote
                    if n1 == n2:  # optimization: keep local content
                        actions[f] = (
                            ACTION_EXEC,
                            (fl2,),
                            b'update permissions',
                        )
                    else:
                        actions[f] = (
                            ACTION_GET,
                            (fl2, False),
                            b'remote is newer',
                        )
                elif nol and n2 == a:  # remote only changed 'x'
                    actions[f] = (ACTION_EXEC, (fl2,), b'update permissions')
                elif nol and n1 == a:  # local only changed 'x'
                    actions[f] = (ACTION_GET, (fl1, False), b'remote is newer')
                else:  # both changed something
                    actions[f] = (
                        ACTION_MERGE,
                        (f, f, f, False, pa.node()),
                        b'versions differ',
                    )
        elif n1:  # file exists only on local side
            if f in copied:
                pass  # we'll deal with it on m2 side
            elif f in movewithdir:  # directory rename, move local
                f2 = movewithdir[f]
                if f2 in m2:
                    actions[f2] = (
                        ACTION_MERGE,
                        (f, f2, None, True, pa.node()),
                        b'remote directory rename, both created',
                    )
                else:
                    actions[f2] = (
                        ACTION_DIR_RENAME_MOVE_LOCAL,
                        (f, fl1),
                        b'remote directory rename - move from %s' % f,
                    )
            elif f in copy:
                f2 = copy[f]
                actions[f] = (
                    ACTION_MERGE,
                    (f, f2, f2, False, pa.node()),
                    b'local copied/moved from %s' % f2,
                )
            elif f in ma:  # clean, a different, no remote
                if n1 != ma[f]:
                    if acceptremote:
                        actions[f] = (ACTION_REMOVE, None, b'remote delete')
                    else:
                        actions[f] = (
                            ACTION_CHANGED_DELETED,
                            (f, None, f, False, pa.node()),
                            b'prompt changed/deleted',
                        )
                elif n1 == addednodeid:
                    # This extra 'a' is added by working copy manifest to mark
                    # the file as locally added. We should forget it instead of
                    # deleting it.
                    actions[f] = (ACTION_FORGET, None, b'remote deleted')
                else:
                    actions[f] = (ACTION_REMOVE, None, b'other deleted')
        elif n2:  # file exists only on remote side
            if f in copied:
                pass  # we'll deal with it on m1 side
            elif f in movewithdir:
                f2 = movewithdir[f]
                if f2 in m1:
                    actions[f2] = (
                        ACTION_MERGE,
                        (f2, f, None, False, pa.node()),
                        b'local directory rename, both created',
                    )
                else:
                    actions[f2] = (
                        ACTION_LOCAL_DIR_RENAME_GET,
                        (f, fl2),
                        b'local directory rename - get from %s' % f,
                    )
            elif f in copy:
                f2 = copy[f]
                if f2 in m2:
                    actions[f] = (
                        ACTION_MERGE,
                        (f2, f, f2, False, pa.node()),
                        b'remote copied from %s' % f2,
                    )
                else:
                    actions[f] = (
                        ACTION_MERGE,
                        (f2, f, f2, True, pa.node()),
                        b'remote moved from %s' % f2,
                    )
            elif f not in ma:
                # local unknown, remote created: the logic is described by the
                # following table:
                #
                # force  branchmerge  different  |  action
                #   n         *           *      |   create
                #   y         n           *      |   create
                #   y         y           n      |   create
                #   y         y           y      |   merge
                #
                # Checking whether the files are different is expensive, so we
                # don't do that when we can avoid it.
                if not force:
                    actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
                elif not branchmerge:
                    actions[f] = (ACTION_CREATED, (fl2,), b'remote created')
                else:
                    actions[f] = (
                        ACTION_CREATED_MERGE,
                        (fl2, pa.node()),
                        b'remote created, get or merge',
                    )
            elif n2 != ma[f]:
                df = None
                for d in dirmove:
                    if f.startswith(d):
                        # new file added in a directory that was moved
                        df = dirmove[d] + f[len(d) :]
                        break
                if df is not None and df in m1:
                    actions[df] = (
                        ACTION_MERGE,
                        (df, f, f, False, pa.node()),
                        b'local directory rename - respect move '
                        b'from %s' % f,
                    )
                elif acceptremote:
                    actions[f] = (ACTION_CREATED, (fl2,), b'remote recreating')
                else:
                    actions[f] = (
                        ACTION_DELETED_CHANGED,
                        (None, f, f, False, pa.node()),
                        b'prompt deleted/changed',
                    )

    if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
        # If we are merging, look for path conflicts.
        checkpathconflicts(repo, wctx, p2, actions)

    narrowmatch = repo.narrowmatch()
    if not narrowmatch.always():
        # Updates "actions" in place
        _filternarrowactions(narrowmatch, branchmerge, actions)

    return actions, diverge, renamedelete


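def _remotecreatedaction(force, branchmerge, different):
    # Hypothetical restatement of the 'local unknown, remote created' table
    # in manifestmerge() above: only a forced branch merge of files whose
    # contents actually differ needs a merge; every other row is a plain
    # create. The real code avoids the expensive `different` check up front
    # by emitting ACTION_CREATED_MERGE and deciding later.
    if force and branchmerge:
        return ACTION_MERGE if different else ACTION_CREATED
    return ACTION_CREATED

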
def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
    """Resolves false conflicts where the nodeid changed but the content
    remained the same."""
    # We force a copy of actions.items() because we're going to mutate
    # actions as we resolve trivial conflicts.
    for f, (m, args, msg) in list(actions.items()):
        if (
            m == ACTION_CHANGED_DELETED
            and f in ancestor
            and not wctx[f].cmp(ancestor[f])
        ):
            # local did change but ended up with same content
            actions[f] = ACTION_REMOVE, None, b'prompt same'
        elif (
            m == ACTION_DELETED_CHANGED
            and f in ancestor
            and not mctx[f].cmp(ancestor[f])
        ):
            # remote did change but ended up with same content
            del actions[f]  # don't get = keep local deleted


def calculateupdates(
    repo,
    wctx,
    mctx,
    ancestors,
    branchmerge,
    force,
    acceptremote,
    followcopies,
    matcher=None,
    mergeforce=False,
):
    """Calculate the actions needed to merge mctx into wctx using ancestors"""
    # Avoid cycle.
    from . import sparse

    if len(ancestors) == 1:  # default
        actions, diverge, renamedelete = manifestmerge(
            repo,
            wctx,
            mctx,
            ancestors[0],
            branchmerge,
            force,
            matcher,
            acceptremote,
            followcopies,
        )
        _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

    else:  # only when merge.preferancestor=* - the default
        repo.ui.note(
            _(b"note: merging %s and %s using bids from ancestors %s\n")
            % (
                wctx,
                mctx,
                _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
            )
        )

        # Call for bids
        fbids = (
            {}
        )  # mapping filename to bids (action method to list of actions)
        diverge, renamedelete = None, None
        for ancestor in ancestors:
            repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
            actions, diverge1, renamedelete1 = manifestmerge(
                repo,
                wctx,
                mctx,
                ancestor,
                branchmerge,
                force,
                matcher,
                acceptremote,
                followcopies,
                forcefulldiff=True,
            )
            _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce)

            # Track the shortest set of warnings on the theory that bid
            # merge will correctly incorporate more information
            if diverge is None or len(diverge1) < len(diverge):
                diverge = diverge1
            if renamedelete is None or len(renamedelete) < len(renamedelete1):
                renamedelete = renamedelete1

            for f, a in sorted(pycompat.iteritems(actions)):
                m, args, msg = a
                repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
                if f in fbids:
                    d = fbids[f]
                    if m in d:
                        d[m].append(a)
                    else:
                        d[m] = [a]
                else:
                    fbids[f] = {m: [a]}

        # Pick the best bid for each file
        repo.ui.note(_(b'\nauction for merging merge bids\n'))
        actions = {}
        for f, bids in sorted(fbids.items()):
            # bids is a mapping from action method to list of actions
            # Consensus?
            if len(bids) == 1:  # all bids are the same kind of method
                m, l = list(bids.items())[0]
                if all(a == l[0] for a in l[1:]):  # len(bids) is > 1
                    repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
                    actions[f] = l[0]
                    continue
            # If keep is an option, just do it.
            if ACTION_KEEP in bids:
                repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
                actions[f] = bids[ACTION_KEEP][0]
                continue
            # If there are gets and they all agree [how could they not?], do it.
            if ACTION_GET in bids:
                ga0 = bids[ACTION_GET][0]
                if all(a == ga0 for a in bids[ACTION_GET][1:]):
                    repo.ui.note(_(b" %s: picking 'get' action\n") % f)
                    actions[f] = ga0
                    continue
            # TODO: Consider other simple actions such as mode changes
            # Handle inefficient democrazy.
            repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
            for m, l in sorted(bids.items()):
                for _f, args, msg in l:
                    repo.ui.note(b' %s -> %s\n' % (msg, m))
            # Pick random action. TODO: Instead, prompt user when resolving
            m, l = list(bids.items())[0]
            repo.ui.warn(
                _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
            )
            actions[f] = l[0]
            continue
        repo.ui.note(_(b'end of auction\n\n'))

    if wctx.rev() is None:
        fractions = _forgetremoved(wctx, mctx, branchmerge)
        actions.update(fractions)

    prunedactions = sparse.filterupdatesactions(
        repo, wctx, mctx, branchmerge, actions
    )
    _resolvetrivial(repo, wctx, mctx, ancestors[0], actions)

    return prunedactions, diverge, renamedelete


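def _pickbid(bids):
    # Hypothetical compression of the auction loop in calculateupdates():
    # `bids` maps an action type to the list of bids collected from each
    # ancestor. Priority order: full consensus, then 'keep' (always safe),
    # then a unanimous 'get', and finally an arbitrary bid.
    if len(bids) == 1:
        m, l = list(bids.items())[0]
        if all(a == l[0] for a in l[1:]):
            return l[0]
    if ACTION_KEEP in bids:
        return bids[ACTION_KEEP][0]
    if ACTION_GET in bids:
        ga0 = bids[ACTION_GET][0]
        if all(a == ga0 for a in bids[ACTION_GET][1:]):
            return ga0
    return list(bids.items())[0][1][0]

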
def _getcwd():
    try:
        return encoding.getcwd()
    except OSError as err:
        if err.errno == errno.ENOENT:
            return None
        raise


def batchremove(repo, wctx, actions):
    """apply removes to the working directory

    yields tuples for progress updates
    """
    verbose = repo.ui.verbose
    cwd = _getcwd()
    i = 0
    for f, args, msg in actions:
        repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
        if verbose:
            repo.ui.note(_(b"removing %s\n") % f)
        wctx[f].audit()
        try:
            wctx[f].remove(ignoremissing=True)
        except OSError as inst:
            repo.ui.warn(
                _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
            )
        if i == 100:
            yield i, f
            i = 0
        i += 1
    if i > 0:
        yield i, f

    if cwd and not _getcwd():
        # cwd was removed in the course of removing files; print a helpful
        # warning.
        repo.ui.warn(
            _(
                b"current directory was removed\n"
                b"(consider changing to repo root: %s)\n"
            )
            % repo.root
        )


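def _chunkedprogress(items, chunksize=100):
    # Hypothetical distillation of the counter bookkeeping in batchremove():
    # yield (count, last_item) roughly every `chunksize` items, plus a final
    # partial chunk, so the caller can advance a progress bar in batches.
    i = 0
    for item in items:
        if i == chunksize:
            yield i, item
            i = 0
        i += 1
    if i > 0:
        yield i, item

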
def batchget(repo, mctx, wctx, wantfiledata, actions):
    """apply gets to the working directory

    mctx is the context to get from

    Yields arbitrarily many (False, tuple) for progress updates, followed by
    exactly one (True, filedata). When wantfiledata is false, filedata is an
    empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
    mtime) of the file f written for each action.
    """
    filedata = {}
    verbose = repo.ui.verbose
    fctx = mctx.filectx
    ui = repo.ui
    i = 0
    with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
        for f, (flags, backup), msg in actions:
            repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
            if verbose:
                repo.ui.note(_(b"getting %s\n") % f)

            if backup:
                # If a file or directory exists with the same name, back that
                # up. Otherwise, look to see if there is a file that conflicts
                # with a directory this file is in, and if so, back that up.
                conflicting = f
                if not repo.wvfs.lexists(f):
-                    for p in util.finddirs(f):
+                    for p in pathutil.finddirs(f):
                        if repo.wvfs.isfileorlink(p):
                            conflicting = p
                            break
                if repo.wvfs.lexists(conflicting):
                    orig = scmutil.backuppath(ui, repo, conflicting)
                    util.rename(repo.wjoin(conflicting), orig)
            wfctx = wctx[f]
            wfctx.clearunknown()
            atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
            size = wfctx.write(
                fctx(f).data(),
                flags,
                backgroundclose=True,
                atomictemp=atomictemp,
            )
            if wantfiledata:
                s = wfctx.lstat()
                mode = s.st_mode
                mtime = s[stat.ST_MTIME]
                filedata[f] = (mode, size, mtime)  # for dirstate.normal
            if i == 100:
                yield False, (i, f)
                i = 0
            i += 1
        if i > 0:
            yield False, (i, f)
        yield True, filedata


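def _finddirssketch(path):
    # Behavioral sketch of pathutil.finddirs(), the helper this changeset
    # moves out of util: yield each ancestor directory of a slash-separated
    # path, innermost first, ending with b'' for the repository root. The
    # backup scan above walks these until it finds a file or symlink.
    pos = path.rfind(b'/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind(b'/', 0, pos)
    yield b''


# list(_finddirssketch(b'a/b/c.txt')) == [b'a/b', b'a', b'']

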
def _prefetchfiles(repo, ctx, actions):
    """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
    of merge actions. ``ctx`` is the context being merged in."""

    # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
    # don't touch the context to be merged in. 'cd' is skipped, because
    # changed/deleted never resolves to something from the remote side.
    oplist = [
        actions[a]
        for a in (
            ACTION_GET,
            ACTION_DELETED_CHANGED,
            ACTION_LOCAL_DIR_RENAME_GET,
            ACTION_MERGE,
        )
    ]
    prefetch = scmutil.prefetchfiles
    matchfiles = scmutil.matchfiles
    prefetch(
        repo,
        [ctx.rev()],
        matchfiles(repo, [f for sublist in oplist for f, args, msg in sublist]),
    )


@attr.s(frozen=True)
class updateresult(object):
    updatedcount = attr.ib()
    mergedcount = attr.ib()
    removedcount = attr.ib()
    unresolvedcount = attr.ib()

    def isempty(self):
        return not (
            self.updatedcount
            or self.mergedcount
            or self.removedcount
            or self.unresolvedcount
        )


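# updateresult is a frozen attrs value object; a standalone sketch of the
# same pattern, assuming the plain `attr` package (the `counts` name is
# hypothetical):
#
#     import attr
#
#     @attr.s(frozen=True)
#     class counts(object):
#         updated = attr.ib()
#         merged = attr.ib()
#
#     counts(1, 0) == counts(updated=1, merged=0)  # True
#     # assigning to counts(1, 0).updated raises FrozenInstanceError

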
def emptyactions():
    """create an actions dict, to be populated and passed to applyupdates()"""
    return dict(
        (m, [])
        for m in (
            ACTION_ADD,
            ACTION_ADD_MODIFIED,
            ACTION_FORGET,
            ACTION_GET,
            ACTION_CHANGED_DELETED,
            ACTION_DELETED_CHANGED,
            ACTION_REMOVE,
            ACTION_DIR_RENAME_MOVE_LOCAL,
            ACTION_LOCAL_DIR_RENAME_GET,
            ACTION_MERGE,
            ACTION_EXEC,
            ACTION_KEEP,
            ACTION_PATH_CONFLICT,
            ACTION_PATH_CONFLICT_RESOLVE,
        )
    )


def applyupdates(
    repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
):
    """apply the merge action list to the working directory

    wctx is the working copy context
    mctx is the context to be merged into the working copy

    Return a tuple of (counts, filedata), where counts is a tuple
    (updated, merged, removed, unresolved) that describes how many
    files were affected by the update, and filedata is as described in
    batchget.
    """

    _prefetchfiles(repo, mctx, actions)

    updated, merged, removed = 0, 0, 0
    ms = mergestate.clean(repo, wctx.p1().node(), mctx.node(), labels)
    moves = []
    for m, l in actions.items():
        l.sort()

    # 'cd' and 'dc' actions are treated like other merge conflicts
    mergeactions = sorted(actions[ACTION_CHANGED_DELETED])
    mergeactions.extend(sorted(actions[ACTION_DELETED_CHANGED]))
    mergeactions.extend(actions[ACTION_MERGE])
    for f, args, msg in mergeactions:
        f1, f2, fa, move, anc = args
        if f == b'.hgsubstate':  # merged internally
            continue
        if f1 is None:
            fcl = filemerge.absentfilectx(wctx, fa)
        else:
            repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
            fcl = wctx[f1]
        if f2 is None:
            fco = filemerge.absentfilectx(mctx, fa)
        else:
            fco = mctx[f2]
        actx = repo[anc]
        if fa in actx:
            fca = actx[fa]
        else:
            # TODO: move to absentfilectx
            fca = repo.filectx(f1, fileid=nullrev)
        ms.add(fcl, fco, fca, f)
        if f1 != f and move:
            moves.append(f1)

    # remove renamed files after safely stored
    for f in moves:
        if wctx[f].lexists():
            repo.ui.debug(b"removing %s\n" % f)
            wctx[f].audit()
            wctx[f].remove()

    numupdates = sum(len(l) for m, l in actions.items() if m != ACTION_KEEP)
    progress = repo.ui.makeprogress(
        _(b'updating'), unit=_(b'files'), total=numupdates
    )

    if [a for a in actions[ACTION_REMOVE] if a[0] == b'.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # record path conflicts
    for f, args, msg in actions[ACTION_PATH_CONFLICT]:
        f1, fo = args
        s = repo.ui.status
        s(
            _(
                b"%s: path conflict - a file or link has the same name as a "
                b"directory\n"
            )
            % f
        )
        if fo == b'l':
            s(_(b"the local file has been renamed to %s\n") % f1)
        else:
            s(_(b"the remote file has been renamed to %s\n") % f1)
        s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
        ms.addpath(f, f1, fo)
        progress.increment(item=f)

    # When merging in-memory, we can't support worker processes, so set the
    # per-item cost at 0 in that case.
    cost = 0 if wctx.isinmemory() else 0.001

    # remove in parallel (must come before resolving path conflicts and getting)
    prog = worker.worker(
        repo.ui, cost, batchremove, (repo, wctx), actions[ACTION_REMOVE]
    )
    for i, item in prog:
        progress.increment(step=i, item=item)
    removed = len(actions[ACTION_REMOVE])

    # resolve path conflicts (must come before getting)
    for f, args, msg in actions[ACTION_PATH_CONFLICT_RESOLVE]:
        repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
        (f0,) = args
        if wctx[f0].lexists():
            repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
            wctx[f].audit()
            wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
            wctx[f0].remove()
        progress.increment(item=f)

    # get in parallel.
    threadsafe = repo.ui.configbool(
        b'experimental', b'worker.wdir-get-thread-safe'
    )
    prog = worker.worker(
        repo.ui,
        cost,
        batchget,
        (repo, mctx, wctx, wantfiledata),
        actions[ACTION_GET],
        threadsafe=threadsafe,
        hasretval=True,
    )
    getfiledata = {}
    for final, res in prog:
        if final:
            getfiledata = res
        else:
            i, item = res
            progress.increment(step=i, item=item)
    updated = len(actions[ACTION_GET])

    if [a for a in actions[ACTION_GET] if a[0] == b'.hgsubstate']:
        subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)

    # forget (manifest only, just log it) (must come first)
    for f, args, msg in actions[ACTION_FORGET]:
        repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
        progress.increment(item=f)

    # re-add (manifest only, just log it)
    for f, args, msg in actions[ACTION_ADD]:
        repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
        progress.increment(item=f)

    # re-add/mark as modified (manifest only, just log it)
    for f, args, msg in actions[ACTION_ADD_MODIFIED]:
        repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
        progress.increment(item=f)

    # keep (noop, just log it)
    for f, args, msg in actions[ACTION_KEEP]:
        repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
        # no progress

    # directory rename, move local
    for f, args, msg in actions[ACTION_DIR_RENAME_MOVE_LOCAL]:
        repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
        wctx[f].audit()
        wctx[f].write(wctx.filectx(f0).data(), flags)
        wctx[f0].remove()
        updated += 1

    # local directory rename, get
    for f, args, msg in actions[ACTION_LOCAL_DIR_RENAME_GET]:
        repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
        progress.increment(item=f)
        f0, flags = args
        repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
        wctx[f].write(mctx.filectx(f0).data(), flags)
        updated += 1

    # exec
    for f, args, msg in actions[ACTION_EXEC]:
        repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
        progress.increment(item=f)
        (flags,) = args
        wctx[f].audit()
        wctx[f].setflags(b'l' in flags, b'x' in flags)
        updated += 1

    # the ordering is important here -- ms.mergedriver will raise if the merge
    # driver has changed, and we want to be able to bypass it when overwrite is
    # True
    usemergedriver = not overwrite and mergeactions and ms.mergedriver

    if usemergedriver:
        if wctx.isinmemory():
            raise error.InMemoryMergeConflictsError(
                b"in-memory merge does not support mergedriver"
            )
        ms.commit()
        proceed = driverpreprocess(repo, ms, wctx, labels=labels)
        # the driver might leave some files unresolved
        unresolvedf = set(ms.unresolved())
        if not proceed:
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            return updateresult(
                updated, merged, removed, max(len(unresolvedf), 1)
            )
        newactions = []
        for f, args, msg in mergeactions:
            if f in unresolvedf:
                newactions.append((f, args, msg))
        mergeactions = newactions

    try:
        # premerge
        tocomplete = []
        for f, args, msg in mergeactions:
            repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
            progress.increment(item=f)
            if f == b'.hgsubstate':  # subrepo states need updating
                subrepoutil.submerge(
                    repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
                )
                continue
            wctx[f].audit()
            complete, r = ms.preresolve(f, wctx)
            if not complete:
                numupdates += 1
                tocomplete.append((f, args, msg))

        # merge
        for f, args, msg in tocomplete:
            repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
            progress.increment(item=f, total=numupdates)
            ms.resolve(f, wctx)

    finally:
        ms.commit()

    unresolved = ms.unresolvedcount()

    if (
        usemergedriver
        and not unresolved
        and ms.mdstate() != MERGE_DRIVER_STATE_SUCCESS
    ):
        if not driverconclude(repo, ms, wctx, labels=labels):
            # XXX setting unresolved to at least 1 is a hack to make sure we
            # error out
            unresolved = max(unresolved, 1)

    ms.commit()

    msupdated, msmerged, msremoved = ms.counts()
    updated += msupdated
    merged += msmerged
    removed += msremoved

    extraactions = ms.actions()
    if extraactions:
        mfiles = set(a[0] for a in actions[ACTION_MERGE])
        for k, acts in pycompat.iteritems(extraactions):
            actions[k].extend(acts)
            if k == ACTION_GET and wantfiledata:
                # no filedata until mergestate is updated to provide it
                for a in acts:
                    getfiledata[a[0]] = None
            # Remove these files from actions[ACTION_MERGE] as well. This is
            # important because in recordupdates, files in actions[ACTION_MERGE]
            # are processed after files in other actions, and the merge driver
            # might add files to those actions via extraactions above. This can
            # lead to a file being recorded twice, with poor results. This is
            # especially problematic for actions[ACTION_REMOVE] (currently only
            # possible with the merge driver in the initial merge process;
            # interrupted merges don't go through this flow).
            #
            # The real fix here is to have indexes by both file and action so
            # that when the action for a file is changed it is automatically
            # reflected in the other action lists. But that involves a more
            # complex data structure, so this will do for now.
            #
            # We don't need to do the same operation for 'dc' and 'cd' because
            # those lists aren't consulted again.
            mfiles.difference_update(a[0] for a in acts)

        actions[ACTION_MERGE] = [
            a for a in actions[ACTION_MERGE] if a[0] in mfiles
        ]

    progress.complete()
    assert len(getfiledata) == (len(actions[ACTION_GET]) if wantfiledata else 0)
    return updateresult(updated, merged, removed, unresolved), getfiledata


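def _drainworker(prog):
    # Sketch of the hasretval=True protocol applyupdates() consumes from
    # batchget(): (False, (count, item)) entries drive the progress bar and
    # exactly one (True, payload) entry carries the aggregated return value.
    payload = None
    for final, res in prog:
        if final:
            payload = res
        else:
            count, item = res  # progress.increment(step=count, item=item)
    return payload


# _drainworker(iter([(False, (100, b'a')), (True, {b'a': None})]))
# -> {b'a': None}

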
def recordupdates(repo, actions, branchmerge, getfiledata):
    b"record merge actions to the dirstate"
    # remove (must come first)
    for f, args, msg in actions.get(ACTION_REMOVE, []):
        if branchmerge:
            repo.dirstate.remove(f)
        else:
            repo.dirstate.drop(f)

    # forget (must come first)
    for f, args, msg in actions.get(ACTION_FORGET, []):
        repo.dirstate.drop(f)

    # resolve path conflicts
    for f, args, msg in actions.get(ACTION_PATH_CONFLICT_RESOLVE, []):
        (f0,) = args
        origf0 = repo.dirstate.copied(f0) or f0
        repo.dirstate.add(f)
        repo.dirstate.copy(origf0, f)
        if f0 == origf0:
            repo.dirstate.remove(f0)
        else:
            repo.dirstate.drop(f0)

    # re-add
    for f, args, msg in actions.get(ACTION_ADD, []):
        repo.dirstate.add(f)

    # re-add/mark as modified
    for f, args, msg in actions.get(ACTION_ADD_MODIFIED, []):
        if branchmerge:
            repo.dirstate.normallookup(f)
        else:
            repo.dirstate.add(f)

    # exec change
    for f, args, msg in actions.get(ACTION_EXEC, []):
        repo.dirstate.normallookup(f)

    # keep
    for f, args, msg in actions.get(ACTION_KEEP, []):
        pass

    # get
    for f, args, msg in actions.get(ACTION_GET, []):
        if branchmerge:
            repo.dirstate.otherparent(f)
        else:
            parentfiledata = getfiledata[f] if getfiledata else None
            repo.dirstate.normal(f, parentfiledata=parentfiledata)

    # merge
    for f, args, msg in actions.get(ACTION_MERGE, []):
        f1, f2, fa, move, anc = args
        if branchmerge:
            # We've done a branch merge, mark this file as merged
            # so that we properly record the merger later
            repo.dirstate.merge(f)
            if f1 != f2:  # copy/rename
                if move:
                    repo.dirstate.remove(f1)
                if f1 != f:
                    repo.dirstate.copy(f1, f)
                else:
                    repo.dirstate.copy(f2, f)
        else:
            # We've update-merged a locally modified file, so
            # we set the dirstate to emulate a normal checkout
            # of that file some time in the past. Thus our
            # merge will appear as a normal local file
            # modification.
            if f2 == f:  # file not locally copied/moved
                repo.dirstate.normallookup(f)
            if move:
                repo.dirstate.drop(f1)

    # directory rename, move local
    for f, args, msg in actions.get(ACTION_DIR_RENAME_MOVE_LOCAL, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.remove(f0)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)
            repo.dirstate.drop(f0)

    # directory rename, get
    for f, args, msg in actions.get(ACTION_LOCAL_DIR_RENAME_GET, []):
        f0, flag = args
        if branchmerge:
            repo.dirstate.add(f)
            repo.dirstate.copy(f0, f)
        else:
            repo.dirstate.normal(f)


2191 UPDATECHECK_ABORT = b'abort' # handled at higher layers
2192 UPDATECHECK_ABORT = b'abort' # handled at higher layers
2192 UPDATECHECK_NONE = b'none'
2193 UPDATECHECK_NONE = b'none'
2193 UPDATECHECK_LINEAR = b'linear'
2194 UPDATECHECK_LINEAR = b'linear'
2194 UPDATECHECK_NO_CONFLICT = b'noconflict'
2195 UPDATECHECK_NO_CONFLICT = b'noconflict'
2195
2196
2196
2197
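These constants are the accepted values of the experimental.updatecheck
config option referenced in the docstring below. A hedged hgrc sketch (the
section and option names are taken from that docstring, not from this diff):

    [experimental]
    updatecheck = noconflict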
2197 def update(
2198 def update(
2198 repo,
2199 repo,
2199 node,
2200 node,
2200 branchmerge,
2201 branchmerge,
2201 force,
2202 force,
2202 ancestor=None,
2203 ancestor=None,
2203 mergeancestor=False,
2204 mergeancestor=False,
2204 labels=None,
2205 labels=None,
2205 matcher=None,
2206 matcher=None,
2206 mergeforce=False,
2207 mergeforce=False,
2207 updatecheck=None,
2208 updatecheck=None,
2208 wc=None,
2209 wc=None,
2209 ):
2210 ):
2210 """
2211 """
2211 Perform a merge between the working directory and the given node
2212 Perform a merge between the working directory and the given node
2212
2213
2213 node = the node to update to
2214 node = the node to update to
2214 branchmerge = whether to merge between branches
2215 branchmerge = whether to merge between branches
2215 force = whether to force branch merging or file overwriting
2216 force = whether to force branch merging or file overwriting
2216 matcher = a matcher to filter file lists (dirstate not updated)
2217 matcher = a matcher to filter file lists (dirstate not updated)
2217 mergeancestor = whether it is merging with an ancestor. If true,
2218 mergeancestor = whether it is merging with an ancestor. If true,
2218 we should accept the incoming changes for any prompts that occur.
2219 we should accept the incoming changes for any prompts that occur.
2219 If false, merging with an ancestor (fast-forward) is only allowed
2220 If false, merging with an ancestor (fast-forward) is only allowed
2220 between different named branches. This flag is used by the rebase extension
2221 between different named branches. This flag is used by the rebase extension
2221 as a temporary fix and should be avoided in general.
2222 as a temporary fix and should be avoided in general.
2222 labels = labels to use for base, local and other
2223 labels = labels to use for base, local and other
2223 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2224 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
2224 this is True, then 'force' should be True as well.
2225 this is True, then 'force' should be True as well.
2225
2226
2226 The table below shows all the behaviors of the update command given the
2227 The table below shows all the behaviors of the update command given the
2227 -c/--check and -C/--clean or no options, whether the working directory is
2228 -c/--check and -C/--clean or no options, whether the working directory is
2228 dirty, whether a revision is specified, and the relationship of the parent
2229 dirty, whether a revision is specified, and the relationship of the parent
2229 rev to the target rev (linear or not). Match from top first. The -n
2230 rev to the target rev (linear or not). Match from top first. The -n
2230 option doesn't exist on the command line, but represents the
2231 option doesn't exist on the command line, but represents the
2231 experimental.updatecheck=noconflict option.
2232 experimental.updatecheck=noconflict option.
2232
2233
2233 This logic is tested by test-update-branches.t.
2234 This logic is tested by test-update-branches.t.
2234
2235
2235 -c -C -n -m dirty rev linear | result
2236 -c -C -n -m dirty rev linear | result
2236 y y * * * * * | (1)
2237 y y * * * * * | (1)
2237 y * y * * * * | (1)
2238 y * y * * * * | (1)
2238 y * * y * * * | (1)
2239 y * * y * * * | (1)
2239 * y y * * * * | (1)
2240 * y y * * * * | (1)
2240 * y * y * * * | (1)
2241 * y * y * * * | (1)
2241 * * y y * * * | (1)
2242 * * y y * * * | (1)
2242 * * * * * n n | x
2243 * * * * * n n | x
2243 * * * * n * * | ok
2244 * * * * n * * | ok
2244 n n n n y * y | merge
2245 n n n n y * y | merge
2245 n n n n y y n | (2)
2246 n n n n y y n | (2)
2246 n n n y y * * | merge
2247 n n n y y * * | merge
2247 n n y n y * * | merge if no conflict
2248 n n y n y * * | merge if no conflict
2248 n y n n y * * | discard
2249 n y n n y * * | discard
2249 y n n n y * * | (3)
2250 y n n n y * * | (3)
2250
2251
2251 x = can't happen
2252 x = can't happen
2252 * = don't-care
2253 * = don't-care
2253 1 = incompatible options (checked in commands.py)
2254 1 = incompatible options (checked in commands.py)
2254 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2255 2 = abort: uncommitted changes (commit or update --clean to discard changes)
2255 3 = abort: uncommitted changes (checked in commands.py)
2256 3 = abort: uncommitted changes (checked in commands.py)
2256
2257
2257 The merge is performed inside ``wc``, a workingctx-like object. It defaults
2258 The merge is performed inside ``wc``, a workingctx-like object. It defaults
2258 to repo[None] if None is passed.
2259 to repo[None] if None is passed.
2259
2260
2260 Return the same tuple as applyupdates().
2261 Return the same tuple as applyupdates().
2261 """
2262 """
2262 # Avoid cycle.
2263 # Avoid cycle.
2263 from . import sparse
2264 from . import sparse
2264
2265
2265 # This function used to find the default destination if node was None, but
2266 # This function used to find the default destination if node was None, but
2266 # that's now in destutil.py.
2267 # that's now in destutil.py.
2267 assert node is not None
2268 assert node is not None
2268 if not branchmerge and not force:
2269 if not branchmerge and not force:
2269 # TODO: remove the default once all callers that pass branchmerge=False
2270 # TODO: remove the default once all callers that pass branchmerge=False
2270 # and force=False pass a value for updatecheck. We may want to allow
2271 # and force=False pass a value for updatecheck. We may want to allow
2271 # updatecheck='abort' to better support some of these callers.
2272 # updatecheck='abort' to better support some of these callers.
2272 if updatecheck is None:
2273 if updatecheck is None:
2273 updatecheck = UPDATECHECK_LINEAR
2274 updatecheck = UPDATECHECK_LINEAR
2274 if updatecheck not in (
2275 if updatecheck not in (
2275 UPDATECHECK_NONE,
2276 UPDATECHECK_NONE,
2276 UPDATECHECK_LINEAR,
2277 UPDATECHECK_LINEAR,
2277 UPDATECHECK_NO_CONFLICT,
2278 UPDATECHECK_NO_CONFLICT,
2278 ):
2279 ):
2279 raise ValueError(
2280 raise ValueError(
2280 r'Invalid updatecheck %r (can accept %r)'
2281 r'Invalid updatecheck %r (can accept %r)'
2281 % (
2282 % (
2282 updatecheck,
2283 updatecheck,
2283 (
2284 (
2284 UPDATECHECK_NONE,
2285 UPDATECHECK_NONE,
2285 UPDATECHECK_LINEAR,
2286 UPDATECHECK_LINEAR,
2286 UPDATECHECK_NO_CONFLICT,
2287 UPDATECHECK_NO_CONFLICT,
2287 ),
2288 ),
2288 )
2289 )
2289 )
2290 )
2290 # If we're doing a partial update, we need to skip updating
2291 # If we're doing a partial update, we need to skip updating
2291 # the dirstate, so make a note of any partial-ness to the
2292 # the dirstate, so make a note of any partial-ness to the
2292 # update here.
2293 # update here.
2293 if matcher is None or matcher.always():
2294 if matcher is None or matcher.always():
2294 partial = False
2295 partial = False
2295 else:
2296 else:
2296 partial = True
2297 partial = True
2297 with repo.wlock():
2298 with repo.wlock():
2298 if wc is None:
2299 if wc is None:
2299 wc = repo[None]
2300 wc = repo[None]
2300 pl = wc.parents()
2301 pl = wc.parents()
2301 p1 = pl[0]
2302 p1 = pl[0]
2302 p2 = repo[node]
2303 p2 = repo[node]
2303 if ancestor is not None:
2304 if ancestor is not None:
2304 pas = [repo[ancestor]]
2305 pas = [repo[ancestor]]
2305 else:
2306 else:
2306 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
2307 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
2307 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2308 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
2308 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2309 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
2309 else:
2310 else:
2310 pas = [p1.ancestor(p2, warn=branchmerge)]
2311 pas = [p1.ancestor(p2, warn=branchmerge)]
2311
2312
2312 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2313 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
2313
2314
2314 overwrite = force and not branchmerge
2315 overwrite = force and not branchmerge
2315 ### check phase
2316 ### check phase
2316 if not overwrite:
2317 if not overwrite:
2317 if len(pl) > 1:
2318 if len(pl) > 1:
2318 raise error.Abort(_(b"outstanding uncommitted merge"))
2319 raise error.Abort(_(b"outstanding uncommitted merge"))
2319 ms = mergestate.read(repo)
2320 ms = mergestate.read(repo)
2320 if list(ms.unresolved()):
2321 if list(ms.unresolved()):
2321 raise error.Abort(
2322 raise error.Abort(
2322 _(b"outstanding merge conflicts"),
2323 _(b"outstanding merge conflicts"),
2323 hint=_(b"use 'hg resolve' to resolve"),
2324 hint=_(b"use 'hg resolve' to resolve"),
2324 )
2325 )
2325 if branchmerge:
2326 if branchmerge:
2326 if pas == [p2]:
2327 if pas == [p2]:
2327 raise error.Abort(
2328 raise error.Abort(
2328 _(
2329 _(
2329 b"merging with a working directory ancestor"
2330 b"merging with a working directory ancestor"
2330 b" has no effect"
2331 b" has no effect"
2331 )
2332 )
2332 )
2333 )
2333 elif pas == [p1]:
2334 elif pas == [p1]:
2334 if not mergeancestor and wc.branch() == p2.branch():
2335 if not mergeancestor and wc.branch() == p2.branch():
2335 raise error.Abort(
2336 raise error.Abort(
2336 _(b"nothing to merge"),
2337 _(b"nothing to merge"),
2337 hint=_(b"use 'hg update' or check 'hg heads'"),
2338 hint=_(b"use 'hg update' or check 'hg heads'"),
2338 )
2339 )
2339 if not force and (wc.files() or wc.deleted()):
2340 if not force and (wc.files() or wc.deleted()):
2340 raise error.Abort(
2341 raise error.Abort(
2341 _(b"uncommitted changes"),
2342 _(b"uncommitted changes"),
2342 hint=_(b"use 'hg status' to list changes"),
2343 hint=_(b"use 'hg status' to list changes"),
2343 )
2344 )
2344 if not wc.isinmemory():
2345 if not wc.isinmemory():
2345 for s in sorted(wc.substate):
2346 for s in sorted(wc.substate):
2346 wc.sub(s).bailifchanged()
2347 wc.sub(s).bailifchanged()
2347
2348
2348 elif not overwrite:
2349 elif not overwrite:
2349 if p1 == p2: # no-op update
2350 if p1 == p2: # no-op update
2350 # call the hooks and exit early
2351 # call the hooks and exit early
2351 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
2352 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
2352 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
2353 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
2353 return updateresult(0, 0, 0, 0)
2354 return updateresult(0, 0, 0, 0)
2354
2355
2355 if updatecheck == UPDATECHECK_LINEAR and pas not in (
2356 if updatecheck == UPDATECHECK_LINEAR and pas not in (
2356 [p1],
2357 [p1],
2357 [p2],
2358 [p2],
2358 ): # nonlinear
2359 ): # nonlinear
2359 dirty = wc.dirty(missing=True)
2360 dirty = wc.dirty(missing=True)
2360 if dirty:
2361 if dirty:
2361 # Branching is a bit strange to ensure we do the minimal
2362 # Branching is a bit strange to ensure we do the minimal
2362 # number of calls to obsutil.foreground.
2363 # number of calls to obsutil.foreground.
2363 foreground = obsutil.foreground(repo, [p1.node()])
2364 foreground = obsutil.foreground(repo, [p1.node()])
2364 # note: the <node> variable contains a random identifier
2365 # note: the <node> variable contains a random identifier
2365 if repo[node].node() in foreground:
2366 if repo[node].node() in foreground:
2366 pass # allow updating to successors
2367 pass # allow updating to successors
2367 else:
2368 else:
2368 msg = _(b"uncommitted changes")
2369 msg = _(b"uncommitted changes")
2369 hint = _(b"commit or update --clean to discard changes")
2370 hint = _(b"commit or update --clean to discard changes")
2370 raise error.UpdateAbort(msg, hint=hint)
2371 raise error.UpdateAbort(msg, hint=hint)
2371 else:
2372 else:
2372 # Allow jumping branches if clean and specific rev given
2373 # Allow jumping branches if clean and specific rev given
2373 pass
2374 pass
2374
2375
2375 if overwrite:
2376 if overwrite:
2376 pas = [wc]
2377 pas = [wc]
2377 elif not branchmerge:
2378 elif not branchmerge:
2378 pas = [p1]
2379 pas = [p1]
2379
2380
2380 # deprecated config: merge.followcopies
2381 # deprecated config: merge.followcopies
2381 followcopies = repo.ui.configbool(b'merge', b'followcopies')
2382 followcopies = repo.ui.configbool(b'merge', b'followcopies')
2382 if overwrite:
2383 if overwrite:
2383 followcopies = False
2384 followcopies = False
2384 elif not pas[0]:
2385 elif not pas[0]:
2385 followcopies = False
2386 followcopies = False
2386 if not branchmerge and not wc.dirty(missing=True):
2387 if not branchmerge and not wc.dirty(missing=True):
2387 followcopies = False
2388 followcopies = False
2388
2389
2389 ### calculate phase
2390 ### calculate phase
2390 actionbyfile, diverge, renamedelete = calculateupdates(
2391 actionbyfile, diverge, renamedelete = calculateupdates(
2391 repo,
2392 repo,
2392 wc,
2393 wc,
2393 p2,
2394 p2,
2394 pas,
2395 pas,
2395 branchmerge,
2396 branchmerge,
2396 force,
2397 force,
2397 mergeancestor,
2398 mergeancestor,
2398 followcopies,
2399 followcopies,
2399 matcher=matcher,
2400 matcher=matcher,
2400 mergeforce=mergeforce,
2401 mergeforce=mergeforce,
2401 )
2402 )
2402
2403
2403 if updatecheck == UPDATECHECK_NO_CONFLICT:
2404 if updatecheck == UPDATECHECK_NO_CONFLICT:
2404 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2405 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2405 if m not in (
2406 if m not in (
2406 ACTION_GET,
2407 ACTION_GET,
2407 ACTION_KEEP,
2408 ACTION_KEEP,
2408 ACTION_EXEC,
2409 ACTION_EXEC,
2409 ACTION_REMOVE,
2410 ACTION_REMOVE,
2410 ACTION_PATH_CONFLICT_RESOLVE,
2411 ACTION_PATH_CONFLICT_RESOLVE,
2411 ):
2412 ):
2412 msg = _(b"conflicting changes")
2413 msg = _(b"conflicting changes")
2413 hint = _(b"commit or update --clean to discard changes")
2414 hint = _(b"commit or update --clean to discard changes")
2414 raise error.Abort(msg, hint=hint)
2415 raise error.Abort(msg, hint=hint)
2415
2416
2416 # Prompt and create actions. Most of this is in the resolve phase
2417 # Prompt and create actions. Most of this is in the resolve phase
2417 # already, but we can't handle .hgsubstate in filemerge or
2418 # already, but we can't handle .hgsubstate in filemerge or
2418 # subrepoutil.submerge yet so we have to keep prompting for it.
2419 # subrepoutil.submerge yet so we have to keep prompting for it.
2419 if b'.hgsubstate' in actionbyfile:
2420 if b'.hgsubstate' in actionbyfile:
2420 f = b'.hgsubstate'
2421 f = b'.hgsubstate'
2421 m, args, msg = actionbyfile[f]
2422 m, args, msg = actionbyfile[f]
2422 prompts = filemerge.partextras(labels)
2423 prompts = filemerge.partextras(labels)
2423 prompts[b'f'] = f
2424 prompts[b'f'] = f
2424 if m == ACTION_CHANGED_DELETED:
2425 if m == ACTION_CHANGED_DELETED:
2425 if repo.ui.promptchoice(
2426 if repo.ui.promptchoice(
2426 _(
2427 _(
2427 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2428 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
2428 b"use (c)hanged version or (d)elete?"
2429 b"use (c)hanged version or (d)elete?"
2429 b"$$ &Changed $$ &Delete"
2430 b"$$ &Changed $$ &Delete"
2430 )
2431 )
2431 % prompts,
2432 % prompts,
2432 0,
2433 0,
2433 ):
2434 ):
2434 actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
2435 actionbyfile[f] = (ACTION_REMOVE, None, b'prompt delete')
2435 elif f in p1:
2436 elif f in p1:
2436 actionbyfile[f] = (
2437 actionbyfile[f] = (
2437 ACTION_ADD_MODIFIED,
2438 ACTION_ADD_MODIFIED,
2438 None,
2439 None,
2439 b'prompt keep',
2440 b'prompt keep',
2440 )
2441 )
2441 else:
2442 else:
2442 actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
2443 actionbyfile[f] = (ACTION_ADD, None, b'prompt keep')
2443 elif m == ACTION_DELETED_CHANGED:
2444 elif m == ACTION_DELETED_CHANGED:
2444 f1, f2, fa, move, anc = args
2445 f1, f2, fa, move, anc = args
2445 flags = p2[f2].flags()
2446 flags = p2[f2].flags()
2446 if (
2447 if (
2447 repo.ui.promptchoice(
2448 repo.ui.promptchoice(
2448 _(
2449 _(
2449 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2450 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2450 b"use (c)hanged version or leave (d)eleted?"
2451 b"use (c)hanged version or leave (d)eleted?"
2451 b"$$ &Changed $$ &Deleted"
2452 b"$$ &Changed $$ &Deleted"
2452 )
2453 )
2453 % prompts,
2454 % prompts,
2454 0,
2455 0,
2455 )
2456 )
2456 == 0
2457 == 0
2457 ):
2458 ):
2458 actionbyfile[f] = (
2459 actionbyfile[f] = (
2459 ACTION_GET,
2460 ACTION_GET,
2460 (flags, False),
2461 (flags, False),
2461 b'prompt recreating',
2462 b'prompt recreating',
2462 )
2463 )
2463 else:
2464 else:
2464 del actionbyfile[f]
2465 del actionbyfile[f]
2465
2466
2466 # Convert to dictionary-of-lists format
2467 # Convert to dictionary-of-lists format
2467 actions = emptyactions()
2468 actions = emptyactions()
2468 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2469 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
2469 if m not in actions:
2470 if m not in actions:
2470 actions[m] = []
2471 actions[m] = []
2471 actions[m].append((f, args, msg))
2472 actions[m].append((f, args, msg))
2472
2473
2473 if not util.fscasesensitive(repo.path):
2474 if not util.fscasesensitive(repo.path):
2474 # check collision between files only in p2 for clean update
2475 # check collision between files only in p2 for clean update
2475 if not branchmerge and (
2476 if not branchmerge and (
2476 force or not wc.dirty(missing=True, branch=False)
2477 force or not wc.dirty(missing=True, branch=False)
2477 ):
2478 ):
2478 _checkcollision(repo, p2.manifest(), None)
2479 _checkcollision(repo, p2.manifest(), None)
2479 else:
2480 else:
2480 _checkcollision(repo, wc.manifest(), actions)
2481 _checkcollision(repo, wc.manifest(), actions)
2481
2482
2482 # divergent renames
2483 # divergent renames
2483 for f, fl in sorted(pycompat.iteritems(diverge)):
2484 for f, fl in sorted(pycompat.iteritems(diverge)):
2484 repo.ui.warn(
2485 repo.ui.warn(
2485 _(
2486 _(
2486 b"note: possible conflict - %s was renamed "
2487 b"note: possible conflict - %s was renamed "
2487 b"multiple times to:\n"
2488 b"multiple times to:\n"
2488 )
2489 )
2489 % f
2490 % f
2490 )
2491 )
2491 for nf in sorted(fl):
2492 for nf in sorted(fl):
2492 repo.ui.warn(b" %s\n" % nf)
2493 repo.ui.warn(b" %s\n" % nf)
2493
2494
2494 # rename and delete
2495 # rename and delete
2495 for f, fl in sorted(pycompat.iteritems(renamedelete)):
2496 for f, fl in sorted(pycompat.iteritems(renamedelete)):
2496 repo.ui.warn(
2497 repo.ui.warn(
2497 _(
2498 _(
2498 b"note: possible conflict - %s was deleted "
2499 b"note: possible conflict - %s was deleted "
2499 b"and renamed to:\n"
2500 b"and renamed to:\n"
2500 )
2501 )
2501 % f
2502 % f
2502 )
2503 )
2503 for nf in sorted(fl):
2504 for nf in sorted(fl):
2504 repo.ui.warn(b" %s\n" % nf)
2505 repo.ui.warn(b" %s\n" % nf)
2505
2506
2506 ### apply phase
2507 ### apply phase
2507 if not branchmerge: # just jump to the new rev
2508 if not branchmerge: # just jump to the new rev
2508 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2509 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2509 if not partial and not wc.isinmemory():
2510 if not partial and not wc.isinmemory():
2510 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2511 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2511 # note that we're in the middle of an update
2512 # note that we're in the middle of an update
2512 repo.vfs.write(b'updatestate', p2.hex())
2513 repo.vfs.write(b'updatestate', p2.hex())
2513
2514
2514 # Advertise fsmonitor when its presence could be useful.
2515 # Advertise fsmonitor when its presence could be useful.
2515 #
2516 #
2516 # We only advertise when performing an update from an empty working
2517 # We only advertise when performing an update from an empty working
2517 # directory. This typically only occurs during initial clone.
2518 # directory. This typically only occurs during initial clone.
2518 #
2519 #
2519 # We give users a mechanism to disable the warning in case it is
2520 # We give users a mechanism to disable the warning in case it is
2520 # annoying.
2521 # annoying.
2521 #
2522 #
2522 # We only allow on Linux and MacOS because that's where fsmonitor is
2523 # We only allow on Linux and MacOS because that's where fsmonitor is
2523 # considered stable.
2524 # considered stable.
2524 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
2525 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
2525 fsmonitorthreshold = repo.ui.configint(
2526 fsmonitorthreshold = repo.ui.configint(
2526 b'fsmonitor', b'warn_update_file_count'
2527 b'fsmonitor', b'warn_update_file_count'
2527 )
2528 )
2528 try:
2529 try:
2529 # avoid cycle: extensions -> cmdutil -> merge
2530 # avoid cycle: extensions -> cmdutil -> merge
2530 from . import extensions
2531 from . import extensions
2531
2532
2532 extensions.find(b'fsmonitor')
2533 extensions.find(b'fsmonitor')
2533 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
2534 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
2534 # We intentionally don't look at whether fsmonitor has disabled
2535 # We intentionally don't look at whether fsmonitor has disabled
2535 # itself because a) fsmonitor may have already printed a warning
2536 # itself because a) fsmonitor may have already printed a warning
2536 # b) we only care about the config state here.
2537 # b) we only care about the config state here.
2537 except KeyError:
2538 except KeyError:
2538 fsmonitorenabled = False
2539 fsmonitorenabled = False
2539
2540
2540 if (
2541 if (
2541 fsmonitorwarning
2542 fsmonitorwarning
2542 and not fsmonitorenabled
2543 and not fsmonitorenabled
2543 and p1.node() == nullid
2544 and p1.node() == nullid
2544 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2545 and len(actions[ACTION_GET]) >= fsmonitorthreshold
2545 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
2546 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
2546 ):
2547 ):
2547 repo.ui.warn(
2548 repo.ui.warn(
2548 _(
2549 _(
2549 b'(warning: large working directory being used without '
2550 b'(warning: large working directory being used without '
2550 b'fsmonitor enabled; enable fsmonitor to improve performance; '
2551 b'fsmonitor enabled; enable fsmonitor to improve performance; '
2551 b'see "hg help -e fsmonitor")\n'
2552 b'see "hg help -e fsmonitor")\n'
2552 )
2553 )
2553 )
2554 )
2554
2555
2555 updatedirstate = not partial and not wc.isinmemory()
2556 updatedirstate = not partial and not wc.isinmemory()
2556 wantfiledata = updatedirstate and not branchmerge
2557 wantfiledata = updatedirstate and not branchmerge
2557 stats, getfiledata = applyupdates(
2558 stats, getfiledata = applyupdates(
2558 repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
2559 repo, actions, wc, p2, overwrite, wantfiledata, labels=labels
2559 )
2560 )
2560
2561
2561 if updatedirstate:
2562 if updatedirstate:
2562 with repo.dirstate.parentchange():
2563 with repo.dirstate.parentchange():
2563 repo.setparents(fp1, fp2)
2564 repo.setparents(fp1, fp2)
2564 recordupdates(repo, actions, branchmerge, getfiledata)
2565 recordupdates(repo, actions, branchmerge, getfiledata)
2565 # update completed, clear state
2566 # update completed, clear state
2566 util.unlink(repo.vfs.join(b'updatestate'))
2567 util.unlink(repo.vfs.join(b'updatestate'))
2567
2568
2568 if not branchmerge:
2569 if not branchmerge:
2569 repo.dirstate.setbranch(p2.branch())
2570 repo.dirstate.setbranch(p2.branch())
2570
2571
2571 # If we're updating to a location, clean up any stale temporary includes
2572 # If we're updating to a location, clean up any stale temporary includes
2572 # (ex: this happens during hg rebase --abort).
2573 # (ex: this happens during hg rebase --abort).
2573 if not branchmerge:
2574 if not branchmerge:
2574 sparse.prunetemporaryincludes(repo)
2575 sparse.prunetemporaryincludes(repo)
2575
2576
2576 if not partial:
2577 if not partial:
2577 repo.hook(
2578 repo.hook(
2578 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2579 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2579 )
2580 )
2580 return stats
2581 return stats
2581
2582
2582
2583
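As a rough illustration of the entry point above, here is a hedged sketch of
checking out a revision through update() directly, which is approximately
what `hg update REV` reduces to at this layer. The repository path is a
placeholder and the working directory is assumed to be clean:

    from mercurial import hg, merge
    from mercurial import ui as uimod

    repo = hg.repository(uimod.ui.load(), b'/path/to/repo')  # placeholder path
    node = repo[b'tip'].node()
    stats = merge.update(repo, node, branchmerge=False, force=False)
    print(stats.unresolvedcount)  # 0 for a conflict-free checkout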
2583 def graft(
2584 def graft(
2584 repo, ctx, pctx, labels=None, keepparent=False, keepconflictparent=False
2585 repo, ctx, pctx, labels=None, keepparent=False, keepconflictparent=False
2585 ):
2586 ):
2586 """Do a graft-like merge.
2587 """Do a graft-like merge.
2587
2588
2588 This is a merge where the merge ancestor is chosen such that one
2589 This is a merge where the merge ancestor is chosen such that one
2589 or more changesets are grafted onto the current changeset. In
2590 or more changesets are grafted onto the current changeset. In
2590 addition to the merge, this fixes up the dirstate to include only
2591 addition to the merge, this fixes up the dirstate to include only
2591 a single parent (if keepparent is False) and tries to duplicate any
2592 a single parent (if keepparent is False) and tries to duplicate any
2592 renames/copies appropriately.
2593 renames/copies appropriately.
2593
2594
2594 ctx - changeset to rebase
2595 ctx - changeset to rebase
2595 pctx - merge base, usually ctx.p1()
2596 pctx - merge base, usually ctx.p1()
2596 labels - merge labels, e.g. ['local', 'graft']
2597 labels - merge labels, e.g. ['local', 'graft']
2597 keepparent - keep second parent if any
2598 keepparent - keep second parent if any
2598 keepconflictparent - if unresolved, keep parent used for the merge
2599 keepconflictparent - if unresolved, keep parent used for the merge
2599
2600
2600 """
2601 """
2601 # If we're grafting a descendant onto an ancestor, be sure to pass
2602 # If we're grafting a descendant onto an ancestor, be sure to pass
2602 # mergeancestor=True to update. This does two things: 1) allows the merge if
2603 # mergeancestor=True to update. This does two things: 1) allows the merge if
2603 # the destination is the same as the parent of the ctx (so we can use graft
2604 # the destination is the same as the parent of the ctx (so we can use graft
2604 # to copy commits), and 2) informs update that the incoming changes are
2605 # to copy commits), and 2) informs update that the incoming changes are
2605 # newer than the destination so it doesn't prompt about "remote changed foo
2606 # newer than the destination so it doesn't prompt about "remote changed foo
2606 # which local deleted".
2607 # which local deleted".
2607 mergeancestor = repo.changelog.isancestor(repo[b'.'].node(), ctx.node())
2608 mergeancestor = repo.changelog.isancestor(repo[b'.'].node(), ctx.node())
2608
2609
2609 stats = update(
2610 stats = update(
2610 repo,
2611 repo,
2611 ctx.node(),
2612 ctx.node(),
2612 True,
2613 True,
2613 True,
2614 True,
2614 pctx.node(),
2615 pctx.node(),
2615 mergeancestor=mergeancestor,
2616 mergeancestor=mergeancestor,
2616 labels=labels,
2617 labels=labels,
2617 )
2618 )
2618
2619
2619 if keepconflictparent and stats.unresolvedcount:
2620 if keepconflictparent and stats.unresolvedcount:
2620 pother = ctx.node()
2621 pother = ctx.node()
2621 else:
2622 else:
2622 pother = nullid
2623 pother = nullid
2623 parents = ctx.parents()
2624 parents = ctx.parents()
2624 if keepparent and len(parents) == 2 and pctx in parents:
2625 if keepparent and len(parents) == 2 and pctx in parents:
2625 parents.remove(pctx)
2626 parents.remove(pctx)
2626 pother = parents[0].node()
2627 pother = parents[0].node()
2627
2628
2628 with repo.dirstate.parentchange():
2629 with repo.dirstate.parentchange():
2629 repo.setparents(repo[b'.'].node(), pother)
2630 repo.setparents(repo[b'.'].node(), pother)
2630 repo.dirstate.write(repo.currenttransaction())
2631 repo.dirstate.write(repo.currenttransaction())
2631 # fix up dirstate for copies and renames
2632 # fix up dirstate for copies and renames
2632 copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2633 copies.duplicatecopies(repo, repo[None], ctx.rev(), pctx.rev())
2633 return stats
2634 return stats
2634
2635
2635
2636
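A hedged usage sketch for graft(): `repo` is assumed to be an open
repository and `ctx` the changeset to graft, so only the call shape is
illustrated here:

    from mercurial import merge

    # graft ctx onto the working directory, using its first parent as base
    stats = merge.graft(repo, ctx, ctx.p1(), labels=[b'local', b'graft'])
    if stats.unresolvedcount:
        print('conflicts remain; resolve before committing')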
2636 def purge(
2637 def purge(
2637 repo,
2638 repo,
2638 matcher,
2639 matcher,
2639 ignored=False,
2640 ignored=False,
2640 removeemptydirs=True,
2641 removeemptydirs=True,
2641 removefiles=True,
2642 removefiles=True,
2642 abortonerror=False,
2643 abortonerror=False,
2643 noop=False,
2644 noop=False,
2644 ):
2645 ):
2645 """Purge the working directory of untracked files.
2646 """Purge the working directory of untracked files.
2646
2647
2647 ``matcher`` is a matcher configured to scan the working directory -
2648 ``matcher`` is a matcher configured to scan the working directory -
2648 potentially only a subset of it.
2649 potentially only a subset of it.
2649
2650
2650 ``ignored`` controls whether ignored files should also be purged.
2651 ``ignored`` controls whether ignored files should also be purged.
2651
2652
2652 ``removeemptydirs`` controls whether empty directories should be removed.
2653 ``removeemptydirs`` controls whether empty directories should be removed.
2653
2654
2654 ``removefiles`` controls whether files are removed.
2655 ``removefiles`` controls whether files are removed.
2655
2656
2656 ``abortonerror`` causes an exception to be raised if an error occurs
2657 ``abortonerror`` causes an exception to be raised if an error occurs
2657 deleting a file or directory.
2658 deleting a file or directory.
2658
2659
2659 ``noop`` controls whether to actually remove files. If False (the default),
2660 ``noop`` controls whether to actually remove files. If False (the default),
2660 files and directories are removed; if True, nothing is touched.
2661 files and directories are removed; if True, nothing is touched.
2661
2662
2662 Returns an iterable of relative paths in the working directory that were
2663 Returns an iterable of relative paths in the working directory that were
2663 or would be removed.
2664 or would be removed.
2664 """
2665 """
2665
2666
2666 def remove(removefn, path):
2667 def remove(removefn, path):
2667 try:
2668 try:
2668 removefn(path)
2669 removefn(path)
2669 except OSError:
2670 except OSError:
2670 m = _(b'%s cannot be removed') % path
2671 m = _(b'%s cannot be removed') % path
2671 if abortonerror:
2672 if abortonerror:
2672 raise error.Abort(m)
2673 raise error.Abort(m)
2673 else:
2674 else:
2674 repo.ui.warn(_(b'warning: %s\n') % m)
2675 repo.ui.warn(_(b'warning: %s\n') % m)
2675
2676
2676 # There's no API to copy a matcher. So mutate the passed matcher and
2677 # There's no API to copy a matcher. So mutate the passed matcher and
2677 # restore it when we're done.
2678 # restore it when we're done.
2678 oldexplicitdir = matcher.explicitdir
2679 oldexplicitdir = matcher.explicitdir
2679 oldtraversedir = matcher.traversedir
2680 oldtraversedir = matcher.traversedir
2680
2681
2681 res = []
2682 res = []
2682
2683
2683 try:
2684 try:
2684 if removeemptydirs:
2685 if removeemptydirs:
2685 directories = []
2686 directories = []
2686 matcher.explicitdir = matcher.traversedir = directories.append
2687 matcher.explicitdir = matcher.traversedir = directories.append
2687
2688
2688 status = repo.status(match=matcher, ignored=ignored, unknown=True)
2689 status = repo.status(match=matcher, ignored=ignored, unknown=True)
2689
2690
2690 if removefiles:
2691 if removefiles:
2691 for f in sorted(status.unknown + status.ignored):
2692 for f in sorted(status.unknown + status.ignored):
2692 if not noop:
2693 if not noop:
2693 repo.ui.note(_(b'removing file %s\n') % f)
2694 repo.ui.note(_(b'removing file %s\n') % f)
2694 remove(repo.wvfs.unlink, f)
2695 remove(repo.wvfs.unlink, f)
2695 res.append(f)
2696 res.append(f)
2696
2697
2697 if removeemptydirs:
2698 if removeemptydirs:
2698 for f in sorted(directories, reverse=True):
2699 for f in sorted(directories, reverse=True):
2699 if matcher(f) and not repo.wvfs.listdir(f):
2700 if matcher(f) and not repo.wvfs.listdir(f):
2700 if not noop:
2701 if not noop:
2701 repo.ui.note(_(b'removing directory %s\n') % f)
2702 repo.ui.note(_(b'removing directory %s\n') % f)
2702 remove(repo.wvfs.rmdir, f)
2703 remove(repo.wvfs.rmdir, f)
2703 res.append(f)
2704 res.append(f)
2704
2705
2705 return res
2706 return res
2706
2707
2707 finally:
2708 finally:
2708 matcher.explicitdir = oldexplicitdir
2709 matcher.explicitdir = oldexplicitdir
2709 matcher.traversedir = oldtraversedir
2710 matcher.traversedir = oldtraversedir
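Since ``noop`` makes purge() report without deleting, a dry run is a safe
way to exercise it. A hedged sketch (placeholder repository path;
scmutil.match(repo[None]) builds an all-matching matcher):

    from mercurial import hg, merge, scmutil
    from mercurial import ui as uimod

    repo = hg.repository(uimod.ui.load(), b'/path/to/repo')  # placeholder path
    m = scmutil.match(repo[None])
    for path in merge.purge(repo, m, noop=True):
        print(path)  # untracked file or empty directory that would be removed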
@@ -1,334 +1,342 b''
1 from __future__ import absolute_import
1 from __future__ import absolute_import
2
2
3 import errno
3 import errno
4 import os
4 import os
5 import posixpath
5 import posixpath
6 import stat
6 import stat
7
7
8 from .i18n import _
8 from .i18n import _
9 from . import (
9 from . import (
10 encoding,
10 encoding,
11 error,
11 error,
12 policy,
12 policy,
13 pycompat,
13 pycompat,
14 util,
14 util,
15 )
15 )
16
16
17 rustdirs = policy.importrust('dirstate', 'Dirs')
17 rustdirs = policy.importrust('dirstate', 'Dirs')
18 parsers = policy.importmod('parsers')
18 parsers = policy.importmod('parsers')
19
19
20
20
21 def _lowerclean(s):
21 def _lowerclean(s):
22 return encoding.hfsignoreclean(s.lower())
22 return encoding.hfsignoreclean(s.lower())
23
23
24
24
25 class pathauditor(object):
25 class pathauditor(object):
26 '''ensure that a filesystem path contains no banned components.
26 '''ensure that a filesystem path contains no banned components.
27 the following properties of a path are checked:
27 the following properties of a path are checked:
28
28
29 - ends with a directory separator
29 - ends with a directory separator
30 - under top-level .hg
30 - under top-level .hg
31 - starts at the root of a windows drive
31 - starts at the root of a windows drive
32 - contains ".."
32 - contains ".."
33
33
34 More checks are also done on the file system state:
34 More checks are also done on the file system state:
35 - traverses a symlink (e.g. a/symlink_here/b)
35 - traverses a symlink (e.g. a/symlink_here/b)
36 - inside a nested repository (a callback can be used to approve
36 - inside a nested repository (a callback can be used to approve
37 some nested repositories, e.g., subrepositories)
37 some nested repositories, e.g., subrepositories)
38
38
39 The file system checks are only done when 'realfs' is set to True (the
39 The file system checks are only done when 'realfs' is set to True (the
40 default). They should be disabled when we are auditing paths for operations on
40 default). They should be disabled when we are auditing paths for operations on
41 stored history.
41 stored history.
42
42
43 If 'cached' is set to True, audited paths and sub-directories are cached.
43 If 'cached' is set to True, audited paths and sub-directories are cached.
44 Be careful to not keep the cache of unmanaged directories for long because
44 Be careful to not keep the cache of unmanaged directories for long because
45 audited paths may be replaced with symlinks.
45 audited paths may be replaced with symlinks.
46 '''
46 '''
47
47
48 def __init__(self, root, callback=None, realfs=True, cached=False):
48 def __init__(self, root, callback=None, realfs=True, cached=False):
49 self.audited = set()
49 self.audited = set()
50 self.auditeddir = set()
50 self.auditeddir = set()
51 self.root = root
51 self.root = root
52 self._realfs = realfs
52 self._realfs = realfs
53 self._cached = cached
53 self._cached = cached
54 self.callback = callback
54 self.callback = callback
55 if os.path.lexists(root) and not util.fscasesensitive(root):
55 if os.path.lexists(root) and not util.fscasesensitive(root):
56 self.normcase = util.normcase
56 self.normcase = util.normcase
57 else:
57 else:
58 self.normcase = lambda x: x
58 self.normcase = lambda x: x
59
59
60 def __call__(self, path, mode=None):
60 def __call__(self, path, mode=None):
61 '''Check the relative path.
61 '''Check the relative path.
62 path may contain a pattern (e.g. foodir/**.txt)'''
62 path may contain a pattern (e.g. foodir/**.txt)'''
63
63
64 path = util.localpath(path)
64 path = util.localpath(path)
65 normpath = self.normcase(path)
65 normpath = self.normcase(path)
66 if normpath in self.audited:
66 if normpath in self.audited:
67 return
67 return
68 # AIX ignores "/" at end of path, others raise EISDIR.
68 # AIX ignores "/" at end of path, others raise EISDIR.
69 if util.endswithsep(path):
69 if util.endswithsep(path):
70 raise error.Abort(_(b"path ends in directory separator: %s") % path)
70 raise error.Abort(_(b"path ends in directory separator: %s") % path)
71 parts = util.splitpath(path)
71 parts = util.splitpath(path)
72 if (
72 if (
73 os.path.splitdrive(path)[0]
73 os.path.splitdrive(path)[0]
74 or _lowerclean(parts[0]) in (b'.hg', b'.hg.', b'')
74 or _lowerclean(parts[0]) in (b'.hg', b'.hg.', b'')
75 or pycompat.ospardir in parts
75 or pycompat.ospardir in parts
76 ):
76 ):
77 raise error.Abort(_(b"path contains illegal component: %s") % path)
77 raise error.Abort(_(b"path contains illegal component: %s") % path)
78 # Windows shortname aliases
78 # Windows shortname aliases
79 for p in parts:
79 for p in parts:
80 if b"~" in p:
80 if b"~" in p:
81 first, last = p.split(b"~", 1)
81 first, last = p.split(b"~", 1)
82 if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
82 if last.isdigit() and first.upper() in [b"HG", b"HG8B6C"]:
83 raise error.Abort(
83 raise error.Abort(
84 _(b"path contains illegal component: %s") % path
84 _(b"path contains illegal component: %s") % path
85 )
85 )
86 if b'.hg' in _lowerclean(path):
86 if b'.hg' in _lowerclean(path):
87 lparts = [_lowerclean(p.lower()) for p in parts]
87 lparts = [_lowerclean(p.lower()) for p in parts]
88 for p in b'.hg', b'.hg.':
88 for p in b'.hg', b'.hg.':
89 if p in lparts[1:]:
89 if p in lparts[1:]:
90 pos = lparts.index(p)
90 pos = lparts.index(p)
91 base = os.path.join(*parts[:pos])
91 base = os.path.join(*parts[:pos])
92 raise error.Abort(
92 raise error.Abort(
93 _(b"path '%s' is inside nested repo %r")
93 _(b"path '%s' is inside nested repo %r")
94 % (path, pycompat.bytestr(base))
94 % (path, pycompat.bytestr(base))
95 )
95 )
96
96
97 normparts = util.splitpath(normpath)
97 normparts = util.splitpath(normpath)
98 assert len(parts) == len(normparts)
98 assert len(parts) == len(normparts)
99
99
100 parts.pop()
100 parts.pop()
101 normparts.pop()
101 normparts.pop()
102 prefixes = []
102 prefixes = []
103 # It's important that we check the path parts starting from the root.
103 # It's important that we check the path parts starting from the root.
104 # This means we won't accidentally traverse a symlink into some other
104 # This means we won't accidentally traverse a symlink into some other
105 # filesystem (which is potentially expensive to access).
105 # filesystem (which is potentially expensive to access).
106 for i in range(len(parts)):
106 for i in range(len(parts)):
107 prefix = pycompat.ossep.join(parts[: i + 1])
107 prefix = pycompat.ossep.join(parts[: i + 1])
108 normprefix = pycompat.ossep.join(normparts[: i + 1])
108 normprefix = pycompat.ossep.join(normparts[: i + 1])
109 if normprefix in self.auditeddir:
109 if normprefix in self.auditeddir:
110 continue
110 continue
111 if self._realfs:
111 if self._realfs:
112 self._checkfs(prefix, path)
112 self._checkfs(prefix, path)
113 prefixes.append(normprefix)
113 prefixes.append(normprefix)
114
114
115 if self._cached:
115 if self._cached:
116 self.audited.add(normpath)
116 self.audited.add(normpath)
117 # only add prefixes to the cache after checking everything: we don't
117 # only add prefixes to the cache after checking everything: we don't
118 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
118 # want to add "foo/bar/baz" before checking if there's a "foo/.hg"
119 self.auditeddir.update(prefixes)
119 self.auditeddir.update(prefixes)
120
120
121 def _checkfs(self, prefix, path):
121 def _checkfs(self, prefix, path):
122 """raise exception if a file system backed check fails"""
122 """raise exception if a file system backed check fails"""
123 curpath = os.path.join(self.root, prefix)
123 curpath = os.path.join(self.root, prefix)
124 try:
124 try:
125 st = os.lstat(curpath)
125 st = os.lstat(curpath)
126 except OSError as err:
126 except OSError as err:
127 # EINVAL can be raised as invalid path syntax under win32.
127 # EINVAL can be raised as invalid path syntax under win32.
128 # They must be ignored so that patterns can be checked too.
128 # They must be ignored so that patterns can be checked too.
129 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
129 if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL):
130 raise
130 raise
131 else:
131 else:
132 if stat.S_ISLNK(st.st_mode):
132 if stat.S_ISLNK(st.st_mode):
133 msg = _(b'path %r traverses symbolic link %r') % (
133 msg = _(b'path %r traverses symbolic link %r') % (
134 pycompat.bytestr(path),
134 pycompat.bytestr(path),
135 pycompat.bytestr(prefix),
135 pycompat.bytestr(prefix),
136 )
136 )
137 raise error.Abort(msg)
137 raise error.Abort(msg)
138 elif stat.S_ISDIR(st.st_mode) and os.path.isdir(
138 elif stat.S_ISDIR(st.st_mode) and os.path.isdir(
139 os.path.join(curpath, b'.hg')
139 os.path.join(curpath, b'.hg')
140 ):
140 ):
141 if not self.callback or not self.callback(curpath):
141 if not self.callback or not self.callback(curpath):
142 msg = _(b"path '%s' is inside nested repo %r")
142 msg = _(b"path '%s' is inside nested repo %r")
143 raise error.Abort(msg % (path, pycompat.bytestr(prefix)))
143 raise error.Abort(msg % (path, pycompat.bytestr(prefix)))
144
144
145 def check(self, path):
145 def check(self, path):
146 try:
146 try:
147 self(path)
147 self(path)
148 return True
148 return True
149 except (OSError, error.Abort):
149 except (OSError, error.Abort):
150 return False
150 return False
151
151
152
152
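A hedged sketch of the auditor with filesystem checks disabled
(realfs=False), so only the lexical rules listed in the class docstring
apply; the root is a placeholder:

    from mercurial import pathutil

    audit = pathutil.pathauditor(b'/path/to/repo', realfs=False)
    audit.check(b'src/module.py')  # True: ordinary repo-relative path
    audit.check(b'../escape')      # False: contains ".."
    audit.check(b'.hg/hgrc')       # False: under the top-level .hg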
153 def canonpath(root, cwd, myname, auditor=None):
153 def canonpath(root, cwd, myname, auditor=None):
154 '''return the canonical path of myname, given cwd and root
154 '''return the canonical path of myname, given cwd and root
155
155
156 >>> def check(root, cwd, myname):
156 >>> def check(root, cwd, myname):
157 ... a = pathauditor(root, realfs=False)
157 ... a = pathauditor(root, realfs=False)
158 ... try:
158 ... try:
159 ... return canonpath(root, cwd, myname, a)
159 ... return canonpath(root, cwd, myname, a)
160 ... except error.Abort:
160 ... except error.Abort:
161 ... return 'aborted'
161 ... return 'aborted'
162 >>> def unixonly(root, cwd, myname, expected='aborted'):
162 >>> def unixonly(root, cwd, myname, expected='aborted'):
163 ... if pycompat.iswindows:
163 ... if pycompat.iswindows:
164 ... return expected
164 ... return expected
165 ... return check(root, cwd, myname)
165 ... return check(root, cwd, myname)
166 >>> def winonly(root, cwd, myname, expected='aborted'):
166 >>> def winonly(root, cwd, myname, expected='aborted'):
167 ... if not pycompat.iswindows:
167 ... if not pycompat.iswindows:
168 ... return expected
168 ... return expected
169 ... return check(root, cwd, myname)
169 ... return check(root, cwd, myname)
170 >>> winonly(b'd:\\\\repo', b'c:\\\\dir', b'filename')
170 >>> winonly(b'd:\\\\repo', b'c:\\\\dir', b'filename')
171 'aborted'
171 'aborted'
172 >>> winonly(b'c:\\\\repo', b'c:\\\\dir', b'filename')
172 >>> winonly(b'c:\\\\repo', b'c:\\\\dir', b'filename')
173 'aborted'
173 'aborted'
174 >>> winonly(b'c:\\\\repo', b'c:\\\\', b'filename')
174 >>> winonly(b'c:\\\\repo', b'c:\\\\', b'filename')
175 'aborted'
175 'aborted'
176 >>> winonly(b'c:\\\\repo', b'c:\\\\', b'repo\\\\filename',
176 >>> winonly(b'c:\\\\repo', b'c:\\\\', b'repo\\\\filename',
177 ... b'filename')
177 ... b'filename')
178 'filename'
178 'filename'
179 >>> winonly(b'c:\\\\repo', b'c:\\\\repo', b'filename', b'filename')
179 >>> winonly(b'c:\\\\repo', b'c:\\\\repo', b'filename', b'filename')
180 'filename'
180 'filename'
181 >>> winonly(b'c:\\\\repo', b'c:\\\\repo\\\\subdir', b'filename',
181 >>> winonly(b'c:\\\\repo', b'c:\\\\repo\\\\subdir', b'filename',
182 ... b'subdir/filename')
182 ... b'subdir/filename')
183 'subdir/filename'
183 'subdir/filename'
184 >>> unixonly(b'/repo', b'/dir', b'filename')
184 >>> unixonly(b'/repo', b'/dir', b'filename')
185 'aborted'
185 'aborted'
186 >>> unixonly(b'/repo', b'/', b'filename')
186 >>> unixonly(b'/repo', b'/', b'filename')
187 'aborted'
187 'aborted'
188 >>> unixonly(b'/repo', b'/', b'repo/filename', b'filename')
188 >>> unixonly(b'/repo', b'/', b'repo/filename', b'filename')
189 'filename'
189 'filename'
190 >>> unixonly(b'/repo', b'/repo', b'filename', b'filename')
190 >>> unixonly(b'/repo', b'/repo', b'filename', b'filename')
191 'filename'
191 'filename'
192 >>> unixonly(b'/repo', b'/repo/subdir', b'filename', b'subdir/filename')
192 >>> unixonly(b'/repo', b'/repo/subdir', b'filename', b'subdir/filename')
193 'subdir/filename'
193 'subdir/filename'
194 '''
194 '''
195 if util.endswithsep(root):
195 if util.endswithsep(root):
196 rootsep = root
196 rootsep = root
197 else:
197 else:
198 rootsep = root + pycompat.ossep
198 rootsep = root + pycompat.ossep
199 name = myname
199 name = myname
200 if not os.path.isabs(name):
200 if not os.path.isabs(name):
201 name = os.path.join(root, cwd, name)
201 name = os.path.join(root, cwd, name)
202 name = os.path.normpath(name)
202 name = os.path.normpath(name)
203 if auditor is None:
203 if auditor is None:
204 auditor = pathauditor(root)
204 auditor = pathauditor(root)
205 if name != rootsep and name.startswith(rootsep):
205 if name != rootsep and name.startswith(rootsep):
206 name = name[len(rootsep) :]
206 name = name[len(rootsep) :]
207 auditor(name)
207 auditor(name)
208 return util.pconvert(name)
208 return util.pconvert(name)
209 elif name == root:
209 elif name == root:
210 return b''
210 return b''
211 else:
211 else:
212 # Determine whether `name' is in the hierarchy at or beneath `root',
212 # Determine whether `name' is in the hierarchy at or beneath `root',
213 # by iterating name=dirname(name) until that causes no change (can't
213 # by iterating name=dirname(name) until that causes no change (can't
214 # check name == '/', because that doesn't work on windows). The list
214 # check name == '/', because that doesn't work on windows). The list
215 # `rel' holds the reversed list of components making up the relative
215 # `rel' holds the reversed list of components making up the relative
216 # file name we want.
216 # file name we want.
217 rel = []
217 rel = []
218 while True:
218 while True:
219 try:
219 try:
220 s = util.samefile(name, root)
220 s = util.samefile(name, root)
221 except OSError:
221 except OSError:
222 s = False
222 s = False
223 if s:
223 if s:
224 if not rel:
224 if not rel:
225 # name was actually the same as root (maybe a symlink)
225 # name was actually the same as root (maybe a symlink)
226 return b''
226 return b''
227 rel.reverse()
227 rel.reverse()
228 name = os.path.join(*rel)
228 name = os.path.join(*rel)
229 auditor(name)
229 auditor(name)
230 return util.pconvert(name)
230 return util.pconvert(name)
231 dirname, basename = util.split(name)
231 dirname, basename = util.split(name)
232 rel.append(basename)
232 rel.append(basename)
233 if dirname == name:
233 if dirname == name:
234 break
234 break
235 name = dirname
235 name = dirname
236
236
237 # A common mistake is to use -R, but specify a file relative to the repo
237 # A common mistake is to use -R, but specify a file relative to the repo
238 # instead of cwd. Detect that case, and provide a hint to the user.
238 # instead of cwd. Detect that case, and provide a hint to the user.
239 hint = None
239 hint = None
240 try:
240 try:
241 if cwd != root:
241 if cwd != root:
242 canonpath(root, root, myname, auditor)
242 canonpath(root, root, myname, auditor)
243 relpath = util.pathto(root, cwd, b'')
243 relpath = util.pathto(root, cwd, b'')
244 if relpath.endswith(pycompat.ossep):
244 if relpath.endswith(pycompat.ossep):
245 relpath = relpath[:-1]
245 relpath = relpath[:-1]
246 hint = _(b"consider using '--cwd %s'") % relpath
246 hint = _(b"consider using '--cwd %s'") % relpath
247 except error.Abort:
247 except error.Abort:
248 pass
248 pass
249
249
250 raise error.Abort(
250 raise error.Abort(
251 _(b"%s not under root '%s'") % (myname, root), hint=hint
251 _(b"%s not under root '%s'") % (myname, root), hint=hint
252 )
252 )
253
253
254
254
255 def normasprefix(path):
255 def normasprefix(path):
256 '''normalize the specified path as path prefix
256 '''normalize the specified path as path prefix
257
257
258 Returned value can be used safely for "p.startswith(prefix)",
258 Returned value can be used safely for "p.startswith(prefix)",
259 "p[len(prefix):]", and so on.
259 "p[len(prefix):]", and so on.
260
260
261 For efficiency, this expects "path" argument to be already
261 For efficiency, this expects "path" argument to be already
262 normalized by "os.path.normpath", "os.path.realpath", and so on.
262 normalized by "os.path.normpath", "os.path.realpath", and so on.
263
263
264 See also issue3033 for detail about need of this function.
264 See also issue3033 for detail about need of this function.
265
265
266 >>> normasprefix(b'/foo/bar').replace(pycompat.ossep, b'/')
266 >>> normasprefix(b'/foo/bar').replace(pycompat.ossep, b'/')
267 '/foo/bar/'
267 '/foo/bar/'
268 >>> normasprefix(b'/').replace(pycompat.ossep, b'/')
268 >>> normasprefix(b'/').replace(pycompat.ossep, b'/')
269 '/'
269 '/'
270 '''
270 '''
271 d, p = os.path.splitdrive(path)
271 d, p = os.path.splitdrive(path)
272 if len(p) != len(pycompat.ossep):
272 if len(p) != len(pycompat.ossep):
273 return path + pycompat.ossep
273 return path + pycompat.ossep
274 else:
274 else:
275 return path
275 return path
276
276
277
277
278 def finddirs(path):
279 pos = path.rfind(b'/')
280 while pos != -1:
281 yield path[:pos]
282 pos = path.rfind(b'/', 0, pos)
283 yield b''
284
285
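finddirs() walks a slash-separated path from its deepest parent directory up
to the repository root, which is reported as b''. A minimal sketch, assuming
the function is reached as pathutil.finddirs after this move:

    from mercurial import pathutil

    assert list(pathutil.finddirs(b'a/b/c')) == [b'a/b', b'a', b'']
    assert list(pathutil.finddirs(b'top')) == [b'']  # file at the root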
278 class dirs(object):
286 class dirs(object):
279 '''a multiset of directory names from a set of file paths'''
287 '''a multiset of directory names from a set of file paths'''
280
288
281 def __init__(self, map, skip=None):
289 def __init__(self, map, skip=None):
282 self._dirs = {}
290 self._dirs = {}
283 addpath = self.addpath
291 addpath = self.addpath
284 if isinstance(map, dict) and skip is not None:
292 if isinstance(map, dict) and skip is not None:
285 for f, s in pycompat.iteritems(map):
293 for f, s in pycompat.iteritems(map):
286 if s[0] != skip:
294 if s[0] != skip:
287 addpath(f)
295 addpath(f)
288 elif skip is not None:
296 elif skip is not None:
289 raise error.ProgrammingError(
297 raise error.ProgrammingError(
290 b"skip character is only supported with a dict source"
298 b"skip character is only supported with a dict source"
291 )
299 )
292 else:
300 else:
293 for f in map:
301 for f in map:
294 addpath(f)
302 addpath(f)
295
303
296 def addpath(self, path):
304 def addpath(self, path):
297 dirs = self._dirs
305 dirs = self._dirs
298 for base in util.finddirs(path):
306 for base in finddirs(path):
299 if base.endswith(b'/'):
307 if base.endswith(b'/'):
300 raise ValueError(
308 raise ValueError(
301 "found invalid consecutive slashes in path: %r" % base
309 "found invalid consecutive slashes in path: %r" % base
302 )
310 )
303 if base in dirs:
311 if base in dirs:
304 dirs[base] += 1
312 dirs[base] += 1
305 return
313 return
306 dirs[base] = 1
314 dirs[base] = 1
307
315
308 def delpath(self, path):
316 def delpath(self, path):
309 dirs = self._dirs
317 dirs = self._dirs
310 for base in util.finddirs(path):
318 for base in finddirs(path):
311 if dirs[base] > 1:
319 if dirs[base] > 1:
312 dirs[base] -= 1
320 dirs[base] -= 1
313 return
321 return
314 del dirs[base]
322 del dirs[base]
315
323
316 def __iter__(self):
324 def __iter__(self):
317 return iter(self._dirs)
325 return iter(self._dirs)
318
326
319 def __contains__(self, d):
327 def __contains__(self, d):
320 return d in self._dirs
328 return d in self._dirs
321
329
322
330
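A hedged sketch of the multiset semantics: addpath() bumps a refcount on
every ancestor directory and stops at the first one already present, so
delpath() drops a directory only once nothing references it any more. This
uses the pure-Python class above; the C/Rust replacements selected below
expose the same interface:

    d = dirs([b'a/b/c', b'a/d'])
    assert b'a/b' in d and b'a' in d
    d.delpath(b'a/b/c')
    assert b'a/b' not in d  # no remaining file under a/b
    assert b'a' in d        # still referenced by a/d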
323 if util.safehasattr(parsers, 'dirs'):
331 if util.safehasattr(parsers, 'dirs'):
324 dirs = parsers.dirs
332 dirs = parsers.dirs
325
333
326 if rustdirs is not None:
334 if rustdirs is not None:
327 dirs = rustdirs
335 dirs = rustdirs
328
336
329
337
330 # forward two methods from posixpath that do what we need, but we'd
338 # forward two methods from posixpath that do what we need, but we'd
331 # rather not let our internals know that we're thinking in posix terms
339 # rather not let our internals know that we're thinking in posix terms
332 # - instead we'll let them be oblivious.
340 # - instead we'll let them be oblivious.
333 join = posixpath.join
341 join = posixpath.join
334 dirname = posixpath.dirname
342 dirname = posixpath.dirname
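Because these forwards come from posixpath, repo-relative paths keep b'/'
separators on every platform. A small sketch:

    from mercurial import pathutil

    assert pathutil.join(b'dir', b'file.txt') == b'dir/file.txt'
    assert pathutil.dirname(b'dir/sub/file.txt') == b'dir/sub'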