# Scraped changeset view (diff viewer export).
# Changeset: r43196:34ed651b (branch: default), author: Augie Fackler
# Summary:   cleanup: fix leakage of dirstate._map to client code
# Files:     hgext/largefiles/reposetup.py, mercurial/debugcommands.py
@@ -1,393 +1,393 b''
1 # Copyright 2009-2010 Gregory P. Ward
1 # Copyright 2009-2010 Gregory P. Ward
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 # Copyright 2010-2011 Fog Creek Software
3 # Copyright 2010-2011 Fog Creek Software
4 # Copyright 2010-2011 Unity Technologies
4 # Copyright 2010-2011 Unity Technologies
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 '''setup for largefiles repositories: reposetup'''
9 '''setup for largefiles repositories: reposetup'''
10 from __future__ import absolute_import
10 from __future__ import absolute_import
11
11
12 import copy
12 import copy
13
13
14 from mercurial.i18n import _
14 from mercurial.i18n import _
15
15
16 from mercurial import (
16 from mercurial import (
17 error,
17 error,
18 localrepo,
18 localrepo,
19 match as matchmod,
19 match as matchmod,
20 scmutil,
20 scmutil,
21 )
21 )
22
22
23 from . import (
23 from . import (
24 lfcommands,
24 lfcommands,
25 lfutil,
25 lfutil,
26 )
26 )
27
27
def reposetup(ui, repo):
    """Wrap ``repo`` with largefiles-aware behavior.

    Replaces ``repo.__class__`` with a subclass that understands largefile
    standins, installs the pre-commit/pre-push hooks, and registers the
    ``changegroup``/``commit`` hooks that add the 'largefiles' requirement
    when largefiles first appear in the store. Non-local (wire) repos are
    left untouched.
    """
    # wire repositories should be given new wireproto functions
    # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs"
    if not repo.local():
        return

    class lfilesrepo(repo.__class__):
        # the mark to examine whether "repo" object enables largefiles or not
        _largefilesenabled = True

        lfstatus = False

        def status_nolfiles(self, *args, **kwargs):
            # Escape hatch: plain status, bypassing largefile translation.
            return super(lfilesrepo, self).status(*args, **kwargs)

        # When lfstatus is set, return a context that gives the names
        # of largefiles instead of their corresponding standins and
        # identifies the largefiles as always binary, regardless of
        # their actual contents.
        def __getitem__(self, changeid):
            ctx = super(lfilesrepo, self).__getitem__(changeid)
            if self.lfstatus:
                class lfilesctx(ctx.__class__):
                    def files(self):
                        filenames = super(lfilesctx, self).files()
                        return [lfutil.splitstandin(f) or f for f in filenames]

                    def manifest(self):
                        man1 = super(lfilesctx, self).manifest()

                        class lfilesmanifest(man1.__class__):
                            def __contains__(self, filename):
                                orig = super(lfilesmanifest, self).__contains__
                                return (orig(filename) or
                                        orig(lfutil.standin(filename)))
                        man1.__class__ = lfilesmanifest
                        return man1

                    def filectx(self, path, fileid=None, filelog=None):
                        orig = super(lfilesctx, self).filectx
                        try:
                            if filelog is not None:
                                result = orig(path, fileid, filelog)
                            else:
                                result = orig(path, fileid)
                        except error.LookupError:
                            # Adding a null character will cause Mercurial to
                            # identify this as a binary file.
                            if filelog is not None:
                                result = orig(lfutil.standin(path), fileid,
                                              filelog)
                            else:
                                result = orig(lfutil.standin(path), fileid)
                            olddata = result.data
                            result.data = lambda: olddata() + '\0'
                        return result
                ctx.__class__ = lfilesctx
            return ctx

        # Figure out the status of big files and insert them into the
        # appropriate list in the result. Also removes standin files
        # from the listing. Revert to the original status if
        # self.lfstatus is False.
        # XXX large file status is buggy when used on repo proxy.
        # XXX this needs to be investigated.
        @localrepo.unfilteredmethod
        def status(self, node1='.', node2=None, match=None, ignored=False,
                   clean=False, unknown=False, listsubrepos=False):
            listignored, listclean, listunknown = ignored, clean, unknown
            orig = super(lfilesrepo, self).status
            if not self.lfstatus:
                return orig(node1, node2, match, listignored, listclean,
                            listunknown, listsubrepos)

            # some calls in this function rely on the old version of status
            self.lfstatus = False
            ctx1 = self[node1]
            ctx2 = self[node2]
            working = ctx2.rev() is None
            parentworking = working and ctx1 == self['.']

            if match is None:
                match = matchmod.always()

            wlock = None
            try:
                try:
                    # updating the dirstate is optional
                    # so we don't wait on the lock
                    wlock = self.wlock(False)
                except error.LockError:
                    pass

                # First check if paths or patterns were specified on the
                # command line. If there were, and they don't match any
                # largefiles, we should just bail here and let super
                # handle it -- thus gaining a big performance boost.
                lfdirstate = lfutil.openlfdirstate(ui, self)
                if not match.always():
                    for f in lfdirstate:
                        if match(f):
                            break
                    else:
                        return orig(node1, node2, match, listignored,
                                    listclean, listunknown, listsubrepos)

                # Create a copy of match that matches standins instead
                # of largefiles.
                def tostandins(files):
                    if not working:
                        return files
                    newfiles = []
                    dirstate = self.dirstate
                    for f in files:
                        sf = lfutil.standin(f)
                        if sf in dirstate:
                            newfiles.append(sf)
                        elif dirstate.hasdir(sf):
                            # Directory entries could be regular or
                            # standin, check both
                            newfiles.extend((f, sf))
                        else:
                            newfiles.append(f)
                    return newfiles

                m = copy.copy(match)
                m._files = tostandins(m._files)

                result = orig(node1, node2, m, ignored, clean, unknown,
                              listsubrepos)
                if working:

                    def sfindirstate(f):
                        sf = lfutil.standin(f)
                        dirstate = self.dirstate
                        return sf in dirstate or dirstate.hasdir(sf)

                    match._files = [f for f in match._files
                                    if sfindirstate(f)]
                    # Don't waste time getting the ignored and unknown
                    # files from lfdirstate
                    unsure, s = lfdirstate.status(match, subrepos=[],
                                                  ignored=False,
                                                  clean=listclean,
                                                  unknown=False)
                    (modified, added, removed, deleted, clean) = (
                        s.modified, s.added, s.removed, s.deleted, s.clean)
                    if parentworking:
                        for lfile in unsure:
                            standin = lfutil.standin(lfile)
                            if standin not in ctx1:
                                # from second parent
                                modified.append(lfile)
                            elif (lfutil.readasstandin(ctx1[standin])
                                  != lfutil.hashfile(self.wjoin(lfile))):
                                modified.append(lfile)
                            else:
                                if listclean:
                                    clean.append(lfile)
                                lfdirstate.normal(lfile)
                    else:
                        tocheck = unsure + modified + added + clean
                        modified, added, clean = [], [], []
                        checkexec = self.dirstate._checkexec

                        for lfile in tocheck:
                            standin = lfutil.standin(lfile)
                            if standin in ctx1:
                                abslfile = self.wjoin(lfile)
                                if ((lfutil.readasstandin(ctx1[standin]) !=
                                     lfutil.hashfile(abslfile)) or
                                    (checkexec and
                                     ('x' in ctx1.flags(standin)) !=
                                     bool(lfutil.getexecutable(abslfile)))):
                                    modified.append(lfile)
                                elif listclean:
                                    clean.append(lfile)
                            else:
                                added.append(lfile)

                    # at this point, 'removed' contains largefiles
                    # marked as 'R' in the working context.
                    # then, largefiles not managed also in the target
                    # context should be excluded from 'removed'.
                    removed = [lfile for lfile in removed
                               if lfutil.standin(lfile) in ctx1]

                    # Standins no longer found in lfdirstate have been deleted
                    for standin in ctx1.walk(lfutil.getstandinmatcher(self)):
                        lfile = lfutil.splitstandin(standin)
                        if not match(lfile):
                            continue
                        if lfile not in lfdirstate:
                            deleted.append(lfile)
                            # Sync "largefile has been removed" back to the
                            # standin. Removing a file as a side effect of
                            # running status is gross, but the alternatives (if
                            # any) are worse.
                            self.wvfs.unlinkpath(standin, ignoremissing=True)

                    # Filter result lists
                    result = list(result)

                    # Largefiles are not really removed when they're
                    # still in the normal dirstate. Likewise, normal
                    # files are not really removed if they are still in
                    # lfdirstate. This happens in merges where files
                    # change type.
                    removed = [f for f in removed
                               if f not in self.dirstate]
                    result[2] = [f for f in result[2]
                                 if f not in lfdirstate]

                    # Iterate the dirstate object itself instead of
                    # reaching into its private _map attribute.
                    lfiles = set(lfdirstate)
                    # Unknown files
                    result[4] = set(result[4]).difference(lfiles)
                    # Ignored files
                    result[5] = set(result[5]).difference(lfiles)
                    # combine normal files and largefiles
                    normals = [[fn for fn in filelist
                                if not lfutil.isstandin(fn)]
                               for filelist in result]
                    lfstatus = (modified, added, removed, deleted, [], [],
                                clean)
                    result = [sorted(list1 + list2)
                              for (list1, list2) in zip(normals, lfstatus)]
                else:  # not against working directory
                    result = [[lfutil.splitstandin(f) or f for f in items]
                              for items in result]

                if wlock:
                    lfdirstate.write()

            finally:
                if wlock:
                    wlock.release()

            self.lfstatus = True
            return scmutil.status(*result)

        def commitctx(self, ctx, *args, **kwargs):
            node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs)

            class lfilesctx(ctx.__class__):
                def markcommitted(self, node):
                    orig = super(lfilesctx, self).markcommitted
                    return lfutil.markcommitted(orig, self, node)
            ctx.__class__ = lfilesctx
            return node

        # Before commit, largefile standins have not had their
        # contents updated to reflect the hash of their largefile.
        # Do that here.
        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra=None):
            if extra is None:
                extra = {}
            orig = super(lfilesrepo, self).commit

            with self.wlock():
                lfcommithook = self._lfcommithooks[-1]
                match = lfcommithook(self, match)
                result = orig(text=text, user=user, date=date, match=match,
                              force=force, editor=editor, extra=extra)
                return result

        def push(self, remote, force=False, revs=None, newbranch=False):
            if remote.local():
                missing = set(self.requirements) - remote.local().supported
                if missing:
                    msg = _("required features are not"
                            " supported in the destination:"
                            " %s") % (', '.join(sorted(missing)))
                    raise error.Abort(msg)
            return super(lfilesrepo, self).push(remote, force=force,
                                                revs=revs,
                                                newbranch=newbranch)

        # TODO: _subdirlfs should be moved into "lfutil.py", because
        # it is referred only from "lfutil.updatestandinsbymatch"
        def _subdirlfs(self, files, lfiles):
            '''
            Adjust matched file list
            If we pass a directory to commit whose only committable files
            are largefiles, the core commit code aborts before finding
            the largefiles.
            So we do the following:
            For directories that only have largefiles as matches,
            we explicitly add the largefiles to the match list and remove
            the directory.
            In other cases, we leave the match list unmodified.
            '''
            actualfiles = []
            dirs = []
            regulars = []

            for f in files:
                if lfutil.isstandin(f + '/'):
                    raise error.Abort(
                        _('file "%s" is a largefile standin') % f,
                        hint=('commit the largefile itself instead'))
                # Scan directories
                if self.wvfs.isdir(f):
                    dirs.append(f)
                else:
                    regulars.append(f)

            for f in dirs:
                matcheddir = False
                d = self.dirstate.normalize(f) + '/'
                # Check for matched normal files
                for mf in regulars:
                    if self.dirstate.normalize(mf).startswith(d):
                        actualfiles.append(f)
                        matcheddir = True
                        break
                if not matcheddir:
                    # If no normal match, manually append
                    # any matching largefiles
                    for lf in lfiles:
                        if self.dirstate.normalize(lf).startswith(d):
                            actualfiles.append(lf)
                            if not matcheddir:
                                # There may still be normal files in the dir, so
                                # add a directory to the list, which
                                # forces status/dirstate to walk all files and
                                # call the match function on the matcher, even
                                # on case sensitive filesystems.
                                actualfiles.append('.')
                                matcheddir = True
                # Nothing in dir, so readd it
                # and let commit reject it
                if not matcheddir:
                    actualfiles.append(f)

            # Always add normal files
            actualfiles += regulars
            return actualfiles

    repo.__class__ = lfilesrepo

    # stack of hooks being executed before committing.
    # only last element ("_lfcommithooks[-1]") is used for each committing.
    repo._lfcommithooks = [lfutil.updatestandinsbymatch]

    # Stack of status writer functions taking "*msg, **opts" arguments
    # like "ui.status()". Only last element ("_lfstatuswriters[-1]")
    # is used to write status out.
    repo._lfstatuswriters = [ui.status]

    def prepushoutgoinghook(pushop):
        """Push largefiles for pushop before pushing revisions."""
        lfrevs = pushop.lfrevs
        if lfrevs is None:
            lfrevs = pushop.outgoing.missing
        if lfrevs:
            toupload = set()
            addfunc = lambda fn, lfhash: toupload.add(lfhash)
            lfutil.getlfilestoupload(pushop.repo, lfrevs,
                                     addfunc)
            lfcommands.uploadlfiles(ui, pushop.repo, pushop.remote, toupload)
    repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook)

    def checkrequireslfiles(ui, repo, **kwargs):
        # Add the 'largefiles' requirement as soon as any standin file
        # (lfutil.shortname directory) appears in the store.
        if 'largefiles' not in repo.requirements and any(
                lfutil.shortname + '/' in f[0]
                for f in repo.store.datafiles()):
            repo.requirements.add('largefiles')
            repo._writerequirements()

    ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles,
                 'largefiles')
    ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles')
@@ -1,3499 +1,3499 b''
1 # debugcommands.py - command processing for debug* commands
1 # debugcommands.py - command processing for debug* commands
2 #
2 #
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2016 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import codecs
10 import codecs
11 import collections
11 import collections
12 import difflib
12 import difflib
13 import errno
13 import errno
14 import operator
14 import operator
15 import os
15 import os
16 import random
16 import random
17 import re
17 import re
18 import socket
18 import socket
19 import ssl
19 import ssl
20 import stat
20 import stat
21 import string
21 import string
22 import subprocess
22 import subprocess
23 import sys
23 import sys
24 import time
24 import time
25
25
26 from .i18n import _
26 from .i18n import _
27 from .node import (
27 from .node import (
28 bin,
28 bin,
29 hex,
29 hex,
30 nullhex,
30 nullhex,
31 nullid,
31 nullid,
32 nullrev,
32 nullrev,
33 short,
33 short,
34 )
34 )
35 from . import (
35 from . import (
36 bundle2,
36 bundle2,
37 changegroup,
37 changegroup,
38 cmdutil,
38 cmdutil,
39 color,
39 color,
40 context,
40 context,
41 copies,
41 copies,
42 dagparser,
42 dagparser,
43 encoding,
43 encoding,
44 error,
44 error,
45 exchange,
45 exchange,
46 extensions,
46 extensions,
47 filemerge,
47 filemerge,
48 filesetlang,
48 filesetlang,
49 formatter,
49 formatter,
50 hg,
50 hg,
51 httppeer,
51 httppeer,
52 localrepo,
52 localrepo,
53 lock as lockmod,
53 lock as lockmod,
54 logcmdutil,
54 logcmdutil,
55 merge as mergemod,
55 merge as mergemod,
56 obsolete,
56 obsolete,
57 obsutil,
57 obsutil,
58 phases,
58 phases,
59 policy,
59 policy,
60 pvec,
60 pvec,
61 pycompat,
61 pycompat,
62 registrar,
62 registrar,
63 repair,
63 repair,
64 revlog,
64 revlog,
65 revset,
65 revset,
66 revsetlang,
66 revsetlang,
67 scmutil,
67 scmutil,
68 setdiscovery,
68 setdiscovery,
69 simplemerge,
69 simplemerge,
70 sshpeer,
70 sshpeer,
71 sslutil,
71 sslutil,
72 streamclone,
72 streamclone,
73 templater,
73 templater,
74 treediscovery,
74 treediscovery,
75 upgrade,
75 upgrade,
76 url as urlmod,
76 url as urlmod,
77 util,
77 util,
78 vfs as vfsmod,
78 vfs as vfsmod,
79 wireprotoframing,
79 wireprotoframing,
80 wireprotoserver,
80 wireprotoserver,
81 wireprotov2peer,
81 wireprotov2peer,
82 )
82 )
83 from .utils import (
83 from .utils import (
84 cborutil,
84 cborutil,
85 compression,
85 compression,
86 dateutil,
86 dateutil,
87 procutil,
87 procutil,
88 stringutil,
88 stringutil,
89 )
89 )
90
90
91 from .revlogutils import (
91 from .revlogutils import (
92 deltas as deltautil
92 deltas as deltautil
93 )
93 )
94
94
95 release = lockmod.release
95 release = lockmod.release
96
96
97 command = registrar.command()
97 command = registrar.command()
98
98
@command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True)
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index

    With three arguments the first is a standalone revlog index file;
    with two arguments the repo's changelog is used. Writes the
    ancestor as "rev:hex" to the ui.
    """
    if len(args) == 3:
        # Standalone index: open the revlog directly, unaudited, from cwd.
        index, rev1, rev2 = args
        r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index)
        lookup = r.lookup
    elif len(args) == 2:
        if not repo:
            raise error.Abort(_('there is no Mercurial repository here '
                                '(.hg not found)'))
        rev1, rev2 = args
        r = repo.changelog
        lookup = repo.lookup
    else:
        raise error.Abort(_('either two or three arguments required'))
    a = r.ancestor(lookup(rev1), lookup(rev2))
    ui.write('%d:%s\n' % (r.rev(a), hex(a)))
117
117
@command('debugapplystreamclonebundle', [], 'FILE')
def debugapplystreamclonebundle(ui, repo, fname):
    """apply a stream clone bundle file"""
    # Open the bundle (path or URL), identify its type, and apply it
    # directly to the repository store.
    f = hg.openpath(ui, fname)
    gen = exchange.readbundle(ui, f, fname)
    gen.apply(repo)
124
124
@command('debugbuilddag',
    [('m', 'mergeable-file', None, _('add single file mergeable changes')),
    ('o', 'overwritten-file', None, _('add single file all revs overwrite')),
    ('n', 'new-file', None, _('add new file at each rev'))],
    _('[OPTION]... [TEXT]'))
def debugbuilddag(ui, repo, text=None,
                  mergeable_file=False,
                  overwritten_file=False,
                  new_file=False):
    """builds a repo with a given DAG from scratch in the current empty repo

    The description of the DAG is read from stdin if not given on the
    command line.

    Elements:

     - "+n" is a linear run of n nodes based on the current default parent
     - "." is a single node based on the current default parent
     - "$" resets the default parent to null (implied at the start);
           otherwise the default parent is always the last node created
     - "<p" sets the default parent to the backref p
     - "*p" is a fork at parent p, which is a backref
     - "*p1/p2" is a merge of parents p1 and p2, which are backrefs
     - "/p2" is a merge of the preceding node and p2
     - ":tag" defines a local tag for the preceding node
     - "@branch" sets the named branch for subsequent nodes
     - "#...\\n" is a comment up to the end of the line

    Whitespace between the above elements is ignored.

    A backref is either

     - a number n, which references the node curr-n, where curr is the current
       node, or
     - the name of a local tag you placed earlier using ":tag", or
     - empty to denote the default parent.

    All string valued-elements are either strictly alphanumeric, or must
    be enclosed in double quotes ("..."), with "\\" as escape character.
    """

    if text is None:
        ui.status(_("reading DAG from stdin\n"))
        text = ui.fin.read()

    # This command only makes sense on a pristine repo: revision numbers
    # emitted by the DAG description map 1:1 onto new changelog revisions.
    cl = repo.changelog
    if len(cl) > 0:
        raise error.Abort(_('repository is not empty'))

    # determine number of revs in DAG
    # NOTE: 'type' and 'id' intentionally shadow builtins here; they mirror
    # the (type, data) event tuples produced by dagparser.parsedag().
    total = 0
    for type, data in dagparser.parsedag(text):
        if type == 'n':
            total += 1

    if mergeable_file:
        linesperrev = 2
        # make a file with k lines per rev
        initialmergedlines = ['%d' % i
                              for i in pycompat.xrange(0, total * linesperrev)]
        initialmergedlines.append("")

    tags = []
    progress = ui.makeprogress(_('building'), unit=_('revisions'),
                               total=total)
    # All commits happen inside a single transaction under both locks.
    with progress, repo.wlock(), repo.lock(), repo.transaction("builddag"):
        at = -1                   # revision number of the last node created
        atbranch = 'default'      # named branch for subsequent commits
        nodeids = []              # rev number -> node id, for backrefs
        id = 0
        progress.update(id)
        for type, data in dagparser.parsedag(text):
            if type == 'n':
                # 'n' event: create a new commit; ps are parent rev numbers.
                ui.note(('node %s\n' % pycompat.bytestr(data)))
                id, ps = data

                files = []
                filecontent = {}

                p2 = None
                if mergeable_file:
                    # "mf" carries linesperrev lines per revision so merges
                    # between revisions produce clean three-way merges.
                    fn = "mf"
                    p1 = repo[ps[0]]
                    if len(ps) > 1:
                        p2 = repo[ps[1]]
                        pa = p1.ancestor(p2)
                        base, local, other = [x[fn].data() for x in (pa, p1,
                                                                     p2)]
                        m3 = simplemerge.Merge3Text(base, local, other)
                        ml = [l.strip() for l in m3.merge_lines()]
                        ml.append("")
                    elif at > 0:
                        ml = p1[fn].data().split("\n")
                    else:
                        ml = initialmergedlines
                    # Tag this revision's own line so every rev changes "mf".
                    ml[id * linesperrev] += " r%i" % id
                    mergedtext = "\n".join(ml)
                    files.append(fn)
                    filecontent[fn] = mergedtext

                if overwritten_file:
                    # "of" is rewritten wholesale at every revision.
                    fn = "of"
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id

                if new_file:
                    # A brand new "nf<rev>" file per revision; merges also
                    # carry over p2's nf* files so they survive the merge.
                    fn = "nf%i" % id
                    files.append(fn)
                    filecontent[fn] = "r%i\n" % id
                    if len(ps) > 1:
                        if not p2:
                            p2 = repo[ps[1]]
                        for fn in p2:
                            if fn.startswith("nf"):
                                files.append(fn)
                                filecontent[fn] = p2[fn].data()

                def fctxfn(repo, cx, path):
                    # memctx callback: serve file data from filecontent,
                    # None means "file absent in this commit".
                    if path in filecontent:
                        return context.memfilectx(repo, cx, path,
                                                  filecontent[path])
                    return None

                # Translate parent rev numbers into node ids via nodeids;
                # negative/absent parents mean the null revision.
                if len(ps) == 0 or ps[0] < 0:
                    pars = [None, None]
                elif len(ps) == 1:
                    pars = [nodeids[ps[0]], None]
                else:
                    pars = [nodeids[p] for p in ps]
                cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn,
                                    date=(id, 0),
                                    user="debugbuilddag",
                                    extra={'branch': atbranch})
                nodeid = repo.commitctx(cx)
                nodeids.append(nodeid)
                at = id
            elif type == 'l':
                # 'l' event: record a local tag for rev 'id'.
                id, name = data
                ui.note(('tag %s\n' % name))
                tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name))
            elif type == 'a':
                # 'a' event: switch the named branch for subsequent nodes.
                ui.note(('branch %s\n' % data))
                atbranch = data
            progress.update(id)

        # Local tags are written in one shot at the end.
        if tags:
            repo.vfs.write("localtags", "".join(tags))
272
272
def _debugchangegroup(ui, gen, all=None, indent=0, **opts):
    """dump a (possibly nested) changegroup ``gen`` to ``ui``

    With ``all`` set, every section (changelog, manifest, filelogs) is
    printed with full delta metadata; otherwise only changelog node ids
    are shown. ``indent`` prefixes every output line (used when nested
    inside a bundle2 dump).
    """
    pad = ' ' * indent

    if not all:
        # Terse mode: only the node of each changelog delta.
        if isinstance(gen, bundle2.unbundle20):
            raise error.Abort(_('use debugbundle2 for this file'))
        gen.changelogheader()
        for node, p1, p2, cs, deltabase, delta, flags in gen.deltaiter():
            ui.write("%s%s\n" % (pad, hex(node)))
        return

    ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n")
             % pad)

    def dumpsection(named):
        # Consume and print every delta of the current stream section.
        ui.write("\n%s%s\n" % (pad, named))
        for node, p1, p2, cs, deltabase, delta, flags in gen.deltaiter():
            ui.write("%s%s %s %s %s %s %d\n" %
                     (pad, hex(node), hex(p1), hex(p2),
                      hex(cs), hex(deltabase), len(delta)))

    # The stream must be consumed in order: changelog, manifest, then one
    # section per filelog until the empty sentinel header.
    gen.changelogheader()
    dumpsection("changelog")
    gen.manifestheader()
    dumpsection("manifest")
    for chunkdata in iter(gen.filelogheader, {}):
        dumpsection(chunkdata['filename'])
301
301
def _debugobsmarkers(ui, part, indent=0, **opts):
    """display version and markers contained in 'data'"""
    opts = pycompat.byteskwargs(opts)
    data = part.read()
    pad = ' ' * indent
    try:
        version, markers = obsolete._readmarkers(data)
    except error.UnknownVersion as exc:
        # Unknown on-disk format: report the version byte we failed on.
        ui.write("%sunsupported version: %s (%d bytes)\n"
                 % (pad, exc.version, len(data)))
    else:
        ui.write("%sversion: %d (%d bytes)\n" % (pad, version, len(data)))
        fm = ui.formatter('debugobsolete', opts)
        for rawmarker in sorted(markers):
            fm.startitem()
            fm.plain(pad)
            cmdutil.showmarker(fm, obsutil.marker(None, rawmarker))
        fm.end()
324
324
def _debugphaseheads(ui, data, indent=0):
    """display the phase heads encoded in binary blob 'data'"""
    pad = ' ' * indent
    headsbyphase = phases.binarydecode(data)
    for phase in phases.allphases:
        for head in headsbyphase[phase]:
            ui.write('%s%s %s\n' % (pad, hex(head), phases.phasenames[phase]))
333
333
def _quasirepr(thing):
    """repr()-like rendering with deterministic (sorted) mapping order"""
    if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)):
        entries = (b'%s: %s' % (k, thing[k]) for k in sorted(thing))
        return '{%s}' % b', '.join(entries)
    return pycompat.bytestr(repr(thing))
339
339
def _debugbundle2(ui, gen, all=None, **opts):
    """lists the contents of a bundle2"""
    if not isinstance(gen, bundle2.unbundle20):
        raise error.Abort(_('not a bundle2 file'))
    ui.write(('Stream params: %s\n' % _quasirepr(gen.params)))
    # Optional filter: only dump parts whose type was asked for.
    parttypes = opts.get(r'part_type', [])
    for part in gen.iterparts():
        if parttypes and part.type not in parttypes:
            continue
        ui.write(('%s -- %s (mandatory: %r)\n'
                  % (part.type, _quasirepr(part.params), part.mandatory)))
        # Known payload types get a detailed, indented dump unless quiet.
        if part.type == 'changegroup':
            version = part.params.get('version', '01')
            cg = changegroup.getunbundler(version, part, 'UN')
            if not ui.quiet:
                _debugchangegroup(ui, cg, all=all, indent=4, **opts)
        elif part.type == 'obsmarkers':
            if not ui.quiet:
                _debugobsmarkers(ui, part, indent=4, **opts)
        elif part.type == 'phase-heads':
            if not ui.quiet:
                _debugphaseheads(ui, part, indent=4)
362
362
@command('debugbundle',
        [('a', 'all', None, _('show all details')),
         ('', 'part-type', [], _('show only the named part type')),
         ('', 'spec', None, _('print the bundlespec of the bundle'))],
        _('FILE'),
        norepo=True)
def debugbundle(ui, bundlepath, all=None, spec=None, **opts):
    """lists the contents of a bundle"""
    with hg.openpath(ui, bundlepath) as f:
        if spec:
            # Only report the bundlespec; do not read the bundle body.
            bundlespec = exchange.getbundlespec(ui, f)
            ui.write('%s\n' % bundlespec)
            return

        gen = exchange.readbundle(ui, f, bundlepath)
        # bundle2 has its own dumper; plain changegroups go straight through.
        if isinstance(gen, bundle2.unbundle20):
            return _debugbundle2(ui, gen, all=all, **opts)
        _debugchangegroup(ui, gen, all=all, **opts)
381
381
@command('debugcapabilities',
        [], _('PATH'),
        norepo=True)
def debugcapabilities(ui, path, **opts):
    """lists the capabilities of a remote peer"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, path)
    # Wire-protocol capabilities first, sorted for stable output.
    ui.write(('Main capabilities:\n'))
    for capability in sorted(peer.capabilities()):
        ui.write((' %s\n') % capability)
    # Then the bundle2 capability tree, if the peer advertises one.
    b2caps = bundle2.bundle2caps(peer)
    if b2caps:
        ui.write(('Bundle2 capabilities:\n'))
        for key, values in sorted(b2caps.iteritems()):
            ui.write((' %s\n') % key)
            for v in values:
                ui.write(('  %s\n') % v)
400
400
@command('debugcheckstate', [], '')
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    parent1, parent2 = repo.dirstate.parents()
    m1 = repo[parent1].manifest()
    m2 = repo[parent2].manifest()
    errors = 0
    # Pass 1: every dirstate entry must be consistent with the parent
    # manifests for its state ('n'ormal/'r'emoved/'a'dded/'m'erged).
    for f in repo.dirstate:
        state = repo.dirstate[f]
        if state in "nr" and f not in m1:
            ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state))
            errors += 1
        if state in "a" and f in m1:
            ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state))
            errors += 1
        if state in "m" and f not in m1 and f not in m2:
            ui.warn(_("%s in state %s, but not in either manifest\n") %
                    (f, state))
            errors += 1
    # Pass 2: every file in manifest1 must be tracked by the dirstate.
    for f in m1:
        state = repo.dirstate[f]
        if state not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (f, state))
            errors += 1
    if errors:
        # BUG FIX: the local variable was previously named 'error', which
        # shadowed the imported 'error' module, so 'error.Abort' raised
        # AttributeError instead of the intended Abort.
        errstr = _(".hg/dirstate inconsistent with current parent's manifest")
        raise error.Abort(errstr)
428
428
@command('debugcolor',
        [('', 'style', None, _('show all configured styles'))],
        'hg debugcolor')
def debugcolor(ui, repo, **opts):
    """show available color, effects or style"""
    ui.write(('color mode: %s\n') % stringutil.pprint(ui._colormode))
    # --style dumps configured styles; the default view lists raw colors.
    if opts.get(r'style'):
        return _debugdisplaystyle(ui)
    return _debugdisplaycolor(ui)
439
439
def _debugdisplaycolor(ui):
    """print every available color label, rendered in its own color"""
    # Work on a copy so the caller's ui styles are left untouched.
    ui = ui.copy()
    ui._styles.clear()
    for effect in color._activeeffects(ui).keys():
        ui._styles[effect] = effect
    if ui._terminfoparams:
        # terminfo mode: pull user-defined color./terminfo. entries too.
        for k, v in ui.configitems('color'):
            if k.startswith('color.'):
                ui._styles[k] = k[6:]
            elif k.startswith('terminfo.'):
                ui._styles[k] = k[9:]
    ui.write(_('available colors:\n'))
    # sort label with a '_' after the other to group '_background' entry.
    def sortkey(item):
        return ('_' in item[0], item[0], item[1])
    for colorname, label in sorted(ui._styles.items(), key=sortkey):
        ui.write(('%s\n') % colorname, label=label)
457
457
def _debugdisplaystyle(ui):
    """print each configured style label with its effects, colorized"""
    ui.write(_('available style:\n'))
    if not ui._styles:
        return
    # Pad effect lists to a common column past the longest label.
    width = max(len(s) for s in ui._styles)
    for label, effects in sorted(ui._styles.items()):
        ui.write('%s' % label, label=label)
        if effects:
            ui.write(': ')
            ui.write(' ' * (max(0, width - len(label))))
            ui.write(', '.join(ui.label(e, e) for e in effects.split()))
        ui.write('\n')
471
471
@command('debugcreatestreamclonebundle', [], 'FILE')
def debugcreatestreamclonebundle(ui, repo, fname):
    """create a stream clone bundle file

    Stream bundles are special bundles that are essentially archives of
    revlog files. They are commonly used for cloning very quickly.
    """
    # TODO we may want to turn this into an abort when this functionality
    # is moved into `hg bundle`.
    if phases.hassecret(repo):
        ui.warn(_('(warning: stream clone bundle will contain secret '
                  'revisions)\n'))

    requirements, chunks = streamclone.generatebundlev1(repo)
    changegroup.writechunks(ui, chunks, fname)

    ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements)))
489
489
@command('debugdag',
    [('t', 'tags', None, _('use tags as labels')),
    ('b', 'branches', None, _('annotate with branch names')),
    ('', 'dots', None, _('use dots for runs')),
    ('s', 'spaces', None, _('separate elements by spaces'))],
    _('[OPTION]... [FILE [REV]...]'),
    optionalrepo=True)
def debugdag(ui, repo, file_=None, *revs, **opts):
    """format the changelog or an index DAG as a concise textual description

    If you pass a revlog index, the revlog's DAG is emitted. If you list
    revision numbers, they get labeled in the output as rN.

    Otherwise, the changelog DAG of the current repo is emitted.
    """
    spaces = opts.get(r'spaces')
    dots = opts.get(r'dots')
    if file_:
        # Standalone revlog file: walk its index directly (no repo needed).
        rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False),
                             file_)
        revs = set((int(r) for r in revs))
        def events():
            # Yield (type, data) events for dagparser.dagtextlines():
            # 'n' = node with its non-null parent revs, 'l' = label.
            for r in rlog:
                yield 'n', (r, list(p for p in rlog.parentrevs(r)
                                    if p != -1))
                if r in revs:
                    # Explicitly requested revs get an rN label.
                    yield 'l', (r, "r%i" % r)
    elif repo:
        # Repo mode: walk the changelog, optionally labelling with tags
        # and annotating with branch changes.
        cl = repo.changelog
        tags = opts.get(r'tags')
        branches = opts.get(r'branches')
        if tags:
            # Map changelog rev -> list of tag names pointing at it.
            labels = {}
            for l, n in repo.tags().items():
                labels.setdefault(cl.rev(n), []).append(l)
        def events():
            b = "default"
            for r in cl:
                if branches:
                    # Emit an 'a' (annotation) event whenever the named
                    # branch changes; index 5 of cl.read() is the extras
                    # dict carrying 'branch'.
                    newb = cl.read(cl.node(r))[5]['branch']
                    if newb != b:
                        yield 'a', newb
                        b = newb
                yield 'n', (r, list(p for p in cl.parentrevs(r)
                                    if p != -1))
                if tags:
                    ls = labels.get(r)
                    if ls:
                        for l in ls:
                            yield 'l', (r, l)
    else:
        raise error.Abort(_('need repo for changelog dag'))

    # Render the event stream as wrapped DAG text lines.
    for line in dagparser.dagtextlines(events(),
                                       addspaces=spaces,
                                       wraplabels=True,
                                       wrapannotations=True,
                                       wrapnonlinear=dots,
                                       usedots=dots,
                                       maxlinewidth=70):
        ui.write(line)
    ui.write("\n")
552
552
@command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV'))
def debugdata(ui, repo, file_, rev=None, **opts):
    """dump the contents of a data file revision"""
    opts = pycompat.byteskwargs(opts)
    # With -c/-m/--dir the first positional is actually the revision,
    # so shift the arguments accordingly.
    if opts.get('changelog') or opts.get('manifest') or opts.get('dir'):
        if rev is not None:
            raise error.CommandError('debugdata', _('invalid arguments'))
        file_, rev = None, file_
    elif rev is None:
        raise error.CommandError('debugdata', _('invalid arguments'))
    store = cmdutil.openstorage(repo, 'debugdata', file_, opts)
    try:
        ui.write(store.rawdata(store.lookup(rev)))
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)
568
568
@command('debugdate',
        [('e', 'extended', None, _('try extended date formats'))],
        _('[-e] DATE [RANGE]'),
        norepo=True, optionalrepo=True)
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # -e also tries the extended (less common) date formats.
    if opts[r"extended"]:
        parsed = dateutil.parsedate(date, util.extendeddateformats)
    else:
        parsed = dateutil.parsedate(date)
    ui.write(("internal: %d %d\n") % parsed)
    ui.write(("standard: %s\n") % dateutil.datestr(parsed))
    if range:
        # Optional second argument: test the date against a range spec.
        matchfn = dateutil.matchdate(range)
        ui.write(("match: %s\n") % matchfn(parsed[0]))
584
584
585 @command('debugdeltachain',
585 @command('debugdeltachain',
586 cmdutil.debugrevlogopts + cmdutil.formatteropts,
586 cmdutil.debugrevlogopts + cmdutil.formatteropts,
587 _('-c|-m|FILE'),
587 _('-c|-m|FILE'),
588 optionalrepo=True)
588 optionalrepo=True)
589 def debugdeltachain(ui, repo, file_=None, **opts):
589 def debugdeltachain(ui, repo, file_=None, **opts):
590 """dump information about delta chains in a revlog
590 """dump information about delta chains in a revlog
591
591
592 Output can be templatized. Available template keywords are:
592 Output can be templatized. Available template keywords are:
593
593
594 :``rev``: revision number
594 :``rev``: revision number
595 :``chainid``: delta chain identifier (numbered by unique base)
595 :``chainid``: delta chain identifier (numbered by unique base)
596 :``chainlen``: delta chain length to this revision
596 :``chainlen``: delta chain length to this revision
597 :``prevrev``: previous revision in delta chain
597 :``prevrev``: previous revision in delta chain
598 :``deltatype``: role of delta / how it was computed
598 :``deltatype``: role of delta / how it was computed
599 :``compsize``: compressed size of revision
599 :``compsize``: compressed size of revision
600 :``uncompsize``: uncompressed size of revision
600 :``uncompsize``: uncompressed size of revision
601 :``chainsize``: total size of compressed revisions in chain
601 :``chainsize``: total size of compressed revisions in chain
602 :``chainratio``: total chain size divided by uncompressed revision size
602 :``chainratio``: total chain size divided by uncompressed revision size
603 (new delta chains typically start at ratio 2.00)
603 (new delta chains typically start at ratio 2.00)
604 :``lindist``: linear distance from base revision in delta chain to end
604 :``lindist``: linear distance from base revision in delta chain to end
605 of this revision
605 of this revision
606 :``extradist``: total size of revisions not part of this delta chain from
606 :``extradist``: total size of revisions not part of this delta chain from
607 base of delta chain to end of this revision; a measurement
607 base of delta chain to end of this revision; a measurement
608 of how much extra data we need to read/seek across to read
608 of how much extra data we need to read/seek across to read
609 the delta chain for this revision
609 the delta chain for this revision
610 :``extraratio``: extradist divided by chainsize; another representation of
610 :``extraratio``: extradist divided by chainsize; another representation of
611 how much unrelated data is needed to load this delta chain
611 how much unrelated data is needed to load this delta chain
612
612
613 If the repository is configured to use the sparse read, additional keywords
613 If the repository is configured to use the sparse read, additional keywords
614 are available:
614 are available:
615
615
616 :``readsize``: total size of data read from the disk for a revision
616 :``readsize``: total size of data read from the disk for a revision
617 (sum of the sizes of all the blocks)
617 (sum of the sizes of all the blocks)
618 :``largestblock``: size of the largest block of data read from the disk
618 :``largestblock``: size of the largest block of data read from the disk
619 :``readdensity``: density of useful bytes in the data read from the disk
619 :``readdensity``: density of useful bytes in the data read from the disk
620 :``srchunks``: in how many data hunks the whole revision would be read
620 :``srchunks``: in how many data hunks the whole revision would be read
621
621
622 The sparse read can be enabled with experimental.sparse-read = True
622 The sparse read can be enabled with experimental.sparse-read = True
623 """
623 """
624 opts = pycompat.byteskwargs(opts)
624 opts = pycompat.byteskwargs(opts)
625 r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
625 r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts)
626 index = r.index
626 index = r.index
627 start = r.start
627 start = r.start
628 length = r.length
628 length = r.length
629 generaldelta = r.version & revlog.FLAG_GENERALDELTA
629 generaldelta = r.version & revlog.FLAG_GENERALDELTA
630 withsparseread = getattr(r, '_withsparseread', False)
630 withsparseread = getattr(r, '_withsparseread', False)
631
631
632 def revinfo(rev):
632 def revinfo(rev):
633 e = index[rev]
633 e = index[rev]
634 compsize = e[1]
634 compsize = e[1]
635 uncompsize = e[2]
635 uncompsize = e[2]
636 chainsize = 0
636 chainsize = 0
637
637
638 if generaldelta:
638 if generaldelta:
639 if e[3] == e[5]:
639 if e[3] == e[5]:
640 deltatype = 'p1'
640 deltatype = 'p1'
641 elif e[3] == e[6]:
641 elif e[3] == e[6]:
642 deltatype = 'p2'
642 deltatype = 'p2'
643 elif e[3] == rev - 1:
643 elif e[3] == rev - 1:
644 deltatype = 'prev'
644 deltatype = 'prev'
645 elif e[3] == rev:
645 elif e[3] == rev:
646 deltatype = 'base'
646 deltatype = 'base'
647 else:
647 else:
648 deltatype = 'other'
648 deltatype = 'other'
649 else:
649 else:
650 if e[3] == rev:
650 if e[3] == rev:
651 deltatype = 'base'
651 deltatype = 'base'
652 else:
652 else:
653 deltatype = 'prev'
653 deltatype = 'prev'
654
654
655 chain = r._deltachain(rev)[0]
655 chain = r._deltachain(rev)[0]
656 for iterrev in chain:
656 for iterrev in chain:
657 e = index[iterrev]
657 e = index[iterrev]
658 chainsize += e[1]
658 chainsize += e[1]
659
659
660 return compsize, uncompsize, deltatype, chain, chainsize
660 return compsize, uncompsize, deltatype, chain, chainsize
661
661
662 fm = ui.formatter('debugdeltachain', opts)
662 fm = ui.formatter('debugdeltachain', opts)
663
663
664 fm.plain(' rev chain# chainlen prev delta '
664 fm.plain(' rev chain# chainlen prev delta '
665 'size rawsize chainsize ratio lindist extradist '
665 'size rawsize chainsize ratio lindist extradist '
666 'extraratio')
666 'extraratio')
667 if withsparseread:
667 if withsparseread:
668 fm.plain(' readsize largestblk rddensity srchunks')
668 fm.plain(' readsize largestblk rddensity srchunks')
669 fm.plain('\n')
669 fm.plain('\n')
670
670
671 chainbases = {}
671 chainbases = {}
672 for rev in r:
672 for rev in r:
673 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
673 comp, uncomp, deltatype, chain, chainsize = revinfo(rev)
674 chainbase = chain[0]
674 chainbase = chain[0]
675 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
675 chainid = chainbases.setdefault(chainbase, len(chainbases) + 1)
676 basestart = start(chainbase)
676 basestart = start(chainbase)
677 revstart = start(rev)
677 revstart = start(rev)
678 lineardist = revstart + comp - basestart
678 lineardist = revstart + comp - basestart
679 extradist = lineardist - chainsize
679 extradist = lineardist - chainsize
680 try:
680 try:
681 prevrev = chain[-2]
681 prevrev = chain[-2]
682 except IndexError:
682 except IndexError:
683 prevrev = -1
683 prevrev = -1
684
684
685 if uncomp != 0:
685 if uncomp != 0:
686 chainratio = float(chainsize) / float(uncomp)
686 chainratio = float(chainsize) / float(uncomp)
687 else:
687 else:
688 chainratio = chainsize
688 chainratio = chainsize
689
689
690 if chainsize != 0:
690 if chainsize != 0:
691 extraratio = float(extradist) / float(chainsize)
691 extraratio = float(extradist) / float(chainsize)
692 else:
692 else:
693 extraratio = extradist
693 extraratio = extradist
694
694
695 fm.startitem()
695 fm.startitem()
696 fm.write('rev chainid chainlen prevrev deltatype compsize '
696 fm.write('rev chainid chainlen prevrev deltatype compsize '
697 'uncompsize chainsize chainratio lindist extradist '
697 'uncompsize chainsize chainratio lindist extradist '
698 'extraratio',
698 'extraratio',
699 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
699 '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f',
700 rev, chainid, len(chain), prevrev, deltatype, comp,
700 rev, chainid, len(chain), prevrev, deltatype, comp,
701 uncomp, chainsize, chainratio, lineardist, extradist,
701 uncomp, chainsize, chainratio, lineardist, extradist,
702 extraratio,
702 extraratio,
703 rev=rev, chainid=chainid, chainlen=len(chain),
703 rev=rev, chainid=chainid, chainlen=len(chain),
704 prevrev=prevrev, deltatype=deltatype, compsize=comp,
704 prevrev=prevrev, deltatype=deltatype, compsize=comp,
705 uncompsize=uncomp, chainsize=chainsize,
705 uncompsize=uncomp, chainsize=chainsize,
706 chainratio=chainratio, lindist=lineardist,
706 chainratio=chainratio, lindist=lineardist,
707 extradist=extradist, extraratio=extraratio)
707 extradist=extradist, extraratio=extraratio)
708 if withsparseread:
708 if withsparseread:
709 readsize = 0
709 readsize = 0
710 largestblock = 0
710 largestblock = 0
711 srchunks = 0
711 srchunks = 0
712
712
713 for revschunk in deltautil.slicechunk(r, chain):
713 for revschunk in deltautil.slicechunk(r, chain):
714 srchunks += 1
714 srchunks += 1
715 blkend = start(revschunk[-1]) + length(revschunk[-1])
715 blkend = start(revschunk[-1]) + length(revschunk[-1])
716 blksize = blkend - start(revschunk[0])
716 blksize = blkend - start(revschunk[0])
717
717
718 readsize += blksize
718 readsize += blksize
719 if largestblock < blksize:
719 if largestblock < blksize:
720 largestblock = blksize
720 largestblock = blksize
721
721
722 if readsize:
722 if readsize:
723 readdensity = float(chainsize) / float(readsize)
723 readdensity = float(chainsize) / float(readsize)
724 else:
724 else:
725 readdensity = 1
725 readdensity = 1
726
726
727 fm.write('readsize largestblock readdensity srchunks',
727 fm.write('readsize largestblock readdensity srchunks',
728 ' %10d %10d %9.5f %8d',
728 ' %10d %10d %9.5f %8d',
729 readsize, largestblock, readdensity, srchunks,
729 readsize, largestblock, readdensity, srchunks,
730 readsize=readsize, largestblock=largestblock,
730 readsize=readsize, largestblock=largestblock,
731 readdensity=readdensity, srchunks=srchunks)
731 readdensity=readdensity, srchunks=srchunks)
732
732
733 fm.plain('\n')
733 fm.plain('\n')
734
734
735 fm.end()
735 fm.end()
736
736
@command('debugdirstate|debugstate',
    [('', 'nodates', None, _('do not display the saved mtime (DEPRECATED)')),
    ('', 'dates', True, _('display the saved mtime')),
    ('', 'datesort', None, _('sort by saved mtime'))],
    _('[OPTION]...'))
def debugstate(ui, repo, **opts):
    """show the contents of the current dirstate

    Prints one line per tracked file: state character, mode, size, saved
    mtime and filename, followed by one line per recorded copy.
    --datesort orders entries by mtime instead of filename; --nodates
    (deprecated) or --dates=false suppresses the mtime column.
    """

    # --nodates is deprecated but, when given, overrides --dates.
    nodates = not opts[r'dates']
    if opts.get(r'nodates') is not None:
        nodates = True
    datesort = opts.get(r'datesort')

    if datesort:
        keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename
    else:
        keyfunc = None # sort by filename
    # each dirstate entry is a (state, mode, size, mtime) tuple
    for file_, ent in sorted(repo.dirstate.iteritems(), key=keyfunc):
        if ent[3] == -1:
            # -1 means the mtime is deliberately unset (e.g. after merge)
            timestr = 'unset '
        elif nodates:
            timestr = 'set '
        else:
            timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ",
                                    time.localtime(ent[3]))
            timestr = encoding.strtolocal(timestr)
        if ent[1] & 0o20000:
            # S_IFLNK bit in the recorded mode: entry is a symlink
            mode = 'lnk'
        else:
            mode = '%3o' % (ent[1] & 0o777 & ~util.umask)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
770
770
@command('debugdiscovery',
    [('', 'old', None, _('use old-style discovery')),
    ('', 'nonheads', None,
     _('use old-style discovery with non-heads included')),
    ('', 'rev', [], 'restrict discovery to this set of revs'),
    ('', 'seed', '12323', 'specify the random seed use for discovery'),
    ] + cmdutil.remoteopts,
    _('[--rev REV] [OTHER]'))
def debugdiscovery(ui, repo, remoteurl="default", **opts):
    """runs the changeset discovery protocol in isolation

    Connects to the peer at ``remoteurl`` and runs either the legacy
    tree-walking discovery (--old) or the modern set-based discovery,
    then prints timing plus statistics about common/local/remote heads
    and changesets.
    """
    opts = pycompat.byteskwargs(opts)
    remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl))
    remote = hg.peer(repo, opts, remoteurl)
    ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl))

    # make sure tests are repeatable
    random.seed(int(opts['seed']))

    if opts.get('old'):
        # legacy protocol: walk the branch/between tree
        def doit(pushedrevs, remoteheads, remote=remote):
            if not util.safehasattr(remote, 'branches'):
                # enable in-client legacy support
                remote = localrepo.locallegacypeer(remote.local())
            common, _in, hds = treediscovery.findcommonincoming(repo, remote,
                                                                force=True)
            common = set(common)
            if not opts.get('nonheads'):
                ui.write(("unpruned common: %s\n") %
                         " ".join(sorted(short(n) for n in common)))

            # reduce the raw common set to its heads for the summary below
            clnode = repo.changelog.node
            common = repo.revs('heads(::%ln)', common)
            common = {clnode(r) for r in common}
            return common, hds
    else:
        # modern set-based discovery (sampling of heads)
        def doit(pushedrevs, remoteheads, remote=remote):
            nodes = None
            if pushedrevs:
                revs = scmutil.revrange(repo, pushedrevs)
                nodes = [repo[r].node() for r in revs]
            common, any, hds = setdiscovery.findcommonheads(ui, repo, remote,
                                                            ancestorsof=nodes)
            return common, hds

    remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None)
    localrevs = opts['rev']
    with util.timedcm('debug-discovery') as t:
        common, hds = doit(localrevs, remoterevs)

    # compute all statistics
    common = set(common)
    rheads = set(hds)
    lheads = set(repo.heads())

    data = {}
    data['elapsed'] = t.elapsed
    data['nb-common'] = len(common)
    data['nb-common-local'] = len(common & lheads)
    data['nb-common-remote'] = len(common & rheads)
    data['nb-common-both'] = len(common & rheads & lheads)
    data['nb-local'] = len(lheads)
    data['nb-local-missing'] = data['nb-local'] - data['nb-common-local']
    data['nb-remote'] = len(rheads)
    data['nb-remote-unknown'] = data['nb-remote'] - data['nb-common-remote']
    data['nb-revs'] = len(repo.revs('all()'))
    data['nb-revs-common'] = len(repo.revs('::%ln', common))
    data['nb-revs-missing'] = data['nb-revs'] - data['nb-revs-common']

    # display discovery summary
    ui.write(("elapsed time: %(elapsed)f seconds\n") % data)
    ui.write(("heads summary:\n"))
    ui.write(("  total common heads: %(nb-common)9d\n") % data)
    ui.write(("    also local heads: %(nb-common-local)9d\n") % data)
    ui.write(("    also remote heads: %(nb-common-remote)9d\n") % data)
    ui.write(("    both: %(nb-common-both)9d\n") % data)
    ui.write(("  local heads: %(nb-local)9d\n") % data)
    ui.write(("    common: %(nb-common-local)9d\n") % data)
    ui.write(("    missing: %(nb-local-missing)9d\n") % data)
    ui.write(("  remote heads: %(nb-remote)9d\n") % data)
    ui.write(("    common: %(nb-common-remote)9d\n") % data)
    ui.write(("    unknown: %(nb-remote-unknown)9d\n") % data)
    ui.write(("local changesets: %(nb-revs)9d\n") % data)
    ui.write(("  common: %(nb-revs-common)9d\n") % data)
    ui.write(("  missing: %(nb-revs-missing)9d\n") % data)

    if ui.verbose:
        ui.write(("common heads: %s\n") %
                 " ".join(sorted(short(n) for n in common)))
861
861
# I/O buffer size (4 KiB) used by the debugdownload streaming loop below
_chunksize = 4 << 10
863
863
@command('debugdownload',
    [
        ('o', 'output', '', _('path')),
    ],
    optionalrepo=True)
def debugdownload(ui, repo, url, output=None, **opts):
    """download a resource using Mercurial logic and config
    """
    # open the URL through Mercurial's url handling (auth, proxies, ...)
    fh = urlmod.open(ui, url, output)

    # stream to the requested file, or to the ui when no --output given
    if output:
        dest = open(output, "wb", _chunksize)
    else:
        dest = ui
    try:
        while True:
            data = fh.read(_chunksize)
            if not data:
                break
            dest.write(data)
    finally:
        # only close what we opened ourselves; the ui is not ours to close
        if output:
            dest.close()
885
885
@command('debugextensions', cmdutil.formatteropts, [], optionalrepo=True)
def debugextensions(ui, repo, **opts):
    '''show information about active extensions

    One item per loaded extension, sorted by name.  With --verbose the
    source path, bundled status, tested-with versions and bug-report link
    are also emitted.
    '''
    opts = pycompat.byteskwargs(opts)
    exts = extensions.extensions(ui)
    hgver = util.version()
    fm = ui.formatter('debugextensions', opts)
    for extname, extmod in sorted(exts, key=operator.itemgetter(0)):
        isinternal = extensions.ismoduleinternal(extmod)
        extsource = pycompat.fsencode(extmod.__file__)
        if isinternal:
            exttestedwith = []  # never expose magic string to users
        else:
            exttestedwith = getattr(extmod, 'testedwith', '').split()
        extbuglink = getattr(extmod, 'buglink', None)

        fm.startitem()

        if ui.quiet or ui.verbose:
            fm.write('name', '%s\n', extname)
        else:
            fm.write('name', '%s', extname)
            # annotate the name with a compatibility hint for this hg version
            if isinternal or hgver in exttestedwith:
                fm.plain('\n')
            elif not exttestedwith:
                fm.plain(_(' (untested!)\n'))
            else:
                lasttestedversion = exttestedwith[-1]
                fm.plain(' (%s!)\n' % lasttestedversion)

        fm.condwrite(ui.verbose and extsource, 'source',
                     _(' location: %s\n'), extsource or "")

        if ui.verbose:
            fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal])
            fm.data(bundled=isinternal)

        fm.condwrite(ui.verbose and exttestedwith, 'testedwith',
                     _(' tested with: %s\n'),
                     fm.formatlist(exttestedwith, name='ver'))

        fm.condwrite(ui.verbose and extbuglink, 'buglink',
                     _(' bug reporting: %s\n'), extbuglink or "")

    fm.end()
931
931
@command('debugfileset',
    [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')),
     ('', 'all-files', False,
      _('test files from all revisions and working directory')),
     ('s', 'show-matcher', None,
      _('print internal representation of matcher')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME'))],
    _('[-r REV] [--all-files] [OPTION]... FILESPEC'))
def debugfileset(ui, repo, expr, **opts):
    '''parse and apply a fileset specification

    Runs ``expr`` through the parse/analyze/optimize pipeline (printing
    any stage requested with --show-stage), builds the matcher, and lists
    every candidate file the matcher accepts.
    '''
    from . import fileset
    fileset.symbols # force import of fileset so we have predicates to optimize
    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'), None)

    # pipeline of (stage name, transform) applied in order to the parse tree
    stages = [
        ('parsed', pycompat.identity),
        ('analyzed', filesetlang.analyze),
        ('optimized', filesetlang.optimize),
    ]
    stagenames = set(n for n, f in stages)

    showalways = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    tree = filesetlang.parse(expr)
    for n, f in stages:
        tree = f(tree)
        if n in showalways:
            # legacy --verbose output omitted the stage header
            if opts['show_stage'] or n != 'parsed':
                ui.write(("* %s:\n") % n)
            ui.write(filesetlang.prettyformat(tree), "\n")

    # collect the candidate file names the matcher will be tested against
    files = set()
    if opts['all_files']:
        for r in repo:
            c = repo[r]
            files.update(c.files())
            files.update(c.substate)
    if opts['all_files'] or ctx.rev() is None:
        wctx = repo[None]
        files.update(repo.dirstate.walk(scmutil.matchall(repo),
                                        subrepos=list(wctx.substate),
                                        unknown=True, ignored=True))
        files.update(wctx.substate)
    else:
        files.update(ctx.files())
        files.update(ctx.substate)

    m = ctx.matchfileset(expr)
    if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose):
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    for f in sorted(files):
        if not m(f):
            continue
        ui.write("%s\n" % f)
998
998
@command('debugformat',
         [] + cmdutil.formatteropts)
def debugformat(ui, repo, **opts):
    """display format information about the current repository

    Use --verbose to get extra information about current config value and
    Mercurial default."""
    opts = pycompat.byteskwargs(opts)
    # widest variant name determines the label column width
    maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant)
    maxvariantlength = max(len('format-variant'), maxvariantlength)

    def makeformatname(name):
        # pad each name so the value columns line up
        return '%s:' + (' ' * (maxvariantlength - len(name)))

    fm = ui.formatter('debugformat', opts)
    if fm.isplain():
        def formatvalue(value):
            # strings pass through; booleans render as yes/no in plain mode
            if util.safehasattr(value, 'startswith'):
                return value
            if value:
                return 'yes'
            else:
                return 'no'
    else:
        # structured formatters (json, template) keep the raw value
        formatvalue = pycompat.identity

    fm.plain('format-variant')
    fm.plain(' ' * (maxvariantlength - len('format-variant')))
    fm.plain(' repo')
    if ui.verbose:
        fm.plain(' config default')
    fm.plain('\n')
    for fv in upgrade.allformatvariant:
        fm.startitem()
        repovalue = fv.fromrepo(repo)
        configvalue = fv.fromconfig(repo)

        # pick labels so mismatches between repo/config/default stand out
        if repovalue != configvalue:
            namelabel = 'formatvariant.name.mismatchconfig'
            repolabel = 'formatvariant.repo.mismatchconfig'
        elif repovalue != fv.default:
            namelabel = 'formatvariant.name.mismatchdefault'
            repolabel = 'formatvariant.repo.mismatchdefault'
        else:
            namelabel = 'formatvariant.name.uptodate'
            repolabel = 'formatvariant.repo.uptodate'

        fm.write('name', makeformatname(fv.name), fv.name,
                 label=namelabel)
        fm.write('repo', ' %3s', formatvalue(repovalue),
                 label=repolabel)
        if fv.default != configvalue:
            configlabel = 'formatvariant.config.special'
        else:
            configlabel = 'formatvariant.config.default'
        fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue),
                     label=configlabel)
        fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default),
                     label='formatvariant.default')
        fm.plain('\n')
    fm.end()
1060
1060
@command('debugfsinfo', [], _('[PATH]'), norepo=True)
def debugfsinfo(ui, path="."):
    """show information detected about current filesystem

    Reports mount point, filesystem type and the exec/symlink/hardlink
    and case-sensitivity capabilities of ``path`` (default: cwd).
    """
    ui.write(('path: %s\n') % path)
    ui.write(('mounted on: %s\n') % (util.getfsmountpoint(path) or '(unknown)'))
    ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no'))
    ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)'))
    ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no'))
    ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no'))
    casesensitive = '(unknown)'
    try:
        # probe case sensitivity by creating a throwaway file inside path
        with pycompat.namedtempfile(prefix='.debugfsinfo', dir=path) as f:
            casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no'
    except OSError:
        # path may be unwritable; report '(unknown)' instead of aborting
        pass
    ui.write(('case-sensitive: %s\n') % casesensitive)
1077
1077
@command('debuggetbundle',
    [('H', 'head', [], _('id of head node'), _('ID')),
    ('C', 'common', [], _('id of common node'), _('ID')),
    ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))],
    _('REPO FILE [-H|-C ID]...'),
    norepo=True)
def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts):
    """retrieves a bundle from a repo

    Every ID must be a full-length hex node id string. Saves the bundle to the
    given file.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('getbundle'):
        raise error.Abort("getbundle() not supported by target repository")
    args = {}
    if common:
        args[r'common'] = [bin(s) for s in common]
    if head:
        args[r'heads'] = [bin(s) for s in head]
    # TODO: get desired bundlecaps from command line.
    args[r'bundlecaps'] = None
    bundle = repo.getbundle('debug', **args)

    bundletype = opts.get('type', 'bzip2').lower()
    # map the user-facing compression name to the on-disk bundle header
    btypes = {'none': 'HG10UN',
              'bzip2': 'HG10BZ',
              'gzip': 'HG10GZ',
              'bundle2': 'HG20'}
    bundletype = btypes.get(bundletype)
    if bundletype not in bundle2.bundletypes:
        raise error.Abort(_('unknown bundle type specified with --type'))
    bundle2.writebundle(ui, bundle, bundlepath, bundletype)
1112
1112
@command('debugignore', [], '[FILE]')
def debugignore(ui, repo, *files, **opts):
    """display the combined ignore pattern and information about ignored files

    With no argument display the combined ignore pattern.

    Given space separated file names, shows if the given file is ignored and
    if so, show the ignore rule (file and line number) that matched it.
    """
    ignore = repo.dirstate._ignore
    if not files:
        # Show all the patterns
        ui.write("%s\n" % pycompat.byterepr(ignore))
    else:
        m = scmutil.match(repo[None], pats=files)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        for f in m.files():
            nf = util.normpath(f)
            ignored = None
            ignoredata = None
            if nf != '.':
                if ignore(nf):
                    ignored = nf
                    ignoredata = repo.dirstate._ignorefileandline(nf)
                else:
                    # not directly ignored: check containing directories
                    for p in util.finddirs(nf):
                        if ignore(p):
                            ignored = p
                            ignoredata = repo.dirstate._ignorefileandline(p)
                            break
            if ignored:
                if ignored == nf:
                    ui.write(_("%s is ignored\n") % uipathfn(f))
                else:
                    ui.write(_("%s is ignored because of "
                               "containing directory %s\n")
                             % (uipathfn(f), ignored))
                ignorefile, lineno, line = ignoredata
                ui.write(_("(ignore rule in %s, line %d: '%s')\n")
                         % (ignorefile, lineno, line))
            else:
                ui.write(_("%s is not ignored\n") % uipathfn(f))
1155
1155
@command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts,
         _('-c|-m|FILE'))
def debugindex(ui, repo, file_=None, **opts):
    """dump index data for a storage primitive"""
    opts = pycompat.byteskwargs(opts)
    store = cmdutil.openstorage(repo, 'debugindex', file_, opts)

    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # Determine the id column width from the first node (default 12 when
    # the store is empty).
    idlen = 12
    for i in store:
        idlen = len(shortfn(store.node(i)))
        break

    fm = ui.formatter('debugindex', opts)
    fm.plain(b'   rev linkrev %s %s p2\n' % (
        b'nodeid'.ljust(idlen),
        b'p1'.ljust(idlen)))

    for rev in store:
        node = store.node(rev)
        parents = store.parents(node)

        fm.startitem()
        fm.write(b'rev', b'%6d ', rev)
        fm.write(b'linkrev', '%7d ', store.linkrev(rev))
        fm.write(b'node', '%s ', shortfn(node))
        fm.write(b'p1', '%s ', shortfn(parents[0]))
        fm.write(b'p2', '%s', shortfn(parents[1]))
        fm.plain(b'\n')

    fm.end()
1191
1191
@command('debugindexdot', cmdutil.debugrevlogopts,
         _('-c|-m|FILE'), optionalrepo=True)
def debugindexdot(ui, repo, file_=None, **opts):
    """dump an index DAG as a graphviz dot file"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openstorage(repo, 'debugindexdot', file_, opts)
    ui.write(("digraph G {\n"))
    for i in r:
        node = r.node(i)
        pp = r.parents(node)
        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i))
        # only emit the second parent edge for merges
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i))
    ui.write("}\n")
1206
1206
@command('debugindexstats', [])
def debugindexstats(ui, repo):
    """show stats related to the changelog index"""
    repo.changelog.shortest(nullid, 1)
    index = repo.changelog.index
    if not util.safehasattr(index, 'stats'):
        raise error.Abort(_('debugindexstats only works with native code'))
    for k, v in sorted(index.stats().items()):
        ui.write('%s: %d\n' % (k, v))
1216
1216
@command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True)
def debuginstall(ui, **opts):
    '''test Mercurial installation

    Returns 0 on success.
    '''
    opts = pycompat.byteskwargs(opts)

    problems = 0

    fm = ui.formatter('debuginstall', opts)
    fm.startitem()

    # encoding
    fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding)
    err = None
    try:
        codecs.lookup(pycompat.sysstr(encoding.encoding))
    except LookupError as inst:
        err = stringutil.forcebytestr(inst)
        problems += 1
    fm.condwrite(err, 'encodingerror', _(" %s\n"
                 " (check that your locale is properly set)\n"), err)

    # Python
    fm.write('pythonexe', _("checking Python executable (%s)\n"),
             pycompat.sysexecutable or _("unknown"))
    fm.write('pythonver', _("checking Python version (%s)\n"),
             ("%d.%d.%d" % sys.version_info[:3]))
    fm.write('pythonlib', _("checking Python lib (%s)...\n"),
             os.path.dirname(pycompat.fsencode(os.__file__)))

    security = set(sslutil.supportedprotocols)
    if sslutil.hassni:
        security.add('sni')

    fm.write('pythonsecurity', _("checking Python security support (%s)\n"),
             fm.formatlist(sorted(security), name='protocol',
                           fmt='%s', sep=','))

    # These are warnings, not errors. So don't increment problem count. This
    # may change in the future.
    if 'tls1.2' not in security:
        fm.plain(_('  TLS 1.2 not supported by Python install; '
                   'network connections lack modern security\n'))
    if 'sni' not in security:
        fm.plain(_('  SNI not supported by Python install; may have '
                   'connectivity issues with some servers\n'))

    # TODO print CA cert info

    # hg version
    hgver = util.version()
    fm.write('hgver', _("checking Mercurial version (%s)\n"),
             hgver.split('+')[0])
    fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"),
             '+'.join(hgver.split('+')[1:]))

    # compiled modules
    fm.write('hgmodulepolicy', _("checking module policy (%s)\n"),
             policy.policy)
    fm.write('hgmodules', _("checking installed modules (%s)...\n"),
             os.path.dirname(pycompat.fsencode(__file__)))

    rustandc = policy.policy in ('rust+c', 'rust+c-allow')
    rustext = rustandc  # for now, that's the only case
    cext = policy.policy in ('c', 'allow') or rustandc
    nopure = cext or rustext
    if nopure:
        err = None
        try:
            if cext:
                from .cext import (
                    base85,
                    bdiff,
                    mpatch,
                    osutil,
                )
                # quiet pyflakes
                dir(bdiff), dir(mpatch), dir(base85), dir(osutil)
            if rustext:
                from .rustext import (
                    ancestor,
                    dirstate,
                )
                dir(ancestor), dir(dirstate)  # quiet pyflakes
        except Exception as inst:
            err = stringutil.forcebytestr(inst)
            problems += 1
        fm.condwrite(err, 'extensionserror', " %s\n", err)

    compengines = util.compengines._engines.values()
    fm.write('compengines', _('checking registered compression engines (%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines),
                           name='compengine', fmt='%s', sep=', '))
    fm.write('compenginesavail', _('checking available compression engines '
                                   '(%s)\n'),
             fm.formatlist(sorted(e.name() for e in compengines
                                  if e.available()),
                           name='compengine', fmt='%s', sep=', '))
    wirecompengines = compression.compengines.supportedwireengines(
        compression.SERVERROLE)
    fm.write('compenginesserver', _('checking available compression engines '
                                    'for wire protocol (%s)\n'),
             fm.formatlist([e.name() for e in wirecompengines
                            if e.wireprotosupport()],
                           name='compengine', fmt='%s', sep=', '))
    re2 = 'missing'
    if util._re2:
        re2 = 'available'
    fm.plain(_('checking "re2" regexp engine (%s)\n') % re2)
    fm.data(re2=bool(util._re2))

    # templates
    p = templater.templatepaths()
    fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p))
    fm.condwrite(not p, '', _(" no template directories found\n"))
    if p:
        m = templater.templatepath("map-cmdline.default")
        if m:
            # template found, check if it is working
            err = None
            try:
                templater.templater.frommapfile(m)
            except Exception as inst:
                err = stringutil.forcebytestr(inst)
                p = None
            fm.condwrite(err, 'defaulttemplateerror', " %s\n", err)
        else:
            p = None
        fm.condwrite(p, 'defaulttemplate',
                     _("checking default template (%s)\n"), m)
        fm.condwrite(not m, 'defaulttemplatenotfound',
                     _(" template '%s' not found\n"), "default")
    if not p:
        problems += 1
    fm.condwrite(not p, '',
                 _(" (templates seem to have been installed incorrectly)\n"))

    # editor
    editor = ui.geteditor()
    editor = util.expandpath(editor)
    editorbin = procutil.shellsplit(editor)[0]
    fm.write('editor', _("checking commit editor... (%s)\n"), editorbin)
    cmdpath = procutil.findexe(editorbin)
    fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound',
                 _(" No commit editor set and can't find %s in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editor == 'vi' and editorbin)
    fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound',
                 _(" Can't find editor '%s' in PATH\n"
                   " (specify a commit editor in your configuration"
                   " file)\n"), not cmdpath and editorbin)
    if not cmdpath and editor != 'vi':
        problems += 1

    # check username
    username = None
    err = None
    try:
        username = ui.username()
    except error.Abort as e:
        err = stringutil.forcebytestr(e)
        problems += 1

    fm.condwrite(username, 'username', _("checking username (%s)\n"), username)
    fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n"
        " (specify a username in your configuration file)\n"), err)

    # let extensions contribute their own checks
    for name, mod in extensions.extensions():
        handler = getattr(mod, 'debuginstall', None)
        if handler is not None:
            problems += handler(ui, fm)

    fm.condwrite(not problems, '',
                 _("no problems detected\n"))
    if not problems:
        fm.data(problems=problems)
    fm.condwrite(problems, 'problems',
                 _("%d problems detected,"
                   " please check your install!\n"), problems)
    fm.end()

    return problems
1401
1401
@command('debugknown', [], _('REPO ID...'), norepo=True)
def debugknown(ui, repopath, *ids, **opts):
    """test whether node ids are known to a repo

    Every ID must be a full-length hex node id string. Returns a list of 0s
    and 1s indicating unknown/known.
    """
    opts = pycompat.byteskwargs(opts)
    repo = hg.peer(ui, opts, repopath)
    if not repo.capable('known'):
        raise error.Abort("known() not supported by target repository")
    flags = repo.known([bin(s) for s in ids])
    ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags])))
1415
1415
@command('debuglabelcomplete', [], _('LABEL...'))
def debuglabelcomplete(ui, repo, *args):
    '''backwards compatibility with old bash completion scripts (DEPRECATED)'''
    debugnamecomplete(ui, repo, *args)
1420
1420
@command('debuglocks',
         [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')),
          ('W', 'force-wlock', None,
           _('free the working state lock (DANGEROUS)')),
          ('s', 'set-lock', None, _('set the store lock until stopped')),
          ('S', 'set-wlock', None,
           _('set the working state lock until stopped'))],
         _('[OPTION]...'))
def debuglocks(ui, repo, **opts):
    """show or modify state of locks

    By default, this command will show which locks are held. This
    includes the user and process holding the lock, the amount of time
    the lock has been held, and the machine name where the process is
    running if it's not local.

    Locks protect the integrity of Mercurial's data, so should be
    treated with care. System crashes or other interruptions may cause
    locks to not be properly released, though Mercurial will usually
    detect and remove such stale locks automatically.

    However, detecting stale locks may not always be possible (for
    instance, on a shared filesystem). Removing locks may also be
    blocked by filesystem permissions.

    Setting a lock will prevent other commands from changing the data.
    The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs.
    The set locks are removed when the command exits.

    Returns 0 if no locks are held.

    """

    if opts.get(r'force_lock'):
        repo.svfs.unlink('lock')
    if opts.get(r'force_wlock'):
        repo.vfs.unlink('wlock')
    if opts.get(r'force_lock') or opts.get(r'force_wlock'):
        return 0

    locks = []
    try:
        if opts.get(r'set_wlock'):
            try:
                locks.append(repo.wlock(False))
            except error.LockHeld:
                raise error.Abort(_('wlock is already held'))
        if opts.get(r'set_lock'):
            try:
                locks.append(repo.lock(False))
            except error.LockHeld:
                raise error.Abort(_('lock is already held'))
        if len(locks):
            ui.promptchoice(_("ready to release the lock (y)? $$ &Yes"))
            return 0
    finally:
        release(*locks)

    now = time.time()
    held = 0

    def report(vfs, name, method):
        # this causes stale locks to get reaped for more accurate reporting
        try:
            l = method(False)
        except error.LockHeld:
            l = None

        if l:
            l.release()
        else:
            try:
                st = vfs.lstat(name)
                age = now - st[stat.ST_MTIME]
                user = util.username(st.st_uid)
                locker = vfs.readlock(name)
                if ":" in locker:
                    host, pid = locker.split(':')
                    if host == socket.gethostname():
                        locker = 'user %s, process %s' % (user or b'None', pid)
                    else:
                        locker = ('user %s, process %s, host %s'
                                  % (user or b'None', pid, host))
                ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age))
                return 1
            except OSError as e:
                if e.errno != errno.ENOENT:
                    raise

        ui.write(("%-6s free\n") % (name + ":"))
        return 0

    held += report(repo.svfs, "lock", repo.lock)
    held += report(repo.vfs, "wlock", repo.wlock)

    return held
1517
1517
@command('debugmanifestfulltextcache', [
        ('', 'clear', False, _('clear the cache')),
        ('a', 'add', [], _('add the given manifest nodes to the cache'),
         _('NODE'))
    ], '')
def debugmanifestfulltextcache(ui, repo, add=(), **opts):
    """show, clear or amend the contents of the manifest fulltext cache"""

    def getcache():
        # abort cleanly when the active revlog implementation has no
        # fulltext cache attribute
        r = repo.manifestlog.getstorage(b'')
        try:
            return r._fulltextcache
        except AttributeError:
            msg = _("Current revlog implementation doesn't appear to have a "
                    "manifest fulltext cache\n")
            raise error.Abort(msg)

    if opts.get(r'clear'):
        with repo.wlock():
            cache = getcache()
            cache.clear(clear_persisted_data=True)
            return

    if add:
        with repo.wlock():
            m = repo.manifestlog
            store = m.getstorage(b'')
            for n in add:
                try:
                    manifest = m[store.lookup(n)]
                except error.LookupError as e:
                    raise error.Abort(e, hint="Check your manifest node id")
                manifest.read()  # stores revision in cache too
            return

    cache = getcache()
    if not len(cache):
        ui.write(_('cache empty\n'))
    else:
        ui.write(
            _('cache contains %d manifest entries, in order of most to '
              'least recent:\n') % (len(cache),))
        totalsize = 0
        for nodeid in cache:
            # Use cache.peek to not update the LRU order
            data = cache.peek(nodeid)
            size = len(data)
            totalsize += size + 24   # 20 bytes nodeid, 4 bytes size
            ui.write(_('id: %s, size %s\n') % (
                hex(nodeid), util.bytecount(size)))
        ondisk = cache._opener.stat('manifestfulltextcache').st_size
        ui.write(
            _('total cache data size %s, on-disk %s\n') % (
                util.bytecount(totalsize), util.bytecount(ondisk))
        )
1573
1573
@command('debugmergestate', [], '')
def debugmergestate(ui, repo, *args):
    """print merge state

    Use --verbose to print out information about whether v1 or v2 merge state
    was chosen."""
    def _hashornull(h):
        # render the all-zero (null) hash as a readable placeholder
        if h == nullhex:
            return 'null'
        else:
            return h

    def printrecords(version):
        # dump the raw merge-state records for the given format version
        ui.write(('* version %d records\n') % version)
        if version == 1:
            records = v1records
        else:
            records = v2records

        for rtype, record in records:
            # pretty print some record types
            if rtype == 'L':
                ui.write(('local: %s\n') % record)
            elif rtype == 'O':
                ui.write(('other: %s\n') % record)
            elif rtype == 'm':
                driver, mdstate = record.split('\0', 1)
                ui.write(('merge driver: %s (state "%s")\n')
                         % (driver, mdstate))
            elif rtype in 'FDC':
                # file records: NUL-separated fields; v2 adds the other-side
                # node before the flags field
                r = record.split('\0')
                f, state, hash, lfile, afile, anode, ofile = r[0:7]
                if version == 1:
                    onode = 'not stored in v1 format'
                    flags = r[7]
                else:
                    onode, flags = r[7:9]
                ui.write(('file: %s (record type "%s", state "%s", hash %s)\n')
                         % (f, rtype, state, _hashornull(hash)))
                ui.write((' local path: %s (flags "%s")\n') % (lfile, flags))
                ui.write((' ancestor path: %s (node %s)\n')
                         % (afile, _hashornull(anode)))
                ui.write((' other path: %s (node %s)\n')
                         % (ofile, _hashornull(onode)))
            elif rtype == 'f':
                # file extras record: filename then key/value pairs
                filename, rawextras = record.split('\0', 1)
                extras = rawextras.split('\0')
                i = 0
                extrastrings = []
                while i < len(extras):
                    extrastrings.append('%s = %s' % (extras[i], extras[i + 1]))
                    i += 2

                ui.write(('file extras: %s (%s)\n')
                         % (filename, ', '.join(extrastrings)))
            elif rtype == 'l':
                labels = record.split('\0', 2)
                labels = [l for l in labels if len(l) > 0]
                ui.write(('labels:\n'))
                ui.write((' local: %s\n' % labels[0]))
                ui.write((' other: %s\n' % labels[1]))
                if len(labels) > 2:
                    ui.write((' base: %s\n' % labels[2]))
            else:
                ui.write(('unrecognized entry: %s\t%s\n')
                         % (rtype, record.replace('\0', '\t')))

    # Avoid mergestate.read() since it may raise an exception for unsupported
    # merge state records. We shouldn't be doing this, but this is OK since this
    # command is pretty low-level.
    ms = mergemod.mergestate(repo)

    # sort so that reasonable information is on top
    v1records = ms._readrecordsv1()
    v2records = ms._readrecordsv2()
    order = 'LOml'
    def key(r):
        # known record types sort first, in 'LOml' order; unknown types sort
        # after them by payload
        idx = order.find(r[0])
        if idx == -1:
            return (1, r[1])
        else:
            return (0, idx)
    v1records.sort(key=key)
    v2records.sort(key=key)

    if not v1records and not v2records:
        ui.write(('no merge state found\n'))
    elif not v2records:
        ui.note(('no version 2 merge state\n'))
        printrecords(1)
    elif ms._v1v2match(v1records, v2records):
        ui.note(('v1 and v2 states match: using v2\n'))
        printrecords(2)
    else:
        ui.note(('v1 and v2 states mismatch: using v1\n'))
        printrecords(1)
        if ui.verbose:
            printrecords(2)
1672
1672
@command('debugnamecomplete', [], _('NAME...'))
def debugnamecomplete(ui, repo, *args):
    '''complete "names" - tags, open branch names, bookmark names'''

    names = set()
    # since we previously only listed open branches, we will handle that
    # specially (after this for loop)
    for name, ns in repo.names.iteritems():
        if name != 'branches':
            names.update(ns.listnames(repo))
    names.update(tag for (tag, heads, tip, closed)
                 in repo.branchmap().iterbranches() if not closed)
    completions = set()
    if not args:
        # no prefix given: every known name is a completion
        args = ['']
    for a in args:
        completions.update(n for n in names if n.startswith(a))
    ui.write('\n'.join(sorted(completions)))
    ui.write('\n')
1692
1692
@command('debugobsolete',
         [('', 'flags', 0, _('markers flag')),
          ('', 'record-parents', False,
           _('record parent information for the precursor')),
          ('r', 'rev', [], _('display markers relevant to REV')),
          ('', 'exclusive', False, _('restrict display to markers only '
                                     'relevant to REV')),
          ('', 'index', False, _('display index of the marker')),
          ('', 'delete', [], _('delete markers specified by indices')),
         ] + cmdutil.commitopts2 + cmdutil.formatteropts,
         _('[OBSOLETED [REPLACEMENT ...]]'))
def debugobsolete(ui, repo, precursor=None, *successors, **opts):
    """create arbitrary obsolete marker

    With no arguments, displays the list of obsolescence markers."""

    opts = pycompat.byteskwargs(opts)

    def parsenodeid(s):
        # Parse a full hexadecimal node id, aborting on malformed input.
        try:
            # We do not use revsingle/revrange functions here to accept
            # arbitrary node identifiers, possibly not present in the
            # local repository.
            n = bin(s)
            if len(n) != len(nullid):
                raise TypeError()
            return n
        except TypeError:
            raise error.Abort('changeset references must be full hexadecimal '
                              'node identifiers')

    if opts.get('delete'):
        # deletion mode: remove the markers at the given indices
        indices = []
        for v in opts.get('delete'):
            try:
                indices.append(int(v))
            except ValueError:
                raise error.Abort(_('invalid index value: %r') % v,
                                  hint=_('use integers for indices'))

        if repo.currenttransaction():
            raise error.Abort(_('cannot delete obsmarkers in the middle '
                                'of transaction.'))

        with repo.lock():
            n = repair.deleteobsmarkers(repo.obsstore, indices)
            ui.write(_('deleted %i obsolescence markers\n') % n)

        return

    if precursor is not None:
        # creation mode: record a new marker precursor -> successors
        if opts['rev']:
            raise error.Abort('cannot select revision when creating marker')
        metadata = {}
        metadata['user'] = encoding.fromlocal(opts['user'] or ui.username())
        succs = tuple(parsenodeid(succ) for succ in successors)
        l = repo.lock()
        try:
            tr = repo.transaction('debugobsolete')
            try:
                date = opts.get('date')
                if date:
                    date = dateutil.parsedate(date)
                else:
                    date = None
                prec = parsenodeid(precursor)
                parents = None
                if opts['record_parents']:
                    if prec not in repo.unfiltered():
                        raise error.Abort('cannot used --record-parents on '
                                          'unknown changesets')
                    parents = repo.unfiltered()[prec].parents()
                    parents = tuple(p.node() for p in parents)
                repo.obsstore.create(tr, prec, succs, opts['flags'],
                                     parents=parents, date=date,
                                     metadata=metadata, ui=ui)
                tr.close()
            except ValueError as exc:
                raise error.Abort(_('bad obsmarker input: %s') %
                                  pycompat.bytestr(exc))
            finally:
                tr.release()
        finally:
            l.release()
    else:
        # display mode: list markers, optionally restricted to --rev
        if opts['rev']:
            revs = scmutil.revrange(repo, opts['rev'])
            nodes = [repo[r].node() for r in revs]
            markers = list(obsutil.getmarkers(repo, nodes=nodes,
                                              exclusive=opts['exclusive']))
            markers.sort(key=lambda x: x._data)
        else:
            markers = obsutil.getmarkers(repo)

        markerstoiter = markers
        isrelevant = lambda m: True
        if opts.get('rev') and opts.get('index'):
            # indices are global, so iterate everything but display a subset
            markerstoiter = obsutil.getmarkers(repo)
            markerset = set(markers)
            isrelevant = lambda m: m in markerset

        fm = ui.formatter('debugobsolete', opts)
        for i, m in enumerate(markerstoiter):
            if not isrelevant(m):
                # marker can be irrelevant when we're iterating over a set
                # of markers (markerstoiter) which is bigger than the set
                # of markers we want to display (markers)
                # this can happen if both --index and --rev options are
                # provided and thus we need to iterate over all of the markers
                # to get the correct indices, but only display the ones that
                # are relevant to --rev value
                continue
            fm.startitem()
            ind = i if opts.get('index') else None
            cmdutil.showmarker(fm, m, index=ind)
        fm.end()
1809
1809
@command('debugp1copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp1copies(ui, repo, **opts):
    """dump copy information compared to p1"""

    opts = pycompat.byteskwargs(opts)
    # default=None means "working directory" when no --rev is given
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    for dst, src in ctx.p1copies().items():
        ui.write('%s -> %s\n' % (src, dst))
1820
1820
@command('debugp2copies',
         [('r', 'rev', '', _('revision to debug'), _('REV'))],
         _('[-r REV]'))
def debugp2copies(ui, repo, **opts):
    """dump copy information compared to p2"""
    # NOTE: this function was previously (mistakenly) also named
    # debugp1copies, which shadowed the p1 variant at module level; the
    # command registration itself keys on the decorator's name string, so
    # renaming the Python function does not change CLI behavior.

    opts = pycompat.byteskwargs(opts)
    # default=None means "working directory" when no --rev is given
    ctx = scmutil.revsingle(repo, opts.get('rev'), default=None)
    for dst, src in ctx.p2copies().items():
        ui.write('%s -> %s\n' % (src, dst))
1831
1831
@command('debugpathcomplete',
         [('f', 'full', None, _('complete an entire path')),
          ('n', 'normal', None, _('show only normal files')),
          ('a', 'added', None, _('show only added files')),
          ('r', 'removed', None, _('show only removed files'))],
         _('FILESPEC...'))
def debugpathcomplete(ui, repo, *specs, **opts):
    '''complete part or all of a tracked path

    This command supports shells that offer path name completion. It
    currently completes only files already known to the dirstate.

    Completion extends only to the next path segment unless
    --full is specified, in which case entire paths are used.'''

    def complete(path, acceptable):
        # Return (files, dirs) completions for ``path`` restricted to
        # dirstate entries whose state char is in ``acceptable``.
        dirstate = repo.dirstate
        spec = os.path.normpath(os.path.join(encoding.getcwd(), path))
        rootdir = repo.root + pycompat.ossep
        if spec != repo.root and not spec.startswith(rootdir):
            # outside the repository: nothing to complete
            return [], []
        if os.path.isdir(spec):
            spec += '/'
        spec = spec[len(rootdir):]
        # dirstate paths always use '/'; translate OS separators if needed
        fixpaths = pycompat.ossep != '/'
        if fixpaths:
            spec = spec.replace(pycompat.ossep, '/')
        speclen = len(spec)
        fullpaths = opts[r'full']
        files, dirs = set(), set()
        adddir, addfile = dirs.add, files.add
        for f, st in dirstate.iteritems():
            if f.startswith(spec) and st[0] in acceptable:
                if fixpaths:
                    f = f.replace('/', pycompat.ossep)
                if fullpaths:
                    addfile(f)
                    continue
                # without --full, stop at the next path separator
                s = f.find(pycompat.ossep, speclen)
                if s >= 0:
                    adddir(f[:s])
                else:
                    addfile(f)
        return files, dirs

    # build the accepted dirstate state characters from the flags;
    # no flags means "accept everything" (nmar) below
    acceptable = ''
    if opts[r'normal']:
        acceptable += 'nm'
    if opts[r'added']:
        acceptable += 'a'
    if opts[r'removed']:
        acceptable += 'r'
    cwd = repo.getcwd()
    if not specs:
        specs = ['.']

    files, dirs = set(), set()
    for spec in specs:
        f, d = complete(spec, acceptable or 'nmar')
        files.update(f)
        dirs.update(d)
    files.update(dirs)
    ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files)))
    ui.write('\n')
1896
1896
@command('debugpathcopies',
         cmdutil.walkopts,
         'hg debugpathcopies REV1 REV2 [FILE]',
         inferrepo=True)
def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts):
    """show copies between two revisions"""
    ctx1 = scmutil.revsingle(repo, rev1)
    ctx2 = scmutil.revsingle(repo, rev2)
    # match patterns against the first revision's context
    m = scmutil.match(ctx1, pats, opts)
    for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()):
        ui.write('%s -> %s\n' % (src, dst))
1908
1908
@command('debugpeer', [], _('PATH'), norepo=True)
def debugpeer(ui, path):
    """establish a connection to a peer repository"""
    # Always enable peer request logging. Requires --debug to display
    # though.
    overrides = {
        ('devel', 'debug.peer-request'): True,
    }

    with ui.configoverride(overrides):
        peer = hg.peer(ui, {}, path)

        local = peer.local() is not None
        canpush = peer.canpush()

        ui.write(_('url: %s\n') % peer.url())
        ui.write(_('local: %s\n') % (_('yes') if local else _('no')))
        ui.write(_('pushable: %s\n') % (_('yes') if canpush else _('no')))
1927
1927
@command('debugpickmergetool',
         [('r', 'rev', '', _('check for files in this revision'), _('REV')),
          ('', 'changedelete', None, _('emulate merging change and delete')),
         ] + cmdutil.walkopts + cmdutil.mergetoolopts,
         _('[PATTERN]...'),
         inferrepo=True)
def debugpickmergetool(ui, repo, *pats, **opts):
    """examine which merge tool is chosen for specified file

    As described in :hg:`help merge-tools`, Mercurial examines
    configurations below in this order to decide which merge tool is
    chosen for specified file.

    1. ``--tool`` option
    2. ``HGMERGE`` environment variable
    3. configurations in ``merge-patterns`` section
    4. configuration of ``ui.merge``
    5. configurations in ``merge-tools`` section
    6. ``hgmerge`` tool (for historical reason only)
    7. default tool for fallback (``:merge`` or ``:prompt``)

    This command writes out examination result in the style below::

        FILE = MERGETOOL

    By default, all files known in the first parent context of the
    working directory are examined. Use file patterns and/or -I/-X
    options to limit target files. -r/--rev is also useful to examine
    files in another context without actual updating to it.

    With --debug, this command shows warning messages while matching
    against ``merge-patterns`` and so on, too. It is recommended to
    use this option with explicit file patterns and/or -I/-X options,
    because this option increases amount of output per file according
    to configurations in hgrc.

    With -v/--verbose, this command shows configurations below at
    first (only if specified).

    - ``--tool`` option
    - ``HGMERGE`` environment variable
    - configuration of ``ui.merge``

    If merge tool is chosen before matching against
    ``merge-patterns``, this command can't show any helpful
    information, even with --debug. In such case, information above is
    useful to know why a merge tool is chosen.
    """
    opts = pycompat.byteskwargs(opts)
    overrides = {}
    if opts['tool']:
        overrides[('ui', 'forcemerge')] = opts['tool']
        ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool'])))

    with ui.configoverride(overrides, 'debugmergepatterns'):
        # report the inputs that short-circuit merge-pattern matching
        hgmerge = encoding.environ.get("HGMERGE")
        if hgmerge is not None:
            ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge)))
        uimerge = ui.config("ui", "merge")
        if uimerge:
            ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge)))

        ctx = scmutil.revsingle(repo, opts.get('rev'))
        m = scmutil.match(ctx, pats, opts)
        changedelete = opts['changedelete']
        for path in ctx.walk(m):
            fctx = ctx[path]
            try:
                # suppress _picktool's own chatter unless --debug was given
                if not ui.debugflag:
                    ui.pushbuffer(error=True)
                tool, toolpath = filemerge._picktool(repo, ui, path,
                                                     fctx.isbinary(),
                                                     'l' in fctx.flags(),
                                                     changedelete)
            finally:
                if not ui.debugflag:
                    ui.popbuffer()
            ui.write(('%s = %s\n') % (path, tool))
2006
2006
@command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True)
def debugpushkey(ui, repopath, namespace, *keyinfo, **opts):
    '''access the pushkey key/value protocol

    With two args, list the keys in the given namespace.

    With five args, set a key to new if it currently is set to old.
    Reports success or failure.
    '''

    target = hg.peer(ui, {}, repopath)
    if keyinfo:
        # write path: conditionally set key old -> new
        key, old, new = keyinfo
        with target.commandexecutor() as e:
            r = e.callcommand('pushkey', {
                'namespace': namespace,
                'key': key,
                'old': old,
                'new': new,
            }).result()

        ui.status(pycompat.bytestr(r) + '\n')
        return not r
    else:
        # read path: dump every key/value pair in the namespace
        for k, v in sorted(target.listkeys(namespace).iteritems()):
            ui.write("%s\t%s\n" % (stringutil.escapestr(k),
                                   stringutil.escapestr(v)))
2034
2034
@command('debugpvec', [], _('A B'))
def debugpvec(ui, repo, a, b=None):
    """compare two revisions using parent vectors (pvec) and print the result

    Prints both vectors, their depths, and the computed relation:
    '=' equal, '>' ancestor, '<' descendant, '|' unrelated.
    """
    ca = scmutil.revsingle(repo, a)
    cb = scmutil.revsingle(repo, b)
    pa = pvec.ctxpvec(ca)
    pb = pvec.ctxpvec(cb)
    if pa == pb:
        rel = "="
    elif pa > pb:
        rel = ">"
    elif pa < pb:
        rel = "<"
    elif pa | pb:
        rel = "|"
    else:
        # Defensive default: the four comparisons above should be
        # exhaustive, but without this branch 'rel' would be unbound and
        # the write below would raise NameError.
        rel = "?"
    ui.write(_("a: %s\n") % pa)
    ui.write(_("b: %s\n") % pb)
    ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth))
    ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") %
             (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec),
              pa.distance(pb), rel))
2055
2055
@command('debugrebuilddirstate|debugrebuildstate',
    [('r', 'rev', '', _('revision to rebuild to'), _('REV')),
     ('', 'minimal', None, _('only rebuild files that are inconsistent with '
                             'the working copy parent')),
    ],
    _('[-r REV]'))
def debugrebuilddirstate(ui, repo, rev, **opts):
    """rebuild the dirstate as it would look like for the given revision

    If no revision is specified the first current parent will be used.

    The dirstate will be set to the files of the given revision.
    The actual working directory content or existing dirstate
    information such as adds or removes is not considered.

    ``minimal`` will only rebuild the dirstate status for files that claim to be
    tracked but are not in the parent manifest, or that exist in the parent
    manifest but are not in the dirstate. It will not change adds, removes, or
    modified files that are in the working copy parent.

    One use of this command is to make the next :hg:`status` invocation
    check the actual file content.
    """
    ctx = scmutil.revsingle(repo, rev)
    with repo.wlock():
        dirstate = repo.dirstate
        # changedfiles=None means "rebuild everything" for dirstate.rebuild().
        changedfiles = None
        # See command doc for what minimal does.
        if opts.get(r'minimal'):
            manifestfiles = set(ctx.manifest().keys())
            dirstatefiles = set(dirstate)
            manifestonly = manifestfiles - dirstatefiles
            dsonly = dirstatefiles - manifestfiles
            # Files marked 'a' (added) are deliberately left alone.
            dsnotadded = set(f for f in dsonly if dirstate[f] != 'a')
            changedfiles = manifestonly | dsnotadded

        dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles)
2093
2093
@command('debugrebuildfncache', [], '')
def debugrebuildfncache(ui, repo):
    """rebuild the fncache file"""
    # Thin wrapper: all the work happens in the repair module.
    repair.rebuildfncache(ui, repo)
2098
2098
@command('debugrename',
    [('r', 'rev', '', _('revision to debug'), _('REV'))],
    _('[-r REV] [FILE]...'))
def debugrename(ui, repo, *pats, **opts):
    """dump rename information"""

    opts = pycompat.byteskwargs(opts)
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    m = scmutil.match(ctx, pats, opts)
    for abs in ctx.walk(m):
        fctx = ctx[abs]
        # renamed() returns (oldpath, oldfilenode) or False/None if the
        # filelog entry records no copy source.
        o = fctx.filelog().renamed(fctx.filenode())
        rel = repo.pathto(abs)
        if o:
            ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1])))
        else:
            ui.write(_("%s not renamed\n") % rel)
2116
2116
@command('debugrevlog', cmdutil.debugrevlogopts +
    [('d', 'dump', False, _('dump index data'))],
    _('-c|-m|FILE'),
    optionalrepo=True)
def debugrevlog(ui, repo, file_=None, **opts):
    """show data and statistics about a revlog"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts)

    if opts.get("dump"):
        # Raw per-revision dump mode: one aligned row per revision.
        numrevs = len(r)
        ui.write(("# rev p1rev p2rev start   end deltastart base   p1   p2"
                  " rawsize totalsize compression heads chainlen\n"))
        ts = 0
        heads = set()

        for rev in pycompat.xrange(numrevs):
            dbase = r.deltaparent(rev)
            if dbase == -1:
                # -1 means "no delta parent": the revision is its own base.
                dbase = rev
            cbase = r.chainbase(rev)
            clen = r.chainlen(rev)
            p1, p2 = r.parentrevs(rev)
            rs = r.rawsize(rev)
            ts = ts + rs
            # Maintain the current set of DAG heads incrementally.
            heads -= set(r.parentrevs(rev))
            heads.add(rev)
            try:
                compression = ts / r.end(rev)
            except ZeroDivisionError:
                compression = 0
            ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d "
                     "%11d %5d %8d\n" %
                     (rev, p1, p2, r.start(rev), r.end(rev),
                      r.start(dbase), r.start(cbase),
                      r.start(p1), r.start(p2),
                      rs, ts, compression, len(heads), clen))
        return 0

    v = r.version
    format = v & 0xFFFF
    flags = []
    gdelta = False
    if v & revlog.FLAG_INLINE_DATA:
        flags.append('inline')
    if v & revlog.FLAG_GENERALDELTA:
        gdelta = True
        flags.append('generaldelta')
    if not flags:
        flags = ['(none)']

    ### tracks merge vs single parent
    nummerges = 0

    ### tracks ways the "delta" are build
    # nodelta
    numempty = 0
    numemptytext = 0
    numemptydelta = 0
    # full file content
    numfull = 0
    # intermediate snapshot against a prior snapshot
    numsemi = 0
    # snapshot count per depth
    numsnapdepth = collections.defaultdict(lambda: 0)
    # delta against previous revision
    numprev = 0
    # delta against first or second parent (not prev)
    nump1 = 0
    nump2 = 0
    # delta against neither prev nor parents
    numother = 0
    # delta against prev that are also first or second parent
    # (details of `numprev`)
    nump1prev = 0
    nump2prev = 0

    # data about delta chain of each revs
    chainlengths = []
    chainbases = []
    chainspans = []

    # data about each revision; each triple is [min, max, total]
    datasize = [None, 0, 0]
    fullsize = [None, 0, 0]
    semisize = [None, 0, 0]
    # snapshot count per depth
    snapsizedepth = collections.defaultdict(lambda: [None, 0, 0])
    deltasize = [None, 0, 0]
    chunktypecounts = {}
    chunktypesizes = {}

    def addsize(size, l):
        # Fold `size` into a [min, max, total] accumulator in place.
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    numrevs = len(r)
    for rev in pycompat.xrange(numrevs):
        p1, p2 = r.parentrevs(rev)
        delta = r.deltaparent(rev)
        if format > 0:
            addsize(r.rawsize(rev), datasize)
        if p2 != nullrev:
            nummerges += 1
        size = r.length(rev)
        if delta == nullrev:
            # Stored as a full snapshot (depth 0), not a delta.
            chainlengths.append(0)
            chainbases.append(r.start(rev))
            chainspans.append(size)
            if size == 0:
                numempty += 1
                numemptytext += 1
            else:
                numfull += 1
                numsnapdepth[0] += 1
                addsize(size, fullsize)
                addsize(size, snapsizedepth[0])
        else:
            chainlengths.append(chainlengths[delta] + 1)
            baseaddr = chainbases[delta]
            revaddr = r.start(rev)
            chainbases.append(baseaddr)
            # Span: distance from chain base to the end of this revision.
            chainspans.append((revaddr - baseaddr) + size)
            if size == 0:
                numempty += 1
                numemptydelta += 1
            elif r.issnapshot(rev):
                # Intermediate snapshot: a delta against a prior snapshot.
                addsize(size, semisize)
                numsemi += 1
                depth = r.snapshotdepth(rev)
                numsnapdepth[depth] += 1
                addsize(size, snapsizedepth[depth])
            else:
                addsize(size, deltasize)
                if delta == rev - 1:
                    numprev += 1
                    if delta == p1:
                        nump1prev += 1
                    elif delta == p2:
                        nump2prev += 1
                elif delta == p1:
                    nump1 += 1
                elif delta == p2:
                    nump2 += 1
                elif delta != nullrev:
                    numother += 1

        # Obtain data on the raw chunks in the revlog.
        if util.safehasattr(r, '_getsegmentforrevs'):
            segment = r._getsegmentforrevs(rev, rev)[1]
        else:
            segment = r._revlog._getsegmentforrevs(rev, rev)[1]
        if segment:
            # First byte of the stored chunk identifies its compression type.
            chunktype = bytes(segment[0:1])
        else:
            chunktype = 'empty'

        if chunktype not in chunktypecounts:
            chunktypecounts[chunktype] = 0
            chunktypesizes[chunktype] = 0

        chunktypecounts[chunktype] += 1
        chunktypesizes[chunktype] += size

    # Adjust size min value for empty cases
    for size in (datasize, fullsize, semisize, deltasize):
        if size[0] is None:
            size[0] = 0

    numdeltas = numrevs - numfull - numempty - numsemi
    numoprev = numprev - nump1prev - nump2prev
    totalrawsize = datasize[2]
    # From here on, slot [2] of each triple is converted from total to mean.
    datasize[2] /= numrevs
    fulltotal = fullsize[2]
    if numfull == 0:
        fullsize[2] = 0
    else:
        fullsize[2] /= numfull
    semitotal = semisize[2]
    snaptotal = {}
    if numsemi > 0:
        semisize[2] /= numsemi
        for depth in snapsizedepth:
            snaptotal[depth] = snapsizedepth[depth][2]
            snapsizedepth[depth][2] /= numsnapdepth[depth]

    deltatotal = deltasize[2]
    if numdeltas > 0:
        deltasize[2] /= numdeltas
    totalsize = fulltotal + semitotal + deltatotal
    avgchainlen = sum(chainlengths) / numrevs
    maxchainlen = max(chainlengths)
    maxchainspan = max(chainspans)
    compratio = 1
    if totalsize:
        compratio = totalrawsize / totalsize

    basedfmtstr = '%%%dd\n'
    basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n'

    def dfmtstr(max):
        # Decimal format padded to the width of the largest value.
        return basedfmtstr % len(str(max))
    def pcfmtstr(max, padding=0):
        # Decimal format followed by a percentage, optionally indented.
        return basepcfmtstr % (len(str(max)), ' ' * padding)

    def pcfmt(value, total):
        # Pair (value, percent-of-total); 100% when total is zero/falsy.
        if total:
            return (value, 100 * float(value) / total)
        else:
            return value, 100.0

    ui.write(('format : %d\n') % format)
    ui.write(('flags  : %s\n') % ', '.join(flags))

    ui.write('\n')
    fmt = pcfmtstr(totalsize)
    fmt2 = dfmtstr(totalsize)
    ui.write(('revisions     : ') + fmt2 % numrevs)
    ui.write(('    merges    : ') + fmt % pcfmt(nummerges, numrevs))
    ui.write(('    normal    : ') + fmt % pcfmt(numrevs - nummerges, numrevs))
    ui.write(('revisions     : ') + fmt2 % numrevs)
    ui.write(('    empty     : ') + fmt % pcfmt(numempty, numrevs))
    ui.write(('                   text  : ')
             + fmt % pcfmt(numemptytext, numemptytext + numemptydelta))
    ui.write(('                   delta : ')
             + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta))
    ui.write(('    snapshot  : ') + fmt % pcfmt(numfull + numsemi, numrevs))
    for depth in sorted(numsnapdepth):
        ui.write(('      lvl-%-3d :       ' % depth)
                 + fmt % pcfmt(numsnapdepth[depth], numrevs))
    ui.write(('    deltas    : ') + fmt % pcfmt(numdeltas, numrevs))
    ui.write(('revision size : ') + fmt2 % totalsize)
    ui.write(('    snapshot  : ')
             + fmt % pcfmt(fulltotal + semitotal, totalsize))
    for depth in sorted(numsnapdepth):
        ui.write(('      lvl-%-3d :       ' % depth)
                 + fmt % pcfmt(snaptotal[depth], totalsize))
    ui.write(('    deltas    : ') + fmt % pcfmt(deltatotal, totalsize))

    def fmtchunktype(chunktype):
        # Label for a chunk-compression marker byte ('u', 'x', ...).
        if chunktype == 'empty':
            return '    %s     : ' % chunktype
        elif chunktype in pycompat.bytestr(string.ascii_letters):
            return '    0x%s (%s)  : ' % (hex(chunktype), chunktype)
        else:
            return '    0x%s      : ' % hex(chunktype)

    ui.write('\n')
    ui.write(('chunks        : ') + fmt2 % numrevs)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs))
    ui.write(('chunks size   : ') + fmt2 % totalsize)
    for chunktype in sorted(chunktypecounts):
        ui.write(fmtchunktype(chunktype))
        ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize))

    ui.write('\n')
    fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio))
    ui.write(('avg chain length  : ') + fmt % avgchainlen)
    ui.write(('max chain length  : ') + fmt % maxchainlen)
    ui.write(('max chain reach   : ') + fmt % maxchainspan)
    ui.write(('compression ratio : ') + fmt % compratio)

    if format > 0:
        ui.write('\n')
        ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n')
                 % tuple(datasize))
        ui.write(('full revision size (min/max/avg)     : %d / %d / %d\n')
                 % tuple(fullsize))
        ui.write(('inter-snapshot size (min/max/avg)    : %d / %d / %d\n')
                 % tuple(semisize))
        for depth in sorted(snapsizedepth):
            if depth == 0:
                # depth 0 == full snapshots, already reported above.
                continue
            ui.write(('    level-%-3d (min/max/avg)          : %d / %d / %d\n')
                     % ((depth,) + tuple(snapsizedepth[depth])))
        ui.write(('delta size (min/max/avg)             : %d / %d / %d\n')
                 % tuple(deltasize))

    if numdeltas > 0:
        ui.write('\n')
        fmt = pcfmtstr(numdeltas)
        fmt2 = pcfmtstr(numdeltas, 4)
        ui.write(('deltas against prev  : ') + fmt % pcfmt(numprev, numdeltas))
        if numprev > 0:
            ui.write(('    where prev = p1  : ') + fmt2 % pcfmt(nump1prev,
                                                                numprev))
            ui.write(('    where prev = p2  : ') + fmt2 % pcfmt(nump2prev,
                                                                numprev))
            ui.write(('    other            : ') + fmt2 % pcfmt(numoprev,
                                                                numprev))
        if gdelta:
            ui.write(('deltas against p1    : ')
                     + fmt % pcfmt(nump1, numdeltas))
            ui.write(('deltas against p2    : ')
                     + fmt % pcfmt(nump2, numdeltas))
            ui.write(('deltas against other : ') + fmt % pcfmt(numother,
                                                               numdeltas))
2419
@command('debugrevlogindex', cmdutil.debugrevlogopts +
    [('f', 'format', 0, _('revlog format'), _('FORMAT'))],
    _('[-f FORMAT] -c|-m|FILE'),
    optionalrepo=True)
def debugrevlogindex(ui, repo, file_=None, **opts):
    """dump the contents of a revlog index"""
    opts = pycompat.byteskwargs(opts)
    r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts)
    format = opts.get('format', 0)
    if format not in (0, 1):
        raise error.Abort(_("unknown format %d") % format)

    # Full hashes with --debug, short (12-char) ones otherwise.
    if ui.debugflag:
        shortfn = hex
    else:
        shortfn = short

    # There might not be anything in r, so have a sane default
    idlen = 12
    for i in r:
        idlen = len(shortfn(r.node(i)))
        break

    if format == 0:
        if ui.verbose:
            ui.write(("   rev    offset  length linkrev"
                     " %s %s p2\n") % ("nodeid".ljust(idlen),
                                       "p1".ljust(idlen)))
        else:
            ui.write(("   rev linkrev %s %s p2\n") % (
                "nodeid".ljust(idlen), "p1".ljust(idlen)))
    elif format == 1:
        if ui.verbose:
            ui.write(("   rev flag   offset   length     size   link     p1"
                      "     p2 %s\n") % "nodeid".rjust(idlen))
        else:
            ui.write(("   rev flag     size   link     p1     p2 %s\n") %
                     "nodeid".rjust(idlen))

    for i in r:
        node = r.node(i)
        if format == 0:
            try:
                pp = r.parents(node)
            except Exception:
                # Best-effort: a corrupt entry still gets a row, with null
                # parents, rather than aborting the whole dump.
                pp = [nullid, nullid]
            if ui.verbose:
                ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % (
                    i, r.start(i), r.length(i), r.linkrev(i),
                    shortfn(node), shortfn(pp[0]), shortfn(pp[1])))
            else:
                ui.write("% 6d % 7d %s %s %s\n" % (
                    i, r.linkrev(i), shortfn(node), shortfn(pp[0]),
                    shortfn(pp[1])))
        elif format == 1:
            pr = r.parentrevs(i)
            if ui.verbose:
                ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.start(i), r.length(i), r.rawsize(i),
                    r.linkrev(i), pr[0], pr[1], shortfn(node)))
            else:
                ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % (
                    i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1],
                    shortfn(node)))
2484
2484
@command('debugrevspec',
    [('', 'optimize', None,
      _('print parsed tree after optimizing (DEPRECATED)')),
     ('', 'show-revs', True, _('print list of result revisions (default)')),
     ('s', 'show-set', None, _('print internal representation of result set')),
     ('p', 'show-stage', [],
      _('print parsed tree at the given stage'), _('NAME')),
     ('', 'no-optimized', False, _('evaluate tree without optimization')),
     ('', 'verify-optimized', False, _('verify optimized result')),
     ],
    ('REVSPEC'))
def debugrevspec(ui, repo, expr, **opts):
    """parse and apply a revision specification

    Use -p/--show-stage option to print the parsed tree at the given stages.
    Use -p all to print tree at every stage.

    Use --no-show-revs option with -s or -p to print only the set
    representation or the parsed tree respectively.

    Use --verify-optimized to compare the optimized result with the unoptimized
    one. Returns 1 if the optimized result differs.
    """
    opts = pycompat.byteskwargs(opts)
    aliases = ui.configitems('revsetalias')
    # Ordered pipeline of (stage name, transform) pairs; each transform
    # takes the tree produced by the previous stage.
    stages = [
        ('parsed', lambda tree: tree),
        ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases,
                                                           ui.warn)),
        ('concatenated', revsetlang.foldconcat),
        ('analyzed', revsetlang.analyze),
        ('optimized', revsetlang.optimize),
    ]
    if opts['no_optimized']:
        # Drop the final 'optimized' stage entirely.
        stages = stages[:-1]
    if opts['verify_optimized'] and opts['no_optimized']:
        raise error.Abort(_('cannot use --verify-optimized with '
                            '--no-optimized'))
    stagenames = set(n for n, f in stages)

    # showalways: stages printed unconditionally; showchanged: stages
    # printed only if their tree differs from the last printed tree.
    showalways = set()
    showchanged = set()
    if ui.verbose and not opts['show_stage']:
        # show parsed tree by --verbose (deprecated)
        showalways.add('parsed')
        showchanged.update(['expanded', 'concatenated'])
        if opts['optimize']:
            showalways.add('optimized')
    if opts['show_stage'] and opts['optimize']:
        raise error.Abort(_('cannot use --optimize with --show-stage'))
    if opts['show_stage'] == ['all']:
        showalways.update(stagenames)
    else:
        # Validate each requested stage name before enabling it.
        for n in opts['show_stage']:
            if n not in stagenames:
                raise error.Abort(_('invalid stage name: %s') % n)
        showalways.update(opts['show_stage'])

    # Run the pipeline, keeping every intermediate tree so that
    # --verify-optimized can re-evaluate 'analyzed' vs 'optimized' below.
    treebystage = {}
    printedtree = None
    tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo))
    for n, f in stages:
        treebystage[n] = tree = f(tree)
        if n in showalways or (n in showchanged and tree != printedtree):
            if opts['show_stage'] or n != 'parsed':
                # The stage header is omitted for the implicit --verbose
                # 'parsed' printout to preserve historical output.
                ui.write(("* %s:\n") % n)
            ui.write(revsetlang.prettyformat(tree), "\n")
            printedtree = tree

    if opts['verify_optimized']:
        # Evaluate both the analyzed and optimized trees and diff the
        # resulting revision lists; any difference indicates an
        # optimizer bug.
        arevs = revset.makematcher(treebystage['analyzed'])(repo)
        brevs = revset.makematcher(treebystage['optimized'])(repo)
        if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
            ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n")
            ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n")
        arevs = list(arevs)
        brevs = list(brevs)
        if arevs == brevs:
            return 0
        # Render the mismatch as a unified-diff-like listing of revisions.
        ui.write(('--- analyzed\n'), label='diff.file_a')
        ui.write(('+++ optimized\n'), label='diff.file_b')
        sm = difflib.SequenceMatcher(None, arevs, brevs)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag in (r'delete', r'replace'):
                for c in arevs[alo:ahi]:
                    ui.write('-%d\n' % c, label='diff.deleted')
            if tag in (r'insert', r'replace'):
                for c in brevs[blo:bhi]:
                    ui.write('+%d\n' % c, label='diff.inserted')
            if tag == r'equal':
                for c in arevs[alo:ahi]:
                    ui.write(' %d\n' % c)
        return 1

    # Normal path: evaluate the final tree and print the resulting revs.
    func = revset.makematcher(tree)
    revs = func(repo)
    if opts['show_set'] or (opts['show_set'] is None and ui.verbose):
        ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n")
    if not opts['show_revs']:
        return
    for c in revs:
        ui.write("%d\n" % c)
2587
2587
@command('debugserve', [
    ('', 'sshstdio', False, _('run an SSH server bound to process handles')),
    ('', 'logiofd', '', _('file descriptor to log server I/O to')),
    ('', 'logiofile', '', _('file to log server I/O to')),
], '')
def debugserve(ui, repo, **opts):
    """run a server with advanced settings

    This command is similar to :hg:`serve`. It exists partially as a
    workaround to the fact that ``hg serve --stdio`` must have specific
    arguments for security reasons.
    """
    opts = pycompat.byteskwargs(opts)

    if not opts['sshstdio']:
        raise error.Abort(_('only --sshstdio is currently supported'))

    logfh = None

    if opts['logiofd'] and opts['logiofile']:
        raise error.Abort(_('cannot use both --logiofd and --logiofile'))

    if opts['logiofd']:
        fd = int(opts['logiofd'])
        # Line buffered because output is line based.
        try:
            logfh = os.fdopen(fd, r'ab', 1)
        except OSError as e:
            if e.errno != errno.ESPIPE:
                raise
            # can't seek a pipe, so `ab` mode fails on py3
            logfh = os.fdopen(fd, r'wb', 1)
    elif opts['logiofile']:
        logfh = open(opts['logiofile'], 'ab', 1)

    wireprotoserver.sshserver(ui, repo, logfh=logfh).serve_forever()
2624
2624
@command('debugsetparents', [], _('REV1 [REV2]'))
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care. For example, neither the working directory nor the
    dirstate is updated, so file status may be incorrect after running this
    command.

    Returns 0 on success.
    """
    # A missing second revision defaults to the null revision.
    p1 = scmutil.revsingle(repo, rev1).node()
    p2 = scmutil.revsingle(repo, rev2, 'null').node()

    with repo.wlock():
        repo.setparents(p1, p2)
2642
2642
@command('debugssl', [], '[SOURCE]', optionalrepo=True)
def debugssl(ui, repo, source=None, **opts):
    '''test a secure connection to a server

    This builds the certificate chain for the server on Windows, installing the
    missing intermediates and trusted root via Windows Update if necessary. It
    does nothing on other platforms.

    If SOURCE is omitted, the 'default' path will be used. If a URL is given,
    that server is used. See :hg:`help urls` for more information.

    If the update succeeds, retry the original operation. Otherwise, the cause
    of the SSL error is likely another issue.
    '''
    # The whole command is Windows-only: the chain-building helpers live
    # in the win32 module imported below.
    if not pycompat.iswindows:
        raise error.Abort(_('certificate chain building is only possible on '
                            'Windows'))

    if not source:
        # Without an explicit SOURCE we need a repo to resolve 'default'.
        if not repo:
            raise error.Abort(_("there is no Mercurial repository here, and no "
                                "server specified"))
        source = "default"

    source, branches = hg.parseurl(ui.expandpath(source))
    url = util.url(source)

    # Fall back to the scheme's well-known port when the URL has none.
    defaultport = {'https': 443, 'ssh': 22}
    if url.scheme in defaultport:
        try:
            addr = (url.host, int(url.port or defaultport[url.scheme]))
        except ValueError:
            raise error.Abort(_("malformed port number in URL"))
    else:
        raise error.Abort(_("only https and ssh connections are supported"))

    # Imported lazily so the module loads on non-Windows platforms.
    from . import win32

    # Certificate verification is deliberately disabled: we only want the
    # peer's raw certificate in order to inspect/repair its chain.
    s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS,
                        cert_reqs=ssl.CERT_NONE, ca_certs=None)

    try:
        s.connect(addr)
        # True -> DER-encoded binary form of the peer certificate.
        cert = s.getpeercert(True)

        ui.status(_('checking the certificate chain for %s\n') % url.host)

        # First pass only checks; build=False avoids touching the system.
        complete = win32.checkcertificatechain(cert, build=False)

        if not complete:
            ui.status(_('certificate chain is incomplete, updating... '))

            # Second pass (build defaults to True) attempts the repair.
            if not win32.checkcertificatechain(cert):
                ui.status(_('failed.\n'))
            else:
                ui.status(_('done.\n'))
        else:
            ui.status(_('full certificate chain is available\n'))
    finally:
        # Always release the socket, even if connect/inspection failed.
        s.close()
2703
2703
@command('debugsub',
    [('r', 'rev', '',
      _('revision to check'), _('REV'))],
    _('[-r REV] [REV]'))
def debugsub(ui, repo, rev=None):
    """print the subrepository state (path, source, revision) of a revision"""
    ctx = scmutil.revsingle(repo, rev, None)
    # Deterministic output: iterate subrepo paths in sorted order.
    for path, state in sorted(ctx.substate.items()):
        ui.write(('path %s\n') % path)
        ui.write((' source %s\n') % state[0])
        ui.write((' revision %s\n') % state[1])
2714
2714
@command('debugsuccessorssets',
    [('', 'closest', False, _('return closest successors sets only'))],
    _('[REV]'))
def debugsuccessorssets(ui, repo, *revs, **opts):
    """show set of successors for revision

    A successors set of changeset A is a consistent group of revisions that
    succeed A. It contains non-obsolete changesets only unless closests
    successors set is set.

    In most cases a changeset A has a single successors set containing a single
    successor (changeset A replaced by A').

    A changeset that is made obsolete with no successors are called "pruned".
    Such changesets have no successors sets at all.

    A changeset that has been "split" will have a successors set containing
    more than one successor.

    A changeset that has been rewritten in multiple different ways is called
    "divergent". Such changesets have multiple successor sets (each of which
    may also be split, i.e. have multiple successors).

    Results are displayed as follows::

        <rev1>
            <successors-1A>
        <rev2>
            <successors-2A>
            <successors-2B1> <successors-2B2> <successors-2B3>

    Here rev2 has two possible (i.e. divergent) successors sets. The first
    holds one element, whereas the second holds three (i.e. the changeset has
    been split).
    """
    # Cache shared across successorssets() calls to avoid recomputation.
    cache = {}
    for rev in scmutil.revrange(repo, revs):
        ctx = repo[rev]
        ui.write('%s\n' % bytes(ctx))
        for succsset in obsutil.successorssets(repo, ctx.node(),
                                               closest=opts[r'closest'],
                                               cache=cache):
            # One indented line per successors set; empty sets yield a
            # blank line.
            for node in succsset:
                ui.write(' ')
                ui.write(short(node))
            ui.write('\n')
2767
2767
@command('debugtemplate',
    [('r', 'rev', [], _('apply template on changesets'), _('REV')),
     ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))],
    _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'),
    optionalrepo=True)
def debugtemplate(ui, repo, tmpl, **opts):
    """parse and apply a template

    If -r/--rev is given, the template is processed as a log template and
    applied to the given changesets. Otherwise, it is processed as a generic
    template.

    Use --verbose to print the parsed tree.
    """
    revs = None
    if opts[r'rev']:
        # --rev requires a repository (the command is optionalrepo).
        if repo is None:
            raise error.RepoError(_('there is no Mercurial repository here '
                                    '(.hg not found)'))
        revs = scmutil.revrange(repo, opts[r'rev'])

    # Parse -D KEY=VALUE definitions into extra template properties.
    props = {}
    for d in opts[r'define']:
        try:
            k, v = (e.strip() for e in d.split('=', 1))
            # 'ui' is reserved; an empty key is also invalid. Raising
            # ValueError funnels both into the same error message as a
            # missing '=' separator.
            if not k or k == 'ui':
                raise ValueError
            props[k] = v
        except ValueError:
            raise error.Abort(_('malformed keyword definition: %s') % d)

    if ui.verbose:
        # Show the parsed tree, and the alias-expanded tree if different.
        aliases = ui.configitems('templatealias')
        tree = templater.parse(tmpl)
        ui.note(templater.prettyformat(tree), '\n')
        newtree = templater.expandaliases(tree, aliases)
        if newtree != tree:
            ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n')

    if revs is None:
        # Generic template: render once with the -D properties only.
        tres = formatter.templateresources(ui, repo)
        t = formatter.maketemplater(ui, tmpl, resources=tres)
        if ui.verbose:
            kwds, funcs = t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        ui.write(t.renderdefault(props))
    else:
        # Log template: render once per requested changeset.
        displayer = logcmdutil.maketemplater(ui, repo, tmpl)
        if ui.verbose:
            kwds, funcs = displayer.t.symbolsuseddefault()
            ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds)))
            ui.write(("* functions: %s\n") % ', '.join(sorted(funcs)))
        for r in revs:
            displayer.show(repo[r], **pycompat.strkwargs(props))
        displayer.close()
2824
2824
@command('debuguigetpass', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguigetpass(ui, prompt=''):
    """show prompt to type password"""
    r = ui.getpass(prompt)
    # Fix: the label was misspelled 'respose'; use 'response' to match
    # the output of the sibling debuguiprompt command.
    ui.write(('response: %s\n') % r)
2832
2832
@command('debuguiprompt', [
    ('p', 'prompt', '', _('prompt text'), _('TEXT')),
], _('[-p TEXT]'), norepo=True)
def debuguiprompt(ui, prompt=''):
    """show plain prompt"""
    # Echo whatever the user typed at the prompt.
    ui.write(('response: %s\n') % ui.prompt(prompt))
2840
2840
@command('debugupdatecaches', [])
def debugupdatecaches(ui, repo, *pats, **opts):
    """warm all known caches in the repository"""
    # Both the working-directory lock and the store lock are taken so
    # every cache (including dirstate-related ones) can be rebuilt safely.
    with repo.wlock(), repo.lock():
        # full=True forces regeneration rather than incremental updates.
        repo.updatecaches(full=True)
2846
2846
@command('debugupgraderepo', [
    ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')),
    ('', 'run', False, _('performs an upgrade')),
    ('', 'backup', True, _('keep the old repository content around')),
    ('', 'changelog', None, _('select the changelog for upgrade')),
    ('', 'manifest', None, _('select the manifest for upgrade')),
])
def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts):
    """upgrade a repository to use different features

    If no arguments are specified, the repository is evaluated for upgrade
    and a list of problems and potential optimizations is printed.

    With ``--run``, a repository upgrade is performed. Behavior of the upgrade
    can be influenced via additional arguments. More details will be provided
    by the command output when run without ``--run``.

    During the upgrade, the repository will be locked and no writes will be
    allowed.

    At the end of the upgrade, the repository may not be readable while new
    repository data is swapped in. This window will be as long as it takes to
    rename some directories inside the ``.hg`` directory. On most machines, this
    should complete almost instantaneously and the chances of a consumer being
    unable to access the repository should be low.

    By default, all revlog will be upgraded. You can restrict this using flag
    such as `--manifest`:

    * `--manifest`: only optimize the manifest
    * `--no-manifest`: optimize all revlog but the manifest
    * `--changelog`: optimize the changelog only
    * `--no-changelog --no-manifest`: optimize filelogs only
    """
    # Thin wrapper: all of the analysis/upgrade logic lives in the
    # upgrade module; the remaining revlog-selection flags are passed
    # through via **opts.
    return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize,
                               backup=backup, **opts)
2883
2883
@command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'),
         inferrepo=True)
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    opts = pycompat.byteskwargs(opts)
    wctx = repo[None]
    m = scmutil.match(wctx, pats, opts)
    if ui.verbose:
        ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n')
    items = list(wctx.walk(m))
    if not items:
        return
    # Optionally normalize path separators for display.
    if ui.configbool('ui', 'slash') and pycompat.ossep != '/':
        f = util.normpath
    else:
        f = lambda fn: fn
    # Column widths sized to the longest absolute and relative paths.
    fmt = 'f %%-%ds %%-%ds %%s' % (
        max(len(abs) for abs in items),
        max(len(repo.pathto(abs)) for abs in items))
    for abs in items:
        rel = f(repo.pathto(abs))
        exact = m.exact(abs) and 'exact' or ''
        ui.write("%s\n" % (fmt % (abs, rel, exact)).rstrip())
2904
2904
@command('debugwhyunstable', [], _('REV'))
def debugwhyunstable(ui, repo, rev):
    """explain instabilities of a changeset"""
    for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)):
        divergent = entry.get('divergentnodes')
        if divergent:
            # Render each divergent changeset as "hash (phase)", joined by
            # spaces, with a trailing space separating it from the reason.
            dnodes = ' '.join('%s (%s)' % (ctx.hex(), ctx.phasestr())
                              for ctx in divergent) + ' '
        else:
            dnodes = ''
        ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes,
                                    entry['reason'], entry['node']))
2915
2915
@command('debugwireargs',
    [('', 'three', '', 'three'),
     ('', 'four', '', 'four'),
     ('', 'five', '', 'five'),
     ] + cmdutil.remoteopts,
    _('REPO [OPTIONS]... [ONE [TWO]]'),
    norepo=True)
def debugwireargs(ui, repopath, *vals, **opts):
    """round-trip arguments over the wire protocol and echo the result"""
    opts = pycompat.byteskwargs(opts)
    peer = hg.peer(ui, opts, repopath)
    # Strip the generic remote options; only the test-specific ones
    # (three/four/five) are forwarded.
    for remoteopt in cmdutil.remoteopts:
        del opts[remoteopt[1]]
    # Forward only options that were actually set.
    args = pycompat.strkwargs(
        {k: v for k, v in opts.iteritems() if v})
    # run twice to check that we don't mess up the stream for the next command
    res1 = peer.debugwireargs(*vals, **args)
    res2 = peer.debugwireargs(*vals, **args)
    ui.write("%s\n" % res1)
    if res1 != res2:
        ui.warn("%s\n" % res2)
2939
2939
2940 def _parsewirelangblocks(fh):
2940 def _parsewirelangblocks(fh):
2941 activeaction = None
2941 activeaction = None
2942 blocklines = []
2942 blocklines = []
2943 lastindent = 0
2943 lastindent = 0
2944
2944
2945 for line in fh:
2945 for line in fh:
2946 line = line.rstrip()
2946 line = line.rstrip()
2947 if not line:
2947 if not line:
2948 continue
2948 continue
2949
2949
2950 if line.startswith(b'#'):
2950 if line.startswith(b'#'):
2951 continue
2951 continue
2952
2952
2953 if not line.startswith(b' '):
2953 if not line.startswith(b' '):
2954 # New block. Flush previous one.
2954 # New block. Flush previous one.
2955 if activeaction:
2955 if activeaction:
2956 yield activeaction, blocklines
2956 yield activeaction, blocklines
2957
2957
2958 activeaction = line
2958 activeaction = line
2959 blocklines = []
2959 blocklines = []
2960 lastindent = 0
2960 lastindent = 0
2961 continue
2961 continue
2962
2962
2963 # Else we start with an indent.
2963 # Else we start with an indent.
2964
2964
2965 if not activeaction:
2965 if not activeaction:
2966 raise error.Abort(_('indented line outside of block'))
2966 raise error.Abort(_('indented line outside of block'))
2967
2967
2968 indent = len(line) - len(line.lstrip())
2968 indent = len(line) - len(line.lstrip())
2969
2969
2970 # If this line is indented more than the last line, concatenate it.
2970 # If this line is indented more than the last line, concatenate it.
2971 if indent > lastindent and blocklines:
2971 if indent > lastindent and blocklines:
2972 blocklines[-1] += line.lstrip()
2972 blocklines[-1] += line.lstrip()
2973 else:
2973 else:
2974 blocklines.append(line)
2974 blocklines.append(line)
2975 lastindent = indent
2975 lastindent = indent
2976
2976
2977 # Flush last block.
2977 # Flush last block.
2978 if activeaction:
2978 if activeaction:
2979 yield activeaction, blocklines
2979 yield activeaction, blocklines
2980
2980
2981 @command('debugwireproto',
2981 @command('debugwireproto',
2982 [
2982 [
2983 ('', 'localssh', False, _('start an SSH server for this repo')),
2983 ('', 'localssh', False, _('start an SSH server for this repo')),
2984 ('', 'peer', '', _('construct a specific version of the peer')),
2984 ('', 'peer', '', _('construct a specific version of the peer')),
2985 ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
2985 ('', 'noreadstderr', False, _('do not read from stderr of the remote')),
2986 ('', 'nologhandshake', False,
2986 ('', 'nologhandshake', False,
2987 _('do not log I/O related to the peer handshake')),
2987 _('do not log I/O related to the peer handshake')),
2988 ] + cmdutil.remoteopts,
2988 ] + cmdutil.remoteopts,
2989 _('[PATH]'),
2989 _('[PATH]'),
2990 optionalrepo=True)
2990 optionalrepo=True)
2991 def debugwireproto(ui, repo, path=None, **opts):
2991 def debugwireproto(ui, repo, path=None, **opts):
2992 """send wire protocol commands to a server
2992 """send wire protocol commands to a server
2993
2993
2994 This command can be used to issue wire protocol commands to remote
2994 This command can be used to issue wire protocol commands to remote
2995 peers and to debug the raw data being exchanged.
2995 peers and to debug the raw data being exchanged.
2996
2996
2997 ``--localssh`` will start an SSH server against the current repository
2997 ``--localssh`` will start an SSH server against the current repository
2998 and connect to that. By default, the connection will perform a handshake
2998 and connect to that. By default, the connection will perform a handshake
2999 and establish an appropriate peer instance.
2999 and establish an appropriate peer instance.
3000
3000
3001 ``--peer`` can be used to bypass the handshake protocol and construct a
3001 ``--peer`` can be used to bypass the handshake protocol and construct a
3002 peer instance using the specified class type. Valid values are ``raw``,
3002 peer instance using the specified class type. Valid values are ``raw``,
3003 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
3003 ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending
3004 raw data payloads and don't support higher-level command actions.
3004 raw data payloads and don't support higher-level command actions.
3005
3005
3006 ``--noreadstderr`` can be used to disable automatic reading from stderr
3006 ``--noreadstderr`` can be used to disable automatic reading from stderr
3007 of the peer (for SSH connections only). Disabling automatic reading of
3007 of the peer (for SSH connections only). Disabling automatic reading of
3008 stderr is useful for making output more deterministic.
3008 stderr is useful for making output more deterministic.
3009
3009
3010 Commands are issued via a mini language which is specified via stdin.
3010 Commands are issued via a mini language which is specified via stdin.
3011 The language consists of individual actions to perform. An action is
3011 The language consists of individual actions to perform. An action is
3012 defined by a block. A block is defined as a line with no leading
3012 defined by a block. A block is defined as a line with no leading
3013 space followed by 0 or more lines with leading space. Blocks are
3013 space followed by 0 or more lines with leading space. Blocks are
3014 effectively a high-level command with additional metadata.
3014 effectively a high-level command with additional metadata.
3015
3015
3016 Lines beginning with ``#`` are ignored.
3016 Lines beginning with ``#`` are ignored.
3017
3017
3018 The following sections denote available actions.
3018 The following sections denote available actions.
3019
3019
3020 raw
3020 raw
3021 ---
3021 ---
3022
3022
3023 Send raw data to the server.
3023 Send raw data to the server.
3024
3024
3025 The block payload contains the raw data to send as one atomic send
3025 The block payload contains the raw data to send as one atomic send
3026 operation. The data may not actually be delivered in a single system
3026 operation. The data may not actually be delivered in a single system
3027 call: it depends on the abilities of the transport being used.
3027 call: it depends on the abilities of the transport being used.
3028
3028
3029 Each line in the block is de-indented and concatenated. Then, that
3029 Each line in the block is de-indented and concatenated. Then, that
3030 value is evaluated as a Python b'' literal. This allows the use of
3030 value is evaluated as a Python b'' literal. This allows the use of
3031 backslash escaping, etc.
3031 backslash escaping, etc.
3032
3032
3033 raw+
3033 raw+
3034 ----
3034 ----
3035
3035
3036 Behaves like ``raw`` except flushes output afterwards.
3036 Behaves like ``raw`` except flushes output afterwards.
3037
3037
3038 command <X>
3038 command <X>
3039 -----------
3039 -----------
3040
3040
3041 Send a request to run a named command, whose name follows the ``command``
3041 Send a request to run a named command, whose name follows the ``command``
3042 string.
3042 string.
3043
3043
3044 Arguments to the command are defined as lines in this block. The format of
3044 Arguments to the command are defined as lines in this block. The format of
3045 each line is ``<key> <value>``. e.g.::
3045 each line is ``<key> <value>``. e.g.::
3046
3046
3047 command listkeys
3047 command listkeys
3048 namespace bookmarks
3048 namespace bookmarks
3049
3049
3050 If the value begins with ``eval:``, it will be interpreted as a Python
3050 If the value begins with ``eval:``, it will be interpreted as a Python
3051 literal expression. Otherwise values are interpreted as Python b'' literals.
3051 literal expression. Otherwise values are interpreted as Python b'' literals.
3052 This allows sending complex types and encoding special byte sequences via
3052 This allows sending complex types and encoding special byte sequences via
3053 backslash escaping.
3053 backslash escaping.
3054
3054
3055 The following arguments have special meaning:
3055 The following arguments have special meaning:
3056
3056
3057 ``PUSHFILE``
3057 ``PUSHFILE``
3058 When defined, the *push* mechanism of the peer will be used instead
3058 When defined, the *push* mechanism of the peer will be used instead
3059 of the static request-response mechanism and the content of the
3059 of the static request-response mechanism and the content of the
3060 file specified in the value of this argument will be sent as the
3060 file specified in the value of this argument will be sent as the
3061 command payload.
3061 command payload.
3062
3062
3063 This can be used to submit a local bundle file to the remote.
3063 This can be used to submit a local bundle file to the remote.
3064
3064
3065 batchbegin
3065 batchbegin
3066 ----------
3066 ----------
3067
3067
3068 Instruct the peer to begin a batched send.
3068 Instruct the peer to begin a batched send.
3069
3069
3070 All ``command`` blocks are queued for execution until the next
3070 All ``command`` blocks are queued for execution until the next
3071 ``batchsubmit`` block.
3071 ``batchsubmit`` block.
3072
3072
3073 batchsubmit
3073 batchsubmit
3074 -----------
3074 -----------
3075
3075
3076 Submit previously queued ``command`` blocks as a batch request.
3076 Submit previously queued ``command`` blocks as a batch request.
3077
3077
3078 This action MUST be paired with a ``batchbegin`` action.
3078 This action MUST be paired with a ``batchbegin`` action.
3079
3079
3080 httprequest <method> <path>
3080 httprequest <method> <path>
3081 ---------------------------
3081 ---------------------------
3082
3082
3083 (HTTP peer only)
3083 (HTTP peer only)
3084
3084
3085 Send an HTTP request to the peer.
3085 Send an HTTP request to the peer.
3086
3086
3087 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
3087 The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``.
3088
3088
3089 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
3089 Arguments of the form ``<key>: <value>`` are interpreted as HTTP request
3090 headers to add to the request. e.g. ``Accept: foo``.
3090 headers to add to the request. e.g. ``Accept: foo``.
3091
3091
3092 The following arguments are special:
3092 The following arguments are special:
3093
3093
3094 ``BODYFILE``
3094 ``BODYFILE``
3095 The content of the file defined as the value to this argument will be
3095 The content of the file defined as the value to this argument will be
3096 transferred verbatim as the HTTP request body.
3096 transferred verbatim as the HTTP request body.
3097
3097
3098 ``frame <type> <flags> <payload>``
3098 ``frame <type> <flags> <payload>``
3099 Send a unified protocol frame as part of the request body.
3099 Send a unified protocol frame as part of the request body.
3100
3100
3101 All frames will be collected and sent as the body to the HTTP
3101 All frames will be collected and sent as the body to the HTTP
3102 request.
3102 request.
3103
3103
3104 close
3104 close
3105 -----
3105 -----
3106
3106
3107 Close the connection to the server.
3107 Close the connection to the server.
3108
3108
3109 flush
3109 flush
3110 -----
3110 -----
3111
3111
3112 Flush data written to the server.
3112 Flush data written to the server.
3113
3113
3114 readavailable
3114 readavailable
3115 -------------
3115 -------------
3116
3116
3117 Close the write end of the connection and read all available data from
3117 Close the write end of the connection and read all available data from
3118 the server.
3118 the server.
3119
3119
3120 If the connection to the server encompasses multiple pipes, we poll both
3120 If the connection to the server encompasses multiple pipes, we poll both
3121 pipes and read available data.
3121 pipes and read available data.
3122
3122
3123 readline
3123 readline
3124 --------
3124 --------
3125
3125
3126 Read a line of output from the server. If there are multiple output
3126 Read a line of output from the server. If there are multiple output
3127 pipes, reads only the main pipe.
3127 pipes, reads only the main pipe.
3128
3128
3129 ereadline
3129 ereadline
3130 ---------
3130 ---------
3131
3131
3132 Like ``readline``, but read from the stderr pipe, if available.
3132 Like ``readline``, but read from the stderr pipe, if available.
3133
3133
3134 read <X>
3134 read <X>
3135 --------
3135 --------
3136
3136
3137 ``read()`` N bytes from the server's main output pipe.
3137 ``read()`` N bytes from the server's main output pipe.
3138
3138
3139 eread <X>
3139 eread <X>
3140 ---------
3140 ---------
3141
3141
3142 ``read()`` N bytes from the server's stderr pipe, if available.
3142 ``read()`` N bytes from the server's stderr pipe, if available.
3143
3143
3144 Specifying Unified Frame-Based Protocol Frames
3144 Specifying Unified Frame-Based Protocol Frames
3145 ----------------------------------------------
3145 ----------------------------------------------
3146
3146
3147 It is possible to emit a *Unified Frame-Based Protocol* by using special
3147 It is possible to emit a *Unified Frame-Based Protocol* by using special
3148 syntax.
3148 syntax.
3149
3149
3150 A frame is composed as a type, flags, and payload. These can be parsed
3150 A frame is composed as a type, flags, and payload. These can be parsed
3151 from a string of the form:
3151 from a string of the form:
3152
3152
3153 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
3153 <request-id> <stream-id> <stream-flags> <type> <flags> <payload>
3154
3154
3155 ``request-id`` and ``stream-id`` are integers defining the request and
3155 ``request-id`` and ``stream-id`` are integers defining the request and
3156 stream identifiers.
3156 stream identifiers.
3157
3157
3158 ``type`` can be an integer value for the frame type or the string name
3158 ``type`` can be an integer value for the frame type or the string name
3159 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
3159 of the type. The strings are defined in ``wireprotoframing.py``. e.g.
3160 ``command-name``.
3160 ``command-name``.
3161
3161
3162 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
3162 ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag
3163 components. Each component (and there can be just one) can be an integer
3163 components. Each component (and there can be just one) can be an integer
3164 or a flag name for stream flags or frame flags, respectively. Values are
3164 or a flag name for stream flags or frame flags, respectively. Values are
3165 resolved to integers and then bitwise OR'd together.
3165 resolved to integers and then bitwise OR'd together.
3166
3166
3167 ``payload`` represents the raw frame payload. If it begins with
3167 ``payload`` represents the raw frame payload. If it begins with
3168 ``cbor:``, the following string is evaluated as Python code and the
3168 ``cbor:``, the following string is evaluated as Python code and the
3169 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
3169 resulting object is fed into a CBOR encoder. Otherwise it is interpreted
3170 as a Python byte string literal.
3170 as a Python byte string literal.
3171 """
3171 """
3172 opts = pycompat.byteskwargs(opts)
3172 opts = pycompat.byteskwargs(opts)
3173
3173
3174 if opts['localssh'] and not repo:
3174 if opts['localssh'] and not repo:
3175 raise error.Abort(_('--localssh requires a repository'))
3175 raise error.Abort(_('--localssh requires a repository'))
3176
3176
3177 if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
3177 if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'):
3178 raise error.Abort(_('invalid value for --peer'),
3178 raise error.Abort(_('invalid value for --peer'),
3179 hint=_('valid values are "raw", "ssh1", and "ssh2"'))
3179 hint=_('valid values are "raw", "ssh1", and "ssh2"'))
3180
3180
3181 if path and opts['localssh']:
3181 if path and opts['localssh']:
3182 raise error.Abort(_('cannot specify --localssh with an explicit '
3182 raise error.Abort(_('cannot specify --localssh with an explicit '
3183 'path'))
3183 'path'))
3184
3184
3185 if ui.interactive():
3185 if ui.interactive():
3186 ui.write(_('(waiting for commands on stdin)\n'))
3186 ui.write(_('(waiting for commands on stdin)\n'))
3187
3187
3188 blocks = list(_parsewirelangblocks(ui.fin))
3188 blocks = list(_parsewirelangblocks(ui.fin))
3189
3189
3190 proc = None
3190 proc = None
3191 stdin = None
3191 stdin = None
3192 stdout = None
3192 stdout = None
3193 stderr = None
3193 stderr = None
3194 opener = None
3194 opener = None
3195
3195
3196 if opts['localssh']:
3196 if opts['localssh']:
3197 # We start the SSH server in its own process so there is process
3197 # We start the SSH server in its own process so there is process
3198 # separation. This prevents a whole class of potential bugs around
3198 # separation. This prevents a whole class of potential bugs around
3199 # shared state from interfering with server operation.
3199 # shared state from interfering with server operation.
3200 args = procutil.hgcmd() + [
3200 args = procutil.hgcmd() + [
3201 '-R', repo.root,
3201 '-R', repo.root,
3202 'debugserve', '--sshstdio',
3202 'debugserve', '--sshstdio',
3203 ]
3203 ]
3204 proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
3204 proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args),
3205 stdin=subprocess.PIPE,
3205 stdin=subprocess.PIPE,
3206 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
3206 stdout=subprocess.PIPE, stderr=subprocess.PIPE,
3207 bufsize=0)
3207 bufsize=0)
3208
3208
3209 stdin = proc.stdin
3209 stdin = proc.stdin
3210 stdout = proc.stdout
3210 stdout = proc.stdout
3211 stderr = proc.stderr
3211 stderr = proc.stderr
3212
3212
3213 # We turn the pipes into observers so we can log I/O.
3213 # We turn the pipes into observers so we can log I/O.
3214 if ui.verbose or opts['peer'] == 'raw':
3214 if ui.verbose or opts['peer'] == 'raw':
3215 stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
3215 stdin = util.makeloggingfileobject(ui, proc.stdin, b'i',
3216 logdata=True)
3216 logdata=True)
3217 stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
3217 stdout = util.makeloggingfileobject(ui, proc.stdout, b'o',
3218 logdata=True)
3218 logdata=True)
3219 stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
3219 stderr = util.makeloggingfileobject(ui, proc.stderr, b'e',
3220 logdata=True)
3220 logdata=True)
3221
3221
3222 # --localssh also implies the peer connection settings.
3222 # --localssh also implies the peer connection settings.
3223
3223
3224 url = 'ssh://localserver'
3224 url = 'ssh://localserver'
3225 autoreadstderr = not opts['noreadstderr']
3225 autoreadstderr = not opts['noreadstderr']
3226
3226
3227 if opts['peer'] == 'ssh1':
3227 if opts['peer'] == 'ssh1':
3228 ui.write(_('creating ssh peer for wire protocol version 1\n'))
3228 ui.write(_('creating ssh peer for wire protocol version 1\n'))
3229 peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
3229 peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr,
3230 None, autoreadstderr=autoreadstderr)
3230 None, autoreadstderr=autoreadstderr)
3231 elif opts['peer'] == 'ssh2':
3231 elif opts['peer'] == 'ssh2':
3232 ui.write(_('creating ssh peer for wire protocol version 2\n'))
3232 ui.write(_('creating ssh peer for wire protocol version 2\n'))
3233 peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
3233 peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr,
3234 None, autoreadstderr=autoreadstderr)
3234 None, autoreadstderr=autoreadstderr)
3235 elif opts['peer'] == 'raw':
3235 elif opts['peer'] == 'raw':
3236 ui.write(_('using raw connection to peer\n'))
3236 ui.write(_('using raw connection to peer\n'))
3237 peer = None
3237 peer = None
3238 else:
3238 else:
3239 ui.write(_('creating ssh peer from handshake results\n'))
3239 ui.write(_('creating ssh peer from handshake results\n'))
3240 peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
3240 peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr,
3241 autoreadstderr=autoreadstderr)
3241 autoreadstderr=autoreadstderr)
3242
3242
3243 elif path:
3243 elif path:
3244 # We bypass hg.peer() so we can proxy the sockets.
3244 # We bypass hg.peer() so we can proxy the sockets.
3245 # TODO consider not doing this because we skip
3245 # TODO consider not doing this because we skip
3246 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
3246 # ``hg.wirepeersetupfuncs`` and potentially other useful functionality.
3247 u = util.url(path)
3247 u = util.url(path)
3248 if u.scheme != 'http':
3248 if u.scheme != 'http':
3249 raise error.Abort(_('only http:// paths are currently supported'))
3249 raise error.Abort(_('only http:// paths are currently supported'))
3250
3250
3251 url, authinfo = u.authinfo()
3251 url, authinfo = u.authinfo()
3252 openerargs = {
3252 openerargs = {
3253 r'useragent': b'Mercurial debugwireproto',
3253 r'useragent': b'Mercurial debugwireproto',
3254 }
3254 }
3255
3255
3256 # Turn pipes/sockets into observers so we can log I/O.
3256 # Turn pipes/sockets into observers so we can log I/O.
3257 if ui.verbose:
3257 if ui.verbose:
3258 openerargs.update({
3258 openerargs.update({
3259 r'loggingfh': ui,
3259 r'loggingfh': ui,
3260 r'loggingname': b's',
3260 r'loggingname': b's',
3261 r'loggingopts': {
3261 r'loggingopts': {
3262 r'logdata': True,
3262 r'logdata': True,
3263 r'logdataapis': False,
3263 r'logdataapis': False,
3264 },
3264 },
3265 })
3265 })
3266
3266
3267 if ui.debugflag:
3267 if ui.debugflag:
3268 openerargs[r'loggingopts'][r'logdataapis'] = True
3268 openerargs[r'loggingopts'][r'logdataapis'] = True
3269
3269
3270 # Don't send default headers when in raw mode. This allows us to
3270 # Don't send default headers when in raw mode. This allows us to
3271 # bypass most of the behavior of our URL handling code so we can
3271 # bypass most of the behavior of our URL handling code so we can
3272 # have near complete control over what's sent on the wire.
3272 # have near complete control over what's sent on the wire.
3273 if opts['peer'] == 'raw':
3273 if opts['peer'] == 'raw':
3274 openerargs[r'sendaccept'] = False
3274 openerargs[r'sendaccept'] = False
3275
3275
3276 opener = urlmod.opener(ui, authinfo, **openerargs)
3276 opener = urlmod.opener(ui, authinfo, **openerargs)
3277
3277
3278 if opts['peer'] == 'http2':
3278 if opts['peer'] == 'http2':
3279 ui.write(_('creating http peer for wire protocol version 2\n'))
3279 ui.write(_('creating http peer for wire protocol version 2\n'))
3280 # We go through makepeer() because we need an API descriptor for
3280 # We go through makepeer() because we need an API descriptor for
3281 # the peer instance to be useful.
3281 # the peer instance to be useful.
3282 with ui.configoverride({
3282 with ui.configoverride({
3283 ('experimental', 'httppeer.advertise-v2'): True}):
3283 ('experimental', 'httppeer.advertise-v2'): True}):
3284 if opts['nologhandshake']:
3284 if opts['nologhandshake']:
3285 ui.pushbuffer()
3285 ui.pushbuffer()
3286
3286
3287 peer = httppeer.makepeer(ui, path, opener=opener)
3287 peer = httppeer.makepeer(ui, path, opener=opener)
3288
3288
3289 if opts['nologhandshake']:
3289 if opts['nologhandshake']:
3290 ui.popbuffer()
3290 ui.popbuffer()
3291
3291
3292 if not isinstance(peer, httppeer.httpv2peer):
3292 if not isinstance(peer, httppeer.httpv2peer):
3293 raise error.Abort(_('could not instantiate HTTP peer for '
3293 raise error.Abort(_('could not instantiate HTTP peer for '
3294 'wire protocol version 2'),
3294 'wire protocol version 2'),
3295 hint=_('the server may not have the feature '
3295 hint=_('the server may not have the feature '
3296 'enabled or is not allowing this '
3296 'enabled or is not allowing this '
3297 'client version'))
3297 'client version'))
3298
3298
3299 elif opts['peer'] == 'raw':
3299 elif opts['peer'] == 'raw':
3300 ui.write(_('using raw connection to peer\n'))
3300 ui.write(_('using raw connection to peer\n'))
3301 peer = None
3301 peer = None
3302 elif opts['peer']:
3302 elif opts['peer']:
3303 raise error.Abort(_('--peer %s not supported with HTTP peers') %
3303 raise error.Abort(_('--peer %s not supported with HTTP peers') %
3304 opts['peer'])
3304 opts['peer'])
3305 else:
3305 else:
3306 peer = httppeer.makepeer(ui, path, opener=opener)
3306 peer = httppeer.makepeer(ui, path, opener=opener)
3307
3307
3308 # We /could/ populate stdin/stdout with sock.makefile()...
3308 # We /could/ populate stdin/stdout with sock.makefile()...
3309 else:
3309 else:
3310 raise error.Abort(_('unsupported connection configuration'))
3310 raise error.Abort(_('unsupported connection configuration'))
3311
3311
3312 batchedcommands = None
3312 batchedcommands = None
3313
3313
3314 # Now perform actions based on the parsed wire language instructions.
3314 # Now perform actions based on the parsed wire language instructions.
3315 for action, lines in blocks:
3315 for action, lines in blocks:
3316 if action in ('raw', 'raw+'):
3316 if action in ('raw', 'raw+'):
3317 if not stdin:
3317 if not stdin:
3318 raise error.Abort(_('cannot call raw/raw+ on this peer'))
3318 raise error.Abort(_('cannot call raw/raw+ on this peer'))
3319
3319
3320 # Concatenate the data together.
3320 # Concatenate the data together.
3321 data = ''.join(l.lstrip() for l in lines)
3321 data = ''.join(l.lstrip() for l in lines)
3322 data = stringutil.unescapestr(data)
3322 data = stringutil.unescapestr(data)
3323 stdin.write(data)
3323 stdin.write(data)
3324
3324
3325 if action == 'raw+':
3325 if action == 'raw+':
3326 stdin.flush()
3326 stdin.flush()
3327 elif action == 'flush':
3327 elif action == 'flush':
3328 if not stdin:
3328 if not stdin:
3329 raise error.Abort(_('cannot call flush on this peer'))
3329 raise error.Abort(_('cannot call flush on this peer'))
3330 stdin.flush()
3330 stdin.flush()
3331 elif action.startswith('command'):
3331 elif action.startswith('command'):
3332 if not peer:
3332 if not peer:
3333 raise error.Abort(_('cannot send commands unless peer instance '
3333 raise error.Abort(_('cannot send commands unless peer instance '
3334 'is available'))
3334 'is available'))
3335
3335
3336 command = action.split(' ', 1)[1]
3336 command = action.split(' ', 1)[1]
3337
3337
3338 args = {}
3338 args = {}
3339 for line in lines:
3339 for line in lines:
3340 # We need to allow empty values.
3340 # We need to allow empty values.
3341 fields = line.lstrip().split(' ', 1)
3341 fields = line.lstrip().split(' ', 1)
3342 if len(fields) == 1:
3342 if len(fields) == 1:
3343 key = fields[0]
3343 key = fields[0]
3344 value = ''
3344 value = ''
3345 else:
3345 else:
3346 key, value = fields
3346 key, value = fields
3347
3347
3348 if value.startswith('eval:'):
3348 if value.startswith('eval:'):
3349 value = stringutil.evalpythonliteral(value[5:])
3349 value = stringutil.evalpythonliteral(value[5:])
3350 else:
3350 else:
3351 value = stringutil.unescapestr(value)
3351 value = stringutil.unescapestr(value)
3352
3352
3353 args[key] = value
3353 args[key] = value
3354
3354
3355 if batchedcommands is not None:
3355 if batchedcommands is not None:
3356 batchedcommands.append((command, args))
3356 batchedcommands.append((command, args))
3357 continue
3357 continue
3358
3358
3359 ui.status(_('sending %s command\n') % command)
3359 ui.status(_('sending %s command\n') % command)
3360
3360
3361 if 'PUSHFILE' in args:
3361 if 'PUSHFILE' in args:
3362 with open(args['PUSHFILE'], r'rb') as fh:
3362 with open(args['PUSHFILE'], r'rb') as fh:
3363 del args['PUSHFILE']
3363 del args['PUSHFILE']
3364 res, output = peer._callpush(command, fh,
3364 res, output = peer._callpush(command, fh,
3365 **pycompat.strkwargs(args))
3365 **pycompat.strkwargs(args))
3366 ui.status(_('result: %s\n') % stringutil.escapestr(res))
3366 ui.status(_('result: %s\n') % stringutil.escapestr(res))
3367 ui.status(_('remote output: %s\n') %
3367 ui.status(_('remote output: %s\n') %
3368 stringutil.escapestr(output))
3368 stringutil.escapestr(output))
3369 else:
3369 else:
3370 with peer.commandexecutor() as e:
3370 with peer.commandexecutor() as e:
3371 res = e.callcommand(command, args).result()
3371 res = e.callcommand(command, args).result()
3372
3372
3373 if isinstance(res, wireprotov2peer.commandresponse):
3373 if isinstance(res, wireprotov2peer.commandresponse):
3374 val = res.objects()
3374 val = res.objects()
3375 ui.status(_('response: %s\n') %
3375 ui.status(_('response: %s\n') %
3376 stringutil.pprint(val, bprefix=True, indent=2))
3376 stringutil.pprint(val, bprefix=True, indent=2))
3377 else:
3377 else:
3378 ui.status(_('response: %s\n') %
3378 ui.status(_('response: %s\n') %
3379 stringutil.pprint(res, bprefix=True, indent=2))
3379 stringutil.pprint(res, bprefix=True, indent=2))
3380
3380
3381 elif action == 'batchbegin':
3381 elif action == 'batchbegin':
3382 if batchedcommands is not None:
3382 if batchedcommands is not None:
3383 raise error.Abort(_('nested batchbegin not allowed'))
3383 raise error.Abort(_('nested batchbegin not allowed'))
3384
3384
3385 batchedcommands = []
3385 batchedcommands = []
3386 elif action == 'batchsubmit':
3386 elif action == 'batchsubmit':
3387 # There is a batching API we could go through. But it would be
3387 # There is a batching API we could go through. But it would be
3388 # difficult to normalize requests into function calls. It is easier
3388 # difficult to normalize requests into function calls. It is easier
3389 # to bypass this layer and normalize to commands + args.
3389 # to bypass this layer and normalize to commands + args.
3390 ui.status(_('sending batch with %d sub-commands\n') %
3390 ui.status(_('sending batch with %d sub-commands\n') %
3391 len(batchedcommands))
3391 len(batchedcommands))
3392 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
3392 for i, chunk in enumerate(peer._submitbatch(batchedcommands)):
3393 ui.status(_('response #%d: %s\n') %
3393 ui.status(_('response #%d: %s\n') %
3394 (i, stringutil.escapestr(chunk)))
3394 (i, stringutil.escapestr(chunk)))
3395
3395
3396 batchedcommands = None
3396 batchedcommands = None
3397
3397
3398 elif action.startswith('httprequest '):
3398 elif action.startswith('httprequest '):
3399 if not opener:
3399 if not opener:
3400 raise error.Abort(_('cannot use httprequest without an HTTP '
3400 raise error.Abort(_('cannot use httprequest without an HTTP '
3401 'peer'))
3401 'peer'))
3402
3402
3403 request = action.split(' ', 2)
3403 request = action.split(' ', 2)
3404 if len(request) != 3:
3404 if len(request) != 3:
3405 raise error.Abort(_('invalid httprequest: expected format is '
3405 raise error.Abort(_('invalid httprequest: expected format is '
3406 '"httprequest <method> <path>'))
3406 '"httprequest <method> <path>'))
3407
3407
3408 method, httppath = request[1:]
3408 method, httppath = request[1:]
3409 headers = {}
3409 headers = {}
3410 body = None
3410 body = None
3411 frames = []
3411 frames = []
3412 for line in lines:
3412 for line in lines:
3413 line = line.lstrip()
3413 line = line.lstrip()
3414 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
3414 m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line)
3415 if m:
3415 if m:
3416 # Headers need to use native strings.
3416 # Headers need to use native strings.
3417 key = pycompat.strurl(m.group(1))
3417 key = pycompat.strurl(m.group(1))
3418 value = pycompat.strurl(m.group(2))
3418 value = pycompat.strurl(m.group(2))
3419 headers[key] = value
3419 headers[key] = value
3420 continue
3420 continue
3421
3421
3422 if line.startswith(b'BODYFILE '):
3422 if line.startswith(b'BODYFILE '):
3423 with open(line.split(b' ', 1), 'rb') as fh:
3423 with open(line.split(b' ', 1), 'rb') as fh:
3424 body = fh.read()
3424 body = fh.read()
3425 elif line.startswith(b'frame '):
3425 elif line.startswith(b'frame '):
3426 frame = wireprotoframing.makeframefromhumanstring(
3426 frame = wireprotoframing.makeframefromhumanstring(
3427 line[len(b'frame '):])
3427 line[len(b'frame '):])
3428
3428
3429 frames.append(frame)
3429 frames.append(frame)
3430 else:
3430 else:
3431 raise error.Abort(_('unknown argument to httprequest: %s') %
3431 raise error.Abort(_('unknown argument to httprequest: %s') %
3432 line)
3432 line)
3433
3433
3434 url = path + httppath
3434 url = path + httppath
3435
3435
3436 if frames:
3436 if frames:
3437 body = b''.join(bytes(f) for f in frames)
3437 body = b''.join(bytes(f) for f in frames)
3438
3438
3439 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
3439 req = urlmod.urlreq.request(pycompat.strurl(url), body, headers)
3440
3440
3441 # urllib.Request insists on using has_data() as a proxy for
3441 # urllib.Request insists on using has_data() as a proxy for
3442 # determining the request method. Override that to use our
3442 # determining the request method. Override that to use our
3443 # explicitly requested method.
3443 # explicitly requested method.
3444 req.get_method = lambda: pycompat.sysstr(method)
3444 req.get_method = lambda: pycompat.sysstr(method)
3445
3445
3446 try:
3446 try:
3447 res = opener.open(req)
3447 res = opener.open(req)
3448 body = res.read()
3448 body = res.read()
3449 except util.urlerr.urlerror as e:
3449 except util.urlerr.urlerror as e:
3450 # read() method must be called, but only exists in Python 2
3450 # read() method must be called, but only exists in Python 2
3451 getattr(e, 'read', lambda: None)()
3451 getattr(e, 'read', lambda: None)()
3452 continue
3452 continue
3453
3453
3454 ct = res.headers.get(r'Content-Type')
3454 ct = res.headers.get(r'Content-Type')
3455 if ct == r'application/mercurial-cbor':
3455 if ct == r'application/mercurial-cbor':
3456 ui.write(_('cbor> %s\n') %
3456 ui.write(_('cbor> %s\n') %
3457 stringutil.pprint(cborutil.decodeall(body),
3457 stringutil.pprint(cborutil.decodeall(body),
3458 bprefix=True,
3458 bprefix=True,
3459 indent=2))
3459 indent=2))
3460
3460
3461 elif action == 'close':
3461 elif action == 'close':
3462 peer.close()
3462 peer.close()
3463 elif action == 'readavailable':
3463 elif action == 'readavailable':
3464 if not stdout or not stderr:
3464 if not stdout or not stderr:
3465 raise error.Abort(_('readavailable not available on this peer'))
3465 raise error.Abort(_('readavailable not available on this peer'))
3466
3466
3467 stdin.close()
3467 stdin.close()
3468 stdout.read()
3468 stdout.read()
3469 stderr.read()
3469 stderr.read()
3470
3470
3471 elif action == 'readline':
3471 elif action == 'readline':
3472 if not stdout:
3472 if not stdout:
3473 raise error.Abort(_('readline not available on this peer'))
3473 raise error.Abort(_('readline not available on this peer'))
3474 stdout.readline()
3474 stdout.readline()
3475 elif action == 'ereadline':
3475 elif action == 'ereadline':
3476 if not stderr:
3476 if not stderr:
3477 raise error.Abort(_('ereadline not available on this peer'))
3477 raise error.Abort(_('ereadline not available on this peer'))
3478 stderr.readline()
3478 stderr.readline()
3479 elif action.startswith('read '):
3479 elif action.startswith('read '):
3480 count = int(action.split(' ', 1)[1])
3480 count = int(action.split(' ', 1)[1])
3481 if not stdout:
3481 if not stdout:
3482 raise error.Abort(_('read not available on this peer'))
3482 raise error.Abort(_('read not available on this peer'))
3483 stdout.read(count)
3483 stdout.read(count)
3484 elif action.startswith('eread '):
3484 elif action.startswith('eread '):
3485 count = int(action.split(' ', 1)[1])
3485 count = int(action.split(' ', 1)[1])
3486 if not stderr:
3486 if not stderr:
3487 raise error.Abort(_('eread not available on this peer'))
3487 raise error.Abort(_('eread not available on this peer'))
3488 stderr.read(count)
3488 stderr.read(count)
3489 else:
3489 else:
3490 raise error.Abort(_('unknown action: %s') % action)
3490 raise error.Abort(_('unknown action: %s') % action)
3491
3491
3492 if batchedcommands is not None:
3492 if batchedcommands is not None:
3493 raise error.Abort(_('unclosed "batchbegin" request'))
3493 raise error.Abort(_('unclosed "batchbegin" request'))
3494
3494
3495 if peer:
3495 if peer:
3496 peer.close()
3496 peer.close()
3497
3497
3498 if proc:
3498 if proc:
3499 proc.kill()
3499 proc.kill()
@@ -1,2021 +1,2021 b''
1 # scmutil.py - Mercurial core utility functions
1 # scmutil.py - Mercurial core utility functions
2 #
2 #
3 # Copyright Matt Mackall <mpm@selenic.com>
3 # Copyright Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 import errno
10 import errno
11 import glob
11 import glob
12 import hashlib
12 import hashlib
13 import os
13 import os
14 import posixpath
14 import posixpath
15 import re
15 import re
16 import subprocess
16 import subprocess
17 import weakref
17 import weakref
18
18
19 from .i18n import _
19 from .i18n import _
20 from .node import (
20 from .node import (
21 bin,
21 bin,
22 hex,
22 hex,
23 nullid,
23 nullid,
24 nullrev,
24 nullrev,
25 short,
25 short,
26 wdirid,
26 wdirid,
27 wdirrev,
27 wdirrev,
28 )
28 )
29
29
30 from . import (
30 from . import (
31 copies as copiesmod,
31 copies as copiesmod,
32 encoding,
32 encoding,
33 error,
33 error,
34 match as matchmod,
34 match as matchmod,
35 obsolete,
35 obsolete,
36 obsutil,
36 obsutil,
37 pathutil,
37 pathutil,
38 phases,
38 phases,
39 policy,
39 policy,
40 pycompat,
40 pycompat,
41 revsetlang,
41 revsetlang,
42 similar,
42 similar,
43 smartset,
43 smartset,
44 url,
44 url,
45 util,
45 util,
46 vfs,
46 vfs,
47 )
47 )
48
48
49 from .utils import (
49 from .utils import (
50 procutil,
50 procutil,
51 stringutil,
51 stringutil,
52 )
52 )
53
53
54 if pycompat.iswindows:
54 if pycompat.iswindows:
55 from . import scmwindows as scmplatform
55 from . import scmwindows as scmplatform
56 else:
56 else:
57 from . import scmposix as scmplatform
57 from . import scmposix as scmplatform
58
58
59 parsers = policy.importmod(r'parsers')
59 parsers = policy.importmod(r'parsers')
60
60
61 termsize = scmplatform.termsize
61 termsize = scmplatform.termsize
62
62
63 class status(tuple):
63 class status(tuple):
64 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
64 '''Named tuple with a list of files per status. The 'deleted', 'unknown'
65 and 'ignored' properties are only relevant to the working copy.
65 and 'ignored' properties are only relevant to the working copy.
66 '''
66 '''
67
67
68 __slots__ = ()
68 __slots__ = ()
69
69
70 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
70 def __new__(cls, modified, added, removed, deleted, unknown, ignored,
71 clean):
71 clean):
72 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
72 return tuple.__new__(cls, (modified, added, removed, deleted, unknown,
73 ignored, clean))
73 ignored, clean))
74
74
75 @property
75 @property
76 def modified(self):
76 def modified(self):
77 '''files that have been modified'''
77 '''files that have been modified'''
78 return self[0]
78 return self[0]
79
79
80 @property
80 @property
81 def added(self):
81 def added(self):
82 '''files that have been added'''
82 '''files that have been added'''
83 return self[1]
83 return self[1]
84
84
85 @property
85 @property
86 def removed(self):
86 def removed(self):
87 '''files that have been removed'''
87 '''files that have been removed'''
88 return self[2]
88 return self[2]
89
89
90 @property
90 @property
91 def deleted(self):
91 def deleted(self):
92 '''files that are in the dirstate, but have been deleted from the
92 '''files that are in the dirstate, but have been deleted from the
93 working copy (aka "missing")
93 working copy (aka "missing")
94 '''
94 '''
95 return self[3]
95 return self[3]
96
96
97 @property
97 @property
98 def unknown(self):
98 def unknown(self):
99 '''files not in the dirstate that are not ignored'''
99 '''files not in the dirstate that are not ignored'''
100 return self[4]
100 return self[4]
101
101
102 @property
102 @property
103 def ignored(self):
103 def ignored(self):
104 '''files not in the dirstate that are ignored (by _dirignore())'''
104 '''files not in the dirstate that are ignored (by _dirignore())'''
105 return self[5]
105 return self[5]
106
106
107 @property
107 @property
108 def clean(self):
108 def clean(self):
109 '''files that have not been modified'''
109 '''files that have not been modified'''
110 return self[6]
110 return self[6]
111
111
112 def __repr__(self, *args, **kwargs):
112 def __repr__(self, *args, **kwargs):
113 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
113 return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, '
114 r'unknown=%s, ignored=%s, clean=%s>') %
114 r'unknown=%s, ignored=%s, clean=%s>') %
115 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
115 tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self))
116
116
117 def itersubrepos(ctx1, ctx2):
117 def itersubrepos(ctx1, ctx2):
118 """find subrepos in ctx1 or ctx2"""
118 """find subrepos in ctx1 or ctx2"""
119 # Create a (subpath, ctx) mapping where we prefer subpaths from
119 # Create a (subpath, ctx) mapping where we prefer subpaths from
120 # ctx1. The subpaths from ctx2 are important when the .hgsub file
120 # ctx1. The subpaths from ctx2 are important when the .hgsub file
121 # has been modified (in ctx2) but not yet committed (in ctx1).
121 # has been modified (in ctx2) but not yet committed (in ctx1).
122 subpaths = dict.fromkeys(ctx2.substate, ctx2)
122 subpaths = dict.fromkeys(ctx2.substate, ctx2)
123 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
123 subpaths.update(dict.fromkeys(ctx1.substate, ctx1))
124
124
125 missing = set()
125 missing = set()
126
126
127 for subpath in ctx2.substate:
127 for subpath in ctx2.substate:
128 if subpath not in ctx1.substate:
128 if subpath not in ctx1.substate:
129 del subpaths[subpath]
129 del subpaths[subpath]
130 missing.add(subpath)
130 missing.add(subpath)
131
131
132 for subpath, ctx in sorted(subpaths.iteritems()):
132 for subpath, ctx in sorted(subpaths.iteritems()):
133 yield subpath, ctx.sub(subpath)
133 yield subpath, ctx.sub(subpath)
134
134
135 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
135 # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way,
136 # status and diff will have an accurate result when it does
136 # status and diff will have an accurate result when it does
137 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
137 # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared
138 # against itself.
138 # against itself.
139 for subpath in missing:
139 for subpath in missing:
140 yield subpath, ctx2.nullsub(subpath, ctx1)
140 yield subpath, ctx2.nullsub(subpath, ctx1)
141
141
142 def nochangesfound(ui, repo, excluded=None):
142 def nochangesfound(ui, repo, excluded=None):
143 '''Report no changes for push/pull, excluded is None or a list of
143 '''Report no changes for push/pull, excluded is None or a list of
144 nodes excluded from the push/pull.
144 nodes excluded from the push/pull.
145 '''
145 '''
146 secretlist = []
146 secretlist = []
147 if excluded:
147 if excluded:
148 for n in excluded:
148 for n in excluded:
149 ctx = repo[n]
149 ctx = repo[n]
150 if ctx.phase() >= phases.secret and not ctx.extinct():
150 if ctx.phase() >= phases.secret and not ctx.extinct():
151 secretlist.append(n)
151 secretlist.append(n)
152
152
153 if secretlist:
153 if secretlist:
154 ui.status(_("no changes found (ignored %d secret changesets)\n")
154 ui.status(_("no changes found (ignored %d secret changesets)\n")
155 % len(secretlist))
155 % len(secretlist))
156 else:
156 else:
157 ui.status(_("no changes found\n"))
157 ui.status(_("no changes found\n"))
158
158
159 def callcatch(ui, func):
159 def callcatch(ui, func):
160 """call func() with global exception handling
160 """call func() with global exception handling
161
161
162 return func() if no exception happens. otherwise do some error handling
162 return func() if no exception happens. otherwise do some error handling
163 and return an exit code accordingly. does not handle all exceptions.
163 and return an exit code accordingly. does not handle all exceptions.
164 """
164 """
165 try:
165 try:
166 try:
166 try:
167 return func()
167 return func()
168 except: # re-raises
168 except: # re-raises
169 ui.traceback()
169 ui.traceback()
170 raise
170 raise
171 # Global exception handling, alphabetically
171 # Global exception handling, alphabetically
172 # Mercurial-specific first, followed by built-in and library exceptions
172 # Mercurial-specific first, followed by built-in and library exceptions
173 except error.LockHeld as inst:
173 except error.LockHeld as inst:
174 if inst.errno == errno.ETIMEDOUT:
174 if inst.errno == errno.ETIMEDOUT:
175 reason = _('timed out waiting for lock held by %r') % (
175 reason = _('timed out waiting for lock held by %r') % (
176 pycompat.bytestr(inst.locker))
176 pycompat.bytestr(inst.locker))
177 else:
177 else:
178 reason = _('lock held by %r') % inst.locker
178 reason = _('lock held by %r') % inst.locker
179 ui.error(_("abort: %s: %s\n") % (
179 ui.error(_("abort: %s: %s\n") % (
180 inst.desc or stringutil.forcebytestr(inst.filename), reason))
180 inst.desc or stringutil.forcebytestr(inst.filename), reason))
181 if not inst.locker:
181 if not inst.locker:
182 ui.error(_("(lock might be very busy)\n"))
182 ui.error(_("(lock might be very busy)\n"))
183 except error.LockUnavailable as inst:
183 except error.LockUnavailable as inst:
184 ui.error(_("abort: could not lock %s: %s\n") %
184 ui.error(_("abort: could not lock %s: %s\n") %
185 (inst.desc or stringutil.forcebytestr(inst.filename),
185 (inst.desc or stringutil.forcebytestr(inst.filename),
186 encoding.strtolocal(inst.strerror)))
186 encoding.strtolocal(inst.strerror)))
187 except error.OutOfBandError as inst:
187 except error.OutOfBandError as inst:
188 if inst.args:
188 if inst.args:
189 msg = _("abort: remote error:\n")
189 msg = _("abort: remote error:\n")
190 else:
190 else:
191 msg = _("abort: remote error\n")
191 msg = _("abort: remote error\n")
192 ui.error(msg)
192 ui.error(msg)
193 if inst.args:
193 if inst.args:
194 ui.error(''.join(inst.args))
194 ui.error(''.join(inst.args))
195 if inst.hint:
195 if inst.hint:
196 ui.error('(%s)\n' % inst.hint)
196 ui.error('(%s)\n' % inst.hint)
197 except error.RepoError as inst:
197 except error.RepoError as inst:
198 ui.error(_("abort: %s!\n") % inst)
198 ui.error(_("abort: %s!\n") % inst)
199 if inst.hint:
199 if inst.hint:
200 ui.error(_("(%s)\n") % inst.hint)
200 ui.error(_("(%s)\n") % inst.hint)
201 except error.ResponseError as inst:
201 except error.ResponseError as inst:
202 ui.error(_("abort: %s") % inst.args[0])
202 ui.error(_("abort: %s") % inst.args[0])
203 msg = inst.args[1]
203 msg = inst.args[1]
204 if isinstance(msg, type(u'')):
204 if isinstance(msg, type(u'')):
205 msg = pycompat.sysbytes(msg)
205 msg = pycompat.sysbytes(msg)
206 if not isinstance(msg, bytes):
206 if not isinstance(msg, bytes):
207 ui.error(" %r\n" % (msg,))
207 ui.error(" %r\n" % (msg,))
208 elif not msg:
208 elif not msg:
209 ui.error(_(" empty string\n"))
209 ui.error(_(" empty string\n"))
210 else:
210 else:
211 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
211 ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg)))
212 except error.CensoredNodeError as inst:
212 except error.CensoredNodeError as inst:
213 ui.error(_("abort: file censored %s!\n") % inst)
213 ui.error(_("abort: file censored %s!\n") % inst)
214 except error.StorageError as inst:
214 except error.StorageError as inst:
215 ui.error(_("abort: %s!\n") % inst)
215 ui.error(_("abort: %s!\n") % inst)
216 if inst.hint:
216 if inst.hint:
217 ui.error(_("(%s)\n") % inst.hint)
217 ui.error(_("(%s)\n") % inst.hint)
218 except error.InterventionRequired as inst:
218 except error.InterventionRequired as inst:
219 ui.error("%s\n" % inst)
219 ui.error("%s\n" % inst)
220 if inst.hint:
220 if inst.hint:
221 ui.error(_("(%s)\n") % inst.hint)
221 ui.error(_("(%s)\n") % inst.hint)
222 return 1
222 return 1
223 except error.WdirUnsupported:
223 except error.WdirUnsupported:
224 ui.error(_("abort: working directory revision cannot be specified\n"))
224 ui.error(_("abort: working directory revision cannot be specified\n"))
225 except error.Abort as inst:
225 except error.Abort as inst:
226 ui.error(_("abort: %s\n") % inst)
226 ui.error(_("abort: %s\n") % inst)
227 if inst.hint:
227 if inst.hint:
228 ui.error(_("(%s)\n") % inst.hint)
228 ui.error(_("(%s)\n") % inst.hint)
229 except ImportError as inst:
229 except ImportError as inst:
230 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
230 ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst))
231 m = stringutil.forcebytestr(inst).split()[-1]
231 m = stringutil.forcebytestr(inst).split()[-1]
232 if m in "mpatch bdiff".split():
232 if m in "mpatch bdiff".split():
233 ui.error(_("(did you forget to compile extensions?)\n"))
233 ui.error(_("(did you forget to compile extensions?)\n"))
234 elif m in "zlib".split():
234 elif m in "zlib".split():
235 ui.error(_("(is your Python install correct?)\n"))
235 ui.error(_("(is your Python install correct?)\n"))
236 except (IOError, OSError) as inst:
236 except (IOError, OSError) as inst:
237 if util.safehasattr(inst, "code"): # HTTPError
237 if util.safehasattr(inst, "code"): # HTTPError
238 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
238 ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst))
239 elif util.safehasattr(inst, "reason"): # URLError or SSLError
239 elif util.safehasattr(inst, "reason"): # URLError or SSLError
240 try: # usually it is in the form (errno, strerror)
240 try: # usually it is in the form (errno, strerror)
241 reason = inst.reason.args[1]
241 reason = inst.reason.args[1]
242 except (AttributeError, IndexError):
242 except (AttributeError, IndexError):
243 # it might be anything, for example a string
243 # it might be anything, for example a string
244 reason = inst.reason
244 reason = inst.reason
245 if isinstance(reason, pycompat.unicode):
245 if isinstance(reason, pycompat.unicode):
246 # SSLError of Python 2.7.9 contains a unicode
246 # SSLError of Python 2.7.9 contains a unicode
247 reason = encoding.unitolocal(reason)
247 reason = encoding.unitolocal(reason)
248 ui.error(_("abort: error: %s\n") % reason)
248 ui.error(_("abort: error: %s\n") % reason)
249 elif (util.safehasattr(inst, "args")
249 elif (util.safehasattr(inst, "args")
250 and inst.args and inst.args[0] == errno.EPIPE):
250 and inst.args and inst.args[0] == errno.EPIPE):
251 pass
251 pass
252 elif getattr(inst, "strerror", None): # common IOError or OSError
252 elif getattr(inst, "strerror", None): # common IOError or OSError
253 if getattr(inst, "filename", None) is not None:
253 if getattr(inst, "filename", None) is not None:
254 ui.error(_("abort: %s: '%s'\n") % (
254 ui.error(_("abort: %s: '%s'\n") % (
255 encoding.strtolocal(inst.strerror),
255 encoding.strtolocal(inst.strerror),
256 stringutil.forcebytestr(inst.filename)))
256 stringutil.forcebytestr(inst.filename)))
257 else:
257 else:
258 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
258 ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror))
259 else: # suspicious IOError
259 else: # suspicious IOError
260 raise
260 raise
261 except MemoryError:
261 except MemoryError:
262 ui.error(_("abort: out of memory\n"))
262 ui.error(_("abort: out of memory\n"))
263 except SystemExit as inst:
263 except SystemExit as inst:
264 # Commands shouldn't sys.exit directly, but give a return code.
264 # Commands shouldn't sys.exit directly, but give a return code.
265 # Just in case catch this and and pass exit code to caller.
265 # Just in case catch this and and pass exit code to caller.
266 return inst.code
266 return inst.code
267
267
268 return -1
268 return -1
269
269
270 def checknewlabel(repo, lbl, kind):
270 def checknewlabel(repo, lbl, kind):
271 # Do not use the "kind" parameter in ui output.
271 # Do not use the "kind" parameter in ui output.
272 # It makes strings difficult to translate.
272 # It makes strings difficult to translate.
273 if lbl in ['tip', '.', 'null']:
273 if lbl in ['tip', '.', 'null']:
274 raise error.Abort(_("the name '%s' is reserved") % lbl)
274 raise error.Abort(_("the name '%s' is reserved") % lbl)
275 for c in (':', '\0', '\n', '\r'):
275 for c in (':', '\0', '\n', '\r'):
276 if c in lbl:
276 if c in lbl:
277 raise error.Abort(
277 raise error.Abort(
278 _("%r cannot be used in a name") % pycompat.bytestr(c))
278 _("%r cannot be used in a name") % pycompat.bytestr(c))
279 try:
279 try:
280 int(lbl)
280 int(lbl)
281 raise error.Abort(_("cannot use an integer as a name"))
281 raise error.Abort(_("cannot use an integer as a name"))
282 except ValueError:
282 except ValueError:
283 pass
283 pass
284 if lbl.strip() != lbl:
284 if lbl.strip() != lbl:
285 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
285 raise error.Abort(_("leading or trailing whitespace in name %r") % lbl)
286
286
287 def checkfilename(f):
287 def checkfilename(f):
288 '''Check that the filename f is an acceptable filename for a tracked file'''
288 '''Check that the filename f is an acceptable filename for a tracked file'''
289 if '\r' in f or '\n' in f:
289 if '\r' in f or '\n' in f:
290 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
290 raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r")
291 % pycompat.bytestr(f))
291 % pycompat.bytestr(f))
292
292
293 def checkportable(ui, f):
293 def checkportable(ui, f):
294 '''Check if filename f is portable and warn or abort depending on config'''
294 '''Check if filename f is portable and warn or abort depending on config'''
295 checkfilename(f)
295 checkfilename(f)
296 abort, warn = checkportabilityalert(ui)
296 abort, warn = checkportabilityalert(ui)
297 if abort or warn:
297 if abort or warn:
298 msg = util.checkwinfilename(f)
298 msg = util.checkwinfilename(f)
299 if msg:
299 if msg:
300 msg = "%s: %s" % (msg, procutil.shellquote(f))
300 msg = "%s: %s" % (msg, procutil.shellquote(f))
301 if abort:
301 if abort:
302 raise error.Abort(msg)
302 raise error.Abort(msg)
303 ui.warn(_("warning: %s\n") % msg)
303 ui.warn(_("warning: %s\n") % msg)
304
304
305 def checkportabilityalert(ui):
305 def checkportabilityalert(ui):
306 '''check if the user's config requests nothing, a warning, or abort for
306 '''check if the user's config requests nothing, a warning, or abort for
307 non-portable filenames'''
307 non-portable filenames'''
308 val = ui.config('ui', 'portablefilenames')
308 val = ui.config('ui', 'portablefilenames')
309 lval = val.lower()
309 lval = val.lower()
310 bval = stringutil.parsebool(val)
310 bval = stringutil.parsebool(val)
311 abort = pycompat.iswindows or lval == 'abort'
311 abort = pycompat.iswindows or lval == 'abort'
312 warn = bval or lval == 'warn'
312 warn = bval or lval == 'warn'
313 if bval is None and not (warn or abort or lval == 'ignore'):
313 if bval is None and not (warn or abort or lval == 'ignore'):
314 raise error.ConfigError(
314 raise error.ConfigError(
315 _("ui.portablefilenames value is invalid ('%s')") % val)
315 _("ui.portablefilenames value is invalid ('%s')") % val)
316 return abort, warn
316 return abort, warn
317
317
318 class casecollisionauditor(object):
318 class casecollisionauditor(object):
319 def __init__(self, ui, abort, dirstate):
319 def __init__(self, ui, abort, dirstate):
320 self._ui = ui
320 self._ui = ui
321 self._abort = abort
321 self._abort = abort
322 allfiles = '\0'.join(dirstate._map)
322 allfiles = '\0'.join(dirstate)
323 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
323 self._loweredfiles = set(encoding.lower(allfiles).split('\0'))
324 self._dirstate = dirstate
324 self._dirstate = dirstate
325 # The purpose of _newfiles is so that we don't complain about
325 # The purpose of _newfiles is so that we don't complain about
326 # case collisions if someone were to call this object with the
326 # case collisions if someone were to call this object with the
327 # same filename twice.
327 # same filename twice.
328 self._newfiles = set()
328 self._newfiles = set()
329
329
330 def __call__(self, f):
330 def __call__(self, f):
331 if f in self._newfiles:
331 if f in self._newfiles:
332 return
332 return
333 fl = encoding.lower(f)
333 fl = encoding.lower(f)
334 if fl in self._loweredfiles and f not in self._dirstate:
334 if fl in self._loweredfiles and f not in self._dirstate:
335 msg = _('possible case-folding collision for %s') % f
335 msg = _('possible case-folding collision for %s') % f
336 if self._abort:
336 if self._abort:
337 raise error.Abort(msg)
337 raise error.Abort(msg)
338 self._ui.warn(_("warning: %s\n") % msg)
338 self._ui.warn(_("warning: %s\n") % msg)
339 self._loweredfiles.add(fl)
339 self._loweredfiles.add(fl)
340 self._newfiles.add(f)
340 self._newfiles.add(f)
341
341
342 def filteredhash(repo, maxrev):
342 def filteredhash(repo, maxrev):
343 """build hash of filtered revisions in the current repoview.
343 """build hash of filtered revisions in the current repoview.
344
344
345 Multiple caches perform up-to-date validation by checking that the
345 Multiple caches perform up-to-date validation by checking that the
346 tiprev and tipnode stored in the cache file match the current repository.
346 tiprev and tipnode stored in the cache file match the current repository.
347 However, this is not sufficient for validating repoviews because the set
347 However, this is not sufficient for validating repoviews because the set
348 of revisions in the view may change without the repository tiprev and
348 of revisions in the view may change without the repository tiprev and
349 tipnode changing.
349 tipnode changing.
350
350
351 This function hashes all the revs filtered from the view and returns
351 This function hashes all the revs filtered from the view and returns
352 that SHA-1 digest.
352 that SHA-1 digest.
353 """
353 """
354 cl = repo.changelog
354 cl = repo.changelog
355 if not cl.filteredrevs:
355 if not cl.filteredrevs:
356 return None
356 return None
357 key = None
357 key = None
358 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
358 revs = sorted(r for r in cl.filteredrevs if r <= maxrev)
359 if revs:
359 if revs:
360 s = hashlib.sha1()
360 s = hashlib.sha1()
361 for rev in revs:
361 for rev in revs:
362 s.update('%d;' % rev)
362 s.update('%d;' % rev)
363 key = s.digest()
363 key = s.digest()
364 return key
364 return key
365
365
366 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
366 def walkrepos(path, followsym=False, seen_dirs=None, recurse=False):
367 '''yield every hg repository under path, always recursively.
367 '''yield every hg repository under path, always recursively.
368 The recurse flag will only control recursion into repo working dirs'''
368 The recurse flag will only control recursion into repo working dirs'''
369 def errhandler(err):
369 def errhandler(err):
370 if err.filename == path:
370 if err.filename == path:
371 raise err
371 raise err
372 samestat = getattr(os.path, 'samestat', None)
372 samestat = getattr(os.path, 'samestat', None)
373 if followsym and samestat is not None:
373 if followsym and samestat is not None:
374 def adddir(dirlst, dirname):
374 def adddir(dirlst, dirname):
375 dirstat = os.stat(dirname)
375 dirstat = os.stat(dirname)
376 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
376 match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst)
377 if not match:
377 if not match:
378 dirlst.append(dirstat)
378 dirlst.append(dirstat)
379 return not match
379 return not match
380 else:
380 else:
381 followsym = False
381 followsym = False
382
382
383 if (seen_dirs is None) and followsym:
383 if (seen_dirs is None) and followsym:
384 seen_dirs = []
384 seen_dirs = []
385 adddir(seen_dirs, path)
385 adddir(seen_dirs, path)
386 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
386 for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler):
387 dirs.sort()
387 dirs.sort()
388 if '.hg' in dirs:
388 if '.hg' in dirs:
389 yield root # found a repository
389 yield root # found a repository
390 qroot = os.path.join(root, '.hg', 'patches')
390 qroot = os.path.join(root, '.hg', 'patches')
391 if os.path.isdir(os.path.join(qroot, '.hg')):
391 if os.path.isdir(os.path.join(qroot, '.hg')):
392 yield qroot # we have a patch queue repo here
392 yield qroot # we have a patch queue repo here
393 if recurse:
393 if recurse:
394 # avoid recursing inside the .hg directory
394 # avoid recursing inside the .hg directory
395 dirs.remove('.hg')
395 dirs.remove('.hg')
396 else:
396 else:
397 dirs[:] = [] # don't descend further
397 dirs[:] = [] # don't descend further
398 elif followsym:
398 elif followsym:
399 newdirs = []
399 newdirs = []
400 for d in dirs:
400 for d in dirs:
401 fname = os.path.join(root, d)
401 fname = os.path.join(root, d)
402 if adddir(seen_dirs, fname):
402 if adddir(seen_dirs, fname):
403 if os.path.islink(fname):
403 if os.path.islink(fname):
404 for hgname in walkrepos(fname, True, seen_dirs):
404 for hgname in walkrepos(fname, True, seen_dirs):
405 yield hgname
405 yield hgname
406 else:
406 else:
407 newdirs.append(d)
407 newdirs.append(d)
408 dirs[:] = newdirs
408 dirs[:] = newdirs
409
409
410 def binnode(ctx):
410 def binnode(ctx):
411 """Return binary node id for a given basectx"""
411 """Return binary node id for a given basectx"""
412 node = ctx.node()
412 node = ctx.node()
413 if node is None:
413 if node is None:
414 return wdirid
414 return wdirid
415 return node
415 return node
416
416
417 def intrev(ctx):
417 def intrev(ctx):
418 """Return integer for a given basectx that can be used in comparison or
418 """Return integer for a given basectx that can be used in comparison or
419 arithmetic operation"""
419 arithmetic operation"""
420 rev = ctx.rev()
420 rev = ctx.rev()
421 if rev is None:
421 if rev is None:
422 return wdirrev
422 return wdirrev
423 return rev
423 return rev
424
424
425 def formatchangeid(ctx):
425 def formatchangeid(ctx):
426 """Format changectx as '{rev}:{node|formatnode}', which is the default
426 """Format changectx as '{rev}:{node|formatnode}', which is the default
427 template provided by logcmdutil.changesettemplater"""
427 template provided by logcmdutil.changesettemplater"""
428 repo = ctx.repo()
428 repo = ctx.repo()
429 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
429 return formatrevnode(repo.ui, intrev(ctx), binnode(ctx))
430
430
431 def formatrevnode(ui, rev, node):
431 def formatrevnode(ui, rev, node):
432 """Format given revision and node depending on the current verbosity"""
432 """Format given revision and node depending on the current verbosity"""
433 if ui.debugflag:
433 if ui.debugflag:
434 hexfunc = hex
434 hexfunc = hex
435 else:
435 else:
436 hexfunc = short
436 hexfunc = short
437 return '%d:%s' % (rev, hexfunc(node))
437 return '%d:%s' % (rev, hexfunc(node))
438
438
def resolvehexnodeidprefix(repo, prefix):
    """Resolve a (possibly abbreviated) hex nodeid prefix to a binary node.

    Returns the full binary node, or None when nothing matches the prefix.
    Re-raises error.AmbiguousPrefixLookupError when several nodes match and
    the configured disambiguation revset does not narrow them to one.
    """
    # An 'x' prefix explicitly marks the rest as a hex nodeid when the
    # experimental prefixhexnode feature is enabled; strip the marker.
    if (prefix.startswith('x') and
        repo.ui.configbool('experimental', 'revisions.prefixhexnode')):
        prefix = prefix[1:]
    try:
        # Uses unfiltered repo because it's faster when prefix is ambiguous/
        # This matches the shortesthexnodeidprefix() function below.
        node = repo.unfiltered().changelog._partialmatch(prefix)
    except error.AmbiguousPrefixLookupError:
        # Ambiguous: optionally retry restricted to a user-configured revset.
        revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
        if revset:
            # Clear config to avoid infinite recursion
            configoverrides = {('experimental',
                                'revisions.disambiguatewithin'): None}
            with repo.ui.configoverride(configoverrides):
                revs = repo.anyrevs([revset], user=True)
                matches = []
                for rev in revs:
                    node = repo.changelog.node(rev)
                    if hex(node).startswith(prefix):
                        matches.append(node)
                # Only a unique match within the revset resolves the prefix.
                if len(matches) == 1:
                    return matches[0]
        raise
    if node is None:
        return
    repo.changelog.rev(node)  # make sure node isn't filtered
    return node

def mayberevnum(repo, prefix):
    """Checks if the given prefix may be mistaken for a revision number"""
    try:
        i = int(prefix)
    except ValueError:
        return False
    # A pure int starting with zero (other than the literal '0') is never
    # read as a revnum, nor is a number beyond the tip rev.  '0' itself
    # *is* a valid revnum, so it still needs disambiguation.
    if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo):
        return False
    return True

def shortesthexnodeidprefix(repo, node, minlength=1, cache=None):
    """Find the shortest unambiguous prefix that matches hexnode.

    If "cache" is not None, it must be a dictionary that can be used for
    caching between calls to this method.
    """
    # _partialmatch() of filtered changelog could take O(len(repo)) time,
    # which would be unacceptably slow. so we look for hash collision in
    # unfiltered space, which means some hashes may be slightly longer.

    minlength=max(minlength, 1)

    def disambiguate(prefix):
        """Disambiguate against revnums."""
        if repo.ui.configbool('experimental', 'revisions.prefixhexnode'):
            # With prefixhexnode enabled, a revnum-looking prefix is made
            # unambiguous by prepending the 'x' hex marker instead of
            # lengthening it.
            if mayberevnum(repo, prefix):
                return 'x' + prefix
            else:
                return prefix

        # Otherwise grow the prefix until it can no longer be mistaken for
        # a revision number.  The loop always returns: a full 40-digit
        # decimal value vastly exceeds any repo length, so mayberevnum()
        # is False at full length at the latest.
        hexnode = hex(node)
        for length in range(len(prefix), len(hexnode) + 1):
            prefix = hexnode[:length]
            if not mayberevnum(repo, prefix):
                return prefix

    cl = repo.unfiltered().changelog
    revset = repo.ui.config('experimental', 'revisions.disambiguatewithin')
    if revset:
        revs = None
        if cache is not None:
            revs = cache.get('disambiguationrevset')
        if revs is None:
            revs = repo.anyrevs([revset], user=True)
            if cache is not None:
                cache['disambiguationrevset'] = revs
        if cl.rev(node) in revs:
            hexnode = hex(node)
            nodetree = None
            if cache is not None:
                nodetree = cache.get('disambiguationnodetree')
            if not nodetree:
                try:
                    # Prefer the C-accelerated nodetree when available.
                    nodetree = parsers.nodetree(cl.index, len(revs))
                except AttributeError:
                    # no native nodetree
                    pass
                else:
                    for r in revs:
                        nodetree.insert(r)
                    if cache is not None:
                        cache['disambiguationnodetree'] = nodetree
            if nodetree is not None:
                length = max(nodetree.shortest(node), minlength)
                prefix = hexnode[:length]
                return disambiguate(prefix)
            # Pure-Python fallback: grow the prefix until it matches only
            # this node within the disambiguation revset.
            for length in range(minlength, len(hexnode) + 1):
                matches = []
                prefix = hexnode[:length]
                for rev in revs:
                    otherhexnode = repo[rev].hex()
                    if prefix == otherhexnode[:length]:
                        matches.append(otherhexnode)
                if len(matches) == 1:
                    return disambiguate(prefix)

    try:
        return disambiguate(cl.shortest(node, minlength))
    except error.LookupError:
        raise error.RepoLookupError()

def isrevsymbol(repo, symbol):
    """Checks if a symbol exists in the repo.

    See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the
    symbol is an ambiguous nodeid prefix.
    """
    try:
        revsymbol(repo, symbol)
    except error.RepoLookupError:
        return False
    return True

def revsymbol(repo, symbol):
    """Returns a context given a single revision symbol (as string).

    This is similar to revsingle(), but accepts only a single revision symbol,
    i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but
    not "max(public())".

    Lookup proceeds in a fixed order: special names, revision number,
    full 40-char nodeid, namespace names (bookmarks/tags/branches via
    repo.names), and finally nodeid prefix.  Filtered-revision errors are
    translated into a user-facing message by _filterederror().
    """
    if not isinstance(symbol, bytes):
        msg = ("symbol (%s of type %s) was not a string, did you mean "
               "repo[symbol]?" % (symbol, type(symbol)))
        raise error.ProgrammingError(msg)
    try:
        if symbol in ('.', 'tip', 'null'):
            return repo[symbol]

        # Try as a revision number (including negative offsets from tip).
        try:
            r = int(symbol)
            # Reject forms like '010' or '1.0' that int() would accept.
            if '%d' % r != symbol:
                raise ValueError
            l = len(repo.changelog)
            if r < 0:
                r += l
            if r < 0 or r >= l and r != wdirrev:
                raise ValueError
            return repo[r]
        except error.FilteredIndexError:
            raise
        except (ValueError, OverflowError, IndexError):
            pass

        # Try as a full-length hex nodeid.
        if len(symbol) == 40:
            try:
                node = bin(symbol)
                rev = repo.changelog.rev(node)
                return repo[rev]
            except error.FilteredLookupError:
                raise
            except (TypeError, LookupError):
                pass

        # look up bookmarks through the name interface
        try:
            node = repo.names.singlenode(repo, symbol)
            rev = repo.changelog.rev(node)
            return repo[rev]
        except KeyError:
            pass

        # Last resort: treat the symbol as an abbreviated nodeid.
        node = resolvehexnodeidprefix(repo, symbol)
        if node is not None:
            rev = repo.changelog.rev(node)
            return repo[rev]

        raise error.RepoLookupError(_("unknown revision '%s'") % symbol)

    except error.WdirUnsupported:
        return repo[None]
    except (error.FilteredIndexError, error.FilteredLookupError,
            error.FilteredRepoLookupError):
        raise _filterederror(repo, symbol)

def _filterederror(repo, changeid):
    """build an exception to be raised about a filtered changeid

    This is extracted in a function to help extensions (eg: evolve) to
    experiment with various message variants."""
    if not repo.filtername.startswith('visible'):
        msg = _("filtered revision '%s' (not in '%s' subset)")
        msg %= (changeid, repo.filtername)
        return error.FilteredRepoLookupError(msg)

    # The changeset is hidden from the 'visible' view: look it up in the
    # unfiltered repo to explain *why* it is not visible.
    ctx = revsymbol(repo.unfiltered(), changeid)

    if ctx.obsolete():
        # Enrich the message with the reason the changeset was hidden.
        msg = obsutil._getfilteredreason(repo, changeid, ctx)
    else:
        msg = _("hidden revision '%s'") % changeid

    hint = _('use --hidden to access hidden revisions')

    return error.FilteredRepoLookupError(msg, hint=hint)

def revsingle(repo, revspec, default='.', localalias=None):
    """Resolve a single revspec to a changectx, falling back to `default`
    when no spec is given.  Aborts when the revset resolves to nothing."""
    if not revspec and revspec != 0:
        return repo[default]

    matched = revrange(repo, [revspec], localalias=localalias)
    if not matched:
        raise error.Abort(_('empty revision set'))
    return repo[matched.last()]

def _pairspec(revspec):
    # True when the top-level operator of the parsed revset is a range,
    # i.e. the spec inherently denotes a pair of revisions.
    tree = revsetlang.parse(revspec)
    if not tree:
        return tree
    return tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall')

def revpair(repo, revs):
    """Resolve a list of revspecs to a (first, second) pair of contexts.

    With no specs at all, returns ('.', working directory).  A single
    non-range spec pairs the revision with the working directory."""
    if not revs:
        return repo['.'], repo[None]

    resolved = revrange(repo, revs)

    if not resolved:
        raise error.Abort(_('empty revision range'))

    first = resolved.first()
    second = resolved.last()

    # Multiple specs collapsing to one revision is fine unless one of
    # them was individually empty.
    if (first == second and len(revs) >= 2
        and not all(revrange(repo, [r]) for r in revs)):
        raise error.Abort(_('empty revision on one side of range'))

    # if top-level is range expression, the result must always be a pair
    if first == second and len(revs) == 1 and not _pairspec(revs[0]):
        return repo[first], repo[None]

    return repo[first], repo[second]

def revrange(repo, specs, localalias=None):
    """Execute 1 to many revsets and return the union.

    This is the preferred mechanism for executing revsets using user-specified
    config options, such as revset aliases.

    The revsets specified by ``specs`` will be executed via a chained ``OR``
    expression. If ``specs`` is empty, an empty result is returned.

    ``specs`` can contain integers, in which case they are assumed to be
    revision numbers.

    It is assumed the revsets are already formatted. If you have arguments
    that need to be expanded in the revset, call ``revsetlang.formatspec()``
    and pass the result as an element of ``specs``.

    Specifying a single revset is allowed.

    Returns a ``revset.abstractsmartset`` which is a list-like interface over
    integer revisions.
    """
    # Bare integers are promoted to '%d' revset fragments; everything else
    # is assumed to be a pre-formatted revset string.
    allspecs = [revsetlang.formatspec('%d', spec)
                if isinstance(spec, int) else spec
                for spec in specs]
    return repo.anyrevs(allspecs, user=True, localalias=localalias)

def meaningfulparents(repo, ctx):
    """Return list of meaningful (or all if debug) parentrevs for rev.

    For merges (two non-nullrev revisions) both parents are meaningful.
    Otherwise the first parent revision is considered meaningful if it
    is not the preceding revision.
    """
    parents = ctx.parents()
    if len(parents) > 1:
        # A merge: both parents always matter.
        return parents
    if repo.ui.debugflag:
        # Debug output shows both parent slots, padded with the null rev.
        return [parents[0], repo[nullrev]]
    if parents[0].rev() >= intrev(ctx) - 1:
        # The sole parent immediately precedes this revision; it is
        # implied and therefore not meaningful.
        return []
    return parents

def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None):
    """Return a function that produced paths for presenting to the user.

    The returned function takes a repo-relative path and produces a path
    that can be presented in the UI.

    Depending on the value of ui.relative-paths, either a repo-relative or
    cwd-relative path will be produced.

    legacyrelativevalue is the value to use if ui.relative-paths=legacy

    If forcerelativevalue is not None, then that value will be used regardless
    of what ui.relative-paths is set to.
    """
    if forcerelativevalue is not None:
        relative = forcerelativevalue
    else:
        config = repo.ui.config('ui', 'relative-paths')
        if config == 'legacy':
            relative = legacyrelativevalue
        else:
            relative = stringutil.parsebool(config)
            if relative is None:
                raise error.ConfigError(
                    _("ui.relative-paths is not a boolean ('%s')") % config)

    if relative:
        # Bind cwd and pathto once so the returned closure is cheap.
        cwd = repo.getcwd()
        pathto = repo.pathto
        return lambda f: pathto(f, cwd)
    if repo.ui.configbool('ui', 'slash'):
        return lambda f: f
    return util.localpath

def subdiruipathfn(subpath, uipathfn):
    '''Create a new uipathfn that treats the file as relative to subpath.'''
    def relativeuipathfn(f):
        return uipathfn(posixpath.join(subpath, f))
    return relativeuipathfn

def anypats(pats, opts):
    '''Checks if any patterns, including --include and --exclude were given.

    Some commands (e.g. addremove) use this condition for deciding whether to
    print absolute or relative paths.
    '''
    if pats:
        return True
    return bool(opts.get('include') or opts.get('exclude'))

def expandpats(pats):
    '''Expand bare globs when running on windows.
    On posix we assume the shell has already done it.'''
    if not util.expandglobs:
        return list(pats)
    ret = []
    for kindpat in pats:
        kind, pat = matchmod._patsplit(kindpat, None)
        if kind is not None:
            # An explicit 'kind:' prefix disables glob expansion.
            ret.append(kindpat)
            continue
        try:
            globbed = glob.glob(pat)
        except re.error:
            globbed = [pat]
        if globbed:
            ret.extend(globbed)
        else:
            # No match: keep the original pattern untouched.
            ret.append(kindpat)
    return ret

def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath',
                 badfn=None):
    '''Return a matcher and the patterns that were used.
    The matcher will warn about bad matches, unless an alternate badfn callback
    is provided.'''
    if opts is None:
        opts = {}
    if not globbed and default == 'relpath':
        pats = expandpats(pats or [])

    uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True)
    def warnbad(f, msg):
        # Default bad-file callback: warn with a user-presentable path.
        ctx.repo().ui.warn("%s: %s\n" % (uipathfn(f), msg))

    if badfn is None:
        badfn = warnbad

    m = ctx.match(pats, opts.get('include'), opts.get('exclude'),
                  default, listsubrepos=opts.get('subrepos'), badfn=badfn)

    # An always-matcher means no effective patterns were given.
    if m.always():
        pats = []
    return m, pats

def match(ctx, pats=(), opts=None, globbed=False, default='relpath',
          badfn=None):
    '''Return a matcher that will warn about bad matches.'''
    m, _pats = matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)
    return m

def matchall(repo):
    '''Return a matcher that will efficiently match everything.'''
    # 'repo' is accepted for interface symmetry with the other match*
    # helpers; an always-matcher needs no repository state.
    return matchmod.always()

def matchfiles(repo, files, badfn=None):
    '''Return a matcher that will efficiently match exactly these files.'''
    # exact() treats 'files' as literal repo-relative paths, never as
    # patterns; 'repo' is unused but kept for interface symmetry.
    return matchmod.exact(files, badfn=badfn)

def parsefollowlinespattern(repo, rev, pat, msg):
    """Return a file name from `pat` pattern suitable for usage in followlines
    logic.

    Raises error.ParseError(msg) unless the pattern selects exactly one
    file in the given revision.
    """
    if not matchmod.patkind(pat):
        # A plain path: just normalize it against the repo root.
        return pathutil.canonpath(repo.root, repo.getcwd(), pat)

    ctx = repo[rev]
    m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx)
    files = [f for f in ctx if m(f)]
    if len(files) != 1:
        raise error.ParseError(msg)
    return files[0]

def getorigvfs(ui, repo):
    """return a vfs suitable to save 'orig' file

    return None if no special directory is configured"""
    origbackuppath = ui.config('ui', 'origbackuppath')
    if origbackuppath:
        return vfs.vfs(repo.wvfs.join(origbackuppath))
    return None

def backuppath(ui, repo, filepath):
    '''customize where working copy backup files (.orig files) are created

    Fetch user defined path from config file: [ui] origbackuppath = <path>
    Fall back to default (filepath with .orig suffix) if not specified

    filepath is repo-relative

    Returns an absolute path
    '''
    origvfs = getorigvfs(ui, repo)
    if origvfs is None:
        # No dedicated backup directory configured: back up alongside the
        # file itself.
        return repo.wjoin(filepath + ".orig")

    origbackupdir = origvfs.dirname(filepath)
    if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir):
        ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir))

        # Remove any files that conflict with the backup file's path
        for f in reversed(list(util.finddirs(filepath))):
            if origvfs.isfileorlink(f):
                ui.note(_('removing conflicting file: %s\n')
                        % origvfs.join(f))
                origvfs.unlink(f)
                break

        origvfs.makedirs(origbackupdir)

    if origvfs.isdir(filepath) and not origvfs.islink(filepath):
        # A directory occupies the backup path itself: clear it out.
        ui.note(_('removing conflicting directory: %s\n')
                % origvfs.join(filepath))
        origvfs.rmtree(filepath, forcibly=True)

    return origvfs.join(filepath)

891 class _containsnode(object):
891 class _containsnode(object):
892 """proxy __contains__(node) to container.__contains__ which accepts revs"""
892 """proxy __contains__(node) to container.__contains__ which accepts revs"""
893
893
894 def __init__(self, repo, revcontainer):
894 def __init__(self, repo, revcontainer):
895 self._torev = repo.changelog.rev
895 self._torev = repo.changelog.rev
896 self._revcontains = revcontainer.__contains__
896 self._revcontains = revcontainer.__contains__
897
897
898 def __contains__(self, node):
898 def __contains__(self, node):
899 return self._revcontains(self._torev(node))
899 return self._revcontains(self._torev(node))
900
900
901 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
901 def cleanupnodes(repo, replacements, operation, moves=None, metadata=None,
902 fixphase=False, targetphase=None, backup=True):
902 fixphase=False, targetphase=None, backup=True):
903 """do common cleanups when old nodes are replaced by new nodes
903 """do common cleanups when old nodes are replaced by new nodes
904
904
905 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
905 That includes writing obsmarkers or stripping nodes, and moving bookmarks.
906 (we might also want to move working directory parent in the future)
906 (we might also want to move working directory parent in the future)
907
907
908 By default, bookmark moves are calculated automatically from 'replacements',
908 By default, bookmark moves are calculated automatically from 'replacements',
909 but 'moves' can be used to override that. Also, 'moves' may include
909 but 'moves' can be used to override that. Also, 'moves' may include
910 additional bookmark moves that should not have associated obsmarkers.
910 additional bookmark moves that should not have associated obsmarkers.
911
911
912 replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
912 replacements is {oldnode: [newnode]} or a iterable of nodes if they do not
913 have replacements. operation is a string, like "rebase".
913 have replacements. operation is a string, like "rebase".
914
914
915 metadata is dictionary containing metadata to be stored in obsmarker if
915 metadata is dictionary containing metadata to be stored in obsmarker if
916 obsolescence is enabled.
916 obsolescence is enabled.
917 """
917 """
918 assert fixphase or targetphase is None
918 assert fixphase or targetphase is None
919 if not replacements and not moves:
919 if not replacements and not moves:
920 return
920 return
921
921
922 # translate mapping's other forms
922 # translate mapping's other forms
923 if not util.safehasattr(replacements, 'items'):
923 if not util.safehasattr(replacements, 'items'):
924 replacements = {(n,): () for n in replacements}
924 replacements = {(n,): () for n in replacements}
925 else:
925 else:
926 # upgrading non tuple "source" to tuple ones for BC
926 # upgrading non tuple "source" to tuple ones for BC
927 repls = {}
927 repls = {}
928 for key, value in replacements.items():
928 for key, value in replacements.items():
929 if not isinstance(key, tuple):
929 if not isinstance(key, tuple):
930 key = (key,)
930 key = (key,)
931 repls[key] = value
931 repls[key] = value
932 replacements = repls
932 replacements = repls
933
933
934 # Unfiltered repo is needed since nodes in replacements might be hidden.
934 # Unfiltered repo is needed since nodes in replacements might be hidden.
935 unfi = repo.unfiltered()
935 unfi = repo.unfiltered()
936
936
937 # Calculate bookmark movements
937 # Calculate bookmark movements
938 if moves is None:
938 if moves is None:
939 moves = {}
939 moves = {}
940 for oldnodes, newnodes in replacements.items():
940 for oldnodes, newnodes in replacements.items():
941 for oldnode in oldnodes:
941 for oldnode in oldnodes:
942 if oldnode in moves:
942 if oldnode in moves:
943 continue
943 continue
944 if len(newnodes) > 1:
944 if len(newnodes) > 1:
945 # usually a split, take the one with biggest rev number
945 # usually a split, take the one with biggest rev number
946 newnode = next(unfi.set('max(%ln)', newnodes)).node()
946 newnode = next(unfi.set('max(%ln)', newnodes)).node()
947 elif len(newnodes) == 0:
947 elif len(newnodes) == 0:
948 # move bookmark backwards
948 # move bookmark backwards
949 allreplaced = []
949 allreplaced = []
950 for rep in replacements:
950 for rep in replacements:
951 allreplaced.extend(rep)
951 allreplaced.extend(rep)
952 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
952 roots = list(unfi.set('max((::%n) - %ln)', oldnode,
953 allreplaced))
953 allreplaced))
954 if roots:
954 if roots:
955 newnode = roots[0].node()
955 newnode = roots[0].node()
956 else:
956 else:
957 newnode = nullid
957 newnode = nullid
958 else:
958 else:
959 newnode = newnodes[0]
959 newnode = newnodes[0]
960 moves[oldnode] = newnode
960 moves[oldnode] = newnode
961
961
962 allnewnodes = [n for ns in replacements.values() for n in ns]
962 allnewnodes = [n for ns in replacements.values() for n in ns]
963 toretract = {}
963 toretract = {}
964 toadvance = {}
964 toadvance = {}
965 if fixphase:
965 if fixphase:
966 precursors = {}
966 precursors = {}
967 for oldnodes, newnodes in replacements.items():
967 for oldnodes, newnodes in replacements.items():
968 for oldnode in oldnodes:
968 for oldnode in oldnodes:
969 for newnode in newnodes:
969 for newnode in newnodes:
970 precursors.setdefault(newnode, []).append(oldnode)
970 precursors.setdefault(newnode, []).append(oldnode)
971
971
972 allnewnodes.sort(key=lambda n: unfi[n].rev())
972 allnewnodes.sort(key=lambda n: unfi[n].rev())
973 newphases = {}
973 newphases = {}
974 def phase(ctx):
974 def phase(ctx):
975 return newphases.get(ctx.node(), ctx.phase())
975 return newphases.get(ctx.node(), ctx.phase())
976 for newnode in allnewnodes:
976 for newnode in allnewnodes:
977 ctx = unfi[newnode]
977 ctx = unfi[newnode]
978 parentphase = max(phase(p) for p in ctx.parents())
978 parentphase = max(phase(p) for p in ctx.parents())
979 if targetphase is None:
979 if targetphase is None:
980 oldphase = max(unfi[oldnode].phase()
980 oldphase = max(unfi[oldnode].phase()
981 for oldnode in precursors[newnode])
981 for oldnode in precursors[newnode])
982 newphase = max(oldphase, parentphase)
982 newphase = max(oldphase, parentphase)
983 else:
983 else:
984 newphase = max(targetphase, parentphase)
984 newphase = max(targetphase, parentphase)
985 newphases[newnode] = newphase
985 newphases[newnode] = newphase
986 if newphase > ctx.phase():
986 if newphase > ctx.phase():
987 toretract.setdefault(newphase, []).append(newnode)
987 toretract.setdefault(newphase, []).append(newnode)
988 elif newphase < ctx.phase():
988 elif newphase < ctx.phase():
989 toadvance.setdefault(newphase, []).append(newnode)
989 toadvance.setdefault(newphase, []).append(newnode)
990
990
991 with repo.transaction('cleanup') as tr:
991 with repo.transaction('cleanup') as tr:
992 # Move bookmarks
992 # Move bookmarks
993 bmarks = repo._bookmarks
993 bmarks = repo._bookmarks
994 bmarkchanges = []
994 bmarkchanges = []
995 for oldnode, newnode in moves.items():
995 for oldnode, newnode in moves.items():
996 oldbmarks = repo.nodebookmarks(oldnode)
996 oldbmarks = repo.nodebookmarks(oldnode)
997 if not oldbmarks:
997 if not oldbmarks:
998 continue
998 continue
999 from . import bookmarks # avoid import cycle
999 from . import bookmarks # avoid import cycle
1000 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
1000 repo.ui.debug('moving bookmarks %r from %s to %s\n' %
1001 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1001 (pycompat.rapply(pycompat.maybebytestr, oldbmarks),
1002 hex(oldnode), hex(newnode)))
1002 hex(oldnode), hex(newnode)))
1003 # Delete divergent bookmarks being parents of related newnodes
1003 # Delete divergent bookmarks being parents of related newnodes
1004 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
1004 deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)',
1005 allnewnodes, newnode, oldnode)
1005 allnewnodes, newnode, oldnode)
1006 deletenodes = _containsnode(repo, deleterevs)
1006 deletenodes = _containsnode(repo, deleterevs)
1007 for name in oldbmarks:
1007 for name in oldbmarks:
1008 bmarkchanges.append((name, newnode))
1008 bmarkchanges.append((name, newnode))
1009 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1009 for b in bookmarks.divergent2delete(repo, deletenodes, name):
1010 bmarkchanges.append((b, None))
1010 bmarkchanges.append((b, None))
1011
1011
1012 if bmarkchanges:
1012 if bmarkchanges:
1013 bmarks.applychanges(repo, tr, bmarkchanges)
1013 bmarks.applychanges(repo, tr, bmarkchanges)
1014
1014
1015 for phase, nodes in toretract.items():
1015 for phase, nodes in toretract.items():
1016 phases.retractboundary(repo, tr, phase, nodes)
1016 phases.retractboundary(repo, tr, phase, nodes)
1017 for phase, nodes in toadvance.items():
1017 for phase, nodes in toadvance.items():
1018 phases.advanceboundary(repo, tr, phase, nodes)
1018 phases.advanceboundary(repo, tr, phase, nodes)
1019
1019
1020 mayusearchived = repo.ui.config('experimental', 'cleanup-as-archived')
1020 mayusearchived = repo.ui.config('experimental', 'cleanup-as-archived')
1021 # Obsolete or strip nodes
1021 # Obsolete or strip nodes
1022 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1022 if obsolete.isenabled(repo, obsolete.createmarkersopt):
1023 # If a node is already obsoleted, and we want to obsolete it
1023 # If a node is already obsoleted, and we want to obsolete it
1024 # without a successor, skip that obssolete request since it's
1024 # without a successor, skip that obssolete request since it's
1025 # unnecessary. That's the "if s or not isobs(n)" check below.
1025 # unnecessary. That's the "if s or not isobs(n)" check below.
1026 # Also sort the node in topology order, that might be useful for
1026 # Also sort the node in topology order, that might be useful for
1027 # some obsstore logic.
1027 # some obsstore logic.
1028 # NOTE: the sorting might belong to createmarkers.
1028 # NOTE: the sorting might belong to createmarkers.
1029 torev = unfi.changelog.rev
1029 torev = unfi.changelog.rev
1030 sortfunc = lambda ns: torev(ns[0][0])
1030 sortfunc = lambda ns: torev(ns[0][0])
1031 rels = []
1031 rels = []
1032 for ns, s in sorted(replacements.items(), key=sortfunc):
1032 for ns, s in sorted(replacements.items(), key=sortfunc):
1033 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1033 rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s))
1034 rels.append(rel)
1034 rels.append(rel)
1035 if rels:
1035 if rels:
1036 obsolete.createmarkers(repo, rels, operation=operation,
1036 obsolete.createmarkers(repo, rels, operation=operation,
1037 metadata=metadata)
1037 metadata=metadata)
1038 elif phases.supportinternal(repo) and mayusearchived:
1038 elif phases.supportinternal(repo) and mayusearchived:
1039 # this assume we do not have "unstable" nodes above the cleaned ones
1039 # this assume we do not have "unstable" nodes above the cleaned ones
1040 allreplaced = set()
1040 allreplaced = set()
1041 for ns in replacements.keys():
1041 for ns in replacements.keys():
1042 allreplaced.update(ns)
1042 allreplaced.update(ns)
1043 if backup:
1043 if backup:
1044 from . import repair # avoid import cycle
1044 from . import repair # avoid import cycle
1045 node = min(allreplaced, key=repo.changelog.rev)
1045 node = min(allreplaced, key=repo.changelog.rev)
1046 repair.backupbundle(repo, allreplaced, allreplaced, node,
1046 repair.backupbundle(repo, allreplaced, allreplaced, node,
1047 operation)
1047 operation)
1048 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1048 phases.retractboundary(repo, tr, phases.archived, allreplaced)
1049 else:
1049 else:
1050 from . import repair # avoid import cycle
1050 from . import repair # avoid import cycle
1051 tostrip = list(n for ns in replacements for n in ns)
1051 tostrip = list(n for ns in replacements for n in ns)
1052 if tostrip:
1052 if tostrip:
1053 repair.delayedstrip(repo.ui, repo, tostrip, operation,
1053 repair.delayedstrip(repo.ui, repo, tostrip, operation,
1054 backup=backup)
1054 backup=backup)
1055
1055
def addremove(repo, matcher, prefix, uipathfn, opts=None):
    """Add new files and forget missing ones, with optional rename
    detection controlled by the 'similarity' option (a 0-100 percentage).

    Recurses into subrepos when requested or matched. Returns 1 if any
    path was rejected by the matcher or a subrepo reported failure,
    0 otherwise.
    """
    if opts is None:
        opts = {}
    dry_run = opts.get('dry_run')
    # 'similarity' arrives as a 0-100 percentage; normalize to [0.0, 1.0].
    try:
        similarity = float(opts.get('similarity') or 0)
    except ValueError:
        raise error.Abort(_('similarity must be a number'))
    if similarity < 0 or similarity > 100:
        raise error.Abort(_('similarity must be between 0 and 100'))
    similarity /= 100.0

    exitcode = 0

    # Handle subrepos first; a missing one is reported, not fatal.
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        submatch = matchmod.subdirmatcher(subpath, matcher)
        wanted = (opts.get('subrepos') or matcher.exact(subpath)
                  or any(submatch.files()))
        if not wanted:
            continue
        sub = wctx.sub(subpath)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = subdiruipathfn(subpath, uipathfn)
        try:
            if sub.addremove(submatch, subprefix, subuipathfn, opts):
                exitcode = 1
        except error.LookupError:
            repo.ui.status(_("skipping missing subrepository: %s\n")
                           % uipathfn(subpath))

    # Collect paths the matcher rejects so failure can be reported below.
    rejected = []
    def badfn(f, msg):
        if f in matcher.files():
            matcher.bad(f, msg)
        rejected.append(f)

    badmatch = matchmod.badmatch(matcher, badfn)
    added, unknown, deleted, removed, forgotten = _interestingfiles(
        repo, badmatch)

    # Announce upcoming adds/removes (always when verbose, otherwise only
    # for paths the user did not name exactly).
    unknownset = set(unknown + forgotten)
    toprint = unknownset.copy()
    toprint.update(deleted)
    for f in sorted(toprint):
        if not (repo.ui.verbose or not matcher.exact(f)):
            continue
        if f in unknownset:
            status = _('adding %s\n') % uipathfn(f)
            label = 'ui.addremove.added'
        else:
            status = _('removing %s\n') % uipathfn(f)
            label = 'ui.addremove.removed'
        repo.ui.status(status, label=label)

    renames = _findrenames(repo, matcher, added + unknown,
                           removed + deleted, similarity, uipathfn)

    if not dry_run:
        _markchanges(repo, unknown + forgotten, deleted, renames)

    for f in rejected:
        if f in matcher.files():
            return 1
    return exitcode
1118
1118
def marktouched(repo, files, similarity=0.0):
    """Record that *files* (paths relative to the repo root) have been
    operated upon: add the new ones, forget the missing ones, and detect
    renames at the given similarity threshold.

    Returns 1 if any path was rejected by the matcher, else 0.
    """
    rejected = []
    def badfn(path, msg):
        rejected.append(path)
    m = matchfiles(repo, files, badfn=badfn)

    added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m)

    if repo.ui.verbose:
        # Show what will be added (unknown/forgotten) or removed (deleted).
        newset = set(unknown + forgotten)
        for path in sorted(newset | set(deleted)):
            if path in newset:
                repo.ui.status(_('adding %s\n') % path)
            else:
                repo.ui.status(_('removing %s\n') % path)

    # TODO: We should probably have the caller pass in uipathfn and apply it to
    # the messages above too. legacyrelativevalue=True is consistent with how
    # it used to work.
    uipathfn = getuipathfn(repo, legacyrelativevalue=True)
    renames = _findrenames(repo, m, added + unknown, removed + deleted,
                           similarity, uipathfn)

    _markchanges(repo, unknown + forgotten, deleted, renames)

    if any(f in m.files() for f in rejected):
        return 1
    return 0
1151
1151
def _interestingfiles(repo, matcher):
    """Walk the dirstate with *matcher* and classify the files that
    addremove cares about.

    Returns a 5-tuple of path lists: (added, unknown, deleted, removed,
    forgotten). Unlike dirstate.status, modified-vs-clean distinctions
    are irrelevant here; only the add/remove bookkeeping states matter.
    """
    added, unknown, deleted, removed, forgotten = [], [], [], [], []
    audit = pathutil.pathauditor(repo.root, cached=True)

    ctx = repo[None]
    dirstate = repo.dirstate
    matcher = repo.narrowmatch(matcher, includeexact=True)
    results = dirstate.walk(matcher, subrepos=sorted(ctx.substate),
                            unknown=True, ignored=False, full=False)
    # NOTE: the elif order below matters — an untracked ('?') path that
    # fails the audit falls through to the later branches.
    for path, st in results.iteritems():
        state = dirstate[path]
        if state == '?' and audit.check(path):
            # on disk, untracked, and a legal path -> candidate for add
            unknown.append(path)
        elif state != 'r' and not st:
            # vanished from disk without being marked removed
            deleted.append(path)
        elif state == 'r' and st:
            # marked removed but actually present -> forget the removal
            forgotten.append(path)
        # for finding renames
        elif state == 'r' and not st:
            removed.append(path)
        elif state == 'a':
            added.append(path)

    return added, unknown, deleted, removed, forgotten
1181
1181
def _findrenames(repo, matcher, added, removed, similarity, uipathfn):
    """Return a {new: old} mapping of probable renames from *removed*
    files to *added* ones, found via similar.findrenames at the given
    similarity threshold (0 disables detection entirely)."""
    renames = {}
    if similarity <= 0:
        return renames
    for old, new, score in similar.findrenames(repo, added, removed,
                                               similarity):
        # Stay quiet only when both endpoints were named explicitly
        # (unless the user asked for verbosity).
        exact = matcher.exact(old) and matcher.exact(new)
        if repo.ui.verbose or not exact:
            repo.ui.status(_('recording removal of %s as rename to %s '
                             '(%d%% similar)\n') %
                           (uipathfn(old), uipathfn(new), score * 100))
        renames[new] = old
    return renames
1196
1196
def _markchanges(repo, unknown, deleted, renames):
    """Under the wlock: forget *deleted* files, add *unknown* ones, and
    record each entry of *renames* ({new: old}) as a copy."""
    wctx = repo[None]
    with repo.wlock():
        wctx.forget(deleted)
        wctx.add(unknown)
        for dst, src in renames.iteritems():
            wctx.copy(src, dst)
1206
1206
def getrenamedfn(repo, endrev=None):
    """Return a callable getrenamed(fn, rev) giving the copy source of
    file *fn* at changeset *rev*, or None when it was not renamed there.
    """
    if copiesmod.usechangesetcentricalgo(repo):
        # Copy metadata lives in the changeset itself: just ask the ctx.
        def getrenamed(fn, rev):
            ctx = repo[rev]
            copies = ctx.p1copies()
            if fn not in copies:
                copies = ctx.p2copies()
            return copies.get(fn)
        return getrenamed

    if endrev is None:
        endrev = len(repo)
    # {fn: {linkrev: copysource-or-False}}, populated lazily per file
    rcache = {}

    def getrenamed(fn, rev):
        """Look up rename info for *fn* at changerev *rev*.

        The first time a file is asked about, its filelog is scanned (up
        to endrev) and indexed by linkrev; the slower filectx path is
        only taken when linkrev != changerev.
        """
        cache = rcache.get(fn)
        if cache is None:
            cache = rcache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                lr = fl.linkrev(i)
                renamed = fl.renamed(fl.node(i))
                cache[lr] = renamed and renamed[0]
                if lr >= endrev:
                    break
        if rev in cache:
            return cache[rev]

        # rev missing from the linkrev index (linkrev != rev): fall back
        # to filectx logic.
        try:
            return repo[rev][fn].copysource()
        except error.LookupError:
            return None

    return getrenamed
1249
1249
def getcopiesfn(repo, endrev=None):
    """Return a callable copiesfn(ctx) producing the copies recorded in
    changeset *ctx* as a list of (dst, src) pairs."""
    if copiesmod.usechangesetcentricalgo(repo):
        def copiesfn(ctx):
            # Changeset-centric: merge both parents' copy dicts.
            if not ctx.p2copies():
                return sorted(ctx.p1copies().items())
            allcopies = ctx.p1copies().copy()
            # There should be no overlap
            allcopies.update(ctx.p2copies())
            return sorted(allcopies.items())
    else:
        getrenamed = getrenamedfn(repo, endrev)
        def copiesfn(ctx):
            # Filelog-centric: probe each touched file for a rename.
            copies = []
            rev = ctx.rev()
            for fn in ctx.files():
                src = getrenamed(fn, rev)
                if src:
                    copies.append((fn, src))
            return copies

    return copiesfn
1271
1271
def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None):
    """Record in the dirstate the intent of copying src to dst.

    Depending on src's history, dst may instead end up merely added or
    refreshed rather than marked as copied from src.
    """
    ds = repo.dirstate
    origsrc = ds.copied(src) or src
    if dst == origsrc:
        # Copying a copy back onto its original source: just make sure
        # dst is tracked normally again.
        if ds[dst] not in 'mn' and not dryrun:
            ds.normallookup(dst)
    elif ds[origsrc] == 'a' and origsrc == src:
        # The source was never committed, so there is no revision to
        # attach copy data to; only add dst (and tell the user why).
        if not ui.quiet:
            ui.warn(_("%s has not been committed yet, so no copy "
                      "data will be stored for %s.\n")
                    % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd)))
        if ds[dst] in '?r' and not dryrun:
            wctx.add([dst])
    elif not dryrun:
        wctx.copy(origsrc, dst)
1290
1290
def movedirstate(repo, newctx, match=None):
    """Move the dirstate to newctx and adjust it as necessary.

    A matcher can be provided as an optimization. It is probably a bug to pass
    a matcher that doesn't match all the differences between the parent of the
    working copy and newctx.
    """
    oldctx = repo['.']
    ds = repo.dirstate
    # Reparent the working copy onto newctx before fixing up entries.
    ds.setparents(newctx.node(), nullid)
    # Snapshot the working directory's copy records; merged back in below.
    copies = dict(ds.copies())
    # Differences between oldctx and newctx; each branch below combines a
    # file's status with its current dirstate entry to pick the new state.
    s = newctx.status(oldctx, match=match)
    for f in s.modified:
        if ds[f] == 'r':
            # modified + removed -> removed
            continue
        ds.normallookup(f)

    for f in s.added:
        if ds[f] == 'r':
            # added + removed -> unknown
            ds.drop(f)
        elif ds[f] != 'a':
            ds.add(f)

    for f in s.removed:
        if ds[f] == 'a':
            # removed + added -> normal
            ds.normallookup(f)
        elif ds[f] != 'r':
            ds.remove(f)

    # Merge old parent and old working dir copies
    oldcopies = copiesmod.pathcopies(newctx, oldctx, match)
    # working-directory copy records take precedence on key collisions
    oldcopies.update(copies)
    # Chain copies through the old parent: if dst's source was itself
    # copied, point dst at the ultimate source.
    copies = dict((dst, oldcopies.get(src, src))
                  for dst, src in oldcopies.iteritems())
    # Adjust the dirstate copies
    for dst, src in copies.iteritems():
        # Copy info is only kept when the source exists in newctx and dst
        # is added relative to it; otherwise the record is cleared.
        if (src not in newctx or dst in newctx or ds[dst] != 'a'):
            src = None
        ds.copy(src, dst)
1333
1333
def writerequires(opener, requirements):
    """Atomically write the sorted *requirements* to the 'requires' file
    through *opener*, one entry per line."""
    lines = ["%s\n" % req for req in sorted(requirements)]
    with opener('requires', 'w', atomictemp=True) as fp:
        fp.write("".join(lines))
1338
1338
class filecachesubentry(object):
    """Tracks the stat state of a single path for the filecache
    machinery.

    Whether stat-based caching works for the path may be unknown (None)
    until it has been stat()ed successfully at least once.
    """

    def __init__(self, path, stat):
        # path being watched; cachestat holds the last observed stat data
        self.path = path
        self.cachestat = None
        self._cacheable = None

        if stat:
            self.cachestat = filecachesubentry.stat(self.path)
            if self.cachestat:
                self._cacheable = self.cachestat.cacheable()
            else:
                # None means we don't know yet
                self._cacheable = None

    def refresh(self):
        """Re-stat the path, if caching it is (assumed) possible."""
        if self.cacheable():
            self.cachestat = filecachesubentry.stat(self.path)

    def cacheable(self):
        """Whether stat data can be trusted for this path; optimistic
        (True) while still unknown."""
        if self._cacheable is None:
            # we don't know yet, assume it is for now
            return True
        return self._cacheable

    def changed(self):
        """True if the path's stat data differs from the cached copy (or
        caching is impossible); refreshes the cached stat on change."""
        # no point in going further if we can't cache it
        if not self.cacheable():
            return True

        newstat = filecachesubentry.stat(self.path)

        # we may not know if it's cacheable yet, check again now
        if newstat and self._cacheable is None:
            self._cacheable = newstat.cacheable()

        # check again
        if not self._cacheable:
            return True

        if self.cachestat == newstat:
            return False
        self.cachestat = newstat
        return True

    @staticmethod
    def stat(path):
        """util.cachestat for *path*, or None if the file is absent."""
        try:
            return util.cachestat(path)
        except OSError as e:
            # a missing file is the only expected failure
            if e.errno != errno.ENOENT:
                raise
1393
1393
class filecacheentry(object):
    """Aggregates one filecachesubentry per watched path."""

    def __init__(self, paths, stat=True):
        self._entries = [filecachesubentry(p, stat) for p in paths]

    def changed(self):
        """True if any watched path has changed."""
        return any(e.changed() for e in self._entries)

    def refresh(self):
        """Refresh the cached stat data of every watched path."""
        for e in self._entries:
            e.refresh()
1410
1410
class filecache(object):
    """A property like decorator that tracks files under .hg/ for updates.

    On first access, the files defined as arguments are stat()ed and the
    results cached. The decorated function is called. The results are
    stashed away in a ``_filecache`` dict on the object whose method is
    decorated.

    On subsequent access, the cached result is used as it is set to the
    instance dictionary.

    On external property set/delete operations, the caller must update the
    corresponding _filecache entry appropriately. Use
    ``__class__.<attr>.set()`` instead of directly setting <attr>.

    When using the property API, the cached data is always used if
    available. No stat() is performed to check if the file has changed.

    Others can muck about with the state of the ``_filecache`` dict. e.g.
    they can populate an entry before the property's getter is called. In
    this case, entries in ``_filecache`` will be used during property
    operations, if available. If the underlying file changes, it is up to
    external callers to reflect this by e.g. calling ``delattr(obj, attr)``
    to remove the cached method result as well as possibly calling
    ``del obj._filecache[attr]`` to remove the ``filecacheentry``.
    """

    def __init__(self, *paths):
        self.paths = paths

    def join(self, obj, fname):
        """Used to compute the runtime path of a cached file.

        Users should subclass filecache and provide their own version of
        this function to call the appropriate join function on 'obj' (an
        instance of the class that its member function was decorated).
        """
        raise NotImplementedError

    def __call__(self, func):
        self.func = func
        self.sname = func.__name__
        # _filecache is keyed by the bytes form of the attribute name
        self.name = pycompat.sysbytes(self.sname)
        return self

    def __get__(self, obj, type=None):
        # if accessed on the class, return the descriptor itself.
        if obj is None:
            return self

        assert self.sname not in obj.__dict__

        entry = obj._filecache.get(self.name)

        if entry:
            # cached entry exists: recompute only if the file changed
            if entry.changed():
                entry.obj = self.func(obj)
        else:
            paths = [self.join(obj, path) for path in self.paths]

            # We stat -before- creating the object so our cache doesn't lie
            # if a writer modified between the time we read and stat
            entry = filecacheentry(paths, True)
            entry.obj = self.func(obj)

            obj._filecache[self.name] = entry

        # stash in the instance __dict__ so later reads bypass this
        # descriptor entirely (non-data descriptor fast path)
        obj.__dict__[self.sname] = entry.obj
        return entry.obj

    # don't implement __set__(), which would make __dict__ lookup as slow as
    # function call.

    def set(self, obj, value):
        if self.name not in obj._filecache:
            # we add an entry for the missing value because X in __dict__
            # implies X in _filecache
            paths = [self.join(obj, path) for path in self.paths]
            ce = filecacheentry(paths, False)
            obj._filecache[self.name] = ce
        else:
            ce = obj._filecache[self.name]

        ce.obj = value # update cached copy
        obj.__dict__[self.sname] = value # update copy returned by obj.x
1495
1495
def extdatasource(repo, source):
    """Gather a map of rev -> value dict from the specified source

    A source spec is treated as a URL, with a special case shell: type
    for parsing the output from a shell command.

    The data is parsed as a series of newline-separated records where
    each record is a revision specifier optionally followed by a space
    and a freeform string value. If the revision is known locally, it
    is converted to a rev, otherwise the record is skipped.

    Note that both key and value are treated as UTF-8 and converted to
    the local encoding. This allows uniformity between local and
    remote data sources.
    """

    spec = repo.ui.config("extdata", source)
    if not spec:
        raise error.Abort(_("unknown extdata source '%s'") % source)

    data = {}
    src = proc = None
    try:
        if spec.startswith("shell:"):
            # external commands should be run relative to the repo root
            cmd = spec[6:]
            proc = subprocess.Popen(procutil.tonativestr(cmd),
                                    shell=True, bufsize=-1,
                                    close_fds=procutil.closefds,
                                    stdout=subprocess.PIPE,
                                    cwd=procutil.tonativestr(repo.root))
            src = proc.stdout
        else:
            # treat as a URL or file
            src = url.open(repo.ui, spec)
        for line in src:
            # "<revspec> <freeform value>" or just "<revspec>"
            if " " in line:
                key, value = line.strip().split(" ", 1)
            else:
                key, value = line.strip(), ""

            key = encoding.tolocal(key)
            try:
                data[revsingle(repo, key).rev()] = encoding.tolocal(value)
            except (error.LookupError, error.RepoLookupError):
                pass # we ignore data for nodes that don't exist locally
    finally:
        if proc:
            try:
                proc.communicate()
            except ValueError:
                # This happens if we started iterating src and then
                # get a parse error on a line. It should be safe to ignore.
                pass
        if src:
            src.close()
    if proc and proc.returncode != 0:
        raise error.Abort(_("extdata command '%s' failed: %s")
                          % (cmd, procutil.explainexit(proc.returncode)))

    return data
1557
1557
1558 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1558 def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs):
1559 if lock is None:
1559 if lock is None:
1560 raise error.LockInheritanceContractViolation(
1560 raise error.LockInheritanceContractViolation(
1561 'lock can only be inherited while held')
1561 'lock can only be inherited while held')
1562 if environ is None:
1562 if environ is None:
1563 environ = {}
1563 environ = {}
1564 with lock.inherit() as locker:
1564 with lock.inherit() as locker:
1565 environ[envvar] = locker
1565 environ[envvar] = locker
1566 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1566 return repo.ui.system(cmd, environ=environ, *args, **kwargs)
1567
1567
def wlocksub(repo, cmd, *args, **kwargs):
    """run cmd as a subprocess that allows inheriting repo's wlock

    This can only be called while the wlock is held. This takes all the
    arguments that ui.system does, and returns the exit code of the
    subprocess."""
    # delegate to _locksub with the currently-held working-dir lock
    return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd,
                    *args, **kwargs)
1576
1576
class progress(object):
    """Context-manager helper that reports progress through a ui bar.

    ``updatebar`` is a callable with the signature
    ``(topic, pos, item, unit, total)`` that actually renders the bar.
    Exiting the context (or calling ``complete()``) clears the bar by
    passing ``pos=None``.
    """

    def __init__(self, ui, updatebar, topic, unit="", total=None):
        self.ui = ui
        self.pos = 0
        self.topic = topic
        self.unit = unit
        self.total = total
        # when progress.debug is set, every update is also ui.debug()ed
        self.debug = ui.configbool('progress', 'debug')
        self._updatebar = updatebar

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.complete()

    def update(self, pos, item="", total=None):
        """Move the bar to absolute position ``pos``."""
        assert pos is not None
        if total:
            self.total = total
        self.pos = pos
        self._updatebar(self.topic, self.pos, item, self.unit, self.total)
        if self.debug:
            self._printdebug(item)

    def increment(self, step=1, item="", total=None):
        """Advance the bar by ``step`` positions."""
        self.update(self.pos + step, item, total)

    def complete(self):
        """Finish and clear the progress bar."""
        self.pos = None
        self.unit = ""
        self.total = None
        self._updatebar(self.topic, self.pos, "", self.unit, self.total)

    def _printdebug(self, item):
        # fix: 'unit' needs a default; previously it was only assigned
        # when self.unit was non-empty, so an empty unit made the
        # ui.debug() calls below raise UnboundLocalError
        unit = ''
        if self.unit:
            unit = ' ' + self.unit
        if item:
            item = ' ' + item

        if self.total:
            pct = 100.0 * self.pos / self.total
            self.ui.debug('%s:%s %d/%d%s (%4.2f%%)\n'
                          % (self.topic, item, self.pos, self.total, unit, pct))
        else:
            self.ui.debug('%s:%s %d%s\n' % (self.topic, item, self.pos, unit))
1623
1623
def gdinitconfig(ui):
    """helper function to know if a repo should be created as general delta
    """
    # experimental config: format.generaldelta
    # either the new-style or the legacy knob enables generaldelta
    return (ui.configbool('format', 'generaldelta') or
            ui.configbool('format', 'usegeneraldelta'))
1630
1630
def gddeltaconfig(ui):
    """helper function to know if incoming delta should be optimised
    """
    # experimental config: format.generaldelta
    return ui.configbool('format', 'generaldelta')
1636
1636
class simplekeyvaluefile(object):
    """A simple file with key=value lines

    Keys must be alphanumerics and start with a letter, values must not
    contain '\n' characters"""

    # reserved key used to carry a free-form first line through read()
    firstlinekey = '__firstline'

    def __init__(self, vfs, path, keys=None):
        # NOTE(review): 'keys' is accepted but never used; kept only for
        # backward compatibility with existing callers
        self.vfs = vfs
        self.path = path

    def read(self, firstlinenonkeyval=False):
        """Read the contents of a simple key-value file

        'firstlinenonkeyval' indicates whether the first line of file should
        be treated as a key-value pair or returned fully under the
        __firstline key."""
        lines = self.vfs.readlines(self.path)
        result = {}
        if firstlinenonkeyval:
            if not lines:
                e = _("empty simplekeyvalue file")
                raise error.CorruptedState(e)
            # we don't want to include '\n' in the __firstline
            result[self.firstlinekey] = lines[0][:-1]
            del lines[0]

        try:
            # the 'if line.strip()' part prevents us from failing on empty
            # lines which only contain '\n' therefore are not skipped
            # by 'if line'
            parsed = dict(line[:-1].split('=', 1) for line in lines
                          if line.strip())
            if self.firstlinekey in parsed:
                e = _("%r can't be used as a key")
                raise error.CorruptedState(e % self.firstlinekey)
            result.update(parsed)
        except ValueError as e:
            raise error.CorruptedState(str(e))
        return result

    def write(self, data, firstline=None):
        """Write key=>value mapping to a file
        data is a dict. Keys must be alphanumerical and start with a letter.
        Values must not contain newline characters.

        If 'firstline' is not None, it is written to file before
        everything else, as it is, not in a key=value form"""
        lines = []
        if firstline is not None:
            lines.append('%s\n' % firstline)

        for key, value in data.items():
            # reject anything that read() could not round-trip
            if key == self.firstlinekey:
                e = "key name '%s' is reserved" % self.firstlinekey
                raise error.ProgrammingError(e)
            if not key[0:1].isalpha():
                e = "keys must start with a letter in a key-value file"
                raise error.ProgrammingError(e)
            if not key.isalnum():
                e = "invalid key name in a simple key-value file"
                raise error.ProgrammingError(e)
            if '\n' in value:
                e = "invalid value in a simple key-value file"
                raise error.ProgrammingError(e)
            lines.append("%s=%s\n" % (key, value))
        with self.vfs(self.path, mode='wb', atomictemp=True) as fp:
            fp.write(''.join(lines))
1705
1705
1706 _reportobsoletedsource = [
1706 _reportobsoletedsource = [
1707 'debugobsolete',
1707 'debugobsolete',
1708 'pull',
1708 'pull',
1709 'push',
1709 'push',
1710 'serve',
1710 'serve',
1711 'unbundle',
1711 'unbundle',
1712 ]
1712 ]
1713
1713
1714 _reportnewcssource = [
1714 _reportnewcssource = [
1715 'pull',
1715 'pull',
1716 'unbundle',
1716 'unbundle',
1717 ]
1717 ]
1718
1718
def prefetchfiles(repo, revs, match):
    """Invokes the registered file prefetch functions, allowing extensions to
    ensure the corresponding files are available locally, before the command
    uses them."""
    if not match:
        match = matchall(repo)
    else:
        # The command itself will complain about files that don't exist, so
        # don't duplicate the message.
        match = matchmod.badmatch(match, lambda fn, msg: None)

    fileprefetchhooks(repo, revs, match)
1731
1731
# a list of (repo, revs, match) prefetch functions
fileprefetchhooks = util.hooks()

# A marker that tells the evolve extension to suppress its own reporting
_reportstroubledchangesets = True
1737
1737
def registersummarycallback(repo, otr, txnname=''):
    """register a callback to issue a summary after the transaction is closed
    """
    def txmatch(sources):
        # does this transaction's name belong to any of the given sources?
        return any(txnname.startswith(source) for source in sources)

    categories = []

    def reportsummary(func):
        """decorator for report callbacks."""
        # The repoview life cycle is shorter than the one of the actual
        # underlying repository. So the filtered object can die before the
        # weakref is used leading to troubles. We keep a reference to the
        # unfiltered object and restore the filtering when retrieving the
        # repository through the weakref.
        filtername = repo.filtername
        reporef = weakref.ref(repo.unfiltered())
        def wrapped(tr):
            repo = reporef()
            if filtername:
                repo = repo.filtered(filtername)
            func(repo, tr)
        # category names are numbered so the reports run in
        # registration order
        newcat = '%02i-txnreport' % len(categories)
        otr.addpostclose(newcat, wrapped)
        categories.append(newcat)
        return wrapped


    @reportsummary
    def reportchangegroup(repo, tr):
        # summarize what an incoming changegroup added
        cgchangesets = tr.changes.get('changegroup-count-changesets', 0)
        cgrevisions = tr.changes.get('changegroup-count-revisions', 0)
        cgfiles = tr.changes.get('changegroup-count-files', 0)
        cgheads = tr.changes.get('changegroup-count-heads', 0)
        if cgchangesets or cgrevisions or cgfiles:
            htext = ""
            if cgheads:
                htext = _(" (%+d heads)") % cgheads
            msg = _("added %d changesets with %d changes to %d files%s\n")
            repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext))

    if txmatch(_reportobsoletedsource):
        @reportsummary
        def reportobsoleted(repo, tr):
            obsoleted = obsutil.getobsoleted(repo, tr)
            newmarkers = len(tr.changes.get('obsmarkers', ()))
            if newmarkers:
                repo.ui.status(_('%i new obsolescence markers\n') % newmarkers)
            if obsoleted:
                repo.ui.status(_('obsoleted %i changesets\n')
                               % len(obsoleted))

    if (obsolete.isenabled(repo, obsolete.createmarkersopt) and
        repo.ui.configbool('experimental', 'evolution.report-instabilities')):
        instabilitytypes = [
            ('orphan', 'orphan'),
            ('phase-divergent', 'phasedivergent'),
            ('content-divergent', 'contentdivergent'),
        ]

        def getinstabilitycounts(repo):
            # count unstable revisions per type, ignoring filtered revs
            filtered = repo.changelog.filteredrevs
            counts = {}
            for instability, revset in instabilitytypes:
                counts[instability] = len(set(obsolete.getrevs(repo, revset)) -
                                          filtered)
            return counts

        # snapshot taken before the transaction, compared in the callback
        oldinstabilitycounts = getinstabilitycounts(repo)
        @reportsummary
        def reportnewinstabilities(repo, tr):
            newinstabilitycounts = getinstabilitycounts(repo)
            for instability, revset in instabilitytypes:
                delta = (newinstabilitycounts[instability] -
                         oldinstabilitycounts[instability])
                msg = getinstabilitymessage(delta, instability)
                if msg:
                    repo.ui.warn(msg)

    if txmatch(_reportnewcssource):
        @reportsummary
        def reportnewcs(repo, tr):
            """Report the range of new revisions pulled/unbundled."""
            origrepolen = tr.changes.get('origrepolen', len(repo))
            unfi = repo.unfiltered()
            if origrepolen >= len(unfi):
                return

            # Compute the bounds of new visible revisions' range.
            revs = smartset.spanset(repo, start=origrepolen)
            if revs:
                minrev, maxrev = repo[revs.min()], repo[revs.max()]

                if minrev == maxrev:
                    revrange = minrev
                else:
                    revrange = '%s:%s' % (minrev, maxrev)
                draft = len(repo.revs('%ld and draft()', revs))
                secret = len(repo.revs('%ld and secret()', revs))
                if not (draft or secret):
                    msg = _('new changesets %s\n') % revrange
                elif draft and secret:
                    msg = _('new changesets %s (%d drafts, %d secrets)\n')
                    msg %= (revrange, draft, secret)
                elif draft:
                    msg = _('new changesets %s (%d drafts)\n')
                    msg %= (revrange, draft)
                elif secret:
                    msg = _('new changesets %s (%d secrets)\n')
                    msg %= (revrange, secret)
                else:
                    errormsg = 'entered unreachable condition'
                    raise error.ProgrammingError(errormsg)
                repo.ui.status(msg)

            # search new changesets directly pulled as obsolete
            duplicates = tr.changes.get('revduplicates', ())
            obsadded = unfi.revs('(%d: + %ld) and obsolete()',
                                 origrepolen, duplicates)
            cl = repo.changelog
            extinctadded = [r for r in obsadded if r not in cl]
            if extinctadded:
                # They are not just obsolete, but obsolete and invisible
                # we call them "extinct" internally but the terms have not been
                # exposed to users.
                msg = '(%d other changesets obsolete on arrival)\n'
                repo.ui.status(msg % len(extinctadded))

        @reportsummary
        def reportphasechanges(repo, tr):
            """Report statistics of phase changes for changesets pre-existing
            pull/unbundle.
            """
            origrepolen = tr.changes.get('origrepolen', len(repo))
            phasetracking = tr.changes.get('phases', {})
            if not phasetracking:
                return
            published = [
                rev for rev, (old, new) in phasetracking.iteritems()
                if new == phases.public and rev < origrepolen
            ]
            if not published:
                return
            repo.ui.status(_('%d local changesets published\n')
                           % len(published))
1883
1883
def getinstabilitymessage(delta, instability):
    """function to return the message to show warning about new instabilities

    exists as a separate function so that extension can wrap to show more
    information like how to fix instabilities"""
    if delta <= 0:
        # no new instabilities of this type: no warning to issue
        return None
    return _('%i new %s changesets\n') % (delta, instability)
1891
1891
def nodesummaries(repo, nodes, maxnumnodes=4):
    """Return a short textual summary of *nodes*.

    The full (abbreviated-hash) list is shown when it is short enough or the
    ui is verbose; otherwise only the first *maxnumnodes* entries appear,
    followed by a count of the rest.
    """
    if repo.ui.verbose or len(nodes) <= maxnumnodes:
        return ' '.join(short(h) for h in nodes)
    shown = ' '.join(short(h) for h in nodes[:maxnumnodes])
    return _("%s and %d others") % (shown, len(nodes) - maxnumnodes)
1897
1897
def enforcesinglehead(repo, tr, desc):
    """check that no named branch has multiple heads"""
    # Strip/repair transactions may legitimately pass through states with
    # extra heads; skip the check for them.
    if desc in ('strip', 'repair'):
        return
    visible = repo.filtered('visible')
    # possible improvement: we could restrict the check to affected branch
    for name, heads in visible.branchmap().iteritems():
        if len(heads) < 2:
            continue
        msg = _('rejecting multiple heads on branch "%s"') % name
        hint = _('%d heads: %s') % (len(heads), nodesummaries(repo, heads))
        raise error.Abort(msg, hint=hint)
1912
1912
def wrapconvertsink(sink):
    """Extension hook around convcmd.convertsink().

    Allow extensions to wrap the sink returned by convcmd.convertsink()
    before it is used, whether or not the convert extension was formally
    loaded.  The default implementation is the identity.
    """
    return sink
1918
1918
def unhidehashlikerevs(repo, specs, hiddentype):
    """parse the user specs and unhide changesets whose hash or revision number
    is passed.

    hiddentype can be: 1) 'warn': warn while unhiding changesets
                       2) 'nowarn': don't warn while unhiding changesets

    returns a repo object with the required changesets unhidden
    """
    # Direct access only applies to filtered repos with the feature enabled.
    if not repo.filtername:
        return repo
    if not repo.ui.configbool('experimental', 'directaccess'):
        return repo
    if repo.filtername not in ('visible', 'visible-hidden'):
        return repo

    # Collect every hash- or revnum-looking symbol from the user's revsets.
    hashlikes = set()
    for spec in specs:
        try:
            tree = revsetlang.parse(spec)
        except error.ParseError:
            # malformed spec; scmutil.revrange() will report it to the user
            continue
        hashlikes.update(revsetlang.gethashlikesymbols(tree))
    if not hashlikes:
        return repo

    pinnedrevs = _getrevsfromsymbols(repo, hashlikes)
    if not pinnedrevs:
        return repo

    if hiddentype == 'warn':
        unfi = repo.unfiltered()
        revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in pinnedrevs])
        repo.ui.warn(_("warning: accessing hidden changesets for write "
                       "operation: %s\n") % revstr)

    # we have to use a new filtername to separate branch/tags caches until we
    # can disable those caches when revisions are dynamically pinned
    return repo.filtered('visible-hidden', pinnedrevs)
1961
1961
def _getrevsfromsymbols(repo, symbols):
    """parse the list of symbols and returns a set of revision numbers of
    hidden changesets present in symbols"""
    hidden = set()
    unfi = repo.unfiltered()
    unficl = unfi.changelog
    cl = repo.changelog
    tiprev = len(unficl)
    allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums')
    for sym in symbols:
        # First interpretation: a plain revision number.
        try:
            rev = int(sym)
        except ValueError:
            rev = None
        if rev is not None and rev <= tiprev:
            # In-range number: honored only when revnum access is enabled,
            # and only if the revision is hidden in the filtered view.
            if allowrevnums and rev not in cl:
                hidden.add(rev)
            continue
        # Otherwise (or for an out-of-range number) try it as a hex node
        # id prefix against the unfiltered changelog.
        try:
            node = resolvehexnodeidprefix(unfi, sym)
        except (error.LookupError, error.WdirUnsupported):
            node = None
        if node is not None:
            rev = unficl.rev(node)
            if rev not in cl:
                hidden.add(rev)
    return hidden
1995
1995
def bookmarkrevs(repo, mark):
    """
    Select revisions reachable by a given bookmark
    """
    # Everything leading to the bookmark, minus history belonging to other
    # heads or other bookmarks.
    expr = ("ancestors(bookmark(%s)) - "
            "ancestors(head() and not bookmark(%s)) - "
            "ancestors(bookmark() and not bookmark(%s))")
    return repo.revs(expr, mark, mark, mark)
2004
2004
def computechangesetfilesadded(ctx):
    """return the list of files added in a changeset

    A file is "added" when it is touched by the changeset and present in
    none of its parents.
    """
    return [f for f in ctx.files()
            if not any(f in p for p in ctx.parents())]
2013
2013
def computechangesetfilesremoved(ctx):
    """return the list of files removed in a changeset

    A file is "removed" when it is touched by the changeset but absent
    from the changeset itself.
    """
    return [f for f in ctx.files() if f not in ctx]
General Comments 0
You need to be logged in to leave comments. Login now