@@ -1,393 +1,393 b''
|
1 | 1 | # Copyright 2009-2010 Gregory P. Ward |
|
2 | 2 | # Copyright 2009-2010 Intelerad Medical Systems Incorporated |
|
3 | 3 | # Copyright 2010-2011 Fog Creek Software |
|
4 | 4 | # Copyright 2010-2011 Unity Technologies |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | '''setup for largefiles repositories: reposetup''' |
|
10 | 10 | from __future__ import absolute_import |
|
11 | 11 | |
|
12 | 12 | import copy |
|
13 | 13 | |
|
14 | 14 | from mercurial.i18n import _ |
|
15 | 15 | |
|
16 | 16 | from mercurial import ( |
|
17 | 17 | error, |
|
18 | 18 | localrepo, |
|
19 | 19 | match as matchmod, |
|
20 | 20 | scmutil, |
|
21 | 21 | ) |
|
22 | 22 | |
|
23 | 23 | from . import ( |
|
24 | 24 | lfcommands, |
|
25 | 25 | lfutil, |
|
26 | 26 | ) |
|
27 | 27 | |
|
28 | 28 | def reposetup(ui, repo): |
|
29 | 29 | # wire repositories should be given new wireproto functions |
|
30 | 30 | # by "proto.wirereposetup()" via "hg.wirepeersetupfuncs" |
|
31 | 31 | if not repo.local(): |
|
32 | 32 | return |
|
33 | 33 | |
|
34 | 34 | class lfilesrepo(repo.__class__): |
|
35 | 35 | # the mark to examine whether "repo" object enables largefiles or not |
|
36 | 36 | _largefilesenabled = True |
|
37 | 37 | |
|
38 | 38 | lfstatus = False |
|
39 | 39 | def status_nolfiles(self, *args, **kwargs): |
|
40 | 40 | return super(lfilesrepo, self).status(*args, **kwargs) |
|
41 | 41 | |
|
42 | 42 | # When lfstatus is set, return a context that gives the names |
|
43 | 43 | # of largefiles instead of their corresponding standins and |
|
44 | 44 | # identifies the largefiles as always binary, regardless of |
|
45 | 45 | # their actual contents. |
|
46 | 46 | def __getitem__(self, changeid): |
|
47 | 47 | ctx = super(lfilesrepo, self).__getitem__(changeid) |
|
48 | 48 | if self.lfstatus: |
|
49 | 49 | class lfilesctx(ctx.__class__): |
|
50 | 50 | def files(self): |
|
51 | 51 | filenames = super(lfilesctx, self).files() |
|
52 | 52 | return [lfutil.splitstandin(f) or f for f in filenames] |
|
53 | 53 | def manifest(self): |
|
54 | 54 | man1 = super(lfilesctx, self).manifest() |
|
55 | 55 | class lfilesmanifest(man1.__class__): |
|
56 | 56 | def __contains__(self, filename): |
|
57 | 57 | orig = super(lfilesmanifest, self).__contains__ |
|
58 | 58 | return (orig(filename) or |
|
59 | 59 | orig(lfutil.standin(filename))) |
|
60 | 60 | man1.__class__ = lfilesmanifest |
|
61 | 61 | return man1 |
|
62 | 62 | def filectx(self, path, fileid=None, filelog=None): |
|
63 | 63 | orig = super(lfilesctx, self).filectx |
|
64 | 64 | try: |
|
65 | 65 | if filelog is not None: |
|
66 | 66 | result = orig(path, fileid, filelog) |
|
67 | 67 | else: |
|
68 | 68 | result = orig(path, fileid) |
|
69 | 69 | except error.LookupError: |
|
70 | 70 | # Adding a null character will cause Mercurial to |
|
71 | 71 | # identify this as a binary file. |
|
72 | 72 | if filelog is not None: |
|
73 | 73 | result = orig(lfutil.standin(path), fileid, |
|
74 | 74 | filelog) |
|
75 | 75 | else: |
|
76 | 76 | result = orig(lfutil.standin(path), fileid) |
|
77 | 77 | olddata = result.data |
|
78 | 78 | result.data = lambda: olddata() + '\0' |
|
79 | 79 | return result |
|
80 | 80 | ctx.__class__ = lfilesctx |
|
81 | 81 | return ctx |
|
82 | 82 | |
|
83 | 83 | # Figure out the status of big files and insert them into the |
|
84 | 84 | # appropriate list in the result. Also removes standin files |
|
85 | 85 | # from the listing. Revert to the original status if |
|
86 | 86 | # self.lfstatus is False. |
|
87 | 87 | # XXX large file status is buggy when used on repo proxy. |
|
88 | 88 | # XXX this needs to be investigated. |
|
89 | 89 | @localrepo.unfilteredmethod |
|
90 | 90 | def status(self, node1='.', node2=None, match=None, ignored=False, |
|
91 | 91 | clean=False, unknown=False, listsubrepos=False): |
|
92 | 92 | listignored, listclean, listunknown = ignored, clean, unknown |
|
93 | 93 | orig = super(lfilesrepo, self).status |
|
94 | 94 | if not self.lfstatus: |
|
95 | 95 | return orig(node1, node2, match, listignored, listclean, |
|
96 | 96 | listunknown, listsubrepos) |
|
97 | 97 | |
|
98 | 98 | # some calls in this function rely on the old version of status |
|
99 | 99 | self.lfstatus = False |
|
100 | 100 | ctx1 = self[node1] |
|
101 | 101 | ctx2 = self[node2] |
|
102 | 102 | working = ctx2.rev() is None |
|
103 | 103 | parentworking = working and ctx1 == self['.'] |
|
104 | 104 | |
|
105 | 105 | if match is None: |
|
106 | 106 | match = matchmod.always() |
|
107 | 107 | |
|
108 | 108 | wlock = None |
|
109 | 109 | try: |
|
110 | 110 | try: |
|
111 | 111 | # updating the dirstate is optional |
|
112 | 112 | # so we don't wait on the lock |
|
113 | 113 | wlock = self.wlock(False) |
|
114 | 114 | except error.LockError: |
|
115 | 115 | pass |
|
116 | 116 | |
|
117 | 117 | # First check if paths or patterns were specified on the |
|
118 | 118 | # command line. If there were, and they don't match any |
|
119 | 119 | # largefiles, we should just bail here and let super |
|
120 | 120 | # handle it -- thus gaining a big performance boost. |
|
121 | 121 | lfdirstate = lfutil.openlfdirstate(ui, self) |
|
122 | 122 | if not match.always(): |
|
123 | 123 | for f in lfdirstate: |
|
124 | 124 | if match(f): |
|
125 | 125 | break |
|
126 | 126 | else: |
|
127 | 127 | return orig(node1, node2, match, listignored, listclean, |
|
128 | 128 | listunknown, listsubrepos) |
|
129 | 129 | |
|
130 | 130 | # Create a copy of match that matches standins instead |
|
131 | 131 | # of largefiles. |
|
132 | 132 | def tostandins(files): |
|
133 | 133 | if not working: |
|
134 | 134 | return files |
|
135 | 135 | newfiles = [] |
|
136 | 136 | dirstate = self.dirstate |
|
137 | 137 | for f in files: |
|
138 | 138 | sf = lfutil.standin(f) |
|
139 | 139 | if sf in dirstate: |
|
140 | 140 | newfiles.append(sf) |
|
141 | 141 | elif dirstate.hasdir(sf): |
|
142 | 142 | # Directory entries could be regular or |
|
143 | 143 | # standin, check both |
|
144 | 144 | newfiles.extend((f, sf)) |
|
145 | 145 | else: |
|
146 | 146 | newfiles.append(f) |
|
147 | 147 | return newfiles |
|
148 | 148 | |
|
149 | 149 | m = copy.copy(match) |
|
150 | 150 | m._files = tostandins(m._files) |
|
151 | 151 | |
|
152 | 152 | result = orig(node1, node2, m, ignored, clean, unknown, |
|
153 | 153 | listsubrepos) |
|
154 | 154 | if working: |
|
155 | 155 | |
|
156 | 156 | def sfindirstate(f): |
|
157 | 157 | sf = lfutil.standin(f) |
|
158 | 158 | dirstate = self.dirstate |
|
159 | 159 | return sf in dirstate or dirstate.hasdir(sf) |
|
160 | 160 | |
|
161 | 161 | match._files = [f for f in match._files |
|
162 | 162 | if sfindirstate(f)] |
|
163 | 163 | # Don't waste time getting the ignored and unknown |
|
164 | 164 | # files from lfdirstate |
|
165 | 165 | unsure, s = lfdirstate.status(match, subrepos=[], |
|
166 | 166 | ignored=False, |
|
167 | 167 | clean=listclean, |
|
168 | 168 | unknown=False) |
|
169 | 169 | (modified, added, removed, deleted, clean) = ( |
|
170 | 170 | s.modified, s.added, s.removed, s.deleted, s.clean) |
|
171 | 171 | if parentworking: |
|
172 | 172 | for lfile in unsure: |
|
173 | 173 | standin = lfutil.standin(lfile) |
|
174 | 174 | if standin not in ctx1: |
|
175 | 175 | # from second parent |
|
176 | 176 | modified.append(lfile) |
|
177 | 177 | elif (lfutil.readasstandin(ctx1[standin]) |
|
178 | 178 | != lfutil.hashfile(self.wjoin(lfile))): |
|
179 | 179 | modified.append(lfile) |
|
180 | 180 | else: |
|
181 | 181 | if listclean: |
|
182 | 182 | clean.append(lfile) |
|
183 | 183 | lfdirstate.normal(lfile) |
|
184 | 184 | else: |
|
185 | 185 | tocheck = unsure + modified + added + clean |
|
186 | 186 | modified, added, clean = [], [], [] |
|
187 | 187 | checkexec = self.dirstate._checkexec |
|
188 | 188 | |
|
189 | 189 | for lfile in tocheck: |
|
190 | 190 | standin = lfutil.standin(lfile) |
|
191 | 191 | if standin in ctx1: |
|
192 | 192 | abslfile = self.wjoin(lfile) |
|
193 | 193 | if ((lfutil.readasstandin(ctx1[standin]) != |
|
194 | 194 | lfutil.hashfile(abslfile)) or |
|
195 | 195 | (checkexec and |
|
196 | 196 | ('x' in ctx1.flags(standin)) != |
|
197 | 197 | bool(lfutil.getexecutable(abslfile)))): |
|
198 | 198 | modified.append(lfile) |
|
199 | 199 | elif listclean: |
|
200 | 200 | clean.append(lfile) |
|
201 | 201 | else: |
|
202 | 202 | added.append(lfile) |
|
203 | 203 | |
|
204 | 204 | # at this point, 'removed' contains largefiles |
|
205 | 205 | # marked as 'R' in the working context. |
|
206 | 206 | # then, largefiles not managed also in the target |
|
207 | 207 | # context should be excluded from 'removed'. |
|
208 | 208 | removed = [lfile for lfile in removed |
|
209 | 209 | if lfutil.standin(lfile) in ctx1] |
|
210 | 210 | |
|
211 | 211 | # Standins no longer found in lfdirstate have been deleted |
|
212 | 212 | for standin in ctx1.walk(lfutil.getstandinmatcher(self)): |
|
213 | 213 | lfile = lfutil.splitstandin(standin) |
|
214 | 214 | if not match(lfile): |
|
215 | 215 | continue |
|
216 | 216 | if lfile not in lfdirstate: |
|
217 | 217 | deleted.append(lfile) |
|
218 | 218 | # Sync "largefile has been removed" back to the |
|
219 | 219 | # standin. Removing a file as a side effect of |
|
220 | 220 | # running status is gross, but the alternatives (if |
|
221 | 221 | # any) are worse. |
|
222 | 222 | self.wvfs.unlinkpath(standin, ignoremissing=True) |
|
223 | 223 | |
|
224 | 224 | # Filter result lists |
|
225 | 225 | result = list(result) |
|
226 | 226 | |
|
227 | 227 | # Largefiles are not really removed when they're |
|
228 | 228 | # still in the normal dirstate. Likewise, normal |
|
229 | 229 | # files are not really removed if they are still in |
|
230 | 230 | # lfdirstate. This happens in merges where files |
|
231 | 231 | # change type. |
|
232 | 232 | removed = [f for f in removed |
|
233 | 233 | if f not in self.dirstate] |
|
234 | 234 | result[2] = [f for f in result[2] |
|
235 | 235 | if f not in lfdirstate] |
|
236 | 236 | |
|
237 | | lfiles = set(lfdirstate |
|
| 237 | lfiles = set(lfdirstate) |
|
238 | 238 | # Unknown files |
|
239 | 239 | result[4] = set(result[4]).difference(lfiles) |
|
240 | 240 | # Ignored files |
|
241 | 241 | result[5] = set(result[5]).difference(lfiles) |
|
242 | 242 | # combine normal files and largefiles |
|
243 | 243 | normals = [[fn for fn in filelist |
|
244 | 244 | if not lfutil.isstandin(fn)] |
|
245 | 245 | for filelist in result] |
|
246 | 246 | lfstatus = (modified, added, removed, deleted, [], [], |
|
247 | 247 | clean) |
|
248 | 248 | result = [sorted(list1 + list2) |
|
249 | 249 | for (list1, list2) in zip(normals, lfstatus)] |
|
250 | 250 | else: # not against working directory |
|
251 | 251 | result = [[lfutil.splitstandin(f) or f for f in items] |
|
252 | 252 | for items in result] |
|
253 | 253 | |
|
254 | 254 | if wlock: |
|
255 | 255 | lfdirstate.write() |
|
256 | 256 | |
|
257 | 257 | finally: |
|
258 | 258 | if wlock: |
|
259 | 259 | wlock.release() |
|
260 | 260 | |
|
261 | 261 | self.lfstatus = True |
|
262 | 262 | return scmutil.status(*result) |
|
263 | 263 | |
|
264 | 264 | def commitctx(self, ctx, *args, **kwargs): |
|
265 | 265 | node = super(lfilesrepo, self).commitctx(ctx, *args, **kwargs) |
|
266 | 266 | class lfilesctx(ctx.__class__): |
|
267 | 267 | def markcommitted(self, node): |
|
268 | 268 | orig = super(lfilesctx, self).markcommitted |
|
269 | 269 | return lfutil.markcommitted(orig, self, node) |
|
270 | 270 | ctx.__class__ = lfilesctx |
|
271 | 271 | return node |
|
272 | 272 | |
|
273 | 273 | # Before commit, largefile standins have not had their |
|
274 | 274 | # contents updated to reflect the hash of their largefile. |
|
275 | 275 | # Do that here. |
|
276 | 276 | def commit(self, text="", user=None, date=None, match=None, |
|
277 | 277 | force=False, editor=False, extra=None): |
|
278 | 278 | if extra is None: |
|
279 | 279 | extra = {} |
|
280 | 280 | orig = super(lfilesrepo, self).commit |
|
281 | 281 | |
|
282 | 282 | with self.wlock(): |
|
283 | 283 | lfcommithook = self._lfcommithooks[-1] |
|
284 | 284 | match = lfcommithook(self, match) |
|
285 | 285 | result = orig(text=text, user=user, date=date, match=match, |
|
286 | 286 | force=force, editor=editor, extra=extra) |
|
287 | 287 | return result |
|
288 | 288 | |
|
289 | 289 | def push(self, remote, force=False, revs=None, newbranch=False): |
|
290 | 290 | if remote.local(): |
|
291 | 291 | missing = set(self.requirements) - remote.local().supported |
|
292 | 292 | if missing: |
|
293 | 293 | msg = _("required features are not" |
|
294 | 294 | " supported in the destination:" |
|
295 | 295 | " %s") % (', '.join(sorted(missing))) |
|
296 | 296 | raise error.Abort(msg) |
|
297 | 297 | return super(lfilesrepo, self).push(remote, force=force, revs=revs, |
|
298 | 298 | newbranch=newbranch) |
|
299 | 299 | |
|
300 | 300 | # TODO: _subdirlfs should be moved into "lfutil.py", because |
|
301 | 301 | # it is referred only from "lfutil.updatestandinsbymatch" |
|
302 | 302 | def _subdirlfs(self, files, lfiles): |
|
303 | 303 | ''' |
|
304 | 304 | Adjust matched file list |
|
305 | 305 | If we pass a directory to commit whose only committable files |
|
306 | 306 | are largefiles, the core commit code aborts before finding |
|
307 | 307 | the largefiles. |
|
308 | 308 | So we do the following: |
|
309 | 309 | For directories that only have largefiles as matches, |
|
310 | 310 | we explicitly add the largefiles to the match list and remove |
|
311 | 311 | the directory. |
|
312 | 312 | In other cases, we leave the match list unmodified. |
|
313 | 313 | ''' |
|
314 | 314 | actualfiles = [] |
|
315 | 315 | dirs = [] |
|
316 | 316 | regulars = [] |
|
317 | 317 | |
|
318 | 318 | for f in files: |
|
319 | 319 | if lfutil.isstandin(f + '/'): |
|
320 | 320 | raise error.Abort( |
|
321 | 321 | _('file "%s" is a largefile standin') % f, |
|
322 | 322 | hint=('commit the largefile itself instead')) |
|
323 | 323 | # Scan directories |
|
324 | 324 | if self.wvfs.isdir(f): |
|
325 | 325 | dirs.append(f) |
|
326 | 326 | else: |
|
327 | 327 | regulars.append(f) |
|
328 | 328 | |
|
329 | 329 | for f in dirs: |
|
330 | 330 | matcheddir = False |
|
331 | 331 | d = self.dirstate.normalize(f) + '/' |
|
332 | 332 | # Check for matched normal files |
|
333 | 333 | for mf in regulars: |
|
334 | 334 | if self.dirstate.normalize(mf).startswith(d): |
|
335 | 335 | actualfiles.append(f) |
|
336 | 336 | matcheddir = True |
|
337 | 337 | break |
|
338 | 338 | if not matcheddir: |
|
339 | 339 | # If no normal match, manually append |
|
340 | 340 | # any matching largefiles |
|
341 | 341 | for lf in lfiles: |
|
342 | 342 | if self.dirstate.normalize(lf).startswith(d): |
|
343 | 343 | actualfiles.append(lf) |
|
344 | 344 | if not matcheddir: |
|
345 | 345 | # There may still be normal files in the dir, so |
|
346 | 346 | # add a directory to the list, which |
|
347 | 347 | # forces status/dirstate to walk all files and |
|
348 | 348 | # call the match function on the matcher, even |
|
349 | 349 | # on case sensitive filesystems. |
|
350 | 350 | actualfiles.append('.') |
|
351 | 351 | matcheddir = True |
|
352 | 352 | # Nothing in dir, so readd it |
|
353 | 353 | # and let commit reject it |
|
354 | 354 | if not matcheddir: |
|
355 | 355 | actualfiles.append(f) |
|
356 | 356 | |
|
357 | 357 | # Always add normal files |
|
358 | 358 | actualfiles += regulars |
|
359 | 359 | return actualfiles |
|
360 | 360 | |
|
361 | 361 | repo.__class__ = lfilesrepo |
|
362 | 362 | |
|
363 | 363 | # stack of hooks being executed before committing. |
|
364 | 364 | # only last element ("_lfcommithooks[-1]") is used for each committing. |
|
365 | 365 | repo._lfcommithooks = [lfutil.updatestandinsbymatch] |
|
366 | 366 | |
|
367 | 367 | # Stack of status writer functions taking "*msg, **opts" arguments |
|
368 | 368 | # like "ui.status()". Only last element ("_lfstatuswriters[-1]") |
|
369 | 369 | # is used to write status out. |
|
370 | 370 | repo._lfstatuswriters = [ui.status] |
|
371 | 371 | |
|
372 | 372 | def prepushoutgoinghook(pushop): |
|
373 | 373 | """Push largefiles for pushop before pushing revisions.""" |
|
374 | 374 | lfrevs = pushop.lfrevs |
|
375 | 375 | if lfrevs is None: |
|
376 | 376 | lfrevs = pushop.outgoing.missing |
|
377 | 377 | if lfrevs: |
|
378 | 378 | toupload = set() |
|
379 | 379 | addfunc = lambda fn, lfhash: toupload.add(lfhash) |
|
380 | 380 | lfutil.getlfilestoupload(pushop.repo, lfrevs, |
|
381 | 381 | addfunc) |
|
382 | 382 | lfcommands.uploadlfiles(ui, pushop.repo, pushop.remote, toupload) |
|
383 | 383 | repo.prepushoutgoinghooks.add("largefiles", prepushoutgoinghook) |
|
384 | 384 | |
|
385 | 385 | def checkrequireslfiles(ui, repo, **kwargs): |
|
386 | 386 | if 'largefiles' not in repo.requirements and any( |
|
387 | 387 | lfutil.shortname+'/' in f[0] for f in repo.store.datafiles()): |
|
388 | 388 | repo.requirements.add('largefiles') |
|
389 | 389 | repo._writerequirements() |
|
390 | 390 | |
|
391 | 391 | ui.setconfig('hooks', 'changegroup.lfiles', checkrequireslfiles, |
|
392 | 392 | 'largefiles') |
|
393 | 393 | ui.setconfig('hooks', 'commit.lfiles', checkrequireslfiles, 'largefiles') |
@@ -1,3499 +1,3499 b''
|
1 | 1 | # debugcommands.py - command processing for debug* commands |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2005-2016 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import codecs |
|
11 | 11 | import collections |
|
12 | 12 | import difflib |
|
13 | 13 | import errno |
|
14 | 14 | import operator |
|
15 | 15 | import os |
|
16 | 16 | import random |
|
17 | 17 | import re |
|
18 | 18 | import socket |
|
19 | 19 | import ssl |
|
20 | 20 | import stat |
|
21 | 21 | import string |
|
22 | 22 | import subprocess |
|
23 | 23 | import sys |
|
24 | 24 | import time |
|
25 | 25 | |
|
26 | 26 | from .i18n import _ |
|
27 | 27 | from .node import ( |
|
28 | 28 | bin, |
|
29 | 29 | hex, |
|
30 | 30 | nullhex, |
|
31 | 31 | nullid, |
|
32 | 32 | nullrev, |
|
33 | 33 | short, |
|
34 | 34 | ) |
|
35 | 35 | from . import ( |
|
36 | 36 | bundle2, |
|
37 | 37 | changegroup, |
|
38 | 38 | cmdutil, |
|
39 | 39 | color, |
|
40 | 40 | context, |
|
41 | 41 | copies, |
|
42 | 42 | dagparser, |
|
43 | 43 | encoding, |
|
44 | 44 | error, |
|
45 | 45 | exchange, |
|
46 | 46 | extensions, |
|
47 | 47 | filemerge, |
|
48 | 48 | filesetlang, |
|
49 | 49 | formatter, |
|
50 | 50 | hg, |
|
51 | 51 | httppeer, |
|
52 | 52 | localrepo, |
|
53 | 53 | lock as lockmod, |
|
54 | 54 | logcmdutil, |
|
55 | 55 | merge as mergemod, |
|
56 | 56 | obsolete, |
|
57 | 57 | obsutil, |
|
58 | 58 | phases, |
|
59 | 59 | policy, |
|
60 | 60 | pvec, |
|
61 | 61 | pycompat, |
|
62 | 62 | registrar, |
|
63 | 63 | repair, |
|
64 | 64 | revlog, |
|
65 | 65 | revset, |
|
66 | 66 | revsetlang, |
|
67 | 67 | scmutil, |
|
68 | 68 | setdiscovery, |
|
69 | 69 | simplemerge, |
|
70 | 70 | sshpeer, |
|
71 | 71 | sslutil, |
|
72 | 72 | streamclone, |
|
73 | 73 | templater, |
|
74 | 74 | treediscovery, |
|
75 | 75 | upgrade, |
|
76 | 76 | url as urlmod, |
|
77 | 77 | util, |
|
78 | 78 | vfs as vfsmod, |
|
79 | 79 | wireprotoframing, |
|
80 | 80 | wireprotoserver, |
|
81 | 81 | wireprotov2peer, |
|
82 | 82 | ) |
|
83 | 83 | from .utils import ( |
|
84 | 84 | cborutil, |
|
85 | 85 | compression, |
|
86 | 86 | dateutil, |
|
87 | 87 | procutil, |
|
88 | 88 | stringutil, |
|
89 | 89 | ) |
|
90 | 90 | |
|
91 | 91 | from .revlogutils import ( |
|
92 | 92 | deltas as deltautil |
|
93 | 93 | ) |
|
94 | 94 | |
|
95 | 95 | release = lockmod.release |
|
96 | 96 | |
|
97 | 97 | command = registrar.command() |
|
98 | 98 | |
|
99 | 99 | @command('debugancestor', [], _('[INDEX] REV1 REV2'), optionalrepo=True) |
|
100 | 100 | def debugancestor(ui, repo, *args): |
|
101 | 101 | """find the ancestor revision of two revisions in a given index""" |
|
102 | 102 | if len(args) == 3: |
|
103 | 103 | index, rev1, rev2 = args |
|
104 | 104 | r = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), index) |
|
105 | 105 | lookup = r.lookup |
|
106 | 106 | elif len(args) == 2: |
|
107 | 107 | if not repo: |
|
108 | 108 | raise error.Abort(_('there is no Mercurial repository here ' |
|
109 | 109 | '(.hg not found)')) |
|
110 | 110 | rev1, rev2 = args |
|
111 | 111 | r = repo.changelog |
|
112 | 112 | lookup = repo.lookup |
|
113 | 113 | else: |
|
114 | 114 | raise error.Abort(_('either two or three arguments required')) |
|
115 | 115 | a = r.ancestor(lookup(rev1), lookup(rev2)) |
|
116 | 116 | ui.write('%d:%s\n' % (r.rev(a), hex(a))) |
|
117 | 117 | |
|
118 | 118 | @command('debugapplystreamclonebundle', [], 'FILE') |
|
119 | 119 | def debugapplystreamclonebundle(ui, repo, fname): |
|
120 | 120 | """apply a stream clone bundle file""" |
|
121 | 121 | f = hg.openpath(ui, fname) |
|
122 | 122 | gen = exchange.readbundle(ui, f, fname) |
|
123 | 123 | gen.apply(repo) |
|
124 | 124 | |
|
125 | 125 | @command('debugbuilddag', |
|
126 | 126 | [('m', 'mergeable-file', None, _('add single file mergeable changes')), |
|
127 | 127 | ('o', 'overwritten-file', None, _('add single file all revs overwrite')), |
|
128 | 128 | ('n', 'new-file', None, _('add new file at each rev'))], |
|
129 | 129 | _('[OPTION]... [TEXT]')) |
|
130 | 130 | def debugbuilddag(ui, repo, text=None, |
|
131 | 131 | mergeable_file=False, |
|
132 | 132 | overwritten_file=False, |
|
133 | 133 | new_file=False): |
|
134 | 134 | """builds a repo with a given DAG from scratch in the current empty repo |
|
135 | 135 | |
|
136 | 136 | The description of the DAG is read from stdin if not given on the |
|
137 | 137 | command line. |
|
138 | 138 | |
|
139 | 139 | Elements: |
|
140 | 140 | |
|
141 | 141 | - "+n" is a linear run of n nodes based on the current default parent |
|
142 | 142 | - "." is a single node based on the current default parent |
|
143 | 143 | - "$" resets the default parent to null (implied at the start); |
|
144 | 144 | otherwise the default parent is always the last node created |
|
145 | 145 | - "<p" sets the default parent to the backref p |
|
146 | 146 | - "*p" is a fork at parent p, which is a backref |
|
147 | 147 | - "*p1/p2" is a merge of parents p1 and p2, which are backrefs |
|
148 | 148 | - "/p2" is a merge of the preceding node and p2 |
|
149 | 149 | - ":tag" defines a local tag for the preceding node |
|
150 | 150 | - "@branch" sets the named branch for subsequent nodes |
|
151 | 151 | - "#...\\n" is a comment up to the end of the line |
|
152 | 152 | |
|
153 | 153 | Whitespace between the above elements is ignored. |
|
154 | 154 | |
|
155 | 155 | A backref is either |
|
156 | 156 | |
|
157 | 157 | - a number n, which references the node curr-n, where curr is the current |
|
158 | 158 | node, or |
|
159 | 159 | - the name of a local tag you placed earlier using ":tag", or |
|
160 | 160 | - empty to denote the default parent. |
|
161 | 161 | |
|
162 | 162 | All string valued-elements are either strictly alphanumeric, or must |
|
163 | 163 | be enclosed in double quotes ("..."), with "\\" as escape character. |
|
164 | 164 | """ |
|
165 | 165 | |
|
166 | 166 | if text is None: |
|
167 | 167 | ui.status(_("reading DAG from stdin\n")) |
|
168 | 168 | text = ui.fin.read() |
|
169 | 169 | |
|
170 | 170 | cl = repo.changelog |
|
171 | 171 | if len(cl) > 0: |
|
172 | 172 | raise error.Abort(_('repository is not empty')) |
|
173 | 173 | |
|
174 | 174 | # determine number of revs in DAG |
|
175 | 175 | total = 0 |
|
176 | 176 | for type, data in dagparser.parsedag(text): |
|
177 | 177 | if type == 'n': |
|
178 | 178 | total += 1 |
|
179 | 179 | |
|
180 | 180 | if mergeable_file: |
|
181 | 181 | linesperrev = 2 |
|
182 | 182 | # make a file with k lines per rev |
|
183 | 183 | initialmergedlines = ['%d' % i |
|
184 | 184 | for i in pycompat.xrange(0, total * linesperrev)] |
|
185 | 185 | initialmergedlines.append("") |
|
186 | 186 | |
|
187 | 187 | tags = [] |
|
188 | 188 | progress = ui.makeprogress(_('building'), unit=_('revisions'), |
|
189 | 189 | total=total) |
|
190 | 190 | with progress, repo.wlock(), repo.lock(), repo.transaction("builddag"): |
|
191 | 191 | at = -1 |
|
192 | 192 | atbranch = 'default' |
|
193 | 193 | nodeids = [] |
|
194 | 194 | id = 0 |
|
195 | 195 | progress.update(id) |
|
196 | 196 | for type, data in dagparser.parsedag(text): |
|
197 | 197 | if type == 'n': |
|
198 | 198 | ui.note(('node %s\n' % pycompat.bytestr(data))) |
|
199 | 199 | id, ps = data |
|
200 | 200 | |
|
201 | 201 | files = [] |
|
202 | 202 | filecontent = {} |
|
203 | 203 | |
|
204 | 204 | p2 = None |
|
205 | 205 | if mergeable_file: |
|
206 | 206 | fn = "mf" |
|
207 | 207 | p1 = repo[ps[0]] |
|
208 | 208 | if len(ps) > 1: |
|
209 | 209 | p2 = repo[ps[1]] |
|
210 | 210 | pa = p1.ancestor(p2) |
|
211 | 211 | base, local, other = [x[fn].data() for x in (pa, p1, |
|
212 | 212 | p2)] |
|
213 | 213 | m3 = simplemerge.Merge3Text(base, local, other) |
|
214 | 214 | ml = [l.strip() for l in m3.merge_lines()] |
|
215 | 215 | ml.append("") |
|
216 | 216 | elif at > 0: |
|
217 | 217 | ml = p1[fn].data().split("\n") |
|
218 | 218 | else: |
|
219 | 219 | ml = initialmergedlines |
|
220 | 220 | ml[id * linesperrev] += " r%i" % id |
|
221 | 221 | mergedtext = "\n".join(ml) |
|
222 | 222 | files.append(fn) |
|
223 | 223 | filecontent[fn] = mergedtext |
|
224 | 224 | |
|
225 | 225 | if overwritten_file: |
|
226 | 226 | fn = "of" |
|
227 | 227 | files.append(fn) |
|
228 | 228 | filecontent[fn] = "r%i\n" % id |
|
229 | 229 | |
|
230 | 230 | if new_file: |
|
231 | 231 | fn = "nf%i" % id |
|
232 | 232 | files.append(fn) |
|
233 | 233 | filecontent[fn] = "r%i\n" % id |
|
234 | 234 | if len(ps) > 1: |
|
235 | 235 | if not p2: |
|
236 | 236 | p2 = repo[ps[1]] |
|
237 | 237 | for fn in p2: |
|
238 | 238 | if fn.startswith("nf"): |
|
239 | 239 | files.append(fn) |
|
240 | 240 | filecontent[fn] = p2[fn].data() |
|
241 | 241 | |
|
242 | 242 | def fctxfn(repo, cx, path): |
|
243 | 243 | if path in filecontent: |
|
244 | 244 | return context.memfilectx(repo, cx, path, |
|
245 | 245 | filecontent[path]) |
|
246 | 246 | return None |
|
247 | 247 | |
|
248 | 248 | if len(ps) == 0 or ps[0] < 0: |
|
249 | 249 | pars = [None, None] |
|
250 | 250 | elif len(ps) == 1: |
|
251 | 251 | pars = [nodeids[ps[0]], None] |
|
252 | 252 | else: |
|
253 | 253 | pars = [nodeids[p] for p in ps] |
|
254 | 254 | cx = context.memctx(repo, pars, "r%i" % id, files, fctxfn, |
|
255 | 255 | date=(id, 0), |
|
256 | 256 | user="debugbuilddag", |
|
257 | 257 | extra={'branch': atbranch}) |
|
258 | 258 | nodeid = repo.commitctx(cx) |
|
259 | 259 | nodeids.append(nodeid) |
|
260 | 260 | at = id |
|
261 | 261 | elif type == 'l': |
|
262 | 262 | id, name = data |
|
263 | 263 | ui.note(('tag %s\n' % name)) |
|
264 | 264 | tags.append("%s %s\n" % (hex(repo.changelog.node(id)), name)) |
|
265 | 265 | elif type == 'a': |
|
266 | 266 | ui.note(('branch %s\n' % data)) |
|
267 | 267 | atbranch = data |
|
268 | 268 | progress.update(id) |
|
269 | 269 | |
|
270 | 270 | if tags: |
|
271 | 271 | repo.vfs.write("localtags", "".join(tags)) |
|
272 | 272 | |
|
273 | 273 | def _debugchangegroup(ui, gen, all=None, indent=0, **opts): |
|
274 | 274 | indent_string = ' ' * indent |
|
275 | 275 | if all: |
|
276 | 276 | ui.write(("%sformat: id, p1, p2, cset, delta base, len(delta)\n") |
|
277 | 277 | % indent_string) |
|
278 | 278 | |
|
279 | 279 | def showchunks(named): |
|
280 | 280 | ui.write("\n%s%s\n" % (indent_string, named)) |
|
281 | 281 | for deltadata in gen.deltaiter(): |
|
282 | 282 | node, p1, p2, cs, deltabase, delta, flags = deltadata |
|
283 | 283 | ui.write("%s%s %s %s %s %s %d\n" % |
|
284 | 284 | (indent_string, hex(node), hex(p1), hex(p2), |
|
285 | 285 | hex(cs), hex(deltabase), len(delta))) |
|
286 | 286 | |
|
287 | 287 | chunkdata = gen.changelogheader() |
|
288 | 288 | showchunks("changelog") |
|
289 | 289 | chunkdata = gen.manifestheader() |
|
290 | 290 | showchunks("manifest") |
|
291 | 291 | for chunkdata in iter(gen.filelogheader, {}): |
|
292 | 292 | fname = chunkdata['filename'] |
|
293 | 293 | showchunks(fname) |
|
294 | 294 | else: |
|
295 | 295 | if isinstance(gen, bundle2.unbundle20): |
|
296 | 296 | raise error.Abort(_('use debugbundle2 for this file')) |
|
297 | 297 | chunkdata = gen.changelogheader() |
|
298 | 298 | for deltadata in gen.deltaiter(): |
|
299 | 299 | node, p1, p2, cs, deltabase, delta, flags = deltadata |
|
300 | 300 | ui.write("%s%s\n" % (indent_string, hex(node))) |
|
301 | 301 | |
|
302 | 302 | def _debugobsmarkers(ui, part, indent=0, **opts): |
|
303 | 303 | """display version and markers contained in 'data'""" |
|
304 | 304 | opts = pycompat.byteskwargs(opts) |
|
305 | 305 | data = part.read() |
|
306 | 306 | indent_string = ' ' * indent |
|
307 | 307 | try: |
|
308 | 308 | version, markers = obsolete._readmarkers(data) |
|
309 | 309 | except error.UnknownVersion as exc: |
|
310 | 310 | msg = "%sunsupported version: %s (%d bytes)\n" |
|
311 | 311 | msg %= indent_string, exc.version, len(data) |
|
312 | 312 | ui.write(msg) |
|
313 | 313 | else: |
|
314 | 314 | msg = "%sversion: %d (%d bytes)\n" |
|
315 | 315 | msg %= indent_string, version, len(data) |
|
316 | 316 | ui.write(msg) |
|
317 | 317 | fm = ui.formatter('debugobsolete', opts) |
|
318 | 318 | for rawmarker in sorted(markers): |
|
319 | 319 | m = obsutil.marker(None, rawmarker) |
|
320 | 320 | fm.startitem() |
|
321 | 321 | fm.plain(indent_string) |
|
322 | 322 | cmdutil.showmarker(fm, m) |
|
323 | 323 | fm.end() |
|
324 | 324 | |
|
325 | 325 | def _debugphaseheads(ui, data, indent=0): |
|
326 | 326 | """display version and markers contained in 'data'""" |
|
327 | 327 | indent_string = ' ' * indent |
|
328 | 328 | headsbyphase = phases.binarydecode(data) |
|
329 | 329 | for phase in phases.allphases: |
|
330 | 330 | for head in headsbyphase[phase]: |
|
331 | 331 | ui.write(indent_string) |
|
332 | 332 | ui.write('%s %s\n' % (hex(head), phases.phasenames[phase])) |
|
333 | 333 | |
|
334 | 334 | def _quasirepr(thing): |
|
335 | 335 | if isinstance(thing, (dict, util.sortdict, collections.OrderedDict)): |
|
336 | 336 | return '{%s}' % ( |
|
337 | 337 | b', '.join(b'%s: %s' % (k, thing[k]) for k in sorted(thing))) |
|
338 | 338 | return pycompat.bytestr(repr(thing)) |
|
339 | 339 | |
|
340 | 340 | def _debugbundle2(ui, gen, all=None, **opts): |
|
341 | 341 | """lists the contents of a bundle2""" |
|
342 | 342 | if not isinstance(gen, bundle2.unbundle20): |
|
343 | 343 | raise error.Abort(_('not a bundle2 file')) |
|
344 | 344 | ui.write(('Stream params: %s\n' % _quasirepr(gen.params))) |
|
345 | 345 | parttypes = opts.get(r'part_type', []) |
|
346 | 346 | for part in gen.iterparts(): |
|
347 | 347 | if parttypes and part.type not in parttypes: |
|
348 | 348 | continue |
|
349 | 349 | msg = '%s -- %s (mandatory: %r)\n' |
|
350 | 350 | ui.write((msg % (part.type, _quasirepr(part.params), part.mandatory))) |
|
351 | 351 | if part.type == 'changegroup': |
|
352 | 352 | version = part.params.get('version', '01') |
|
353 | 353 | cg = changegroup.getunbundler(version, part, 'UN') |
|
354 | 354 | if not ui.quiet: |
|
355 | 355 | _debugchangegroup(ui, cg, all=all, indent=4, **opts) |
|
356 | 356 | if part.type == 'obsmarkers': |
|
357 | 357 | if not ui.quiet: |
|
358 | 358 | _debugobsmarkers(ui, part, indent=4, **opts) |
|
359 | 359 | if part.type == 'phase-heads': |
|
360 | 360 | if not ui.quiet: |
|
361 | 361 | _debugphaseheads(ui, part, indent=4) |
|
362 | 362 | |
|
363 | 363 | @command('debugbundle', |
|
364 | 364 | [('a', 'all', None, _('show all details')), |
|
365 | 365 | ('', 'part-type', [], _('show only the named part type')), |
|
366 | 366 | ('', 'spec', None, _('print the bundlespec of the bundle'))], |
|
367 | 367 | _('FILE'), |
|
368 | 368 | norepo=True) |
|
369 | 369 | def debugbundle(ui, bundlepath, all=None, spec=None, **opts): |
|
370 | 370 | """lists the contents of a bundle""" |
|
371 | 371 | with hg.openpath(ui, bundlepath) as f: |
|
372 | 372 | if spec: |
|
373 | 373 | spec = exchange.getbundlespec(ui, f) |
|
374 | 374 | ui.write('%s\n' % spec) |
|
375 | 375 | return |
|
376 | 376 | |
|
377 | 377 | gen = exchange.readbundle(ui, f, bundlepath) |
|
378 | 378 | if isinstance(gen, bundle2.unbundle20): |
|
379 | 379 | return _debugbundle2(ui, gen, all=all, **opts) |
|
380 | 380 | _debugchangegroup(ui, gen, all=all, **opts) |
|
381 | 381 | |
|
382 | 382 | @command('debugcapabilities', |
|
383 | 383 | [], _('PATH'), |
|
384 | 384 | norepo=True) |
|
385 | 385 | def debugcapabilities(ui, path, **opts): |
|
386 | 386 | """lists the capabilities of a remote peer""" |
|
387 | 387 | opts = pycompat.byteskwargs(opts) |
|
388 | 388 | peer = hg.peer(ui, opts, path) |
|
389 | 389 | caps = peer.capabilities() |
|
390 | 390 | ui.write(('Main capabilities:\n')) |
|
391 | 391 | for c in sorted(caps): |
|
392 | 392 | ui.write((' %s\n') % c) |
|
393 | 393 | b2caps = bundle2.bundle2caps(peer) |
|
394 | 394 | if b2caps: |
|
395 | 395 | ui.write(('Bundle2 capabilities:\n')) |
|
396 | 396 | for key, values in sorted(b2caps.iteritems()): |
|
397 | 397 | ui.write((' %s\n') % key) |
|
398 | 398 | for v in values: |
|
399 | 399 | ui.write((' %s\n') % v) |
|
400 | 400 | |
|
401 | 401 | @command('debugcheckstate', [], '') |
|
402 | 402 | def debugcheckstate(ui, repo): |
|
403 | 403 | """validate the correctness of the current dirstate""" |
|
404 | 404 | parent1, parent2 = repo.dirstate.parents() |
|
405 | 405 | m1 = repo[parent1].manifest() |
|
406 | 406 | m2 = repo[parent2].manifest() |
|
407 | 407 | errors = 0 |
|
408 | 408 | for f in repo.dirstate: |
|
409 | 409 | state = repo.dirstate[f] |
|
410 | 410 | if state in "nr" and f not in m1: |
|
411 | 411 | ui.warn(_("%s in state %s, but not in manifest1\n") % (f, state)) |
|
412 | 412 | errors += 1 |
|
413 | 413 | if state in "a" and f in m1: |
|
414 | 414 | ui.warn(_("%s in state %s, but also in manifest1\n") % (f, state)) |
|
415 | 415 | errors += 1 |
|
416 | 416 | if state in "m" and f not in m1 and f not in m2: |
|
417 | 417 | ui.warn(_("%s in state %s, but not in either manifest\n") % |
|
418 | 418 | (f, state)) |
|
419 | 419 | errors += 1 |
|
420 | 420 | for f in m1: |
|
421 | 421 | state = repo.dirstate[f] |
|
422 | 422 | if state not in "nrm": |
|
423 | 423 | ui.warn(_("%s in manifest1, but listed as state %s") % (f, state)) |
|
424 | 424 | errors += 1 |
|
425 | 425 | if errors: |
|
426 | 426 | error = _(".hg/dirstate inconsistent with current parent's manifest") |
|
427 | 427 | raise error.Abort(error) |
|
428 | 428 | |
|
429 | 429 | @command('debugcolor', |
|
430 | 430 | [('', 'style', None, _('show all configured styles'))], |
|
431 | 431 | 'hg debugcolor') |
|
432 | 432 | def debugcolor(ui, repo, **opts): |
|
433 | 433 | """show available color, effects or style""" |
|
434 | 434 | ui.write(('color mode: %s\n') % stringutil.pprint(ui._colormode)) |
|
435 | 435 | if opts.get(r'style'): |
|
436 | 436 | return _debugdisplaystyle(ui) |
|
437 | 437 | else: |
|
438 | 438 | return _debugdisplaycolor(ui) |
|
439 | 439 | |
|
440 | 440 | def _debugdisplaycolor(ui): |
|
441 | 441 | ui = ui.copy() |
|
442 | 442 | ui._styles.clear() |
|
443 | 443 | for effect in color._activeeffects(ui).keys(): |
|
444 | 444 | ui._styles[effect] = effect |
|
445 | 445 | if ui._terminfoparams: |
|
446 | 446 | for k, v in ui.configitems('color'): |
|
447 | 447 | if k.startswith('color.'): |
|
448 | 448 | ui._styles[k] = k[6:] |
|
449 | 449 | elif k.startswith('terminfo.'): |
|
450 | 450 | ui._styles[k] = k[9:] |
|
451 | 451 | ui.write(_('available colors:\n')) |
|
452 | 452 | # sort label with a '_' after the other to group '_background' entry. |
|
453 | 453 | items = sorted(ui._styles.items(), |
|
454 | 454 | key=lambda i: ('_' in i[0], i[0], i[1])) |
|
455 | 455 | for colorname, label in items: |
|
456 | 456 | ui.write(('%s\n') % colorname, label=label) |
|
457 | 457 | |
|
458 | 458 | def _debugdisplaystyle(ui): |
|
459 | 459 | ui.write(_('available style:\n')) |
|
460 | 460 | if not ui._styles: |
|
461 | 461 | return |
|
462 | 462 | width = max(len(s) for s in ui._styles) |
|
463 | 463 | for label, effects in sorted(ui._styles.items()): |
|
464 | 464 | ui.write('%s' % label, label=label) |
|
465 | 465 | if effects: |
|
466 | 466 | # 50 |
|
467 | 467 | ui.write(': ') |
|
468 | 468 | ui.write(' ' * (max(0, width - len(label)))) |
|
469 | 469 | ui.write(', '.join(ui.label(e, e) for e in effects.split())) |
|
470 | 470 | ui.write('\n') |
|
471 | 471 | |
|
472 | 472 | @command('debugcreatestreamclonebundle', [], 'FILE') |
|
473 | 473 | def debugcreatestreamclonebundle(ui, repo, fname): |
|
474 | 474 | """create a stream clone bundle file |
|
475 | 475 | |
|
476 | 476 | Stream bundles are special bundles that are essentially archives of |
|
477 | 477 | revlog files. They are commonly used for cloning very quickly. |
|
478 | 478 | """ |
|
479 | 479 | # TODO we may want to turn this into an abort when this functionality |
|
480 | 480 | # is moved into `hg bundle`. |
|
481 | 481 | if phases.hassecret(repo): |
|
482 | 482 | ui.warn(_('(warning: stream clone bundle will contain secret ' |
|
483 | 483 | 'revisions)\n')) |
|
484 | 484 | |
|
485 | 485 | requirements, gen = streamclone.generatebundlev1(repo) |
|
486 | 486 | changegroup.writechunks(ui, gen, fname) |
|
487 | 487 | |
|
488 | 488 | ui.write(_('bundle requirements: %s\n') % ', '.join(sorted(requirements))) |
|
489 | 489 | |
|
490 | 490 | @command('debugdag', |
|
491 | 491 | [('t', 'tags', None, _('use tags as labels')), |
|
492 | 492 | ('b', 'branches', None, _('annotate with branch names')), |
|
493 | 493 | ('', 'dots', None, _('use dots for runs')), |
|
494 | 494 | ('s', 'spaces', None, _('separate elements by spaces'))], |
|
495 | 495 | _('[OPTION]... [FILE [REV]...]'), |
|
496 | 496 | optionalrepo=True) |
|
497 | 497 | def debugdag(ui, repo, file_=None, *revs, **opts): |
|
498 | 498 | """format the changelog or an index DAG as a concise textual description |
|
499 | 499 | |
|
500 | 500 | If you pass a revlog index, the revlog's DAG is emitted. If you list |
|
501 | 501 | revision numbers, they get labeled in the output as rN. |
|
502 | 502 | |
|
503 | 503 | Otherwise, the changelog DAG of the current repo is emitted. |
|
504 | 504 | """ |
|
505 | 505 | spaces = opts.get(r'spaces') |
|
506 | 506 | dots = opts.get(r'dots') |
|
507 | 507 | if file_: |
|
508 | 508 | rlog = revlog.revlog(vfsmod.vfs(encoding.getcwd(), audit=False), |
|
509 | 509 | file_) |
|
510 | 510 | revs = set((int(r) for r in revs)) |
|
511 | 511 | def events(): |
|
512 | 512 | for r in rlog: |
|
513 | 513 | yield 'n', (r, list(p for p in rlog.parentrevs(r) |
|
514 | 514 | if p != -1)) |
|
515 | 515 | if r in revs: |
|
516 | 516 | yield 'l', (r, "r%i" % r) |
|
517 | 517 | elif repo: |
|
518 | 518 | cl = repo.changelog |
|
519 | 519 | tags = opts.get(r'tags') |
|
520 | 520 | branches = opts.get(r'branches') |
|
521 | 521 | if tags: |
|
522 | 522 | labels = {} |
|
523 | 523 | for l, n in repo.tags().items(): |
|
524 | 524 | labels.setdefault(cl.rev(n), []).append(l) |
|
525 | 525 | def events(): |
|
526 | 526 | b = "default" |
|
527 | 527 | for r in cl: |
|
528 | 528 | if branches: |
|
529 | 529 | newb = cl.read(cl.node(r))[5]['branch'] |
|
530 | 530 | if newb != b: |
|
531 | 531 | yield 'a', newb |
|
532 | 532 | b = newb |
|
533 | 533 | yield 'n', (r, list(p for p in cl.parentrevs(r) |
|
534 | 534 | if p != -1)) |
|
535 | 535 | if tags: |
|
536 | 536 | ls = labels.get(r) |
|
537 | 537 | if ls: |
|
538 | 538 | for l in ls: |
|
539 | 539 | yield 'l', (r, l) |
|
540 | 540 | else: |
|
541 | 541 | raise error.Abort(_('need repo for changelog dag')) |
|
542 | 542 | |
|
543 | 543 | for line in dagparser.dagtextlines(events(), |
|
544 | 544 | addspaces=spaces, |
|
545 | 545 | wraplabels=True, |
|
546 | 546 | wrapannotations=True, |
|
547 | 547 | wrapnonlinear=dots, |
|
548 | 548 | usedots=dots, |
|
549 | 549 | maxlinewidth=70): |
|
550 | 550 | ui.write(line) |
|
551 | 551 | ui.write("\n") |
|
552 | 552 | |
|
553 | 553 | @command('debugdata', cmdutil.debugrevlogopts, _('-c|-m|FILE REV')) |
|
554 | 554 | def debugdata(ui, repo, file_, rev=None, **opts): |
|
555 | 555 | """dump the contents of a data file revision""" |
|
556 | 556 | opts = pycompat.byteskwargs(opts) |
|
557 | 557 | if opts.get('changelog') or opts.get('manifest') or opts.get('dir'): |
|
558 | 558 | if rev is not None: |
|
559 | 559 | raise error.CommandError('debugdata', _('invalid arguments')) |
|
560 | 560 | file_, rev = None, file_ |
|
561 | 561 | elif rev is None: |
|
562 | 562 | raise error.CommandError('debugdata', _('invalid arguments')) |
|
563 | 563 | r = cmdutil.openstorage(repo, 'debugdata', file_, opts) |
|
564 | 564 | try: |
|
565 | 565 | ui.write(r.rawdata(r.lookup(rev))) |
|
566 | 566 | except KeyError: |
|
567 | 567 | raise error.Abort(_('invalid revision identifier %s') % rev) |
|
568 | 568 | |
|
569 | 569 | @command('debugdate', |
|
570 | 570 | [('e', 'extended', None, _('try extended date formats'))], |
|
571 | 571 | _('[-e] DATE [RANGE]'), |
|
572 | 572 | norepo=True, optionalrepo=True) |
|
573 | 573 | def debugdate(ui, date, range=None, **opts): |
|
574 | 574 | """parse and display a date""" |
|
575 | 575 | if opts[r"extended"]: |
|
576 | 576 | d = dateutil.parsedate(date, util.extendeddateformats) |
|
577 | 577 | else: |
|
578 | 578 | d = dateutil.parsedate(date) |
|
579 | 579 | ui.write(("internal: %d %d\n") % d) |
|
580 | 580 | ui.write(("standard: %s\n") % dateutil.datestr(d)) |
|
581 | 581 | if range: |
|
582 | 582 | m = dateutil.matchdate(range) |
|
583 | 583 | ui.write(("match: %s\n") % m(d[0])) |
|
584 | 584 | |
|
585 | 585 | @command('debugdeltachain', |
|
586 | 586 | cmdutil.debugrevlogopts + cmdutil.formatteropts, |
|
587 | 587 | _('-c|-m|FILE'), |
|
588 | 588 | optionalrepo=True) |
|
589 | 589 | def debugdeltachain(ui, repo, file_=None, **opts): |
|
590 | 590 | """dump information about delta chains in a revlog |
|
591 | 591 | |
|
592 | 592 | Output can be templatized. Available template keywords are: |
|
593 | 593 | |
|
594 | 594 | :``rev``: revision number |
|
595 | 595 | :``chainid``: delta chain identifier (numbered by unique base) |
|
596 | 596 | :``chainlen``: delta chain length to this revision |
|
597 | 597 | :``prevrev``: previous revision in delta chain |
|
598 | 598 | :``deltatype``: role of delta / how it was computed |
|
599 | 599 | :``compsize``: compressed size of revision |
|
600 | 600 | :``uncompsize``: uncompressed size of revision |
|
601 | 601 | :``chainsize``: total size of compressed revisions in chain |
|
602 | 602 | :``chainratio``: total chain size divided by uncompressed revision size |
|
603 | 603 | (new delta chains typically start at ratio 2.00) |
|
604 | 604 | :``lindist``: linear distance from base revision in delta chain to end |
|
605 | 605 | of this revision |
|
606 | 606 | :``extradist``: total size of revisions not part of this delta chain from |
|
607 | 607 | base of delta chain to end of this revision; a measurement |
|
608 | 608 | of how much extra data we need to read/seek across to read |
|
609 | 609 | the delta chain for this revision |
|
610 | 610 | :``extraratio``: extradist divided by chainsize; another representation of |
|
611 | 611 | how much unrelated data is needed to load this delta chain |
|
612 | 612 | |
|
613 | 613 | If the repository is configured to use the sparse read, additional keywords |
|
614 | 614 | are available: |
|
615 | 615 | |
|
616 | 616 | :``readsize``: total size of data read from the disk for a revision |
|
617 | 617 | (sum of the sizes of all the blocks) |
|
618 | 618 | :``largestblock``: size of the largest block of data read from the disk |
|
619 | 619 | :``readdensity``: density of useful bytes in the data read from the disk |
|
620 | 620 | :``srchunks``: in how many data hunks the whole revision would be read |
|
621 | 621 | |
|
622 | 622 | The sparse read can be enabled with experimental.sparse-read = True |
|
623 | 623 | """ |
|
624 | 624 | opts = pycompat.byteskwargs(opts) |
|
625 | 625 | r = cmdutil.openrevlog(repo, 'debugdeltachain', file_, opts) |
|
626 | 626 | index = r.index |
|
627 | 627 | start = r.start |
|
628 | 628 | length = r.length |
|
629 | 629 | generaldelta = r.version & revlog.FLAG_GENERALDELTA |
|
630 | 630 | withsparseread = getattr(r, '_withsparseread', False) |
|
631 | 631 | |
|
632 | 632 | def revinfo(rev): |
|
633 | 633 | e = index[rev] |
|
634 | 634 | compsize = e[1] |
|
635 | 635 | uncompsize = e[2] |
|
636 | 636 | chainsize = 0 |
|
637 | 637 | |
|
638 | 638 | if generaldelta: |
|
639 | 639 | if e[3] == e[5]: |
|
640 | 640 | deltatype = 'p1' |
|
641 | 641 | elif e[3] == e[6]: |
|
642 | 642 | deltatype = 'p2' |
|
643 | 643 | elif e[3] == rev - 1: |
|
644 | 644 | deltatype = 'prev' |
|
645 | 645 | elif e[3] == rev: |
|
646 | 646 | deltatype = 'base' |
|
647 | 647 | else: |
|
648 | 648 | deltatype = 'other' |
|
649 | 649 | else: |
|
650 | 650 | if e[3] == rev: |
|
651 | 651 | deltatype = 'base' |
|
652 | 652 | else: |
|
653 | 653 | deltatype = 'prev' |
|
654 | 654 | |
|
655 | 655 | chain = r._deltachain(rev)[0] |
|
656 | 656 | for iterrev in chain: |
|
657 | 657 | e = index[iterrev] |
|
658 | 658 | chainsize += e[1] |
|
659 | 659 | |
|
660 | 660 | return compsize, uncompsize, deltatype, chain, chainsize |
|
661 | 661 | |
|
662 | 662 | fm = ui.formatter('debugdeltachain', opts) |
|
663 | 663 | |
|
664 | 664 | fm.plain(' rev chain# chainlen prev delta ' |
|
665 | 665 | 'size rawsize chainsize ratio lindist extradist ' |
|
666 | 666 | 'extraratio') |
|
667 | 667 | if withsparseread: |
|
668 | 668 | fm.plain(' readsize largestblk rddensity srchunks') |
|
669 | 669 | fm.plain('\n') |
|
670 | 670 | |
|
671 | 671 | chainbases = {} |
|
672 | 672 | for rev in r: |
|
673 | 673 | comp, uncomp, deltatype, chain, chainsize = revinfo(rev) |
|
674 | 674 | chainbase = chain[0] |
|
675 | 675 | chainid = chainbases.setdefault(chainbase, len(chainbases) + 1) |
|
676 | 676 | basestart = start(chainbase) |
|
677 | 677 | revstart = start(rev) |
|
678 | 678 | lineardist = revstart + comp - basestart |
|
679 | 679 | extradist = lineardist - chainsize |
|
680 | 680 | try: |
|
681 | 681 | prevrev = chain[-2] |
|
682 | 682 | except IndexError: |
|
683 | 683 | prevrev = -1 |
|
684 | 684 | |
|
685 | 685 | if uncomp != 0: |
|
686 | 686 | chainratio = float(chainsize) / float(uncomp) |
|
687 | 687 | else: |
|
688 | 688 | chainratio = chainsize |
|
689 | 689 | |
|
690 | 690 | if chainsize != 0: |
|
691 | 691 | extraratio = float(extradist) / float(chainsize) |
|
692 | 692 | else: |
|
693 | 693 | extraratio = extradist |
|
694 | 694 | |
|
695 | 695 | fm.startitem() |
|
696 | 696 | fm.write('rev chainid chainlen prevrev deltatype compsize ' |
|
697 | 697 | 'uncompsize chainsize chainratio lindist extradist ' |
|
698 | 698 | 'extraratio', |
|
699 | 699 | '%7d %7d %8d %8d %7s %10d %10d %10d %9.5f %9d %9d %10.5f', |
|
700 | 700 | rev, chainid, len(chain), prevrev, deltatype, comp, |
|
701 | 701 | uncomp, chainsize, chainratio, lineardist, extradist, |
|
702 | 702 | extraratio, |
|
703 | 703 | rev=rev, chainid=chainid, chainlen=len(chain), |
|
704 | 704 | prevrev=prevrev, deltatype=deltatype, compsize=comp, |
|
705 | 705 | uncompsize=uncomp, chainsize=chainsize, |
|
706 | 706 | chainratio=chainratio, lindist=lineardist, |
|
707 | 707 | extradist=extradist, extraratio=extraratio) |
|
708 | 708 | if withsparseread: |
|
709 | 709 | readsize = 0 |
|
710 | 710 | largestblock = 0 |
|
711 | 711 | srchunks = 0 |
|
712 | 712 | |
|
713 | 713 | for revschunk in deltautil.slicechunk(r, chain): |
|
714 | 714 | srchunks += 1 |
|
715 | 715 | blkend = start(revschunk[-1]) + length(revschunk[-1]) |
|
716 | 716 | blksize = blkend - start(revschunk[0]) |
|
717 | 717 | |
|
718 | 718 | readsize += blksize |
|
719 | 719 | if largestblock < blksize: |
|
720 | 720 | largestblock = blksize |
|
721 | 721 | |
|
722 | 722 | if readsize: |
|
723 | 723 | readdensity = float(chainsize) / float(readsize) |
|
724 | 724 | else: |
|
725 | 725 | readdensity = 1 |
|
726 | 726 | |
|
727 | 727 | fm.write('readsize largestblock readdensity srchunks', |
|
728 | 728 | ' %10d %10d %9.5f %8d', |
|
729 | 729 | readsize, largestblock, readdensity, srchunks, |
|
730 | 730 | readsize=readsize, largestblock=largestblock, |
|
731 | 731 | readdensity=readdensity, srchunks=srchunks) |
|
732 | 732 | |
|
733 | 733 | fm.plain('\n') |
|
734 | 734 | |
|
735 | 735 | fm.end() |
|
736 | 736 | |
|
737 | 737 | @command('debugdirstate|debugstate', |
|
738 | 738 | [('', 'nodates', None, _('do not display the saved mtime (DEPRECATED)')), |
|
739 | 739 | ('', 'dates', True, _('display the saved mtime')), |
|
740 | 740 | ('', 'datesort', None, _('sort by saved mtime'))], |
|
741 | 741 | _('[OPTION]...')) |
|
742 | 742 | def debugstate(ui, repo, **opts): |
|
743 | 743 | """show the contents of the current dirstate""" |
|
744 | 744 | |
|
745 | 745 | nodates = not opts[r'dates'] |
|
746 | 746 | if opts.get(r'nodates') is not None: |
|
747 | 747 | nodates = True |
|
748 | 748 | datesort = opts.get(r'datesort') |
|
749 | 749 | |
|
750 | 750 | if datesort: |
|
751 | 751 | keyfunc = lambda x: (x[1][3], x[0]) # sort by mtime, then by filename |
|
752 | 752 | else: |
|
753 | 753 | keyfunc = None # sort by filename |
|
754 | | for file_, ent in sorted(repo.dirstate. |
|
| 754 | for file_, ent in sorted(repo.dirstate.iteritems(), key=keyfunc): |
|
755 | 755 | if ent[3] == -1: |
|
756 | 756 | timestr = 'unset ' |
|
757 | 757 | elif nodates: |
|
758 | 758 | timestr = 'set ' |
|
759 | 759 | else: |
|
760 | 760 | timestr = time.strftime(r"%Y-%m-%d %H:%M:%S ", |
|
761 | 761 | time.localtime(ent[3])) |
|
762 | 762 | timestr = encoding.strtolocal(timestr) |
|
763 | 763 | if ent[1] & 0o20000: |
|
764 | 764 | mode = 'lnk' |
|
765 | 765 | else: |
|
766 | 766 | mode = '%3o' % (ent[1] & 0o777 & ~util.umask) |
|
767 | 767 | ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_)) |
|
768 | 768 | for f in repo.dirstate.copies(): |
|
769 | 769 | ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f)) |
|
770 | 770 | |
|
771 | 771 | @command('debugdiscovery', |
|
772 | 772 | [('', 'old', None, _('use old-style discovery')), |
|
773 | 773 | ('', 'nonheads', None, |
|
774 | 774 | _('use old-style discovery with non-heads included')), |
|
775 | 775 | ('', 'rev', [], 'restrict discovery to this set of revs'), |
|
776 | 776 | ('', 'seed', '12323', 'specify the random seed use for discovery'), |
|
777 | 777 | ] + cmdutil.remoteopts, |
|
778 | 778 | _('[--rev REV] [OTHER]')) |
|
779 | 779 | def debugdiscovery(ui, repo, remoteurl="default", **opts): |
|
780 | 780 | """runs the changeset discovery protocol in isolation""" |
|
781 | 781 | opts = pycompat.byteskwargs(opts) |
|
782 | 782 | remoteurl, branches = hg.parseurl(ui.expandpath(remoteurl)) |
|
783 | 783 | remote = hg.peer(repo, opts, remoteurl) |
|
784 | 784 | ui.status(_('comparing with %s\n') % util.hidepassword(remoteurl)) |
|
785 | 785 | |
|
786 | 786 | # make sure tests are repeatable |
|
787 | 787 | random.seed(int(opts['seed'])) |
|
788 | 788 | |
|
789 | 789 | |
|
790 | 790 | |
|
791 | 791 | if opts.get('old'): |
|
792 | 792 | def doit(pushedrevs, remoteheads, remote=remote): |
|
793 | 793 | if not util.safehasattr(remote, 'branches'): |
|
794 | 794 | # enable in-client legacy support |
|
795 | 795 | remote = localrepo.locallegacypeer(remote.local()) |
|
796 | 796 | common, _in, hds = treediscovery.findcommonincoming(repo, remote, |
|
797 | 797 | force=True) |
|
798 | 798 | common = set(common) |
|
799 | 799 | if not opts.get('nonheads'): |
|
800 | 800 | ui.write(("unpruned common: %s\n") % |
|
801 | 801 | " ".join(sorted(short(n) for n in common))) |
|
802 | 802 | |
|
803 | 803 | clnode = repo.changelog.node |
|
804 | 804 | common = repo.revs('heads(::%ln)', common) |
|
805 | 805 | common = {clnode(r) for r in common} |
|
806 | 806 | return common, hds |
|
807 | 807 | else: |
|
808 | 808 | def doit(pushedrevs, remoteheads, remote=remote): |
|
809 | 809 | nodes = None |
|
810 | 810 | if pushedrevs: |
|
811 | 811 | revs = scmutil.revrange(repo, pushedrevs) |
|
812 | 812 | nodes = [repo[r].node() for r in revs] |
|
813 | 813 | common, any, hds = setdiscovery.findcommonheads(ui, repo, remote, |
|
814 | 814 | ancestorsof=nodes) |
|
815 | 815 | return common, hds |
|
816 | 816 | |
|
817 | 817 | remoterevs, _checkout = hg.addbranchrevs(repo, remote, branches, revs=None) |
|
818 | 818 | localrevs = opts['rev'] |
|
819 | 819 | with util.timedcm('debug-discovery') as t: |
|
820 | 820 | common, hds = doit(localrevs, remoterevs) |
|
821 | 821 | |
|
822 | 822 | # compute all statistics |
|
823 | 823 | common = set(common) |
|
824 | 824 | rheads = set(hds) |
|
825 | 825 | lheads = set(repo.heads()) |
|
826 | 826 | |
|
827 | 827 | data = {} |
|
828 | 828 | data['elapsed'] = t.elapsed |
|
829 | 829 | data['nb-common'] = len(common) |
|
830 | 830 | data['nb-common-local'] = len(common & lheads) |
|
831 | 831 | data['nb-common-remote'] = len(common & rheads) |
|
832 | 832 | data['nb-common-both'] = len(common & rheads & lheads) |
|
833 | 833 | data['nb-local'] = len(lheads) |
|
834 | 834 | data['nb-local-missing'] = data['nb-local'] - data['nb-common-local'] |
|
835 | 835 | data['nb-remote'] = len(rheads) |
|
836 | 836 | data['nb-remote-unknown'] = data['nb-remote'] - data['nb-common-remote'] |
|
837 | 837 | data['nb-revs'] = len(repo.revs('all()')) |
|
838 | 838 | data['nb-revs-common'] = len(repo.revs('::%ln', common)) |
|
839 | 839 | data['nb-revs-missing'] = data['nb-revs'] - data['nb-revs-common'] |
|
840 | 840 | |
|
841 | 841 | # display discovery summary |
|
842 | 842 | ui.write(("elapsed time: %(elapsed)f seconds\n") % data) |
|
843 | 843 | ui.write(("heads summary:\n")) |
|
844 | 844 | ui.write((" total common heads: %(nb-common)9d\n") % data) |
|
845 | 845 | ui.write((" also local heads: %(nb-common-local)9d\n") % data) |
|
846 | 846 | ui.write((" also remote heads: %(nb-common-remote)9d\n") % data) |
|
847 | 847 | ui.write((" both: %(nb-common-both)9d\n") % data) |
|
848 | 848 | ui.write((" local heads: %(nb-local)9d\n") % data) |
|
849 | 849 | ui.write((" common: %(nb-common-local)9d\n") % data) |
|
850 | 850 | ui.write((" missing: %(nb-local-missing)9d\n") % data) |
|
851 | 851 | ui.write((" remote heads: %(nb-remote)9d\n") % data) |
|
852 | 852 | ui.write((" common: %(nb-common-remote)9d\n") % data) |
|
853 | 853 | ui.write((" unknown: %(nb-remote-unknown)9d\n") % data) |
|
854 | 854 | ui.write(("local changesets: %(nb-revs)9d\n") % data) |
|
855 | 855 | ui.write((" common: %(nb-revs-common)9d\n") % data) |
|
856 | 856 | ui.write((" missing: %(nb-revs-missing)9d\n") % data) |
|
857 | 857 | |
|
858 | 858 | if ui.verbose: |
|
859 | 859 | ui.write(("common heads: %s\n") % |
|
860 | 860 | " ".join(sorted(short(n) for n in common))) |
|
861 | 861 | |
|
862 | 862 | _chunksize = 4 << 10 |
|
863 | 863 | |
|
864 | 864 | @command('debugdownload', |
|
865 | 865 | [ |
|
866 | 866 | ('o', 'output', '', _('path')), |
|
867 | 867 | ], |
|
868 | 868 | optionalrepo=True) |
|
869 | 869 | def debugdownload(ui, repo, url, output=None, **opts): |
|
870 | 870 | """download a resource using Mercurial logic and config |
|
871 | 871 | """ |
|
872 | 872 | fh = urlmod.open(ui, url, output) |
|
873 | 873 | |
|
874 | 874 | dest = ui |
|
875 | 875 | if output: |
|
876 | 876 | dest = open(output, "wb", _chunksize) |
|
877 | 877 | try: |
|
878 | 878 | data = fh.read(_chunksize) |
|
879 | 879 | while data: |
|
880 | 880 | dest.write(data) |
|
881 | 881 | data = fh.read(_chunksize) |
|
882 | 882 | finally: |
|
883 | 883 | if output: |
|
884 | 884 | dest.close() |
|
885 | 885 | |
|
886 | 886 | @command('debugextensions', cmdutil.formatteropts, [], optionalrepo=True) |
|
887 | 887 | def debugextensions(ui, repo, **opts): |
|
888 | 888 | '''show information about active extensions''' |
|
889 | 889 | opts = pycompat.byteskwargs(opts) |
|
890 | 890 | exts = extensions.extensions(ui) |
|
891 | 891 | hgver = util.version() |
|
892 | 892 | fm = ui.formatter('debugextensions', opts) |
|
893 | 893 | for extname, extmod in sorted(exts, key=operator.itemgetter(0)): |
|
894 | 894 | isinternal = extensions.ismoduleinternal(extmod) |
|
895 | 895 | extsource = pycompat.fsencode(extmod.__file__) |
|
896 | 896 | if isinternal: |
|
897 | 897 | exttestedwith = [] # never expose magic string to users |
|
898 | 898 | else: |
|
899 | 899 | exttestedwith = getattr(extmod, 'testedwith', '').split() |
|
900 | 900 | extbuglink = getattr(extmod, 'buglink', None) |
|
901 | 901 | |
|
902 | 902 | fm.startitem() |
|
903 | 903 | |
|
904 | 904 | if ui.quiet or ui.verbose: |
|
905 | 905 | fm.write('name', '%s\n', extname) |
|
906 | 906 | else: |
|
907 | 907 | fm.write('name', '%s', extname) |
|
908 | 908 | if isinternal or hgver in exttestedwith: |
|
909 | 909 | fm.plain('\n') |
|
910 | 910 | elif not exttestedwith: |
|
911 | 911 | fm.plain(_(' (untested!)\n')) |
|
912 | 912 | else: |
|
913 | 913 | lasttestedversion = exttestedwith[-1] |
|
914 | 914 | fm.plain(' (%s!)\n' % lasttestedversion) |
|
915 | 915 | |
|
916 | 916 | fm.condwrite(ui.verbose and extsource, 'source', |
|
917 | 917 | _(' location: %s\n'), extsource or "") |
|
918 | 918 | |
|
919 | 919 | if ui.verbose: |
|
920 | 920 | fm.plain(_(' bundled: %s\n') % ['no', 'yes'][isinternal]) |
|
921 | 921 | fm.data(bundled=isinternal) |
|
922 | 922 | |
|
923 | 923 | fm.condwrite(ui.verbose and exttestedwith, 'testedwith', |
|
924 | 924 | _(' tested with: %s\n'), |
|
925 | 925 | fm.formatlist(exttestedwith, name='ver')) |
|
926 | 926 | |
|
927 | 927 | fm.condwrite(ui.verbose and extbuglink, 'buglink', |
|
928 | 928 | _(' bug reporting: %s\n'), extbuglink or "") |
|
929 | 929 | |
|
930 | 930 | fm.end() |
|
931 | 931 | |
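Since the command is declared with cmdutil.formatteropts, the listing can also be rendered through a formatter template; a minimal sketch::

    $ hg debugextensions -v
    $ hg debugextensions -T json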
|
932 | 932 | @command('debugfileset', |
|
933 | 933 | [('r', 'rev', '', _('apply the filespec on this revision'), _('REV')), |
|
934 | 934 | ('', 'all-files', False, |
|
935 | 935 | _('test files from all revisions and working directory')), |
|
936 | 936 | ('s', 'show-matcher', None, |
|
937 | 937 | _('print internal representation of matcher')), |
|
938 | 938 | ('p', 'show-stage', [], |
|
939 | 939 | _('print parsed tree at the given stage'), _('NAME'))], |
|
940 | 940 | _('[-r REV] [--all-files] [OPTION]... FILESPEC')) |
|
941 | 941 | def debugfileset(ui, repo, expr, **opts): |
|
942 | 942 | '''parse and apply a fileset specification''' |
|
943 | 943 | from . import fileset |
|
944 | 944 | fileset.symbols # force import of fileset so we have predicates to optimize |
|
945 | 945 | opts = pycompat.byteskwargs(opts) |
|
946 | 946 | ctx = scmutil.revsingle(repo, opts.get('rev'), None) |
|
947 | 947 | |
|
948 | 948 | stages = [ |
|
949 | 949 | ('parsed', pycompat.identity), |
|
950 | 950 | ('analyzed', filesetlang.analyze), |
|
951 | 951 | ('optimized', filesetlang.optimize), |
|
952 | 952 | ] |
|
953 | 953 | stagenames = set(n for n, f in stages) |
|
954 | 954 | |
|
955 | 955 | showalways = set() |
|
956 | 956 | if ui.verbose and not opts['show_stage']: |
|
957 | 957 | # show parsed tree by --verbose (deprecated) |
|
958 | 958 | showalways.add('parsed') |
|
959 | 959 | if opts['show_stage'] == ['all']: |
|
960 | 960 | showalways.update(stagenames) |
|
961 | 961 | else: |
|
962 | 962 | for n in opts['show_stage']: |
|
963 | 963 | if n not in stagenames: |
|
964 | 964 | raise error.Abort(_('invalid stage name: %s') % n) |
|
965 | 965 | showalways.update(opts['show_stage']) |
|
966 | 966 | |
|
967 | 967 | tree = filesetlang.parse(expr) |
|
968 | 968 | for n, f in stages: |
|
969 | 969 | tree = f(tree) |
|
970 | 970 | if n in showalways: |
|
971 | 971 | if opts['show_stage'] or n != 'parsed': |
|
972 | 972 | ui.write(("* %s:\n") % n) |
|
973 | 973 | ui.write(filesetlang.prettyformat(tree), "\n") |
|
974 | 974 | |
|
975 | 975 | files = set() |
|
976 | 976 | if opts['all_files']: |
|
977 | 977 | for r in repo: |
|
978 | 978 | c = repo[r] |
|
979 | 979 | files.update(c.files()) |
|
980 | 980 | files.update(c.substate) |
|
981 | 981 | if opts['all_files'] or ctx.rev() is None: |
|
982 | 982 | wctx = repo[None] |
|
983 | 983 | files.update(repo.dirstate.walk(scmutil.matchall(repo), |
|
984 | 984 | subrepos=list(wctx.substate), |
|
985 | 985 | unknown=True, ignored=True)) |
|
986 | 986 | files.update(wctx.substate) |
|
987 | 987 | else: |
|
988 | 988 | files.update(ctx.files()) |
|
989 | 989 | files.update(ctx.substate) |
|
990 | 990 | |
|
991 | 991 | m = ctx.matchfileset(expr) |
|
992 | 992 | if opts['show_matcher'] or (opts['show_matcher'] is None and ui.verbose): |
|
993 | 993 | ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n') |
|
994 | 994 | for f in sorted(files): |
|
995 | 995 | if not m(f): |
|
996 | 996 | continue |
|
997 | 997 | ui.write("%s\n" % f) |
|
998 | 998 | |
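The stages table above ('parsed', 'analyzed', 'optimized') is what -p/--show-stage selects from, with 'all' enabling every stage. A minimal sketch; the fileset expression is only an illustration::

    $ hg debugfileset -p all -s '**.py and size(">1k")'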
|
999 | 999 | @command('debugformat', |
|
1000 | 1000 | [] + cmdutil.formatteropts) |
|
1001 | 1001 | def debugformat(ui, repo, **opts): |
|
1002 | 1002 | """display format information about the current repository |
|
1003 | 1003 | |
|
1004 | 1004 | Use --verbose to get extra information about current config value and |
|
1005 | 1005 | Mercurial default.""" |
|
1006 | 1006 | opts = pycompat.byteskwargs(opts) |
|
1007 | 1007 | maxvariantlength = max(len(fv.name) for fv in upgrade.allformatvariant) |
|
1008 | 1008 | maxvariantlength = max(len('format-variant'), maxvariantlength) |
|
1009 | 1009 | |
|
1010 | 1010 | def makeformatname(name): |
|
1011 | 1011 | return '%s:' + (' ' * (maxvariantlength - len(name))) |
|
1012 | 1012 | |
|
1013 | 1013 | fm = ui.formatter('debugformat', opts) |
|
1014 | 1014 | if fm.isplain(): |
|
1015 | 1015 | def formatvalue(value): |
|
1016 | 1016 | if util.safehasattr(value, 'startswith'): |
|
1017 | 1017 | return value |
|
1018 | 1018 | if value: |
|
1019 | 1019 | return 'yes' |
|
1020 | 1020 | else: |
|
1021 | 1021 | return 'no' |
|
1022 | 1022 | else: |
|
1023 | 1023 | formatvalue = pycompat.identity |
|
1024 | 1024 | |
|
1025 | 1025 | fm.plain('format-variant') |
|
1026 | 1026 | fm.plain(' ' * (maxvariantlength - len('format-variant'))) |
|
1027 | 1027 | fm.plain(' repo') |
|
1028 | 1028 | if ui.verbose: |
|
1029 | 1029 | fm.plain(' config default') |
|
1030 | 1030 | fm.plain('\n') |
|
1031 | 1031 | for fv in upgrade.allformatvariant: |
|
1032 | 1032 | fm.startitem() |
|
1033 | 1033 | repovalue = fv.fromrepo(repo) |
|
1034 | 1034 | configvalue = fv.fromconfig(repo) |
|
1035 | 1035 | |
|
1036 | 1036 | if repovalue != configvalue: |
|
1037 | 1037 | namelabel = 'formatvariant.name.mismatchconfig' |
|
1038 | 1038 | repolabel = 'formatvariant.repo.mismatchconfig' |
|
1039 | 1039 | elif repovalue != fv.default: |
|
1040 | 1040 | namelabel = 'formatvariant.name.mismatchdefault' |
|
1041 | 1041 | repolabel = 'formatvariant.repo.mismatchdefault' |
|
1042 | 1042 | else: |
|
1043 | 1043 | namelabel = 'formatvariant.name.uptodate' |
|
1044 | 1044 | repolabel = 'formatvariant.repo.uptodate' |
|
1045 | 1045 | |
|
1046 | 1046 | fm.write('name', makeformatname(fv.name), fv.name, |
|
1047 | 1047 | label=namelabel) |
|
1048 | 1048 | fm.write('repo', ' %3s', formatvalue(repovalue), |
|
1049 | 1049 | label=repolabel) |
|
1050 | 1050 | if fv.default != configvalue: |
|
1051 | 1051 | configlabel = 'formatvariant.config.special' |
|
1052 | 1052 | else: |
|
1053 | 1053 | configlabel = 'formatvariant.config.default' |
|
1054 | 1054 | fm.condwrite(ui.verbose, 'config', ' %6s', formatvalue(configvalue), |
|
1055 | 1055 | label=configlabel) |
|
1056 | 1056 | fm.condwrite(ui.verbose, 'default', ' %7s', formatvalue(fv.default), |
|
1057 | 1057 | label='formatvariant.default') |
|
1058 | 1058 | fm.plain('\n') |
|
1059 | 1059 | fm.end() |
|
1060 | 1060 | |
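A minimal invocation sketch; as the docstring notes, --verbose adds the config and default columns next to the repo column printed above::

    $ hg debugformat --verbose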
|
1061 | 1061 | @command('debugfsinfo', [], _('[PATH]'), norepo=True) |
|
1062 | 1062 | def debugfsinfo(ui, path="."): |
|
1063 | 1063 | """show information detected about current filesystem""" |
|
1064 | 1064 | ui.write(('path: %s\n') % path) |
|
1065 | 1065 | ui.write(('mounted on: %s\n') % (util.getfsmountpoint(path) or '(unknown)')) |
|
1066 | 1066 | ui.write(('exec: %s\n') % (util.checkexec(path) and 'yes' or 'no')) |
|
1067 | 1067 | ui.write(('fstype: %s\n') % (util.getfstype(path) or '(unknown)')) |
|
1068 | 1068 | ui.write(('symlink: %s\n') % (util.checklink(path) and 'yes' or 'no')) |
|
1069 | 1069 | ui.write(('hardlink: %s\n') % (util.checknlink(path) and 'yes' or 'no')) |
|
1070 | 1070 | casesensitive = '(unknown)' |
|
1071 | 1071 | try: |
|
1072 | 1072 | with pycompat.namedtempfile(prefix='.debugfsinfo', dir=path) as f: |
|
1073 | 1073 | casesensitive = util.fscasesensitive(f.name) and 'yes' or 'no' |
|
1074 | 1074 | except OSError: |
|
1075 | 1075 | pass |
|
1076 | 1076 | ui.write(('case-sensitive: %s\n') % casesensitive) |
|
1077 | 1077 | |
|
1078 | 1078 | @command('debuggetbundle', |
|
1079 | 1079 | [('H', 'head', [], _('id of head node'), _('ID')), |
|
1080 | 1080 | ('C', 'common', [], _('id of common node'), _('ID')), |
|
1081 | 1081 | ('t', 'type', 'bzip2', _('bundle compression type to use'), _('TYPE'))], |
|
1082 | 1082 | _('REPO FILE [-H|-C ID]...'), |
|
1083 | 1083 | norepo=True) |
|
1084 | 1084 | def debuggetbundle(ui, repopath, bundlepath, head=None, common=None, **opts): |
|
1085 | 1085 | """retrieves a bundle from a repo |
|
1086 | 1086 | |
|
1087 | 1087 | Every ID must be a full-length hex node id string. Saves the bundle to the |
|
1088 | 1088 | given file. |
|
1089 | 1089 | """ |
|
1090 | 1090 | opts = pycompat.byteskwargs(opts) |
|
1091 | 1091 | repo = hg.peer(ui, opts, repopath) |
|
1092 | 1092 | if not repo.capable('getbundle'): |
|
1093 | 1093 | raise error.Abort("getbundle() not supported by target repository") |
|
1094 | 1094 | args = {} |
|
1095 | 1095 | if common: |
|
1096 | 1096 | args[r'common'] = [bin(s) for s in common] |
|
1097 | 1097 | if head: |
|
1098 | 1098 | args[r'heads'] = [bin(s) for s in head] |
|
1099 | 1099 | # TODO: get desired bundlecaps from command line. |
|
1100 | 1100 | args[r'bundlecaps'] = None |
|
1101 | 1101 | bundle = repo.getbundle('debug', **args) |
|
1102 | 1102 | |
|
1103 | 1103 | bundletype = opts.get('type', 'bzip2').lower() |
|
1104 | 1104 | btypes = {'none': 'HG10UN', |
|
1105 | 1105 | 'bzip2': 'HG10BZ', |
|
1106 | 1106 | 'gzip': 'HG10GZ', |
|
1107 | 1107 | 'bundle2': 'HG20'} |
|
1108 | 1108 | bundletype = btypes.get(bundletype) |
|
1109 | 1109 | if bundletype not in bundle2.bundletypes: |
|
1110 | 1110 | raise error.Abort(_('unknown bundle type specified with --type')) |
|
1111 | 1111 | bundle2.writebundle(ui, bundle, bundlepath, bundletype) |
|
1112 | 1112 | |
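A minimal sketch with placeholder repository and file names; -t/--type must map to one of the entries in ``btypes`` above::

    $ hg debuggetbundle https://example.com/repo bundle.hg -t bundle2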
|
1113 | 1113 | @command('debugignore', [], '[FILE]') |
|
1114 | 1114 | def debugignore(ui, repo, *files, **opts): |
|
1115 | 1115 | """display the combined ignore pattern and information about ignored files |
|
1116 | 1116 | |
|
1117 | 1117 | With no argument display the combined ignore pattern. |
|
1118 | 1118 | |
|
1119 | 1119 | Given space separated file names, show if the given file is ignored and
|
1120 | 1120 | if so, show the ignore rule (file and line number) that matched it. |
|
1121 | 1121 | """ |
|
1122 | 1122 | ignore = repo.dirstate._ignore |
|
1123 | 1123 | if not files: |
|
1124 | 1124 | # Show all the patterns |
|
1125 | 1125 | ui.write("%s\n" % pycompat.byterepr(ignore)) |
|
1126 | 1126 | else: |
|
1127 | 1127 | m = scmutil.match(repo[None], pats=files) |
|
1128 | 1128 | uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True) |
|
1129 | 1129 | for f in m.files(): |
|
1130 | 1130 | nf = util.normpath(f) |
|
1131 | 1131 | ignored = None |
|
1132 | 1132 | ignoredata = None |
|
1133 | 1133 | if nf != '.': |
|
1134 | 1134 | if ignore(nf): |
|
1135 | 1135 | ignored = nf |
|
1136 | 1136 | ignoredata = repo.dirstate._ignorefileandline(nf) |
|
1137 | 1137 | else: |
|
1138 | 1138 | for p in util.finddirs(nf): |
|
1139 | 1139 | if ignore(p): |
|
1140 | 1140 | ignored = p |
|
1141 | 1141 | ignoredata = repo.dirstate._ignorefileandline(p) |
|
1142 | 1142 | break |
|
1143 | 1143 | if ignored: |
|
1144 | 1144 | if ignored == nf: |
|
1145 | 1145 | ui.write(_("%s is ignored\n") % uipathfn(f)) |
|
1146 | 1146 | else: |
|
1147 | 1147 | ui.write(_("%s is ignored because of " |
|
1148 | 1148 | "containing directory %s\n") |
|
1149 | 1149 | % (uipathfn(f), ignored)) |
|
1150 | 1150 | ignorefile, lineno, line = ignoredata |
|
1151 | 1151 | ui.write(_("(ignore rule in %s, line %d: '%s')\n") |
|
1152 | 1152 | % (ignorefile, lineno, line)) |
|
1153 | 1153 | else: |
|
1154 | 1154 | ui.write(_("%s is not ignored\n") % uipathfn(f)) |
|
1155 | 1155 | |
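A minimal sketch; the file names are placeholders::

    $ hg debugignore                      # print the combined ignore pattern
    $ hg debugignore build/output.o README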
|
1156 | 1156 | @command('debugindex', cmdutil.debugrevlogopts + cmdutil.formatteropts, |
|
1157 | 1157 | _('-c|-m|FILE')) |
|
1158 | 1158 | def debugindex(ui, repo, file_=None, **opts): |
|
1159 | 1159 | """dump index data for a storage primitive""" |
|
1160 | 1160 | opts = pycompat.byteskwargs(opts) |
|
1161 | 1161 | store = cmdutil.openstorage(repo, 'debugindex', file_, opts) |
|
1162 | 1162 | |
|
1163 | 1163 | if ui.debugflag: |
|
1164 | 1164 | shortfn = hex |
|
1165 | 1165 | else: |
|
1166 | 1166 | shortfn = short |
|
1167 | 1167 | |
|
1168 | 1168 | idlen = 12 |
|
1169 | 1169 | for i in store: |
|
1170 | 1170 | idlen = len(shortfn(store.node(i))) |
|
1171 | 1171 | break |
|
1172 | 1172 | |
|
1173 | 1173 | fm = ui.formatter('debugindex', opts) |
|
1174 | 1174 | fm.plain(b' rev linkrev %s %s p2\n' % ( |
|
1175 | 1175 | b'nodeid'.ljust(idlen), |
|
1176 | 1176 | b'p1'.ljust(idlen))) |
|
1177 | 1177 | |
|
1178 | 1178 | for rev in store: |
|
1179 | 1179 | node = store.node(rev) |
|
1180 | 1180 | parents = store.parents(node) |
|
1181 | 1181 | |
|
1182 | 1182 | fm.startitem() |
|
1183 | 1183 | fm.write(b'rev', b'%6d ', rev) |
|
1184 | 1184 | fm.write(b'linkrev', '%7d ', store.linkrev(rev)) |
|
1185 | 1185 | fm.write(b'node', '%s ', shortfn(node)) |
|
1186 | 1186 | fm.write(b'p1', '%s ', shortfn(parents[0])) |
|
1187 | 1187 | fm.write(b'p2', '%s', shortfn(parents[1])) |
|
1188 | 1188 | fm.plain(b'\n') |
|
1189 | 1189 | |
|
1190 | 1190 | fm.end() |
|
1191 | 1191 | |
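A minimal sketch; per the synopsis, -c selects the changelog, -m the manifest, and a plain argument names a tracked file::

    $ hg debugindex -c
    $ hg debugindex path/to/tracked-file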
|
1192 | 1192 | @command('debugindexdot', cmdutil.debugrevlogopts, |
|
1193 | 1193 | _('-c|-m|FILE'), optionalrepo=True) |
|
1194 | 1194 | def debugindexdot(ui, repo, file_=None, **opts): |
|
1195 | 1195 | """dump an index DAG as a graphviz dot file""" |
|
1196 | 1196 | opts = pycompat.byteskwargs(opts) |
|
1197 | 1197 | r = cmdutil.openstorage(repo, 'debugindexdot', file_, opts) |
|
1198 | 1198 | ui.write(("digraph G {\n")) |
|
1199 | 1199 | for i in r: |
|
1200 | 1200 | node = r.node(i) |
|
1201 | 1201 | pp = r.parents(node) |
|
1202 | 1202 | ui.write("\t%d -> %d\n" % (r.rev(pp[0]), i)) |
|
1203 | 1203 | if pp[1] != nullid: |
|
1204 | 1204 | ui.write("\t%d -> %d\n" % (r.rev(pp[1]), i)) |
|
1205 | 1205 | ui.write("}\n") |
|
1206 | 1206 | |
|
1207 | 1207 | @command('debugindexstats', []) |
|
1208 | 1208 | def debugindexstats(ui, repo): |
|
1209 | 1209 | """show stats related to the changelog index""" |
|
1210 | 1210 | repo.changelog.shortest(nullid, 1) |
|
1211 | 1211 | index = repo.changelog.index |
|
1212 | 1212 | if not util.safehasattr(index, 'stats'): |
|
1213 | 1213 | raise error.Abort(_('debugindexstats only works with native code')) |
|
1214 | 1214 | for k, v in sorted(index.stats().items()): |
|
1215 | 1215 | ui.write('%s: %d\n' % (k, v)) |
|
1216 | 1216 | |
|
1217 | 1217 | @command('debuginstall', [] + cmdutil.formatteropts, '', norepo=True) |
|
1218 | 1218 | def debuginstall(ui, **opts): |
|
1219 | 1219 | '''test Mercurial installation |
|
1220 | 1220 | |
|
1221 | 1221 | Returns 0 on success. |
|
1222 | 1222 | ''' |
|
1223 | 1223 | opts = pycompat.byteskwargs(opts) |
|
1224 | 1224 | |
|
1225 | 1225 | problems = 0 |
|
1226 | 1226 | |
|
1227 | 1227 | fm = ui.formatter('debuginstall', opts) |
|
1228 | 1228 | fm.startitem() |
|
1229 | 1229 | |
|
1230 | 1230 | # encoding |
|
1231 | 1231 | fm.write('encoding', _("checking encoding (%s)...\n"), encoding.encoding) |
|
1232 | 1232 | err = None |
|
1233 | 1233 | try: |
|
1234 | 1234 | codecs.lookup(pycompat.sysstr(encoding.encoding)) |
|
1235 | 1235 | except LookupError as inst: |
|
1236 | 1236 | err = stringutil.forcebytestr(inst) |
|
1237 | 1237 | problems += 1 |
|
1238 | 1238 | fm.condwrite(err, 'encodingerror', _(" %s\n" |
|
1239 | 1239 | " (check that your locale is properly set)\n"), err) |
|
1240 | 1240 | |
|
1241 | 1241 | # Python |
|
1242 | 1242 | fm.write('pythonexe', _("checking Python executable (%s)\n"), |
|
1243 | 1243 | pycompat.sysexecutable or _("unknown")) |
|
1244 | 1244 | fm.write('pythonver', _("checking Python version (%s)\n"), |
|
1245 | 1245 | ("%d.%d.%d" % sys.version_info[:3])) |
|
1246 | 1246 | fm.write('pythonlib', _("checking Python lib (%s)...\n"), |
|
1247 | 1247 | os.path.dirname(pycompat.fsencode(os.__file__))) |
|
1248 | 1248 | |
|
1249 | 1249 | security = set(sslutil.supportedprotocols) |
|
1250 | 1250 | if sslutil.hassni: |
|
1251 | 1251 | security.add('sni') |
|
1252 | 1252 | |
|
1253 | 1253 | fm.write('pythonsecurity', _("checking Python security support (%s)\n"), |
|
1254 | 1254 | fm.formatlist(sorted(security), name='protocol', |
|
1255 | 1255 | fmt='%s', sep=',')) |
|
1256 | 1256 | |
|
1257 | 1257 | # These are warnings, not errors. So don't increment problem count. This |
|
1258 | 1258 | # may change in the future. |
|
1259 | 1259 | if 'tls1.2' not in security: |
|
1260 | 1260 | fm.plain(_(' TLS 1.2 not supported by Python install; ' |
|
1261 | 1261 | 'network connections lack modern security\n')) |
|
1262 | 1262 | if 'sni' not in security: |
|
1263 | 1263 | fm.plain(_(' SNI not supported by Python install; may have ' |
|
1264 | 1264 | 'connectivity issues with some servers\n')) |
|
1265 | 1265 | |
|
1266 | 1266 | # TODO print CA cert info |
|
1267 | 1267 | |
|
1268 | 1268 | # hg version |
|
1269 | 1269 | hgver = util.version() |
|
1270 | 1270 | fm.write('hgver', _("checking Mercurial version (%s)\n"), |
|
1271 | 1271 | hgver.split('+')[0]) |
|
1272 | 1272 | fm.write('hgverextra', _("checking Mercurial custom build (%s)\n"), |
|
1273 | 1273 | '+'.join(hgver.split('+')[1:])) |
|
1274 | 1274 | |
|
1275 | 1275 | # compiled modules |
|
1276 | 1276 | fm.write('hgmodulepolicy', _("checking module policy (%s)\n"), |
|
1277 | 1277 | policy.policy) |
|
1278 | 1278 | fm.write('hgmodules', _("checking installed modules (%s)...\n"), |
|
1279 | 1279 | os.path.dirname(pycompat.fsencode(__file__))) |
|
1280 | 1280 | |
|
1281 | 1281 | rustandc = policy.policy in ('rust+c', 'rust+c-allow') |
|
1282 | 1282 | rustext = rustandc # for now, that's the only case |
|
1283 | 1283 | cext = policy.policy in ('c', 'allow') or rustandc |
|
1284 | 1284 | nopure = cext or rustext |
|
1285 | 1285 | if nopure: |
|
1286 | 1286 | err = None |
|
1287 | 1287 | try: |
|
1288 | 1288 | if cext: |
|
1289 | 1289 | from .cext import ( |
|
1290 | 1290 | base85, |
|
1291 | 1291 | bdiff, |
|
1292 | 1292 | mpatch, |
|
1293 | 1293 | osutil, |
|
1294 | 1294 | ) |
|
1295 | 1295 | # quiet pyflakes |
|
1296 | 1296 | dir(bdiff), dir(mpatch), dir(base85), dir(osutil) |
|
1297 | 1297 | if rustext: |
|
1298 | 1298 | from .rustext import ( |
|
1299 | 1299 | ancestor, |
|
1300 | 1300 | dirstate, |
|
1301 | 1301 | ) |
|
1302 | 1302 | dir(ancestor), dir(dirstate) # quiet pyflakes |
|
1303 | 1303 | except Exception as inst: |
|
1304 | 1304 | err = stringutil.forcebytestr(inst) |
|
1305 | 1305 | problems += 1 |
|
1306 | 1306 | fm.condwrite(err, 'extensionserror', " %s\n", err) |
|
1307 | 1307 | |
|
1308 | 1308 | compengines = util.compengines._engines.values() |
|
1309 | 1309 | fm.write('compengines', _('checking registered compression engines (%s)\n'), |
|
1310 | 1310 | fm.formatlist(sorted(e.name() for e in compengines), |
|
1311 | 1311 | name='compengine', fmt='%s', sep=', ')) |
|
1312 | 1312 | fm.write('compenginesavail', _('checking available compression engines ' |
|
1313 | 1313 | '(%s)\n'), |
|
1314 | 1314 | fm.formatlist(sorted(e.name() for e in compengines |
|
1315 | 1315 | if e.available()), |
|
1316 | 1316 | name='compengine', fmt='%s', sep=', ')) |
|
1317 | 1317 | wirecompengines = compression.compengines.supportedwireengines( |
|
1318 | 1318 | compression.SERVERROLE) |
|
1319 | 1319 | fm.write('compenginesserver', _('checking available compression engines ' |
|
1320 | 1320 | 'for wire protocol (%s)\n'), |
|
1321 | 1321 | fm.formatlist([e.name() for e in wirecompengines |
|
1322 | 1322 | if e.wireprotosupport()], |
|
1323 | 1323 | name='compengine', fmt='%s', sep=', ')) |
|
1324 | 1324 | re2 = 'missing' |
|
1325 | 1325 | if util._re2: |
|
1326 | 1326 | re2 = 'available' |
|
1327 | 1327 | fm.plain(_('checking "re2" regexp engine (%s)\n') % re2) |
|
1328 | 1328 | fm.data(re2=bool(util._re2)) |
|
1329 | 1329 | |
|
1330 | 1330 | # templates |
|
1331 | 1331 | p = templater.templatepaths() |
|
1332 | 1332 | fm.write('templatedirs', 'checking templates (%s)...\n', ' '.join(p)) |
|
1333 | 1333 | fm.condwrite(not p, '', _(" no template directories found\n")) |
|
1334 | 1334 | if p: |
|
1335 | 1335 | m = templater.templatepath("map-cmdline.default") |
|
1336 | 1336 | if m: |
|
1337 | 1337 | # template found, check if it is working |
|
1338 | 1338 | err = None |
|
1339 | 1339 | try: |
|
1340 | 1340 | templater.templater.frommapfile(m) |
|
1341 | 1341 | except Exception as inst: |
|
1342 | 1342 | err = stringutil.forcebytestr(inst) |
|
1343 | 1343 | p = None |
|
1344 | 1344 | fm.condwrite(err, 'defaulttemplateerror', " %s\n", err) |
|
1345 | 1345 | else: |
|
1346 | 1346 | p = None |
|
1347 | 1347 | fm.condwrite(p, 'defaulttemplate', |
|
1348 | 1348 | _("checking default template (%s)\n"), m) |
|
1349 | 1349 | fm.condwrite(not m, 'defaulttemplatenotfound', |
|
1350 | 1350 | _(" template '%s' not found\n"), "default") |
|
1351 | 1351 | if not p: |
|
1352 | 1352 | problems += 1 |
|
1353 | 1353 | fm.condwrite(not p, '', |
|
1354 | 1354 | _(" (templates seem to have been installed incorrectly)\n")) |
|
1355 | 1355 | |
|
1356 | 1356 | # editor |
|
1357 | 1357 | editor = ui.geteditor() |
|
1358 | 1358 | editor = util.expandpath(editor) |
|
1359 | 1359 | editorbin = procutil.shellsplit(editor)[0] |
|
1360 | 1360 | fm.write('editor', _("checking commit editor... (%s)\n"), editorbin) |
|
1361 | 1361 | cmdpath = procutil.findexe(editorbin) |
|
1362 | 1362 | fm.condwrite(not cmdpath and editor == 'vi', 'vinotfound', |
|
1363 | 1363 | _(" No commit editor set and can't find %s in PATH\n" |
|
1364 | 1364 | " (specify a commit editor in your configuration" |
|
1365 | 1365 | " file)\n"), not cmdpath and editor == 'vi' and editorbin) |
|
1366 | 1366 | fm.condwrite(not cmdpath and editor != 'vi', 'editornotfound', |
|
1367 | 1367 | _(" Can't find editor '%s' in PATH\n" |
|
1368 | 1368 | " (specify a commit editor in your configuration" |
|
1369 | 1369 | " file)\n"), not cmdpath and editorbin) |
|
1370 | 1370 | if not cmdpath and editor != 'vi': |
|
1371 | 1371 | problems += 1 |
|
1372 | 1372 | |
|
1373 | 1373 | # check username |
|
1374 | 1374 | username = None |
|
1375 | 1375 | err = None |
|
1376 | 1376 | try: |
|
1377 | 1377 | username = ui.username() |
|
1378 | 1378 | except error.Abort as e: |
|
1379 | 1379 | err = stringutil.forcebytestr(e) |
|
1380 | 1380 | problems += 1 |
|
1381 | 1381 | |
|
1382 | 1382 | fm.condwrite(username, 'username', _("checking username (%s)\n"), username) |
|
1383 | 1383 | fm.condwrite(err, 'usernameerror', _("checking username...\n %s\n" |
|
1384 | 1384 | " (specify a username in your configuration file)\n"), err) |
|
1385 | 1385 | |
|
1386 | 1386 | for name, mod in extensions.extensions(): |
|
1387 | 1387 | handler = getattr(mod, 'debuginstall', None) |
|
1388 | 1388 | if handler is not None: |
|
1389 | 1389 | problems += handler(ui, fm) |
|
1390 | 1390 | |
|
1391 | 1391 | fm.condwrite(not problems, '', |
|
1392 | 1392 | _("no problems detected\n")) |
|
1393 | 1393 | if not problems: |
|
1394 | 1394 | fm.data(problems=problems) |
|
1395 | 1395 | fm.condwrite(problems, 'problems', |
|
1396 | 1396 | _("%d problems detected," |
|
1397 | 1397 | " please check your install!\n"), problems) |
|
1398 | 1398 | fm.end() |
|
1399 | 1399 | |
|
1400 | 1400 | return problems |
|
1401 | 1401 | |
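Because the function returns the problem count, the exit status doubles as a health check; a minimal sketch::

    $ hg debuginstall
    $ hg debuginstall -T json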
|
1402 | 1402 | @command('debugknown', [], _('REPO ID...'), norepo=True) |
|
1403 | 1403 | def debugknown(ui, repopath, *ids, **opts): |
|
1404 | 1404 | """test whether node ids are known to a repo |
|
1405 | 1405 | |
|
1406 | 1406 | Every ID must be a full-length hex node id string. Returns a list of 0s |
|
1407 | 1407 | and 1s indicating unknown/known. |
|
1408 | 1408 | """ |
|
1409 | 1409 | opts = pycompat.byteskwargs(opts) |
|
1410 | 1410 | repo = hg.peer(ui, opts, repopath) |
|
1411 | 1411 | if not repo.capable('known'): |
|
1412 | 1412 | raise error.Abort("known() not supported by target repository") |
|
1413 | 1413 | flags = repo.known([bin(s) for s in ids]) |
|
1414 | 1414 | ui.write("%s\n" % ("".join([f and "1" or "0" for f in flags]))) |
|
1415 | 1415 | |
|
1416 | 1416 | @command('debuglabelcomplete', [], _('LABEL...')) |
|
1417 | 1417 | def debuglabelcomplete(ui, repo, *args): |
|
1418 | 1418 | '''backwards compatibility with old bash completion scripts (DEPRECATED)''' |
|
1419 | 1419 | debugnamecomplete(ui, repo, *args) |
|
1420 | 1420 | |
|
1421 | 1421 | @command('debuglocks', |
|
1422 | 1422 | [('L', 'force-lock', None, _('free the store lock (DANGEROUS)')), |
|
1423 | 1423 | ('W', 'force-wlock', None, |
|
1424 | 1424 | _('free the working state lock (DANGEROUS)')), |
|
1425 | 1425 | ('s', 'set-lock', None, _('set the store lock until stopped')), |
|
1426 | 1426 | ('S', 'set-wlock', None, |
|
1427 | 1427 | _('set the working state lock until stopped'))], |
|
1428 | 1428 | _('[OPTION]...')) |
|
1429 | 1429 | def debuglocks(ui, repo, **opts): |
|
1430 | 1430 | """show or modify state of locks |
|
1431 | 1431 | |
|
1432 | 1432 | By default, this command will show which locks are held. This |
|
1433 | 1433 | includes the user and process holding the lock, the amount of time |
|
1434 | 1434 | the lock has been held, and the machine name where the process is |
|
1435 | 1435 | running if it's not local. |
|
1436 | 1436 | |
|
1437 | 1437 | Locks protect the integrity of Mercurial's data, so should be |
|
1438 | 1438 | treated with care. System crashes or other interruptions may cause |
|
1439 | 1439 | locks to not be properly released, though Mercurial will usually |
|
1440 | 1440 | detect and remove such stale locks automatically. |
|
1441 | 1441 | |
|
1442 | 1442 | However, detecting stale locks may not always be possible (for |
|
1443 | 1443 | instance, on a shared filesystem). Removing locks may also be |
|
1444 | 1444 | blocked by filesystem permissions. |
|
1445 | 1445 | |
|
1446 | 1446 | Setting a lock will prevent other commands from changing the data. |
|
1447 | 1447 | The command will wait until an interruption (SIGINT, SIGTERM, ...) occurs. |
|
1448 | 1448 | The set locks are removed when the command exits. |
|
1449 | 1449 | |
|
1450 | 1450 | Returns 0 if no locks are held. |
|
1451 | 1451 | |
|
1452 | 1452 | """ |
|
1453 | 1453 | |
|
1454 | 1454 | if opts.get(r'force_lock'): |
|
1455 | 1455 | repo.svfs.unlink('lock') |
|
1456 | 1456 | if opts.get(r'force_wlock'): |
|
1457 | 1457 | repo.vfs.unlink('wlock') |
|
1458 | 1458 | if opts.get(r'force_lock') or opts.get(r'force_wlock'): |
|
1459 | 1459 | return 0 |
|
1460 | 1460 | |
|
1461 | 1461 | locks = [] |
|
1462 | 1462 | try: |
|
1463 | 1463 | if opts.get(r'set_wlock'): |
|
1464 | 1464 | try: |
|
1465 | 1465 | locks.append(repo.wlock(False)) |
|
1466 | 1466 | except error.LockHeld: |
|
1467 | 1467 | raise error.Abort(_('wlock is already held')) |
|
1468 | 1468 | if opts.get(r'set_lock'): |
|
1469 | 1469 | try: |
|
1470 | 1470 | locks.append(repo.lock(False)) |
|
1471 | 1471 | except error.LockHeld: |
|
1472 | 1472 | raise error.Abort(_('lock is already held')) |
|
1473 | 1473 | if len(locks): |
|
1474 | 1474 | ui.promptchoice(_("ready to release the lock (y)? $$ &Yes")) |
|
1475 | 1475 | return 0 |
|
1476 | 1476 | finally: |
|
1477 | 1477 | release(*locks) |
|
1478 | 1478 | |
|
1479 | 1479 | now = time.time() |
|
1480 | 1480 | held = 0 |
|
1481 | 1481 | |
|
1482 | 1482 | def report(vfs, name, method): |
|
1483 | 1483 | # this causes stale locks to get reaped for more accurate reporting |
|
1484 | 1484 | try: |
|
1485 | 1485 | l = method(False) |
|
1486 | 1486 | except error.LockHeld: |
|
1487 | 1487 | l = None |
|
1488 | 1488 | |
|
1489 | 1489 | if l: |
|
1490 | 1490 | l.release() |
|
1491 | 1491 | else: |
|
1492 | 1492 | try: |
|
1493 | 1493 | st = vfs.lstat(name) |
|
1494 | 1494 | age = now - st[stat.ST_MTIME] |
|
1495 | 1495 | user = util.username(st.st_uid) |
|
1496 | 1496 | locker = vfs.readlock(name) |
|
1497 | 1497 | if ":" in locker: |
|
1498 | 1498 | host, pid = locker.split(':') |
|
1499 | 1499 | if host == socket.gethostname(): |
|
1500 | 1500 | locker = 'user %s, process %s' % (user or b'None', pid) |
|
1501 | 1501 | else: |
|
1502 | 1502 | locker = ('user %s, process %s, host %s' |
|
1503 | 1503 | % (user or b'None', pid, host)) |
|
1504 | 1504 | ui.write(("%-6s %s (%ds)\n") % (name + ":", locker, age)) |
|
1505 | 1505 | return 1 |
|
1506 | 1506 | except OSError as e: |
|
1507 | 1507 | if e.errno != errno.ENOENT: |
|
1508 | 1508 | raise |
|
1509 | 1509 | |
|
1510 | 1510 | ui.write(("%-6s free\n") % (name + ":")) |
|
1511 | 1511 | return 0 |
|
1512 | 1512 | |
|
1513 | 1513 | held += report(repo.svfs, "lock", repo.lock) |
|
1514 | 1514 | held += report(repo.vfs, "wlock", repo.wlock) |
|
1515 | 1515 | |
|
1516 | 1516 | return held |
|
1517 | 1517 | |
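A minimal sketch of the two modes described in the docstring: one shell takes and holds the store lock until prompted, another reports who holds what::

    $ hg debuglocks --set-lock     # shell 1: holds the store lock until the prompt is answered
    $ hg debuglocks                # shell 2: reports lock/wlock holders or 'free'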
|
1518 | 1518 | @command('debugmanifestfulltextcache', [ |
|
1519 | 1519 | ('', 'clear', False, _('clear the cache')), |
|
1520 | 1520 | ('a', 'add', [], _('add the given manifest nodes to the cache'), |
|
1521 | 1521 | _('NODE')) |
|
1522 | 1522 | ], '') |
|
1523 | 1523 | def debugmanifestfulltextcache(ui, repo, add=(), **opts): |
|
1524 | 1524 | """show, clear or amend the contents of the manifest fulltext cache""" |
|
1525 | 1525 | |
|
1526 | 1526 | def getcache(): |
|
1527 | 1527 | r = repo.manifestlog.getstorage(b'') |
|
1528 | 1528 | try: |
|
1529 | 1529 | return r._fulltextcache |
|
1530 | 1530 | except AttributeError: |
|
1531 | 1531 | msg = _("Current revlog implementation doesn't appear to have a " |
|
1532 | 1532 | "manifest fulltext cache\n") |
|
1533 | 1533 | raise error.Abort(msg) |
|
1534 | 1534 | |
|
1535 | 1535 | if opts.get(r'clear'): |
|
1536 | 1536 | with repo.wlock(): |
|
1537 | 1537 | cache = getcache() |
|
1538 | 1538 | cache.clear(clear_persisted_data=True) |
|
1539 | 1539 | return |
|
1540 | 1540 | |
|
1541 | 1541 | if add: |
|
1542 | 1542 | with repo.wlock(): |
|
1543 | 1543 | m = repo.manifestlog |
|
1544 | 1544 | store = m.getstorage(b'') |
|
1545 | 1545 | for n in add: |
|
1546 | 1546 | try: |
|
1547 | 1547 | manifest = m[store.lookup(n)] |
|
1548 | 1548 | except error.LookupError as e: |
|
1549 | 1549 | raise error.Abort(e, hint="Check your manifest node id") |
|
1550 | 1550 | manifest.read() # stores revision in cache too
|
1551 | 1551 | return |
|
1552 | 1552 | |
|
1553 | 1553 | cache = getcache() |
|
1554 | 1554 | if not len(cache): |
|
1555 | 1555 | ui.write(_('cache empty\n')) |
|
1556 | 1556 | else: |
|
1557 | 1557 | ui.write( |
|
1558 | 1558 | _('cache contains %d manifest entries, in order of most to ' |
|
1559 | 1559 | 'least recent:\n') % (len(cache),)) |
|
1560 | 1560 | totalsize = 0 |
|
1561 | 1561 | for nodeid in cache: |
|
1562 | 1562 | # Use cache.peek to not update the LRU order
|
1563 | 1563 | data = cache.peek(nodeid) |
|
1564 | 1564 | size = len(data) |
|
1565 | 1565 | totalsize += size + 24 # 20 bytes nodeid, 4 bytes size |
|
1566 | 1566 | ui.write(_('id: %s, size %s\n') % ( |
|
1567 | 1567 | hex(nodeid), util.bytecount(size))) |
|
1568 | 1568 | ondisk = cache._opener.stat('manifestfulltextcache').st_size |
|
1569 | 1569 | ui.write( |
|
1570 | 1570 | _('total cache data size %s, on-disk %s\n') % ( |
|
1571 | 1571 | util.bytecount(totalsize), util.bytecount(ondisk)) |
|
1572 | 1572 | ) |
|
1573 | 1573 | |
|
1574 | 1574 | @command('debugmergestate', [], '') |
|
1575 | 1575 | def debugmergestate(ui, repo, *args): |
|
1576 | 1576 | """print merge state |
|
1577 | 1577 | |
|
1578 | 1578 | Use --verbose to print out information about whether v1 or v2 merge state |
|
1579 | 1579 | was chosen.""" |
|
1580 | 1580 | def _hashornull(h): |
|
1581 | 1581 | if h == nullhex: |
|
1582 | 1582 | return 'null' |
|
1583 | 1583 | else: |
|
1584 | 1584 | return h |
|
1585 | 1585 | |
|
1586 | 1586 | def printrecords(version): |
|
1587 | 1587 | ui.write(('* version %d records\n') % version) |
|
1588 | 1588 | if version == 1: |
|
1589 | 1589 | records = v1records |
|
1590 | 1590 | else: |
|
1591 | 1591 | records = v2records |
|
1592 | 1592 | |
|
1593 | 1593 | for rtype, record in records: |
|
1594 | 1594 | # pretty print some record types |
|
1595 | 1595 | if rtype == 'L': |
|
1596 | 1596 | ui.write(('local: %s\n') % record) |
|
1597 | 1597 | elif rtype == 'O': |
|
1598 | 1598 | ui.write(('other: %s\n') % record) |
|
1599 | 1599 | elif rtype == 'm': |
|
1600 | 1600 | driver, mdstate = record.split('\0', 1) |
|
1601 | 1601 | ui.write(('merge driver: %s (state "%s")\n') |
|
1602 | 1602 | % (driver, mdstate)) |
|
1603 | 1603 | elif rtype in 'FDC': |
|
1604 | 1604 | r = record.split('\0') |
|
1605 | 1605 | f, state, hash, lfile, afile, anode, ofile = r[0:7] |
|
1606 | 1606 | if version == 1: |
|
1607 | 1607 | onode = 'not stored in v1 format' |
|
1608 | 1608 | flags = r[7] |
|
1609 | 1609 | else: |
|
1610 | 1610 | onode, flags = r[7:9] |
|
1611 | 1611 | ui.write(('file: %s (record type "%s", state "%s", hash %s)\n') |
|
1612 | 1612 | % (f, rtype, state, _hashornull(hash))) |
|
1613 | 1613 | ui.write((' local path: %s (flags "%s")\n') % (lfile, flags)) |
|
1614 | 1614 | ui.write((' ancestor path: %s (node %s)\n') |
|
1615 | 1615 | % (afile, _hashornull(anode))) |
|
1616 | 1616 | ui.write((' other path: %s (node %s)\n') |
|
1617 | 1617 | % (ofile, _hashornull(onode))) |
|
1618 | 1618 | elif rtype == 'f': |
|
1619 | 1619 | filename, rawextras = record.split('\0', 1) |
|
1620 | 1620 | extras = rawextras.split('\0') |
|
1621 | 1621 | i = 0 |
|
1622 | 1622 | extrastrings = [] |
|
1623 | 1623 | while i < len(extras): |
|
1624 | 1624 | extrastrings.append('%s = %s' % (extras[i], extras[i + 1])) |
|
1625 | 1625 | i += 2 |
|
1626 | 1626 | |
|
1627 | 1627 | ui.write(('file extras: %s (%s)\n') |
|
1628 | 1628 | % (filename, ', '.join(extrastrings))) |
|
1629 | 1629 | elif rtype == 'l': |
|
1630 | 1630 | labels = record.split('\0', 2) |
|
1631 | 1631 | labels = [l for l in labels if len(l) > 0] |
|
1632 | 1632 | ui.write(('labels:\n')) |
|
1633 | 1633 | ui.write((' local: %s\n' % labels[0])) |
|
1634 | 1634 | ui.write((' other: %s\n' % labels[1])) |
|
1635 | 1635 | if len(labels) > 2: |
|
1636 | 1636 | ui.write((' base: %s\n' % labels[2])) |
|
1637 | 1637 | else: |
|
1638 | 1638 | ui.write(('unrecognized entry: %s\t%s\n') |
|
1639 | 1639 | % (rtype, record.replace('\0', '\t'))) |
|
1640 | 1640 | |
|
1641 | 1641 | # Avoid mergestate.read() since it may raise an exception for unsupported |
|
1642 | 1642 | # merge state records. We shouldn't be doing this, but this is OK since this |
|
1643 | 1643 | # command is pretty low-level. |
|
1644 | 1644 | ms = mergemod.mergestate(repo) |
|
1645 | 1645 | |
|
1646 | 1646 | # sort so that reasonable information is on top |
|
1647 | 1647 | v1records = ms._readrecordsv1() |
|
1648 | 1648 | v2records = ms._readrecordsv2() |
|
1649 | 1649 | order = 'LOml' |
|
1650 | 1650 | def key(r): |
|
1651 | 1651 | idx = order.find(r[0]) |
|
1652 | 1652 | if idx == -1: |
|
1653 | 1653 | return (1, r[1]) |
|
1654 | 1654 | else: |
|
1655 | 1655 | return (0, idx) |
|
1656 | 1656 | v1records.sort(key=key) |
|
1657 | 1657 | v2records.sort(key=key) |
|
1658 | 1658 | |
|
1659 | 1659 | if not v1records and not v2records: |
|
1660 | 1660 | ui.write(('no merge state found\n')) |
|
1661 | 1661 | elif not v2records: |
|
1662 | 1662 | ui.note(('no version 2 merge state\n')) |
|
1663 | 1663 | printrecords(1) |
|
1664 | 1664 | elif ms._v1v2match(v1records, v2records): |
|
1665 | 1665 | ui.note(('v1 and v2 states match: using v2\n')) |
|
1666 | 1666 | printrecords(2) |
|
1667 | 1667 | else: |
|
1668 | 1668 | ui.note(('v1 and v2 states mismatch: using v1\n')) |
|
1669 | 1669 | printrecords(1) |
|
1670 | 1670 | if ui.verbose: |
|
1671 | 1671 | printrecords(2) |
|
1672 | 1672 | |
|
1673 | 1673 | @command('debugnamecomplete', [], _('NAME...')) |
|
1674 | 1674 | def debugnamecomplete(ui, repo, *args): |
|
1675 | 1675 | '''complete "names" - tags, open branch names, bookmark names''' |
|
1676 | 1676 | |
|
1677 | 1677 | names = set() |
|
1678 | 1678 | # since we previously only listed open branches, we will handle that |
|
1679 | 1679 | # specially (after this for loop) |
|
1680 | 1680 | for name, ns in repo.names.iteritems(): |
|
1681 | 1681 | if name != 'branches': |
|
1682 | 1682 | names.update(ns.listnames(repo)) |
|
1683 | 1683 | names.update(tag for (tag, heads, tip, closed) |
|
1684 | 1684 | in repo.branchmap().iterbranches() if not closed) |
|
1685 | 1685 | completions = set() |
|
1686 | 1686 | if not args: |
|
1687 | 1687 | args = [''] |
|
1688 | 1688 | for a in args: |
|
1689 | 1689 | completions.update(n for n in names if n.startswith(a)) |
|
1690 | 1690 | ui.write('\n'.join(sorted(completions))) |
|
1691 | 1691 | ui.write('\n') |
|
1692 | 1692 | |
|
1693 | 1693 | @command('debugobsolete', |
|
1694 | 1694 | [('', 'flags', 0, _('markers flag')), |
|
1695 | 1695 | ('', 'record-parents', False, |
|
1696 | 1696 | _('record parent information for the precursor')), |
|
1697 | 1697 | ('r', 'rev', [], _('display markers relevant to REV')), |
|
1698 | 1698 | ('', 'exclusive', False, _('restrict display to markers only ' |
|
1699 | 1699 | 'relevant to REV')), |
|
1700 | 1700 | ('', 'index', False, _('display index of the marker')), |
|
1701 | 1701 | ('', 'delete', [], _('delete markers specified by indices')), |
|
1702 | 1702 | ] + cmdutil.commitopts2 + cmdutil.formatteropts, |
|
1703 | 1703 | _('[OBSOLETED [REPLACEMENT ...]]')) |
|
1704 | 1704 | def debugobsolete(ui, repo, precursor=None, *successors, **opts): |
|
1705 | 1705 | """create arbitrary obsolete marker |
|
1706 | 1706 | |
|
1707 | 1707 | With no arguments, displays the list of obsolescence markers.""" |
|
1708 | 1708 | |
|
1709 | 1709 | opts = pycompat.byteskwargs(opts) |
|
1710 | 1710 | |
|
1711 | 1711 | def parsenodeid(s): |
|
1712 | 1712 | try: |
|
1713 | 1713 | # We do not use revsingle/revrange functions here to accept |
|
1714 | 1714 | # arbitrary node identifiers, possibly not present in the |
|
1715 | 1715 | # local repository. |
|
1716 | 1716 | n = bin(s) |
|
1717 | 1717 | if len(n) != len(nullid): |
|
1718 | 1718 | raise TypeError() |
|
1719 | 1719 | return n |
|
1720 | 1720 | except TypeError: |
|
1721 | 1721 | raise error.Abort('changeset references must be full hexadecimal ' |
|
1722 | 1722 | 'node identifiers') |
|
1723 | 1723 | |
|
1724 | 1724 | if opts.get('delete'): |
|
1725 | 1725 | indices = [] |
|
1726 | 1726 | for v in opts.get('delete'): |
|
1727 | 1727 | try: |
|
1728 | 1728 | indices.append(int(v)) |
|
1729 | 1729 | except ValueError: |
|
1730 | 1730 | raise error.Abort(_('invalid index value: %r') % v, |
|
1731 | 1731 | hint=_('use integers for indices')) |
|
1732 | 1732 | |
|
1733 | 1733 | if repo.currenttransaction(): |
|
1734 | 1734 | raise error.Abort(_('cannot delete obsmarkers in the middle ' |
|
1735 | 1735 | 'of transaction.')) |
|
1736 | 1736 | |
|
1737 | 1737 | with repo.lock(): |
|
1738 | 1738 | n = repair.deleteobsmarkers(repo.obsstore, indices) |
|
1739 | 1739 | ui.write(_('deleted %i obsolescence markers\n') % n) |
|
1740 | 1740 | |
|
1741 | 1741 | return |
|
1742 | 1742 | |
|
1743 | 1743 | if precursor is not None: |
|
1744 | 1744 | if opts['rev']: |
|
1745 | 1745 | raise error.Abort('cannot select revision when creating marker') |
|
1746 | 1746 | metadata = {} |
|
1747 | 1747 | metadata['user'] = encoding.fromlocal(opts['user'] or ui.username()) |
|
1748 | 1748 | succs = tuple(parsenodeid(succ) for succ in successors) |
|
1749 | 1749 | l = repo.lock() |
|
1750 | 1750 | try: |
|
1751 | 1751 | tr = repo.transaction('debugobsolete') |
|
1752 | 1752 | try: |
|
1753 | 1753 | date = opts.get('date') |
|
1754 | 1754 | if date: |
|
1755 | 1755 | date = dateutil.parsedate(date) |
|
1756 | 1756 | else: |
|
1757 | 1757 | date = None |
|
1758 | 1758 | prec = parsenodeid(precursor) |
|
1759 | 1759 | parents = None |
|
1760 | 1760 | if opts['record_parents']: |
|
1761 | 1761 | if prec not in repo.unfiltered(): |
|
1762 | 1762 | raise error.Abort('cannot use --record-parents on '
|
1763 | 1763 | 'unknown changesets') |
|
1764 | 1764 | parents = repo.unfiltered()[prec].parents() |
|
1765 | 1765 | parents = tuple(p.node() for p in parents) |
|
1766 | 1766 | repo.obsstore.create(tr, prec, succs, opts['flags'], |
|
1767 | 1767 | parents=parents, date=date, |
|
1768 | 1768 | metadata=metadata, ui=ui) |
|
1769 | 1769 | tr.close() |
|
1770 | 1770 | except ValueError as exc: |
|
1771 | 1771 | raise error.Abort(_('bad obsmarker input: %s') % |
|
1772 | 1772 | pycompat.bytestr(exc)) |
|
1773 | 1773 | finally: |
|
1774 | 1774 | tr.release() |
|
1775 | 1775 | finally: |
|
1776 | 1776 | l.release() |
|
1777 | 1777 | else: |
|
1778 | 1778 | if opts['rev']: |
|
1779 | 1779 | revs = scmutil.revrange(repo, opts['rev']) |
|
1780 | 1780 | nodes = [repo[r].node() for r in revs] |
|
1781 | 1781 | markers = list(obsutil.getmarkers(repo, nodes=nodes, |
|
1782 | 1782 | exclusive=opts['exclusive'])) |
|
1783 | 1783 | markers.sort(key=lambda x: x._data) |
|
1784 | 1784 | else: |
|
1785 | 1785 | markers = obsutil.getmarkers(repo) |
|
1786 | 1786 | |
|
1787 | 1787 | markerstoiter = markers |
|
1788 | 1788 | isrelevant = lambda m: True |
|
1789 | 1789 | if opts.get('rev') and opts.get('index'): |
|
1790 | 1790 | markerstoiter = obsutil.getmarkers(repo) |
|
1791 | 1791 | markerset = set(markers) |
|
1792 | 1792 | isrelevant = lambda m: m in markerset |
|
1793 | 1793 | |
|
1794 | 1794 | fm = ui.formatter('debugobsolete', opts) |
|
1795 | 1795 | for i, m in enumerate(markerstoiter): |
|
1796 | 1796 | if not isrelevant(m): |
|
1797 | 1797 | # marker can be irrelevant when we're iterating over a set |
|
1798 | 1798 | # of markers (markerstoiter) which is bigger than the set |
|
1799 | 1799 | # of markers we want to display (markers) |
|
1800 | 1800 | # this can happen if both --index and --rev options are |
|
1801 | 1801 | # provided and thus we need to iterate over all of the markers |
|
1802 | 1802 | # to get the correct indices, but only display the ones that |
|
1803 | 1803 | # are relevant to --rev value |
|
1804 | 1804 | continue |
|
1805 | 1805 | fm.startitem() |
|
1806 | 1806 | ind = i if opts.get('index') else None |
|
1807 | 1807 | cmdutil.showmarker(fm, m, index=ind) |
|
1808 | 1808 | fm.end() |
|
1809 | 1809 | |
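A minimal sketch; as ``parsenodeid`` above requires, changeset references must be full hexadecimal node ids (placeholders shown)::

    $ hg debugobsolete                                      # list all markers
    $ hg debugobsolete <40-hex-old-node> <40-hex-new-node>  # mark old as replaced by new
    $ hg debugobsolete --rev tip --index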
|
1810 | 1810 | @command('debugp1copies', |
|
1811 | 1811 | [('r', 'rev', '', _('revision to debug'), _('REV'))], |
|
1812 | 1812 | _('[-r REV]')) |
|
1813 | 1813 | def debugp1copies(ui, repo, **opts): |
|
1814 | 1814 | """dump copy information compared to p1""" |
|
1815 | 1815 | |
|
1816 | 1816 | opts = pycompat.byteskwargs(opts) |
|
1817 | 1817 | ctx = scmutil.revsingle(repo, opts.get('rev'), default=None) |
|
1818 | 1818 | for dst, src in ctx.p1copies().items(): |
|
1819 | 1819 | ui.write('%s -> %s\n' % (src, dst)) |
|
1820 | 1820 | |
|
1821 | 1821 | @command('debugp2copies', |
|
1822 | 1822 | [('r', 'rev', '', _('revision to debug'), _('REV'))], |
|
1823 | 1823 | _('[-r REV]')) |
|
1824 | 1824 | def debugp2copies(ui, repo, **opts):
|
1825 | 1825 | """dump copy information compared to p2""" |
|
1826 | 1826 | |
|
1827 | 1827 | opts = pycompat.byteskwargs(opts) |
|
1828 | 1828 | ctx = scmutil.revsingle(repo, opts.get('rev'), default=None) |
|
1829 | 1829 | for dst, src in ctx.p2copies().items(): |
|
1830 | 1830 | ui.write('%s -> %s\n' % (src, dst)) |
|
1831 | 1831 | |
|
1832 | 1832 | @command('debugpathcomplete', |
|
1833 | 1833 | [('f', 'full', None, _('complete an entire path')), |
|
1834 | 1834 | ('n', 'normal', None, _('show only normal files')), |
|
1835 | 1835 | ('a', 'added', None, _('show only added files')), |
|
1836 | 1836 | ('r', 'removed', None, _('show only removed files'))], |
|
1837 | 1837 | _('FILESPEC...')) |
|
1838 | 1838 | def debugpathcomplete(ui, repo, *specs, **opts): |
|
1839 | 1839 | '''complete part or all of a tracked path |
|
1840 | 1840 | |
|
1841 | 1841 | This command supports shells that offer path name completion. It |
|
1842 | 1842 | currently completes only files already known to the dirstate. |
|
1843 | 1843 | |
|
1844 | 1844 | Completion extends only to the next path segment unless |
|
1845 | 1845 | --full is specified, in which case entire paths are used.''' |
|
1846 | 1846 | |
|
1847 | 1847 | def complete(path, acceptable): |
|
1848 | 1848 | dirstate = repo.dirstate |
|
1849 | 1849 | spec = os.path.normpath(os.path.join(encoding.getcwd(), path)) |
|
1850 | 1850 | rootdir = repo.root + pycompat.ossep |
|
1851 | 1851 | if spec != repo.root and not spec.startswith(rootdir): |
|
1852 | 1852 | return [], [] |
|
1853 | 1853 | if os.path.isdir(spec): |
|
1854 | 1854 | spec += '/' |
|
1855 | 1855 | spec = spec[len(rootdir):] |
|
1856 | 1856 | fixpaths = pycompat.ossep != '/' |
|
1857 | 1857 | if fixpaths: |
|
1858 | 1858 | spec = spec.replace(pycompat.ossep, '/') |
|
1859 | 1859 | speclen = len(spec) |
|
1860 | 1860 | fullpaths = opts[r'full'] |
|
1861 | 1861 | files, dirs = set(), set() |
|
1862 | 1862 | adddir, addfile = dirs.add, files.add |
|
1863 | 1863 | for f, st in dirstate.iteritems(): |
|
1864 | 1864 | if f.startswith(spec) and st[0] in acceptable: |
|
1865 | 1865 | if fixpaths: |
|
1866 | 1866 | f = f.replace('/', pycompat.ossep) |
|
1867 | 1867 | if fullpaths: |
|
1868 | 1868 | addfile(f) |
|
1869 | 1869 | continue |
|
1870 | 1870 | s = f.find(pycompat.ossep, speclen) |
|
1871 | 1871 | if s >= 0: |
|
1872 | 1872 | adddir(f[:s]) |
|
1873 | 1873 | else: |
|
1874 | 1874 | addfile(f) |
|
1875 | 1875 | return files, dirs |
|
1876 | 1876 | |
|
1877 | 1877 | acceptable = '' |
|
1878 | 1878 | if opts[r'normal']: |
|
1879 | 1879 | acceptable += 'nm' |
|
1880 | 1880 | if opts[r'added']: |
|
1881 | 1881 | acceptable += 'a' |
|
1882 | 1882 | if opts[r'removed']: |
|
1883 | 1883 | acceptable += 'r' |
|
1884 | 1884 | cwd = repo.getcwd() |
|
1885 | 1885 | if not specs: |
|
1886 | 1886 | specs = ['.'] |
|
1887 | 1887 | |
|
1888 | 1888 | files, dirs = set(), set() |
|
1889 | 1889 | for spec in specs: |
|
1890 | 1890 | f, d = complete(spec, acceptable or 'nmar') |
|
1891 | 1891 | files.update(f) |
|
1892 | 1892 | dirs.update(d) |
|
1893 | 1893 | files.update(dirs) |
|
1894 | 1894 | ui.write('\n'.join(repo.pathto(p, cwd) for p in sorted(files))) |
|
1895 | 1895 | ui.write('\n') |
|
1896 | 1896 | |
|
1897 | 1897 | @command('debugpathcopies', |
|
1898 | 1898 | cmdutil.walkopts, |
|
1899 | 1899 | 'hg debugpathcopies REV1 REV2 [FILE]', |
|
1900 | 1900 | inferrepo=True) |
|
1901 | 1901 | def debugpathcopies(ui, repo, rev1, rev2, *pats, **opts): |
|
1902 | 1902 | """show copies between two revisions""" |
|
1903 | 1903 | ctx1 = scmutil.revsingle(repo, rev1) |
|
1904 | 1904 | ctx2 = scmutil.revsingle(repo, rev2) |
|
1905 | 1905 | m = scmutil.match(ctx1, pats, opts) |
|
1906 | 1906 | for dst, src in sorted(copies.pathcopies(ctx1, ctx2, m).items()): |
|
1907 | 1907 | ui.write('%s -> %s\n' % (src, dst)) |
|
1908 | 1908 | |
|
1909 | 1909 | @command('debugpeer', [], _('PATH'), norepo=True) |
|
1910 | 1910 | def debugpeer(ui, path): |
|
1911 | 1911 | """establish a connection to a peer repository""" |
|
1912 | 1912 | # Always enable peer request logging. Requires --debug to display |
|
1913 | 1913 | # though. |
|
1914 | 1914 | overrides = { |
|
1915 | 1915 | ('devel', 'debug.peer-request'): True, |
|
1916 | 1916 | } |
|
1917 | 1917 | |
|
1918 | 1918 | with ui.configoverride(overrides): |
|
1919 | 1919 | peer = hg.peer(ui, {}, path) |
|
1920 | 1920 | |
|
1921 | 1921 | local = peer.local() is not None |
|
1922 | 1922 | canpush = peer.canpush() |
|
1923 | 1923 | |
|
1924 | 1924 | ui.write(_('url: %s\n') % peer.url()) |
|
1925 | 1925 | ui.write(_('local: %s\n') % (_('yes') if local else _('no'))) |
|
1926 | 1926 | ui.write(_('pushable: %s\n') % (_('yes') if canpush else _('no'))) |
|
1927 | 1927 | |
|
1928 | 1928 | @command('debugpickmergetool', |
|
1929 | 1929 | [('r', 'rev', '', _('check for files in this revision'), _('REV')), |
|
1930 | 1930 | ('', 'changedelete', None, _('emulate merging change and delete')), |
|
1931 | 1931 | ] + cmdutil.walkopts + cmdutil.mergetoolopts, |
|
1932 | 1932 | _('[PATTERN]...'), |
|
1933 | 1933 | inferrepo=True) |
|
1934 | 1934 | def debugpickmergetool(ui, repo, *pats, **opts): |
|
1935 | 1935 | """examine which merge tool is chosen for specified file |
|
1936 | 1936 | |
|
1937 | 1937 | As described in :hg:`help merge-tools`, Mercurial examines |
|
1938 | 1938 | configurations below in this order to decide which merge tool is |
|
1939 | 1939 | chosen for the specified file.
|
1940 | 1940 | |
|
1941 | 1941 | 1. ``--tool`` option |
|
1942 | 1942 | 2. ``HGMERGE`` environment variable |
|
1943 | 1943 | 3. configurations in ``merge-patterns`` section |
|
1944 | 1944 | 4. configuration of ``ui.merge`` |
|
1945 | 1945 | 5. configurations in ``merge-tools`` section |
|
1946 | 1946 | 6. ``hgmerge`` tool (for historical reason only) |
|
1947 | 1947 | 7. default tool for fallback (``:merge`` or ``:prompt``) |
|
1948 | 1948 | |
|
1949 | 1949 | This command writes out examination result in the style below:: |
|
1950 | 1950 | |
|
1951 | 1951 | FILE = MERGETOOL |
|
1952 | 1952 | |
|
1953 | 1953 | By default, all files known in the first parent context of the |
|
1954 | 1954 | working directory are examined. Use file patterns and/or -I/-X |
|
1955 | 1955 | options to limit target files. -r/--rev is also useful to examine |
|
1956 | 1956 | files in another context without actual updating to it. |
|
1957 | 1957 | |
|
1958 | 1958 | With --debug, this command shows warning messages while matching |
|
1959 | 1959 | against ``merge-patterns`` and so on, too. It is recommended to |
|
1960 | 1960 | use this option with explicit file patterns and/or -I/-X options, |
|
1961 | 1961 | because this option increases the amount of output per file according
|
1962 | 1962 | to configurations in hgrc. |
|
1963 | 1963 | |
|
1964 | 1964 | With -v/--verbose, this command shows configurations below at |
|
1965 | 1965 | first (only if specified). |
|
1966 | 1966 | |
|
1967 | 1967 | - ``--tool`` option |
|
1968 | 1968 | - ``HGMERGE`` environment variable |
|
1969 | 1969 | - configuration of ``ui.merge`` |
|
1970 | 1970 | |
|
1971 | 1971 | If merge tool is chosen before matching against |
|
1972 | 1972 | ``merge-patterns``, this command can't show any helpful |
|
1973 | 1973 | information, even with --debug. In such a case, the information above is
|
1974 | 1974 | useful to know why a merge tool is chosen. |
|
1975 | 1975 | """ |
|
1976 | 1976 | opts = pycompat.byteskwargs(opts) |
|
1977 | 1977 | overrides = {} |
|
1978 | 1978 | if opts['tool']: |
|
1979 | 1979 | overrides[('ui', 'forcemerge')] = opts['tool'] |
|
1980 | 1980 | ui.note(('with --tool %r\n') % (pycompat.bytestr(opts['tool']))) |
|
1981 | 1981 | |
|
1982 | 1982 | with ui.configoverride(overrides, 'debugmergepatterns'): |
|
1983 | 1983 | hgmerge = encoding.environ.get("HGMERGE") |
|
1984 | 1984 | if hgmerge is not None: |
|
1985 | 1985 | ui.note(('with HGMERGE=%r\n') % (pycompat.bytestr(hgmerge))) |
|
1986 | 1986 | uimerge = ui.config("ui", "merge") |
|
1987 | 1987 | if uimerge: |
|
1988 | 1988 | ui.note(('with ui.merge=%r\n') % (pycompat.bytestr(uimerge))) |
|
1989 | 1989 | |
|
1990 | 1990 | ctx = scmutil.revsingle(repo, opts.get('rev')) |
|
1991 | 1991 | m = scmutil.match(ctx, pats, opts) |
|
1992 | 1992 | changedelete = opts['changedelete'] |
|
1993 | 1993 | for path in ctx.walk(m): |
|
1994 | 1994 | fctx = ctx[path] |
|
1995 | 1995 | try: |
|
1996 | 1996 | if not ui.debugflag: |
|
1997 | 1997 | ui.pushbuffer(error=True) |
|
1998 | 1998 | tool, toolpath = filemerge._picktool(repo, ui, path, |
|
1999 | 1999 | fctx.isbinary(), |
|
2000 | 2000 | 'l' in fctx.flags(), |
|
2001 | 2001 | changedelete) |
|
2002 | 2002 | finally: |
|
2003 | 2003 | if not ui.debugflag: |
|
2004 | 2004 | ui.popbuffer() |
|
2005 | 2005 | ui.write(('%s = %s\n') % (path, tool)) |
|
2006 | 2006 | |
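A minimal sketch; per the precedence list in the docstring, --tool overrides the other configuration sources, and the pattern is a placeholder::

    $ hg debugpickmergetool
    $ hg debugpickmergetool --tool :merge3 '**.c'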
|
2007 | 2007 | @command('debugpushkey', [], _('REPO NAMESPACE [KEY OLD NEW]'), norepo=True) |
|
2008 | 2008 | def debugpushkey(ui, repopath, namespace, *keyinfo, **opts): |
|
2009 | 2009 | '''access the pushkey key/value protocol |
|
2010 | 2010 | |
|
2011 | 2011 | With two args, list the keys in the given namespace. |
|
2012 | 2012 | |
|
2013 | 2013 | With five args, set a key to new if it currently is set to old. |
|
2014 | 2014 | Reports success or failure. |
|
2015 | 2015 | ''' |
|
2016 | 2016 | |
|
2017 | 2017 | target = hg.peer(ui, {}, repopath) |
|
2018 | 2018 | if keyinfo: |
|
2019 | 2019 | key, old, new = keyinfo |
|
2020 | 2020 | with target.commandexecutor() as e: |
|
2021 | 2021 | r = e.callcommand('pushkey', { |
|
2022 | 2022 | 'namespace': namespace, |
|
2023 | 2023 | 'key': key, |
|
2024 | 2024 | 'old': old, |
|
2025 | 2025 | 'new': new, |
|
2026 | 2026 | }).result() |
|
2027 | 2027 | |
|
2028 | 2028 | ui.status(pycompat.bytestr(r) + '\n') |
|
2029 | 2029 | return not r |
|
2030 | 2030 | else: |
|
2031 | 2031 | for k, v in sorted(target.listkeys(namespace).iteritems()): |
|
2032 | 2032 | ui.write("%s\t%s\n" % (stringutil.escapestr(k), |
|
2033 | 2033 | stringutil.escapestr(v))) |
|
2034 | 2034 | |
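A minimal sketch with a placeholder repository path; 'bookmarks' is one of the standard pushkey namespaces, and the five-argument form follows the REPO NAMESPACE KEY OLD NEW synopsis::

    $ hg debugpushkey https://example.com/repo bookmarks
    $ hg debugpushkey https://example.com/repo bookmarks mybook <40-hex-old> <40-hex-new>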
|
2035 | 2035 | @command('debugpvec', [], _('A B')) |
|
2036 | 2036 | def debugpvec(ui, repo, a, b=None): |
|
2037 | 2037 | ca = scmutil.revsingle(repo, a) |
|
2038 | 2038 | cb = scmutil.revsingle(repo, b) |
|
2039 | 2039 | pa = pvec.ctxpvec(ca) |
|
2040 | 2040 | pb = pvec.ctxpvec(cb) |
|
2041 | 2041 | if pa == pb: |
|
2042 | 2042 | rel = "=" |
|
2043 | 2043 | elif pa > pb: |
|
2044 | 2044 | rel = ">" |
|
2045 | 2045 | elif pa < pb: |
|
2046 | 2046 | rel = "<" |
|
2047 | 2047 | elif pa | pb: |
|
2048 | 2048 | rel = "|" |
|
2049 | 2049 | ui.write(_("a: %s\n") % pa) |
|
2050 | 2050 | ui.write(_("b: %s\n") % pb) |
|
2051 | 2051 | ui.write(_("depth(a): %d depth(b): %d\n") % (pa._depth, pb._depth)) |
|
2052 | 2052 | ui.write(_("delta: %d hdist: %d distance: %d relation: %s\n") % |
|
2053 | 2053 | (abs(pa._depth - pb._depth), pvec._hamming(pa._vec, pb._vec), |
|
2054 | 2054 | pa.distance(pb), rel)) |
|
2055 | 2055 | |
|
2056 | 2056 | @command('debugrebuilddirstate|debugrebuildstate', |
|
2057 | 2057 | [('r', 'rev', '', _('revision to rebuild to'), _('REV')), |
|
2058 | 2058 | ('', 'minimal', None, _('only rebuild files that are inconsistent with ' |
|
2059 | 2059 | 'the working copy parent')), |
|
2060 | 2060 | ], |
|
2061 | 2061 | _('[-r REV]')) |
|
2062 | 2062 | def debugrebuilddirstate(ui, repo, rev, **opts): |
|
2063 | 2063 | """rebuild the dirstate as it would look like for the given revision |
|
2064 | 2064 | |
|
2065 | 2065 | If no revision is specified, the first current parent will be used.
|
2066 | 2066 | |
|
2067 | 2067 | The dirstate will be set to the files of the given revision. |
|
2068 | 2068 | The actual working directory content or existing dirstate |
|
2069 | 2069 | information such as adds or removes is not considered. |
|
2070 | 2070 | |
|
2071 | 2071 | ``minimal`` will only rebuild the dirstate status for files that claim to be |
|
2072 | 2072 | tracked but are not in the parent manifest, or that exist in the parent |
|
2073 | 2073 | manifest but are not in the dirstate. It will not change adds, removes, or |
|
2074 | 2074 | modified files that are in the working copy parent. |
|
2075 | 2075 | |
|
2076 | 2076 | One use of this command is to make the next :hg:`status` invocation |
|
2077 | 2077 | check the actual file content. |
|
2078 | 2078 | """ |
|
2079 | 2079 | ctx = scmutil.revsingle(repo, rev) |
|
2080 | 2080 | with repo.wlock(): |
|
2081 | 2081 | dirstate = repo.dirstate |
|
2082 | 2082 | changedfiles = None |
|
2083 | 2083 | # See command doc for what minimal does. |
|
2084 | 2084 | if opts.get(r'minimal'): |
|
2085 | 2085 | manifestfiles = set(ctx.manifest().keys()) |
|
2086 | 2086 | dirstatefiles = set(dirstate) |
|
2087 | 2087 | manifestonly = manifestfiles - dirstatefiles |
|
2088 | 2088 | dsonly = dirstatefiles - manifestfiles |
|
2089 | 2089 | dsnotadded = set(f for f in dsonly if dirstate[f] != 'a') |
|
2090 | 2090 | changedfiles = manifestonly | dsnotadded |
|
2091 | 2091 | |
|
2092 | 2092 | dirstate.rebuild(ctx.node(), ctx.manifest(), changedfiles) |
|
2093 | 2093 | |
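A minimal sketch of the two modes described in the docstring::

    $ hg debugrebuilddirstate -r .       # reset the dirstate to the working copy parent
    $ hg debugrebuilddirstate --minimal  # only fix entries inconsistent with that parent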
|
2094 | 2094 | @command('debugrebuildfncache', [], '') |
|
2095 | 2095 | def debugrebuildfncache(ui, repo): |
|
2096 | 2096 | """rebuild the fncache file""" |
|
2097 | 2097 | repair.rebuildfncache(ui, repo) |
|
2098 | 2098 | |
|
2099 | 2099 | @command('debugrename', |
|
2100 | 2100 | [('r', 'rev', '', _('revision to debug'), _('REV'))], |
|
2101 | 2101 | _('[-r REV] [FILE]...')) |
|
2102 | 2102 | def debugrename(ui, repo, *pats, **opts): |
|
2103 | 2103 | """dump rename information""" |
|
2104 | 2104 | |
|
2105 | 2105 | opts = pycompat.byteskwargs(opts) |
|
2106 | 2106 | ctx = scmutil.revsingle(repo, opts.get('rev')) |
|
2107 | 2107 | m = scmutil.match(ctx, pats, opts) |
|
2108 | 2108 | for abs in ctx.walk(m): |
|
2109 | 2109 | fctx = ctx[abs] |
|
2110 | 2110 | o = fctx.filelog().renamed(fctx.filenode()) |
|
2111 | 2111 | rel = repo.pathto(abs) |
|
2112 | 2112 | if o: |
|
2113 | 2113 | ui.write(_("%s renamed from %s:%s\n") % (rel, o[0], hex(o[1]))) |
|
2114 | 2114 | else: |
|
2115 | 2115 | ui.write(_("%s not renamed\n") % rel) |
|
2116 | 2116 | |
|
2117 | 2117 | @command('debugrevlog', cmdutil.debugrevlogopts + |
|
2118 | 2118 | [('d', 'dump', False, _('dump index data'))], |
|
2119 | 2119 | _('-c|-m|FILE'), |
|
2120 | 2120 | optionalrepo=True) |
|
2121 | 2121 | def debugrevlog(ui, repo, file_=None, **opts): |
|
2122 | 2122 | """show data and statistics about a revlog""" |
|
2123 | 2123 | opts = pycompat.byteskwargs(opts) |
|
2124 | 2124 | r = cmdutil.openrevlog(repo, 'debugrevlog', file_, opts) |
|
2125 | 2125 | |
|
2126 | 2126 | if opts.get("dump"): |
|
2127 | 2127 | numrevs = len(r) |
|
2128 | 2128 | ui.write(("# rev p1rev p2rev start end deltastart base p1 p2" |
|
2129 | 2129 | " rawsize totalsize compression heads chainlen\n")) |
|
2130 | 2130 | ts = 0 |
|
2131 | 2131 | heads = set() |
|
2132 | 2132 | |
|
2133 | 2133 | for rev in pycompat.xrange(numrevs): |
|
2134 | 2134 | dbase = r.deltaparent(rev) |
|
2135 | 2135 | if dbase == -1: |
|
2136 | 2136 | dbase = rev |
|
2137 | 2137 | cbase = r.chainbase(rev) |
|
2138 | 2138 | clen = r.chainlen(rev) |
|
2139 | 2139 | p1, p2 = r.parentrevs(rev) |
|
2140 | 2140 | rs = r.rawsize(rev) |
|
2141 | 2141 | ts = ts + rs |
|
2142 | 2142 | heads -= set(r.parentrevs(rev)) |
|
2143 | 2143 | heads.add(rev) |
|
2144 | 2144 | try: |
|
2145 | 2145 | compression = ts / r.end(rev) |
|
2146 | 2146 | except ZeroDivisionError: |
|
2147 | 2147 | compression = 0 |
|
2148 | 2148 | ui.write("%5d %5d %5d %5d %5d %10d %4d %4d %4d %7d %9d " |
|
2149 | 2149 | "%11d %5d %8d\n" % |
|
2150 | 2150 | (rev, p1, p2, r.start(rev), r.end(rev), |
|
2151 | 2151 | r.start(dbase), r.start(cbase), |
|
2152 | 2152 | r.start(p1), r.start(p2), |
|
2153 | 2153 | rs, ts, compression, len(heads), clen)) |
|
2154 | 2154 | return 0 |
|
2155 | 2155 | |
|
2156 | 2156 | v = r.version |
|
2157 | 2157 | format = v & 0xFFFF |
|
2158 | 2158 | flags = [] |
|
2159 | 2159 | gdelta = False |
|
2160 | 2160 | if v & revlog.FLAG_INLINE_DATA: |
|
2161 | 2161 | flags.append('inline') |
|
2162 | 2162 | if v & revlog.FLAG_GENERALDELTA: |
|
2163 | 2163 | gdelta = True |
|
2164 | 2164 | flags.append('generaldelta') |
|
2165 | 2165 | if not flags: |
|
2166 | 2166 | flags = ['(none)'] |
|
2167 | 2167 | |
|
2168 | 2168 | ### tracks merge vs single parent |
|
2169 | 2169 | nummerges = 0 |
|
2170 | 2170 | |
|
2171 | 2171 | ### tracks the ways the "delta" revisions are built |
|
2172 | 2172 | # nodelta |
|
2173 | 2173 | numempty = 0 |
|
2174 | 2174 | numemptytext = 0 |
|
2175 | 2175 | numemptydelta = 0 |
|
2176 | 2176 | # full file content |
|
2177 | 2177 | numfull = 0 |
|
2178 | 2178 | # intermediate snapshot against a prior snapshot |
|
2179 | 2179 | numsemi = 0 |
|
2180 | 2180 | # snapshot count per depth |
|
2181 | 2181 | numsnapdepth = collections.defaultdict(lambda: 0) |
|
2182 | 2182 | # delta against previous revision |
|
2183 | 2183 | numprev = 0 |
|
2184 | 2184 | # delta against first or second parent (not prev) |
|
2185 | 2185 | nump1 = 0 |
|
2186 | 2186 | nump2 = 0 |
|
2187 | 2187 | # delta against neither prev nor parents |
|
2188 | 2188 | numother = 0 |
|
2189 | 2189 | # delta against prev that are also first or second parent |
|
2190 | 2190 | # (details of `numprev`) |
|
2191 | 2191 | nump1prev = 0 |
|
2192 | 2192 | nump2prev = 0 |
|
2193 | 2193 | |
|
2194 | 2194 | # data about delta chain of each revs |
|
2195 | 2195 | chainlengths = [] |
|
2196 | 2196 | chainbases = [] |
|
2197 | 2197 | chainspans = [] |
|
2198 | 2198 | |
|
2199 | 2199 | # data about each revision |
|
2200 | 2200 | datasize = [None, 0, 0] |
|
2201 | 2201 | fullsize = [None, 0, 0] |
|
2202 | 2202 | semisize = [None, 0, 0] |
|
2203 | 2203 | # snapshot count per depth |
|
2204 | 2204 | snapsizedepth = collections.defaultdict(lambda: [None, 0, 0]) |
|
2205 | 2205 | deltasize = [None, 0, 0] |
|
2206 | 2206 | chunktypecounts = {} |
|
2207 | 2207 | chunktypesizes = {} |
|
2208 | 2208 | |
|
2209 | 2209 | def addsize(size, l): |
|
2210 | 2210 | if l[0] is None or size < l[0]: |
|
2211 | 2211 | l[0] = size |
|
2212 | 2212 | if size > l[1]: |
|
2213 | 2213 | l[1] = size |
|
2214 | 2214 | l[2] += size |
|
2215 | 2215 | |
|
2216 | 2216 | numrevs = len(r) |
|
2217 | 2217 | for rev in pycompat.xrange(numrevs): |
|
2218 | 2218 | p1, p2 = r.parentrevs(rev) |
|
2219 | 2219 | delta = r.deltaparent(rev) |
|
2220 | 2220 | if format > 0: |
|
2221 | 2221 | addsize(r.rawsize(rev), datasize) |
|
2222 | 2222 | if p2 != nullrev: |
|
2223 | 2223 | nummerges += 1 |
|
2224 | 2224 | size = r.length(rev) |
|
2225 | 2225 | if delta == nullrev: |
|
2226 | 2226 | chainlengths.append(0) |
|
2227 | 2227 | chainbases.append(r.start(rev)) |
|
2228 | 2228 | chainspans.append(size) |
|
2229 | 2229 | if size == 0: |
|
2230 | 2230 | numempty += 1 |
|
2231 | 2231 | numemptytext += 1 |
|
2232 | 2232 | else: |
|
2233 | 2233 | numfull += 1 |
|
2234 | 2234 | numsnapdepth[0] += 1 |
|
2235 | 2235 | addsize(size, fullsize) |
|
2236 | 2236 | addsize(size, snapsizedepth[0]) |
|
2237 | 2237 | else: |
|
2238 | 2238 | chainlengths.append(chainlengths[delta] + 1) |
|
2239 | 2239 | baseaddr = chainbases[delta] |
|
2240 | 2240 | revaddr = r.start(rev) |
|
2241 | 2241 | chainbases.append(baseaddr) |
|
2242 | 2242 | chainspans.append((revaddr - baseaddr) + size) |
|
2243 | 2243 | if size == 0: |
|
2244 | 2244 | numempty += 1 |
|
2245 | 2245 | numemptydelta += 1 |
|
2246 | 2246 | elif r.issnapshot(rev): |
|
2247 | 2247 | addsize(size, semisize) |
|
2248 | 2248 | numsemi += 1 |
|
2249 | 2249 | depth = r.snapshotdepth(rev) |
|
2250 | 2250 | numsnapdepth[depth] += 1 |
|
2251 | 2251 | addsize(size, snapsizedepth[depth]) |
|
2252 | 2252 | else: |
|
2253 | 2253 | addsize(size, deltasize) |
|
2254 | 2254 | if delta == rev - 1: |
|
2255 | 2255 | numprev += 1 |
|
2256 | 2256 | if delta == p1: |
|
2257 | 2257 | nump1prev += 1 |
|
2258 | 2258 | elif delta == p2: |
|
2259 | 2259 | nump2prev += 1 |
|
2260 | 2260 | elif delta == p1: |
|
2261 | 2261 | nump1 += 1 |
|
2262 | 2262 | elif delta == p2: |
|
2263 | 2263 | nump2 += 1 |
|
2264 | 2264 | elif delta != nullrev: |
|
2265 | 2265 | numother += 1 |
|
2266 | 2266 | |
|
2267 | 2267 | # Obtain data on the raw chunks in the revlog. |
|
2268 | 2268 | if util.safehasattr(r, '_getsegmentforrevs'): |
|
2269 | 2269 | segment = r._getsegmentforrevs(rev, rev)[1] |
|
2270 | 2270 | else: |
|
2271 | 2271 | segment = r._revlog._getsegmentforrevs(rev, rev)[1] |
|
2272 | 2272 | if segment: |
|
2273 | 2273 | chunktype = bytes(segment[0:1]) |
|
2274 | 2274 | else: |
|
2275 | 2275 | chunktype = 'empty' |
|
2276 | 2276 | |
|
2277 | 2277 | if chunktype not in chunktypecounts: |
|
2278 | 2278 | chunktypecounts[chunktype] = 0 |
|
2279 | 2279 | chunktypesizes[chunktype] = 0 |
|
2280 | 2280 | |
|
2281 | 2281 | chunktypecounts[chunktype] += 1 |
|
2282 | 2282 | chunktypesizes[chunktype] += size |
|
2283 | 2283 | |
|
2284 | 2284 | # Adjust size min value for empty cases |
|
2285 | 2285 | for size in (datasize, fullsize, semisize, deltasize): |
|
2286 | 2286 | if size[0] is None: |
|
2287 | 2287 | size[0] = 0 |
|
2288 | 2288 | |
|
2289 | 2289 | numdeltas = numrevs - numfull - numempty - numsemi |
|
2290 | 2290 | numoprev = numprev - nump1prev - nump2prev |
|
2291 | 2291 | totalrawsize = datasize[2] |
|
2292 | 2292 | datasize[2] /= numrevs |
|
2293 | 2293 | fulltotal = fullsize[2] |
|
2294 | 2294 | if numfull == 0: |
|
2295 | 2295 | fullsize[2] = 0 |
|
2296 | 2296 | else: |
|
2297 | 2297 | fullsize[2] /= numfull |
|
2298 | 2298 | semitotal = semisize[2] |
|
2299 | 2299 | snaptotal = {} |
|
2300 | 2300 | if numsemi > 0: |
|
2301 | 2301 | semisize[2] /= numsemi |
|
2302 | 2302 | for depth in snapsizedepth: |
|
2303 | 2303 | snaptotal[depth] = snapsizedepth[depth][2] |
|
2304 | 2304 | snapsizedepth[depth][2] /= numsnapdepth[depth] |
|
2305 | 2305 | |
|
2306 | 2306 | deltatotal = deltasize[2] |
|
2307 | 2307 | if numdeltas > 0: |
|
2308 | 2308 | deltasize[2] /= numdeltas |
|
2309 | 2309 | totalsize = fulltotal + semitotal + deltatotal |
|
2310 | 2310 | avgchainlen = sum(chainlengths) / numrevs |
|
2311 | 2311 | maxchainlen = max(chainlengths) |
|
2312 | 2312 | maxchainspan = max(chainspans) |
|
2313 | 2313 | compratio = 1 |
|
2314 | 2314 | if totalsize: |
|
2315 | 2315 | compratio = totalrawsize / totalsize |
|
2316 | 2316 | |
|
2317 | 2317 | basedfmtstr = '%%%dd\n' |
|
2318 | 2318 | basepcfmtstr = '%%%dd %s(%%5.2f%%%%)\n' |
|
2319 | 2319 | |
|
2320 | 2320 | def dfmtstr(max): |
|
2321 | 2321 | return basedfmtstr % len(str(max)) |
|
2322 | 2322 | def pcfmtstr(max, padding=0): |
|
2323 | 2323 | return basepcfmtstr % (len(str(max)), ' ' * padding) |
|
2324 | 2324 | |
|
2325 | 2325 | def pcfmt(value, total): |
|
2326 | 2326 | if total: |
|
2327 | 2327 | return (value, 100 * float(value) / total) |
|
2328 | 2328 | else: |
|
2329 | 2329 | return value, 100.0 |
|
2330 | 2330 | |
|
2331 | 2331 | ui.write(('format : %d\n') % format) |
|
2332 | 2332 | ui.write(('flags : %s\n') % ', '.join(flags)) |
|
2333 | 2333 | |
|
2334 | 2334 | ui.write('\n') |
|
2335 | 2335 | fmt = pcfmtstr(totalsize) |
|
2336 | 2336 | fmt2 = dfmtstr(totalsize) |
|
2337 | 2337 | ui.write(('revisions : ') + fmt2 % numrevs) |
|
2338 | 2338 | ui.write((' merges : ') + fmt % pcfmt(nummerges, numrevs)) |
|
2339 | 2339 | ui.write((' normal : ') + fmt % pcfmt(numrevs - nummerges, numrevs)) |
|
2340 | 2340 | ui.write(('revisions : ') + fmt2 % numrevs) |
|
2341 | 2341 | ui.write((' empty : ') + fmt % pcfmt(numempty, numrevs)) |
|
2342 | 2342 | ui.write((' text : ') |
|
2343 | 2343 | + fmt % pcfmt(numemptytext, numemptytext + numemptydelta)) |
|
2344 | 2344 | ui.write((' delta : ') |
|
2345 | 2345 | + fmt % pcfmt(numemptydelta, numemptytext + numemptydelta)) |
|
2346 | 2346 | ui.write((' snapshot : ') + fmt % pcfmt(numfull + numsemi, numrevs)) |
|
2347 | 2347 | for depth in sorted(numsnapdepth): |
|
2348 | 2348 | ui.write((' lvl-%-3d : ' % depth) |
|
2349 | 2349 | + fmt % pcfmt(numsnapdepth[depth], numrevs)) |
|
2350 | 2350 | ui.write((' deltas : ') + fmt % pcfmt(numdeltas, numrevs)) |
|
2351 | 2351 | ui.write(('revision size : ') + fmt2 % totalsize) |
|
2352 | 2352 | ui.write((' snapshot : ') |
|
2353 | 2353 | + fmt % pcfmt(fulltotal + semitotal, totalsize)) |
|
2354 | 2354 | for depth in sorted(numsnapdepth): |
|
2355 | 2355 | ui.write((' lvl-%-3d : ' % depth) |
|
2356 | 2356 | + fmt % pcfmt(snaptotal[depth], totalsize)) |
|
2357 | 2357 | ui.write((' deltas : ') + fmt % pcfmt(deltatotal, totalsize)) |
|
2358 | 2358 | |
|
2359 | 2359 | def fmtchunktype(chunktype): |
|
2360 | 2360 | if chunktype == 'empty': |
|
2361 | 2361 | return ' %s : ' % chunktype |
|
2362 | 2362 | elif chunktype in pycompat.bytestr(string.ascii_letters): |
|
2363 | 2363 | return ' 0x%s (%s) : ' % (hex(chunktype), chunktype) |
|
2364 | 2364 | else: |
|
2365 | 2365 | return ' 0x%s : ' % hex(chunktype) |
|
2366 | 2366 | |
|
2367 | 2367 | ui.write('\n') |
|
2368 | 2368 | ui.write(('chunks : ') + fmt2 % numrevs) |
|
2369 | 2369 | for chunktype in sorted(chunktypecounts): |
|
2370 | 2370 | ui.write(fmtchunktype(chunktype)) |
|
2371 | 2371 | ui.write(fmt % pcfmt(chunktypecounts[chunktype], numrevs)) |
|
2372 | 2372 | ui.write(('chunks size : ') + fmt2 % totalsize) |
|
2373 | 2373 | for chunktype in sorted(chunktypecounts): |
|
2374 | 2374 | ui.write(fmtchunktype(chunktype)) |
|
2375 | 2375 | ui.write(fmt % pcfmt(chunktypesizes[chunktype], totalsize)) |
|
2376 | 2376 | |
|
2377 | 2377 | ui.write('\n') |
|
2378 | 2378 | fmt = dfmtstr(max(avgchainlen, maxchainlen, maxchainspan, compratio)) |
|
2379 | 2379 | ui.write(('avg chain length : ') + fmt % avgchainlen) |
|
2380 | 2380 | ui.write(('max chain length : ') + fmt % maxchainlen) |
|
2381 | 2381 | ui.write(('max chain reach : ') + fmt % maxchainspan) |
|
2382 | 2382 | ui.write(('compression ratio : ') + fmt % compratio) |
|
2383 | 2383 | |
|
2384 | 2384 | if format > 0: |
|
2385 | 2385 | ui.write('\n') |
|
2386 | 2386 | ui.write(('uncompressed data size (min/max/avg) : %d / %d / %d\n') |
|
2387 | 2387 | % tuple(datasize)) |
|
2388 | 2388 | ui.write(('full revision size (min/max/avg) : %d / %d / %d\n') |
|
2389 | 2389 | % tuple(fullsize)) |
|
2390 | 2390 | ui.write(('inter-snapshot size (min/max/avg) : %d / %d / %d\n') |
|
2391 | 2391 | % tuple(semisize)) |
|
2392 | 2392 | for depth in sorted(snapsizedepth): |
|
2393 | 2393 | if depth == 0: |
|
2394 | 2394 | continue |
|
2395 | 2395 | ui.write((' level-%-3d (min/max/avg) : %d / %d / %d\n') |
|
2396 | 2396 | % ((depth,) + tuple(snapsizedepth[depth]))) |
|
2397 | 2397 | ui.write(('delta size (min/max/avg) : %d / %d / %d\n') |
|
2398 | 2398 | % tuple(deltasize)) |
|
2399 | 2399 | |
|
2400 | 2400 | if numdeltas > 0: |
|
2401 | 2401 | ui.write('\n') |
|
2402 | 2402 | fmt = pcfmtstr(numdeltas) |
|
2403 | 2403 | fmt2 = pcfmtstr(numdeltas, 4) |
|
2404 | 2404 | ui.write(('deltas against prev : ') + fmt % pcfmt(numprev, numdeltas)) |
|
2405 | 2405 | if numprev > 0: |
|
2406 | 2406 | ui.write((' where prev = p1 : ') + fmt2 % pcfmt(nump1prev, |
|
2407 | 2407 | numprev)) |
|
2408 | 2408 | ui.write((' where prev = p2 : ') + fmt2 % pcfmt(nump2prev, |
|
2409 | 2409 | numprev)) |
|
2410 | 2410 | ui.write((' other : ') + fmt2 % pcfmt(numoprev, |
|
2411 | 2411 | numprev)) |
|
2412 | 2412 | if gdelta: |
|
2413 | 2413 | ui.write(('deltas against p1 : ') |
|
2414 | 2414 | + fmt % pcfmt(nump1, numdeltas)) |
|
2415 | 2415 | ui.write(('deltas against p2 : ') |
|
2416 | 2416 | + fmt % pcfmt(nump2, numdeltas)) |
|
2417 | 2417 | ui.write(('deltas against other : ') + fmt % pcfmt(numother, |
|
2418 | 2418 | numdeltas)) |
|
2419 | 2419 | |
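The statistics above lean on two small helpers defined inside ``debugrevlog``: ``addsize``, which maintains a ``[min, max, total]`` accumulator, and ``pcfmt``, which pairs a value with its percentage of a total. A self-contained sketch with made-up numbers::

    def addsize(size, l):
        # l is a [min, max, total] list; None means "no sample yet"
        if l[0] is None or size < l[0]:
            l[0] = size
        if size > l[1]:
            l[1] = size
        l[2] += size

    deltasize = [None, 0, 0]
    for size in (10, 250, 40):
        addsize(size, deltasize)
    # deltasize is now [10, 250, 300]

    def pcfmt(value, total):
        if total:
            return (value, 100 * float(value) / total)
        return value, 100.0

    print('deltas : %d (%5.2f%%)' % pcfmt(7, 20))   # deltas : 7 (35.00%)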
|
2420 | 2420 | @command('debugrevlogindex', cmdutil.debugrevlogopts + |
|
2421 | 2421 | [('f', 'format', 0, _('revlog format'), _('FORMAT'))], |
|
2422 | 2422 | _('[-f FORMAT] -c|-m|FILE'), |
|
2423 | 2423 | optionalrepo=True) |
|
2424 | 2424 | def debugrevlogindex(ui, repo, file_=None, **opts): |
|
2425 | 2425 | """dump the contents of a revlog index""" |
|
2426 | 2426 | opts = pycompat.byteskwargs(opts) |
|
2427 | 2427 | r = cmdutil.openrevlog(repo, 'debugrevlogindex', file_, opts) |
|
2428 | 2428 | format = opts.get('format', 0) |
|
2429 | 2429 | if format not in (0, 1): |
|
2430 | 2430 | raise error.Abort(_("unknown format %d") % format) |
|
2431 | 2431 | |
|
2432 | 2432 | if ui.debugflag: |
|
2433 | 2433 | shortfn = hex |
|
2434 | 2434 | else: |
|
2435 | 2435 | shortfn = short |
|
2436 | 2436 | |
|
2437 | 2437 | # There might not be anything in r, so have a sane default |
|
2438 | 2438 | idlen = 12 |
|
2439 | 2439 | for i in r: |
|
2440 | 2440 | idlen = len(shortfn(r.node(i))) |
|
2441 | 2441 | break |
|
2442 | 2442 | |
|
2443 | 2443 | if format == 0: |
|
2444 | 2444 | if ui.verbose: |
|
2445 | 2445 | ui.write((" rev offset length linkrev" |
|
2446 | 2446 | " %s %s p2\n") % ("nodeid".ljust(idlen), |
|
2447 | 2447 | "p1".ljust(idlen))) |
|
2448 | 2448 | else: |
|
2449 | 2449 | ui.write((" rev linkrev %s %s p2\n") % ( |
|
2450 | 2450 | "nodeid".ljust(idlen), "p1".ljust(idlen))) |
|
2451 | 2451 | elif format == 1: |
|
2452 | 2452 | if ui.verbose: |
|
2453 | 2453 | ui.write((" rev flag offset length size link p1" |
|
2454 | 2454 | " p2 %s\n") % "nodeid".rjust(idlen)) |
|
2455 | 2455 | else: |
|
2456 | 2456 | ui.write((" rev flag size link p1 p2 %s\n") % |
|
2457 | 2457 | "nodeid".rjust(idlen)) |
|
2458 | 2458 | |
|
2459 | 2459 | for i in r: |
|
2460 | 2460 | node = r.node(i) |
|
2461 | 2461 | if format == 0: |
|
2462 | 2462 | try: |
|
2463 | 2463 | pp = r.parents(node) |
|
2464 | 2464 | except Exception: |
|
2465 | 2465 | pp = [nullid, nullid] |
|
2466 | 2466 | if ui.verbose: |
|
2467 | 2467 | ui.write("% 6d % 9d % 7d % 7d %s %s %s\n" % ( |
|
2468 | 2468 | i, r.start(i), r.length(i), r.linkrev(i), |
|
2469 | 2469 | shortfn(node), shortfn(pp[0]), shortfn(pp[1]))) |
|
2470 | 2470 | else: |
|
2471 | 2471 | ui.write("% 6d % 7d %s %s %s\n" % ( |
|
2472 | 2472 | i, r.linkrev(i), shortfn(node), shortfn(pp[0]), |
|
2473 | 2473 | shortfn(pp[1]))) |
|
2474 | 2474 | elif format == 1: |
|
2475 | 2475 | pr = r.parentrevs(i) |
|
2476 | 2476 | if ui.verbose: |
|
2477 | 2477 | ui.write("% 6d %04x % 8d % 8d % 8d % 6d % 6d % 6d %s\n" % ( |
|
2478 | 2478 | i, r.flags(i), r.start(i), r.length(i), r.rawsize(i), |
|
2479 | 2479 | r.linkrev(i), pr[0], pr[1], shortfn(node))) |
|
2480 | 2480 | else: |
|
2481 | 2481 | ui.write("% 6d %04x % 8d % 6d % 6d % 6d %s\n" % ( |
|
2482 | 2482 | i, r.flags(i), r.rawsize(i), r.linkrev(i), pr[0], pr[1], |
|
2483 | 2483 | shortfn(node))) |
|
2484 | 2484 | |
|
2485 | 2485 | @command('debugrevspec', |
|
2486 | 2486 | [('', 'optimize', None, |
|
2487 | 2487 | _('print parsed tree after optimizing (DEPRECATED)')), |
|
2488 | 2488 | ('', 'show-revs', True, _('print list of result revisions (default)')), |
|
2489 | 2489 | ('s', 'show-set', None, _('print internal representation of result set')), |
|
2490 | 2490 | ('p', 'show-stage', [], |
|
2491 | 2491 | _('print parsed tree at the given stage'), _('NAME')), |
|
2492 | 2492 | ('', 'no-optimized', False, _('evaluate tree without optimization')), |
|
2493 | 2493 | ('', 'verify-optimized', False, _('verify optimized result')), |
|
2494 | 2494 | ], |
|
2495 | 2495 | ('REVSPEC')) |
|
2496 | 2496 | def debugrevspec(ui, repo, expr, **opts): |
|
2497 | 2497 | """parse and apply a revision specification |
|
2498 | 2498 | |
|
2499 | 2499 | Use the -p/--show-stage option to print the parsed tree at the given stages. |

2500 | 2500 | Use -p all to print the tree at every stage. |
|
2501 | 2501 | |
|
2502 | 2502 | Use the --no-show-revs option with -s or -p to print only the set |

2503 | 2503 | representation or the parsed tree, respectively. |
|
2504 | 2504 | |
|
2505 | 2505 | Use --verify-optimized to compare the optimized result with the unoptimized |
|
2506 | 2506 | one. Returns 1 if the optimized result differs. |
|
2507 | 2507 | """ |
|
2508 | 2508 | opts = pycompat.byteskwargs(opts) |
|
2509 | 2509 | aliases = ui.configitems('revsetalias') |
|
2510 | 2510 | stages = [ |
|
2511 | 2511 | ('parsed', lambda tree: tree), |
|
2512 | 2512 | ('expanded', lambda tree: revsetlang.expandaliases(tree, aliases, |
|
2513 | 2513 | ui.warn)), |
|
2514 | 2514 | ('concatenated', revsetlang.foldconcat), |
|
2515 | 2515 | ('analyzed', revsetlang.analyze), |
|
2516 | 2516 | ('optimized', revsetlang.optimize), |
|
2517 | 2517 | ] |
|
2518 | 2518 | if opts['no_optimized']: |
|
2519 | 2519 | stages = stages[:-1] |
|
2520 | 2520 | if opts['verify_optimized'] and opts['no_optimized']: |
|
2521 | 2521 | raise error.Abort(_('cannot use --verify-optimized with ' |
|
2522 | 2522 | '--no-optimized')) |
|
2523 | 2523 | stagenames = set(n for n, f in stages) |
|
2524 | 2524 | |
|
2525 | 2525 | showalways = set() |
|
2526 | 2526 | showchanged = set() |
|
2527 | 2527 | if ui.verbose and not opts['show_stage']: |
|
2528 | 2528 | # show parsed tree by --verbose (deprecated) |
|
2529 | 2529 | showalways.add('parsed') |
|
2530 | 2530 | showchanged.update(['expanded', 'concatenated']) |
|
2531 | 2531 | if opts['optimize']: |
|
2532 | 2532 | showalways.add('optimized') |
|
2533 | 2533 | if opts['show_stage'] and opts['optimize']: |
|
2534 | 2534 | raise error.Abort(_('cannot use --optimize with --show-stage')) |
|
2535 | 2535 | if opts['show_stage'] == ['all']: |
|
2536 | 2536 | showalways.update(stagenames) |
|
2537 | 2537 | else: |
|
2538 | 2538 | for n in opts['show_stage']: |
|
2539 | 2539 | if n not in stagenames: |
|
2540 | 2540 | raise error.Abort(_('invalid stage name: %s') % n) |
|
2541 | 2541 | showalways.update(opts['show_stage']) |
|
2542 | 2542 | |
|
2543 | 2543 | treebystage = {} |
|
2544 | 2544 | printedtree = None |
|
2545 | 2545 | tree = revsetlang.parse(expr, lookup=revset.lookupfn(repo)) |
|
2546 | 2546 | for n, f in stages: |
|
2547 | 2547 | treebystage[n] = tree = f(tree) |
|
2548 | 2548 | if n in showalways or (n in showchanged and tree != printedtree): |
|
2549 | 2549 | if opts['show_stage'] or n != 'parsed': |
|
2550 | 2550 | ui.write(("* %s:\n") % n) |
|
2551 | 2551 | ui.write(revsetlang.prettyformat(tree), "\n") |
|
2552 | 2552 | printedtree = tree |
|
2553 | 2553 | |
|
2554 | 2554 | if opts['verify_optimized']: |
|
2555 | 2555 | arevs = revset.makematcher(treebystage['analyzed'])(repo) |
|
2556 | 2556 | brevs = revset.makematcher(treebystage['optimized'])(repo) |
|
2557 | 2557 | if opts['show_set'] or (opts['show_set'] is None and ui.verbose): |
|
2558 | 2558 | ui.write(("* analyzed set:\n"), stringutil.prettyrepr(arevs), "\n") |
|
2559 | 2559 | ui.write(("* optimized set:\n"), stringutil.prettyrepr(brevs), "\n") |
|
2560 | 2560 | arevs = list(arevs) |
|
2561 | 2561 | brevs = list(brevs) |
|
2562 | 2562 | if arevs == brevs: |
|
2563 | 2563 | return 0 |
|
2564 | 2564 | ui.write(('--- analyzed\n'), label='diff.file_a') |
|
2565 | 2565 | ui.write(('+++ optimized\n'), label='diff.file_b') |
|
2566 | 2566 | sm = difflib.SequenceMatcher(None, arevs, brevs) |
|
2567 | 2567 | for tag, alo, ahi, blo, bhi in sm.get_opcodes(): |
|
2568 | 2568 | if tag in (r'delete', r'replace'): |
|
2569 | 2569 | for c in arevs[alo:ahi]: |
|
2570 | 2570 | ui.write('-%d\n' % c, label='diff.deleted') |
|
2571 | 2571 | if tag in (r'insert', r'replace'): |
|
2572 | 2572 | for c in brevs[blo:bhi]: |
|
2573 | 2573 | ui.write('+%d\n' % c, label='diff.inserted') |
|
2574 | 2574 | if tag == r'equal': |
|
2575 | 2575 | for c in arevs[alo:ahi]: |
|
2576 | 2576 | ui.write(' %d\n' % c) |
|
2577 | 2577 | return 1 |
|
2578 | 2578 | |
|
2579 | 2579 | func = revset.makematcher(tree) |
|
2580 | 2580 | revs = func(repo) |
|
2581 | 2581 | if opts['show_set'] or (opts['show_set'] is None and ui.verbose): |
|
2582 | 2582 | ui.write(("* set:\n"), stringutil.prettyrepr(revs), "\n") |
|
2583 | 2583 | if not opts['show_revs']: |
|
2584 | 2584 | return |
|
2585 | 2585 | for c in revs: |
|
2586 | 2586 | ui.write("%d\n" % c) |
|
2587 | 2587 | |
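A condensed sketch of the stage pipeline run by the command above, assuming the ``revsetlang`` and ``revset`` modules imported by this file and an already-open ``repo``; the expression is only an example::

    aliases = []   # normally ui.configitems('revsetalias')
    tree = revsetlang.parse('heads(default)')
    for name, f in [('expanded',
                     lambda t: revsetlang.expandaliases(t, aliases, None)),
                    ('concatenated', revsetlang.foldconcat),
                    ('analyzed', revsetlang.analyze),
                    ('optimized', revsetlang.optimize)]:
        tree = f(tree)
    revs = revset.makematcher(tree)(repo)   # iterable of revision numbers

Each stage consumes the tree produced by the previous one, which is why ``--verify-optimized`` can simply compare the sets built from the ``analyzed`` and ``optimized`` trees.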
|
2588 | 2588 | @command('debugserve', [ |
|
2589 | 2589 | ('', 'sshstdio', False, _('run an SSH server bound to process handles')), |
|
2590 | 2590 | ('', 'logiofd', '', _('file descriptor to log server I/O to')), |
|
2591 | 2591 | ('', 'logiofile', '', _('file to log server I/O to')), |
|
2592 | 2592 | ], '') |
|
2593 | 2593 | def debugserve(ui, repo, **opts): |
|
2594 | 2594 | """run a server with advanced settings |
|
2595 | 2595 | |
|
2596 | 2596 | This command is similar to :hg:`serve`. It exists partially as a |
|
2597 | 2597 | workaround for the fact that ``hg serve --stdio`` must have specific |
|
2598 | 2598 | arguments for security reasons. |
|
2599 | 2599 | """ |
|
2600 | 2600 | opts = pycompat.byteskwargs(opts) |
|
2601 | 2601 | |
|
2602 | 2602 | if not opts['sshstdio']: |
|
2603 | 2603 | raise error.Abort(_('only --sshstdio is currently supported')) |
|
2604 | 2604 | |
|
2605 | 2605 | logfh = None |
|
2606 | 2606 | |
|
2607 | 2607 | if opts['logiofd'] and opts['logiofile']: |
|
2608 | 2608 | raise error.Abort(_('cannot use both --logiofd and --logiofile')) |
|
2609 | 2609 | |
|
2610 | 2610 | if opts['logiofd']: |
|
2611 | 2611 | # Line buffered because output is line based. |
|
2612 | 2612 | try: |
|
2613 | 2613 | logfh = os.fdopen(int(opts['logiofd']), r'ab', 1) |
|
2614 | 2614 | except OSError as e: |
|
2615 | 2615 | if e.errno != errno.ESPIPE: |
|
2616 | 2616 | raise |
|
2617 | 2617 | # can't seek a pipe, so `ab` mode fails on py3 |
|
2618 | 2618 | logfh = os.fdopen(int(opts['logiofd']), r'wb', 1) |
|
2619 | 2619 | elif opts['logiofile']: |
|
2620 | 2620 | logfh = open(opts['logiofile'], 'ab', 1) |
|
2621 | 2621 | |
|
2622 | 2622 | s = wireprotoserver.sshserver(ui, repo, logfh=logfh) |
|
2623 | 2623 | s.serve_forever() |
|
2624 | 2624 | |
|
2625 | 2625 | @command('debugsetparents', [], _('REV1 [REV2]')) |
|
2626 | 2626 | def debugsetparents(ui, repo, rev1, rev2=None): |
|
2627 | 2627 | """manually set the parents of the current working directory |
|
2628 | 2628 | |
|
2629 | 2629 | This is useful for writing repository conversion tools, but should |
|
2630 | 2630 | be used with care. For example, neither the working directory nor the |
|
2631 | 2631 | dirstate is updated, so file status may be incorrect after running this |
|
2632 | 2632 | command. |
|
2633 | 2633 | |
|
2634 | 2634 | Returns 0 on success. |
|
2635 | 2635 | """ |
|
2636 | 2636 | |
|
2637 | 2637 | node1 = scmutil.revsingle(repo, rev1).node() |
|
2638 | 2638 | node2 = scmutil.revsingle(repo, rev2, 'null').node() |
|
2639 | 2639 | |
|
2640 | 2640 | with repo.wlock(): |
|
2641 | 2641 | repo.setparents(node1, node2) |
|
2642 | 2642 | |
|
2643 | 2643 | @command('debugssl', [], '[SOURCE]', optionalrepo=True) |
|
2644 | 2644 | def debugssl(ui, repo, source=None, **opts): |
|
2645 | 2645 | '''test a secure connection to a server |
|
2646 | 2646 | |
|
2647 | 2647 | This builds the certificate chain for the server on Windows, installing the |
|
2648 | 2648 | missing intermediates and trusted root via Windows Update if necessary. It |
|
2649 | 2649 | does nothing on other platforms. |
|
2650 | 2650 | |
|
2651 | 2651 | If SOURCE is omitted, the 'default' path will be used. If a URL is given, |
|
2652 | 2652 | that server is used. See :hg:`help urls` for more information. |
|
2653 | 2653 | |
|
2654 | 2654 | If the update succeeds, retry the original operation. Otherwise, the cause |
|
2655 | 2655 | of the SSL error is likely another issue. |
|
2656 | 2656 | ''' |
|
2657 | 2657 | if not pycompat.iswindows: |
|
2658 | 2658 | raise error.Abort(_('certificate chain building is only possible on ' |
|
2659 | 2659 | 'Windows')) |
|
2660 | 2660 | |
|
2661 | 2661 | if not source: |
|
2662 | 2662 | if not repo: |
|
2663 | 2663 | raise error.Abort(_("there is no Mercurial repository here, and no " |
|
2664 | 2664 | "server specified")) |
|
2665 | 2665 | source = "default" |
|
2666 | 2666 | |
|
2667 | 2667 | source, branches = hg.parseurl(ui.expandpath(source)) |
|
2668 | 2668 | url = util.url(source) |
|
2669 | 2669 | |
|
2670 | 2670 | defaultport = {'https': 443, 'ssh': 22} |
|
2671 | 2671 | if url.scheme in defaultport: |
|
2672 | 2672 | try: |
|
2673 | 2673 | addr = (url.host, int(url.port or defaultport[url.scheme])) |
|
2674 | 2674 | except ValueError: |
|
2675 | 2675 | raise error.Abort(_("malformed port number in URL")) |
|
2676 | 2676 | else: |
|
2677 | 2677 | raise error.Abort(_("only https and ssh connections are supported")) |
|
2678 | 2678 | |
|
2679 | 2679 | from . import win32 |
|
2680 | 2680 | |
|
2681 | 2681 | s = ssl.wrap_socket(socket.socket(), ssl_version=ssl.PROTOCOL_TLS, |
|
2682 | 2682 | cert_reqs=ssl.CERT_NONE, ca_certs=None) |
|
2683 | 2683 | |
|
2684 | 2684 | try: |
|
2685 | 2685 | s.connect(addr) |
|
2686 | 2686 | cert = s.getpeercert(True) |
|
2687 | 2687 | |
|
2688 | 2688 | ui.status(_('checking the certificate chain for %s\n') % url.host) |
|
2689 | 2689 | |
|
2690 | 2690 | complete = win32.checkcertificatechain(cert, build=False) |
|
2691 | 2691 | |
|
2692 | 2692 | if not complete: |
|
2693 | 2693 | ui.status(_('certificate chain is incomplete, updating... ')) |
|
2694 | 2694 | |
|
2695 | 2695 | if not win32.checkcertificatechain(cert): |
|
2696 | 2696 | ui.status(_('failed.\n')) |
|
2697 | 2697 | else: |
|
2698 | 2698 | ui.status(_('done.\n')) |
|
2699 | 2699 | else: |
|
2700 | 2700 | ui.status(_('full certificate chain is available\n')) |
|
2701 | 2701 | finally: |
|
2702 | 2702 | s.close() |
|
2703 | 2703 | |
|
2704 | 2704 | @command('debugsub', |
|
2705 | 2705 | [('r', 'rev', '', |
|
2706 | 2706 | _('revision to check'), _('REV'))], |
|
2707 | 2707 | _('[-r REV] [REV]')) |
|
2708 | 2708 | def debugsub(ui, repo, rev=None): |
|
2709 | 2709 | ctx = scmutil.revsingle(repo, rev, None) |
|
2710 | 2710 | for k, v in sorted(ctx.substate.items()): |
|
2711 | 2711 | ui.write(('path %s\n') % k) |
|
2712 | 2712 | ui.write((' source %s\n') % v[0]) |
|
2713 | 2713 | ui.write((' revision %s\n') % v[1]) |
|
2714 | 2714 | |
|
2715 | 2715 | @command('debugsuccessorssets', |
|
2716 | 2716 | [('', 'closest', False, _('return closest successors sets only'))], |
|
2717 | 2717 | _('[REV]')) |
|
2718 | 2718 | def debugsuccessorssets(ui, repo, *revs, **opts): |
|
2719 | 2719 | """show set of successors for revision |
|
2720 | 2720 | |
|
2721 | 2721 | A successors set of changeset A is a consistent group of revisions that |
|
2722 | 2722 | succeed A. It contains non-obsolete changesets only unless the closest |

2723 | 2723 | successors sets option is set. |
|
2724 | 2724 | |
|
2725 | 2725 | In most cases a changeset A has a single successors set containing a single |
|
2726 | 2726 | successor (changeset A replaced by A'). |
|
2727 | 2727 | |
|
2728 | 2728 | A changeset that is made obsolete with no successors is called "pruned". |
|
2729 | 2729 | Such changesets have no successors sets at all. |
|
2730 | 2730 | |
|
2731 | 2731 | A changeset that has been "split" will have a successors set containing |
|
2732 | 2732 | more than one successor. |
|
2733 | 2733 | |
|
2734 | 2734 | A changeset that has been rewritten in multiple different ways is called |
|
2735 | 2735 | "divergent". Such changesets have multiple successor sets (each of which |
|
2736 | 2736 | may also be split, i.e. have multiple successors). |
|
2737 | 2737 | |
|
2738 | 2738 | Results are displayed as follows:: |
|
2739 | 2739 | |
|
2740 | 2740 | <rev1> |
|
2741 | 2741 | <successors-1A> |
|
2742 | 2742 | <rev2> |
|
2743 | 2743 | <successors-2A> |
|
2744 | 2744 | <successors-2B1> <successors-2B2> <successors-2B3> |
|
2745 | 2745 | |
|
2746 | 2746 | Here rev2 has two possible (i.e. divergent) successors sets. The first |
|
2747 | 2747 | holds one element, whereas the second holds three (i.e. the changeset has |
|
2748 | 2748 | been split). |
|
2749 | 2749 | """ |
|
2750 | 2750 | # passed to successorssets caching computation from one call to another |
|
2751 | 2751 | cache = {} |
|
2752 | 2752 | ctx2str = bytes |
|
2753 | 2753 | node2str = short |
|
2754 | 2754 | for rev in scmutil.revrange(repo, revs): |
|
2755 | 2755 | ctx = repo[rev] |
|
2756 | 2756 | ui.write('%s\n'% ctx2str(ctx)) |
|
2757 | 2757 | for succsset in obsutil.successorssets(repo, ctx.node(), |
|
2758 | 2758 | closest=opts[r'closest'], |
|
2759 | 2759 | cache=cache): |
|
2760 | 2760 | if succsset: |
|
2761 | 2761 | ui.write(' ') |
|
2762 | 2762 | ui.write(node2str(succsset[0])) |
|
2763 | 2763 | for node in succsset[1:]: |
|
2764 | 2764 | ui.write(' ') |
|
2765 | 2765 | ui.write(node2str(node)) |
|
2766 | 2766 | ui.write('\n') |
|
2767 | 2767 | |
|
2768 | 2768 | @command('debugtemplate', |
|
2769 | 2769 | [('r', 'rev', [], _('apply template on changesets'), _('REV')), |
|
2770 | 2770 | ('D', 'define', [], _('define template keyword'), _('KEY=VALUE'))], |
|
2771 | 2771 | _('[-r REV]... [-D KEY=VALUE]... TEMPLATE'), |
|
2772 | 2772 | optionalrepo=True) |
|
2773 | 2773 | def debugtemplate(ui, repo, tmpl, **opts): |
|
2774 | 2774 | """parse and apply a template |
|
2775 | 2775 | |
|
2776 | 2776 | If -r/--rev is given, the template is processed as a log template and |
|
2777 | 2777 | applied to the given changesets. Otherwise, it is processed as a generic |
|
2778 | 2778 | template. |
|
2779 | 2779 | |
|
2780 | 2780 | Use --verbose to print the parsed tree. |
|
2781 | 2781 | """ |
|
2782 | 2782 | revs = None |
|
2783 | 2783 | if opts[r'rev']: |
|
2784 | 2784 | if repo is None: |
|
2785 | 2785 | raise error.RepoError(_('there is no Mercurial repository here ' |
|
2786 | 2786 | '(.hg not found)')) |
|
2787 | 2787 | revs = scmutil.revrange(repo, opts[r'rev']) |
|
2788 | 2788 | |
|
2789 | 2789 | props = {} |
|
2790 | 2790 | for d in opts[r'define']: |
|
2791 | 2791 | try: |
|
2792 | 2792 | k, v = (e.strip() for e in d.split('=', 1)) |
|
2793 | 2793 | if not k or k == 'ui': |
|
2794 | 2794 | raise ValueError |
|
2795 | 2795 | props[k] = v |
|
2796 | 2796 | except ValueError: |
|
2797 | 2797 | raise error.Abort(_('malformed keyword definition: %s') % d) |
|
2798 | 2798 | |
|
2799 | 2799 | if ui.verbose: |
|
2800 | 2800 | aliases = ui.configitems('templatealias') |
|
2801 | 2801 | tree = templater.parse(tmpl) |
|
2802 | 2802 | ui.note(templater.prettyformat(tree), '\n') |
|
2803 | 2803 | newtree = templater.expandaliases(tree, aliases) |
|
2804 | 2804 | if newtree != tree: |
|
2805 | 2805 | ui.note(("* expanded:\n"), templater.prettyformat(newtree), '\n') |
|
2806 | 2806 | |
|
2807 | 2807 | if revs is None: |
|
2808 | 2808 | tres = formatter.templateresources(ui, repo) |
|
2809 | 2809 | t = formatter.maketemplater(ui, tmpl, resources=tres) |
|
2810 | 2810 | if ui.verbose: |
|
2811 | 2811 | kwds, funcs = t.symbolsuseddefault() |
|
2812 | 2812 | ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds))) |
|
2813 | 2813 | ui.write(("* functions: %s\n") % ', '.join(sorted(funcs))) |
|
2814 | 2814 | ui.write(t.renderdefault(props)) |
|
2815 | 2815 | else: |
|
2816 | 2816 | displayer = logcmdutil.maketemplater(ui, repo, tmpl) |
|
2817 | 2817 | if ui.verbose: |
|
2818 | 2818 | kwds, funcs = displayer.t.symbolsuseddefault() |
|
2819 | 2819 | ui.write(("* keywords: %s\n") % ', '.join(sorted(kwds))) |
|
2820 | 2820 | ui.write(("* functions: %s\n") % ', '.join(sorted(funcs))) |
|
2821 | 2821 | for r in revs: |
|
2822 | 2822 | displayer.show(repo[r], **pycompat.strkwargs(props)) |
|
2823 | 2823 | displayer.close() |
|
2824 | 2824 | |
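A small sketch of how the ``-D/--define`` values above are turned into template keywords; the input strings are hypothetical::

    props = {}
    for d in ('user=alice', 'flag = 1'):
        k, v = (e.strip() for e in d.split('=', 1))
        if not k or k == 'ui':
            raise ValueError('malformed or reserved keyword name')
        props[k] = v
    # props == {'user': 'alice', 'flag': '1'}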
|
2825 | 2825 | @command('debuguigetpass', [ |
|
2826 | 2826 | ('p', 'prompt', '', _('prompt text'), _('TEXT')), |
|
2827 | 2827 | ], _('[-p TEXT]'), norepo=True) |
|
2828 | 2828 | def debuguigetpass(ui, prompt=''): |
|
2829 | 2829 | """show prompt to type password""" |
|
2830 | 2830 | r = ui.getpass(prompt) |
|
2831 | 2831 | ui.write(('response: %s\n') % r) |
|
2832 | 2832 | |
|
2833 | 2833 | @command('debuguiprompt', [ |
|
2834 | 2834 | ('p', 'prompt', '', _('prompt text'), _('TEXT')), |
|
2835 | 2835 | ], _('[-p TEXT]'), norepo=True) |
|
2836 | 2836 | def debuguiprompt(ui, prompt=''): |
|
2837 | 2837 | """show plain prompt""" |
|
2838 | 2838 | r = ui.prompt(prompt) |
|
2839 | 2839 | ui.write(('response: %s\n') % r) |
|
2840 | 2840 | |
|
2841 | 2841 | @command('debugupdatecaches', []) |
|
2842 | 2842 | def debugupdatecaches(ui, repo, *pats, **opts): |
|
2843 | 2843 | """warm all known caches in the repository""" |
|
2844 | 2844 | with repo.wlock(), repo.lock(): |
|
2845 | 2845 | repo.updatecaches(full=True) |
|
2846 | 2846 | |
|
2847 | 2847 | @command('debugupgraderepo', [ |
|
2848 | 2848 | ('o', 'optimize', [], _('extra optimization to perform'), _('NAME')), |
|
2849 | 2849 | ('', 'run', False, _('performs an upgrade')), |
|
2850 | 2850 | ('', 'backup', True, _('keep the old repository content around')), |
|
2851 | 2851 | ('', 'changelog', None, _('select the changelog for upgrade')), |
|
2852 | 2852 | ('', 'manifest', None, _('select the manifest for upgrade')), |
|
2853 | 2853 | ]) |
|
2854 | 2854 | def debugupgraderepo(ui, repo, run=False, optimize=None, backup=True, **opts): |
|
2855 | 2855 | """upgrade a repository to use different features |
|
2856 | 2856 | |
|
2857 | 2857 | If no arguments are specified, the repository is evaluated for upgrade |
|
2858 | 2858 | and a list of problems and potential optimizations is printed. |
|
2859 | 2859 | |
|
2860 | 2860 | With ``--run``, a repository upgrade is performed. Behavior of the upgrade |
|
2861 | 2861 | can be influenced via additional arguments. More details will be provided |
|
2862 | 2862 | by the command output when run without ``--run``. |
|
2863 | 2863 | |
|
2864 | 2864 | During the upgrade, the repository will be locked and no writes will be |
|
2865 | 2865 | allowed. |
|
2866 | 2866 | |
|
2867 | 2867 | At the end of the upgrade, the repository may not be readable while new |
|
2868 | 2868 | repository data is swapped in. This window will be as long as it takes to |
|
2869 | 2869 | rename some directories inside the ``.hg`` directory. On most machines, this |
|
2870 | 2870 | should complete almost instantaneously and the chances of a consumer being |
|
2871 | 2871 | unable to access the repository should be low. |
|
2872 | 2872 | |
|
2873 | 2873 | By default, all revlogs will be upgraded. You can restrict this using flags |
|
2874 | 2874 | such as `--manifest`: |
|
2875 | 2875 | |
|
2876 | 2876 | * `--manifest`: only optimize the manifest |
|
2877 | 2877 | * `--no-manifest`: optimize all revlogs but the manifest |
|
2878 | 2878 | * `--changelog`: optimize the changelog only |
|
2879 | 2879 | * `--no-changelog --no-manifest`: optimize filelogs only |
|
2880 | 2880 | """ |
|
2881 | 2881 | return upgrade.upgraderepo(ui, repo, run=run, optimize=optimize, |
|
2882 | 2882 | backup=backup, **opts) |
|
2883 | 2883 | |
|
2884 | 2884 | @command('debugwalk', cmdutil.walkopts, _('[OPTION]... [FILE]...'), |
|
2885 | 2885 | inferrepo=True) |
|
2886 | 2886 | def debugwalk(ui, repo, *pats, **opts): |
|
2887 | 2887 | """show how files match on given patterns""" |
|
2888 | 2888 | opts = pycompat.byteskwargs(opts) |
|
2889 | 2889 | m = scmutil.match(repo[None], pats, opts) |
|
2890 | 2890 | if ui.verbose: |
|
2891 | 2891 | ui.write(('* matcher:\n'), stringutil.prettyrepr(m), '\n') |
|
2892 | 2892 | items = list(repo[None].walk(m)) |
|
2893 | 2893 | if not items: |
|
2894 | 2894 | return |
|
2895 | 2895 | f = lambda fn: fn |
|
2896 | 2896 | if ui.configbool('ui', 'slash') and pycompat.ossep != '/': |
|
2897 | 2897 | f = lambda fn: util.normpath(fn) |
|
2898 | 2898 | fmt = 'f %%-%ds %%-%ds %%s' % ( |
|
2899 | 2899 | max([len(abs) for abs in items]), |
|
2900 | 2900 | max([len(repo.pathto(abs)) for abs in items])) |
|
2901 | 2901 | for abs in items: |
|
2902 | 2902 | line = fmt % (abs, f(repo.pathto(abs)), m.exact(abs) and 'exact' or '') |
|
2903 | 2903 | ui.write("%s\n" % line.rstrip()) |
|
2904 | 2904 | |
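The two-step format construction above is easy to misread: the doubled ``%%`` survives the first substitution and becomes a column-width specifier for the second. With made-up widths::

    fmt = 'f %%-%ds %%-%ds %%s' % (7, 11)
    # fmt == 'f %-7s %-11s %s'
    print(fmt % ('a.txt', 'src/a.txt', 'exact'))   # f a.txt   src/a.txt   exact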
|
2905 | 2905 | @command('debugwhyunstable', [], _('REV')) |
|
2906 | 2906 | def debugwhyunstable(ui, repo, rev): |
|
2907 | 2907 | """explain instabilities of a changeset""" |
|
2908 | 2908 | for entry in obsutil.whyunstable(repo, scmutil.revsingle(repo, rev)): |
|
2909 | 2909 | dnodes = '' |
|
2910 | 2910 | if entry.get('divergentnodes'): |
|
2911 | 2911 | dnodes = ' '.join('%s (%s)' % (ctx.hex(), ctx.phasestr()) |
|
2912 | 2912 | for ctx in entry['divergentnodes']) + ' ' |
|
2913 | 2913 | ui.write('%s: %s%s %s\n' % (entry['instability'], dnodes, |
|
2914 | 2914 | entry['reason'], entry['node'])) |
|
2915 | 2915 | |
|
2916 | 2916 | @command('debugwireargs', |
|
2917 | 2917 | [('', 'three', '', 'three'), |
|
2918 | 2918 | ('', 'four', '', 'four'), |
|
2919 | 2919 | ('', 'five', '', 'five'), |
|
2920 | 2920 | ] + cmdutil.remoteopts, |
|
2921 | 2921 | _('REPO [OPTIONS]... [ONE [TWO]]'), |
|
2922 | 2922 | norepo=True) |
|
2923 | 2923 | def debugwireargs(ui, repopath, *vals, **opts): |
|
2924 | 2924 | opts = pycompat.byteskwargs(opts) |
|
2925 | 2925 | repo = hg.peer(ui, opts, repopath) |
|
2926 | 2926 | for opt in cmdutil.remoteopts: |
|
2927 | 2927 | del opts[opt[1]] |
|
2928 | 2928 | args = {} |
|
2929 | 2929 | for k, v in opts.iteritems(): |
|
2930 | 2930 | if v: |
|
2931 | 2931 | args[k] = v |
|
2932 | 2932 | args = pycompat.strkwargs(args) |
|
2933 | 2933 | # run twice to check that we don't mess up the stream for the next command |
|
2934 | 2934 | res1 = repo.debugwireargs(*vals, **args) |
|
2935 | 2935 | res2 = repo.debugwireargs(*vals, **args) |
|
2936 | 2936 | ui.write("%s\n" % res1) |
|
2937 | 2937 | if res1 != res2: |
|
2938 | 2938 | ui.warn("%s\n" % res2) |
|
2939 | 2939 | |
|
2940 | 2940 | def _parsewirelangblocks(fh): |
|
2941 | 2941 | activeaction = None |
|
2942 | 2942 | blocklines = [] |
|
2943 | 2943 | lastindent = 0 |
|
2944 | 2944 | |
|
2945 | 2945 | for line in fh: |
|
2946 | 2946 | line = line.rstrip() |
|
2947 | 2947 | if not line: |
|
2948 | 2948 | continue |
|
2949 | 2949 | |
|
2950 | 2950 | if line.startswith(b'#'): |
|
2951 | 2951 | continue |
|
2952 | 2952 | |
|
2953 | 2953 | if not line.startswith(b' '): |
|
2954 | 2954 | # New block. Flush previous one. |
|
2955 | 2955 | if activeaction: |
|
2956 | 2956 | yield activeaction, blocklines |
|
2957 | 2957 | |
|
2958 | 2958 | activeaction = line |
|
2959 | 2959 | blocklines = [] |
|
2960 | 2960 | lastindent = 0 |
|
2961 | 2961 | continue |
|
2962 | 2962 | |
|
2963 | 2963 | # Else we start with an indent. |
|
2964 | 2964 | |
|
2965 | 2965 | if not activeaction: |
|
2966 | 2966 | raise error.Abort(_('indented line outside of block')) |
|
2967 | 2967 | |
|
2968 | 2968 | indent = len(line) - len(line.lstrip()) |
|
2969 | 2969 | |
|
2970 | 2970 | # If this line is indented more than the last line, concatenate it. |
|
2971 | 2971 | if indent > lastindent and blocklines: |
|
2972 | 2972 | blocklines[-1] += line.lstrip() |
|
2973 | 2973 | else: |
|
2974 | 2974 | blocklines.append(line) |
|
2975 | 2975 | lastindent = indent |
|
2976 | 2976 | |
|
2977 | 2977 | # Flush last block. |
|
2978 | 2978 | if activeaction: |
|
2979 | 2979 | yield activeaction, blocklines |
|
2980 | 2980 | |
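An illustrative input for the parser above and the blocks it yields; the script itself is made up, and any iterable of lines works, not just a file handle::

    script = [
        b'command listkeys',
        b'    namespace bookmarks',
        b'raw+',
        b"    b'hello\\n'",
    ]
    # list(_parsewirelangblocks(script)) ==
    #   [(b'command listkeys', [b'    namespace bookmarks']),
    #    (b'raw+', [b"    b'hello\\n'"])]

Indented lines keep their leading whitespace here; the consumers below strip or de-indent them as needed.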
|
2981 | 2981 | @command('debugwireproto', |
|
2982 | 2982 | [ |
|
2983 | 2983 | ('', 'localssh', False, _('start an SSH server for this repo')), |
|
2984 | 2984 | ('', 'peer', '', _('construct a specific version of the peer')), |
|
2985 | 2985 | ('', 'noreadstderr', False, _('do not read from stderr of the remote')), |
|
2986 | 2986 | ('', 'nologhandshake', False, |
|
2987 | 2987 | _('do not log I/O related to the peer handshake')), |
|
2988 | 2988 | ] + cmdutil.remoteopts, |
|
2989 | 2989 | _('[PATH]'), |
|
2990 | 2990 | optionalrepo=True) |
|
2991 | 2991 | def debugwireproto(ui, repo, path=None, **opts): |
|
2992 | 2992 | """send wire protocol commands to a server |
|
2993 | 2993 | |
|
2994 | 2994 | This command can be used to issue wire protocol commands to remote |
|
2995 | 2995 | peers and to debug the raw data being exchanged. |
|
2996 | 2996 | |
|
2997 | 2997 | ``--localssh`` will start an SSH server against the current repository |
|
2998 | 2998 | and connect to that. By default, the connection will perform a handshake |
|
2999 | 2999 | and establish an appropriate peer instance. |
|
3000 | 3000 | |
|
3001 | 3001 | ``--peer`` can be used to bypass the handshake protocol and construct a |
|
3002 | 3002 | peer instance using the specified class type. Valid values are ``raw``, |
|
3003 | 3003 | ``http2``, ``ssh1``, and ``ssh2``. ``raw`` instances only allow sending |
|
3004 | 3004 | raw data payloads and don't support higher-level command actions. |
|
3005 | 3005 | |
|
3006 | 3006 | ``--noreadstderr`` can be used to disable automatic reading from stderr |
|
3007 | 3007 | of the peer (for SSH connections only). Disabling automatic reading of |
|
3008 | 3008 | stderr is useful for making output more deterministic. |
|
3009 | 3009 | |
|
3010 | 3010 | Commands are issued via a mini language which is specified via stdin. |
|
3011 | 3011 | The language consists of individual actions to perform. An action is |
|
3012 | 3012 | defined by a block. A block is defined as a line with no leading |
|
3013 | 3013 | space followed by 0 or more lines with leading space. Blocks are |
|
3014 | 3014 | effectively a high-level command with additional metadata. |
|
3015 | 3015 | |
|
3016 | 3016 | Lines beginning with ``#`` are ignored. |
|
3017 | 3017 | |
|
3018 | 3018 | The following sections denote available actions. |
|
3019 | 3019 | |
|
3020 | 3020 | raw |
|
3021 | 3021 | --- |
|
3022 | 3022 | |
|
3023 | 3023 | Send raw data to the server. |
|
3024 | 3024 | |
|
3025 | 3025 | The block payload contains the raw data to send as one atomic send |
|
3026 | 3026 | operation. The data may not actually be delivered in a single system |
|
3027 | 3027 | call: it depends on the abilities of the transport being used. |
|
3028 | 3028 | |
|
3029 | 3029 | Each line in the block is de-indented and concatenated. Then, that |
|
3030 | 3030 | value is evaluated as a Python b'' literal. This allows the use of |
|
3031 | 3031 | backslash escaping, etc. |
|
3032 | 3032 | |
|
3033 | 3033 | raw+ |
|
3034 | 3034 | ---- |
|
3035 | 3035 | |
|
3036 | 3036 | Behaves like ``raw`` except flushes output afterwards. |
|
3037 | 3037 | |
|
3038 | 3038 | command <X> |
|
3039 | 3039 | ----------- |
|
3040 | 3040 | |
|
3041 | 3041 | Send a request to run a named command, whose name follows the ``command`` |
|
3042 | 3042 | string. |
|
3043 | 3043 | |
|
3044 | 3044 | Arguments to the command are defined as lines in this block. The format of |
|
3045 | 3045 | each line is ``<key> <value>``. e.g.:: |
|
3046 | 3046 | |
|
3047 | 3047 | command listkeys |
|
3048 | 3048 | namespace bookmarks |
|
3049 | 3049 | |
|
3050 | 3050 | If the value begins with ``eval:``, it will be interpreted as a Python |
|
3051 | 3051 | literal expression. Otherwise values are interpreted as Python b'' literals. |
|
3052 | 3052 | This allows sending complex types and encoding special byte sequences via |
|
3053 | 3053 | backslash escaping. |
|
3054 | 3054 | |
|
3055 | 3055 | The following arguments have special meaning: |
|
3056 | 3056 | |
|
3057 | 3057 | ``PUSHFILE`` |
|
3058 | 3058 | When defined, the *push* mechanism of the peer will be used instead |
|
3059 | 3059 | of the static request-response mechanism and the content of the |
|
3060 | 3060 | file specified in the value of this argument will be sent as the |
|
3061 | 3061 | command payload. |
|
3062 | 3062 | |
|
3063 | 3063 | This can be used to submit a local bundle file to the remote. |
|
3064 | 3064 | |
|
3065 | 3065 | batchbegin |
|
3066 | 3066 | ---------- |
|
3067 | 3067 | |
|
3068 | 3068 | Instruct the peer to begin a batched send. |
|
3069 | 3069 | |
|
3070 | 3070 | All ``command`` blocks are queued for execution until the next |
|
3071 | 3071 | ``batchsubmit`` block. |
|
3072 | 3072 | |
|
3073 | 3073 | batchsubmit |
|
3074 | 3074 | ----------- |
|
3075 | 3075 | |
|
3076 | 3076 | Submit previously queued ``command`` blocks as a batch request. |
|
3077 | 3077 | |
|
3078 | 3078 | This action MUST be paired with a ``batchbegin`` action. |
|
3079 | 3079 | |
|
3080 | 3080 | httprequest <method> <path> |
|
3081 | 3081 | --------------------------- |
|
3082 | 3082 | |
|
3083 | 3083 | (HTTP peer only) |
|
3084 | 3084 | |
|
3085 | 3085 | Send an HTTP request to the peer. |
|
3086 | 3086 | |
|
3087 | 3087 | The HTTP request line follows the ``httprequest`` action. e.g. ``GET /foo``. |
|
3088 | 3088 | |
|
3089 | 3089 | Arguments of the form ``<key>: <value>`` are interpreted as HTTP request |
|
3090 | 3090 | headers to add to the request. e.g. ``Accept: foo``. |
|
3091 | 3091 | |
|
3092 | 3092 | The following arguments are special: |
|
3093 | 3093 | |
|
3094 | 3094 | ``BODYFILE`` |
|
3095 | 3095 | The content of the file defined as the value to this argument will be |
|
3096 | 3096 | transferred verbatim as the HTTP request body. |
|
3097 | 3097 | |
|
3098 | 3098 | ``frame <type> <flags> <payload>`` |
|
3099 | 3099 | Send a unified protocol frame as part of the request body. |
|
3100 | 3100 | |
|
3101 | 3101 | All frames will be collected and sent as the body to the HTTP |
|
3102 | 3102 | request. |
|
3103 | 3103 | |
|
3104 | 3104 | close |
|
3105 | 3105 | ----- |
|
3106 | 3106 | |
|
3107 | 3107 | Close the connection to the server. |
|
3108 | 3108 | |
|
3109 | 3109 | flush |
|
3110 | 3110 | ----- |
|
3111 | 3111 | |
|
3112 | 3112 | Flush data written to the server. |
|
3113 | 3113 | |
|
3114 | 3114 | readavailable |
|
3115 | 3115 | ------------- |
|
3116 | 3116 | |
|
3117 | 3117 | Close the write end of the connection and read all available data from |
|
3118 | 3118 | the server. |
|
3119 | 3119 | |
|
3120 | 3120 | If the connection to the server encompasses multiple pipes, we poll both |
|
3121 | 3121 | pipes and read available data. |
|
3122 | 3122 | |
|
3123 | 3123 | readline |
|
3124 | 3124 | -------- |
|
3125 | 3125 | |
|
3126 | 3126 | Read a line of output from the server. If there are multiple output |
|
3127 | 3127 | pipes, reads only the main pipe. |
|
3128 | 3128 | |
|
3129 | 3129 | ereadline |
|
3130 | 3130 | --------- |
|
3131 | 3131 | |
|
3132 | 3132 | Like ``readline``, but read from the stderr pipe, if available. |
|
3133 | 3133 | |
|
3134 | 3134 | read <X> |
|
3135 | 3135 | -------- |
|
3136 | 3136 | |
|
3137 | 3137 | ``read()`` N bytes from the server's main output pipe. |
|
3138 | 3138 | |
|
3139 | 3139 | eread <X> |
|
3140 | 3140 | --------- |
|
3141 | 3141 | |
|
3142 | 3142 | ``read()`` N bytes from the server's stderr pipe, if available. |
|
3143 | 3143 | |
|
3144 | 3144 | Specifying Unified Frame-Based Protocol Frames |
|
3145 | 3145 | ---------------------------------------------- |
|
3146 | 3146 | |
|
3147 | 3147 | It is possible to emit *Unified Frame-Based Protocol* frames by using special |
|
3148 | 3148 | syntax. |
|
3149 | 3149 | |
|
3150 | 3150 | A frame is composed as a type, flags, and payload. These can be parsed |
|
3151 | 3151 | from a string of the form: |
|
3152 | 3152 | |
|
3153 | 3153 | <request-id> <stream-id> <stream-flags> <type> <flags> <payload> |
|
3154 | 3154 | |
|
3155 | 3155 | ``request-id`` and ``stream-id`` are integers defining the request and |
|
3156 | 3156 | stream identifiers. |
|
3157 | 3157 | |
|
3158 | 3158 | ``type`` can be an integer value for the frame type or the string name |
|
3159 | 3159 | of the type. The strings are defined in ``wireprotoframing.py``. e.g. |
|
3160 | 3160 | ``command-name``. |
|
3161 | 3161 | |
|
3162 | 3162 | ``stream-flags`` and ``flags`` are a ``|`` delimited list of flag |
|
3163 | 3163 | components. Each component (and there can be just one) can be an integer |
|
3164 | 3164 | or a flag name for stream flags or frame flags, respectively. Values are |
|
3165 | 3165 | resolved to integers and then bitwise OR'd together. |
|
3166 | 3166 | |
|
3167 | 3167 | ``payload`` represents the raw frame payload. If it begins with |
|
3168 | 3168 | ``cbor:``, the following string is evaluated as Python code and the |
|
3169 | 3169 | resulting object is fed into a CBOR encoder. Otherwise it is interpreted |
|
3170 | 3170 | as a Python byte string literal. |
|
3171 | 3171 | """ |
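    # An illustrative stdin script combining the actions documented above;
    # the command names and argument values are examples, not captured
    # output:
    #
    #   batchbegin
    #   command heads
    #   command listkeys
    #       namespace bookmarks
    #   batchsubmit
    #   readavailable
    #   close
    #
    # Inside an ``httprequest`` block, a frame argument follows the
    # "<request-id> <stream-id> <stream-flags> <type> <flags> <payload>"
    # form described above, e.g. (type and flag names assumed from
    # wireprotoframing.py):
    #
    #   frame 1 1 stream-begin command-request new cbor:{b'name': b'heads'}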
|
3172 | 3172 | opts = pycompat.byteskwargs(opts) |
|
3173 | 3173 | |
|
3174 | 3174 | if opts['localssh'] and not repo: |
|
3175 | 3175 | raise error.Abort(_('--localssh requires a repository')) |
|
3176 | 3176 | |
|
3177 | 3177 | if opts['peer'] and opts['peer'] not in ('raw', 'http2', 'ssh1', 'ssh2'): |
|
3178 | 3178 | raise error.Abort(_('invalid value for --peer'), |
|
3179 | 3179 | hint=_('valid values are "raw", "ssh1", and "ssh2"')) |
|
3180 | 3180 | |
|
3181 | 3181 | if path and opts['localssh']: |
|
3182 | 3182 | raise error.Abort(_('cannot specify --localssh with an explicit ' |
|
3183 | 3183 | 'path')) |
|
3184 | 3184 | |
|
3185 | 3185 | if ui.interactive(): |
|
3186 | 3186 | ui.write(_('(waiting for commands on stdin)\n')) |
|
3187 | 3187 | |
|
3188 | 3188 | blocks = list(_parsewirelangblocks(ui.fin)) |
|
3189 | 3189 | |
|
3190 | 3190 | proc = None |
|
3191 | 3191 | stdin = None |
|
3192 | 3192 | stdout = None |
|
3193 | 3193 | stderr = None |
|
3194 | 3194 | opener = None |
|
3195 | 3195 | |
|
3196 | 3196 | if opts['localssh']: |
|
3197 | 3197 | # We start the SSH server in its own process so there is process |
|
3198 | 3198 | # separation. This prevents a whole class of potential bugs around |
|
3199 | 3199 | # shared state from interfering with server operation. |
|
3200 | 3200 | args = procutil.hgcmd() + [ |
|
3201 | 3201 | '-R', repo.root, |
|
3202 | 3202 | 'debugserve', '--sshstdio', |
|
3203 | 3203 | ] |
|
3204 | 3204 | proc = subprocess.Popen(pycompat.rapply(procutil.tonativestr, args), |
|
3205 | 3205 | stdin=subprocess.PIPE, |
|
3206 | 3206 | stdout=subprocess.PIPE, stderr=subprocess.PIPE, |
|
3207 | 3207 | bufsize=0) |
|
3208 | 3208 | |
|
3209 | 3209 | stdin = proc.stdin |
|
3210 | 3210 | stdout = proc.stdout |
|
3211 | 3211 | stderr = proc.stderr |
|
3212 | 3212 | |
|
3213 | 3213 | # We turn the pipes into observers so we can log I/O. |
|
3214 | 3214 | if ui.verbose or opts['peer'] == 'raw': |
|
3215 | 3215 | stdin = util.makeloggingfileobject(ui, proc.stdin, b'i', |
|
3216 | 3216 | logdata=True) |
|
3217 | 3217 | stdout = util.makeloggingfileobject(ui, proc.stdout, b'o', |
|
3218 | 3218 | logdata=True) |
|
3219 | 3219 | stderr = util.makeloggingfileobject(ui, proc.stderr, b'e', |
|
3220 | 3220 | logdata=True) |
|
3221 | 3221 | |
|
3222 | 3222 | # --localssh also implies the peer connection settings. |
|
3223 | 3223 | |
|
3224 | 3224 | url = 'ssh://localserver' |
|
3225 | 3225 | autoreadstderr = not opts['noreadstderr'] |
|
3226 | 3226 | |
|
3227 | 3227 | if opts['peer'] == 'ssh1': |
|
3228 | 3228 | ui.write(_('creating ssh peer for wire protocol version 1\n')) |
|
3229 | 3229 | peer = sshpeer.sshv1peer(ui, url, proc, stdin, stdout, stderr, |
|
3230 | 3230 | None, autoreadstderr=autoreadstderr) |
|
3231 | 3231 | elif opts['peer'] == 'ssh2': |
|
3232 | 3232 | ui.write(_('creating ssh peer for wire protocol version 2\n')) |
|
3233 | 3233 | peer = sshpeer.sshv2peer(ui, url, proc, stdin, stdout, stderr, |
|
3234 | 3234 | None, autoreadstderr=autoreadstderr) |
|
3235 | 3235 | elif opts['peer'] == 'raw': |
|
3236 | 3236 | ui.write(_('using raw connection to peer\n')) |
|
3237 | 3237 | peer = None |
|
3238 | 3238 | else: |
|
3239 | 3239 | ui.write(_('creating ssh peer from handshake results\n')) |
|
3240 | 3240 | peer = sshpeer.makepeer(ui, url, proc, stdin, stdout, stderr, |
|
3241 | 3241 | autoreadstderr=autoreadstderr) |
|
3242 | 3242 | |
|
3243 | 3243 | elif path: |
|
3244 | 3244 | # We bypass hg.peer() so we can proxy the sockets. |
|
3245 | 3245 | # TODO consider not doing this because we skip |
|
3246 | 3246 | # ``hg.wirepeersetupfuncs`` and potentially other useful functionality. |
|
3247 | 3247 | u = util.url(path) |
|
3248 | 3248 | if u.scheme != 'http': |
|
3249 | 3249 | raise error.Abort(_('only http:// paths are currently supported')) |
|
3250 | 3250 | |
|
3251 | 3251 | url, authinfo = u.authinfo() |
|
3252 | 3252 | openerargs = { |
|
3253 | 3253 | r'useragent': b'Mercurial debugwireproto', |
|
3254 | 3254 | } |
|
3255 | 3255 | |
|
3256 | 3256 | # Turn pipes/sockets into observers so we can log I/O. |
|
3257 | 3257 | if ui.verbose: |
|
3258 | 3258 | openerargs.update({ |
|
3259 | 3259 | r'loggingfh': ui, |
|
3260 | 3260 | r'loggingname': b's', |
|
3261 | 3261 | r'loggingopts': { |
|
3262 | 3262 | r'logdata': True, |
|
3263 | 3263 | r'logdataapis': False, |
|
3264 | 3264 | }, |
|
3265 | 3265 | }) |
|
3266 | 3266 | |
|
3267 | 3267 | if ui.debugflag: |
|
3268 | 3268 | openerargs[r'loggingopts'][r'logdataapis'] = True |
|
3269 | 3269 | |
|
3270 | 3270 | # Don't send default headers when in raw mode. This allows us to |
|
3271 | 3271 | # bypass most of the behavior of our URL handling code so we can |
|
3272 | 3272 | # have near complete control over what's sent on the wire. |
|
3273 | 3273 | if opts['peer'] == 'raw': |
|
3274 | 3274 | openerargs[r'sendaccept'] = False |
|
3275 | 3275 | |
|
3276 | 3276 | opener = urlmod.opener(ui, authinfo, **openerargs) |
|
3277 | 3277 | |
|
3278 | 3278 | if opts['peer'] == 'http2': |
|
3279 | 3279 | ui.write(_('creating http peer for wire protocol version 2\n')) |
|
3280 | 3280 | # We go through makepeer() because we need an API descriptor for |
|
3281 | 3281 | # the peer instance to be useful. |
|
3282 | 3282 | with ui.configoverride({ |
|
3283 | 3283 | ('experimental', 'httppeer.advertise-v2'): True}): |
|
3284 | 3284 | if opts['nologhandshake']: |
|
3285 | 3285 | ui.pushbuffer() |
|
3286 | 3286 | |
|
3287 | 3287 | peer = httppeer.makepeer(ui, path, opener=opener) |
|
3288 | 3288 | |
|
3289 | 3289 | if opts['nologhandshake']: |
|
3290 | 3290 | ui.popbuffer() |
|
3291 | 3291 | |
|
3292 | 3292 | if not isinstance(peer, httppeer.httpv2peer): |
|
3293 | 3293 | raise error.Abort(_('could not instantiate HTTP peer for ' |
|
3294 | 3294 | 'wire protocol version 2'), |
|
3295 | 3295 | hint=_('the server may not have the feature ' |
|
3296 | 3296 | 'enabled or is not allowing this ' |
|
3297 | 3297 | 'client version')) |
|
3298 | 3298 | |
|
3299 | 3299 | elif opts['peer'] == 'raw': |
|
3300 | 3300 | ui.write(_('using raw connection to peer\n')) |
|
3301 | 3301 | peer = None |
|
3302 | 3302 | elif opts['peer']: |
|
3303 | 3303 | raise error.Abort(_('--peer %s not supported with HTTP peers') % |
|
3304 | 3304 | opts['peer']) |
|
3305 | 3305 | else: |
|
3306 | 3306 | peer = httppeer.makepeer(ui, path, opener=opener) |
|
3307 | 3307 | |
|
3308 | 3308 | # We /could/ populate stdin/stdout with sock.makefile()... |
|
3309 | 3309 | else: |
|
3310 | 3310 | raise error.Abort(_('unsupported connection configuration')) |
|
3311 | 3311 | |
|
3312 | 3312 | batchedcommands = None |
|
3313 | 3313 | |
|
3314 | 3314 | # Now perform actions based on the parsed wire language instructions. |
|
3315 | 3315 | for action, lines in blocks: |
|
3316 | 3316 | if action in ('raw', 'raw+'): |
|
3317 | 3317 | if not stdin: |
|
3318 | 3318 | raise error.Abort(_('cannot call raw/raw+ on this peer')) |
|
3319 | 3319 | |
|
3320 | 3320 | # Concatenate the data together. |
|
3321 | 3321 | data = ''.join(l.lstrip() for l in lines) |
|
3322 | 3322 | data = stringutil.unescapestr(data) |
|
3323 | 3323 | stdin.write(data) |
|
3324 | 3324 | |
|
3325 | 3325 | if action == 'raw+': |
|
3326 | 3326 | stdin.flush() |
|
3327 | 3327 | elif action == 'flush': |
|
3328 | 3328 | if not stdin: |
|
3329 | 3329 | raise error.Abort(_('cannot call flush on this peer')) |
|
3330 | 3330 | stdin.flush() |
|
3331 | 3331 | elif action.startswith('command'): |
|
3332 | 3332 | if not peer: |
|
3333 | 3333 | raise error.Abort(_('cannot send commands unless peer instance ' |
|
3334 | 3334 | 'is available')) |
|
3335 | 3335 | |
|
3336 | 3336 | command = action.split(' ', 1)[1] |
|
3337 | 3337 | |
|
3338 | 3338 | args = {} |
|
3339 | 3339 | for line in lines: |
|
3340 | 3340 | # We need to allow empty values. |
|
3341 | 3341 | fields = line.lstrip().split(' ', 1) |
|
3342 | 3342 | if len(fields) == 1: |
|
3343 | 3343 | key = fields[0] |
|
3344 | 3344 | value = '' |
|
3345 | 3345 | else: |
|
3346 | 3346 | key, value = fields |
|
3347 | 3347 | |
|
3348 | 3348 | if value.startswith('eval:'): |
|
3349 | 3349 | value = stringutil.evalpythonliteral(value[5:]) |
|
3350 | 3350 | else: |
|
3351 | 3351 | value = stringutil.unescapestr(value) |
|
3352 | 3352 | |
|
3353 | 3353 | args[key] = value |
|
3354 | 3354 | |
|
3355 | 3355 | if batchedcommands is not None: |
|
3356 | 3356 | batchedcommands.append((command, args)) |
|
3357 | 3357 | continue |
|
3358 | 3358 | |
|
3359 | 3359 | ui.status(_('sending %s command\n') % command) |
|
3360 | 3360 | |
|
3361 | 3361 | if 'PUSHFILE' in args: |
|
3362 | 3362 | with open(args['PUSHFILE'], r'rb') as fh: |
|
3363 | 3363 | del args['PUSHFILE'] |
|
3364 | 3364 | res, output = peer._callpush(command, fh, |
|
3365 | 3365 | **pycompat.strkwargs(args)) |
|
3366 | 3366 | ui.status(_('result: %s\n') % stringutil.escapestr(res)) |
|
3367 | 3367 | ui.status(_('remote output: %s\n') % |
|
3368 | 3368 | stringutil.escapestr(output)) |
|
3369 | 3369 | else: |
|
3370 | 3370 | with peer.commandexecutor() as e: |
|
3371 | 3371 | res = e.callcommand(command, args).result() |
|
3372 | 3372 | |
|
3373 | 3373 | if isinstance(res, wireprotov2peer.commandresponse): |
|
3374 | 3374 | val = res.objects() |
|
3375 | 3375 | ui.status(_('response: %s\n') % |
|
3376 | 3376 | stringutil.pprint(val, bprefix=True, indent=2)) |
|
3377 | 3377 | else: |
|
3378 | 3378 | ui.status(_('response: %s\n') % |
|
3379 | 3379 | stringutil.pprint(res, bprefix=True, indent=2)) |
|
3380 | 3380 | |
|
3381 | 3381 | elif action == 'batchbegin': |
|
3382 | 3382 | if batchedcommands is not None: |
|
3383 | 3383 | raise error.Abort(_('nested batchbegin not allowed')) |
|
3384 | 3384 | |
|
3385 | 3385 | batchedcommands = [] |
|
3386 | 3386 | elif action == 'batchsubmit': |
|
3387 | 3387 | # There is a batching API we could go through. But it would be |
|
3388 | 3388 | # difficult to normalize requests into function calls. It is easier |
|
3389 | 3389 | # to bypass this layer and normalize to commands + args. |
|
3390 | 3390 | ui.status(_('sending batch with %d sub-commands\n') % |
|
3391 | 3391 | len(batchedcommands)) |
|
3392 | 3392 | for i, chunk in enumerate(peer._submitbatch(batchedcommands)): |
|
3393 | 3393 | ui.status(_('response #%d: %s\n') % |
|
3394 | 3394 | (i, stringutil.escapestr(chunk))) |
|
3395 | 3395 | |
|
3396 | 3396 | batchedcommands = None |
|
3397 | 3397 | |
|
3398 | 3398 | elif action.startswith('httprequest '): |
|
3399 | 3399 | if not opener: |
|
3400 | 3400 | raise error.Abort(_('cannot use httprequest without an HTTP ' |
|
3401 | 3401 | 'peer')) |
|
3402 | 3402 | |
|
3403 | 3403 | request = action.split(' ', 2) |
|
3404 | 3404 | if len(request) != 3: |
|
3405 | 3405 | raise error.Abort(_('invalid httprequest: expected format is ' |
|
3406 | 3406 | '"httprequest <method> <path>"')) |
|
3407 | 3407 | |
|
3408 | 3408 | method, httppath = request[1:] |
|
3409 | 3409 | headers = {} |
|
3410 | 3410 | body = None |
|
3411 | 3411 | frames = [] |
|
3412 | 3412 | for line in lines: |
|
3413 | 3413 | line = line.lstrip() |
|
3414 | 3414 | m = re.match(b'^([a-zA-Z0-9_-]+): (.*)$', line) |
|
3415 | 3415 | if m: |
|
3416 | 3416 | # Headers need to use native strings. |
|
3417 | 3417 | key = pycompat.strurl(m.group(1)) |
|
3418 | 3418 | value = pycompat.strurl(m.group(2)) |
|
3419 | 3419 | headers[key] = value |
|
3420 | 3420 | continue |
|
3421 | 3421 | |
|
3422 | 3422 | if line.startswith(b'BODYFILE '): |
|
3423 | 3423 | with open(line.split(b' ', 1)[1], 'rb') as fh: |
|
3424 | 3424 | body = fh.read() |
|
3425 | 3425 | elif line.startswith(b'frame '): |
|
3426 | 3426 | frame = wireprotoframing.makeframefromhumanstring( |
|
3427 | 3427 | line[len(b'frame '):]) |
|
3428 | 3428 | |
|
3429 | 3429 | frames.append(frame) |
|
3430 | 3430 | else: |
|
3431 | 3431 | raise error.Abort(_('unknown argument to httprequest: %s') % |
|
3432 | 3432 | line) |
|
3433 | 3433 | |
|
3434 | 3434 | url = path + httppath |
|
3435 | 3435 | |
|
3436 | 3436 | if frames: |
|
3437 | 3437 | body = b''.join(bytes(f) for f in frames) |
|
3438 | 3438 | |
|
3439 | 3439 | req = urlmod.urlreq.request(pycompat.strurl(url), body, headers) |
|
3440 | 3440 | |
|
3441 | 3441 | # urllib.Request insists on using has_data() as a proxy for |
|
3442 | 3442 | # determining the request method. Override that to use our |
|
3443 | 3443 | # explicitly requested method. |
|
3444 | 3444 | req.get_method = lambda: pycompat.sysstr(method) |
|
3445 | 3445 | |
|
3446 | 3446 | try: |
|
3447 | 3447 | res = opener.open(req) |
|
3448 | 3448 | body = res.read() |
|
3449 | 3449 | except util.urlerr.urlerror as e: |
|
3450 | 3450 | # read() method must be called, but only exists in Python 2 |
|
3451 | 3451 | getattr(e, 'read', lambda: None)() |
|
3452 | 3452 | continue |
|
3453 | 3453 | |
|
3454 | 3454 | ct = res.headers.get(r'Content-Type') |
|
3455 | 3455 | if ct == r'application/mercurial-cbor': |
|
3456 | 3456 | ui.write(_('cbor> %s\n') % |
|
3457 | 3457 | stringutil.pprint(cborutil.decodeall(body), |
|
3458 | 3458 | bprefix=True, |
|
3459 | 3459 | indent=2)) |
|
3460 | 3460 | |
|
3461 | 3461 | elif action == 'close': |
|
3462 | 3462 | peer.close() |
|
3463 | 3463 | elif action == 'readavailable': |
|
3464 | 3464 | if not stdout or not stderr: |
|
3465 | 3465 | raise error.Abort(_('readavailable not available on this peer')) |
|
3466 | 3466 | |
|
3467 | 3467 | stdin.close() |
|
3468 | 3468 | stdout.read() |
|
3469 | 3469 | stderr.read() |
|
3470 | 3470 | |
|
3471 | 3471 | elif action == 'readline': |
|
3472 | 3472 | if not stdout: |
|
3473 | 3473 | raise error.Abort(_('readline not available on this peer')) |
|
3474 | 3474 | stdout.readline() |
|
3475 | 3475 | elif action == 'ereadline': |
|
3476 | 3476 | if not stderr: |
|
3477 | 3477 | raise error.Abort(_('ereadline not available on this peer')) |
|
3478 | 3478 | stderr.readline() |
|
3479 | 3479 | elif action.startswith('read '): |
|
3480 | 3480 | count = int(action.split(' ', 1)[1]) |
|
3481 | 3481 | if not stdout: |
|
3482 | 3482 | raise error.Abort(_('read not available on this peer')) |
|
3483 | 3483 | stdout.read(count) |
|
3484 | 3484 | elif action.startswith('eread '): |
|
3485 | 3485 | count = int(action.split(' ', 1)[1]) |
|
3486 | 3486 | if not stderr: |
|
3487 | 3487 | raise error.Abort(_('eread not available on this peer')) |
|
3488 | 3488 | stderr.read(count) |
|
3489 | 3489 | else: |
|
3490 | 3490 | raise error.Abort(_('unknown action: %s') % action) |
|
3491 | 3491 | |
|
3492 | 3492 | if batchedcommands is not None: |
|
3493 | 3493 | raise error.Abort(_('unclosed "batchbegin" request')) |
|
3494 | 3494 | |
|
3495 | 3495 | if peer: |
|
3496 | 3496 | peer.close() |
|
3497 | 3497 | |
|
3498 | 3498 | if proc: |
|
3499 | 3499 | proc.kill() |
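
For orientation, here is a minimal sketch of the wire-language input that the action loop above consumes. The block structure (an action line, indented `key value` argument lines, an `eval:` prefix switching a value to Python-literal parsing, and the `batchbegin`/`batchsubmit`/`close` actions) follows directly from the parsing code in this hunk; the specific commands and namespaces (`listkeys`, `heads`, `bookmarks`, `phases`) are illustrative wire-protocol names, not taken from this diff:

    command listkeys
        namespace bookmarks
    batchbegin
    command heads
    command listkeys
        namespace phases
    batchsubmit
    close
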
@@ -1,2021 +1,2021 b'' | |||
|
1 | 1 | # scmutil.py - Mercurial core utility functions |
|
2 | 2 | # |
|
3 | 3 | # Copyright Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | from __future__ import absolute_import |
|
9 | 9 | |
|
10 | 10 | import errno |
|
11 | 11 | import glob |
|
12 | 12 | import hashlib |
|
13 | 13 | import os |
|
14 | 14 | import posixpath |
|
15 | 15 | import re |
|
16 | 16 | import subprocess |
|
17 | 17 | import weakref |
|
18 | 18 | |
|
19 | 19 | from .i18n import _ |
|
20 | 20 | from .node import ( |
|
21 | 21 | bin, |
|
22 | 22 | hex, |
|
23 | 23 | nullid, |
|
24 | 24 | nullrev, |
|
25 | 25 | short, |
|
26 | 26 | wdirid, |
|
27 | 27 | wdirrev, |
|
28 | 28 | ) |
|
29 | 29 | |
|
30 | 30 | from . import ( |
|
31 | 31 | copies as copiesmod, |
|
32 | 32 | encoding, |
|
33 | 33 | error, |
|
34 | 34 | match as matchmod, |
|
35 | 35 | obsolete, |
|
36 | 36 | obsutil, |
|
37 | 37 | pathutil, |
|
38 | 38 | phases, |
|
39 | 39 | policy, |
|
40 | 40 | pycompat, |
|
41 | 41 | revsetlang, |
|
42 | 42 | similar, |
|
43 | 43 | smartset, |
|
44 | 44 | url, |
|
45 | 45 | util, |
|
46 | 46 | vfs, |
|
47 | 47 | ) |
|
48 | 48 | |
|
49 | 49 | from .utils import ( |
|
50 | 50 | procutil, |
|
51 | 51 | stringutil, |
|
52 | 52 | ) |
|
53 | 53 | |
|
54 | 54 | if pycompat.iswindows: |
|
55 | 55 | from . import scmwindows as scmplatform |
|
56 | 56 | else: |
|
57 | 57 | from . import scmposix as scmplatform |
|
58 | 58 | |
|
59 | 59 | parsers = policy.importmod(r'parsers') |
|
60 | 60 | |
|
61 | 61 | termsize = scmplatform.termsize |
|
62 | 62 | |
|
63 | 63 | class status(tuple): |
|
64 | 64 | '''Named tuple with a list of files per status. The 'deleted', 'unknown' |
|
65 | 65 | and 'ignored' properties are only relevant to the working copy. |
|
66 | 66 | ''' |
|
67 | 67 | |
|
68 | 68 | __slots__ = () |
|
69 | 69 | |
|
70 | 70 | def __new__(cls, modified, added, removed, deleted, unknown, ignored, |
|
71 | 71 | clean): |
|
72 | 72 | return tuple.__new__(cls, (modified, added, removed, deleted, unknown, |
|
73 | 73 | ignored, clean)) |
|
74 | 74 | |
|
75 | 75 | @property |
|
76 | 76 | def modified(self): |
|
77 | 77 | '''files that have been modified''' |
|
78 | 78 | return self[0] |
|
79 | 79 | |
|
80 | 80 | @property |
|
81 | 81 | def added(self): |
|
82 | 82 | '''files that have been added''' |
|
83 | 83 | return self[1] |
|
84 | 84 | |
|
85 | 85 | @property |
|
86 | 86 | def removed(self): |
|
87 | 87 | '''files that have been removed''' |
|
88 | 88 | return self[2] |
|
89 | 89 | |
|
90 | 90 | @property |
|
91 | 91 | def deleted(self): |
|
92 | 92 | '''files that are in the dirstate, but have been deleted from the |
|
93 | 93 | working copy (aka "missing") |
|
94 | 94 | ''' |
|
95 | 95 | return self[3] |
|
96 | 96 | |
|
97 | 97 | @property |
|
98 | 98 | def unknown(self): |
|
99 | 99 | '''files not in the dirstate that are not ignored''' |
|
100 | 100 | return self[4] |
|
101 | 101 | |
|
102 | 102 | @property |
|
103 | 103 | def ignored(self): |
|
104 | 104 | '''files not in the dirstate that are ignored (by _dirignore())''' |
|
105 | 105 | return self[5] |
|
106 | 106 | |
|
107 | 107 | @property |
|
108 | 108 | def clean(self): |
|
109 | 109 | '''files that have not been modified''' |
|
110 | 110 | return self[6] |
|
111 | 111 | |
|
112 | 112 | def __repr__(self, *args, **kwargs): |
|
113 | 113 | return ((r'<status modified=%s, added=%s, removed=%s, deleted=%s, ' |
|
114 | 114 | r'unknown=%s, ignored=%s, clean=%s>') % |
|
115 | 115 | tuple(pycompat.sysstr(stringutil.pprint(v)) for v in self)) |
|
116 | 116 | |
|
117 | 117 | def itersubrepos(ctx1, ctx2): |
|
118 | 118 | """find subrepos in ctx1 or ctx2""" |
|
119 | 119 | # Create a (subpath, ctx) mapping where we prefer subpaths from |
|
120 | 120 | # ctx1. The subpaths from ctx2 are important when the .hgsub file |
|
121 | 121 | # has been modified (in ctx2) but not yet committed (in ctx1). |
|
122 | 122 | subpaths = dict.fromkeys(ctx2.substate, ctx2) |
|
123 | 123 | subpaths.update(dict.fromkeys(ctx1.substate, ctx1)) |
|
124 | 124 | |
|
125 | 125 | missing = set() |
|
126 | 126 | |
|
127 | 127 | for subpath in ctx2.substate: |
|
128 | 128 | if subpath not in ctx1.substate: |
|
129 | 129 | del subpaths[subpath] |
|
130 | 130 | missing.add(subpath) |
|
131 | 131 | |
|
132 | 132 | for subpath, ctx in sorted(subpaths.iteritems()): |
|
133 | 133 | yield subpath, ctx.sub(subpath) |
|
134 | 134 | |
|
135 | 135 | # Yield an empty subrepo based on ctx1 for anything only in ctx2. That way, |
|
136 | 136 | # status and diff will have an accurate result when it does |
|
137 | 137 | # 'sub.{status|diff}(rev2)'. Otherwise, the ctx2 subrepo is compared |
|
138 | 138 | # against itself. |
|
139 | 139 | for subpath in missing: |
|
140 | 140 | yield subpath, ctx2.nullsub(subpath, ctx1) |
|
141 | 141 | |
|
142 | 142 | def nochangesfound(ui, repo, excluded=None): |
|
143 | 143 | '''Report no changes for push/pull, excluded is None or a list of |
|
144 | 144 | nodes excluded from the push/pull. |
|
145 | 145 | ''' |
|
146 | 146 | secretlist = [] |
|
147 | 147 | if excluded: |
|
148 | 148 | for n in excluded: |
|
149 | 149 | ctx = repo[n] |
|
150 | 150 | if ctx.phase() >= phases.secret and not ctx.extinct(): |
|
151 | 151 | secretlist.append(n) |
|
152 | 152 | |
|
153 | 153 | if secretlist: |
|
154 | 154 | ui.status(_("no changes found (ignored %d secret changesets)\n") |
|
155 | 155 | % len(secretlist)) |
|
156 | 156 | else: |
|
157 | 157 | ui.status(_("no changes found\n")) |
|
158 | 158 | |
|
159 | 159 | def callcatch(ui, func): |
|
160 | 160 | """call func() with global exception handling |
|
161 | 161 | |
|
162 | 162 | return func() if no exception happens. otherwise do some error handling |
|
163 | 163 | and return an exit code accordingly. does not handle all exceptions. |
|
164 | 164 | """ |
|
165 | 165 | try: |
|
166 | 166 | try: |
|
167 | 167 | return func() |
|
168 | 168 | except: # re-raises |
|
169 | 169 | ui.traceback() |
|
170 | 170 | raise |
|
171 | 171 | # Global exception handling, alphabetically |
|
172 | 172 | # Mercurial-specific first, followed by built-in and library exceptions |
|
173 | 173 | except error.LockHeld as inst: |
|
174 | 174 | if inst.errno == errno.ETIMEDOUT: |
|
175 | 175 | reason = _('timed out waiting for lock held by %r') % ( |
|
176 | 176 | pycompat.bytestr(inst.locker)) |
|
177 | 177 | else: |
|
178 | 178 | reason = _('lock held by %r') % inst.locker |
|
179 | 179 | ui.error(_("abort: %s: %s\n") % ( |
|
180 | 180 | inst.desc or stringutil.forcebytestr(inst.filename), reason)) |
|
181 | 181 | if not inst.locker: |
|
182 | 182 | ui.error(_("(lock might be very busy)\n")) |
|
183 | 183 | except error.LockUnavailable as inst: |
|
184 | 184 | ui.error(_("abort: could not lock %s: %s\n") % |
|
185 | 185 | (inst.desc or stringutil.forcebytestr(inst.filename), |
|
186 | 186 | encoding.strtolocal(inst.strerror))) |
|
187 | 187 | except error.OutOfBandError as inst: |
|
188 | 188 | if inst.args: |
|
189 | 189 | msg = _("abort: remote error:\n") |
|
190 | 190 | else: |
|
191 | 191 | msg = _("abort: remote error\n") |
|
192 | 192 | ui.error(msg) |
|
193 | 193 | if inst.args: |
|
194 | 194 | ui.error(''.join(inst.args)) |
|
195 | 195 | if inst.hint: |
|
196 | 196 | ui.error('(%s)\n' % inst.hint) |
|
197 | 197 | except error.RepoError as inst: |
|
198 | 198 | ui.error(_("abort: %s!\n") % inst) |
|
199 | 199 | if inst.hint: |
|
200 | 200 | ui.error(_("(%s)\n") % inst.hint) |
|
201 | 201 | except error.ResponseError as inst: |
|
202 | 202 | ui.error(_("abort: %s") % inst.args[0]) |
|
203 | 203 | msg = inst.args[1] |
|
204 | 204 | if isinstance(msg, type(u'')): |
|
205 | 205 | msg = pycompat.sysbytes(msg) |
|
206 | 206 | if not isinstance(msg, bytes): |
|
207 | 207 | ui.error(" %r\n" % (msg,)) |
|
208 | 208 | elif not msg: |
|
209 | 209 | ui.error(_(" empty string\n")) |
|
210 | 210 | else: |
|
211 | 211 | ui.error("\n%r\n" % pycompat.bytestr(stringutil.ellipsis(msg))) |
|
212 | 212 | except error.CensoredNodeError as inst: |
|
213 | 213 | ui.error(_("abort: file censored %s!\n") % inst) |
|
214 | 214 | except error.StorageError as inst: |
|
215 | 215 | ui.error(_("abort: %s!\n") % inst) |
|
216 | 216 | if inst.hint: |
|
217 | 217 | ui.error(_("(%s)\n") % inst.hint) |
|
218 | 218 | except error.InterventionRequired as inst: |
|
219 | 219 | ui.error("%s\n" % inst) |
|
220 | 220 | if inst.hint: |
|
221 | 221 | ui.error(_("(%s)\n") % inst.hint) |
|
222 | 222 | return 1 |
|
223 | 223 | except error.WdirUnsupported: |
|
224 | 224 | ui.error(_("abort: working directory revision cannot be specified\n")) |
|
225 | 225 | except error.Abort as inst: |
|
226 | 226 | ui.error(_("abort: %s\n") % inst) |
|
227 | 227 | if inst.hint: |
|
228 | 228 | ui.error(_("(%s)\n") % inst.hint) |
|
229 | 229 | except ImportError as inst: |
|
230 | 230 | ui.error(_("abort: %s!\n") % stringutil.forcebytestr(inst)) |
|
231 | 231 | m = stringutil.forcebytestr(inst).split()[-1] |
|
232 | 232 | if m in "mpatch bdiff".split(): |
|
233 | 233 | ui.error(_("(did you forget to compile extensions?)\n")) |
|
234 | 234 | elif m in "zlib".split(): |
|
235 | 235 | ui.error(_("(is your Python install correct?)\n")) |
|
236 | 236 | except (IOError, OSError) as inst: |
|
237 | 237 | if util.safehasattr(inst, "code"): # HTTPError |
|
238 | 238 | ui.error(_("abort: %s\n") % stringutil.forcebytestr(inst)) |
|
239 | 239 | elif util.safehasattr(inst, "reason"): # URLError or SSLError |
|
240 | 240 | try: # usually it is in the form (errno, strerror) |
|
241 | 241 | reason = inst.reason.args[1] |
|
242 | 242 | except (AttributeError, IndexError): |
|
243 | 243 | # it might be anything, for example a string |
|
244 | 244 | reason = inst.reason |
|
245 | 245 | if isinstance(reason, pycompat.unicode): |
|
246 | 246 | # SSLError of Python 2.7.9 contains a unicode |
|
247 | 247 | reason = encoding.unitolocal(reason) |
|
248 | 248 | ui.error(_("abort: error: %s\n") % reason) |
|
249 | 249 | elif (util.safehasattr(inst, "args") |
|
250 | 250 | and inst.args and inst.args[0] == errno.EPIPE): |
|
251 | 251 | pass |
|
252 | 252 | elif getattr(inst, "strerror", None): # common IOError or OSError |
|
253 | 253 | if getattr(inst, "filename", None) is not None: |
|
254 | 254 | ui.error(_("abort: %s: '%s'\n") % ( |
|
255 | 255 | encoding.strtolocal(inst.strerror), |
|
256 | 256 | stringutil.forcebytestr(inst.filename))) |
|
257 | 257 | else: |
|
258 | 258 | ui.error(_("abort: %s\n") % encoding.strtolocal(inst.strerror)) |
|
259 | 259 | else: # suspicious IOError |
|
260 | 260 | raise |
|
261 | 261 | except MemoryError: |
|
262 | 262 | ui.error(_("abort: out of memory\n")) |
|
263 | 263 | except SystemExit as inst: |
|
264 | 264 | # Commands shouldn't sys.exit directly, but give a return code. |
|
265 | 265 | # Just in case catch this and and pass exit code to caller. |
|
266 | 266 | return inst.code |
|
267 | 267 | |
|
268 | 268 | return -1 |
|
269 | 269 | |
|
270 | 270 | def checknewlabel(repo, lbl, kind): |
|
271 | 271 | # Do not use the "kind" parameter in ui output. |
|
272 | 272 | # It makes strings difficult to translate. |
|
273 | 273 | if lbl in ['tip', '.', 'null']: |
|
274 | 274 | raise error.Abort(_("the name '%s' is reserved") % lbl) |
|
275 | 275 | for c in (':', '\0', '\n', '\r'): |
|
276 | 276 | if c in lbl: |
|
277 | 277 | raise error.Abort( |
|
278 | 278 | _("%r cannot be used in a name") % pycompat.bytestr(c)) |
|
279 | 279 | try: |
|
280 | 280 | int(lbl) |
|
281 | 281 | raise error.Abort(_("cannot use an integer as a name")) |
|
282 | 282 | except ValueError: |
|
283 | 283 | pass |
|
284 | 284 | if lbl.strip() != lbl: |
|
285 | 285 | raise error.Abort(_("leading or trailing whitespace in name %r") % lbl) |
|
286 | 286 | |
|
287 | 287 | def checkfilename(f): |
|
288 | 288 | '''Check that the filename f is an acceptable filename for a tracked file''' |
|
289 | 289 | if '\r' in f or '\n' in f: |
|
290 | 290 | raise error.Abort(_("'\\n' and '\\r' disallowed in filenames: %r") |
|
291 | 291 | % pycompat.bytestr(f)) |
|
292 | 292 | |
|
293 | 293 | def checkportable(ui, f): |
|
294 | 294 | '''Check if filename f is portable and warn or abort depending on config''' |
|
295 | 295 | checkfilename(f) |
|
296 | 296 | abort, warn = checkportabilityalert(ui) |
|
297 | 297 | if abort or warn: |
|
298 | 298 | msg = util.checkwinfilename(f) |
|
299 | 299 | if msg: |
|
300 | 300 | msg = "%s: %s" % (msg, procutil.shellquote(f)) |
|
301 | 301 | if abort: |
|
302 | 302 | raise error.Abort(msg) |
|
303 | 303 | ui.warn(_("warning: %s\n") % msg) |
|
304 | 304 | |
|
305 | 305 | def checkportabilityalert(ui): |
|
306 | 306 | '''check if the user's config requests nothing, a warning, or abort for |
|
307 | 307 | non-portable filenames''' |
|
308 | 308 | val = ui.config('ui', 'portablefilenames') |
|
309 | 309 | lval = val.lower() |
|
310 | 310 | bval = stringutil.parsebool(val) |
|
311 | 311 | abort = pycompat.iswindows or lval == 'abort' |
|
312 | 312 | warn = bval or lval == 'warn' |
|
313 | 313 | if bval is None and not (warn or abort or lval == 'ignore'): |
|
314 | 314 | raise error.ConfigError( |
|
315 | 315 | _("ui.portablefilenames value is invalid ('%s')") % val) |
|
316 | 316 | return abort, warn |
|
317 | 317 | |
|
318 | 318 | class casecollisionauditor(object): |
|
319 | 319 | def __init__(self, ui, abort, dirstate): |
|
320 | 320 | self._ui = ui |
|
321 | 321 | self._abort = abort |
|
322 |     | allfiles = '\0'.join(dirstate

    | 322 | allfiles = '\0'.join(dirstate) |
|
323 | 323 | self._loweredfiles = set(encoding.lower(allfiles).split('\0')) |
|
324 | 324 | self._dirstate = dirstate |
|
325 | 325 | # The purpose of _newfiles is so that we don't complain about |
|
326 | 326 | # case collisions if someone were to call this object with the |
|
327 | 327 | # same filename twice. |
|
328 | 328 | self._newfiles = set() |
|
329 | 329 | |
|
330 | 330 | def __call__(self, f): |
|
331 | 331 | if f in self._newfiles: |
|
332 | 332 | return |
|
333 | 333 | fl = encoding.lower(f) |
|
334 | 334 | if fl in self._loweredfiles and f not in self._dirstate: |
|
335 | 335 | msg = _('possible case-folding collision for %s') % f |
|
336 | 336 | if self._abort: |
|
337 | 337 | raise error.Abort(msg) |
|
338 | 338 | self._ui.warn(_("warning: %s\n") % msg) |
|
339 | 339 | self._loweredfiles.add(fl) |
|
340 | 340 | self._newfiles.add(f) |
|
341 | 341 | |
|
342 | 342 | def filteredhash(repo, maxrev): |
|
343 | 343 | """build hash of filtered revisions in the current repoview. |
|
344 | 344 | |
|
345 | 345 | Multiple caches perform up-to-date validation by checking that the |
|
346 | 346 | tiprev and tipnode stored in the cache file match the current repository. |
|
347 | 347 | However, this is not sufficient for validating repoviews because the set |
|
348 | 348 | of revisions in the view may change without the repository tiprev and |
|
349 | 349 | tipnode changing. |
|
350 | 350 | |
|
351 | 351 | This function hashes all the revs filtered from the view and returns |
|
352 | 352 | that SHA-1 digest. |
|
353 | 353 | """ |
|
354 | 354 | cl = repo.changelog |
|
355 | 355 | if not cl.filteredrevs: |
|
356 | 356 | return None |
|
357 | 357 | key = None |
|
358 | 358 | revs = sorted(r for r in cl.filteredrevs if r <= maxrev) |
|
359 | 359 | if revs: |
|
360 | 360 | s = hashlib.sha1() |
|
361 | 361 | for rev in revs: |
|
362 | 362 | s.update('%d;' % rev) |
|
363 | 363 | key = s.digest() |
|
364 | 364 | return key |
|
365 | 365 | |
|
366 | 366 | def walkrepos(path, followsym=False, seen_dirs=None, recurse=False): |
|
367 | 367 | '''yield every hg repository under path, always recursively. |
|
368 | 368 | The recurse flag will only control recursion into repo working dirs''' |
|
369 | 369 | def errhandler(err): |
|
370 | 370 | if err.filename == path: |
|
371 | 371 | raise err |
|
372 | 372 | samestat = getattr(os.path, 'samestat', None) |
|
373 | 373 | if followsym and samestat is not None: |
|
374 | 374 | def adddir(dirlst, dirname): |
|
375 | 375 | dirstat = os.stat(dirname) |
|
376 | 376 | match = any(samestat(dirstat, lstdirstat) for lstdirstat in dirlst) |
|
377 | 377 | if not match: |
|
378 | 378 | dirlst.append(dirstat) |
|
379 | 379 | return not match |
|
380 | 380 | else: |
|
381 | 381 | followsym = False |
|
382 | 382 | |
|
383 | 383 | if (seen_dirs is None) and followsym: |
|
384 | 384 | seen_dirs = [] |
|
385 | 385 | adddir(seen_dirs, path) |
|
386 | 386 | for root, dirs, files in os.walk(path, topdown=True, onerror=errhandler): |
|
387 | 387 | dirs.sort() |
|
388 | 388 | if '.hg' in dirs: |
|
389 | 389 | yield root # found a repository |
|
390 | 390 | qroot = os.path.join(root, '.hg', 'patches') |
|
391 | 391 | if os.path.isdir(os.path.join(qroot, '.hg')): |
|
392 | 392 | yield qroot # we have a patch queue repo here |
|
393 | 393 | if recurse: |
|
394 | 394 | # avoid recursing inside the .hg directory |
|
395 | 395 | dirs.remove('.hg') |
|
396 | 396 | else: |
|
397 | 397 | dirs[:] = [] # don't descend further |
|
398 | 398 | elif followsym: |
|
399 | 399 | newdirs = [] |
|
400 | 400 | for d in dirs: |
|
401 | 401 | fname = os.path.join(root, d) |
|
402 | 402 | if adddir(seen_dirs, fname): |
|
403 | 403 | if os.path.islink(fname): |
|
404 | 404 | for hgname in walkrepos(fname, True, seen_dirs): |
|
405 | 405 | yield hgname |
|
406 | 406 | else: |
|
407 | 407 | newdirs.append(d) |
|
408 | 408 | dirs[:] = newdirs |
|
409 | 409 | |
|
410 | 410 | def binnode(ctx): |
|
411 | 411 | """Return binary node id for a given basectx""" |
|
412 | 412 | node = ctx.node() |
|
413 | 413 | if node is None: |
|
414 | 414 | return wdirid |
|
415 | 415 | return node |
|
416 | 416 | |
|
417 | 417 | def intrev(ctx): |
|
418 | 418 | """Return integer for a given basectx that can be used in comparison or |
|
419 | 419 | arithmetic operation""" |
|
420 | 420 | rev = ctx.rev() |
|
421 | 421 | if rev is None: |
|
422 | 422 | return wdirrev |
|
423 | 423 | return rev |
|
424 | 424 | |
|
425 | 425 | def formatchangeid(ctx): |
|
426 | 426 | """Format changectx as '{rev}:{node|formatnode}', which is the default |
|
427 | 427 | template provided by logcmdutil.changesettemplater""" |
|
428 | 428 | repo = ctx.repo() |
|
429 | 429 | return formatrevnode(repo.ui, intrev(ctx), binnode(ctx)) |
|
430 | 430 | |
|
431 | 431 | def formatrevnode(ui, rev, node): |
|
432 | 432 | """Format given revision and node depending on the current verbosity""" |
|
433 | 433 | if ui.debugflag: |
|
434 | 434 | hexfunc = hex |
|
435 | 435 | else: |
|
436 | 436 | hexfunc = short |
|
437 | 437 | return '%d:%s' % (rev, hexfunc(node)) |
|
438 | 438 | |
|
439 | 439 | def resolvehexnodeidprefix(repo, prefix): |
|
440 | 440 | if (prefix.startswith('x') and |
|
441 | 441 | repo.ui.configbool('experimental', 'revisions.prefixhexnode')): |
|
442 | 442 | prefix = prefix[1:] |
|
443 | 443 | try: |
|
444 | 444 | # Uses unfiltered repo because it's faster when prefix is ambiguous/ |
|
445 | 445 | # This matches the shortesthexnodeidprefix() function below. |
|
446 | 446 | node = repo.unfiltered().changelog._partialmatch(prefix) |
|
447 | 447 | except error.AmbiguousPrefixLookupError: |
|
448 | 448 | revset = repo.ui.config('experimental', 'revisions.disambiguatewithin') |
|
449 | 449 | if revset: |
|
450 | 450 | # Clear config to avoid infinite recursion |
|
451 | 451 | configoverrides = {('experimental', |
|
452 | 452 | 'revisions.disambiguatewithin'): None} |
|
453 | 453 | with repo.ui.configoverride(configoverrides): |
|
454 | 454 | revs = repo.anyrevs([revset], user=True) |
|
455 | 455 | matches = [] |
|
456 | 456 | for rev in revs: |
|
457 | 457 | node = repo.changelog.node(rev) |
|
458 | 458 | if hex(node).startswith(prefix): |
|
459 | 459 | matches.append(node) |
|
460 | 460 | if len(matches) == 1: |
|
461 | 461 | return matches[0] |
|
462 | 462 | raise |
|
463 | 463 | if node is None: |
|
464 | 464 | return |
|
465 | 465 | repo.changelog.rev(node) # make sure node isn't filtered |
|
466 | 466 | return node |
|
467 | 467 | |
|
468 | 468 | def mayberevnum(repo, prefix): |
|
469 | 469 | """Checks if the given prefix may be mistaken for a revision number""" |
|
470 | 470 | try: |
|
471 | 471 | i = int(prefix) |
|
472 | 472 | # if we are a pure int, then starting with zero will not be |
|
473 | 473 | # confused as a rev; or, obviously, if the int is larger |
|
474 | 474 | # than the value of the tip rev. We still need to disambiguate if |
|
475 | 475 | # prefix == '0', since that *is* a valid revnum. |
|
476 | 476 | if (prefix != b'0' and prefix[0:1] == b'0') or i >= len(repo): |
|
477 | 477 | return False |
|
478 | 478 | return True |
|
479 | 479 | except ValueError: |
|
480 | 480 | return False |
|
481 | 481 | |
|
482 | 482 | def shortesthexnodeidprefix(repo, node, minlength=1, cache=None): |
|
483 | 483 | """Find the shortest unambiguous prefix that matches hexnode. |
|
484 | 484 | |
|
485 | 485 | If "cache" is not None, it must be a dictionary that can be used for |
|
486 | 486 | caching between calls to this method. |
|
487 | 487 | """ |
|
488 | 488 | # _partialmatch() of filtered changelog could take O(len(repo)) time, |
|
489 | 489 | # which would be unacceptably slow. so we look for hash collision in |
|
490 | 490 | # unfiltered space, which means some hashes may be slightly longer. |
|
491 | 491 | |
|
492 | 492 | minlength = max(minlength, 1) |
|
493 | 493 | |
|
494 | 494 | def disambiguate(prefix): |
|
495 | 495 | """Disambiguate against revnums.""" |
|
496 | 496 | if repo.ui.configbool('experimental', 'revisions.prefixhexnode'): |
|
497 | 497 | if mayberevnum(repo, prefix): |
|
498 | 498 | return 'x' + prefix |
|
499 | 499 | else: |
|
500 | 500 | return prefix |
|
501 | 501 | |
|
502 | 502 | hexnode = hex(node) |
|
503 | 503 | for length in range(len(prefix), len(hexnode) + 1): |
|
504 | 504 | prefix = hexnode[:length] |
|
505 | 505 | if not mayberevnum(repo, prefix): |
|
506 | 506 | return prefix |
|
507 | 507 | |
|
508 | 508 | cl = repo.unfiltered().changelog |
|
509 | 509 | revset = repo.ui.config('experimental', 'revisions.disambiguatewithin') |
|
510 | 510 | if revset: |
|
511 | 511 | revs = None |
|
512 | 512 | if cache is not None: |
|
513 | 513 | revs = cache.get('disambiguationrevset') |
|
514 | 514 | if revs is None: |
|
515 | 515 | revs = repo.anyrevs([revset], user=True) |
|
516 | 516 | if cache is not None: |
|
517 | 517 | cache['disambiguationrevset'] = revs |
|
518 | 518 | if cl.rev(node) in revs: |
|
519 | 519 | hexnode = hex(node) |
|
520 | 520 | nodetree = None |
|
521 | 521 | if cache is not None: |
|
522 | 522 | nodetree = cache.get('disambiguationnodetree') |
|
523 | 523 | if not nodetree: |
|
524 | 524 | try: |
|
525 | 525 | nodetree = parsers.nodetree(cl.index, len(revs)) |
|
526 | 526 | except AttributeError: |
|
527 | 527 | # no native nodetree |
|
528 | 528 | pass |
|
529 | 529 | else: |
|
530 | 530 | for r in revs: |
|
531 | 531 | nodetree.insert(r) |
|
532 | 532 | if cache is not None: |
|
533 | 533 | cache['disambiguationnodetree'] = nodetree |
|
534 | 534 | if nodetree is not None: |
|
535 | 535 | length = max(nodetree.shortest(node), minlength) |
|
536 | 536 | prefix = hexnode[:length] |
|
537 | 537 | return disambiguate(prefix) |
|
538 | 538 | for length in range(minlength, len(hexnode) + 1): |
|
539 | 539 | matches = [] |
|
540 | 540 | prefix = hexnode[:length] |
|
541 | 541 | for rev in revs: |
|
542 | 542 | otherhexnode = repo[rev].hex() |
|
543 | 543 | if prefix == otherhexnode[:length]: |
|
544 | 544 | matches.append(otherhexnode) |
|
545 | 545 | if len(matches) == 1: |
|
546 | 546 | return disambiguate(prefix) |
|
547 | 547 | |
|
548 | 548 | try: |
|
549 | 549 | return disambiguate(cl.shortest(node, minlength)) |
|
550 | 550 | except error.LookupError: |
|
551 | 551 | raise error.RepoLookupError() |
|
552 | 552 | |
|
553 | 553 | def isrevsymbol(repo, symbol): |
|
554 | 554 | """Checks if a symbol exists in the repo. |
|
555 | 555 | |
|
556 | 556 | See revsymbol() for details. Raises error.AmbiguousPrefixLookupError if the |
|
557 | 557 | symbol is an ambiguous nodeid prefix. |
|
558 | 558 | """ |
|
559 | 559 | try: |
|
560 | 560 | revsymbol(repo, symbol) |
|
561 | 561 | return True |
|
562 | 562 | except error.RepoLookupError: |
|
563 | 563 | return False |
|
564 | 564 | |
|
565 | 565 | def revsymbol(repo, symbol): |
|
566 | 566 | """Returns a context given a single revision symbol (as string). |
|
567 | 567 | |
|
568 | 568 | This is similar to revsingle(), but accepts only a single revision symbol, |
|
569 | 569 | i.e. things like ".", "tip", "1234", "deadbeef", "my-bookmark" work, but |
|
570 | 570 | not "max(public())". |
|
571 | 571 | """ |
|
572 | 572 | if not isinstance(symbol, bytes): |
|
573 | 573 | msg = ("symbol (%s of type %s) was not a string, did you mean " |
|
574 | 574 | "repo[symbol]?" % (symbol, type(symbol))) |
|
575 | 575 | raise error.ProgrammingError(msg) |
|
576 | 576 | try: |
|
577 | 577 | if symbol in ('.', 'tip', 'null'): |
|
578 | 578 | return repo[symbol] |
|
579 | 579 | |
|
580 | 580 | try: |
|
581 | 581 | r = int(symbol) |
|
582 | 582 | if '%d' % r != symbol: |
|
583 | 583 | raise ValueError |
|
584 | 584 | l = len(repo.changelog) |
|
585 | 585 | if r < 0: |
|
586 | 586 | r += l |
|
587 | 587 | if r < 0 or r >= l and r != wdirrev: |
|
588 | 588 | raise ValueError |
|
589 | 589 | return repo[r] |
|
590 | 590 | except error.FilteredIndexError: |
|
591 | 591 | raise |
|
592 | 592 | except (ValueError, OverflowError, IndexError): |
|
593 | 593 | pass |
|
594 | 594 | |
|
595 | 595 | if len(symbol) == 40: |
|
596 | 596 | try: |
|
597 | 597 | node = bin(symbol) |
|
598 | 598 | rev = repo.changelog.rev(node) |
|
599 | 599 | return repo[rev] |
|
600 | 600 | except error.FilteredLookupError: |
|
601 | 601 | raise |
|
602 | 602 | except (TypeError, LookupError): |
|
603 | 603 | pass |
|
604 | 604 | |
|
605 | 605 | # look up bookmarks through the name interface |
|
606 | 606 | try: |
|
607 | 607 | node = repo.names.singlenode(repo, symbol) |
|
608 | 608 | rev = repo.changelog.rev(node) |
|
609 | 609 | return repo[rev] |
|
610 | 610 | except KeyError: |
|
611 | 611 | pass |
|
612 | 612 | |
|
613 | 613 | node = resolvehexnodeidprefix(repo, symbol) |
|
614 | 614 | if node is not None: |
|
615 | 615 | rev = repo.changelog.rev(node) |
|
616 | 616 | return repo[rev] |
|
617 | 617 | |
|
618 | 618 | raise error.RepoLookupError(_("unknown revision '%s'") % symbol) |
|
619 | 619 | |
|
620 | 620 | except error.WdirUnsupported: |
|
621 | 621 | return repo[None] |
|
622 | 622 | except (error.FilteredIndexError, error.FilteredLookupError, |
|
623 | 623 | error.FilteredRepoLookupError): |
|
624 | 624 | raise _filterederror(repo, symbol) |
|
625 | 625 | |
|
626 | 626 | def _filterederror(repo, changeid): |
|
627 | 627 | """build an exception to be raised about a filtered changeid |
|
628 | 628 | |
|
629 | 629 | This is extracted in a function to help extensions (eg: evolve) to |
|
630 | 630 | experiment with various message variants.""" |
|
631 | 631 | if repo.filtername.startswith('visible'): |
|
632 | 632 | |
|
633 | 633 | # Check if the changeset is obsolete |
|
634 | 634 | unfilteredrepo = repo.unfiltered() |
|
635 | 635 | ctx = revsymbol(unfilteredrepo, changeid) |
|
636 | 636 | |
|
637 | 637 | # If the changeset is obsolete, enrich the message with the reason |
|
638 | 638 | # that made this changeset not visible |
|
639 | 639 | if ctx.obsolete(): |
|
640 | 640 | msg = obsutil._getfilteredreason(repo, changeid, ctx) |
|
641 | 641 | else: |
|
642 | 642 | msg = _("hidden revision '%s'") % changeid |
|
643 | 643 | |
|
644 | 644 | hint = _('use --hidden to access hidden revisions') |
|
645 | 645 | |
|
646 | 646 | return error.FilteredRepoLookupError(msg, hint=hint) |
|
647 | 647 | msg = _("filtered revision '%s' (not in '%s' subset)") |
|
648 | 648 | msg %= (changeid, repo.filtername) |
|
649 | 649 | return error.FilteredRepoLookupError(msg) |
|
650 | 650 | |
|
651 | 651 | def revsingle(repo, revspec, default='.', localalias=None): |
|
652 | 652 | if not revspec and revspec != 0: |
|
653 | 653 | return repo[default] |
|
654 | 654 | |
|
655 | 655 | l = revrange(repo, [revspec], localalias=localalias) |
|
656 | 656 | if not l: |
|
657 | 657 | raise error.Abort(_('empty revision set')) |
|
658 | 658 | return repo[l.last()] |
|
659 | 659 | |
|
660 | 660 | def _pairspec(revspec): |
|
661 | 661 | tree = revsetlang.parse(revspec) |
|
662 | 662 | return tree and tree[0] in ('range', 'rangepre', 'rangepost', 'rangeall') |
|
663 | 663 | |
|
664 | 664 | def revpair(repo, revs): |
|
665 | 665 | if not revs: |
|
666 | 666 | return repo['.'], repo[None] |
|
667 | 667 | |
|
668 | 668 | l = revrange(repo, revs) |
|
669 | 669 | |
|
670 | 670 | if not l: |
|
671 | 671 | raise error.Abort(_('empty revision range')) |
|
672 | 672 | |
|
673 | 673 | first = l.first() |
|
674 | 674 | second = l.last() |
|
675 | 675 | |
|
676 | 676 | if (first == second and len(revs) >= 2 |
|
677 | 677 | and not all(revrange(repo, [r]) for r in revs)): |
|
678 | 678 | raise error.Abort(_('empty revision on one side of range')) |
|
679 | 679 | |
|
680 | 680 | # if top-level is range expression, the result must always be a pair |
|
681 | 681 | if first == second and len(revs) == 1 and not _pairspec(revs[0]): |
|
682 | 682 | return repo[first], repo[None] |
|
683 | 683 | |
|
684 | 684 | return repo[first], repo[second] |
|
685 | 685 | |
|
686 | 686 | def revrange(repo, specs, localalias=None): |
|
687 | 687 | """Execute 1 to many revsets and return the union. |
|
688 | 688 | |
|
689 | 689 | This is the preferred mechanism for executing revsets using user-specified |
|
690 | 690 | config options, such as revset aliases. |
|
691 | 691 | |
|
692 | 692 | The revsets specified by ``specs`` will be executed via a chained ``OR`` |
|
693 | 693 | expression. If ``specs`` is empty, an empty result is returned. |
|
694 | 694 | |
|
695 | 695 | ``specs`` can contain integers, in which case they are assumed to be |
|
696 | 696 | revision numbers. |
|
697 | 697 | |
|
698 | 698 | It is assumed the revsets are already formatted. If you have arguments |
|
699 | 699 | that need to be expanded in the revset, call ``revsetlang.formatspec()`` |
|
700 | 700 | and pass the result as an element of ``specs``. |
|
701 | 701 | |
|
702 | 702 | Specifying a single revset is allowed. |
|
703 | 703 | |
|
704 | 704 | Returns a ``revset.abstractsmartset`` which is a list-like interface over |
|
705 | 705 | integer revisions. |
|
706 | 706 | """ |
|
707 | 707 | allspecs = [] |
|
708 | 708 | for spec in specs: |
|
709 | 709 | if isinstance(spec, int): |
|
710 | 710 | spec = revsetlang.formatspec('%d', spec) |
|
711 | 711 | allspecs.append(spec) |
|
712 | 712 | return repo.anyrevs(allspecs, user=True, localalias=localalias) |
|
713 | 713 | |
|
714 | 714 | def meaningfulparents(repo, ctx): |
|
715 | 715 | """Return list of meaningful (or all if debug) parentrevs for rev. |
|
716 | 716 | |
|
717 | 717 | For merges (two non-nullrev revisions) both parents are meaningful. |
|
718 | 718 | Otherwise the first parent revision is considered meaningful if it |
|
719 | 719 | is not the preceding revision. |
|
720 | 720 | """ |
|
721 | 721 | parents = ctx.parents() |
|
722 | 722 | if len(parents) > 1: |
|
723 | 723 | return parents |
|
724 | 724 | if repo.ui.debugflag: |
|
725 | 725 | return [parents[0], repo[nullrev]] |
|
726 | 726 | if parents[0].rev() >= intrev(ctx) - 1: |
|
727 | 727 | return [] |
|
728 | 728 | return parents |
|
729 | 729 | |
|
730 | 730 | def getuipathfn(repo, legacyrelativevalue=False, forcerelativevalue=None): |
|
731 | 731 | """Return a function that produced paths for presenting to the user. |
|
732 | 732 | |
|
733 | 733 | The returned function takes a repo-relative path and produces a path |
|
734 | 734 | that can be presented in the UI. |
|
735 | 735 | |
|
736 | 736 | Depending on the value of ui.relative-paths, either a repo-relative or |
|
737 | 737 | cwd-relative path will be produced. |
|
738 | 738 | |
|
739 | 739 | legacyrelativevalue is the value to use if ui.relative-paths=legacy |
|
740 | 740 | |
|
741 | 741 | If forcerelativevalue is not None, then that value will be used regardless |
|
742 | 742 | of what ui.relative-paths is set to. |
|
743 | 743 | """ |
|
744 | 744 | if forcerelativevalue is not None: |
|
745 | 745 | relative = forcerelativevalue |
|
746 | 746 | else: |
|
747 | 747 | config = repo.ui.config('ui', 'relative-paths') |
|
748 | 748 | if config == 'legacy': |
|
749 | 749 | relative = legacyrelativevalue |
|
750 | 750 | else: |
|
751 | 751 | relative = stringutil.parsebool(config) |
|
752 | 752 | if relative is None: |
|
753 | 753 | raise error.ConfigError( |
|
754 | 754 | _("ui.relative-paths is not a boolean ('%s')") % config) |
|
755 | 755 | |
|
756 | 756 | if relative: |
|
757 | 757 | cwd = repo.getcwd() |
|
758 | 758 | pathto = repo.pathto |
|
759 | 759 | return lambda f: pathto(f, cwd) |
|
760 | 760 | elif repo.ui.configbool('ui', 'slash'): |
|
761 | 761 | return lambda f: f |
|
762 | 762 | else: |
|
763 | 763 | return util.localpath |
|
764 | 764 | |
|
765 | 765 | def subdiruipathfn(subpath, uipathfn): |
|
766 | 766 | '''Create a new uipathfn that treats the file as relative to subpath.''' |
|
767 | 767 | return lambda f: uipathfn(posixpath.join(subpath, f)) |
|
768 | 768 | |
|
769 | 769 | def anypats(pats, opts): |
|
770 | 770 | '''Checks if any patterns, including --include and --exclude were given. |
|
771 | 771 | |
|
772 | 772 | Some commands (e.g. addremove) use this condition for deciding whether to |
|
773 | 773 | print absolute or relative paths. |
|
774 | 774 | ''' |
|
775 | 775 | return bool(pats or opts.get('include') or opts.get('exclude')) |
|
776 | 776 | |
|
777 | 777 | def expandpats(pats): |
|
778 | 778 | '''Expand bare globs when running on windows. |
|
779 | 779 | On posix we assume it already has already been done by sh.''' |
|
780 | 780 | if not util.expandglobs: |
|
781 | 781 | return list(pats) |
|
782 | 782 | ret = [] |
|
783 | 783 | for kindpat in pats: |
|
784 | 784 | kind, pat = matchmod._patsplit(kindpat, None) |
|
785 | 785 | if kind is None: |
|
786 | 786 | try: |
|
787 | 787 | globbed = glob.glob(pat) |
|
788 | 788 | except re.error: |
|
789 | 789 | globbed = [pat] |
|
790 | 790 | if globbed: |
|
791 | 791 | ret.extend(globbed) |
|
792 | 792 | continue |
|
793 | 793 | ret.append(kindpat) |
|
794 | 794 | return ret |
|
795 | 795 | |
|
796 | 796 | def matchandpats(ctx, pats=(), opts=None, globbed=False, default='relpath', |
|
797 | 797 | badfn=None): |
|
798 | 798 | '''Return a matcher and the patterns that were used. |
|
799 | 799 | The matcher will warn about bad matches, unless an alternate badfn callback |
|
800 | 800 | is provided.''' |
|
801 | 801 | if opts is None: |
|
802 | 802 | opts = {} |
|
803 | 803 | if not globbed and default == 'relpath': |
|
804 | 804 | pats = expandpats(pats or []) |
|
805 | 805 | |
|
806 | 806 | uipathfn = getuipathfn(ctx.repo(), legacyrelativevalue=True) |
|
807 | 807 | def bad(f, msg): |
|
808 | 808 | ctx.repo().ui.warn("%s: %s\n" % (uipathfn(f), msg)) |
|
809 | 809 | |
|
810 | 810 | if badfn is None: |
|
811 | 811 | badfn = bad |
|
812 | 812 | |
|
813 | 813 | m = ctx.match(pats, opts.get('include'), opts.get('exclude'), |
|
814 | 814 | default, listsubrepos=opts.get('subrepos'), badfn=badfn) |
|
815 | 815 | |
|
816 | 816 | if m.always(): |
|
817 | 817 | pats = [] |
|
818 | 818 | return m, pats |
|
819 | 819 | |
|
820 | 820 | def match(ctx, pats=(), opts=None, globbed=False, default='relpath', |
|
821 | 821 | badfn=None): |
|
822 | 822 | '''Return a matcher that will warn about bad matches.''' |
|
823 | 823 | return matchandpats(ctx, pats, opts, globbed, default, badfn=badfn)[0] |
|
824 | 824 | |
|
825 | 825 | def matchall(repo): |
|
826 | 826 | '''Return a matcher that will efficiently match everything.''' |
|
827 | 827 | return matchmod.always() |
|
828 | 828 | |
|
829 | 829 | def matchfiles(repo, files, badfn=None): |
|
830 | 830 | '''Return a matcher that will efficiently match exactly these files.''' |
|
831 | 831 | return matchmod.exact(files, badfn=badfn) |
|
832 | 832 | |
|
833 | 833 | def parsefollowlinespattern(repo, rev, pat, msg): |
|
834 | 834 | """Return a file name from `pat` pattern suitable for usage in followlines |
|
835 | 835 | logic. |
|
836 | 836 | """ |
|
837 | 837 | if not matchmod.patkind(pat): |
|
838 | 838 | return pathutil.canonpath(repo.root, repo.getcwd(), pat) |
|
839 | 839 | else: |
|
840 | 840 | ctx = repo[rev] |
|
841 | 841 | m = matchmod.match(repo.root, repo.getcwd(), [pat], ctx=ctx) |
|
842 | 842 | files = [f for f in ctx if m(f)] |
|
843 | 843 | if len(files) != 1: |
|
844 | 844 | raise error.ParseError(msg) |
|
845 | 845 | return files[0] |
|
846 | 846 | |
|
847 | 847 | def getorigvfs(ui, repo): |
|
848 | 848 | """return a vfs suitable to save 'orig' file |
|
849 | 849 | |
|
850 | 850 | return None if no special directory is configured""" |
|
851 | 851 | origbackuppath = ui.config('ui', 'origbackuppath') |
|
852 | 852 | if not origbackuppath: |
|
853 | 853 | return None |
|
854 | 854 | return vfs.vfs(repo.wvfs.join(origbackuppath)) |
|
855 | 855 | |
|
856 | 856 | def backuppath(ui, repo, filepath): |
|
857 | 857 | '''customize where working copy backup files (.orig files) are created |
|
858 | 858 | |
|
859 | 859 | Fetch user defined path from config file: [ui] origbackuppath = <path> |
|
860 | 860 | Fall back to default (filepath with .orig suffix) if not specified |
|
861 | 861 | |
|
862 | 862 | filepath is repo-relative |
|
863 | 863 | |
|
864 | 864 | Returns an absolute path |
|
865 | 865 | ''' |
|
866 | 866 | origvfs = getorigvfs(ui, repo) |
|
867 | 867 | if origvfs is None: |
|
868 | 868 | return repo.wjoin(filepath + ".orig") |
|
869 | 869 | |
|
870 | 870 | origbackupdir = origvfs.dirname(filepath) |
|
871 | 871 | if not origvfs.isdir(origbackupdir) or origvfs.islink(origbackupdir): |
|
872 | 872 | ui.note(_('creating directory: %s\n') % origvfs.join(origbackupdir)) |
|
873 | 873 | |
|
874 | 874 | # Remove any files that conflict with the backup file's path |
|
875 | 875 | for f in reversed(list(util.finddirs(filepath))): |
|
876 | 876 | if origvfs.isfileorlink(f): |
|
877 | 877 | ui.note(_('removing conflicting file: %s\n') |
|
878 | 878 | % origvfs.join(f)) |
|
879 | 879 | origvfs.unlink(f) |
|
880 | 880 | break |
|
881 | 881 | |
|
882 | 882 | origvfs.makedirs(origbackupdir) |
|
883 | 883 | |
|
884 | 884 | if origvfs.isdir(filepath) and not origvfs.islink(filepath): |
|
885 | 885 | ui.note(_('removing conflicting directory: %s\n') |
|
886 | 886 | % origvfs.join(filepath)) |
|
887 | 887 | origvfs.rmtree(filepath, forcibly=True) |
|
888 | 888 | |
|
889 | 889 | return origvfs.join(filepath) |
|
890 | 890 | |
|
891 | 891 | class _containsnode(object): |
|
892 | 892 | """proxy __contains__(node) to container.__contains__ which accepts revs""" |
|
893 | 893 | |
|
894 | 894 | def __init__(self, repo, revcontainer): |
|
895 | 895 | self._torev = repo.changelog.rev |
|
896 | 896 | self._revcontains = revcontainer.__contains__ |
|
897 | 897 | |
|
898 | 898 | def __contains__(self, node): |
|
899 | 899 | return self._revcontains(self._torev(node)) |
|
900 | 900 | |
|
901 | 901 | def cleanupnodes(repo, replacements, operation, moves=None, metadata=None, |
|
902 | 902 | fixphase=False, targetphase=None, backup=True): |
|
903 | 903 | """do common cleanups when old nodes are replaced by new nodes |
|
904 | 904 | |
|
905 | 905 | That includes writing obsmarkers or stripping nodes, and moving bookmarks. |
|
906 | 906 | (we might also want to move working directory parent in the future) |
|
907 | 907 | |
|
908 | 908 | By default, bookmark moves are calculated automatically from 'replacements', |
|
909 | 909 | but 'moves' can be used to override that. Also, 'moves' may include |
|
910 | 910 | additional bookmark moves that should not have associated obsmarkers. |
|
911 | 911 | |
|
912 | 912 | replacements is {oldnode: [newnode]} or a iterable of nodes if they do not |
|
913 | 913 | have replacements. operation is a string, like "rebase". |
|
914 | 914 | |
|
915 | 915 | metadata is dictionary containing metadata to be stored in obsmarker if |
|
916 | 916 | obsolescence is enabled. |
|
917 | 917 | """ |
|
918 | 918 | assert fixphase or targetphase is None |
|
919 | 919 | if not replacements and not moves: |
|
920 | 920 | return |
|
921 | 921 | |
|
922 | 922 | # translate mapping's other forms |
|
923 | 923 | if not util.safehasattr(replacements, 'items'): |
|
924 | 924 | replacements = {(n,): () for n in replacements} |
|
925 | 925 | else: |
|
926 | 926 | # upgrading non tuple "source" to tuple ones for BC |
|
927 | 927 | repls = {} |
|
928 | 928 | for key, value in replacements.items(): |
|
929 | 929 | if not isinstance(key, tuple): |
|
930 | 930 | key = (key,) |
|
931 | 931 | repls[key] = value |
|
932 | 932 | replacements = repls |
|
933 | 933 | |
|
934 | 934 | # Unfiltered repo is needed since nodes in replacements might be hidden. |
|
935 | 935 | unfi = repo.unfiltered() |
|
936 | 936 | |
|
937 | 937 | # Calculate bookmark movements |
|
938 | 938 | if moves is None: |
|
939 | 939 | moves = {} |
|
940 | 940 | for oldnodes, newnodes in replacements.items(): |
|
941 | 941 | for oldnode in oldnodes: |
|
942 | 942 | if oldnode in moves: |
|
943 | 943 | continue |
|
944 | 944 | if len(newnodes) > 1: |
|
945 | 945 | # usually a split, take the one with biggest rev number |
|
946 | 946 | newnode = next(unfi.set('max(%ln)', newnodes)).node() |
|
947 | 947 | elif len(newnodes) == 0: |
|
948 | 948 | # move bookmark backwards |
|
949 | 949 | allreplaced = [] |
|
950 | 950 | for rep in replacements: |
|
951 | 951 | allreplaced.extend(rep) |
|
952 | 952 | roots = list(unfi.set('max((::%n) - %ln)', oldnode, |
|
953 | 953 | allreplaced)) |
|
954 | 954 | if roots: |
|
955 | 955 | newnode = roots[0].node() |
|
956 | 956 | else: |
|
957 | 957 | newnode = nullid |
|
958 | 958 | else: |
|
959 | 959 | newnode = newnodes[0] |
|
960 | 960 | moves[oldnode] = newnode |
|
961 | 961 | |
|
962 | 962 | allnewnodes = [n for ns in replacements.values() for n in ns] |
|
963 | 963 | toretract = {} |
|
964 | 964 | toadvance = {} |
|
965 | 965 | if fixphase: |
|
966 | 966 | precursors = {} |
|
967 | 967 | for oldnodes, newnodes in replacements.items(): |
|
968 | 968 | for oldnode in oldnodes: |
|
969 | 969 | for newnode in newnodes: |
|
970 | 970 | precursors.setdefault(newnode, []).append(oldnode) |
|
971 | 971 | |
|
972 | 972 | allnewnodes.sort(key=lambda n: unfi[n].rev()) |
|
973 | 973 | newphases = {} |
|
974 | 974 | def phase(ctx): |
|
975 | 975 | return newphases.get(ctx.node(), ctx.phase()) |
|
976 | 976 | for newnode in allnewnodes: |
|
977 | 977 | ctx = unfi[newnode] |
|
978 | 978 | parentphase = max(phase(p) for p in ctx.parents()) |
|
979 | 979 | if targetphase is None: |
|
980 | 980 | oldphase = max(unfi[oldnode].phase() |
|
981 | 981 | for oldnode in precursors[newnode]) |
|
982 | 982 | newphase = max(oldphase, parentphase) |
|
983 | 983 | else: |
|
984 | 984 | newphase = max(targetphase, parentphase) |
|
985 | 985 | newphases[newnode] = newphase |
|
986 | 986 | if newphase > ctx.phase(): |
|
987 | 987 | toretract.setdefault(newphase, []).append(newnode) |
|
988 | 988 | elif newphase < ctx.phase(): |
|
989 | 989 | toadvance.setdefault(newphase, []).append(newnode) |
|
990 | 990 | |
|
991 | 991 | with repo.transaction('cleanup') as tr: |
|
992 | 992 | # Move bookmarks |
|
993 | 993 | bmarks = repo._bookmarks |
|
994 | 994 | bmarkchanges = [] |
|
995 | 995 | for oldnode, newnode in moves.items(): |
|
996 | 996 | oldbmarks = repo.nodebookmarks(oldnode) |
|
997 | 997 | if not oldbmarks: |
|
998 | 998 | continue |
|
999 | 999 | from . import bookmarks # avoid import cycle |
|
1000 | 1000 | repo.ui.debug('moving bookmarks %r from %s to %s\n' % |
|
1001 | 1001 | (pycompat.rapply(pycompat.maybebytestr, oldbmarks), |
|
1002 | 1002 | hex(oldnode), hex(newnode))) |
|
1003 | 1003 | # Delete divergent bookmarks being parents of related newnodes |
|
1004 | 1004 | deleterevs = repo.revs('parents(roots(%ln & (::%n))) - parents(%n)', |
|
1005 | 1005 | allnewnodes, newnode, oldnode) |
|
1006 | 1006 | deletenodes = _containsnode(repo, deleterevs) |
|
1007 | 1007 | for name in oldbmarks: |
|
1008 | 1008 | bmarkchanges.append((name, newnode)) |
|
1009 | 1009 | for b in bookmarks.divergent2delete(repo, deletenodes, name): |
|
1010 | 1010 | bmarkchanges.append((b, None)) |
|
1011 | 1011 | |
|
1012 | 1012 | if bmarkchanges: |
|
1013 | 1013 | bmarks.applychanges(repo, tr, bmarkchanges) |
|
1014 | 1014 | |
|
1015 | 1015 | for phase, nodes in toretract.items(): |
|
1016 | 1016 | phases.retractboundary(repo, tr, phase, nodes) |
|
1017 | 1017 | for phase, nodes in toadvance.items(): |
|
1018 | 1018 | phases.advanceboundary(repo, tr, phase, nodes) |
|
1019 | 1019 | |
|
1020 | 1020 | mayusearchived = repo.ui.config('experimental', 'cleanup-as-archived') |
|
1021 | 1021 | # Obsolete or strip nodes |
|
1022 | 1022 | if obsolete.isenabled(repo, obsolete.createmarkersopt): |
|
1023 | 1023 | # If a node is already obsoleted, and we want to obsolete it |
|
1024 | 1024 | # without a successor, skip that obssolete request since it's |
|
1025 | 1025 | # unnecessary. That's the "if s or not isobs(n)" check below. |
|
1026 | 1026 | # Also sort the node in topology order, that might be useful for |
|
1027 | 1027 | # some obsstore logic. |
|
1028 | 1028 | # NOTE: the sorting might belong to createmarkers. |
|
1029 | 1029 | torev = unfi.changelog.rev |
|
1030 | 1030 | sortfunc = lambda ns: torev(ns[0][0]) |
|
1031 | 1031 | rels = [] |
|
1032 | 1032 | for ns, s in sorted(replacements.items(), key=sortfunc): |
|
1033 | 1033 | rel = (tuple(unfi[n] for n in ns), tuple(unfi[m] for m in s)) |
|
1034 | 1034 | rels.append(rel) |
|
1035 | 1035 | if rels: |
|
1036 | 1036 | obsolete.createmarkers(repo, rels, operation=operation, |
|
1037 | 1037 | metadata=metadata) |
|
1038 | 1038 | elif phases.supportinternal(repo) and mayusearchived: |
|
1039 | 1039 | # this assume we do not have "unstable" nodes above the cleaned ones |
|
1040 | 1040 | allreplaced = set() |
|
1041 | 1041 | for ns in replacements.keys(): |
|
1042 | 1042 | allreplaced.update(ns) |
|
1043 | 1043 | if backup: |
|
1044 | 1044 | from . import repair # avoid import cycle |
|
1045 | 1045 | node = min(allreplaced, key=repo.changelog.rev) |
|
1046 | 1046 | repair.backupbundle(repo, allreplaced, allreplaced, node, |
|
1047 | 1047 | operation) |
|
1048 | 1048 | phases.retractboundary(repo, tr, phases.archived, allreplaced) |
|
1049 | 1049 | else: |
|
1050 | 1050 | from . import repair # avoid import cycle |
|
1051 | 1051 | tostrip = list(n for ns in replacements for n in ns) |
|
1052 | 1052 | if tostrip: |
|
1053 | 1053 | repair.delayedstrip(repo.ui, repo, tostrip, operation, |
|
1054 | 1054 | backup=backup) |
|
1055 | 1055 | |
|
1056 | 1056 | def addremove(repo, matcher, prefix, uipathfn, opts=None): |
|
1057 | 1057 | if opts is None: |
|
1058 | 1058 | opts = {} |
|
1059 | 1059 | m = matcher |
|
1060 | 1060 | dry_run = opts.get('dry_run') |
|
1061 | 1061 | try: |
|
1062 | 1062 | similarity = float(opts.get('similarity') or 0) |
|
1063 | 1063 | except ValueError: |
|
1064 | 1064 | raise error.Abort(_('similarity must be a number')) |
|
1065 | 1065 | if similarity < 0 or similarity > 100: |
|
1066 | 1066 | raise error.Abort(_('similarity must be between 0 and 100')) |
|
1067 | 1067 | similarity /= 100.0 |
|
1068 | 1068 | |
|
1069 | 1069 | ret = 0 |
|
1070 | 1070 | |
|
1071 | 1071 | wctx = repo[None] |
|
1072 | 1072 | for subpath in sorted(wctx.substate): |
|
1073 | 1073 | submatch = matchmod.subdirmatcher(subpath, m) |
|
1074 | 1074 | if opts.get('subrepos') or m.exact(subpath) or any(submatch.files()): |
|
1075 | 1075 | sub = wctx.sub(subpath) |
|
1076 | 1076 | subprefix = repo.wvfs.reljoin(prefix, subpath) |
|
1077 | 1077 | subuipathfn = subdiruipathfn(subpath, uipathfn) |
|
1078 | 1078 | try: |
|
1079 | 1079 | if sub.addremove(submatch, subprefix, subuipathfn, opts): |
|
1080 | 1080 | ret = 1 |
|
1081 | 1081 | except error.LookupError: |
|
1082 | 1082 | repo.ui.status(_("skipping missing subrepository: %s\n") |
|
1083 | 1083 | % uipathfn(subpath)) |
|
1084 | 1084 | |
|
1085 | 1085 | rejected = [] |
|
1086 | 1086 | def badfn(f, msg): |
|
1087 | 1087 | if f in m.files(): |
|
1088 | 1088 | m.bad(f, msg) |
|
1089 | 1089 | rejected.append(f) |
|
1090 | 1090 | |
|
1091 | 1091 | badmatch = matchmod.badmatch(m, badfn) |
|
1092 | 1092 | added, unknown, deleted, removed, forgotten = _interestingfiles(repo, |
|
1093 | 1093 | badmatch) |
|
1094 | 1094 | |
|
1095 | 1095 | unknownset = set(unknown + forgotten) |
|
1096 | 1096 | toprint = unknownset.copy() |
|
1097 | 1097 | toprint.update(deleted) |
|
1098 | 1098 | for abs in sorted(toprint): |
|
1099 | 1099 | if repo.ui.verbose or not m.exact(abs): |
|
1100 | 1100 | if abs in unknownset: |
|
1101 | 1101 | status = _('adding %s\n') % uipathfn(abs) |
|
1102 | 1102 | label = 'ui.addremove.added' |
|
1103 | 1103 | else: |
|
1104 | 1104 | status = _('removing %s\n') % uipathfn(abs) |
|
1105 | 1105 | label = 'ui.addremove.removed' |
|
1106 | 1106 | repo.ui.status(status, label=label) |
|
1107 | 1107 | |
|
1108 | 1108 | renames = _findrenames(repo, m, added + unknown, removed + deleted, |
|
1109 | 1109 | similarity, uipathfn) |
|
1110 | 1110 | |
|
1111 | 1111 | if not dry_run: |
|
1112 | 1112 | _markchanges(repo, unknown + forgotten, deleted, renames) |
|
1113 | 1113 | |
|
1114 | 1114 | for f in rejected: |
|
1115 | 1115 | if f in m.files(): |
|
1116 | 1116 | return 1 |
|
1117 | 1117 | return ret |
|
1118 | 1118 | |
|
1119 | 1119 | def marktouched(repo, files, similarity=0.0): |
|
1120 | 1120 | '''Assert that files have somehow been operated upon. files are relative to |
|
1121 | 1121 | the repo root.''' |
|
1122 | 1122 | m = matchfiles(repo, files, badfn=lambda x, y: rejected.append(x)) |
|
1123 | 1123 | rejected = [] |
|
1124 | 1124 | |
|
1125 | 1125 | added, unknown, deleted, removed, forgotten = _interestingfiles(repo, m) |
|
1126 | 1126 | |
|
1127 | 1127 | if repo.ui.verbose: |
|
1128 | 1128 | unknownset = set(unknown + forgotten) |
|
1129 | 1129 | toprint = unknownset.copy() |
|
1130 | 1130 | toprint.update(deleted) |
|
1131 | 1131 | for abs in sorted(toprint): |
|
1132 | 1132 | if abs in unknownset: |
|
1133 | 1133 | status = _('adding %s\n') % abs |
|
1134 | 1134 | else: |
|
1135 | 1135 | status = _('removing %s\n') % abs |
|
1136 | 1136 | repo.ui.status(status) |
|
1137 | 1137 | |
|
1138 | 1138 | # TODO: We should probably have the caller pass in uipathfn and apply it to |
|
1139 | 1139 | # the messages above too. legacyrelativevalue=True is consistent with how |
|
1140 | 1140 | # it used to work. |
|
1141 | 1141 | uipathfn = getuipathfn(repo, legacyrelativevalue=True) |
|
1142 | 1142 | renames = _findrenames(repo, m, added + unknown, removed + deleted, |
|
1143 | 1143 | similarity, uipathfn) |
|
1144 | 1144 | |
|
1145 | 1145 | _markchanges(repo, unknown + forgotten, deleted, renames) |
|
1146 | 1146 | |
|
1147 | 1147 | for f in rejected: |
|
1148 | 1148 | if f in m.files(): |
|
1149 | 1149 | return 1 |
|
1150 | 1150 | return 0 |
|
1151 | 1151 | |
|
1152 | 1152 | def _interestingfiles(repo, matcher): |
|
1153 | 1153 | '''Walk dirstate with matcher, looking for files that addremove would care |
|
1154 | 1154 | about. |
|
1155 | 1155 | |
|
1156 | 1156 | This is different from dirstate.status because it doesn't care about |
|
1157 | 1157 | whether files are modified or clean.''' |
|
1158 | 1158 | added, unknown, deleted, removed, forgotten = [], [], [], [], [] |
|
1159 | 1159 | audit_path = pathutil.pathauditor(repo.root, cached=True) |
|
1160 | 1160 | |
|
1161 | 1161 | ctx = repo[None] |
|
1162 | 1162 | dirstate = repo.dirstate |
|
1163 | 1163 | matcher = repo.narrowmatch(matcher, includeexact=True) |
|
1164 | 1164 | walkresults = dirstate.walk(matcher, subrepos=sorted(ctx.substate), |
|
1165 | 1165 | unknown=True, ignored=False, full=False) |
|
1166 | 1166 | for abs, st in walkresults.iteritems(): |
|
1167 | 1167 | dstate = dirstate[abs] |
|
1168 | 1168 | if dstate == '?' and audit_path.check(abs): |
|
1169 | 1169 | unknown.append(abs) |
|
1170 | 1170 | elif dstate != 'r' and not st: |
|
1171 | 1171 | deleted.append(abs) |
|
1172 | 1172 | elif dstate == 'r' and st: |
|
1173 | 1173 | forgotten.append(abs) |
|
1174 | 1174 | # for finding renames |
|
1175 | 1175 | elif dstate == 'r' and not st: |
|
1176 | 1176 | removed.append(abs) |
|
1177 | 1177 | elif dstate == 'a': |
|
1178 | 1178 | added.append(abs) |
|
1179 | 1179 | |
|
1180 | 1180 | return added, unknown, deleted, removed, forgotten |
|
1181 | 1181 | |
|
1182 | 1182 | def _findrenames(repo, matcher, added, removed, similarity, uipathfn): |
|
1183 | 1183 | '''Find renames from removed files to added ones.''' |
|
1184 | 1184 | renames = {} |
|
1185 | 1185 | if similarity > 0: |
|
1186 | 1186 | for old, new, score in similar.findrenames(repo, added, removed, |
|
1187 | 1187 | similarity): |
|
1188 | 1188 | if (repo.ui.verbose or not matcher.exact(old) |
|
1189 | 1189 | or not matcher.exact(new)): |
|
1190 | 1190 | repo.ui.status(_('recording removal of %s as rename to %s ' |
|
1191 | 1191 | '(%d%% similar)\n') % |
|
1192 | 1192 | (uipathfn(old), uipathfn(new), |
|
1193 | 1193 | score * 100)) |
|
1194 | 1194 | renames[new] = old |
|
1195 | 1195 | return renames |
|
1196 | 1196 | |
|
1197 | 1197 | def _markchanges(repo, unknown, deleted, renames): |
|
1198 | 1198 | '''Marks the files in unknown as added, the files in deleted as removed, |
|
1199 | 1199 | and the files in renames as copied.''' |
|
1200 | 1200 | wctx = repo[None] |
|
1201 | 1201 | with repo.wlock(): |
|
1202 | 1202 | wctx.forget(deleted) |
|
1203 | 1203 | wctx.add(unknown) |
|
1204 | 1204 | for new, old in renames.iteritems(): |
|
1205 | 1205 | wctx.copy(old, new) |
|
1206 | 1206 | |
|
1207 | 1207 | def getrenamedfn(repo, endrev=None): |
|
1208 | 1208 | if copiesmod.usechangesetcentricalgo(repo): |
|
1209 | 1209 | def getrenamed(fn, rev): |
|
1210 | 1210 | ctx = repo[rev] |
|
1211 | 1211 | p1copies = ctx.p1copies() |
|
1212 | 1212 | if fn in p1copies: |
|
1213 | 1213 | return p1copies[fn] |
|
1214 | 1214 | p2copies = ctx.p2copies() |
|
1215 | 1215 | if fn in p2copies: |
|
1216 | 1216 | return p2copies[fn] |
|
1217 | 1217 | return None |
|
1218 | 1218 | return getrenamed |
|
1219 | 1219 | |
|
1220 | 1220 | rcache = {} |
|
1221 | 1221 | if endrev is None: |
|
1222 | 1222 | endrev = len(repo) |
|
1223 | 1223 | |
|
1224 | 1224 | def getrenamed(fn, rev): |
|
1225 | 1225 | '''looks up all renames for a file (up to endrev) the first |
|
1226 | 1226 | time the file is given. It indexes on the changerev and only |
|
1227 | 1227 | parses the manifest if linkrev != changerev. |
|
1228 | 1228 | Returns rename info for fn at changerev rev.''' |
|
1229 | 1229 | if fn not in rcache: |
|
1230 | 1230 | rcache[fn] = {} |
|
1231 | 1231 | fl = repo.file(fn) |
|
1232 | 1232 | for i in fl: |
|
1233 | 1233 | lr = fl.linkrev(i) |
|
1234 | 1234 | renamed = fl.renamed(fl.node(i)) |
|
1235 | 1235 | rcache[fn][lr] = renamed and renamed[0] |
|
1236 | 1236 | if lr >= endrev: |
|
1237 | 1237 | break |
|
1238 | 1238 | if rev in rcache[fn]: |
|
1239 | 1239 | return rcache[fn][rev] |
|
1240 | 1240 | |
|
1241 | 1241 | # If linkrev != rev (i.e. rev not found in rcache) fallback to |
|
1242 | 1242 | # filectx logic. |
|
1243 | 1243 | try: |
|
1244 | 1244 | return repo[rev][fn].copysource() |
|
1245 | 1245 | except error.LookupError: |
|
1246 | 1246 | return None |
|
1247 | 1247 | |
|
1248 | 1248 | return getrenamed |
|
1249 | 1249 | |
|
1250 | 1250 | def getcopiesfn(repo, endrev=None): |
|
1251 | 1251 | if copiesmod.usechangesetcentricalgo(repo): |
|
1252 | 1252 | def copiesfn(ctx): |
|
1253 | 1253 | if ctx.p2copies(): |
|
1254 | 1254 | allcopies = ctx.p1copies().copy() |
|
1255 | 1255 | # There should be no overlap |
|
1256 | 1256 | allcopies.update(ctx.p2copies()) |
|
1257 | 1257 | return sorted(allcopies.items()) |
|
1258 | 1258 | else: |
|
1259 | 1259 | return sorted(ctx.p1copies().items()) |
|
1260 | 1260 | else: |
|
1261 | 1261 | getrenamed = getrenamedfn(repo, endrev) |
|
1262 | 1262 | def copiesfn(ctx): |
|
1263 | 1263 | copies = [] |
|
1264 | 1264 | for fn in ctx.files(): |
|
1265 | 1265 | rename = getrenamed(fn, ctx.rev()) |
|
1266 | 1266 | if rename: |
|
1267 | 1267 | copies.append((fn, rename)) |
|
1268 | 1268 | return copies |
|
1269 | 1269 | |
|
1270 | 1270 | return copiesfn |
|
1271 | 1271 | |
|
1272 | 1272 | def dirstatecopy(ui, repo, wctx, src, dst, dryrun=False, cwd=None): |
|
1273 | 1273 | """Update the dirstate to reflect the intent of copying src to dst. For |
|
1274 | 1274 | different reasons it might not end with dst being marked as copied from src. |
|
1275 | 1275 | """ |
|
1276 | 1276 | origsrc = repo.dirstate.copied(src) or src |
|
1277 | 1277 | if dst == origsrc: # copying back a copy? |
|
1278 | 1278 | if repo.dirstate[dst] not in 'mn' and not dryrun: |
|
1279 | 1279 | repo.dirstate.normallookup(dst) |
|
1280 | 1280 | else: |
|
1281 | 1281 | if repo.dirstate[origsrc] == 'a' and origsrc == src: |
|
1282 | 1282 | if not ui.quiet: |
|
1283 | 1283 | ui.warn(_("%s has not been committed yet, so no copy " |
|
1284 | 1284 | "data will be stored for %s.\n") |
|
1285 | 1285 | % (repo.pathto(origsrc, cwd), repo.pathto(dst, cwd))) |
|
1286 | 1286 | if repo.dirstate[dst] in '?r' and not dryrun: |
|
1287 | 1287 | wctx.add([dst]) |
|
1288 | 1288 | elif not dryrun: |
|
1289 | 1289 | wctx.copy(origsrc, dst) |
|
1290 | 1290 | |
|
1291 | 1291 | def movedirstate(repo, newctx, match=None): |
|
1292 | 1292 | """Move the dirstate to newctx and adjust it as necessary. |
|
1293 | 1293 | |
|
1294 | 1294 | A matcher can be provided as an optimization. It is probably a bug to pass |
|
1295 | 1295 | a matcher that doesn't match all the differences between the parent of the |
|
1296 | 1296 | working copy and newctx. |
|
1297 | 1297 | """ |
|
1298 | 1298 | oldctx = repo['.'] |
|
1299 | 1299 | ds = repo.dirstate |
|
1300 | 1300 | ds.setparents(newctx.node(), nullid) |
|
1301 | 1301 | copies = dict(ds.copies()) |
|
1302 | 1302 | s = newctx.status(oldctx, match=match) |
|
1303 | 1303 | for f in s.modified: |
|
1304 | 1304 | if ds[f] == 'r': |
|
1305 | 1305 | # modified + removed -> removed |
|
1306 | 1306 | continue |
|
1307 | 1307 | ds.normallookup(f) |
|
1308 | 1308 | |
|
1309 | 1309 | for f in s.added: |
|
1310 | 1310 | if ds[f] == 'r': |
|
1311 | 1311 | # added + removed -> unknown |
|
1312 | 1312 | ds.drop(f) |
|
1313 | 1313 | elif ds[f] != 'a': |
|
1314 | 1314 | ds.add(f) |
|
1315 | 1315 | |
|
1316 | 1316 | for f in s.removed: |
|
1317 | 1317 | if ds[f] == 'a': |
|
1318 | 1318 | # removed + added -> normal |
|
1319 | 1319 | ds.normallookup(f) |
|
1320 | 1320 | elif ds[f] != 'r': |
|
1321 | 1321 | ds.remove(f) |
|
1322 | 1322 | |
|
1323 | 1323 | # Merge old parent and old working dir copies |
|
1324 | 1324 | oldcopies = copiesmod.pathcopies(newctx, oldctx, match) |
|
1325 | 1325 | oldcopies.update(copies) |
|
1326 | 1326 | copies = dict((dst, oldcopies.get(src, src)) |
|
1327 | 1327 | for dst, src in oldcopies.iteritems()) |
|
1328 | 1328 | # Adjust the dirstate copies |
|
1329 | 1329 | for dst, src in copies.iteritems(): |
|
1330 | 1330 | if (src not in newctx or dst in newctx or ds[dst] != 'a'): |
|
1331 | 1331 | src = None |
|
1332 | 1332 | ds.copy(src, dst) |
|
1333 | 1333 | |
|
1334 | 1334 | def writerequires(opener, requirements): |
|
1335 | 1335 | with opener('requires', 'w', atomictemp=True) as fp: |
|
1336 | 1336 | for r in sorted(requirements): |
|
1337 | 1337 | fp.write("%s\n" % r) |
|
1338 | 1338 | |
|
1339 | 1339 | class filecachesubentry(object): |
|
1340 | 1340 | def __init__(self, path, stat): |
|
1341 | 1341 | self.path = path |
|
1342 | 1342 | self.cachestat = None |
|
1343 | 1343 | self._cacheable = None |
|
1344 | 1344 | |
|
1345 | 1345 | if stat: |
|
1346 | 1346 | self.cachestat = filecachesubentry.stat(self.path) |
|
1347 | 1347 | |
|
1348 | 1348 | if self.cachestat: |
|
1349 | 1349 | self._cacheable = self.cachestat.cacheable() |
|
1350 | 1350 | else: |
|
1351 | 1351 | # None means we don't know yet |
|
1352 | 1352 | self._cacheable = None |
|
1353 | 1353 | |
|
1354 | 1354 | def refresh(self): |
|
1355 | 1355 | if self.cacheable(): |
|
1356 | 1356 | self.cachestat = filecachesubentry.stat(self.path) |
|
1357 | 1357 | |
|
1358 | 1358 | def cacheable(self): |
|
1359 | 1359 | if self._cacheable is not None: |
|
1360 | 1360 | return self._cacheable |
|
1361 | 1361 | |
|
1362 | 1362 | # we don't know yet, assume it is for now |
|
1363 | 1363 | return True |
|
1364 | 1364 | |
|
1365 | 1365 | def changed(self): |
|
1366 | 1366 | # no point in going further if we can't cache it |
|
1367 | 1367 | if not self.cacheable(): |
|
1368 | 1368 | return True |
|
1369 | 1369 | |
|
1370 | 1370 | newstat = filecachesubentry.stat(self.path) |
|
1371 | 1371 | |
|
1372 | 1372 | # we may not know if it's cacheable yet, check again now |
|
1373 | 1373 | if newstat and self._cacheable is None: |
|
1374 | 1374 | self._cacheable = newstat.cacheable() |
|
1375 | 1375 | |
|
1376 | 1376 | # check again |
|
1377 | 1377 | if not self._cacheable: |
|
1378 | 1378 | return True |
|
1379 | 1379 | |
|
1380 | 1380 | if self.cachestat != newstat: |
|
1381 | 1381 | self.cachestat = newstat |
|
1382 | 1382 | return True |
|
1383 | 1383 | else: |
|
1384 | 1384 | return False |
|
1385 | 1385 | |
|
1386 | 1386 | @staticmethod |
|
1387 | 1387 | def stat(path): |
|
1388 | 1388 | try: |
|
1389 | 1389 | return util.cachestat(path) |
|
1390 | 1390 | except OSError as e: |
|
1391 | 1391 | if e.errno != errno.ENOENT: |
|
1392 | 1392 | raise |
|
1393 | 1393 | |
|
1394 | 1394 | class filecacheentry(object): |
|
1395 | 1395 | def __init__(self, paths, stat=True): |
|
1396 | 1396 | self._entries = [] |
|
1397 | 1397 | for path in paths: |
|
1398 | 1398 | self._entries.append(filecachesubentry(path, stat)) |
|
1399 | 1399 | |
|
1400 | 1400 | def changed(self): |
|
1401 | 1401 | '''true if any entry has changed''' |
|
1402 | 1402 | for entry in self._entries: |
|
1403 | 1403 | if entry.changed(): |
|
1404 | 1404 | return True |
|
1405 | 1405 | return False |
|
1406 | 1406 | |
|
1407 | 1407 | def refresh(self): |
|
1408 | 1408 | for entry in self._entries: |
|
1409 | 1409 | entry.refresh() |
|
1410 | 1410 | |
|
1411 | 1411 | class filecache(object): |
|
1412 | 1412 |     """A property-like decorator that tracks files under .hg/ for updates.
|
1413 | 1413 | |
|
1414 | 1414 | On first access, the files defined as arguments are stat()ed and the |
|
1415 | 1415 | results cached. The decorated function is called. The results are stashed |
|
1416 | 1416 | away in a ``_filecache`` dict on the object whose method is decorated. |
|
1417 | 1417 | |
|
1418 | 1418 | On subsequent access, the cached result is used as it is set to the |
|
1419 | 1419 | instance dictionary. |
|
1420 | 1420 | |
|
1421 | 1421 | On external property set/delete operations, the caller must update the |
|
1422 | 1422 | corresponding _filecache entry appropriately. Use __class__.<attr>.set() |
|
1423 | 1423 | instead of directly setting <attr>. |
|
1424 | 1424 | |
|
1425 | 1425 | When using the property API, the cached data is always used if available. |
|
1426 | 1426 | No stat() is performed to check if the file has changed. |
|
1427 | 1427 | |
|
1428 | 1428 | Others can muck about with the state of the ``_filecache`` dict. e.g. they |
|
1429 | 1429 | can populate an entry before the property's getter is called. In this case, |
|
1430 | 1430 | entries in ``_filecache`` will be used during property operations, |
|
1431 | 1431 | if available. If the underlying file changes, it is up to external callers |
|
1432 | 1432 | to reflect this by e.g. calling ``delattr(obj, attr)`` to remove the cached |
|
1433 | 1433 | method result as well as possibly calling ``del obj._filecache[attr]`` to |
|
1434 | 1434 | remove the ``filecacheentry``. |
|
1435 | 1435 | """ |
|
1436 | 1436 | |
|
1437 | 1437 | def __init__(self, *paths): |
|
1438 | 1438 | self.paths = paths |
|
1439 | 1439 | |
|
1440 | 1440 | def join(self, obj, fname): |
|
1441 | 1441 | """Used to compute the runtime path of a cached file. |
|
1442 | 1442 | |
|
1443 | 1443 | Users should subclass filecache and provide their own version of this |
|
1444 | 1444 | function to call the appropriate join function on 'obj' (an instance |
|
1445 | 1445 |         of the class whose member function was decorated).
|
1446 | 1446 | """ |
|
1447 | 1447 | raise NotImplementedError |
|
1448 | 1448 | |
|
1449 | 1449 | def __call__(self, func): |
|
1450 | 1450 | self.func = func |
|
1451 | 1451 | self.sname = func.__name__ |
|
1452 | 1452 | self.name = pycompat.sysbytes(self.sname) |
|
1453 | 1453 | return self |
|
1454 | 1454 | |
|
1455 | 1455 | def __get__(self, obj, type=None): |
|
1456 | 1456 | # if accessed on the class, return the descriptor itself. |
|
1457 | 1457 | if obj is None: |
|
1458 | 1458 | return self |
|
1459 | 1459 | |
|
1460 | 1460 | assert self.sname not in obj.__dict__ |
|
1461 | 1461 | |
|
1462 | 1462 | entry = obj._filecache.get(self.name) |
|
1463 | 1463 | |
|
1464 | 1464 | if entry: |
|
1465 | 1465 | if entry.changed(): |
|
1466 | 1466 | entry.obj = self.func(obj) |
|
1467 | 1467 | else: |
|
1468 | 1468 | paths = [self.join(obj, path) for path in self.paths] |
|
1469 | 1469 | |
|
1470 | 1470 | # We stat -before- creating the object so our cache doesn't lie if |
|
1471 | 1471 |             # a writer modified it between the time we read and the time we stat
|
1472 | 1472 | entry = filecacheentry(paths, True) |
|
1473 | 1473 | entry.obj = self.func(obj) |
|
1474 | 1474 | |
|
1475 | 1475 | obj._filecache[self.name] = entry |
|
1476 | 1476 | |
|
1477 | 1477 | obj.__dict__[self.sname] = entry.obj |
|
1478 | 1478 | return entry.obj |
|
1479 | 1479 | |
|
1480 | 1480 | # don't implement __set__(), which would make __dict__ lookup as slow as |
|
1481 | 1481 | # function call. |
|
1482 | 1482 | |
|
1483 | 1483 | def set(self, obj, value): |
|
1484 | 1484 | if self.name not in obj._filecache: |
|
1485 | 1485 | # we add an entry for the missing value because X in __dict__ |
|
1486 | 1486 | # implies X in _filecache |
|
1487 | 1487 | paths = [self.join(obj, path) for path in self.paths] |
|
1488 | 1488 | ce = filecacheentry(paths, False) |
|
1489 | 1489 | obj._filecache[self.name] = ce |
|
1490 | 1490 | else: |
|
1491 | 1491 | ce = obj._filecache[self.name] |
|
1492 | 1492 | |
|
1493 | 1493 | ce.obj = value # update cached copy |
|
1494 | 1494 | obj.__dict__[self.sname] = value # update copy returned by obj.x |
|
1495 | 1495 | |
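For reference, a minimal sketch of how the filecache decorator above is typically wired up. The vfsfilecache subclass, the holder class and its vfs attribute are illustrative assumptions rather than part of this module; the only requirements imposed by the descriptor are a join() override and a _filecache dict on the decorated object.

    from mercurial import scmutil

    class vfsfilecache(scmutil.filecache):
        def join(self, obj, fname):
            # resolve the cached path against the object's vfs (assumed attribute)
            return obj.vfs.join(fname)

    class holder(object):
        def __init__(self, vfs):
            self.vfs = vfs
            self._filecache = {}  # entries are stashed here by __get__/set()

        @vfsfilecache('bookmarks')
        def bookmarks(self):
            # run on first access; re-run only after the attribute is deleted
            # and the stat of 'bookmarks' no longer matches the cached one
            return self.vfs.tryread('bookmarks')

As the docstring notes, external writers invalidate the cached value with delattr(obj, 'bookmarks'), and may also drop the corresponding _filecache entry.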
|
1496 | 1496 | def extdatasource(repo, source): |
|
1497 | 1497 | """Gather a map of rev -> value dict from the specified source |
|
1498 | 1498 | |
|
1499 | 1499 | A source spec is treated as a URL, with a special case shell: type |
|
1500 | 1500 | for parsing the output from a shell command. |
|
1501 | 1501 | |
|
1502 | 1502 | The data is parsed as a series of newline-separated records where |
|
1503 | 1503 | each record is a revision specifier optionally followed by a space |
|
1504 | 1504 | and a freeform string value. If the revision is known locally, it |
|
1505 | 1505 | is converted to a rev, otherwise the record is skipped. |
|
1506 | 1506 | |
|
1507 | 1507 | Note that both key and value are treated as UTF-8 and converted to |
|
1508 | 1508 | the local encoding. This allows uniformity between local and |
|
1509 | 1509 | remote data sources. |
|
1510 | 1510 | """ |
|
1511 | 1511 | |
|
1512 | 1512 | spec = repo.ui.config("extdata", source) |
|
1513 | 1513 | if not spec: |
|
1514 | 1514 | raise error.Abort(_("unknown extdata source '%s'") % source) |
|
1515 | 1515 | |
|
1516 | 1516 | data = {} |
|
1517 | 1517 | src = proc = None |
|
1518 | 1518 | try: |
|
1519 | 1519 | if spec.startswith("shell:"): |
|
1520 | 1520 | # external commands should be run relative to the repo root |
|
1521 | 1521 | cmd = spec[6:] |
|
1522 | 1522 | proc = subprocess.Popen(procutil.tonativestr(cmd), |
|
1523 | 1523 | shell=True, bufsize=-1, |
|
1524 | 1524 | close_fds=procutil.closefds, |
|
1525 | 1525 | stdout=subprocess.PIPE, |
|
1526 | 1526 | cwd=procutil.tonativestr(repo.root)) |
|
1527 | 1527 | src = proc.stdout |
|
1528 | 1528 | else: |
|
1529 | 1529 | # treat as a URL or file |
|
1530 | 1530 | src = url.open(repo.ui, spec) |
|
1531 | 1531 | for l in src: |
|
1532 | 1532 | if " " in l: |
|
1533 | 1533 | k, v = l.strip().split(" ", 1) |
|
1534 | 1534 | else: |
|
1535 | 1535 | k, v = l.strip(), "" |
|
1536 | 1536 | |
|
1537 | 1537 | k = encoding.tolocal(k) |
|
1538 | 1538 | try: |
|
1539 | 1539 | data[revsingle(repo, k).rev()] = encoding.tolocal(v) |
|
1540 | 1540 | except (error.LookupError, error.RepoLookupError): |
|
1541 | 1541 | pass # we ignore data for nodes that don't exist locally |
|
1542 | 1542 | finally: |
|
1543 | 1543 | if proc: |
|
1544 | 1544 | try: |
|
1545 | 1545 | proc.communicate() |
|
1546 | 1546 | except ValueError: |
|
1547 | 1547 | # This happens if we started iterating src and then |
|
1548 | 1548 | # get a parse error on a line. It should be safe to ignore. |
|
1549 | 1549 | pass |
|
1550 | 1550 | if src: |
|
1551 | 1551 | src.close() |
|
1552 | 1552 | if proc and proc.returncode != 0: |
|
1553 | 1553 | raise error.Abort(_("extdata command '%s' failed: %s") |
|
1554 | 1554 | % (cmd, procutil.explainexit(proc.returncode))) |
|
1555 | 1555 | |
|
1556 | 1556 | return data |
|
1557 | 1557 | |
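As a hedged illustration of the record format parsed above, assume a made-up 'bugzilla' source defined in hgrc; the command, file name and hashes below are examples only.

    # [extdata]
    # bugzilla = shell:cat .hg/bugzilla-status
    #
    # where .hg/bugzilla-status contains lines such as:
    #   9de260b1e2e0 in progress
    #   2dc09a01254d fixed
    #   badc0ffee123            (unknown locally, so silently skipped)

    data = extdatasource(repo, 'bugzilla')
    # data maps local revision numbers to the freeform strings,
    # e.g. {42: 'in progress', 57: 'fixed'}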
|
1558 | 1558 | def _locksub(repo, lock, envvar, cmd, environ=None, *args, **kwargs): |
|
1559 | 1559 | if lock is None: |
|
1560 | 1560 | raise error.LockInheritanceContractViolation( |
|
1561 | 1561 | 'lock can only be inherited while held') |
|
1562 | 1562 | if environ is None: |
|
1563 | 1563 | environ = {} |
|
1564 | 1564 | with lock.inherit() as locker: |
|
1565 | 1565 | environ[envvar] = locker |
|
1566 | 1566 | return repo.ui.system(cmd, environ=environ, *args, **kwargs) |
|
1567 | 1567 | |
|
1568 | 1568 | def wlocksub(repo, cmd, *args, **kwargs): |
|
1569 | 1569 | """run cmd as a subprocess that allows inheriting repo's wlock |
|
1570 | 1570 | |
|
1571 | 1571 | This can only be called while the wlock is held. This takes all the |
|
1572 | 1572 | arguments that ui.system does, and returns the exit code of the |
|
1573 | 1573 | subprocess.""" |
|
1574 | 1574 | return _locksub(repo, repo.currentwlock(), 'HG_WLOCK_LOCKER', cmd, *args, |
|
1575 | 1575 | **kwargs) |
|
1576 | 1576 | |
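A small usage sketch for wlocksub; the command is a placeholder, and the call is only legal while the wlock is held, per the docstring above.

    with repo.wlock():
        # the child sees HG_WLOCK_LOCKER and can reacquire the working-copy
        # lock instead of blocking against its parent
        rc = wlocksub(repo, 'hg debuglocks')  # placeholder command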
|
1577 | 1577 | class progress(object): |
|
1578 | 1578 | def __init__(self, ui, updatebar, topic, unit="", total=None): |
|
1579 | 1579 | self.ui = ui |
|
1580 | 1580 | self.pos = 0 |
|
1581 | 1581 | self.topic = topic |
|
1582 | 1582 | self.unit = unit |
|
1583 | 1583 | self.total = total |
|
1584 | 1584 | self.debug = ui.configbool('progress', 'debug') |
|
1585 | 1585 | self._updatebar = updatebar |
|
1586 | 1586 | |
|
1587 | 1587 | def __enter__(self): |
|
1588 | 1588 | return self |
|
1589 | 1589 | |
|
1590 | 1590 | def __exit__(self, exc_type, exc_value, exc_tb): |
|
1591 | 1591 | self.complete() |
|
1592 | 1592 | |
|
1593 | 1593 | def update(self, pos, item="", total=None): |
|
1594 | 1594 | assert pos is not None |
|
1595 | 1595 | if total: |
|
1596 | 1596 | self.total = total |
|
1597 | 1597 | self.pos = pos |
|
1598 | 1598 | self._updatebar(self.topic, self.pos, item, self.unit, self.total) |
|
1599 | 1599 | if self.debug: |
|
1600 | 1600 | self._printdebug(item) |
|
1601 | 1601 | |
|
1602 | 1602 | def increment(self, step=1, item="", total=None): |
|
1603 | 1603 | self.update(self.pos + step, item, total) |
|
1604 | 1604 | |
|
1605 | 1605 | def complete(self): |
|
1606 | 1606 | self.pos = None |
|
1607 | 1607 | self.unit = "" |
|
1608 | 1608 | self.total = None |
|
1609 | 1609 | self._updatebar(self.topic, self.pos, "", self.unit, self.total) |
|
1610 | 1610 | |
|
1611 | 1611 | def _printdebug(self, item): |
|
1612 | 1612 | if self.unit: |
|
1613 | 1613 | unit = ' ' + self.unit |
|
1614 | 1614 | if item: |
|
1615 | 1615 | item = ' ' + item |
|
1616 | 1616 | |
|
1617 | 1617 | if self.total: |
|
1618 | 1618 | pct = 100.0 * self.pos / self.total |
|
1619 | 1619 | self.ui.debug('%s:%s %d/%d%s (%4.2f%%)\n' |
|
1620 | 1620 | % (self.topic, item, self.pos, self.total, unit, pct)) |
|
1621 | 1621 | else: |
|
1622 | 1622 | self.ui.debug('%s:%s %d%s\n' % (self.topic, item, self.pos, unit)) |
|
1623 | 1623 | |
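For reference, a sketch of driving the progress helper above. It assumes the usual ui.makeprogress() constructor returns an instance of this class; the topic, unit and the files/process names are placeholders.

    with ui.makeprogress(_('scanning'), unit=_('files'),
                         total=len(files)) as prog:
        for f in files:
            prog.increment(item=f)
            process(f)  # placeholder per-item work
    # leaving the block calls complete(), which signals the bar to finish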
|
1624 | 1624 | def gdinitconfig(ui): |
|
1625 | 1625 | """helper function to know if a repo should be created as general delta |
|
1626 | 1626 | """ |
|
1627 | 1627 | # experimental config: format.generaldelta |
|
1628 | 1628 | return (ui.configbool('format', 'generaldelta') |
|
1629 | 1629 | or ui.configbool('format', 'usegeneraldelta')) |
|
1630 | 1630 | |
|
1631 | 1631 | def gddeltaconfig(ui): |
|
1632 | 1632 | """helper function to know if incoming delta should be optimised |
|
1633 | 1633 | """ |
|
1634 | 1634 | # experimental config: format.generaldelta |
|
1635 | 1635 | return ui.configbool('format', 'generaldelta') |
|
1636 | 1636 | |
|
1637 | 1637 | class simplekeyvaluefile(object): |
|
1638 | 1638 | """A simple file with key=value lines |
|
1639 | 1639 | |
|
1640 | 1640 |     Keys must be alphanumeric and start with a letter; values must not
|
1641 | 1641 | contain '\n' characters""" |
|
1642 | 1642 | firstlinekey = '__firstline' |
|
1643 | 1643 | |
|
1644 | 1644 | def __init__(self, vfs, path, keys=None): |
|
1645 | 1645 | self.vfs = vfs |
|
1646 | 1646 | self.path = path |
|
1647 | 1647 | |
|
1648 | 1648 | def read(self, firstlinenonkeyval=False): |
|
1649 | 1649 | """Read the contents of a simple key-value file |
|
1650 | 1650 | |
|
1651 | 1651 | 'firstlinenonkeyval' indicates whether the first line of file should |
|
1652 | 1652 |         be treated as a key-value pair or returned fully under the
|
1653 | 1653 | __firstline key.""" |
|
1654 | 1654 | lines = self.vfs.readlines(self.path) |
|
1655 | 1655 | d = {} |
|
1656 | 1656 | if firstlinenonkeyval: |
|
1657 | 1657 | if not lines: |
|
1658 | 1658 | e = _("empty simplekeyvalue file") |
|
1659 | 1659 | raise error.CorruptedState(e) |
|
1660 | 1660 | # we don't want to include '\n' in the __firstline |
|
1661 | 1661 | d[self.firstlinekey] = lines[0][:-1] |
|
1662 | 1662 | del lines[0] |
|
1663 | 1663 | |
|
1664 | 1664 | try: |
|
1665 | 1665 | # the 'if line.strip()' part prevents us from failing on empty |
|
1666 | 1666 |             # lines which only contain '\n' and therefore are not skipped
|
1667 | 1667 | # by 'if line' |
|
1668 | 1668 | updatedict = dict(line[:-1].split('=', 1) for line in lines |
|
1669 | 1669 | if line.strip()) |
|
1670 | 1670 | if self.firstlinekey in updatedict: |
|
1671 | 1671 | e = _("%r can't be used as a key") |
|
1672 | 1672 | raise error.CorruptedState(e % self.firstlinekey) |
|
1673 | 1673 | d.update(updatedict) |
|
1674 | 1674 | except ValueError as e: |
|
1675 | 1675 | raise error.CorruptedState(str(e)) |
|
1676 | 1676 | return d |
|
1677 | 1677 | |
|
1678 | 1678 | def write(self, data, firstline=None): |
|
1679 | 1679 | """Write key=>value mapping to a file |
|
1680 | 1680 |         data is a dict. Keys must be alphanumeric and start with a letter.
|
1681 | 1681 | Values must not contain newline characters. |
|
1682 | 1682 | |
|
1683 | 1683 | If 'firstline' is not None, it is written to file before |
|
1684 | 1684 | everything else, as it is, not in a key=value form""" |
|
1685 | 1685 | lines = [] |
|
1686 | 1686 | if firstline is not None: |
|
1687 | 1687 | lines.append('%s\n' % firstline) |
|
1688 | 1688 | |
|
1689 | 1689 | for k, v in data.items(): |
|
1690 | 1690 | if k == self.firstlinekey: |
|
1691 | 1691 | e = "key name '%s' is reserved" % self.firstlinekey |
|
1692 | 1692 | raise error.ProgrammingError(e) |
|
1693 | 1693 | if not k[0:1].isalpha(): |
|
1694 | 1694 | e = "keys must start with a letter in a key-value file" |
|
1695 | 1695 | raise error.ProgrammingError(e) |
|
1696 | 1696 | if not k.isalnum(): |
|
1697 | 1697 | e = "invalid key name in a simple key-value file" |
|
1698 | 1698 | raise error.ProgrammingError(e) |
|
1699 | 1699 | if '\n' in v: |
|
1700 | 1700 | e = "invalid value in a simple key-value file" |
|
1701 | 1701 | raise error.ProgrammingError(e) |
|
1702 | 1702 | lines.append("%s=%s\n" % (k, v)) |
|
1703 | 1703 | with self.vfs(self.path, mode='wb', atomictemp=True) as fp: |
|
1704 | 1704 | fp.write(''.join(lines)) |
|
1705 | 1705 | |
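A short round-trip sketch for the class above; the file name, keys and node hash are illustrative.

    kvfile = simplekeyvaluefile(repo.vfs, 'myext-state')
    kvfile.write({'node': '9de260b1e2e0', 'step': 'apply'},
                 firstline='version 1')
    state = kvfile.read(firstlinenonkeyval=True)
    # state == {'__firstline': 'version 1',
    #           'node': '9de260b1e2e0', 'step': 'apply'}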
|
1706 | 1706 | _reportobsoletedsource = [ |
|
1707 | 1707 | 'debugobsolete', |
|
1708 | 1708 | 'pull', |
|
1709 | 1709 | 'push', |
|
1710 | 1710 | 'serve', |
|
1711 | 1711 | 'unbundle', |
|
1712 | 1712 | ] |
|
1713 | 1713 | |
|
1714 | 1714 | _reportnewcssource = [ |
|
1715 | 1715 | 'pull', |
|
1716 | 1716 | 'unbundle', |
|
1717 | 1717 | ] |
|
1718 | 1718 | |
|
1719 | 1719 | def prefetchfiles(repo, revs, match): |
|
1720 | 1720 | """Invokes the registered file prefetch functions, allowing extensions to |
|
1721 | 1721 | ensure the corresponding files are available locally, before the command |
|
1722 | 1722 | uses them.""" |
|
1723 | 1723 | if match: |
|
1724 | 1724 | # The command itself will complain about files that don't exist, so |
|
1725 | 1725 | # don't duplicate the message. |
|
1726 | 1726 | match = matchmod.badmatch(match, lambda fn, msg: None) |
|
1727 | 1727 | else: |
|
1728 | 1728 | match = matchall(repo) |
|
1729 | 1729 | |
|
1730 | 1730 | fileprefetchhooks(repo, revs, match) |
|
1731 | 1731 | |
|
1732 | 1732 | # a list of (repo, revs, match) prefetch functions |
|
1733 | 1733 | fileprefetchhooks = util.hooks() |
|
1734 | 1734 | |
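As an illustration of this hook point, an extension could register its own prefetch callback; the extension name and the body below are hypothetical.

    def _prefetch(repo, revs, match):
        # hypothetical: make sure data for the matched files is available locally
        for rev in revs:
            ctx = repo[rev]
            for f in ctx.walk(match):
                ctx[f].data()

    def extsetup(ui):
        scmutil.fileprefetchhooks.add('myext', _prefetch)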
|
1735 | 1735 | # A marker that tells the evolve extension to suppress its own reporting |
|
1736 | 1736 | _reportstroubledchangesets = True |
|
1737 | 1737 | |
|
1738 | 1738 | def registersummarycallback(repo, otr, txnname=''): |
|
1739 | 1739 | """register a callback to issue a summary after the transaction is closed |
|
1740 | 1740 | """ |
|
1741 | 1741 | def txmatch(sources): |
|
1742 | 1742 | return any(txnname.startswith(source) for source in sources) |
|
1743 | 1743 | |
|
1744 | 1744 | categories = [] |
|
1745 | 1745 | |
|
1746 | 1746 | def reportsummary(func): |
|
1747 | 1747 | """decorator for report callbacks.""" |
|
1748 | 1748 | # The repoview life cycle is shorter than the one of the actual |
|
1749 | 1749 | # underlying repository. So the filtered object can die before the |
|
1750 | 1750 |         # weakref is used, leading to trouble. We keep a reference to the
|
1751 | 1751 | # unfiltered object and restore the filtering when retrieving the |
|
1752 | 1752 | # repository through the weakref. |
|
1753 | 1753 | filtername = repo.filtername |
|
1754 | 1754 | reporef = weakref.ref(repo.unfiltered()) |
|
1755 | 1755 | def wrapped(tr): |
|
1756 | 1756 | repo = reporef() |
|
1757 | 1757 | if filtername: |
|
1758 | 1758 | repo = repo.filtered(filtername) |
|
1759 | 1759 | func(repo, tr) |
|
1760 | 1760 | newcat = '%02i-txnreport' % len(categories) |
|
1761 | 1761 | otr.addpostclose(newcat, wrapped) |
|
1762 | 1762 | categories.append(newcat) |
|
1763 | 1763 | return wrapped |
|
1764 | 1764 | |
|
1765 | 1765 | |
|
1766 | 1766 | @reportsummary |
|
1767 | 1767 | def reportchangegroup(repo, tr): |
|
1768 | 1768 | cgchangesets = tr.changes.get('changegroup-count-changesets', 0) |
|
1769 | 1769 | cgrevisions = tr.changes.get('changegroup-count-revisions', 0) |
|
1770 | 1770 | cgfiles = tr.changes.get('changegroup-count-files', 0) |
|
1771 | 1771 | cgheads = tr.changes.get('changegroup-count-heads', 0) |
|
1772 | 1772 | if cgchangesets or cgrevisions or cgfiles: |
|
1773 | 1773 | htext = "" |
|
1774 | 1774 | if cgheads: |
|
1775 | 1775 | htext = _(" (%+d heads)") % cgheads |
|
1776 | 1776 | msg = _("added %d changesets with %d changes to %d files%s\n") |
|
1777 | 1777 | repo.ui.status(msg % (cgchangesets, cgrevisions, cgfiles, htext)) |
|
1778 | 1778 | |
|
1779 | 1779 | if txmatch(_reportobsoletedsource): |
|
1780 | 1780 | @reportsummary |
|
1781 | 1781 | def reportobsoleted(repo, tr): |
|
1782 | 1782 | obsoleted = obsutil.getobsoleted(repo, tr) |
|
1783 | 1783 | newmarkers = len(tr.changes.get('obsmarkers', ())) |
|
1784 | 1784 | if newmarkers: |
|
1785 | 1785 | repo.ui.status(_('%i new obsolescence markers\n') % newmarkers) |
|
1786 | 1786 | if obsoleted: |
|
1787 | 1787 | repo.ui.status(_('obsoleted %i changesets\n') |
|
1788 | 1788 | % len(obsoleted)) |
|
1789 | 1789 | |
|
1790 | 1790 | if (obsolete.isenabled(repo, obsolete.createmarkersopt) and |
|
1791 | 1791 | repo.ui.configbool('experimental', 'evolution.report-instabilities')): |
|
1792 | 1792 | instabilitytypes = [ |
|
1793 | 1793 | ('orphan', 'orphan'), |
|
1794 | 1794 | ('phase-divergent', 'phasedivergent'), |
|
1795 | 1795 | ('content-divergent', 'contentdivergent'), |
|
1796 | 1796 | ] |
|
1797 | 1797 | |
|
1798 | 1798 | def getinstabilitycounts(repo): |
|
1799 | 1799 | filtered = repo.changelog.filteredrevs |
|
1800 | 1800 | counts = {} |
|
1801 | 1801 | for instability, revset in instabilitytypes: |
|
1802 | 1802 | counts[instability] = len(set(obsolete.getrevs(repo, revset)) - |
|
1803 | 1803 | filtered) |
|
1804 | 1804 | return counts |
|
1805 | 1805 | |
|
1806 | 1806 | oldinstabilitycounts = getinstabilitycounts(repo) |
|
1807 | 1807 | @reportsummary |
|
1808 | 1808 | def reportnewinstabilities(repo, tr): |
|
1809 | 1809 | newinstabilitycounts = getinstabilitycounts(repo) |
|
1810 | 1810 | for instability, revset in instabilitytypes: |
|
1811 | 1811 | delta = (newinstabilitycounts[instability] - |
|
1812 | 1812 | oldinstabilitycounts[instability]) |
|
1813 | 1813 | msg = getinstabilitymessage(delta, instability) |
|
1814 | 1814 | if msg: |
|
1815 | 1815 | repo.ui.warn(msg) |
|
1816 | 1816 | |
|
1817 | 1817 | if txmatch(_reportnewcssource): |
|
1818 | 1818 | @reportsummary |
|
1819 | 1819 | def reportnewcs(repo, tr): |
|
1820 | 1820 | """Report the range of new revisions pulled/unbundled.""" |
|
1821 | 1821 | origrepolen = tr.changes.get('origrepolen', len(repo)) |
|
1822 | 1822 | unfi = repo.unfiltered() |
|
1823 | 1823 | if origrepolen >= len(unfi): |
|
1824 | 1824 | return |
|
1825 | 1825 | |
|
1826 | 1826 | # Compute the bounds of new visible revisions' range. |
|
1827 | 1827 | revs = smartset.spanset(repo, start=origrepolen) |
|
1828 | 1828 | if revs: |
|
1829 | 1829 | minrev, maxrev = repo[revs.min()], repo[revs.max()] |
|
1830 | 1830 | |
|
1831 | 1831 | if minrev == maxrev: |
|
1832 | 1832 | revrange = minrev |
|
1833 | 1833 | else: |
|
1834 | 1834 | revrange = '%s:%s' % (minrev, maxrev) |
|
1835 | 1835 | draft = len(repo.revs('%ld and draft()', revs)) |
|
1836 | 1836 | secret = len(repo.revs('%ld and secret()', revs)) |
|
1837 | 1837 | if not (draft or secret): |
|
1838 | 1838 | msg = _('new changesets %s\n') % revrange |
|
1839 | 1839 | elif draft and secret: |
|
1840 | 1840 | msg = _('new changesets %s (%d drafts, %d secrets)\n') |
|
1841 | 1841 | msg %= (revrange, draft, secret) |
|
1842 | 1842 | elif draft: |
|
1843 | 1843 | msg = _('new changesets %s (%d drafts)\n') |
|
1844 | 1844 | msg %= (revrange, draft) |
|
1845 | 1845 | elif secret: |
|
1846 | 1846 | msg = _('new changesets %s (%d secrets)\n') |
|
1847 | 1847 | msg %= (revrange, secret) |
|
1848 | 1848 | else: |
|
1849 | 1849 | errormsg = 'entered unreachable condition' |
|
1850 | 1850 | raise error.ProgrammingError(errormsg) |
|
1851 | 1851 | repo.ui.status(msg) |
|
1852 | 1852 | |
|
1853 | 1853 | # search new changesets directly pulled as obsolete |
|
1854 | 1854 | duplicates = tr.changes.get('revduplicates', ()) |
|
1855 | 1855 | obsadded = unfi.revs('(%d: + %ld) and obsolete()', |
|
1856 | 1856 | origrepolen, duplicates) |
|
1857 | 1857 | cl = repo.changelog |
|
1858 | 1858 | extinctadded = [r for r in obsadded if r not in cl] |
|
1859 | 1859 | if extinctadded: |
|
1860 | 1860 | # They are not just obsolete, but obsolete and invisible |
|
1861 | 1861 |                 # we call them "extinct" internally but the term has not been
|
1862 | 1862 | # exposed to users. |
|
1863 | 1863 | msg = '(%d other changesets obsolete on arrival)\n' |
|
1864 | 1864 | repo.ui.status(msg % len(extinctadded)) |
|
1865 | 1865 | |
|
1866 | 1866 | @reportsummary |
|
1867 | 1867 | def reportphasechanges(repo, tr): |
|
1868 | 1868 | """Report statistics of phase changes for changesets pre-existing |
|
1869 | 1869 | pull/unbundle. |
|
1870 | 1870 | """ |
|
1871 | 1871 | origrepolen = tr.changes.get('origrepolen', len(repo)) |
|
1872 | 1872 | phasetracking = tr.changes.get('phases', {}) |
|
1873 | 1873 | if not phasetracking: |
|
1874 | 1874 | return |
|
1875 | 1875 | published = [ |
|
1876 | 1876 | rev for rev, (old, new) in phasetracking.iteritems() |
|
1877 | 1877 | if new == phases.public and rev < origrepolen |
|
1878 | 1878 | ] |
|
1879 | 1879 | if not published: |
|
1880 | 1880 | return |
|
1881 | 1881 | repo.ui.status(_('%d local changesets published\n') |
|
1882 | 1882 | % len(published)) |
|
1883 | 1883 | |
|
1884 | 1884 | def getinstabilitymessage(delta, instability): |
|
1885 | 1885 | """function to return the message to show warning about new instabilities |
|
1886 | 1886 | |
|
1887 | 1887 |     exists as a separate function so that extensions can wrap it to show more

1888 | 1888 |     information, like how to fix instabilities"""
|
1889 | 1889 | if delta > 0: |
|
1890 | 1890 | return _('%i new %s changesets\n') % (delta, instability) |
|
1891 | 1891 | |
|
1892 | 1892 | def nodesummaries(repo, nodes, maxnumnodes=4): |
|
1893 | 1893 | if len(nodes) <= maxnumnodes or repo.ui.verbose: |
|
1894 | 1894 | return ' '.join(short(h) for h in nodes) |
|
1895 | 1895 | first = ' '.join(short(h) for h in nodes[:maxnumnodes]) |
|
1896 | 1896 | return _("%s and %d others") % (first, len(nodes) - maxnumnodes) |
|
1897 | 1897 | |
|
1898 | 1898 | def enforcesinglehead(repo, tr, desc): |
|
1899 | 1899 | """check that no named branch has multiple heads""" |
|
1900 | 1900 | if desc in ('strip', 'repair'): |
|
1901 | 1901 | # skip the logic during strip |
|
1902 | 1902 | return |
|
1903 | 1903 | visible = repo.filtered('visible') |
|
1904 | 1904 | # possible improvement: we could restrict the check to affected branch |
|
1905 | 1905 | for name, heads in visible.branchmap().iteritems(): |
|
1906 | 1906 | if len(heads) > 1: |
|
1907 | 1907 | msg = _('rejecting multiple heads on branch "%s"') |
|
1908 | 1908 | msg %= name |
|
1909 | 1909 | hint = _('%d heads: %s') |
|
1910 | 1910 | hint %= (len(heads), nodesummaries(repo, heads)) |
|
1911 | 1911 | raise error.Abort(msg, hint=hint) |
|
1912 | 1912 | |
|
1913 | 1913 | def wrapconvertsink(sink): |
|
1914 | 1914 | """Allow extensions to wrap the sink returned by convcmd.convertsink() |
|
1915 | 1915 | before it is used, whether or not the convert extension was formally loaded. |
|
1916 | 1916 | """ |
|
1917 | 1917 | return sink |
|
1918 | 1918 | |
|
1919 | 1919 | def unhidehashlikerevs(repo, specs, hiddentype): |
|
1920 | 1920 | """parse the user specs and unhide changesets whose hash or revision number |
|
1921 | 1921 | is passed. |
|
1922 | 1922 | |
|
1923 | 1923 | hiddentype can be: 1) 'warn': warn while unhiding changesets |
|
1924 | 1924 | 2) 'nowarn': don't warn while unhiding changesets |
|
1925 | 1925 | |
|
1926 | 1926 | returns a repo object with the required changesets unhidden |
|
1927 | 1927 | """ |
|
1928 | 1928 | if not repo.filtername or not repo.ui.configbool('experimental', |
|
1929 | 1929 | 'directaccess'): |
|
1930 | 1930 | return repo |
|
1931 | 1931 | |
|
1932 | 1932 | if repo.filtername not in ('visible', 'visible-hidden'): |
|
1933 | 1933 | return repo |
|
1934 | 1934 | |
|
1935 | 1935 | symbols = set() |
|
1936 | 1936 | for spec in specs: |
|
1937 | 1937 | try: |
|
1938 | 1938 | tree = revsetlang.parse(spec) |
|
1939 | 1939 | except error.ParseError: # will be reported by scmutil.revrange() |
|
1940 | 1940 | continue |
|
1941 | 1941 | |
|
1942 | 1942 | symbols.update(revsetlang.gethashlikesymbols(tree)) |
|
1943 | 1943 | |
|
1944 | 1944 | if not symbols: |
|
1945 | 1945 | return repo |
|
1946 | 1946 | |
|
1947 | 1947 | revs = _getrevsfromsymbols(repo, symbols) |
|
1948 | 1948 | |
|
1949 | 1949 | if not revs: |
|
1950 | 1950 | return repo |
|
1951 | 1951 | |
|
1952 | 1952 | if hiddentype == 'warn': |
|
1953 | 1953 | unfi = repo.unfiltered() |
|
1954 | 1954 | revstr = ", ".join([pycompat.bytestr(unfi[l]) for l in revs]) |
|
1955 | 1955 | repo.ui.warn(_("warning: accessing hidden changesets for write " |
|
1956 | 1956 | "operation: %s\n") % revstr) |
|
1957 | 1957 | |
|
1958 | 1958 |     # we have to use a new filtername to separate branch/tags caches until we can

1959 | 1959 |     # disable these caches when revisions are dynamically pinned.
|
1960 | 1960 | return repo.filtered('visible-hidden', revs) |
|
1961 | 1961 | |
|
1962 | 1962 | def _getrevsfromsymbols(repo, symbols): |
|
1963 | 1963 |     """parse the list of symbols and return a set of revision numbers of hidden
|
1964 | 1964 | changesets present in symbols""" |
|
1965 | 1965 | revs = set() |
|
1966 | 1966 | unfi = repo.unfiltered() |
|
1967 | 1967 | unficl = unfi.changelog |
|
1968 | 1968 | cl = repo.changelog |
|
1969 | 1969 | tiprev = len(unficl) |
|
1970 | 1970 | allowrevnums = repo.ui.configbool('experimental', 'directaccess.revnums') |
|
1971 | 1971 | for s in symbols: |
|
1972 | 1972 | try: |
|
1973 | 1973 | n = int(s) |
|
1974 | 1974 | if n <= tiprev: |
|
1975 | 1975 | if not allowrevnums: |
|
1976 | 1976 | continue |
|
1977 | 1977 | else: |
|
1978 | 1978 | if n not in cl: |
|
1979 | 1979 | revs.add(n) |
|
1980 | 1980 | continue |
|
1981 | 1981 | except ValueError: |
|
1982 | 1982 | pass |
|
1983 | 1983 | |
|
1984 | 1984 | try: |
|
1985 | 1985 | s = resolvehexnodeidprefix(unfi, s) |
|
1986 | 1986 | except (error.LookupError, error.WdirUnsupported): |
|
1987 | 1987 | s = None |
|
1988 | 1988 | |
|
1989 | 1989 | if s is not None: |
|
1990 | 1990 | rev = unficl.rev(s) |
|
1991 | 1991 | if rev not in cl: |
|
1992 | 1992 | revs.add(rev) |
|
1993 | 1993 | |
|
1994 | 1994 | return revs |
|
1995 | 1995 | |
|
1996 | 1996 | def bookmarkrevs(repo, mark): |
|
1997 | 1997 | """ |
|
1998 | 1998 | Select revisions reachable by a given bookmark |
|
1999 | 1999 | """ |
|
2000 | 2000 | return repo.revs("ancestors(bookmark(%s)) - " |
|
2001 | 2001 | "ancestors(head() and not bookmark(%s)) - " |
|
2002 | 2002 | "ancestors(bookmark() and not bookmark(%s))", |
|
2003 | 2003 | mark, mark, mark) |
|
2004 | 2004 | |
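A hedged sketch of calling the helper above from command code; the bookmark name is a placeholder.

    revs = bookmarkrevs(repo, 'feature-x')  # placeholder bookmark
    for rev in revs:
        # revisions reachable from the bookmark but not from other heads
        # or bookmarks
        ui.write('%d\n' % rev)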
|
2005 | 2005 | def computechangesetfilesadded(ctx): |
|
2006 | 2006 | """return the list of files added in a changeset |
|
2007 | 2007 | """ |
|
2008 | 2008 | added = [] |
|
2009 | 2009 | for f in ctx.files(): |
|
2010 | 2010 | if not any(f in p for p in ctx.parents()): |
|
2011 | 2011 | added.append(f) |
|
2012 | 2012 | return added |
|
2013 | 2013 | |
|
2014 | 2014 | def computechangesetfilesremoved(ctx): |
|
2015 | 2015 | """return the list of files removed in a changeset |
|
2016 | 2016 | """ |
|
2017 | 2017 | removed = [] |
|
2018 | 2018 | for f in ctx.files(): |
|
2019 | 2019 | if f not in ctx: |
|
2020 | 2020 | removed.append(f) |
|
2021 | 2021 | return removed |