##// END OF EJS Templates
cmdutil: remove remainder of old walkchangerevs() implementation
Yuya Nishihara -
r46228:c7413ffe default
parent child Browse files
Show More
@@ -1,1291 +1,1255
1 1 # __init__.py - remotefilelog extension
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)
8 8
9 9 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
10 10 GUARANTEES. This means that repositories created with this extension may
11 11 only be usable with the exact version of this extension/Mercurial that was
12 12 used. The extension attempts to enforce this in order to prevent repository
13 13 corruption.
14 14
15 15 remotefilelog works by fetching file contents lazily and storing them
16 16 in a cache on the client rather than in revlogs. This allows enormous
17 17 histories to be transferred only partially, making them easier to
18 18 operate on.
19 19
20 20 Configs:
21 21
22 22 ``packs.maxchainlen`` specifies the maximum delta chain length in pack files
23 23
24 24 ``packs.maxpacksize`` specifies the maximum pack file size
25 25
26 26 ``packs.maxpackfilecount`` specifies the maximum number of packs in the
27 27 shared cache (trees only for now)
28 28
29 29 ``remotefilelog.backgroundprefetch`` runs prefetch in background when True
30 30
31 31 ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
32 32 update, and on other commands that use them. Different from pullprefetch.
33 33
34 34 ``remotefilelog.gcrepack`` does garbage collection during repack when True
35 35
36 36 ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
37 37 it is garbage collected
38 38
39 39 ``remotefilelog.repackonhggc`` runs repack on hg gc when True
40 40
41 41 ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
42 42 days after which it is no longer prefetched.
43 43
44 44 ``remotefilelog.prefetchdelay`` specifies delay between background
45 45 prefetches in seconds after operations that change the working copy parent
46 46
47 47 ``remotefilelog.data.gencountlimit`` constraints the minimum number of data
48 48 pack files required to be considered part of a generation. In particular,
49 49 minimum number of packs files > gencountlimit.
50 50
51 51 ``remotefilelog.data.generations`` list for specifying the lower bound of
52 52 each generation of the data pack files. For example, list ['100MB','1MB']
53 53 or ['1MB', '100MB'] will lead to three generations: [0, 1MB), [
54 54 1MB, 100MB) and [100MB, infinity).
55 55
56 56 ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
57 57 include in an incremental data repack.
58 58
59 59 ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
60 60 it to be considered for an incremental data repack.
61 61
62 62 ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
63 63 to include in an incremental data repack.
64 64
65 65 ``remotefilelog.history.gencountlimit`` constraints the minimum number of
66 66 history pack files required to be considered part of a generation. In
67 67 particular, minimum number of packs files > gencountlimit.
68 68
69 69 ``remotefilelog.history.generations`` list for specifying the lower bound of
70 70 each generation of the history pack files. For example, list [
71 71 '100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations: [
72 72 0, 1MB), [1MB, 100MB) and [100MB, infinity).
73 73
74 74 ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
75 75 include in an incremental history repack.
76 76
77 77 ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
78 78 for it to be considered for an incremental history repack.
79 79
80 80 ``remotefilelog.history.repacksizelimit`` the maximum total size of pack
81 81 files to include in an incremental history repack.
82 82
83 83 ``remotefilelog.backgroundrepack`` automatically consolidate packs in the
84 84 background
85 85
86 86 ``remotefilelog.cachepath`` path to cache
87 87
88 88 ``remotefilelog.cachegroup`` if set, make cache directory sgid to this
89 89 group
90 90
91 91 ``remotefilelog.cacheprocess`` binary to invoke for fetching file data
92 92
93 93 ``remotefilelog.debug`` turn on remotefilelog-specific debug output
94 94
95 95 ``remotefilelog.excludepattern`` pattern of files to exclude from pulls
96 96
97 97 ``remotefilelog.includepattern`` pattern of files to include in pulls
98 98
99 99 ``remotefilelog.fetchwarning``: message to print when too many
100 100 single-file fetches occur
101 101
102 102 ``remotefilelog.getfilesstep`` number of files to request in a single RPC
103 103
104 104 ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
105 105 files, otherwise use optimistic fetching
106 106
107 107 ``remotefilelog.pullprefetch`` revset for selecting files that should be
108 108 eagerly downloaded rather than lazily
109 109
110 110 ``remotefilelog.reponame`` name of the repo. If set, used to partition
111 111 data from other repos in a shared store.
112 112
113 113 ``remotefilelog.server`` if true, enable server-side functionality
114 114
115 115 ``remotefilelog.servercachepath`` path for caching blobs on the server
116 116
117 117 ``remotefilelog.serverexpiration`` number of days to keep cached server
118 118 blobs
119 119
120 120 ``remotefilelog.validatecache`` if set, check cache entries for corruption
121 121 before returning blobs
122 122
123 123 ``remotefilelog.validatecachelog`` if set, check cache entries for
124 124 corruption before returning metadata
125 125
126 126 """
127 127 from __future__ import absolute_import
128 128
129 129 import os
130 130 import time
131 131 import traceback
132 132
133 133 from mercurial.node import hex
134 134 from mercurial.i18n import _
135 135 from mercurial.pycompat import open
136 136 from mercurial import (
137 137 changegroup,
138 138 changelog,
139 cmdutil,
140 139 commands,
141 140 configitems,
142 141 context,
143 142 copies,
144 143 debugcommands as hgdebugcommands,
145 144 dispatch,
146 145 error,
147 146 exchange,
148 147 extensions,
149 148 hg,
150 149 localrepo,
151 150 match as matchmod,
152 151 merge,
153 152 mergestate as mergestatemod,
154 153 node as nodemod,
155 154 patch,
156 155 pycompat,
157 156 registrar,
158 157 repair,
159 158 repoview,
160 159 revset,
161 160 scmutil,
162 161 smartset,
163 162 streamclone,
164 163 util,
165 164 )
166 165 from . import (
167 166 constants,
168 167 debugcommands,
169 168 fileserverclient,
170 169 remotefilectx,
171 170 remotefilelog,
172 171 remotefilelogserver,
173 172 repack as repackmod,
174 173 shallowbundle,
175 174 shallowrepo,
176 175 shallowstore,
177 176 shallowutil,
178 177 shallowverifier,
179 178 )
180 179
# ensures debug commands are registered
hgdebugcommands.command

# command/config registration tables for this extension
cmdtable = {}
command = registrar.command(cmdtable)

configtable = {}
configitem = registrar.configitem(configtable)

configitem(b'remotefilelog', b'debug', default=False)

configitem(b'remotefilelog', b'reponame', default=b'')
configitem(b'remotefilelog', b'cachepath', default=None)
configitem(b'remotefilelog', b'cachegroup', default=None)
configitem(b'remotefilelog', b'cacheprocess', default=None)
configitem(b'remotefilelog', b'cacheprocess.includepath', default=None)
configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB")

configitem(
    b'remotefilelog',
    b'fallbackpath',
    default=configitems.dynamicdefault,
    alias=[(b'remotefilelog', b'fallbackrepo')],
)

configitem(b'remotefilelog', b'validatecachelog', default=None)
configitem(b'remotefilelog', b'validatecache', default=b'on')
configitem(b'remotefilelog', b'server', default=None)
configitem(b'remotefilelog', b'servercachepath', default=None)
configitem(b"remotefilelog", b"serverexpiration", default=30)
configitem(b'remotefilelog', b'backgroundrepack', default=False)
configitem(b'remotefilelog', b'bgprefetchrevs', default=None)
configitem(b'remotefilelog', b'pullprefetch', default=None)
configitem(b'remotefilelog', b'backgroundprefetch', default=False)
configitem(b'remotefilelog', b'prefetchdelay', default=120)
configitem(b'remotefilelog', b'prefetchdays', default=14)

configitem(b'remotefilelog', b'getfilesstep', default=10000)
configitem(b'remotefilelog', b'getfilestype', default=b'optimistic')
configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault)
configitem(b'remotefilelog', b'fetchwarning', default=b'')

configitem(b'remotefilelog', b'includepattern', default=None)
configitem(b'remotefilelog', b'excludepattern', default=None)

configitem(b'remotefilelog', b'gcrepack', default=False)
configitem(b'remotefilelog', b'repackonhggc', default=False)
configitem(b'repack', b'chainorphansbysize', default=True, experimental=True)

configitem(b'packs', b'maxpacksize', default=0)
configitem(b'packs', b'maxchainlen', default=1000)

configitem(b'devel', b'remotefilelog.bg-wait', default=False)

# default TTL limit is 30 days
_defaultlimit = 60 * 60 * 24 * 30
configitem(b'remotefilelog', b'nodettl', default=_defaultlimit)

# NOTE: the two gencountlimit registrations below previously ended with a
# stray trailing comma, wrapping each call's (None) result in a discarded
# 1-tuple; harmless at runtime but misleading, so the commas are removed.
configitem(b'remotefilelog', b'data.gencountlimit', default=2)
configitem(
    b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB']
)
configitem(b'remotefilelog', b'data.maxrepackpacks', default=50)
configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB')
configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB')

configitem(b'remotefilelog', b'history.gencountlimit', default=2)
configitem(b'remotefilelog', b'history.generations', default=[b'100MB'])
configitem(b'remotefilelog', b'history.maxrepackpacks', default=50)
configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB')
configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB')

# Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
# extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
# be specifying the version(s) of Mercurial they are tested with, or
# leave the attribute unspecified.
testedwith = b'ships-with-hg-core'

# declare the shallow-repo requirement as supported by local repositories
repoclass = localrepo.localrepository
repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)

# convenience alias used throughout this module
isenabled = shallowutil.isenabled
263 262
264 263
def uisetup(ui):
    """Wraps user facing Mercurial commands to swap them out with shallow
    versions.

    Runs once per process.  Everything here is registration/monkey-patching:
    no repository is available yet, so per-repo work lives in reposetup().
    """
    # let peers created over the wire be set up with the file server client
    hg.wirepeersetupfuncs.append(fileserverclient.peersetup)

    # add a --shallow flag to `hg clone` and route it through cloneshallow()
    entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow)
    entry[1].append(
        (
            b'',
            b'shallow',
            None,
            _(b"create a shallow clone which uses remote file history"),
        )
    )

    extensions.wrapcommand(
        commands.table, b'debugindex', debugcommands.debugindex
    )
    extensions.wrapcommand(
        commands.table, b'debugindexdot', debugcommands.debugindexdot
    )
    extensions.wrapcommand(commands.table, b'log', log)
    extensions.wrapcommand(commands.table, b'pull', pull)

    # Prevent 'hg manifest --all'
    def _manifest(orig, ui, repo, *args, **opts):
        if isenabled(repo) and opts.get('all'):
            raise error.Abort(_(b"--all is not supported in a shallow repo"))

        return orig(ui, repo, *args, **opts)

    extensions.wrapcommand(commands.table, b"manifest", _manifest)

    # Wrap remotefilelog with lfs code
    def _lfsloaded(loaded=False):
        lfsmod = None
        try:
            lfsmod = extensions.find(b'lfs')
        except KeyError:
            pass
        if lfsmod:
            lfsmod.wrapfilelog(remotefilelog.remotefilelog)
            fileserverclient._lfsmod = lfsmod

    extensions.afterloaded(b'lfs', _lfsloaded)

    # debugdata needs remotefilelog.len to work
    extensions.wrapcommand(commands.table, b'debugdata', debugdatashallow)

    # replace the changegroup packer so shallow repos serve/receive correctly
    changegroup.cgpacker = shallowbundle.shallowcg1packer

    extensions.wrapfunction(
        changegroup, b'_addchangegroupfiles', shallowbundle.addchangegroupfiles
    )
    extensions.wrapfunction(
        changegroup, b'makechangegroup', shallowbundle.makechangegroup
    )
    extensions.wrapfunction(localrepo, b'makestore', storewrapper)
    extensions.wrapfunction(exchange, b'pull', exchangepull)
    # the applyupdates/checkunknownfiles/checklookup/findrenames/
    # computeforwardmissing wrappers below all exist to batch-prefetch file
    # contents before core code touches them one at a time
    extensions.wrapfunction(merge, b'applyupdates', applyupdates)
    extensions.wrapfunction(merge, b'_checkunknownfiles', checkunknownfiles)
    extensions.wrapfunction(context.workingctx, b'_checklookup', checklookup)
    extensions.wrapfunction(scmutil, b'_findrenames', findrenames)
    extensions.wrapfunction(
        copies, b'_computeforwardmissing', computeforwardmissing
    )
    extensions.wrapfunction(dispatch, b'runcommand', runcommand)
    extensions.wrapfunction(repair, b'_collectbrokencsets', _collectbrokencsets)
    extensions.wrapfunction(context.changectx, b'filectx', filectx)
    extensions.wrapfunction(context.workingctx, b'filectx', workingfilectx)
    extensions.wrapfunction(patch, b'trydiff', trydiff)
    extensions.wrapfunction(hg, b'verify', _verify)
    scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook)

    # disappointing hacks below
    extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn)
    extensions.wrapfunction(revset, b'filelog', filelogrevset)
    revset.symbols[b'filelog'] = revset.filelog
344 extensions.wrapfunction(cmdutil, b'walkfilerevs', walkfilerevs)
345 343
346 344
def cloneshallow(orig, ui, repo, *args, **opts):
    """Wrapper for `hg clone` implementing the --shallow flag.

    When --shallow is passed, temporarily wraps exchange.pull and the
    streamclone machinery so the destination repo is converted to a shallow
    (remotefilelog) repo before the initial pull runs.  Without --shallow it
    simply delegates to the original clone.
    """
    if opts.get('shallow'):
        # repos touched during the clone; their fileservice connections are
        # closed in the finally block below
        repos = []

        def pull_shallow(orig, self, *args, **kwargs):
            if not isenabled(self):
                repos.append(self.unfiltered())
                # set up the client hooks so the post-clone update works
                setupclient(self.ui, self.unfiltered())

                # setupclient fixed the class on the repo itself
                # but we also need to fix it on the repoview
                if isinstance(self, repoview.repoview):
                    self.__class__.__bases__ = (
                        self.__class__.__bases__[0],
                        self.unfiltered().__class__,
                    )
                self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
                with self.lock():
                    # acquire store lock before writing requirements as some
                    # requirements might be written to .hg/store/requires
                    scmutil.writereporequirements(self)

                # Since setupclient hadn't been called, exchange.pull was not
                # wrapped. So we need to manually invoke our version of it.
                return exchangepull(orig, self, *args, **kwargs)
            else:
                return orig(self, *args, **kwargs)

        extensions.wrapfunction(exchange, b'pull', pull_shallow)

        # Wrap the stream logic to add requirements and to pass include/exclude
        # patterns around.
        def setup_streamout(repo, remote):
            # Replace remote.stream_out with a version that sends file
            # patterns.
            def stream_out_shallow(orig):
                caps = remote.capabilities()
                if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
                    opts = {}
                    if repo.includepattern:
                        opts['includepattern'] = b'\0'.join(repo.includepattern)
                    if repo.excludepattern:
                        opts['excludepattern'] = b'\0'.join(repo.excludepattern)
                    return remote._callstream(b'stream_out_shallow', **opts)
                else:
                    return orig()

            extensions.wrapfunction(remote, b'stream_out', stream_out_shallow)

        def stream_wrap(orig, op):
            setup_streamout(op.repo, op.remote)
            return orig(op)

        extensions.wrapfunction(
            streamclone, b'maybeperformlegacystreamclone', stream_wrap
        )

        def canperformstreamclone(orig, pullop, bundle2=False):
            # remotefilelog is currently incompatible with the
            # bundle2 flavor of streamclones, so force us to use
            # v1 instead.
            if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
                pullop.remotebundle2caps[b'stream'] = [
                    c for c in pullop.remotebundle2caps[b'stream'] if c != b'v2'
                ]
            if bundle2:
                return False, None
            supported, requirements = orig(pullop, bundle2=bundle2)
            if requirements is not None:
                requirements.add(constants.SHALLOWREPO_REQUIREMENT)
            return supported, requirements

        extensions.wrapfunction(
            streamclone, b'canperformstreamclone', canperformstreamclone
        )

    try:
        orig(ui, repo, *args, **opts)
    finally:
        if opts.get('shallow'):
            for r in repos:
                if util.safehasattr(r, b'fileservice'):
                    r.fileservice.close()
431 429
432 430
def debugdatashallow(orig, *args, **kwds):
    """Run the wrapped `hg debugdata` with remotefilelog claiming length 1.

    debugdata needs __len__ to work; a remotefilelog has no local revlog, so
    a fake length is patched in for the duration of the call and restored
    afterwards.
    """
    saved = remotefilelog.remotefilelog.__len__
    try:
        remotefilelog.remotefilelog.__len__ = lambda rlog: 1
        return orig(*args, **kwds)
    finally:
        remotefilelog.remotefilelog.__len__ = saved
440 438
441 439
def reposetup(ui, repo):
    """Per-repository setup: register prefetch hooks and activate the
    client and/or server side of remotefilelog as configured.
    """
    if not repo.local():
        return

    # registered here intentionally: doing this in uisetup does not work
    ui.setconfig(b'hooks', b'update.prefetch', wcpprefetch)
    ui.setconfig(b'hooks', b'commit.prefetch', wcpprefetch)

    servermode = ui.configbool(b'remotefilelog', b'server')
    clientmode = isenabled(repo)

    # the two roles are mutually exclusive
    if servermode and clientmode:
        raise RuntimeError(b"Cannot be both a server and shallow client.")

    if clientmode:
        setupclient(ui, repo)

    if servermode:
        remotefilelogserver.setupserver(ui, repo)
461 459
462 460
def setupclient(ui, repo):
    """Convert a local repository into a shallow (remotefilelog) client."""
    if isinstance(repo, localrepo.localrepository):
        # Even clients get the server setup since they need to have the
        # wireprotocol endpoints registered.
        remotefilelogserver.onetimesetup(ui)
        onetimeclientsetup(ui)

        shallowrepo.wraprepo(repo)
        repo.store = shallowstore.wrapstore(repo.store)
474 472
475 473
def storewrapper(orig, requirements, path, vfstype):
    """Wrapper for localrepo.makestore that shallow-wraps the store when the
    repository carries the shallow-repo requirement."""
    store = orig(requirements, path, vfstype)
    if constants.SHALLOWREPO_REQUIREMENT in requirements:
        store = shallowstore.wrapstore(store)
    return store
482 480
483 481
# prefetch files before update
def applyupdates(
    orig, repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts
):
    """Batch-prefetch every file an update is about to get from mctx, then
    delegate to the wrapped merge.applyupdates."""
    if isenabled(repo):
        destmf = mctx.manifest()
        wanted = [
            (f, hex(destmf[f]))
            for f, args, msg in mresult.getactions([mergestatemod.ACTION_GET])
        ]
        # one batched request to the server instead of per-file fetches
        repo.fileservice.prefetch(wanted)
    return orig(repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts)
496 494
497 495
# Prefetch merge checkunknownfiles
def checkunknownfiles(orig, repo, wctx, mctx, force, mresult, *args, **kwargs):
    """Batch-prefetch the file revisions merge._checkunknownfiles will need
    before delegating to it."""
    if isenabled(repo):
        wanted = []
        sparsematch = repo.maybesparsematch(mctx.rev())
        createlike = (
            mergestatemod.ACTION_CREATED,
            mergestatemod.ACTION_DELETED_CHANGED,
            mergestatemod.ACTION_CREATED_MERGE,
        )
        for f, (action, actionargs, msg) in mresult.filemap():
            # skip files outside the sparse profile, if one is active
            if sparsematch and not sparsematch(f):
                continue
            if action in createlike:
                wanted.append((f, hex(mctx.filenode(f))))
            elif action == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
                source = actionargs[0]
                wanted.append((source, hex(mctx.filenode(source))))
        # one batched request to the server instead of per-file fetches
        repo.fileservice.prefetch(wanted)
    return orig(repo, wctx, mctx, force, mresult, *args, **kwargs)
518 516
519 517
# Prefetch files before status attempts to look at their size and contents
def checklookup(orig, self, files):
    """Batch-prefetch the parent revisions of the given files before the
    wrapped workingctx._checklookup inspects them."""
    repo = self._repo
    if isenabled(repo):
        wanted = [
            (f, hex(parent.filenode(f)))
            for parent in self._parents
            for f in files
            if f in parent
        ]
        # one batched request to the server instead of per-file fetches
        repo.fileservice.prefetch(wanted)
    return orig(self, files)
532 530
533 531
# Prefetch the logic that compares added and removed files for renames
def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
    """Batch-prefetch removed files (as of '.') before the wrapped rename
    detection compares them against added files."""
    if isenabled(repo):
        pmf = repo[b'.'].manifest()
        wanted = [(f, hex(pmf[f])) for f in removed if f in pmf]
        # one batched request to the server instead of per-file fetches
        repo.fileservice.prefetch(wanted)
    return orig(repo, matcher, added, removed, *args, **kwargs)
545 543
546 544
# prefetch files before pathcopies check
def computeforwardmissing(orig, a, b, match=None):
    """Wrapper for copies._computeforwardmissing that prefetches the missing
    files and, under a sparse profile, narrows the result to matching files."""
    missing = orig(a, b, match=match)
    repo = a._repo
    if not isenabled(repo):
        return missing

    mb = b.manifest()
    wanted = []
    sparsematch = repo.maybesparsematch(b.rev())
    if sparsematch:
        # restrict the missing set to files inside the sparse profile
        kept = set()
        for f in missing:
            if sparsematch(f):
                wanted.append((f, hex(mb[f])))
                kept.add(f)
        missing = kept

    # batch fetch the needed files from the server
    repo.fileservice.prefetch(wanted)
    return missing
567 565
568 566
# close cache miss server connection after the command has finished
def runcommand(orig, lui, repo, *args, **kwargs):
    """Run the wrapped dispatch.runcommand, then close the repo's file
    service connection (if any) once the command completes."""
    service = None
    # repo can be None when running in chg:
    # - at startup, reposetup was called because serve is not norepo
    # - a norepo command like "help" is called
    if repo and isenabled(repo):
        service = repo.fileservice
    try:
        return orig(lui, repo, *args, **kwargs)
    finally:
        if service:
            service.close()
582 580
583 581
# prevent strip from stripping remotefilelogs
def _collectbrokencsets(orig, repo, files, striprev):
    """Wrapper for repair._collectbrokencsets that drops shallow-tracked
    files from the list, since they have no local revlog to break.

    Fix: the original wrapped the list comprehension in a redundant
    ``list(...)`` call; a list comprehension already produces a list.
    """
    if isenabled(repo):
        files = [f for f in files if not repo.shallowmatch(f)]
    return orig(repo, files, striprev)
589 587
590 588
# changectx wrappers
def filectx(orig, self, path, fileid=None, filelog=None):
    """Return a remotefilectx for shallow-tracked paths, otherwise fall
    through to the original changectx.filectx."""
    repo = self._repo
    if fileid is None:
        fileid = self.filenode(path)
    if isenabled(repo) and repo.shallowmatch(path):
        return remotefilectx.remotefilectx(
            repo, path, fileid=fileid, changectx=self, filelog=filelog
        )
    return orig(self, path, fileid=fileid, filelog=filelog)
600 598
601 599
def workingfilectx(orig, self, path, filelog=None):
    """Return a remoteworkingfilectx for shallow-tracked paths, otherwise
    fall through to the original workingctx.filectx."""
    repo = self._repo
    if isenabled(repo) and repo.shallowmatch(path):
        return remotefilectx.remoteworkingfilectx(
            repo, path, workingctx=self, filelog=filelog
        )
    return orig(self, path, filelog=filelog)
608 606
609 607
# prefetch required revisions before a diff
def trydiff(
    orig,
    repo,
    revs,
    ctx1,
    ctx2,
    modified,
    added,
    removed,
    copy,
    getfilectx,
    *args,
    **kwargs
):
    """Batch-prefetch both sides of every file a diff will touch, then
    delegate to the wrapped patch.trydiff."""
    if isenabled(repo):
        wanted = []

        def record(fname, ctx):
            fnode = getfilectx(fname, ctx).filenode()
            # fnode can be None if it's a edited working ctx file
            if fnode:
                wanted.append((fname, hex(fnode)))

        mf1 = ctx1.manifest()
        for fname in modified + added + removed:
            # old side, when the file exists there
            if fname in mf1:
                record(fname, ctx1)
            # new side, unless the file was removed
            if fname not in removed:
                record(fname, ctx2)

        repo.fileservice.prefetch(wanted)

    return orig(
        repo,
        revs,
        ctx1,
        ctx2,
        modified,
        added,
        removed,
        copy,
        getfilectx,
        *args,
        **kwargs
    )
654 652
655 653
# Prevent verify from processing files
# a stub for mercurial.hg.verify()
def _verify(orig, repo, level=None):
    """Run the shallow verifier in place of Mercurial's full verify, holding
    the repo lock for the duration (the lock object releases itself when
    used as a context manager)."""
    with repo.lock():
        return shallowverifier.shallowverifier(repo).verify()
664 662
665 663
# guard so onetimeclientsetup's monkey-patching runs at most once per process
clientonetime = False


def onetimeclientsetup(ui):
    """Process-wide client setup: defer filelog writes until the changelog
    entry (and therefore the linknode) is known.

    Safe to call repeatedly; only the first call has any effect.
    """
    global clientonetime
    if clientonetime:
        return
    clientonetime = True

    # Don't commit filelogs until we know the commit hash, since the hash
    # is present in the filelog blob.
    # This violates Mercurial's filelog->manifest->changelog write order,
    # but is generally fine for client repos.
    pendingfilecommits = []

    def addrawrevision(
        orig,
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        _metatuple=None,
    ):
        # An integer link means the changelog entry doesn't exist yet (no
        # hash to link to); queue the write until changelogadd() below runs.
        if isinstance(link, int):
            pendingfilecommits.append(
                (
                    self,
                    rawtext,
                    transaction,
                    link,
                    p1,
                    p2,
                    node,
                    flags,
                    cachedelta,
                    _metatuple,
                )
            )
            return node
        else:
            return orig(
                self,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                node,
                flags,
                cachedelta,
                _metatuple=_metatuple,
            )

    extensions.wrapfunction(
        remotefilelog.remotefilelog, b'addrawrevision', addrawrevision
    )

    def changelogadd(orig, self, *args, **kwargs):
        # Flush the queued filelog writes once the changelog node exists and
        # integer linkrevs can be resolved to real linknodes.
        oldlen = len(self)
        node = orig(self, *args, **kwargs)
        newlen = len(self)
        if oldlen != newlen:
            for oldargs in pendingfilecommits:
                log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
                linknode = self.node(link)
                if linknode == node:
                    log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
                else:
                    raise error.ProgrammingError(
                        b'pending multiple integer revisions are not supported'
                    )
        else:
            # "link" is actually wrong here (it is set to len(changelog))
            # if changelog remains unchanged, skip writing file revisions
            # but still do a sanity check about pending multiple revisions
            if len({x[3] for x in pendingfilecommits}) > 1:
                raise error.ProgrammingError(
                    b'pending multiple integer revisions are not supported'
                )
        del pendingfilecommits[:]
        return node

    extensions.wrapfunction(changelog.changelog, b'add', changelogadd)
754 752
755 753
def getrenamedfn(orig, repo, endrev=None):
    """Return a rename-lookup function for shallow repos.

    Falls back to the original implementation when remotefilelog is not
    enabled or changeset-centric copy tracing is in use.
    """
    if not isenabled(repo) or copies.usechangesetcentricalgo(repo):
        return orig(repo, endrev)

    rcache = {}

    def getrenamed(fn, rev):
        """Return rename info for fn at changerev rev.

        On the first query for a file, walks the file's ancestors once and
        caches rename info per changerev; later queries hit the cache.
        """
        known = rcache.setdefault(fn, {})
        if rev in known:
            return known[rev]

        try:
            fctx = repo[rev].filectx(fn)
            for ancestor in fctx.ancestors():
                if ancestor.path() == fn:
                    renamed = ancestor.renamed()
                    known[ancestor.rev()] = renamed and renamed[0]

            renamed = fctx.renamed()
            return renamed and renamed[0]
        except error.LookupError:
            return None

    return getrenamed
783 781
784 782
def walkfilerevs(orig, repo, match, follow, revs, fncache):
    """Shallow-repo override of cmdutil.walkfilerevs for `hg log --follow`.

    Collects the linkrevs (within [min(revs), max(revs)]) of the matched
    files and their ancestors, recording filenames in fncache, and returns
    the set of wanted revs.

    NOTE(review): in the diff this SOURCE was extracted from, this function
    (and its registration against cmdutil) is shown as *removed*, matching
    the commit message about deleting the old walkchangerevs() machinery —
    confirm against the actual file revision before relying on it.
    """
    if not isenabled(repo):
        return orig(repo, match, follow, revs, fncache)

    # remotefilelog's can't be walked in rev order, so throw.
    # The caller will see the exception and walk the commit tree instead.
    if not follow:
        raise cmdutil.FileWalkError(b"Cannot walk via filelog")

    wanted = set()
    minrev, maxrev = min(revs), max(revs)

    # --follow starts from the working copy parent
    pctx = repo[b'.']
    for filename in match.files():
        if filename not in pctx:
            raise error.Abort(
                _(b'cannot follow file not in parent revision: "%s"') % filename
            )
        fctx = pctx[filename]

        linkrev = fctx.linkrev()
        if linkrev >= minrev and linkrev <= maxrev:
            fncache.setdefault(linkrev, []).append(filename)
            wanted.add(linkrev)

        # follow the file back through renames; record the path it had then
        for ancestor in fctx.ancestors():
            linkrev = ancestor.linkrev()
            if linkrev >= minrev and linkrev <= maxrev:
                fncache.setdefault(linkrev, []).append(ancestor.path())
                wanted.add(linkrev)

    return wanted
817
818
def filelogrevset(orig, repo, subset, x):
    """``filelog(pattern)``
    Changesets connected to the specified filelog.

    For performance reasons, ``filelog()`` does not show every changeset
    that affects the requested file(s). See :hg:`help log` for details. For
    a slower, more accurate result, use ``file()``.
    """

    if not isenabled(repo):
        return orig(repo, subset, x)

    # i18n: "filelog" is a keyword
    pat = revset.getstring(x, _(b"filelog requires a pattern"))
    m = matchmod.match(
        repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None]
    )
    hits = set()

    if matchmod.patkind(pat):
        # partial: walk matching working-copy files and their ancestors
        for f in (wf for wf in repo[None] if m(wf)):
            fctx = repo[None].filectx(f)
            hits.add(fctx.linkrev())
            for actx in fctx.ancestors():
                hits.add(actx.linkrev())
    else:
        # slow: scan every changeset in the subset for the literal paths
        literals = m.files()
        for r in subset:
            ctx = repo[r]
            cfiles = ctx.files()
            if any(f in cfiles for f in literals):
                hits.add(ctx.rev())

    return smartset.baseset([r for r in subset if r in hits])
857 821
858 822
@command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True)
def gc(ui, *args, **opts):
    '''garbage collect the client and server filelog caches
    '''
    cachepaths = set()

    # system-wide client cache, if one is configured
    syscache = shallowutil.getcachepath(ui, allowempty=True)
    if syscache:
        cachepaths.add(syscache)

    # candidate repositories: $PWD plus any paths given on the command line
    candidates = []
    cwd = ui.environ.get(b'PWD')
    if cwd:
        candidates.append(cwd)
    candidates.extend(args)

    peers = []
    for path in candidates:
        try:
            peer = hg.peer(ui, {}, path)
            peers.append(peer)

            repocache = shallowutil.getcachepath(peer.ui, allowempty=True)
            if repocache:
                cachepaths.add(repocache)
        except error.RepoError:
            pass

    # gc each client cache once, even if shared by several repos
    for cachepath in cachepaths:
        gcclient(ui, cachepath)

    # then gc the server-side caches
    for peer in peers:
        remotefilelogserver.gcserver(ui, peer._repo)
896 860
897 861
def gcclient(ui, cachepath):
    """Garbage collect a single client cache directory.

    Reads the ``repos`` file inside ``cachepath`` to learn which local
    repositories use this cache, computes the set of cache keys those
    repositories still need, rewrites the ``repos`` file with only the
    still-valid repositories, and finally prunes every cache entry not in
    the keep set.
    """
    # get list of repos that use this cache
    repospath = os.path.join(cachepath, b'repos')
    if not os.path.exists(repospath):
        ui.warn(_(b"no known cache at %s\n") % cachepath)
        return

    reposfile = open(repospath, b'rb')
    # one repo path per line; strip the trailing newline
    repos = {r[:-1] for r in reposfile.readlines()}
    reposfile.close()

    # build list of useful files
    validrepos = []
    keepkeys = set()

    sharedcache = None
    filesrepacked = False

    count = 0
    progress = ui.makeprogress(
        _(b"analyzing repositories"), unit=b"repos", total=len(repos)
    )
    for path in repos:
        progress.update(count)
        count += 1
        try:
            path = ui.expandpath(os.path.normpath(path))
        except TypeError as e:
            ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e))
            traceback.print_exc()
            continue
        try:
            peer = hg.peer(ui, {}, path)
            repo = peer._repo
        except error.RepoError:
            # repo was removed or is unreadable; it will be dropped from
            # the repos file since it is not appended to validrepos
            continue

        validrepos.append(path)

        # Protect against any repo or config changes that have happened since
        # this repo was added to the repos file. We'd rather this loop succeed
        # and too much be deleted, than the loop fail and nothing gets deleted.
        if not isenabled(repo):
            continue

        if not util.safehasattr(repo, b'name'):
            ui.warn(
                _(b"repo %s is a misconfigured remotefilelog repo\n") % path
            )
            continue

        # If garbage collection on repack and repack on hg gc are enabled
        # then loose files are repacked and garbage collected.
        # Otherwise regular garbage collection is performed.
        repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc')
        gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack')
        if repackonhggc and gcrepack:
            try:
                repackmod.incrementalrepack(repo)
                filesrepacked = True
                continue
            except (IOError, repackmod.RepackAlreadyRunning):
                # If repack cannot be performed due to not enough disk space
                # continue doing garbage collection of loose files w/o repack
                pass

        reponame = repo.name
        if not sharedcache:
            sharedcache = repo.sharedstore

        # Compute a keepset which is not garbage collected
        def keyfn(fname, fnode):
            return fileserverclient.getcachekey(reponame, fname, hex(fnode))

        keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)

    progress.complete()

    # write list of valid repos back
    oldumask = os.umask(0o002)
    try:
        reposfile = open(repospath, b'wb')
        reposfile.writelines([(b"%s\n" % r) for r in validrepos])
        reposfile.close()
    finally:
        os.umask(oldumask)

    # prune cache
    if sharedcache is not None:
        sharedcache.gc(keepkeys)
    elif not filesrepacked:
        ui.warn(_(b"warning: no valid repos in repofile\n"))
990 954
991 955
def log(orig, ui, repo, *pats, **opts):
    """Wrapped ``hg log`` for shallow repositories.

    File-pattern logs cannot rely on complete filelogs in a shallow clone,
    so force the slow path where needed and nudge the user towards ``-f``
    when that would be faster.
    """
    if not isenabled(repo):
        return orig(ui, repo, *pats, **opts)

    following = opts.get('follow')
    userrevs = opts.get('rev')
    if pats:
        # Force slowpath for non-follow patterns and follows that start from
        # non-working-copy-parent revs.
        if userrevs or not following:
            # This forces the slowpath
            opts['removed'] = True

        # If this is a non-follow log without any revs specified, recommend
        # that the user add -f to speed it up.
        if not (following or userrevs):
            match = scmutil.match(repo[b'.'], pats, pycompat.byteskwargs(opts))
            # only plain file names (no wildcards) that exist in the working
            # copy qualify for the hint
            plainfiles = not match.anypats() and all(
                os.path.isfile(repo.wjoin(f)) for f in match.files()
            )
            if plainfiles:
                ui.warn(
                    _(
                        b"warning: file log can be slow on large repos - "
                        + b"use -f to speed it up\n"
                    )
                )

    return orig(ui, repo, *pats, **opts)
1025 989
1026 990
def revdatelimit(ui, revset):
    """Update revset so that only changesets no older than 'prefetchdays' days
    are included. The default value is set to 14 days. If 'prefetchdays' is set
    to zero or negative value then date restriction is not applied.
    """
    days = ui.configint(b'remotefilelog', b'prefetchdays')
    if days > 0:
        # use %d for the integer: bytes %-formatting does not accept ints
        # for %s on Python 3 (PEP 461) and would raise TypeError here
        revset = b'(%s) & date(-%d)' % (revset, days)
    return revset
1036 1000
1037 1001
def readytofetch(repo):
    """Check that enough time has passed since the last background prefetch.
    This only relates to prefetches after operations that change the working
    copy parent. Default delay between background prefetches is 2 minutes.
    """
    delay = repo.ui.configint(b'remotefilelog', b'prefetchdelay')
    stamp = repo.vfs.join(b'lastprefetch')

    # opening in append mode both creates the stamp file when missing and
    # guards the mtime check against concurrent prefetches
    with open(stamp, b'a'):
        age = time.time() - os.path.getmtime(stamp)
        if age <= delay:
            return False
        # record this prefetch attempt by touching the stamp file
        os.utime(stamp, None)
        return True
1055 1019
1056 1020
def wcpprefetch(ui, repo, **kwargs):
    """Prefetches in background revisions specified by bgprefetchrevs revset.
    Does background repack if backgroundrepack flag is set in config.
    """
    shallow = isenabled(repo)
    revspec = ui.config(b'remotefilelog', b'bgprefetchrevs')
    # note: readytofetch() touches the timestamp file, so it is called
    # unconditionally, before deciding whether to bail out
    ready = readytofetch(repo)

    if not shallow or not revspec or not ready:
        return

    dorepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
    # restrict the revset to recent changesets only
    revspec = revdatelimit(ui, revspec)

    def runprefetch(unused_success):
        # only ever kick off one background prefetch per repo object
        if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch:
            return
        repo.ranprefetch = True
        repo.backgroundprefetch(revspec, repack=dorepack)

    repo._afterlock(runprefetch)
1079 1043
1080 1044
def pull(orig, ui, repo, *pats, **opts):
    """Wrapped ``hg pull`` that optionally prefetches file contents (and
    repacks) after the pull finishes, according to configuration.
    """
    result = orig(ui, repo, *pats, **opts)

    if not isenabled(repo):
        return result

    # prefetch if it's configured
    revspec = ui.config(b'remotefilelog', b'pullprefetch')
    dorepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
    inbackground = repo.ui.configbool(b'remotefilelog', b'backgroundprefetch')

    if not revspec:
        # nothing to prefetch; still honor a requested background repack
        if dorepack:
            repackmod.backgroundrepack(repo, incremental=True)
        return result

    ui.status(_(b"prefetching file contents\n"))
    revs = scmutil.revrange(repo, [revspec])
    base = repo[b'.'].rev()
    if inbackground:
        # the background process takes care of repacking itself
        repo.backgroundprefetch(revspec, repack=dorepack)
    else:
        repo.prefetch(revs, base=base)
        if dorepack:
            repackmod.backgroundrepack(repo, incremental=True)

    return result
1104 1068
1105 1069
def exchangepull(orig, repo, remote, *args, **kwargs):
    # Hook into the callstream/getbundle to insert bundle capabilities
    # during a pull.
    def localgetbundle(
        orig, source, heads=None, common=None, bundlecaps=None, **kwargs
    ):
        # advertise the remotefilelog bundle2 capability so the peer
        # produces shallow-compatible bundles
        if not bundlecaps:
            bundlecaps = set()
        bundlecaps.add(constants.BUNDLE2_CAPABLITY)
        return orig(
            source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs
        )

    if util.safehasattr(remote, b'_callstream'):
        # peer speaks the wire protocol; give the _callstream machinery a
        # reference back to the local repo (used elsewhere in this
        # extension — presumably to inject capabilities; confirm at caller)
        remote._localrepo = repo
    elif util.safehasattr(remote, b'getbundle'):
        # local peer: wrap getbundle directly to add the capability
        extensions.wrapfunction(remote, b'getbundle', localgetbundle)

    return orig(repo, remote, *args, **kwargs)
1125 1089
1126 1090
def _fileprefetchhook(repo, revmatches):
    """Batch-prefetch the file contents matched by each (rev, match) pair."""
    if not isenabled(repo):
        return
    wanted = []
    for rev, match in revmatches:
        # the working directory has no stored filelog contents to fetch
        if rev is None or rev == nodemod.wdirrev:
            continue
        ctx = repo[rev]
        mf = ctx.manifest()
        sparsematch = repo.maybesparsematch(ctx.rev())
        wanted.extend(
            (path, hex(mf[path]))
            for path in ctx.walk(match)
            if (not sparsematch or sparsematch(path)) and path in mf
        )
    repo.fileservice.prefetch(wanted)
1140 1104
1141 1105
@command(
    b'debugremotefilelog',
    [(b'd', b'decompress', None, _(b'decompress the filelog first')),],
    _(b'hg debugremotefilelog <path>'),
    norepo=True,
)
def debugremotefilelog(ui, path, **opts):
    # thin CLI wrapper; the implementation lives in debugcommands
    return debugcommands.debugremotefilelog(ui, path, **opts)
1150 1114
1151 1115
@command(
    b'verifyremotefilelog',
    [(b'd', b'decompress', None, _(b'decompress the filelogs first')),],
    _(b'hg verifyremotefilelogs <directory>'),
    norepo=True,
)
def verifyremotefilelog(ui, path, **opts):
    # thin CLI wrapper; the implementation lives in debugcommands
    return debugcommands.verifyremotefilelog(ui, path, **opts)
1160 1124
1161 1125
@command(
    b'debugdatapack',
    [
        (b'', b'long', None, _(b'print the long hashes')),
        (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'),
    ],
    _(b'hg debugdatapack <paths>'),
    norepo=True,
)
def debugdatapack(ui, *paths, **opts):
    # thin CLI wrapper; the implementation lives in debugcommands
    return debugcommands.debugdatapack(ui, *paths, **opts)
1173 1137
1174 1138
@command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True)
def debughistorypack(ui, path, **opts):
    # thin CLI wrapper; opts are accepted but intentionally not forwarded
    return debugcommands.debughistorypack(ui, path)
1178 1142
1179 1143
@command(b'debugkeepset', [], _(b'hg debugkeepset'))
def debugkeepset(ui, repo, **opts):
    # The command is used to measure keepset computation time
    def keyfn(fname, fnode):
        # map a (filename, filenode) pair to its shared-cache key
        return fileserverclient.getcachekey(repo.name, fname, hex(fnode))

    # result is discarded on purpose: only the computation cost matters
    repackmod.keepset(repo, keyfn)
    return
1188 1152
1189 1153
@command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack'))
def debugwaitonrepack(ui, repo, **opts):
    # thin CLI wrapper; delegates to debugcommands
    return debugcommands.debugwaitonrepack(repo)
1193 1157
1194 1158
@command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch'))
def debugwaitonprefetch(ui, repo, **opts):
    # thin CLI wrapper; delegates to debugcommands
    return debugcommands.debugwaitonprefetch(repo)
1198 1162
1199 1163
def resolveprefetchopts(ui, opts):
    """Fill in default ``rev``/``base`` values for the prefetch command.

    When no revision was given, build a default revset out of ``.``,
    ``draft()`` and the configured pullprefetch/bgprefetchrevs revsets,
    optionally restricted by the prefetchdays date limit.
    """
    if not opts.get(b'rev'):
        parts = [b'.', b'draft()']
        for cfgname in (b'pullprefetch', b'bgprefetchrevs'):
            cfgrevs = ui.config(b'remotefilelog', cfgname, None)
            if cfgrevs:
                parts.append(b'(%s)' % cfgrevs)

        # update a revset with a date limit
        combined = revdatelimit(ui, b'+'.join(parts))
        opts[b'rev'] = [combined]

    if not opts.get(b'base'):
        opts[b'base'] = None

    return opts
1221 1185
1222 1186
@command(
    b'prefetch',
    [
        (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')),
        (b'', b'repack', False, _(b'run repack after prefetch')),
        (b'b', b'base', b'', _(b"rev that is assumed to already be local")),
    ]
    + commands.walkopts,
    _(b'hg prefetch [OPTIONS] [FILE...]'),
    helpcategory=command.CATEGORY_MAINTENANCE,
)
def prefetch(ui, repo, *pats, **opts):
    """prefetch file revisions from the server

    Prefetchs file revisions for the specified revs and stores them in the
    local remotefilelog cache. If no rev is specified, the default rev is
    used which is the union of dot, draft, pullprefetch and bgprefetchrev.
    File names or patterns can be used to limit which files are downloaded.

    Return 0 on success.
    """
    opts = pycompat.byteskwargs(opts)
    if not isenabled(repo):
        raise error.Abort(_(b"repo is not shallow"))

    # fill in default rev/base values from the configuration
    opts = resolveprefetchopts(ui, opts)
    revs = scmutil.revrange(repo, opts.get(b'rev'))
    repo.prefetch(revs, opts.get(b'base'), pats, opts)

    # Run repack in background
    if opts.get(b'repack'):
        repackmod.backgroundrepack(repo, incremental=True)
1255 1219
1256 1220
@command(
    b'repack',
    [
        (b'', b'background', None, _(b'run in a background process'), None),
        (b'', b'incremental', None, _(b'do an incremental repack'), None),
        (
            b'',
            b'packsonly',
            None,
            _(b'only repack packs (skip loose objects)'),
            None,
        ),
    ],
    _(b'hg repack [OPTIONS]'),
)
def repack_(ui, repo, *pats, **opts):
    # Repack the repository's data, either by spawning a background
    # process or synchronously in this process.
    incremental = opts.get('incremental')
    packsonly = opts.get('packsonly', False)

    if opts.get('background'):
        repackmod.backgroundrepack(
            repo, incremental=incremental, packsonly=packsonly,
        )
        return

    runner = repackmod.incrementalrepack if incremental else repackmod.fullrepack
    try:
        runner(repo, options={b'packsonly': packsonly})
    except repackmod.RepackAlreadyRunning as ex:
        # Don't propagate the exception if the repack is already in
        # progress, since we want the command to exit 0.
        repo.ui.warn(b'%s\n' % ex)
@@ -1,4102 +1,3921
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import copy as copymod
11 11 import errno
12 12 import os
13 13 import re
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 hex,
18 18 nullid,
19 nullrev,
20 19 short,
21 20 )
22 21 from .pycompat import (
23 22 getattr,
24 23 open,
25 24 setattr,
26 25 )
27 26 from .thirdparty import attr
28 27
29 28 from . import (
30 29 bookmarks,
31 30 changelog,
32 31 copies,
33 32 crecord as crecordmod,
34 33 dirstateguard,
35 34 encoding,
36 35 error,
37 36 formatter,
38 37 logcmdutil,
39 38 match as matchmod,
40 39 merge as mergemod,
41 40 mergestate as mergestatemod,
42 41 mergeutil,
43 42 obsolete,
44 43 patch,
45 44 pathutil,
46 45 phases,
47 46 pycompat,
48 47 repair,
49 48 revlog,
50 49 rewriteutil,
51 50 scmutil,
52 smartset,
53 51 state as statemod,
54 52 subrepoutil,
55 53 templatekw,
56 54 templater,
57 55 util,
58 56 vfs as vfsmod,
59 57 )
60 58
61 59 from .utils import (
62 60 dateutil,
63 61 stringutil,
64 62 )
65 63
66 64 if pycompat.TYPE_CHECKING:
67 65 from typing import (
68 66 Any,
69 67 Dict,
70 68 )
71 69
72 70 for t in (Any, Dict):
73 71 assert t
74 72
stringio = util.stringio

# templates of common command options
#
# Each entry is a getopt-style tuple:
# (short name, long name, default value, help text[, value label]).

dryrunopts = [
    (b'n', b'dry-run', None, _(b'do not perform actions, just print output')),
]

confirmopts = [
    (b'', b'confirm', None, _(b'ask before applying actions')),
]

remoteopts = [
    (b'e', b'ssh', b'', _(b'specify ssh command to use'), _(b'CMD')),
    (
        b'',
        b'remotecmd',
        b'',
        _(b'specify hg command to run on the remote side'),
        _(b'CMD'),
    ),
    (
        b'',
        b'insecure',
        None,
        _(b'do not verify server certificate (ignoring web.cacerts config)'),
    ),
]

walkopts = [
    (
        b'I',
        b'include',
        [],
        _(b'include names matching the given patterns'),
        _(b'PATTERN'),
    ),
    (
        b'X',
        b'exclude',
        [],
        _(b'exclude names matching the given patterns'),
        _(b'PATTERN'),
    ),
]

commitopts = [
    (b'm', b'message', b'', _(b'use text as commit message'), _(b'TEXT')),
    (b'l', b'logfile', b'', _(b'read commit message from file'), _(b'FILE')),
]

commitopts2 = [
    (
        b'd',
        b'date',
        b'',
        _(b'record the specified date as commit date'),
        _(b'DATE'),
    ),
    (
        b'u',
        b'user',
        b'',
        _(b'record the specified user as committer'),
        _(b'USER'),
    ),
]

commitopts3 = [
    (b'D', b'currentdate', None, _(b'record the current date as commit date')),
    (b'U', b'currentuser', None, _(b'record the current user as committer')),
]

formatteropts = [
    (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
]

templateopts = [
    (
        b'',
        b'style',
        b'',
        _(b'display using template map file (DEPRECATED)'),
        _(b'STYLE'),
    ),
    (b'T', b'template', b'', _(b'display with template'), _(b'TEMPLATE')),
]

logopts = [
    (b'p', b'patch', None, _(b'show patch')),
    (b'g', b'git', None, _(b'use git extended diff format')),
    (b'l', b'limit', b'', _(b'limit number of changes displayed'), _(b'NUM')),
    (b'M', b'no-merges', None, _(b'do not show merges')),
    (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
    (b'G', b'graph', None, _(b"show the revision DAG")),
] + templateopts

diffopts = [
    (b'a', b'text', None, _(b'treat all files as text')),
    (
        b'g',
        b'git',
        None,
        _(b'use git extended diff format (DEFAULT: diff.git)'),
    ),
    (b'', b'binary', None, _(b'generate binary diffs in git mode (default)')),
    (b'', b'nodates', None, _(b'omit dates from diff headers')),
]

diffwsopts = [
    (
        b'w',
        b'ignore-all-space',
        None,
        _(b'ignore white space when comparing lines'),
    ),
    (
        b'b',
        b'ignore-space-change',
        None,
        _(b'ignore changes in the amount of white space'),
    ),
    (
        b'B',
        b'ignore-blank-lines',
        None,
        _(b'ignore changes whose lines are all blank'),
    ),
    (
        b'Z',
        b'ignore-space-at-eol',
        None,
        _(b'ignore changes in whitespace at EOL'),
    ),
]

diffopts2 = (
    [
        (b'', b'noprefix', None, _(b'omit a/ and b/ prefixes from filenames')),
        (
            b'p',
            b'show-function',
            None,
            _(
                b'show which function each change is in (DEFAULT: diff.showfunc)'
            ),
        ),
        (b'', b'reverse', None, _(b'produce a diff that undoes the changes')),
    ]
    + diffwsopts
    + [
        (
            b'U',
            b'unified',
            b'',
            _(b'number of lines of context to show'),
            _(b'NUM'),
        ),
        (b'', b'stat', None, _(b'output diffstat-style summary of changes')),
        (
            b'',
            b'root',
            b'',
            _(b'produce diffs relative to subdirectory'),
            _(b'DIR'),
        ),
    ]
)

mergetoolopts = [
    (b't', b'tool', b'', _(b'specify merge tool'), _(b'TOOL')),
]

similarityopts = [
    (
        b's',
        b'similarity',
        b'',
        _(b'guess renamed files by similarity (0<=s<=100)'),
        _(b'SIMILARITY'),
    )
]

subrepoopts = [(b'S', b'subrepos', None, _(b'recurse into subrepositories'))]

debugrevlogopts = [
    (b'c', b'changelog', False, _(b'open changelog')),
    (b'm', b'manifest', False, _(b'open manifest')),
    (b'', b'dir', b'', _(b'open directory manifest')),
]

# special string such that everything below this line will be ignored in the
# editor text
_linebelow = b"^HG: ------------------------ >8 ------------------------$"
269 267
270 268
def check_at_most_one_arg(opts, *args):
    """abort if more than one of the arguments are in opts

    Returns the unique argument or None if none of them were specified.
    """

    def _as_flag(name):
        # render an option name the way it appears on the command line
        return pycompat.sysbytes(name).replace(b'_', b'-')

    found = None
    for name in args:
        if not opts.get(name):
            continue
        if found:
            raise error.Abort(
                _(b'cannot specify both --%s and --%s')
                % (_as_flag(found), _as_flag(name))
            )
        found = name
    return found
290 288
291 289
def check_incompatible_arguments(opts, first, others):
    """abort if the first argument is given along with any of the others

    Unlike check_at_most_one_arg(), `others` are not mutually exclusive
    among themselves, and they're passed as a single collection.
    """
    # pairwise-check ``first`` against each of the others; any overlapping
    # pair triggers the abort inside check_at_most_one_arg()
    for incompatible in others:
        check_at_most_one_arg(opts, first, incompatible)
300 298
301 299
def resolvecommitoptions(ui, opts):
    """modify commit options dict to handle related options

    The return value indicates that ``rewrite.update-timestamp`` is the reason
    the ``date`` option is set.
    """
    check_at_most_one_arg(opts, b'date', b'currentdate')
    check_at_most_one_arg(opts, b'user', b'currentuser')

    # date-only change should be ignored?
    datemaydiffer = False

    if opts.get(b'currentdate'):
        opts[b'date'] = b'%d %d' % dateutil.makedate()
    else:
        # opts.get(b'currentdate') is None distinguishes "flag absent" from
        # "flag explicitly disabled" (False)
        usenow = (
            not opts.get(b'date')
            and ui.configbool(b'rewrite', b'update-timestamp')
            and opts.get(b'currentdate') is None
        )
        if usenow:
            opts[b'date'] = b'%d %d' % dateutil.makedate()
            datemaydiffer = True

    if opts.get(b'currentuser'):
        opts[b'user'] = ui.username()

    return datemaydiffer
327 325
328 326
def checknotesize(ui, opts):
    """ make sure note is of valid format """

    note = opts.get(b'note')
    if note:
        # notes must fit in a single line of at most 255 bytes
        if len(note) > 255:
            raise error.Abort(_(b"cannot store a note of more than 255 bytes"))
        if b'\n' in note:
            raise error.Abort(_(b"note cannot contain a newline"))
340 338
341 339
def ishunk(x):
    """Return True if ``x`` is a patch hunk (text-mode or curses flavor)."""
    return isinstance(x, (crecordmod.uihunk, patch.recordhunk))
345 343
346 344
def newandmodified(chunks, originalchunks):
    """Return (newfiles, alsorestore) for hunks that introduce new files.

    ``newfiles`` is the set of filenames created by hunks that appear in
    ``chunks`` but not in ``originalchunks``; ``alsorestore`` collects the
    other files those hunks reference, which must be restored for the
    patch to apply.
    """
    newfiles = set()
    alsorestore = set()
    for c in chunks:
        if not ishunk(c):
            continue
        header = c.header
        if header.isnewfile() and c not in originalchunks:
            fname = header.filename()
            newfiles.add(fname)
            alsorestore.update(set(header.files()) - {fname})
    return newfiles, alsorestore
361 359
362 360
def parsealiases(cmd):
    """Split a ``b"name|alias1|alias2"`` command spec into a list of names,
    canonical name first."""
    return cmd.split(b"|")
365 363
366 364
def setupwrapcolorwrite(ui):
    """Replace ui.write with a diff-labeling wrapper; return the old write.

    The caller is responsible for restoring the returned original method
    once the colorized output is done.
    """
    oldwrite = ui.write

    def labeledwrite(*args, **kwargs):
        # run every chunk through difflabel so diff output gets
        # labeled/colorized before being written out
        label = kwargs.pop('label', b'')
        for chunk, sublabel in patch.difflabel(lambda: args):
            oldwrite(chunk, label=label + sublabel)

    setattr(ui, 'write', labeledwrite)
    return oldwrite
381 379
382 380
def filterchunks(ui, originalhunks, usecurses, testfile, match, operation=None):
    """Let the user filter hunks, via curses when enabled, else text mode."""
    if usecurses:
        try:
            if testfile:
                # a test harness drives the curses chunk selector
                chooser = crecordmod.testdecorator(
                    testfile, crecordmod.testchunkselector
                )
            else:
                chooser = crecordmod.chunkselector
            return crecordmod.filterpatch(ui, originalhunks, chooser, operation)
        except crecordmod.fallbackerror as e:
            ui.warn(b'%s\n' % e)
            ui.warn(_(b'falling back to text mode\n'))

    return patch.filterpatch(ui, originalhunks, match, operation)
401 399
402 400
def recordfilter(ui, originalhunks, match, operation=None):
    """ Prompts the user to filter the originalhunks and return a list of
    selected hunks.
    *operation* is used for to build ui messages to indicate the user what
    kind of filtering they are doing: reverting, committing, shelving, etc.
    (see patch.filterpatch).
    """
    withcurses = crecordmod.checkcurses(ui)
    testpath = ui.config(b'experimental', b'crecordtest')
    # temporarily wrap ui.write so the interactive diff gets colorized;
    # restore it in all cases, even if chunk selection raises
    origwrite = setupwrapcolorwrite(ui)
    try:
        chunks, chunkopts = filterchunks(
            ui, originalhunks, withcurses, testpath, match, operation
        )
    finally:
        ui.write = origwrite
    return chunks, chunkopts
420 418
421 419
def dorecord(
    ui, repo, commitfunc, cmdsuggest, backupall, filterfn, *pats, **opts
):
    """Interactively select changes and commit them via ``commitfunc``.

    ``filterfn`` selects the hunks to record; ``cmdsuggest`` is the command
    name suggested when the terminal is non-interactive; ``backupall``
    forces backing up every changed file rather than just the ones being
    recorded.
    """
    opts = pycompat.byteskwargs(opts)
    if not ui.interactive():
        if cmdsuggest:
            msg = _(b'running non-interactively, use %s instead') % cmdsuggest
        else:
            msg = _(b'running non-interactively')
        raise error.Abort(msg)

    # make sure username is set before going interactive
    if not opts.get(b'user'):
        ui.username()  # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """
        if not opts.get(b'interactive-unshelve'):
            checkunfinished(repo, commit=True)
        wctx = repo[None]
        merge = len(wctx.parents()) > 1
        if merge:
            raise error.Abort(
                _(
                    b'cannot partially commit a merge '
                    b'(use "hg commit" instead)'
                )
            )

        def fail(f, msg):
            raise error.Abort(b'%s: %s' % (f, msg))

        force = opts.get(b'force')
        if not force:
            match = matchmod.badmatch(match, fail)

        status = repo.status(match=match)

        overrides = {(b'ui', b'commitsubrepos'): True}

        with repo.ui.configoverride(overrides, b'record'):
            # subrepoutil.precommit() modifies the status
            tmpstatus = scmutil.status(
                copymod.copy(status.modified),
                copymod.copy(status.added),
                copymod.copy(status.removed),
                copymod.copy(status.deleted),
                copymod.copy(status.unknown),
                copymod.copy(status.ignored),
                copymod.copy(status.clean),  # pytype: disable=wrong-arg-count
            )

            # Force allows -X subrepo to skip the subrepo.
            subs, commitsubs, newstate = subrepoutil.precommit(
                repo.ui, wctx, tmpstatus, match, force=True
            )
            for s in subs:
                if s in commitsubs:
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    raise error.Abort(dirtyreason)

        if not force:
            repo.checkcommitpatterns(wctx, match, status, fail)
        diffopts = patch.difffeatureopts(
            ui,
            opts=opts,
            whitespace=True,
            section=b'commands',
            configprefix=b'commit.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        diffopts.showfunc = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)
        match = scmutil.match(repo[None], pats)

        # 1. filter patch, since we are intending to apply subset of it
        try:
            chunks, newopts = filterfn(ui, originalchunks, match)
        except error.PatchError as err:
            raise error.Abort(_(b'error parsing patch: %s') % err)
        opts.update(newopts)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir. We also will need to restore
        # files that were the sources of renames so that the patch application
        # works.
        newlyaddedandmodifiedfiles, alsorestore = newandmodified(
            chunks, originalchunks
        )
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                # non-hunk chunks (e.g. headers) have no files(); skip them
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_(b'no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [
                f
                for f in newfiles
                if f in modified or f in newlyaddedandmodifiedfiles
            ]
        backups = {}
        if tobackup:
            backupdir = repo.vfs.join(b'record-backups')
            try:
                os.mkdir(backupdir)
            except OSError as err:
                # the backup directory may already exist from a prior run
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = pycompat.mkstemp(
                    prefix=f.replace(b'/', b'_') + b'.', dir=backupdir
                )
                os.close(fd)
                ui.debug(b'backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname, copystat=True)
                backups[f] = tmpname

            # assemble the selected hunks into a single patch
            fp = stringio()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # 2.5 optionally review / modify patch in text editor
            if opts.get(b'review', False):
                patchtext = (
                    crecordmod.diffhelptext
                    + crecordmod.patchhelptext
                    + fp.read()
                )
                reviewedpatch = ui.edit(
                    patchtext, b"", action=b"diff", repopath=repo.path
                )
                fp.truncate(0)
                fp.write(reviewedpatch)
                fp.seek(0)

            # remove newly-added files so the patch can recreate them cleanly
            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                m = scmutil.matchfiles(repo, set(backups.keys()) | alsorestore)
                mergemod.revert_to(repo[b'.'], matcher=m)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug(b'applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except error.PatchError as err:
                    raise error.Abort(pycompat.bytestr(err))
            del fp

            # 4. We prepared working directory according to filtered
            # patch. Now is the time to delegate the job to
            # commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **pycompat.strkwargs(opts))
        finally:
            # 5. finally restore backed-up files
            try:
                dirstate = repo.dirstate
                for realname, tmpname in pycompat.iteritems(backups):
                    ui.debug(b'restoring %r to %r\n' % (tmpname, realname))

                    if dirstate[realname] == b'n':
                        # without normallookup, restoring timestamp
                        # may cause partially committed files
                        # to be treated as unmodified
                        dirstate.normallookup(realname)

                    # copystat=True here and above are a hack to trick any
                    # editors that have f open that we haven't modified them.
                    #
                    # Also note that this racy as an editor could notice the
                    # file's mtime before we've finished writing it.
                    util.copyfile(tmpname, repo.wjoin(realname), copystat=True)
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup; leftover backups are not fatal
                pass

    def recordinwlock(ui, repo, message, match, opts):
        # the actual record run must hold the working-copy lock
        with repo.wlock():
            return recordfunc(ui, repo, message, match, opts)

    return commit(ui, repo, recordinwlock, pats, opts)
645 643
646 644
class dirnode(object):
    """
    A node in the tree of working-copy directories built for status tersing.

    path is the path to the directory, without a trailing '/'

    statuses collects the status characters of every file at or below this
    directory (i.e. including all subdirectories)

    files holds (name, status) pairs for direct children only

    subdirs maps a child directory name to its own dirnode object
    """

    def __init__(self, dirpath):
        self.path = dirpath
        self.statuses = set()
        self.files = []
        self.subdirs = {}

    def _addfileindir(self, filename, status):
        """Record *filename* as a direct child of this directory."""
        self.files.append((filename, status))

    def addfile(self, filename, status):
        """
        Add a file somewhere below this directory.

        Names containing '/' are routed down into (and lazily create) the
        matching subdirectory's dirnode; plain names are stored here.
        """
        if b'/' not in filename:
            self._addfileindir(filename, status)
        else:
            child, rest = filename.split(b'/', 1)

            # create the subdirectory's dirnode on first sight
            if child not in self.subdirs:
                self.subdirs[child] = dirnode(pathutil.join(self.path, child))

            # delegate the remainder of the path to the subdirectory
            self.subdirs[child].addfile(rest, status)

        if status not in self.statuses:
            self.statuses.add(status)

    def iterfilepaths(self):
        """Yield (status, path) for files directly under this directory."""
        for name, status in self.files:
            yield status, pathutil.join(self.path, name)

    def tersewalk(self, terseargs):
        """
        Yield (status, path) pairs for this subtree, collapsing the whole
        directory into one entry where possible.

        terseargs is the string of arguments passed by the user with the
        `--terse` flag.

        Two outcomes are possible:

        1) Every file below this directory shares one status and the user
        asked to terse that status -> a single (status, 'dirpath/') pair.

        2) Otherwise, direct files are yielded first, then each
        subdirectory is walked recursively.
        """
        if len(self.statuses) == 1:
            # pop() both reads the lone status and empties the set; the
            # tree is only traversed once, so this matches the original
            # single-use semantics
            lone = self.statuses.pop()

            # terse only when this status abbreviation was requested
            if lone in terseargs:
                yield lone, self.path + b'/'
                return

        # emit the direct children of this directory
        for entry in self.iterfilepaths():
            yield entry

        # then recurse into each subdirectory
        for child in self.subdirs.values():
            for entry in child.tersewalk(terseargs):
                yield entry
747 745
def tersedir(statuslist, terseargs):
    """
    Collapse per-file statuses into per-directory entries where possible.

    statuslist is a scmutil.status() object containing a list of files for
    each status.
    terseargs is the string passed by the user as the `--terse` argument.

    A tree of dirnode objects is built from every file; each node records
    enough information to decide whether its directory can be tersed.
    Returns a new scmutil.status() object.
    """
    # the order matters here as that is used to produce the final list
    allst = (b'm', b'a', b'r', b'd', b'u', b'i', b'c')

    # reject unknown status abbreviations up front
    for ch in pycompat.bytestr(terseargs):
        if ch not in allst:
            raise error.Abort(_(b"'%s' not recognized") % ch)

    # the dirnode tree is rooted at the repository root
    rootobj = dirnode(b'')
    pstatus = (
        b'modified',
        b'added',
        b'deleted',
        b'clean',
        b'unknown',
        b'ignored',
        b'removed',
    )

    tersedict = {}
    for attrname in pstatus:
        abbrev = attrname[0:1]
        for fname in getattr(statuslist, attrname):
            rootobj.addfile(fname, abbrev)
        tersedict[abbrev] = []

    # the root dir itself is never tersed, so emit its direct files as-is
    for status, fpath in rootobj.iterfilepaths():
        tersedict[status].append(fpath)

    # walk every top-level subdirectory, tersing where allowed
    for subdir in rootobj.subdirs.values():
        for status, fpath in subdir.tersewalk(terseargs):
            tersedict[status].append(fpath)

    # assemble sorted per-status lists in canonical order
    tersedlist = [sorted(tersedict[status]) for status in allst]

    return scmutil.status(*tersedlist)
803 801
804 802
805 803 def _commentlines(raw):
806 804 '''Surround lineswith a comment char and a new line'''
807 805 lines = raw.splitlines()
808 806 commentedlines = [b'# %s' % line for line in lines]
809 807 return b'\n'.join(commentedlines) + b'\n'
810 808
811 809
@attr.s(frozen=True)
class morestatus(object):
    """Extra `hg status` output describing an unfinished operation and/or
    unresolved merge conflicts."""

    reporoot = attr.ib()
    unfinishedop = attr.ib()
    unfinishedmsg = attr.ib()
    activemerge = attr.ib()
    unresolvedpaths = attr.ib()
    # Paths already emitted through formatfile().  Must be a per-instance
    # set: with default=set() the literal is evaluated once at class
    # definition time and shared by every morestatus instance, so paths
    # formatted for one status invocation would leak into the next (e.g.
    # in a long-lived command server).  attr.Factory builds a fresh set
    # per instance.
    _formattedpaths = attr.ib(init=False, default=attr.Factory(set))
    _label = b'status.morestatus'

    def formatfile(self, path, fm):
        """Record *path* as formatted; flag it unresolved when a merge is
        active and the path has conflicts."""
        self._formattedpaths.add(path)
        if self.activemerge and path in self.unresolvedpaths:
            fm.data(unresolved=True)

    def formatfooter(self, fm):
        """Emit the trailing 'morestatus' item describing the repo state."""
        if self.unfinishedop or self.unfinishedmsg:
            fm.startitem()
            fm.data(itemtype=b'morestatus')

            if self.unfinishedop:
                fm.data(unfinished=self.unfinishedop)
                statemsg = (
                    _(b'The repository is in an unfinished *%s* state.')
                    % self.unfinishedop
                )
                fm.plain(b'%s\n' % _commentlines(statemsg), label=self._label)
            if self.unfinishedmsg:
                fm.data(unfinishedmsg=self.unfinishedmsg)

            # May also start new data items.
            self._formatconflicts(fm)

            if self.unfinishedmsg:
                fm.plain(
                    b'%s\n' % _commentlines(self.unfinishedmsg),
                    label=self._label,
                )

    def _formatconflicts(self, fm):
        """Emit the unresolved-conflict listing; no-op unless a merge is
        in progress."""
        if not self.activemerge:
            return

        if self.unresolvedpaths:
            mergeliststr = b'\n'.join(
                [
                    b' %s'
                    % util.pathto(self.reporoot, encoding.getcwd(), path)
                    for path in self.unresolvedpaths
                ]
            )
            msg = (
                _(
                    '''Unresolved merge conflicts:

%s

To mark files as resolved: hg resolve --mark FILE'''
                )
                % mergeliststr
            )

            # If any paths with unresolved conflicts were not previously
            # formatted, output them now.
            for f in self.unresolvedpaths:
                if f in self._formattedpaths:
                    # Already output.
                    continue
                fm.startitem()
                # We can't claim to know the status of the file - it may just
                # have been in one of the states that were not requested for
                # display, so it could be anything.
                fm.data(itemtype=b'file', path=f, unresolved=True)

        else:
            msg = _(b'No unresolved merge conflicts.')

        fm.plain(b'%s\n' % _commentlines(msg), label=self._label)
889 887
890 888
def readmorestatus(repo):
    """Return a morestatus object for *repo*, or None when there is neither
    an unfinished operation nor an active merge."""
    statetuple = statemod.getrepostate(repo)
    mergestate = mergestatemod.mergestate.read(repo)
    activemerge = mergestate.active()
    if not (statetuple or activemerge):
        return None

    unfinishedop = unfinishedmsg = unresolved = None
    if statetuple:
        unfinishedop, unfinishedmsg = statetuple
    if activemerge:
        unresolved = sorted(mergestate.unresolved())
    return morestatus(
        repo.root, unfinishedop, unfinishedmsg, activemerge, unresolved
    )
907 905
908 906
def findpossible(cmd, table, strict=False):
    """
    Return cmd -> (aliases, command table entry)
    for each matching command.
    Return debug commands (or their aliases) only if no normal command matches.
    """
    choice = {}
    debugchoice = {}

    if cmd in table:
        # short-circuit exact matches, "log" alias beats "log|history"
        keys = [cmd]
    else:
        keys = table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)

        # exact alias match wins; otherwise (non-strict) the first alias
        # with a matching prefix is chosen
        matched = None
        if cmd in aliases:
            matched = cmd
        elif not strict:
            matched = next((a for a in aliases if a.startswith(cmd)), None)

        if matched is None:
            continue

        target = (aliases, table[entry])
        if aliases[0].startswith(b"debug") or matched.startswith(b"debug"):
            debugchoice[matched] = target
        else:
            choice[matched] = target

    # fall back to debug commands only when nothing else matched
    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
946 944
947 945
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # an exact name beats any ambiguity
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        raise error.AmbiguousCommand(cmd, sorted(choice))

    if choice:
        return next(iter(choice.values()))

    raise error.UnknownCommand(cmd, allcmds)
963 961
964 962
def changebranch(ui, repo, revs, label, opts):
    """ Change the branch name of given revs to label

    Each changeset in 'revs' is rewritten as a new commit on branch
    'label'; the originals are obsoleted and bookmarks/working copy are
    moved along.  Aborts on a dirty working directory, a non-linear
    revision set, obsolete changesets, or (without --force) a name clash
    with an existing branch.
    """

    with repo.wlock(), repo.lock(), repo.transaction(b'branches'):
        # abort in case of uncommitted merge or dirty wdir
        bailifchanged(repo)
        revs = scmutil.revrange(repo, revs)
        if not revs:
            raise error.Abort(b"empty revision set")
        roots = repo.revs(b'roots(%ld)', revs)
        if len(roots) > 1:
            raise error.Abort(
                _(b"cannot change branch of non-linear revisions")
            )
        rewriteutil.precheck(repo, revs, b'change branch of')

        root = repo[roots.first()]
        # branches of the root's parents: moving onto one of those is
        # allowed even if the branch name already exists in the repo
        rpb = {parent.branch() for parent in root.parents()}
        if (
            not opts.get(b'force')
            and label not in rpb
            and label in repo.branchmap()
        ):
            raise error.Abort(_(b"a branch of the same name already exists"))

        if repo.revs(b'obsolete() and %ld', revs):
            raise error.Abort(
                _(b"cannot change branch of a obsolete changeset")
            )

        # make sure only topological heads
        if repo.revs(b'heads(%ld) - head()', revs):
            raise error.Abort(_(b"cannot change branch in middle of a stack"))

        replacements = {}
        # avoid import cycle mercurial.cmdutil -> mercurial.context ->
        # mercurial.subrepo -> mercurial.cmdutil
        from . import context

        for rev in revs:
            ctx = repo[rev]
            oldbranch = ctx.branch()
            # check if ctx has same branch
            if oldbranch == label:
                continue

            # file contents are taken unchanged from the original ctx;
            # paths absent from its manifest are treated as removed
            def filectxfn(repo, newctx, path):
                try:
                    return ctx[path]
                except error.ManifestLookupError:
                    return None

            ui.debug(
                b"changing branch of '%s' from '%s' to '%s'\n"
                % (hex(ctx.node()), oldbranch, label)
            )
            extra = ctx.extra()
            extra[b'branch_change'] = hex(ctx.node())
            # While changing branch of set of linear commits, make sure that
            # we base our commits on new parent rather than old parent which
            # was obsoleted while changing the branch
            p1 = ctx.p1().node()
            p2 = ctx.p2().node()
            if p1 in replacements:
                p1 = replacements[p1][0]
            if p2 in replacements:
                p2 = replacements[p2][0]

            mc = context.memctx(
                repo,
                (p1, p2),
                ctx.description(),
                ctx.files(),
                filectxfn,
                user=ctx.user(),
                date=ctx.date(),
                extra=extra,
                branch=label,
            )

            newnode = repo.commitctx(mc)
            replacements[ctx.node()] = (newnode,)
            ui.debug(b'new node id is %s\n' % hex(newnode))

        # create obsmarkers and move bookmarks
        scmutil.cleanupnodes(
            repo, replacements, b'branch-change', fixphase=True
        )

        # move the working copy too
        wctx = repo[None]
        # in-progress merge is a bit too complex for now.
        if len(wctx.parents()) == 1:
            newid = replacements.get(wctx.p1().node())
            if newid is not None:
                # avoid import cycle mercurial.cmdutil -> mercurial.hg ->
                # mercurial.cmdutil
                from . import hg

                hg.update(repo, newid[0], quietempty=True)

    ui.status(_(b"changed branch on %d changesets\n") % len(replacements))
1067 1065
1068 1066
def findrepo(p):
    """Walk from directory *p* toward the filesystem root and return the
    first directory containing '.hg', or None if none is found."""
    while not os.path.isdir(os.path.join(p, b".hg")):
        parent = os.path.dirname(p)
        if parent == p:
            # reached the root without finding a repository
            return None
        p = parent

    return p
1076 1074
1077 1075
def bailifchanged(repo, merge=True, hint=None):
    """Abort unless the working directory is clean.

    'merge' can be set to False if a pending uncommitted merge should be
    ignored (such as when 'update --check' runs).

    'hint' is the usual hint given to the Abort exception.
    """

    if merge and repo.dirstate.p2() != nullid:
        raise error.Abort(_(b'outstanding uncommitted merge'), hint=hint)
    st = repo.status()
    dirty = st.modified or st.added or st.removed or st.deleted
    if dirty:
        raise error.Abort(_(b'uncommitted changes'), hint=hint)
    # subrepositories must be clean as well
    wctx = repo[None]
    for sub in sorted(wctx.substate):
        wctx.sub(sub).bailifchanged(hint=hint)
1095 1093
1096 1094
def logmessage(ui, opts):
    """ get the log message according to -m and -l option """

    check_at_most_one_arg(opts, b'message', b'logfile')

    message = opts.get(b'message')
    logfile = opts.get(b'logfile')

    # -m wins; without either, fall through with whatever -m held
    if message or not logfile:
        return message

    try:
        if isstdiofilename(logfile):
            return ui.fin.read()
        return b'\n'.join(util.readfile(logfile).splitlines())
    except IOError as inst:
        raise error.Abort(
            _(b"can't read commit message '%s': %s")
            % (logfile, encoding.strtolocal(inst.strerror))
        )
1117 1115
1118 1116
def mergeeditform(ctxorbool, baseformname):
    """return appropriate editform name (referencing a committemplate)

    'ctxorbool' is either a ctx to be committed, or a bool indicating whether
    merging is committed.

    '.merge' is appended to baseformname for a merge commit, '.normal'
    otherwise.
    """
    if isinstance(ctxorbool, bool):
        ismerge = ctxorbool
    else:
        ismerge = len(ctxorbool.parents()) > 1
    return baseformname + (b".merge" if ismerge else b".normal")
1135 1133
1136 1134
def getcommiteditor(
    edit=False, finishdesc=None, extramsg=None, editform=b'', **opts
):
    """get appropriate commit message editor according to '--edit' option

    'finishdesc' is a function invoked with the edited commit message
    (= 'description' of the new changeset) just after editing, but
    before the empty-ness check.  Whatever it returns is the text
    actually stored into history, allowing the description to be
    rewritten before storing.

    'extramsg' is an extra message shown in the editor instead of the
    'Leave message empty to abort commit' line.  The 'HG: ' prefix and
    EOL are added automatically.

    'editform' is a dot-separated list of names, used to distinguish
    the purpose of the commit text editing.

    'commitforceeditor' is returned regardless of 'edit' whenever one of
    'finishdesc' or 'extramsg' is specified, because they are specific
    to usage in MQ.
    """
    if edit or finishdesc or extramsg:

        def forceeditor(r, c, s):
            return commitforceeditor(
                r,
                c,
                s,
                finishdesc=finishdesc,
                extramsg=extramsg,
                editform=editform,
            )

        return forceeditor
    if editform:
        return lambda r, c, s: commiteditor(r, c, s, editform=editform)
    return commiteditor
1167 1165
1168 1166
def _escapecommandtemplate(tmpl):
    """Escape the literal (string) segments of *tmpl* so backslashes
    survive command-line template expansion; template syntax segments are
    passed through untouched."""
    pieces = []
    for kind, begin, stop in templater.scantemplate(tmpl, raw=True):
        segment = tmpl[begin:stop]
        if kind == b'string':
            segment = stringutil.escapestr(segment)
        pieces.append(segment)
    return b''.join(pieces)
1177 1175
1178 1176
def rendercommandtemplate(ui, tmpl, props):
    r"""Expand a literal template 'tmpl' in a way suitable for command line

    '\' in the outermost string is not taken as an escape character because
    it is a directory separator on Windows.

    >>> from . import ui as uimod
    >>> ui = uimod.ui()
    >>> rendercommandtemplate(ui, b'c:\\{path}', {b'path': b'foo'})
    'c:\\foo'
    >>> rendercommandtemplate(ui, b'{"c:\\{path}"}', {'path': b'foo'})
    'c:{path}'
    """
    if not tmpl:
        return tmpl
    templ = formatter.maketemplater(ui, _escapecommandtemplate(tmpl))
    return templ.renderdefault(props)
1196 1194
1197 1195
def rendertemplate(ctx, tmpl, props=None):
    """Expand a literal template 'tmpl' byte-string against one changeset

    Each props item must be a stringify-able value or a callable returning
    such value, i.e. no bare list nor dict should be passed.
    """
    repo = ctx.repo()
    resources = formatter.templateresources(repo.ui, repo)
    templ = formatter.maketemplater(
        repo.ui, tmpl, defaults=templatekw.keywords, resources=resources
    )
    mapping = {b'ctx': ctx}
    mapping.update(props or {})
    return templ.renderdefault(mapping)
1213 1211
1214 1212
def _buildfntemplate(pat, total=None, seqno=None, revwidth=None, pathname=None):
    r"""Convert old-style filename format string to template string

    >>> _buildfntemplate(b'foo-%b-%n.patch', seqno=0)
    'foo-{reporoot|basename}-{seqno}.patch'
    >>> _buildfntemplate(b'%R{tags % "{tag}"}%H')
    '{rev}{tags % "{tag}"}{node}'

    '\' in outermost strings has to be escaped because it is a directory
    separator on Windows:

    >>> _buildfntemplate(b'c:\\tmp\\%R\\%n.patch', seqno=0)
    'c:\\\\tmp\\\\{rev}\\\\{seqno}.patch'
    >>> _buildfntemplate(b'\\\\foo\\bar.patch')
    '\\\\\\\\foo\\\\bar.patch'
    >>> _buildfntemplate(b'\\{tags % "{tag}"}')
    '\\\\{tags % "{tag}"}'

    but inner strings follow the template rules (i.e. '\' is taken as an
    escape character):

    >>> _buildfntemplate(br'{"c:\tmp"}', seqno=0)
    '{"c:\\tmp"}'
    """
    # translation table from %-specifiers to template keywords; entries
    # that depend on caller-supplied values (%N, %n, %s, %d, %p) are added
    # conditionally below
    expander = {
        b'H': b'{node}',
        b'R': b'{rev}',
        b'h': b'{node|short}',
        b'm': br'{sub(r"[^\w]", "_", desc|firstline)}',
        b'r': b'{if(revwidth, pad(rev, revwidth, "0", left=True), rev)}',
        b'%': b'%',
        b'b': b'{reporoot|basename}',
    }
    if total is not None:
        expander[b'N'] = b'{total}'
    if seqno is not None:
        expander[b'n'] = b'{seqno}'
    if total is not None and seqno is not None:
        # with both known, zero-pad the sequence number to the total's width
        expander[b'n'] = b'{pad(seqno, total|stringify|count, "0", left=True)}'
    if pathname is not None:
        expander[b's'] = b'{pathname|basename}'
        expander[b'd'] = b'{if(pathname|dirname, pathname|dirname, ".")}'
        expander[b'p'] = b'{pathname}'

    newname = []
    # only literal 'string' segments are scanned for %-specifiers;
    # template syntax segments are copied through untouched
    for typ, start, end in templater.scantemplate(pat, raw=True):
        if typ != b'string':
            newname.append(pat[start:end])
            continue
        i = start
        while i < end:
            n = pat.find(b'%', i, end)
            if n < 0:
                # no more specifiers: escape the remainder literally
                newname.append(stringutil.escapestr(pat[i:end]))
                break
            newname.append(stringutil.escapestr(pat[i:n]))
            if n + 2 > end:
                # a lone trailing '%' has no specifier character
                raise error.Abort(
                    _(b"incomplete format spec in output filename")
                )
            c = pat[n + 1 : n + 2]
            i = n + 2
            try:
                newname.append(expander[c])
            except KeyError:
                raise error.Abort(
                    _(b"invalid format spec '%%%s' in output filename") % c
                )
    return b''.join(newname)
1284 1282
1285 1283
def makefilename(ctx, pat, **props):
    """Expand an old-style %-format output filename *pat* against *ctx*."""
    if not pat:
        return pat
    # BUG: alias expansion shouldn't be made against template fragments
    # rewritten from %-format strings, but we have no easy way to partially
    # disable the expansion.
    tmpl = _buildfntemplate(pat, **props)
    return rendertemplate(ctx, tmpl, pycompat.byteskwargs(props))
1294 1292
1295 1293
def isstdiofilename(pat):
    """True if the given pat looks like a filename denoting stdin/stdout"""
    if not pat:
        return True
    return pat == b'-'
1299 1297
1300 1298
1301 1299 class _unclosablefile(object):
1302 1300 def __init__(self, fp):
1303 1301 self._fp = fp
1304 1302
1305 1303 def close(self):
1306 1304 pass
1307 1305
1308 1306 def __iter__(self):
1309 1307 return iter(self._fp)
1310 1308
1311 1309 def __getattr__(self, attr):
1312 1310 return getattr(self._fp, attr)
1313 1311
1314 1312 def __enter__(self):
1315 1313 return self
1316 1314
1317 1315 def __exit__(self, exc_type, exc_value, exc_tb):
1318 1316 pass
1319 1317
1320 1318
def makefileobj(ctx, pat, mode=b'wb', **props):
    """Open the output file described by *pat* for *ctx*.

    An empty pattern or '-' maps to the ui's stdout (or stdin for read
    modes), wrapped so that close() is a no-op.
    """
    writable = mode not in (b'r', b'rb')

    if not isstdiofilename(pat):
        fn = makefilename(ctx, pat, **props)
        return open(fn, mode)

    ui = ctx.repo().ui
    stream = ui.fout if writable else ui.fin
    return _unclosablefile(stream)
1333 1331
1334 1332
def openstorage(repo, cmd, file_, opts, returnrevlog=False):
    """opens the changelog, manifest, a filelog or a given revlog

    'opts' selects the storage via the mutually-exclusive b'changelog',
    b'manifest' and b'dir' keys, falling back to the filelog for 'file_'.
    With 'returnrevlog' the underlying revlog instance is returned (and,
    as a last resort, 'file_' is opened as a raw '.i' revlog path).
    Raises error.Abort on conflicting or insufficient options.
    """
    cl = opts[b'changelog']
    mf = opts[b'manifest']
    dir = opts[b'dir']
    msg = None
    # validate the option combination before touching any storage
    if cl and mf:
        msg = _(b'cannot specify --changelog and --manifest at the same time')
    elif cl and dir:
        msg = _(b'cannot specify --changelog and --dir at the same time')
    elif cl or mf or dir:
        if file_:
            msg = _(b'cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _(
                b'cannot specify --changelog or --manifest or --dir '
                b'without a repository'
            )
    if msg:
        raise error.Abort(msg)

    r = None
    if repo:
        if cl:
            r = repo.unfiltered().changelog
        elif dir:
            # directory logs only exist with treemanifest
            if not scmutil.istreemanifest(repo):
                raise error.Abort(
                    _(
                        b"--dir can only be used on repos with "
                        b"treemanifest enabled"
                    )
                )
            if not dir.endswith(b'/'):
                dir = dir + b'/'
            dirlog = repo.manifestlog.getstorage(dir)
            # empty storage means the directory log does not exist
            if len(dirlog):
                r = dirlog
        elif mf:
            r = repo.manifestlog.getstorage(b'')
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog

    # Not all storage may be revlogs. If requested, try to return an actual
    # revlog instance.
    if returnrevlog:
        if isinstance(r, revlog.revlog):
            pass
        elif util.safehasattr(r, b'_revlog'):
            r = r._revlog  # pytype: disable=attribute-error
        elif r is not None:
            raise error.Abort(_(b'%r does not appear to be a revlog') % r)

    if not r:
        if not returnrevlog:
            raise error.Abort(_(b'cannot give path to non-revlog'))

        # last resort: open 'file_' as a raw revlog index path
        if not file_:
            raise error.CommandError(cmd, _(b'invalid arguments'))
        if not os.path.isfile(file_):
            raise error.Abort(_(b"revlog '%s' not found") % file_)
        r = revlog.revlog(
            vfsmod.vfs(encoding.getcwd(), audit=False), file_[:-2] + b".i"
        )
    return r
1402 1400
1403 1401
def openrevlog(repo, cmd, file_, opts):
    """Obtain a revlog backing storage of an item.

    This is similar to ``openstorage()`` except it always returns a revlog.

    In most cases, a caller cares about the main storage object - not the
    revlog backing it. Therefore, this function should only be used by code
    that needs to examine low-level revlog implementation details. e.g. debug
    commands.
    """
    # thin wrapper: delegate everything, forcing the revlog result
    return openstorage(repo, cmd, file_, opts, returnrevlog=True)
1415 1413
1416 1414
1417 1415 def copy(ui, repo, pats, opts, rename=False):
1418 1416 check_incompatible_arguments(opts, b'forget', [b'dry_run'])
1419 1417
1420 1418 # called with the repo lock held
1421 1419 #
1422 1420 # hgsep => pathname that uses "/" to separate directories
1423 1421 # ossep => pathname that uses os.sep to separate directories
1424 1422 cwd = repo.getcwd()
1425 1423 targets = {}
1426 1424 forget = opts.get(b"forget")
1427 1425 after = opts.get(b"after")
1428 1426 dryrun = opts.get(b"dry_run")
1429 1427 rev = opts.get(b'at_rev')
1430 1428 if rev:
1431 1429 if not forget and not after:
1432 1430 # TODO: Remove this restriction and make it also create the copy
1433 1431 # targets (and remove the rename source if rename==True).
1434 1432 raise error.Abort(_(b'--at-rev requires --after'))
1435 1433 ctx = scmutil.revsingle(repo, rev)
1436 1434 if len(ctx.parents()) > 1:
1437 1435 raise error.Abort(_(b'cannot mark/unmark copy in merge commit'))
1438 1436 else:
1439 1437 ctx = repo[None]
1440 1438
1441 1439 pctx = ctx.p1()
1442 1440
1443 1441 uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
1444 1442
1445 1443 if forget:
1446 1444 if ctx.rev() is None:
1447 1445 new_ctx = ctx
1448 1446 else:
1449 1447 if len(ctx.parents()) > 1:
1450 1448 raise error.Abort(_(b'cannot unmark copy in merge commit'))
1451 1449 # avoid cycle context -> subrepo -> cmdutil
1452 1450 from . import context
1453 1451
1454 1452 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1455 1453 new_ctx = context.overlayworkingctx(repo)
1456 1454 new_ctx.setbase(ctx.p1())
1457 1455 mergemod.graft(repo, ctx, wctx=new_ctx)
1458 1456
1459 1457 match = scmutil.match(ctx, pats, opts)
1460 1458
1461 1459 current_copies = ctx.p1copies()
1462 1460 current_copies.update(ctx.p2copies())
1463 1461
1464 1462 uipathfn = scmutil.getuipathfn(repo)
1465 1463 for f in ctx.walk(match):
1466 1464 if f in current_copies:
1467 1465 new_ctx[f].markcopied(None)
1468 1466 elif match.exact(f):
1469 1467 ui.warn(
1470 1468 _(
1471 1469 b'%s: not unmarking as copy - file is not marked as copied\n'
1472 1470 )
1473 1471 % uipathfn(f)
1474 1472 )
1475 1473
1476 1474 if ctx.rev() is not None:
1477 1475 with repo.lock():
1478 1476 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1479 1477 new_node = mem_ctx.commit()
1480 1478
1481 1479 if repo.dirstate.p1() == ctx.node():
1482 1480 with repo.dirstate.parentchange():
1483 1481 scmutil.movedirstate(repo, repo[new_node])
1484 1482 replacements = {ctx.node(): [new_node]}
1485 1483 scmutil.cleanupnodes(
1486 1484 repo, replacements, b'uncopy', fixphase=True
1487 1485 )
1488 1486
1489 1487 return
1490 1488
1491 1489 pats = scmutil.expandpats(pats)
1492 1490 if not pats:
1493 1491 raise error.Abort(_(b'no source or destination specified'))
1494 1492 if len(pats) == 1:
1495 1493 raise error.Abort(_(b'no destination specified'))
1496 1494 dest = pats.pop()
1497 1495
1498 1496 def walkpat(pat):
1499 1497 srcs = []
1500 1498 # TODO: Inline and simplify the non-working-copy version of this code
1501 1499 # since it shares very little with the working-copy version of it.
1502 1500 ctx_to_walk = ctx if ctx.rev() is None else pctx
1503 1501 m = scmutil.match(ctx_to_walk, [pat], opts, globbed=True)
1504 1502 for abs in ctx_to_walk.walk(m):
1505 1503 rel = uipathfn(abs)
1506 1504 exact = m.exact(abs)
1507 1505 if abs not in ctx:
1508 1506 if abs in pctx:
1509 1507 if not after:
1510 1508 if exact:
1511 1509 ui.warn(
1512 1510 _(
1513 1511 b'%s: not copying - file has been marked '
1514 1512 b'for remove\n'
1515 1513 )
1516 1514 % rel
1517 1515 )
1518 1516 continue
1519 1517 else:
1520 1518 if exact:
1521 1519 ui.warn(
1522 1520 _(b'%s: not copying - file is not managed\n') % rel
1523 1521 )
1524 1522 continue
1525 1523
1526 1524 # abs: hgsep
1527 1525 # rel: ossep
1528 1526 srcs.append((abs, rel, exact))
1529 1527 return srcs
1530 1528
1531 1529 if ctx.rev() is not None:
1532 1530 rewriteutil.precheck(repo, [ctx.rev()], b'uncopy')
1533 1531 absdest = pathutil.canonpath(repo.root, cwd, dest)
1534 1532 if ctx.hasdir(absdest):
1535 1533 raise error.Abort(
1536 1534 _(b'%s: --at-rev does not support a directory as destination')
1537 1535 % uipathfn(absdest)
1538 1536 )
1539 1537 if absdest not in ctx:
1540 1538 raise error.Abort(
1541 1539 _(b'%s: copy destination does not exist in %s')
1542 1540 % (uipathfn(absdest), ctx)
1543 1541 )
1544 1542
1545 1543 # avoid cycle context -> subrepo -> cmdutil
1546 1544 from . import context
1547 1545
1548 1546 copylist = []
1549 1547 for pat in pats:
1550 1548 srcs = walkpat(pat)
1551 1549 if not srcs:
1552 1550 continue
1553 1551 for abs, rel, exact in srcs:
1554 1552 copylist.append(abs)
1555 1553
1556 1554 if not copylist:
1557 1555 raise error.Abort(_(b'no files to copy'))
1558 1556 # TODO: Add support for `hg cp --at-rev . foo bar dir` and
1559 1557 # `hg cp --at-rev . dir1 dir2`, preferably unifying the code with the
1560 1558 # existing functions below.
1561 1559 if len(copylist) != 1:
1562 1560 raise error.Abort(_(b'--at-rev requires a single source'))
1563 1561
1564 1562 new_ctx = context.overlayworkingctx(repo)
1565 1563 new_ctx.setbase(ctx.p1())
1566 1564 mergemod.graft(repo, ctx, wctx=new_ctx)
1567 1565
1568 1566 new_ctx.markcopied(absdest, copylist[0])
1569 1567
1570 1568 with repo.lock():
1571 1569 mem_ctx = new_ctx.tomemctx_for_amend(ctx)
1572 1570 new_node = mem_ctx.commit()
1573 1571
1574 1572 if repo.dirstate.p1() == ctx.node():
1575 1573 with repo.dirstate.parentchange():
1576 1574 scmutil.movedirstate(repo, repo[new_node])
1577 1575 replacements = {ctx.node(): [new_node]}
1578 1576 scmutil.cleanupnodes(repo, replacements, b'copy', fixphase=True)
1579 1577
1580 1578 return
1581 1579
1582 1580 # abssrc: hgsep
1583 1581 # relsrc: ossep
1584 1582 # otarget: ossep
1585 1583 def copyfile(abssrc, relsrc, otarget, exact):
1586 1584 abstarget = pathutil.canonpath(repo.root, cwd, otarget)
1587 1585 if b'/' in abstarget:
1588 1586 # We cannot normalize abstarget itself, this would prevent
1589 1587 # case only renames, like a => A.
1590 1588 abspath, absname = abstarget.rsplit(b'/', 1)
1591 1589 abstarget = repo.dirstate.normalize(abspath) + b'/' + absname
1592 1590 reltarget = repo.pathto(abstarget, cwd)
1593 1591 target = repo.wjoin(abstarget)
1594 1592 src = repo.wjoin(abssrc)
1595 1593 state = repo.dirstate[abstarget]
1596 1594
1597 1595 scmutil.checkportable(ui, abstarget)
1598 1596
1599 1597 # check for collisions
1600 1598 prevsrc = targets.get(abstarget)
1601 1599 if prevsrc is not None:
1602 1600 ui.warn(
1603 1601 _(b'%s: not overwriting - %s collides with %s\n')
1604 1602 % (
1605 1603 reltarget,
1606 1604 repo.pathto(abssrc, cwd),
1607 1605 repo.pathto(prevsrc, cwd),
1608 1606 )
1609 1607 )
1610 1608 return True # report a failure
1611 1609
1612 1610 # check for overwrites
1613 1611 exists = os.path.lexists(target)
1614 1612 samefile = False
1615 1613 if exists and abssrc != abstarget:
1616 1614 if repo.dirstate.normalize(abssrc) == repo.dirstate.normalize(
1617 1615 abstarget
1618 1616 ):
1619 1617 if not rename:
1620 1618 ui.warn(_(b"%s: can't copy - same file\n") % reltarget)
1621 1619 return True # report a failure
1622 1620 exists = False
1623 1621 samefile = True
1624 1622
1625 1623 if not after and exists or after and state in b'mn':
1626 1624 if not opts[b'force']:
1627 1625 if state in b'mn':
1628 1626 msg = _(b'%s: not overwriting - file already committed\n')
1629 1627 if after:
1630 1628 flags = b'--after --force'
1631 1629 else:
1632 1630 flags = b'--force'
1633 1631 if rename:
1634 1632 hint = (
1635 1633 _(
1636 1634 b"('hg rename %s' to replace the file by "
1637 1635 b'recording a rename)\n'
1638 1636 )
1639 1637 % flags
1640 1638 )
1641 1639 else:
1642 1640 hint = (
1643 1641 _(
1644 1642 b"('hg copy %s' to replace the file by "
1645 1643 b'recording a copy)\n'
1646 1644 )
1647 1645 % flags
1648 1646 )
1649 1647 else:
1650 1648 msg = _(b'%s: not overwriting - file exists\n')
1651 1649 if rename:
1652 1650 hint = _(
1653 1651 b"('hg rename --after' to record the rename)\n"
1654 1652 )
1655 1653 else:
1656 1654 hint = _(b"('hg copy --after' to record the copy)\n")
1657 1655 ui.warn(msg % reltarget)
1658 1656 ui.warn(hint)
1659 1657 return True # report a failure
1660 1658
1661 1659 if after:
1662 1660 if not exists:
1663 1661 if rename:
1664 1662 ui.warn(
1665 1663 _(b'%s: not recording move - %s does not exist\n')
1666 1664 % (relsrc, reltarget)
1667 1665 )
1668 1666 else:
1669 1667 ui.warn(
1670 1668 _(b'%s: not recording copy - %s does not exist\n')
1671 1669 % (relsrc, reltarget)
1672 1670 )
1673 1671 return True # report a failure
1674 1672 elif not dryrun:
1675 1673 try:
1676 1674 if exists:
1677 1675 os.unlink(target)
1678 1676 targetdir = os.path.dirname(target) or b'.'
1679 1677 if not os.path.isdir(targetdir):
1680 1678 os.makedirs(targetdir)
1681 1679 if samefile:
1682 1680 tmp = target + b"~hgrename"
1683 1681 os.rename(src, tmp)
1684 1682 os.rename(tmp, target)
1685 1683 else:
1686 1684 # Preserve stat info on renames, not on copies; this matches
1687 1685 # Linux CLI behavior.
1688 1686 util.copyfile(src, target, copystat=rename)
1689 1687 srcexists = True
1690 1688 except IOError as inst:
1691 1689 if inst.errno == errno.ENOENT:
1692 1690 ui.warn(_(b'%s: deleted in working directory\n') % relsrc)
1693 1691 srcexists = False
1694 1692 else:
1695 1693 ui.warn(
1696 1694 _(b'%s: cannot copy - %s\n')
1697 1695 % (relsrc, encoding.strtolocal(inst.strerror))
1698 1696 )
1699 1697 return True # report a failure
1700 1698
1701 1699 if ui.verbose or not exact:
1702 1700 if rename:
1703 1701 ui.status(_(b'moving %s to %s\n') % (relsrc, reltarget))
1704 1702 else:
1705 1703 ui.status(_(b'copying %s to %s\n') % (relsrc, reltarget))
1706 1704
1707 1705 targets[abstarget] = abssrc
1708 1706
1709 1707 # fix up dirstate
1710 1708 scmutil.dirstatecopy(
1711 1709 ui, repo, ctx, abssrc, abstarget, dryrun=dryrun, cwd=cwd
1712 1710 )
1713 1711 if rename and not dryrun:
1714 1712 if not after and srcexists and not samefile:
1715 1713 rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
1716 1714 repo.wvfs.unlinkpath(abssrc, rmdir=rmdir)
1717 1715 ctx.forget([abssrc])
1718 1716
1719 1717 # pat: ossep
1720 1718 # dest ossep
1721 1719 # srcs: list of (hgsep, hgsep, ossep, bool)
1722 1720 # return: function that takes hgsep and returns ossep
1723 1721 def targetpathfn(pat, dest, srcs):
1724 1722 if os.path.isdir(pat):
1725 1723 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1726 1724 abspfx = util.localpath(abspfx)
1727 1725 if destdirexists:
1728 1726 striplen = len(os.path.split(abspfx)[0])
1729 1727 else:
1730 1728 striplen = len(abspfx)
1731 1729 if striplen:
1732 1730 striplen += len(pycompat.ossep)
1733 1731 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1734 1732 elif destdirexists:
1735 1733 res = lambda p: os.path.join(
1736 1734 dest, os.path.basename(util.localpath(p))
1737 1735 )
1738 1736 else:
1739 1737 res = lambda p: dest
1740 1738 return res
1741 1739
1742 1740 # pat: ossep
1743 1741 # dest ossep
1744 1742 # srcs: list of (hgsep, hgsep, ossep, bool)
1745 1743 # return: function that takes hgsep and returns ossep
1746 1744 def targetpathafterfn(pat, dest, srcs):
1747 1745 if matchmod.patkind(pat):
1748 1746 # a mercurial pattern
1749 1747 res = lambda p: os.path.join(
1750 1748 dest, os.path.basename(util.localpath(p))
1751 1749 )
1752 1750 else:
1753 1751 abspfx = pathutil.canonpath(repo.root, cwd, pat)
1754 1752 if len(abspfx) < len(srcs[0][0]):
1755 1753 # A directory. Either the target path contains the last
1756 1754 # component of the source path or it does not.
1757 1755 def evalpath(striplen):
1758 1756 score = 0
1759 1757 for s in srcs:
1760 1758 t = os.path.join(dest, util.localpath(s[0])[striplen:])
1761 1759 if os.path.lexists(t):
1762 1760 score += 1
1763 1761 return score
1764 1762
1765 1763 abspfx = util.localpath(abspfx)
1766 1764 striplen = len(abspfx)
1767 1765 if striplen:
1768 1766 striplen += len(pycompat.ossep)
1769 1767 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
1770 1768 score = evalpath(striplen)
1771 1769 striplen1 = len(os.path.split(abspfx)[0])
1772 1770 if striplen1:
1773 1771 striplen1 += len(pycompat.ossep)
1774 1772 if evalpath(striplen1) > score:
1775 1773 striplen = striplen1
1776 1774 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
1777 1775 else:
1778 1776 # a file
1779 1777 if destdirexists:
1780 1778 res = lambda p: os.path.join(
1781 1779 dest, os.path.basename(util.localpath(p))
1782 1780 )
1783 1781 else:
1784 1782 res = lambda p: dest
1785 1783 return res
1786 1784
1787 1785 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
1788 1786 if not destdirexists:
1789 1787 if len(pats) > 1 or matchmod.patkind(pats[0]):
1790 1788 raise error.Abort(
1791 1789 _(
1792 1790 b'with multiple sources, destination must be an '
1793 1791 b'existing directory'
1794 1792 )
1795 1793 )
1796 1794 if util.endswithsep(dest):
1797 1795 raise error.Abort(_(b'destination %s is not a directory') % dest)
1798 1796
1799 1797 tfn = targetpathfn
1800 1798 if after:
1801 1799 tfn = targetpathafterfn
1802 1800 copylist = []
1803 1801 for pat in pats:
1804 1802 srcs = walkpat(pat)
1805 1803 if not srcs:
1806 1804 continue
1807 1805 copylist.append((tfn(pat, dest, srcs), srcs))
1808 1806 if not copylist:
1809 1807 raise error.Abort(_(b'no files to copy'))
1810 1808
1811 1809 errors = 0
1812 1810 for targetpath, srcs in copylist:
1813 1811 for abssrc, relsrc, exact in srcs:
1814 1812 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
1815 1813 errors += 1
1816 1814
1817 1815 return errors != 0
1818 1816
1819 1817
## facility to let extensions process additional data into an import patch
# lists of hook identifiers, executed in order
extrapreimport = []  # run before commit
extrapostimport = []  # run after commit
# mapping from identifier to the actual import hook function
#
# 'preimport' hooks are run before the commit is made and are provided the
# following arguments:
# - repo: the localrepository instance,
# - patchdata: data extracted from patch header (cf m.patch.patchheadermap),
# - extra: the future extra dictionary of the changeset, please mutate it,
# - opts: the import options.
# XXX ideally, we would just pass a ctx ready to be computed, that would allow
# mutation of the in-memory commit and more. Feel free to rework the code to
# get there.
extrapreimportmap = {}
# 'postimport' hooks are run after the commit is made and are provided the
# following argument:
# - ctx: the changectx created by import.
extrapostimportmap = {}
1840 1838
1841 1839
def tryimportone(ui, repo, patchdata, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it is a simple code translation
    from the import command. Feel free to make it better.

    :patchdata: a dictionary containing parsed patch data (such as from
                ``patch.extract()``)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
           (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a ``(msg, node, rejects)`` tuple: a status message (or None when
    there was nothing to import), the new node (or None), and whether hunks
    were rejected during a --partial import.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    tmpname = patchdata.get(b'filename')
    message = patchdata.get(b'message')
    user = opts.get(b'user') or patchdata.get(b'user')
    date = opts.get(b'date') or patchdata.get(b'date')
    branch = patchdata.get(b'branch')
    nodeid = patchdata.get(b'nodeid')
    p1 = patchdata.get(b'p1')
    p2 = patchdata.get(b'p2')

    nocommit = opts.get(b'no_commit')
    importbranch = opts.get(b'import_branch')
    update = not opts.get(b'bypass')
    strip = opts[b"strip"]
    prefix = opts[b"prefix"]
    sim = float(opts.get(b'similarity') or 0)

    # no filename in the patch data means there is nothing to import
    if not tmpname:
        return None, None, False

    rejects = False

    # commit message precedence: command line, then patch, then editor
    cmdline_message = logmessage(ui, opts)
    if cmdline_message:
        # pickup the cmdline msg
        message = cmdline_message
    elif message:
        # pickup the patch msg
        message = message.strip()
    else:
        # launch the editor
        message = None
    ui.debug(b'message:\n%s\n' % (message or b''))

    if len(parents) == 1:
        parents.append(repo[nullid])
    if opts.get(b'exact'):
        if not nodeid or not p1:
            raise error.Abort(_(b'not a Mercurial patch'))
        p1 = repo[p1]
        p2 = repo[p2 or nullid]
    elif p2:
        try:
            p1 = repo[p1]
            p2 = repo[p2]
            # Without any options, consider p2 only if the
            # patch is being applied on top of the recorded
            # first parent.
            if p1 != parents[0]:
                p1 = parents[0]
                p2 = repo[nullid]
        except error.RepoError:
            # recorded parents are unknown locally; fall back to wdir parents
            p1, p2 = parents
        if p2.node() == nullid:
            ui.warn(
                _(
                    b"warning: import the patch as a normal revision\n"
                    b"(use --exact to import the patch as a merge)\n"
                )
            )
    else:
        p1, p2 = parents

    n = None
    if update:
        # apply the patch to the working directory
        if p1 != parents[0]:
            updatefunc(repo, p1.node())
        if p2 != parents[1]:
            repo.setparents(p1.node(), p2.node())

        if opts.get(b'exact') or importbranch:
            repo.dirstate.setbranch(branch or b'default')

        partial = opts.get(b'partial', False)
        files = set()
        try:
            patch.patch(
                ui,
                repo,
                tmpname,
                strip=strip,
                prefix=prefix,
                files=files,
                eolmode=None,
                similarity=sim / 100.0,
            )
        except error.PatchError as e:
            # with --partial, record the rejects but keep what applied
            if not partial:
                raise error.Abort(pycompat.bytestr(e))
            if partial:
                rejects = True

        files = list(files)
        if nocommit:
            if message:
                msgs.append(message)
        else:
            if opts.get(b'exact') or p2:
                # If you got here, you either use --force and know what
                # you are doing or used --exact or a merge patch while
                # being updated to its first parent.
                m = None
            else:
                m = scmutil.matchfiles(repo, files or [])
            editform = mergeeditform(repo[None], b'import.normal')
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(
                    editform=editform, **pycompat.strkwargs(opts)
                )
            # let extensions (e.g. evolve) inject extra changeset data
            extra = {}
            for idfunc in extrapreimport:
                extrapreimportmap[idfunc](repo, patchdata, extra, opts)
            overrides = {}
            if partial:
                overrides[(b'ui', b'allowemptycommit')] = True
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = repo.commit(
                    message, user, date, match=m, editor=editor, extra=extra
                )
            for idfunc in extrapostimport:
                extrapostimportmap[idfunc](repo[n])
    else:
        # --bypass: apply the patch to an in-memory store, no wdir update
        if opts.get(b'exact') or importbranch:
            branch = branch or b'default'
        else:
            branch = p1.branch()
        store = patch.filestore()
        try:
            files = set()
            try:
                patch.patchrepo(
                    ui,
                    repo,
                    p1,
                    store,
                    tmpname,
                    strip,
                    prefix,
                    files,
                    eolmode=None,
                )
            except error.PatchError as e:
                raise error.Abort(stringutil.forcebytestr(e))
            if opts.get(b'exact'):
                editor = None
            else:
                editor = getcommiteditor(editform=b'import.bypass')
            memctx = context.memctx(
                repo,
                (p1.node(), p2.node()),
                message,
                files=files,
                filectxfn=store,
                user=user,
                date=date,
                branch=branch,
                editor=editor,
            )

            overrides = {}
            if opts.get(b'secret'):
                overrides[(b'phases', b'new-commit')] = b'secret'
            with repo.ui.configoverride(overrides, b'import'):
                n = memctx.commit()
        finally:
            store.close()
    if opts.get(b'exact') and nocommit:
        # --exact with --no-commit is still useful in that it does merge
        # and branch bits
        ui.warn(_(b"warning: can't check exact import with --no-commit\n"))
    elif opts.get(b'exact') and (not n or hex(n) != nodeid):
        raise error.Abort(_(b'patch is damaged or loses information'))
    msg = _(b'applied to working directory')
    if n:
        # i18n: refers to a short changeset id
        msg = _(b'created %s') % short(n)
    return msg, n, rejects
2043 2041
2044 2042
# facility to let extensions include additional data in an exported patch
# list of identifiers to be executed in order
extraexport = []
# mapping from identifier to the actual export function
# each function has to return a string to be added to the header, or None
# it is given two arguments (sequencenumber, changectx)
extraexportmap = {}
2052 2050
2053 2051
def _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts):
    """Emit a single changeset as an "HG changeset patch" to formatter ``fm``.

    Writes the standard patch header (user, date, branch, node, parents),
    any extension-provided extra headers, the description, and the diff
    against the base parent.  ``switch_parent`` diffs against the second
    parent (when present) instead of the first.  ``seqno`` is the 1-based
    position of this patch in the export and is passed to extra-header hooks.
    """
    node = scmutil.binnode(ctx)
    parents = [p.node() for p in ctx.parents() if p]
    branch = ctx.branch()
    if switch_parent:
        # diff against p2 instead of p1
        parents.reverse()

    if parents:
        prev = parents[0]
    else:
        prev = nullid

    fm.context(ctx=ctx)
    fm.plain(b'# HG changeset patch\n')
    fm.write(b'user', b'# User %s\n', ctx.user())
    fm.plain(b'# Date %d %d\n' % ctx.date())
    fm.write(b'date', b'# %s\n', fm.formatdate(ctx.date()))
    # the Branch header is omitted for the default branch
    fm.condwrite(
        branch and branch != b'default', b'branch', b'# Branch %s\n', branch
    )
    fm.write(b'node', b'# Node ID %s\n', hex(node))
    fm.plain(b'# Parent %s\n' % hex(prev))
    if len(parents) > 1:
        fm.plain(b'# Parent %s\n' % hex(parents[1]))
    fm.data(parents=fm.formatlist(pycompat.maplist(hex, parents), name=b'node'))

    # TODO: redesign extraexportmap function to support formatter
    for headerid in extraexport:
        header = extraexportmap[headerid](seqno, ctx)
        if header is not None:
            fm.plain(b'# %s\n' % header)

    fm.write(b'desc', b'%s\n', ctx.description().rstrip())
    fm.plain(b'\n')

    if fm.isplain():
        # plain output: stream labeled diff chunks directly
        chunkiter = patch.diffui(repo, prev, node, match, opts=diffopts)
        for chunk, label in chunkiter:
            fm.plain(chunk, label=label)
    else:
        # structured output: attach the whole diff as one data field
        chunkiter = patch.diff(repo, prev, node, match, opts=diffopts)
        # TODO: make it structured?
        fm.data(diff=b''.join(chunkiter))
2097 2095
2098 2096
def _exportfile(repo, revs, fm, dest, switch_parent, diffopts, match):
    """Export changesets to stdout or a single file.

    Destination names starting with ``<`` (such as ``<unnamed>``) denote a
    pseudo-file and are not echoed via ``ui.note``.
    """
    seqno = 0
    for rev in revs:
        seqno += 1
        ctx = repo[rev]
        if not dest.startswith(b'<'):
            repo.ui.note(b"%s\n" % dest)
        fm.startitem()
        _exportsingle(repo, ctx, fm, match, switch_parent, seqno, diffopts)
2107 2105
2108 2106
def _exportfntemplate(
    repo, revs, basefm, fntemplate, switch_parent, diffopts, match
):
    """Export changesets to possibly multiple files.

    The file name for each revision is rendered from ``fntemplate``;
    revisions whose names collide are appended to the same file, in order.
    """
    total = len(revs)
    revwidth = max(len(str(rev)) for rev in revs)
    # group revisions by rendered destination, preserving first-seen order
    filemap = util.sortdict()  # filename: [(seqno, rev), ...]

    seqno = 0
    for rev in revs:
        seqno += 1
        dest = makefilename(
            repo[rev], fntemplate, total=total, seqno=seqno, revwidth=revwidth
        )
        filemap.setdefault(dest, []).append((seqno, rev))

    for dest, entries in filemap.items():
        with formatter.maybereopen(basefm, dest) as fm:
            repo.ui.note(b"%s\n" % dest)
            for seqno, rev in entries:
                fm.startitem()
                _exportsingle(
                    repo, repo[rev], fm, match, switch_parent, seqno, diffopts
                )
2133 2131
2134 2132
def _prefetchchangedfiles(repo, revs, match):
    """Prefetch every file touched by ``revs`` that passes ``match``.

    Collects the union of changed files across the revisions, then asks
    scmutil to prefetch that set for each revision (a remotefilelog-style
    warm-up before export reads the file data).
    """
    touched = {
        f
        for rev in revs
        for f in repo[rev].files()
        if not match or match(f)
    }
    filematcher = scmutil.matchfiles(repo, touched)
    scmutil.prefetchfiles(repo, [(rev, filematcher) for rev in revs])
2144 2142
2145 2143
def export(
    repo,
    revs,
    basefm,
    fntemplate=b'hg-%h.patch',
    switch_parent=False,
    opts=None,
    match=None,
):
    '''export changesets as hg patches

    Args:
      repo: The repository from which we're exporting revisions.
      revs: A list of revisions to export as revision numbers.
      basefm: A formatter to which patches should be written.
      fntemplate: An optional string to use for generating patch file names.
      switch_parent: If True, show diffs against second parent when not nullid.
          Default is false, which always shows diff against p1.
      opts: diff options to use for generating the patch.
      match: If specified, only export changes to files matching this matcher.

    Returns:
      Nothing.

    Side Effect:
      "HG Changeset Patch" data is emitted to one of the following
      destinations:
        fntemplate specified: Each rev is written to a unique file named using
                              the given template.
        Otherwise: All revs will be written to basefm.
    '''
    # warm the file cache up front so the diff generation below is fast
    _prefetchchangedfiles(repo, revs, match)

    if fntemplate:
        _exportfntemplate(
            repo, revs, basefm, fntemplate, switch_parent, opts, match
        )
    else:
        _exportfile(
            repo, revs, basefm, b'<unnamed>', switch_parent, opts, match
        )
2187 2185
2188 2186
def exportfile(repo, revs, fp, switch_parent=False, opts=None, match=None):
    """Export changesets to the given file stream.

    A plain-text formatter is opened over ``fp``; the stream's ``name``
    attribute, when present, is used as the destination label.
    """
    _prefetchchangedfiles(repo, revs, match)

    destname = getattr(fp, 'name', b'<unnamed>')
    with formatter.formatter(repo.ui, fp, b'export', {}) as fmtr:
        _exportfile(repo, revs, fmtr, destname, switch_parent, opts, match)
2196 2194
2197 2195
def showmarker(fm, marker, index=None):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function.

    Writes, in order: optional index, predecessor node, successor nodes,
    flags, optional parent nodes, date, and the remaining metadata dict.
    """
    if index is not None:
        fm.write(b'index', b'%i ', index)
    fm.write(b'prednode', b'%s ', hex(marker.prednode()))
    succs = marker.succnodes()
    fm.condwrite(
        succs,
        b'succnodes',
        b'%s ',
        fm.formatlist(map(hex, succs), name=b'node'),
    )
    fm.write(b'flag', b'%X ', marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        fm.write(
            b'parentnodes',
            b'{%s} ',
            fm.formatlist(map(hex, parents), name=b'node', sep=b', '),
        )
    fm.write(b'date', b'(%s) ', fm.formatdate(marker.date()))
    # the date is shown separately above, so drop it from the metadata dump
    meta = marker.metadata().copy()
    meta.pop(b'date', None)
    smeta = pycompat.rapply(pycompat.maybebytestr, meta)
    fm.write(
        b'metadata', b'{%s}', fm.formatdict(smeta, fmt=b'%r: %r', sep=b', ')
    )
    fm.plain(b'\n')
2228 2226
2229 2227
def finddate(ui, repo, date):
    """Return (as bytes) the tipmost revision matching the given date spec."""
    matching = repo.revs(b'date(%s)', date)
    try:
        tiprev = matching.max()
    except ValueError:
        # max() raises ValueError on an empty revset
        raise error.Abort(_(b"revision matching date not found"))

    ui.status(
        _(b"found revision %d from %s\n")
        % (tiprev, dateutil.datestr(repo[tiprev].date()))
    )
    return b'%d' % tiprev
2243 2241
2244 2242
def increasingwindows(windowsize=8, sizelimit=512):
    """Yield window sizes that double each step, capped at ``sizelimit``.

    Produces ``windowsize``, ``2 * windowsize``, ... indefinitely; once the
    yielded value is at least ``sizelimit`` it is repeated forever.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size *= 2
2250 2248
2251 2249
def _walkrevs(repo, opts):
    """Resolve the revisions ``hg log``-style commands should visit.

    Returns a smartset in newest-first order, derived from the --rev and
    --follow/--follow-first options in ``opts``.
    """
    # Default --rev value depends on --follow but --follow behavior
    # depends on revisions resolved from --rev...
    follow = opts.get(b'follow') or opts.get(b'follow_first')
    revspec = opts.get(b'rev')
    if follow and revspec:
        # follow: restrict to ancestors of the specified revisions
        revs = scmutil.revrange(repo, revspec)
        revs = repo.revs(b'reverse(::%ld)', revs)
    elif revspec:
        revs = scmutil.revrange(repo, revspec)
    elif follow and repo.dirstate.p1() == nullid:
        # following from an unborn working directory: nothing to walk
        revs = smartset.baseset()
    elif follow:
        revs = repo.revs(b'reverse(:.)')
    else:
        revs = smartset.spanset(repo)
        revs.reverse()
    return revs
2270
2271
class FileWalkError(Exception):
    """Raised when a file history cannot be walked via filelogs alone."""

    pass
2274
2275
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    Side effect: ``fncache`` (a dict) is populated with rev -> [filename]
    entries for every revision added to the returned set.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)

    def filerevs(filelog, last):
        """
        Only files, no patterns.  Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in pycompat.xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append(
                (linkrev, parentlinkrevs, follow and filelog.renamed(n))
            )

        return reversed(revs)

    def iterfiles():
        # yield (filename, filenode-or-None) for each matched file, then for
        # each copy source discovered while walking (appended to ``copies``)
        pctx = repo[b'.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise error.Abort(
                        _(
                            b'cannot follow file not in parent '
                            b'revision: "%s"'
                        )
                        % filename
                    )
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise error.Abort(
                        _(b'cannot follow nonexistent file: "%s"') % file_
                    )
                raise FileWalkError(b"Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = {filelog.linkrev(last)}

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevs(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
                # XXX insert 1327 fix here
                if flparentlinkrevs:
                    ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
2381
2382
class _followfilter(object):
    """Incrementally decide whether revisions belong to a --follow walk.

    ``match(rev)`` must be fed revisions one at a time; the first call fixes
    the starting revision, and later calls track either descendants (when
    revisions increase) or ancestors (when they decrease) via ``self.roots``.
    With ``onlyfirst`` set, only first parents are followed.
    """

    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        # nullrev marks "start revision not yet seen"
        self.startrev = nullrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        """Return True if ``rev`` is connected to the start revision."""

        def realparents(rev):
            # parent revisions of ``rev``, excluding nullrev; handles the
            # working directory pseudo-revision via its parent contexts
            try:
                if self.onlyfirst:
                    return self.repo.changelog.parentrevs(rev)[0:1]
                else:
                    return filter(
                        lambda x: x != nullrev,
                        self.repo.changelog.parentrevs(rev),
                    )
            except error.WdirUnsupported:
                prevs = [p.rev() for p in self.repo[rev].parents()]
                if self.onlyfirst:
                    return prevs[:1]
                else:
                    return prevs

        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
2429
2430
def walkchangerevs(repo, revs, makefilematcher, prepare):
    '''Iterate over files and the revs in a "windowed" way.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    if not revs:
        return []
    change = repo.__getitem__

    def iterate():
        pending = iter(revs)
        exhausted = False
        for windowsize in increasingwindows():
            # pull up to windowsize revisions off the input iterator
            window = []
            for _unused in pycompat.xrange(windowsize):
                rev = next(pending, None)
                if rev is None:
                    exhausted = True
                    break
                window.append(rev)
            # gather data forwards (ascending rev order) ...
            for rev in sorted(window):
                ctx = change(rev)
                prepare(ctx, makefilematcher(ctx))
            # ... then yield in the caller's requested order
            for rev in window:
                yield change(rev)

            if exhausted:
                break

    return iterate()
2471 2290
2472 2291
def add(ui, repo, match, prefix, uipathfn, explicitonly, **opts):
    """Add files matched by 'match' to the dirstate (backend of ``hg add``).

    Recurses into subrepositories listed in the working context.  Returns
    the list of matched files that could not be added.
    """
    bad = []
    # record files the matcher complains about while still forwarding to
    # the original bad-file callback
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        # audit filenames for case collisions on case-insensitive filesystems
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)

    match = repo.narrowmatch(match, includeexact=True)
    badmatch = matchmod.badmatch(match, badfn)
    dirstate = repo.dirstate
    # We don't want to just call wctx.walk here, since it would return a lot of
    # clean files, which we aren't interested in and takes time.
    for f in sorted(
        dirstate.walk(
            badmatch,
            subrepos=sorted(wctx.substate),
            unknown=True,
            ignored=False,
            full=False,
        )
    ):
        exact = match.exact(f)
        # non-exact matches are only added when not restricted to explicit
        # files and the path actually exists but is not yet tracked
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(
                    _(b'adding %s\n') % uipathfn(f), label=b'ui.addremove.added'
                )

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, match)
            subprefix = repo.wvfs.reljoin(prefix, subpath)
            subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
            if opts.get('subrepos'):
                # recurse fully into the subrepo
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, False, **opts)
                )
            else:
                # only add paths named explicitly inside the subrepo
                bad.extend(
                    sub.add(ui, submatch, subprefix, subuipathfn, True, **opts)
                )
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2531 2350
2532 2351
def addwebdirpath(repo, serverpath, webconf):
    """Register repo at serverpath in webconf, recursing into subrepos."""
    root = repo.root
    webconf[serverpath] = root
    repo.ui.debug(b'adding %s = %s\n' % (serverpath, root))

    # any revision touching .hgsub may have introduced subrepositories
    for rev in repo.revs(b'filelog("path:.hgsub")'):
        ctx = repo[rev]
        for subpath in ctx.substate:
            ctx.sub(subpath).addwebdirpath(serverpath, webconf)
2541 2360
2542 2361
def forget(
    ui, repo, match, prefix, uipathfn, explicitonly, dryrun, interactive
):
    """Stop tracking files matched by 'match' without deleting them.

    Backend of ``hg forget``.  Returns a pair (bad, forgot): paths that
    could not be forgotten and paths that were actually forgotten.
    """
    if dryrun and interactive:
        raise error.Abort(_(b"cannot specify both --dry-run and --interactive"))
    bad = []
    # record files the matcher complains about while still forwarding them
    badfn = lambda x, y: bad.append(x) or match.bad(x, y)
    wctx = repo[None]
    forgot = []

    s = repo.status(match=matchmod.badmatch(match, badfn), clean=True)
    # every tracked state qualifies for forgetting
    forget = sorted(s.modified + s.added + s.deleted + s.clean)
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        try:
            subbad, subforgot = sub.forget(
                submatch,
                subprefix,
                subuipathfn,
                dryrun=dryrun,
                interactive=interactive,
            )
            # re-prefix subrepo-relative paths for the outer repo
            bad.extend([subpath + b'/' + f for f in subbad])
            forgot.extend([subpath + b'/' + f for f in subforgot])
        except error.LookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    if not explicitonly:
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(
                            _(
                                b'not removing %s: '
                                b'file is already untracked\n'
                            )
                            % uipathfn(f)
                        )
                    bad.append(f)

    if interactive:
        responses = _(
            b'[Ynsa?]'
            b'$$ &Yes, forget this file'
            b'$$ &No, skip this file'
            b'$$ &Skip remaining files'
            b'$$ Include &all remaining files'
            b'$$ &? (display help)'
        )
        # iterate over a copy so entries can be removed while looping
        for filename in forget[:]:
            r = ui.promptchoice(
                _(b'forget %s %s') % (uipathfn(filename), responses)
            )
            if r == 4:  # ?
                while r == 4:
                    for c, t in ui.extractchoices(responses)[1]:
                        ui.write(b'%s - %s\n' % (c, encoding.lower(t)))
                    r = ui.promptchoice(
                        _(b'forget %s %s') % (uipathfn(filename), responses)
                    )
            if r == 0:  # yes
                continue
            elif r == 1:  # no
                forget.remove(filename)
            elif r == 2:  # Skip
                # drop this file and everything after it
                fnindex = forget.index(filename)
                del forget[fnindex:]
                break
            elif r == 3:  # All
                break

    for f in forget:
        if ui.verbose or not match.exact(f) or interactive:
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )

    if not dryrun:
        rejected = wctx.forget(forget, prefix)
        bad.extend(f for f in rejected if f in match.files())
        forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2640 2459
2641 2460
def files(ui, ctx, m, uipathfn, fm, fmt, subrepos):
    """List files of ctx matched by m (backend of ``hg files``).

    Returns 0 if at least one file was printed, 1 otherwise.
    """
    ret = 1

    # file contexts are only needed when size/flags will be shown
    needsfctx = ui.verbose or {b'size', b'flags'} & fm.datahint()
    if fm.isplain() and not needsfctx:
        # Fast path. The speed-up comes from skipping the formatter, and batching
        # calls to ui.write.
        buf = []
        for f in ctx.matches(m):
            buf.append(fmt % uipathfn(f))
            if len(buf) > 100:
                # flush in batches to limit write() call overhead
                ui.write(b''.join(buf))
                del buf[:]
            ret = 0
        if buf:
            ui.write(b''.join(buf))
    else:
        for f in ctx.matches(m):
            fm.startitem()
            fm.context(ctx=ctx)
            if needsfctx:
                fc = ctx[f]
                fm.write(b'size flags', b'% 10d % 1s ', fc.size(), fc.flags())
            fm.data(path=f)
            fm.plain(fmt % uipathfn(f))
            ret = 0

    for subpath in sorted(ctx.substate):
        submatch = matchmod.subdirmatcher(subpath, m)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            sub = ctx.sub(subpath)
            try:
                recurse = m.exact(subpath) or subrepos
                if (
                    sub.printfiles(ui, submatch, subuipathfn, fm, fmt, recurse)
                    == 0
                ):
                    ret = 0
            except error.LookupError:
                ui.status(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )

    return ret
2688 2507
2689 2508
def remove(
    ui, repo, m, prefix, uipathfn, after, force, subrepos, dryrun, warnings=None
):
    """Remove files matched by m (backend of ``hg remove``).

    Returns 0 on success, 1 when any file was skipped with a warning.
    When 'warnings' is supplied, warning messages are appended to it and
    the caller is responsible for emitting them; otherwise they are
    printed here at the end.
    """
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s.modified, s.added, s.deleted, s.clean

    wctx = repo[None]

    if warnings is None:
        warnings = []
        warn = True
    else:
        # a recursive (subrepo) call collects warnings in the caller's list
        warn = False

    subs = sorted(wctx.substate)
    progress = ui.makeprogress(
        _(b'searching'), total=len(subs), unit=_(b'subrepos')
    )
    for subpath in subs:
        submatch = matchmod.subdirmatcher(subpath, m)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        subuipathfn = scmutil.subdiruipathfn(subpath, uipathfn)
        if subrepos or m.exact(subpath) or any(submatch.files()):
            progress.increment()
            sub = wctx.sub(subpath)
            try:
                if sub.removefiles(
                    submatch,
                    subprefix,
                    subuipathfn,
                    after,
                    force,
                    subrepos,
                    dryrun,
                    warnings,
                ):
                    ret = 1
            except error.LookupError:
                warnings.append(
                    _(b"skipping missing subrepository: %s\n")
                    % uipathfn(subpath)
                )
    progress.complete()

    # warn about failure to delete explicit files/dirs
    deleteddirs = pathutil.dirs(deleted)
    files = m.files()
    progress = ui.makeprogress(
        _(b'deleting'), total=len(files), unit=_(b'files')
    )
    for f in files:

        def insubrepo():
            # whether f lives inside one of the subrepositories
            for subpath in wctx.substate:
                if f.startswith(subpath + b'/'):
                    return True
            return False

        progress.increment()
        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == b'.' or insubrepo() or f in subs:
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                warnings.append(
                    _(b'not removing %s: no tracked files\n') % uipathfn(f)
                )
            else:
                warnings.append(
                    _(b'not removing %s: file is untracked\n') % uipathfn(f)
                )
        # missing files will generate a warning elsewhere
        ret = 1
    progress.complete()

    if force:
        list = modified + deleted + clean + added
    elif after:
        # --after: only record already-deleted files, warn on the rest
        list = deleted
        remaining = modified + added + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=len(remaining), unit=_(b'files')
        )
        for f in remaining:
            progress.increment()
            if ui.verbose or (f in files):
                warnings.append(
                    _(b'not removing %s: file still exists\n') % uipathfn(f)
                )
                ret = 1
        progress.complete()
    else:
        list = deleted + clean
        progress = ui.makeprogress(
            _(b'skipping'), total=(len(modified) + len(added)), unit=_(b'files')
        )
        for f in modified:
            progress.increment()
            warnings.append(
                _(
                    b'not removing %s: file is modified (use -f'
                    b' to force removal)\n'
                )
                % uipathfn(f)
            )
            ret = 1
        for f in added:
            progress.increment()
            warnings.append(
                _(
                    b"not removing %s: file has been marked for add"
                    b" (use 'hg forget' to undo add)\n"
                )
                % uipathfn(f)
            )
            ret = 1
        progress.complete()

    list = sorted(list)
    progress = ui.makeprogress(
        _(b'deleting'), total=len(list), unit=_(b'files')
    )
    for f in list:
        if ui.verbose or not m.exact(f):
            progress.increment()
            ui.status(
                _(b'removing %s\n') % uipathfn(f), label=b'ui.addremove.removed'
            )
    progress.complete()

    if not dryrun:
        with repo.wlock():
            if not after:
                for f in list:
                    if f in added:
                        continue  # we never unlink added files on remove
                    rmdir = repo.ui.configbool(
                        b'experimental', b'removeemptydirs'
                    )
                    repo.wvfs.unlinkpath(f, ignoremissing=True, rmdir=rmdir)
            repo[None].forget(list)

    if warn:
        for warning in warnings:
            ui.warn(warning)

    return ret
2839 2658
2840 2659
2841 2660 def _catfmtneedsdata(fm):
2842 2661 return not fm.datahint() or b'data' in fm.datahint()
2843 2662
2844 2663
def _updatecatformatter(fm, ctx, matcher, path, decode):
    """Hook for adding data to the formatter used by ``hg cat``.

    Extensions (e.g., lfs) can wrap this to inject keywords/data, but must call
    this method first."""

    # Fetching the contents can be expensive (e.g. lfs), so only do it
    # when the formatter actually asked for the data field.
    if _catfmtneedsdata(fm):
        data = ctx[path].data()
        if decode:
            data = ctx.repo().wwritedata(path, data)
    else:
        data = b''
    fm.startitem()
    fm.context(ctx=ctx)
    fm.write(b'data', b'%s', data)
    fm.data(path=path)
2862 2681
2863 2682
def cat(ui, repo, ctx, matcher, basefm, fntemplate, prefix, **opts):
    """Write out contents of matched files from ctx (backend of ``hg cat``).

    Returns 0 if at least one file was written, 1 otherwise.
    """
    err = 1
    opts = pycompat.byteskwargs(opts)

    def write(path):
        # emit one file, either into a destination derived from fntemplate
        # or through the (possibly reopened) formatter
        filename = None
        if fntemplate:
            filename = makefilename(
                ctx, fntemplate, pathname=os.path.join(prefix, path)
            )
            # attempt to create the directory if it does not already exist
            try:
                os.makedirs(os.path.dirname(filename))
            except OSError:
                pass
        with formatter.maybereopen(basefm, filename) as fm:
            _updatecatformatter(fm, ctx, matcher, path, opts.get(b'decode'))

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mfl = repo.manifestlog
        mfnode = ctx.manifestnode()
        try:
            if mfnode and mfl[mfnode].find(file)[0]:
                if _catfmtneedsdata(basefm):
                    scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])
                write(file)
                return 0
        except KeyError:
            pass

    if _catfmtneedsdata(basefm):
        # batch-prefetch everything the walk below will read
        scmutil.prefetchfiles(repo, [(ctx.rev(), matcher)])

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.subdirmatcher(subpath, matcher)
            subprefix = os.path.join(prefix, subpath)
            if not sub.cat(
                submatch,
                basefm,
                fntemplate,
                subprefix,
                **pycompat.strkwargs(opts)
            ):
                err = 0
        except error.RepoLookupError:
            ui.status(
                _(b"skipping missing subrepository: %s\n") % uipathfn(subpath)
            )

    return err
2924 2743
2925 2744
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get(b'date')
    if rawdate:
        opts[b'date'] = dateutil.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get(b'addremove'):
        dsguard = dirstateguard.dirstateguard(repo, b'commit')
    else:
        dsguard = None
    with dsguard or util.nullcontextmanager():
        if dsguard:
            relative = scmutil.anypats(pats, opts)
            uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
            if scmutil.addremove(repo, matcher, b"", uipathfn, opts) != 0:
                raise error.Abort(
                    _(b"failed to mark all new/missing files as added/removed")
                )

        return commitfunc(ui, repo, message, matcher, opts)
2949 2768
2950 2769
def samefile(f, ctx1, ctx2):
    """True when f has identical content and flags in ctx1 and ctx2.

    A file absent from both manifests also counts as "same"."""
    in1 = f in ctx1.manifest()
    in2 = f in ctx2.manifest()
    if in1 and in2:
        a = ctx1.filectx(f)
        b = ctx2.filectx(f)
        return not a.cmp(b) and a.flags() == b.flags()
    # present on only one side -> different; absent on both -> same
    return not in1 and not in2
2961 2780
2962 2781
def amend(ui, repo, old, extra, pats, opts):
    """Rewrite changeset 'old' with working-copy changes (``hg commit --amend``).

    'extra' seeds the new changeset's extra dict; 'pats'/'opts' select
    which working-copy files participate.  Returns the node of the new
    changeset, or old.node() when nothing would change.
    """
    # avoid cycle context -> subrepo -> cmdutil
    from . import context

    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username()  # raise exception if username not set

    ui.note(_(b'amending changeset %s\n') % old)
    base = old.p1()

    with repo.wlock(), repo.lock(), repo.transaction(b'amend'):
        # Participating changesets:
        #
        # wctx     o - workingctx that contains changes from working copy
        #          |   to go into amending commit
        #          |
        # old      o - changeset to amend
        #          |
        # base     o - first parent of the changeset to amend
        wctx = repo[None]

        # Copy to avoid mutating input
        extra = extra.copy()
        # Update extra dict from amended commit (e.g. to preserve graft
        # source)
        extra.update(old.extra())

        # Also update it from the wctx
        extra.update(wctx.extra())

        # date-only change should be ignored?
        datemaydiffer = resolvecommitoptions(ui, opts)

        date = old.date()
        if opts.get(b'date'):
            date = dateutil.parsedate(opts.get(b'date'))
        user = opts.get(b'user') or old.user()

        if len(old.parents()) > 1:
            # ctx.files() isn't reliable for merges, so fall back to the
            # slower repo.status() method
            st = base.status(old)
            files = set(st.modified) | set(st.added) | set(st.removed)
        else:
            files = set(old.files())

        # add/remove the files to the working copy if the "addremove" option
        # was specified.
        matcher = scmutil.match(wctx, pats, opts)
        relative = scmutil.anypats(pats, opts)
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=relative)
        if opts.get(b'addremove') and scmutil.addremove(
            repo, matcher, b"", uipathfn, opts
        ):
            raise error.Abort(
                _(b"failed to mark all new/missing files as added/removed")
            )

        # Check subrepos. This depends on in-place wctx._status update in
        # subrepo.precommit(). To minimize the risk of this hack, we do
        # nothing if .hgsub does not exist.
        if b'.hgsub' in wctx or b'.hgsub' in old:
            subs, commitsubs, newsubstate = subrepoutil.precommit(
                ui, wctx, wctx._status, matcher
            )
            # amend should abort if commitsubrepos is enabled
            assert not commitsubs
            if subs:
                subrepoutil.writestate(repo, newsubstate)

        ms = mergestatemod.mergestate.read(repo)
        mergeutil.checkunresolved(ms)

        filestoamend = {f for f in wctx.files() if matcher(f)}

        changes = len(filestoamend) > 0
        if changes:
            # Recompute copies (avoid recording a -> b -> a)
            copied = copies.pathcopies(base, wctx, matcher)
            # NOTE(review): 'old.p2' is a bound method (not called), so it is
            # always truthy and this branch runs even for non-merges;
            # presumably 'old.p2().rev() != nullrev' was intended -- verify.
            if old.p2:
                copied.update(copies.pathcopies(old.p2(), wctx, matcher))

            # Prune files which were reverted by the updates: if old
            # introduced file X and the file was renamed in the working
            # copy, then those two files are the same and
            # we can discard X from our list of files. Likewise if X
            # was removed, it's no longer relevant. If X is missing (aka
            # deleted), old X must be preserved.
            files.update(filestoamend)
            files = [
                f
                for f in files
                if (f not in filestoamend or not samefile(f, wctx, base))
            ]

            def filectxfn(repo, ctx_, path):
                try:
                    # If the file being considered is not amongst the files
                    # to be amended, we should return the file context from the
                    # old changeset. This avoids issues when only some files in
                    # the working copy are being amended but there are also
                    # changes to other files from the old changeset.
                    if path not in filestoamend:
                        return old.filectx(path)

                    # Return None for removed files.
                    if path in wctx.removed():
                        return None

                    fctx = wctx[path]
                    flags = fctx.flags()
                    mctx = context.memfilectx(
                        repo,
                        ctx_,
                        fctx.path(),
                        fctx.data(),
                        islink=b'l' in flags,
                        isexec=b'x' in flags,
                        copysource=copied.get(path),
                    )
                    return mctx
                except KeyError:
                    return None

        else:
            ui.note(_(b'copying changeset %s to %s\n') % (old, base))

            # Use version of files as in the old cset
            def filectxfn(repo, ctx_, path):
                try:
                    return old.filectx(path)
                except KeyError:
                    return None

        # See if we got a message from -m or -l, if not, open the editor with
        # the message of the changeset to amend.
        message = logmessage(ui, opts)

        editform = mergeeditform(old, b'commit.amend')

        if not message:
            message = old.description()
            # Default if message isn't provided and --edit is not passed is to
            # invoke editor, but allow --no-edit. If somehow we don't have any
            # description, let's always start the editor.
            doedit = not message or opts.get(b'edit') in [True, None]
        else:
            # Default if message is provided is to not invoke editor, but allow
            # --edit.
            doedit = opts.get(b'edit') is True
        editor = getcommiteditor(edit=doedit, editform=editform)

        pureextra = extra.copy()
        extra[b'amend_source'] = old.hex()

        new = context.memctx(
            repo,
            parents=[base.node(), old.p2().node()],
            text=message,
            files=files,
            filectxfn=filectxfn,
            user=user,
            date=date,
            extra=extra,
            editor=editor,
        )

        newdesc = changelog.stripdesc(new.description())
        if (
            (not changes)
            and newdesc == old.description()
            and user == old.user()
            and (date == old.date() or datemaydiffer)
            and pureextra == old.extra()
        ):
            # nothing changed. continuing here would create a new node
            # anyway because of the amend_source noise.
            #
            # This is not what we expect from amend.
            return old.node()

        commitphase = None
        if opts.get(b'secret'):
            commitphase = phases.secret
        newid = repo.commitctx(new)
        ms.reset()

        # Reroute the working copy parent to the new changeset
        repo.setparents(newid, nullid)
        mapping = {old.node(): (newid,)}
        obsmetadata = None
        if opts.get(b'note'):
            obsmetadata = {b'note': encoding.fromlocal(opts[b'note'])}
        backup = ui.configbool(b'rewrite', b'backup-bundle')
        scmutil.cleanupnodes(
            repo,
            mapping,
            b'amend',
            metadata=obsmetadata,
            fixphase=True,
            targetphase=commitphase,
            backup=backup,
        )

        # Fixing the dirstate because localrepo.commitctx does not update
        # it. This is rather convenient because we did not need to update
        # the dirstate for all the files in the new commit which commitctx
        # could have done if it updated the dirstate. Now, we can
        # selectively update the dirstate only for the amended files.
        dirstate = repo.dirstate

        # Update the state of the files which were added and modified in the
        # amend to "normal" in the dirstate. We need to use "normallookup" since
        # the files may have changed since the command started; using "normal"
        # would mark them as clean but with uncommitted contents.
        normalfiles = set(wctx.modified() + wctx.added()) & filestoamend
        for f in normalfiles:
            dirstate.normallookup(f)

        # Update the state of files which were removed in the amend
        # to "removed" in the dirstate.
        removedfiles = set(wctx.removed()) & filestoamend
        for f in removedfiles:
            dirstate.drop(f)

        return newid
3191 3010
3192 3011
def commiteditor(repo, ctx, subs, editform=b''):
    """Return ctx's description, invoking the editor only when it is empty."""
    desc = ctx.description()
    if desc:
        return desc
    return commitforceeditor(
        repo, ctx, subs, editform=editform, unchangedmessagedetection=True
    )
3199 3018
3200 3019
def commitforceeditor(
    repo,
    ctx,
    subs,
    finishdesc=None,
    extramsg=None,
    editform=b'',
    unchangedmessagedetection=False,
):
    """Run the user's editor to obtain a commit message for ctx.

    Returns the edited text with HG: comment lines stripped.  Raises
    Abort on an empty message, or -- when unchangedmessagedetection is
    set -- when the templated text came back unmodified.
    """
    if not extramsg:
        extramsg = _(b"Leave message empty to abort commit.")

    # search for the most specific committemplate config, e.g. try
    # changeset.commit.amend, then changeset.commit, then changeset
    forms = [e for e in editform.split(b'.') if e]
    forms.insert(0, b'changeset')
    templatetext = None
    while forms:
        ref = b'.'.join(forms)
        if repo.ui.config(b'committemplate', ref):
            templatetext = committext = buildcommittemplate(
                repo, ctx, subs, extramsg, ref
            )
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = encoding.getcwd()
    os.chdir(repo.root)

    # make in-memory changes visible to external process
    tr = repo.currenttransaction()
    repo.dirstate.write(tr)
    pending = tr and tr.writepending() and repo.root

    editortext = repo.ui.edit(
        committext,
        ctx.user(),
        ctx.extra(),
        editform=editform,
        pending=pending,
        repopath=repo.path,
        action=b'commit',
    )
    text = editortext

    # strip away anything below this special string (used for editors that want
    # to display the diff)
    stripbelow = re.search(_linebelow, text, flags=re.MULTILINE)
    if stripbelow:
        text = text[: stripbelow.start()]

    # drop the HG: helper lines that were added for the editor session
    text = re.sub(b"(?m)^HG:.*(\n|$)", b"", text)
    os.chdir(olddir)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise error.Abort(_(b"empty commit message"))
    if unchangedmessagedetection and editortext == templatetext:
        raise error.Abort(_(b"commit message unchanged"))

    return text
3264 3083
3265 3084
def buildcommittemplate(repo, ctx, subs, extramsg, ref):
    """Render the committemplate named by ref for ctx and return the text."""
    ui = repo.ui
    spec = formatter.reference_templatespec(ref)
    t = logcmdutil.changesettemplater(ui, repo, spec)
    # expose every [committemplate] entry to the template engine
    t.t.cache.update(
        (k, templater.unquotestring(v))
        for k, v in repo.ui.configitems(b'committemplate')
    )

    if not extramsg:
        # the template expects a string, never None
        extramsg = b''

    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
3281 3100
3282 3101
def hgprefix(msg):
    """Prefix every non-empty line of msg with "HG: ", dropping blank lines."""
    prefixed = [b"HG: %s" % line for line in msg.split(b"\n") if line]
    return b"\n".join(prefixed)
3285 3104
3286 3105
def buildcommittext(repo, ctx, subs, extramsg):
    """Assemble the default editor text for committing ctx."""
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    edittext = []
    if ctx.description():
        edittext.append(ctx.description())
    edittext.append(b"")
    edittext.append(b"")  # Empty line between message and comments.
    edittext.append(
        hgprefix(
            _(
                b"Enter commit message."
                b" Lines beginning with 'HG:' are removed."
            )
        )
    )
    edittext.append(hgprefix(extramsg))
    edittext.append(b"HG: --")
    edittext.append(hgprefix(_(b"user: %s") % ctx.user()))
    if ctx.p2():
        edittext.append(hgprefix(_(b"branch merge")))
    if ctx.branch():
        edittext.append(hgprefix(_(b"branch '%s'") % ctx.branch()))
    if bookmarks.isactivewdirparent(repo):
        edittext.append(hgprefix(_(b"bookmark '%s'") % repo._activebookmark))
    # one HG: line per subrepo and per touched file
    edittext.extend(hgprefix(_(b"subrepo %s") % s) for s in subs)
    edittext.extend(hgprefix(_(b"added %s") % f) for f in added)
    edittext.extend(hgprefix(_(b"changed %s") % f) for f in modified)
    edittext.extend(hgprefix(_(b"removed %s") % f) for f in removed)
    if not (added or modified or removed):
        edittext.append(hgprefix(_(b"no files changed")))
    edittext.append(b"")

    return b"\n".join(edittext)
3320 3139
3321 3140
def commitstatus(repo, node, branch, bheads=None, opts=None):
    """Print status messages after a commit (new head, reopened branch).

    'bheads' is the set of branch heads as it was before the commit.
    """
    if opts is None:
        opts = {}
    ctx = repo[node]
    parents = ctx.parents()

    # a new head was created iff the node is not among the old heads and
    # no parent on the same branch was one of them
    if (
        not opts.get(b'amend')
        and bheads
        and node not in bheads
        and not any(
            p.node() in bheads and p.branch() == branch for p in parents
        )
    ):
        repo.ui.status(_(b'created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N y additional topo root
        #
        # B N y additional branch root
        # C N y additional topo head
        # H N n usual case
        #
        # B B y weird additional branch root
        # C B y branch merge
        # H B n merge with named branch
        #
        # C C y additional head from merge
        # C H n merge with a head
        #
        # H H n head merge: head count decreases

    if not opts.get(b'close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                repo.ui.status(
                    _(b'reopening closed branch head %d\n') % r.rev()
                )

    if repo.ui.debugflag:
        repo.ui.write(
            _(b'committed changeset %d:%s\n') % (ctx.rev(), ctx.hex())
        )
    elif repo.ui.verbose:
        repo.ui.write(_(b'committed changeset %d:%s\n') % (ctx.rev(), ctx))
3379 3198
3380 3199
def postcommitstatus(repo, pats, opts):
    """Return the working-directory status for the given patterns/options.

    Matches are evaluated against the working context; used right after a
    commit to report what is still modified in the working copy.
    """
    wctx = repo[None]
    matcher = scmutil.match(wctx, pats, opts)
    return repo.status(match=matcher)
3383 3202
3384 3203
def revert(ui, repo, ctx, *pats, **opts):
    """Restore files in the working directory to their state in ``ctx``.

    ``pats``/``opts`` select the files to revert (standard matcher rules).
    Every matched file is classified against both the dirstate and the
    target revision; the resulting per-action file lists are handed to
    _performrevert() for the actual filesystem/dirstate operations, and
    matching subrepos are reverted recursively at the end.
    """
    opts = pycompat.byteskwargs(opts)
    parent, p2 = repo.dirstate.parents()
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}
    uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)

    with repo.wlock():
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get(b'interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            matcher = matchmod.badmatch(m, lambda x, y: False)
            for abs in wctx.walk(matcher):
                names[abs] = m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress warnings for paths already collected, for
                # subrepos, and for directories that contain collected files
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + b'/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn(b"%s: %s\n" % (uipathfn(path), msg))

            for abs in ctx.walk(matchmod.badmatch(m, badfn)):
                if abs not in names:
                    names[abs] = m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(
                node1=node, match=m, unknown=True, ignored=True, clean=True
            )
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded  # dirstate added may need backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            mergeadd = set(dsmodified)
            for path in dsmodified:
                if path in mf:
                    mergeadd.remove(path)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == b'r':
                dsremoved.add(src)
                names[src] = True

        # determine the exact nature of the deleted changesets
        deladded = set(_deleted)
        for path in _deleted:
            if path in mf:
                deladded.remove(path)
        deleted = _deleted - deladded

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != b'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == b'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present
        # at the same path. If such a file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {
            b'revert': ([], _(b'reverting %s\n')),
            b'add': ([], _(b'adding %s\n')),
            b'remove': ([], _(b'removing %s\n')),
            b'drop': ([], _(b'removing %s\n')),
            b'forget': ([], _(b'forgetting %s\n')),
            b'undelete': ([], _(b'undeleting %s\n')),
            b'noop': (None, _(b'no changes needed to %s\n')),
            b'unknown': (None, _(b'file not managed: %s\n')),
        }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backupinteractive = 3  # do backup if interactively modified
        backup = 2  # unconditionally do backup
        check = 1  # check if the existing file differs from target
        discard = 0  # never do backup
        if opts.get(b'no_backup'):
            backupinteractive = backup = check = discard
        if interactive:
            dsmodifiedbackup = backupinteractive
        else:
            dsmodifiedbackup = backup
        tobackup = set()

        backupanddel = actions[b'remove']
        if not opts.get(b'no_backup'):
            backupanddel = actions[b'drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup
            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified, actions[b'revert'], discard),
            # Modified compared to target, but local file is deleted
            (deleted, actions[b'revert'], discard),
            # Modified compared to target, local change
            (dsmodified, actions[b'revert'], dsmodifiedbackup),
            # Added since target
            (added, actions[b'remove'], discard),
            # Added in working directory
            (dsadded, actions[b'forget'], discard),
            # Added since target, have local modification
            (modadded, backupanddel, backup),
            # Added since target but file is missing in working directory
            (deladded, actions[b'drop'], discard),
            # Removed since target, before working copy parent
            (removed, actions[b'add'], discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk, actions[b'add'], check),
            # Removed since target, marked as such in working copy parent
            (dsremoved, actions[b'undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk, actions[b'undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean, actions[b'noop'], discard),
            # Existing file, not tracked anywhere
            (unknown, actions[b'unknown'], discard),
        )

        for abs, exact in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup:
                        # If in interactive mode, don't automatically create
                        # .orig files (issue4793)
                        if dobackup == backupinteractive:
                            tobackup.add(abs)
                        elif backup <= dobackup or wctx[abs].cmp(ctx[abs]):
                            absbakname = scmutil.backuppath(ui, repo, abs)
                            bakname = os.path.relpath(
                                absbakname, start=repo.root
                            )
                            ui.note(
                                _(b'saving current version of %s as %s\n')
                                % (uipathfn(abs), uipathfn(bakname))
                            )
                            if not opts.get(b'dry_run'):
                                if interactive:
                                    util.copyfile(target, absbakname)
                                else:
                                    util.rename(target, absbakname)
                    if opts.get(b'dry_run'):
                        if ui.verbose or not exact:
                            ui.status(msg % uipathfn(abs))
                elif exact:
                    ui.warn(msg % uipathfn(abs))
                break

        if not opts.get(b'dry_run'):
            needdata = (b'revert', b'add', b'undelete')
            oplist = [actions[name][0] for name in needdata]
            prefetch = scmutil.prefetchfiles
            matchfiles = scmutil.matchfiles(
                repo, [f for sublist in oplist for f in sublist]
            )
            prefetch(
                repo, [(ctx.rev(), matchfiles)],
            )
            match = scmutil.match(repo[None], pats)
            _performrevert(
                repo,
                ctx,
                names,
                uipathfn,
                actions,
                match,
                interactive,
                tobackup,
            )

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(
                        ctx.substate[sub], *pats, **pycompat.strkwargs(opts)
                    )
                except KeyError:
                    raise error.Abort(
                        b"subrepository '%s' does not exist in %s!"
                        % (sub, short(ctx.node()))
                    )
3694 3513
3695 3514
def _performrevert(
    repo,
    ctx,
    names,
    uipathfn,
    actions,
    match,
    interactive=False,
    tobackup=None,
):
    """Actually perform all the actions computed for revert.

    This is an independent function so extensions can plug in and react
    to the imminent revert.

    ``names`` maps repo-absolute paths to whether they were named exactly
    by the matcher; ``actions`` is the action-name -> (files, message)
    mapping built by revert().

    Make sure you have the working directory locked when calling this
    function.
    """
    parent, p2 = repo.dirstate.parents()
    node = ctx.node()
    # files the user declined to touch in interactive mode
    excluded_files = []

    def checkout(f):
        # write f's content (and flags) from the target ctx into the wdir
        fc = ctx[f]
        repo.wwrite(f, fc.data(), fc.flags())

    def doremove(f):
        try:
            rmdir = repo.ui.configbool(b'experimental', b'removeemptydirs')
            repo.wvfs.unlinkpath(f, rmdir=rmdir)
        except OSError:
            # best-effort: the file may already be gone
            pass
        repo.dirstate.remove(f)

    def prntstatusmsg(action, f):
        exact = names[f]
        if repo.ui.verbose or not exact:
            repo.ui.status(actions[action][1] % uipathfn(f))

    audit_path = pathutil.pathauditor(repo.root, cached=True)
    for f in actions[b'forget'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"forget added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'forget', f)
                repo.dirstate.drop(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'forget', f)
            repo.dirstate.drop(f)
    for f in actions[b'remove'][0]:
        audit_path(f)
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"remove added file %s (Yn)?$$ &Yes $$ &No") % uipathfn(f)
            )
            if choice == 0:
                prntstatusmsg(b'remove', f)
                doremove(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'remove', f)
            doremove(f)
    for f in actions[b'drop'][0]:
        audit_path(f)
        prntstatusmsg(b'drop', f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    newlyaddedandmodifiedfiles = set()
    if interactive:
        # Prompt the user for changes to revert
        torevert = [f for f in actions[b'revert'][0] if f not in excluded_files]
        m = scmutil.matchfiles(repo, torevert)
        diffopts = patch.difffeatureopts(
            repo.ui,
            whitespace=True,
            section=b'commands',
            configprefix=b'revert.interactive.',
        )
        diffopts.nodates = True
        diffopts.git = True
        operation = b'apply'
        if node == parent:
            if repo.ui.configbool(
                b'experimental', b'revert.interactive.select-to-keep'
            ):
                operation = b'keep'
            else:
                operation = b'discard'

        # diff direction depends on whether the user selects hunks to
        # apply or hunks to discard/keep
        if operation == b'apply':
            diff = patch.diff(repo, None, ctx.node(), m, opts=diffopts)
        else:
            diff = patch.diff(repo, ctx.node(), None, m, opts=diffopts)
        originalchunks = patch.parsepatch(diff)

        try:

            chunks, opts = recordfilter(
                repo.ui, originalchunks, match, operation=operation
            )
            if operation == b'discard':
                chunks = patch.reversehunks(chunks)

        except error.PatchError as err:
            raise error.Abort(_(b'error parsing patch: %s') % err)

        # FIXME: when doing an interactive revert of a copy, there's no way of
        # performing a partial revert of the added file, the only option is
        # "remove added file <name> (Yn)?", so we don't need to worry about the
        # alsorestore value. Ideally we'd be able to partially revert
        # copied/renamed files.
        newlyaddedandmodifiedfiles, unusedalsorestore = newandmodified(
            chunks, originalchunks
        )
        if tobackup is None:
            tobackup = set()
        # Apply changes
        fp = stringio()
        # chunks are serialized per file, but files aren't sorted
        for f in sorted({c.header.filename() for c in chunks if ishunk(c)}):
            prntstatusmsg(b'revert', f)
        files = set()
        for c in chunks:
            if ishunk(c):
                abs = c.header.filename()
                # Create a backup file only if this hunk should be backed up
                if c.header.filename() in tobackup:
                    target = repo.wjoin(abs)
                    bakname = scmutil.backuppath(repo.ui, repo, abs)
                    util.copyfile(target, bakname)
                    tobackup.remove(abs)
                if abs not in files:
                    files.add(abs)
                    if operation == b'keep':
                        checkout(abs)
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except error.PatchError as err:
                raise error.Abort(pycompat.bytestr(err))
        del fp
    else:
        for f in actions[b'revert'][0]:
            prntstatusmsg(b'revert', f)
            checkout(f)
            if normal:
                normal(f)

    for f in actions[b'add'][0]:
        # Don't checkout modified files, they are already created by the diff
        if f not in newlyaddedandmodifiedfiles:
            prntstatusmsg(b'add', f)
            checkout(f)
        repo.dirstate.add(f)

    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions[b'undelete'][0]:
        if interactive:
            choice = repo.ui.promptchoice(
                _(b"add back removed file %s (Yn)?$$ &Yes $$ &No") % f
            )
            if choice == 0:
                prntstatusmsg(b'undelete', f)
                checkout(f)
                normal(f)
            else:
                excluded_files.append(f)
        else:
            prntstatusmsg(b'undelete', f)
            checkout(f)
            normal(f)

    copied = copies.pathcopies(repo[parent], ctx)

    # restore copy/rename metadata for everything we just (re)created
    for f in (
        actions[b'add'][0] + actions[b'undelete'][0] + actions[b'revert'][0]
    ):
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3894 3713
3895 3714
# Extension hook points: extensions append callables to these util.hooks()
# instances and the corresponding commands invoke them.

# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl, destbranch, destpeer, outgoing)
summaryremotehooks = util.hooks()
3913 3732
3914 3733
def checkunfinished(repo, commit=False, skipmerge=False):
    '''Abort if an unfinished multistep operation (like graft) is found.

    It's probably good to check this right before bailifchanged().
    Non-clearable states are examined first so things like rebase take
    precedence over update.
    '''
    states = statemod._unfinishedstates

    # First pass: non-clearable states.
    for state in states:
        skip = (
            state._clearable
            or (commit and state._allowcommit)
            or state._reportonly
        )
        if skip:
            continue
        if state.isunfinished(repo):
            raise error.Abort(state.msg(), hint=state.hint())

    # Second pass: clearable states.
    for state in states:
        skip = (
            not state._clearable
            or (commit and state._allowcommit)
            or (state._opname == b'merge' and skipmerge)
            or state._reportonly
        )
        if skip:
            continue
        if state.isunfinished(repo):
            raise error.Abort(state.msg(), hint=state.hint())
3942 3761
3943 3762
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.

    Raises error.Abort if a non-clearable unfinished operation is in
    progress; otherwise removes the state file of every clearable
    unfinished operation (except merge).
    '''
    for state in statemod._unfinishedstates:
        if state._reportonly:
            continue
        if not state._clearable and state.isunfinished(repo):
            raise error.Abort(state.msg(), hint=state.hint())

    for s in statemod._unfinishedstates:
        # BUG FIX: the second condition previously read `state._reportonly`,
        # accidentally reusing the variable leaked from the loop above (i.e.
        # always testing the *last* state of the first loop, and raising
        # NameError if the state list were empty). Each candidate must be
        # checked against its own _reportonly flag.
        if s._opname == b'merge' or s._reportonly:
            continue
        if s._clearable and s.isunfinished(repo):
            util.unlink(repo.vfs.join(s._fname))
3959 3778
3960 3779
def getunfinishedstate(repo):
    '''Return the statecheck object of the first unfinished operation,
    or None when nothing is in progress.'''
    return next(
        (s for s in statemod._unfinishedstates if s.isunfinished(repo)),
        None,
    )
3968 3787
3969 3788
def howtocontinue(repo):
    '''Check for an unfinished operation and return the command to finish
    it.

    statemod._unfinishedstates is scanned for an unfinished operation
    that supports continuation, and the corresponding "continue: ..."
    message is built from it.

    Returns a (msg, warning) tuple. 'msg' is a string (or None) and
    'warning' is a boolean.
    '''
    contmsg = _(b"continue: %s")
    continuable = (s for s in statemod._unfinishedstates if s._continueflag)
    for state in continuable:
        if state.isunfinished(repo):
            return contmsg % state.continuemsg(), True
    wdir_dirty = repo[None].dirty(missing=True, merge=False, branch=False)
    if wdir_dirty:
        return contmsg % _(b"hg commit"), False
    return None, None
3990 3809
3991 3810
def checkafterresolved(repo):
    '''Inform the user about the next action after completing hg resolve

    If there's an unfinished operation that supports the continue flag,
    the message goes through repo.ui.warn; otherwise it goes through
    repo.ui.note.
    '''
    msg, warning = howtocontinue(repo)
    if msg is None:
        return
    reporter = repo.ui.warn if warning else repo.ui.note
    reporter(b"%s\n" % msg)
4006 3825
4007 3826
def wrongtooltocontinue(repo, task):
    '''Raise an abort suggesting how to properly continue if there is an
    active task.

    Uses howtocontinue() to find the active task.

    If there's no task (repo.ui.note for 'hg commit'), it does not offer
    a hint.
    '''
    msg, warning = howtocontinue(repo)
    hint = msg if warning else None
    raise error.Abort(_(b'no %s in progress') % task, hint=hint)
4022 3841
4023 3842
def abortgraft(ui, repo, graftstate):
    """Abort an interrupted graft and roll back to the pre-graft state.

    Strips the changesets created by the graft (when safe) and updates
    the working directory back to the changeset the graft started from.
    Returns 0. Raises error.Abort when no graft is in progress or when
    the saved graftstate lacks the 'newnodes' data needed to abort.
    """
    if not graftstate.exists():
        raise error.Abort(_(b"no interrupted graft to abort"))
    statedata = readgraftstate(repo, graftstate)
    newnodes = statedata.get(b'newnodes')
    if newnodes is None:
        # and old graft state which does not have all the data required to abort
        # the graft
        raise error.Abort(_(b"cannot abort using an old graftstate"))

    # changeset from which graft operation was started
    if len(newnodes) > 0:
        startctx = repo[newnodes[0]].p1()
    else:
        startctx = repo[b'.']
    # whether to strip or not
    cleanup = False

    if newnodes:
        newnodes = [repo[r].rev() for r in newnodes]
        cleanup = True
        # checking that none of the newnodes turned public or is public
        immutable = [c for c in newnodes if not repo[c].mutable()]
        if immutable:
            repo.ui.warn(
                _(b"cannot clean up public changesets %s\n")
                % b', '.join(bytes(repo[r]) for r in immutable),
                hint=_(b"see 'hg help phases' for details"),
            )
            cleanup = False

        # checking that no new nodes are created on top of grafted revs
        desc = set(repo.changelog.descendants(newnodes))
        if desc - set(newnodes):
            repo.ui.warn(
                _(
                    b"new changesets detected on destination "
                    b"branch, can't strip\n"
                )
            )
            cleanup = False

        if cleanup:
            # both wlock and lock are needed: we update the working
            # directory and strip from the store
            with repo.wlock(), repo.lock():
                mergemod.clean_update(startctx)
                # stripping the new nodes created
                strippoints = [
                    c.node() for c in repo.set(b"roots(%ld)", newnodes)
                ]
                repair.strip(repo.ui, repo, strippoints, backup=False)

    if not cleanup:
        # we don't update to the startnode if we can't strip
        startctx = repo[b'.']
        mergemod.clean_update(startctx)

    ui.status(_(b"graft aborted\n"))
    ui.status(_(b"working directory is now at %s\n") % startctx.hex()[:12])
    graftstate.delete()
    return 0
4086 3905
4087 3906
def readgraftstate(repo, graftstate):
    # type: (Any, statemod.cmdstate) -> Dict[bytes, Any]
    """Read the graft state file and return a dict of the stored data.

    Falls back to the legacy format (one node per line in 'graftstate')
    when the structured state file is corrupted.
    """
    try:
        data = graftstate.read()
    except error.CorruptedState:
        # legacy/corrupt state: a plain list of node hashes, one per line
        raw = repo.vfs.read(b'graftstate')
        data = {b'nodes': raw.splitlines()}
    return data
4096 3915
4097 3916
def hgabortgraft(ui, repo):
    """Entry point for aborting a graft via 'hg abort'."""
    with repo.wlock():
        state = statemod.cmdstate(repo, b'graftstate')
        ret = abortgraft(ui, repo, state)
    return ret
@@ -1,1212 +1,1210
1 1 # logcmdutil.py - utility for log-like commands
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import itertools
11 11 import os
12 12 import posixpath
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 nullid,
17 17 wdirid,
18 18 wdirrev,
19 19 )
20 20
21 21 from .thirdparty import attr
22 22
23 23 from . import (
24 24 dagop,
25 25 error,
26 26 formatter,
27 27 graphmod,
28 28 match as matchmod,
29 29 mdiff,
30 30 patch,
31 31 pathutil,
32 32 pycompat,
33 33 revset,
34 34 revsetlang,
35 35 scmutil,
36 36 smartset,
37 37 templatekw,
38 38 templater,
39 39 util,
40 40 )
41 41 from .utils import (
42 42 dateutil,
43 43 stringutil,
44 44 )
45 45
46 46
47 47 if pycompat.TYPE_CHECKING:
48 48 from typing import (
49 49 Any,
50 50 Callable,
51 51 Dict,
52 52 List,
53 53 Optional,
54 54 Tuple,
55 55 )
56 56
57 57 for t in (Any, Callable, Dict, List, Optional, Tuple):
58 58 assert t
59 59
60 60
def getlimit(opts):
    """Return the log limit from -l/--limit as a positive int, or None.

    Aborts when the option value is not a positive integer.
    """
    raw = opts.get(b'limit')
    if not raw:
        return None
    try:
        parsed = int(raw)
    except ValueError:
        raise error.Abort(_(b'limit must be a positive integer'))
    if parsed <= 0:
        raise error.Abort(_(b'limit must be positive'))
    return parsed
74 74
75 75
def diffordiffstat(
    ui,
    repo,
    diffopts,
    ctx1,
    ctx2,
    match,
    changes=None,
    stat=False,
    fp=None,
    graphwidth=0,
    prefix=b'',
    root=b'',
    listsubrepos=False,
    hunksfilterfn=None,
):
    '''Show a diff of ctx1..ctx2, or a diffstat when ``stat`` is True.

    Output is written to ``fp`` when given, otherwise to ``ui`` (labeled
    when the ui supports it). ``root`` restricts the diff to a
    subdirectory, ``prefix`` is prepended to reported paths, and matching
    subrepos are diffed recursively at the end.
    '''
    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = b''
    copysourcematch = None

    def compose(f, g):
        return lambda x: f(g(x))

    def pathfn(f):
        return posixpath.join(prefix, f)

    if relroot != b'':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uipathfn = scmutil.getuipathfn(repo, legacyrelativevalue=True)
        uirelroot = uipathfn(pathfn(relroot))
        relroot += b'/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(
                    _(b'warning: %s not inside relative root %s\n')
                    % (uipathfn(pathfn(matchroot)), uirelroot)
                )

        relrootmatch = scmutil.match(ctx2, pats=[relroot], default=b'path')
        match = matchmod.intersectmatchers(match, relrootmatch)
        copysourcematch = relrootmatch

        checkroot = repo.ui.configbool(
            b'devel', b'all-warnings'
        ) or repo.ui.configbool(b'devel', b'check-relroot')

        def relrootpathfn(f):
            # strip the relative-root prefix from reported paths
            if checkroot and not f.startswith(relroot):
                raise AssertionError(
                    b"file %s doesn't start with relroot %s" % (f, relroot)
                )
            return f[len(relroot) :]

        pathfn = compose(relrootpathfn, pathfn)

    if stat:
        diffopts = diffopts.copy(context=0, noprefix=False)
        width = 80
        if not ui.plain():
            width = ui.termwidth() - graphwidth
        # If an explicit --root was given, don't respect ui.relative-paths
        if not relroot:
            pathfn = compose(scmutil.getuipathfn(repo), pathfn)

    chunks = ctx2.diff(
        ctx1,
        match,
        changes,
        opts=diffopts,
        pathfn=pathfn,
        copysourcematch=copysourcematch,
        hunksfilterfn=hunksfilterfn,
    )

    if fp is not None or ui.canwritewithoutlabels():
        out = fp or ui
        if stat:
            chunks = [patch.diffstat(util.iterlines(chunks), width=width)]
        for chunk in util.filechunkiter(util.chunkbuffer(chunks)):
            out.write(chunk)
    else:
        if stat:
            chunks = patch.diffstatui(util.iterlines(chunks), width=width)
        else:
            chunks = patch.difflabel(
                lambda chunks, **kwargs: chunks, chunks, opts=diffopts
            )
        if ui.canbatchlabeledwrites():

            def gen():
                for chunk, label in chunks:
                    yield ui.label(chunk, label=label)

            for chunk in util.filechunkiter(util.chunkbuffer(gen())):
                ui.write(chunk)
        else:
            for chunk, label in chunks:
                ui.write(chunk, label=label)

    node2 = ctx2.node()
    for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
        tempnode2 = node2
        try:
            if node2 is not None:
                tempnode2 = ctx2.substate[subpath][1]
        except KeyError:
            # A subrepo that existed in node1 was deleted between node1 and
            # node2 (inclusive). Thus, ctx2's substate won't contain that
            # subpath. The best we can do is to ignore it.
            tempnode2 = None
        submatch = matchmod.subdirmatcher(subpath, match)
        subprefix = repo.wvfs.reljoin(prefix, subpath)
        if listsubrepos or match.exact(subpath) or any(submatch.files()):
            sub.diff(
                ui,
                diffopts,
                tempnode2,
                submatch,
                changes=changes,
                stat=stat,
                fp=fp,
                prefix=subprefix,
            )
203 203
204 204
class changesetdiffer(object):
    """Generate diff of changeset with pre-configured filtering functions

    The _makefilematcher() and _makehunksfilter() hooks are replaced by
    assignment (see getrevs() and getlinerangerevs()) to restrict which
    files and hunks are shown for a given revision.
    """

    def _makefilematcher(self, ctx):
        # default: include all files of the revision
        return scmutil.matchall(ctx.repo())

    def _makehunksfilter(self, ctx):
        # default: None disables hunk filtering in diffordiffstat()
        return None

    def showdiff(self, ui, ctx, diffopts, graphwidth=0, stat=False):
        """Write the diff (or diffstat if stat=True) of ctx against its p1"""
        diffordiffstat(
            ui,
            ctx.repo(),
            diffopts,
            ctx.p1(),
            ctx,
            match=self._makefilematcher(ctx),
            stat=stat,
            graphwidth=graphwidth,
            hunksfilterfn=self._makehunksfilter(ctx),
        )
226 226
227 227
def changesetlabels(ctx):
    """Return the space-separated ui label names for displaying *ctx*."""
    parts = [b'log.changeset', b'changeset.%s' % ctx.phasestr()]
    if ctx.obsolete():
        parts.append(b'changeset.obsolete')
    if ctx.isunstable():
        parts.append(b'changeset.unstable')
        parts.extend(
            b'instability.%s' % instability
            for instability in ctx.instabilities()
        )
    return b' '.join(parts)
237 237
238 238
class changesetprinter(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, differ=None, diffopts=None, buffered=False):
        self.ui = ui
        self.repo = repo
        # when buffered, _show() output is captured per revision in
        # self.hunk and written out later by flush()
        self.buffered = buffered
        self._differ = differ or changesetdiffer()
        self._diffopts = patch.diffallopts(ui, diffopts)
        self._includestat = diffopts and diffopts.get(b'stat')
        self._includediff = diffopts and diffopts.get(b'patch')
        self.header = {}  # rev -> rendered header text (buffered mode)
        self.hunk = {}  # rev -> rendered changeset text (buffered mode)
        self.lastheader = None
        self.footer = None
        self._columns = templatekw.getlogcolumns()

    def flush(self, ctx):
        '''write buffered header and hunk of ctx, skipping repeated headers'''
        rev = ctx.rev()
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                # only emit a header when it differs from the previous one
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]

    def close(self):
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, **props):
        '''show ctx, either immediately or into the per-revision buffer'''
        props = pycompat.byteskwargs(props)
        if self.buffered:
            self.ui.pushbuffer(labeled=True)
            self._show(ctx, copies, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, props)

    def _show(self, ctx, copies, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        graphwidth = props.get(b'graphwidth', 0)

        if self.ui.quiet:
            # quiet mode shows only the changeset identifier
            self.ui.write(
                b"%s\n" % scmutil.formatchangeid(ctx), label=b'log.node'
            )
            return

        columns = self._columns
        self.ui.write(
            columns[b'changeset'] % scmutil.formatchangeid(ctx),
            label=changesetlabels(ctx),
        )

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != b'default':
            self.ui.write(columns[b'branch'] % branch, label=b'log.branch')

        for nsname, ns in pycompat.iteritems(self.repo.names):
            # branches has special logic already handled above, so here we just
            # skip it
            if nsname == b'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name, label=b'log.%s' % ns.colorname)
        if self.ui.debugflag:
            self.ui.write(
                columns[b'phase'] % ctx.phasestr(), label=b'log.phase'
            )
        for pctx in scmutil.meaningfulparents(self.repo, ctx):
            label = b'log.parent changeset.%s' % pctx.phasestr()
            self.ui.write(
                columns[b'parent'] % scmutil.formatchangeid(pctx), label=label
            )

        if self.ui.debugflag:
            mnode = ctx.manifestnode()
            if mnode is None:
                # working-directory revision: no manifest node exists yet
                mnode = wdirid
                mrev = wdirrev
            else:
                mrev = self.repo.manifestlog.rev(mnode)
            self.ui.write(
                columns[b'manifest']
                % scmutil.formatrevnode(self.ui, mrev, mnode),
                label=b'ui.debug log.manifest',
            )
        self.ui.write(columns[b'user'] % ctx.user(), label=b'log.user')
        self.ui.write(
            columns[b'date'] % dateutil.datestr(ctx.date()), label=b'log.date'
        )

        if ctx.isunstable():
            instabilities = ctx.instabilities()
            self.ui.write(
                columns[b'instability'] % b', '.join(instabilities),
                label=b'log.instability',
            )

        elif ctx.obsolete():
            self._showobsfate(ctx)

        self._exthook(ctx)

        if self.ui.debugflag:
            # debug mode lists modified/added/removed files separately
            files = ctx.p1().status(ctx)
            for key, value in zip(
                [b'files', b'files+', b'files-'],
                [files.modified, files.added, files.removed],
            ):
                if value:
                    self.ui.write(
                        columns[key] % b" ".join(value),
                        label=b'ui.debug log.files',
                    )
        elif ctx.files() and self.ui.verbose:
            self.ui.write(
                columns[b'files'] % b" ".join(ctx.files()),
                label=b'ui.note log.files',
            )
        if copies and self.ui.verbose:
            copies = [b'%s (%s)' % c for c in copies]
            self.ui.write(
                columns[b'copies'] % b' '.join(copies),
                label=b'ui.note log.copies',
            )

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                self.ui.write(
                    columns[b'extra'] % (key, stringutil.escapestr(value)),
                    label=b'ui.debug log.extra',
                )

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(
                    _(b"description:\n"), label=b'ui.note log.description'
                )
                self.ui.write(description, label=b'ui.note log.description')
                self.ui.write(b"\n\n")
            else:
                # non-verbose: only the first line of the description
                self.ui.write(
                    columns[b'summary'] % description.splitlines()[0],
                    label=b'log.summary',
                )
        self.ui.write(b"\n")

        self._showpatch(ctx, graphwidth)

    def _showobsfate(self, ctx):
        '''write the "obsolete:" lines describing what ctx was replaced by'''
        # TODO: do not depend on templater
        tres = formatter.templateresources(self.repo.ui, self.repo)
        t = formatter.maketemplater(
            self.repo.ui,
            b'{join(obsfate, "\n")}',
            defaults=templatekw.keywords,
            resources=tres,
        )
        obsfate = t.renderdefault({b'ctx': ctx}).splitlines()

        if obsfate:
            for obsfateline in obsfate:
                self.ui.write(
                    self._columns[b'obsolete'] % obsfateline,
                    label=b'log.obsfate',
                )

    def _exthook(self, ctx):
        '''empty method used by extension as a hook point'''

    def _showpatch(self, ctx, graphwidth=0):
        '''write diffstat and/or patch of ctx as requested by diffopts'''
        if self._includestat:
            self._differ.showdiff(
                self.ui, ctx, self._diffopts, graphwidth, stat=True
            )
        if self._includestat and self._includediff:
            # blank line between the diffstat and the diff
            self.ui.write(b"\n")
        if self._includediff:
            self._differ.showdiff(
                self.ui, ctx, self._diffopts, graphwidth, stat=False
            )
        if self._includestat or self._includediff:
            self.ui.write(b"\n")
436 436
437 437
class changesetformatter(changesetprinter):
    """Format changeset information by generic formatter

    Used for machine-readable output (e.g. JSON/CBOR); see
    changesetdisplayer().
    """

    def __init__(
        self, ui, repo, fm, differ=None, diffopts=None, buffered=False
    ):
        changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
        # machine-readable output always uses git-style diffs
        self._diffopts = patch.difffeatureopts(ui, diffopts, git=True)
        self._fm = fm

    def close(self):
        self._fm.end()

    def _show(self, ctx, copies, props):
        '''show a single changeset or file revision'''
        fm = self._fm
        fm.startitem()
        fm.context(ctx=ctx)
        fm.data(rev=scmutil.intrev(ctx), node=fm.hexfunc(scmutil.binnode(ctx)))

        # datahint lists the fields explicitly requested by the template
        datahint = fm.datahint()
        if self.ui.quiet and not datahint:
            return

        fm.data(
            branch=ctx.branch(),
            phase=ctx.phasestr(),
            user=ctx.user(),
            date=fm.formatdate(ctx.date()),
            desc=ctx.description(),
            bookmarks=fm.formatlist(ctx.bookmarks(), name=b'bookmark'),
            tags=fm.formatlist(ctx.tags(), name=b'tag'),
            parents=fm.formatlist(
                [fm.hexfunc(c.node()) for c in ctx.parents()], name=b'node'
            ),
        )

        if self.ui.debugflag or b'manifest' in datahint:
            fm.data(manifest=fm.hexfunc(ctx.manifestnode() or wdirid))
        if self.ui.debugflag or b'extra' in datahint:
            fm.data(extra=fm.formatdict(ctx.extra()))

        if (
            self.ui.debugflag
            or b'modified' in datahint
            or b'added' in datahint
            or b'removed' in datahint
        ):
            files = ctx.p1().status(ctx)
            fm.data(
                modified=fm.formatlist(files.modified, name=b'file'),
                added=fm.formatlist(files.added, name=b'file'),
                removed=fm.formatlist(files.removed, name=b'file'),
            )

        verbose = not self.ui.debugflag and self.ui.verbose
        if verbose or b'files' in datahint:
            fm.data(files=fm.formatlist(ctx.files(), name=b'file'))
        if verbose and copies or b'copies' in datahint:
            fm.data(
                copies=fm.formatdict(copies or {}, key=b'name', value=b'source')
            )

        # diffstat/diff are rendered through a capture buffer since the
        # differ writes to the ui directly
        if self._includestat or b'diffstat' in datahint:
            self.ui.pushbuffer()
            self._differ.showdiff(self.ui, ctx, self._diffopts, stat=True)
            fm.data(diffstat=self.ui.popbuffer())
        if self._includediff or b'diff' in datahint:
            self.ui.pushbuffer()
            self._differ.showdiff(self.ui, ctx, self._diffopts, stat=False)
            fm.data(diff=self.ui.popbuffer())
509 509
510 510
class changesettemplater(changesetprinter):
    '''format changeset information.

    Note: there are a variety of convenience functions to build a
    changesettemplater for common cases. See functions such as:
    maketemplater, changesetdisplayer, buildcommittemplate, or other
    functions that use changesettemplater.
    '''

    # Arguments before "buffered" used to be positional. Consider not
    # adding/removing arguments before "buffered" to not break callers.
    def __init__(
        self, ui, repo, tmplspec, differ=None, diffopts=None, buffered=False
    ):
        changesetprinter.__init__(self, ui, repo, differ, diffopts, buffered)
        # tres is shared with _graphnodeformatter()
        self._tresources = tres = formatter.templateresources(ui, repo)
        self.t = formatter.loadtemplater(
            ui,
            tmplspec,
            defaults=templatekw.keywords,
            resources=tres,
            cache=templatekw.defaulttempl,
        )
        # per-displayer item counter, used for the "separator" part
        self._counter = itertools.count()

        self._tref = tmplspec.ref
        # mapping of part name -> template name to render for that part;
        # empty string means the part is absent
        self._parts = {
            b'header': b'',
            b'footer': b'',
            tmplspec.ref: tmplspec.ref,
            b'docheader': b'',
            b'docfooter': b'',
            b'separator': b'',
        }
        if tmplspec.mapfile:
            # find correct templates for current mode, for backward
            # compatibility with 'log -v/-q/--debug' using a mapfile
            tmplmodes = [
                (True, b''),
                (self.ui.verbose, b'_verbose'),
                (self.ui.quiet, b'_quiet'),
                (self.ui.debugflag, b'_debug'),
            ]
            for mode, postfix in tmplmodes:
                for t in self._parts:
                    cur = t + postfix
                    if mode and cur in self.t:
                        self._parts[t] = cur
        else:
            partnames = [p for p in self._parts.keys() if p != tmplspec.ref]
            m = formatter.templatepartsmap(tmplspec, self.t, partnames)
            self._parts.update(m)

        if self._parts[b'docheader']:
            self.ui.write(self.t.render(self._parts[b'docheader'], {}))

    def close(self):
        if self._parts[b'docfooter']:
            if not self.footer:
                self.footer = b""
            self.footer += self.t.render(self._parts[b'docfooter'], {})
        return super(changesettemplater, self).close()

    def _show(self, ctx, copies, props):
        '''show a single changeset or file revision'''
        props = props.copy()
        props[b'ctx'] = ctx
        props[b'index'] = index = next(self._counter)
        props[b'revcache'] = {b'copies': copies}
        graphwidth = props.get(b'graphwidth', 0)

        # write separator, which wouldn't work well with the header part below
        # since there's inherently a conflict between header (across items) and
        # separator (per item)
        if self._parts[b'separator'] and index > 0:
            self.ui.write(self.t.render(self._parts[b'separator'], {}))

        # write header
        if self._parts[b'header']:
            h = self.t.render(self._parts[b'header'], props)
            if self.buffered:
                self.header[ctx.rev()] = h
            else:
                if self.lastheader != h:
                    self.lastheader = h
                    self.ui.write(h)

        # write changeset metadata, then patch if requested
        key = self._parts[self._tref]
        self.ui.write(self.t.render(key, props))
        self._exthook(ctx)
        self._showpatch(ctx, graphwidth)

        if self._parts[b'footer']:
            if not self.footer:
                self.footer = self.t.render(self._parts[b'footer'], props)
608 608
609 609
def templatespec(tmpl, mapfile):
    """Build a changeset templatespec from a literal template or a map file

    Exactly one of *tmpl* and *mapfile* may be provided.
    """
    assert not (tmpl and mapfile)
    if not mapfile:
        return formatter.literal_templatespec(tmpl)
    return formatter.mapfile_templatespec(b'changeset', mapfile)
616 616
617 617
def _lookuptemplate(ui, tmpl, style):
    """Find the template matching the given template spec or style

    See formatter.lookuptemplate() for details.
    """

    # ui settings
    if not tmpl and not style:  # templates are stronger than styles
        tmpl = ui.config(b'ui', b'logtemplate')
        if tmpl:
            return formatter.literal_templatespec(templater.unquotestring(tmpl))
        else:
            style = util.expandpath(ui.config(b'ui', b'style'))

    if not tmpl and style:
        mapfile = style
        fp = None
        if not os.path.split(mapfile)[0]:
            # plain style name: try the bundled "map-cmdline.<style>" first,
            # then the name itself, on the template search path
            (mapname, fp) = templater.try_open_template(
                b'map-cmdline.' + mapfile
            ) or templater.try_open_template(mapfile)
            if mapname:
                mapfile = mapname
        return formatter.mapfile_templatespec(b'changeset', mapfile, fp)

    return formatter.lookuptemplate(ui, b'changeset', tmpl)
644 644
645 645
def maketemplater(ui, repo, tmpl, buffered=False):
    """Build a changesettemplater for the literal template byte-string
    *tmpl*."""
    return changesettemplater(
        ui, repo, formatter.literal_templatespec(tmpl), buffered=buffered
    )
651 651
652 652
def changesetdisplayer(ui, repo, opts, differ=None, buffered=False):
    """show one changeset using template or regular display.

    Display format will be the first non-empty hit of:
    1. option 'template'
    2. option 'style'
    3. [ui] setting 'logtemplate'
    4. [ui] setting 'style'
    If all of these values are either unset or the empty string,
    regular display via changesetprinter() is done.
    """
    postargs = (differ, opts, buffered)
    spec = _lookuptemplate(ui, opts.get(b'template'), opts.get(b'style'))

    # machine-readable formats have slightly different keyword set than
    # plain templates, which are handled by changesetformatter.
    # note that {b'pickle', b'debug'} can also be added to the list if needed.
    if spec.ref in {b'cbor', b'json'}:
        fm = ui.formatter(b'log', opts)
        return changesetformatter(ui, repo, fm, *postargs)

    if not spec.ref and not spec.tmpl and not spec.mapfile:
        # no template configured anywhere: plain-text output
        return changesetprinter(ui, repo, *postargs)

    return changesettemplater(ui, repo, spec, *postargs)
678 678
679 679
@attr.s
class walkopts(object):
    """Options to configure a set of revisions and file matcher factory
    to scan revision/file history

    Instances are built by parseopts() and consumed by getrevs() and
    makewalker().
    """

    # raw command-line parameters, which a matcher will be built from
    pats = attr.ib()  # type: List[bytes]
    opts = attr.ib()  # type: Dict[bytes, Any]

    # a list of revset expressions to be traversed; if follow, it specifies
    # the start revisions
    revspec = attr.ib()  # type: List[bytes]

    # miscellaneous queries to filter revisions (see "hg help log" for details)
    branches = attr.ib(default=attr.Factory(list))  # type: List[bytes]
    date = attr.ib(default=None)  # type: Optional[bytes]
    keywords = attr.ib(default=attr.Factory(list))  # type: List[bytes]
    no_merges = attr.ib(default=False)  # type: bool
    only_merges = attr.ib(default=False)  # type: bool
    prune_ancestors = attr.ib(default=attr.Factory(list))  # type: List[bytes]
    users = attr.ib(default=attr.Factory(list))  # type: List[bytes]

    # miscellaneous matcher arguments
    include_pats = attr.ib(default=attr.Factory(list))  # type: List[bytes]
    exclude_pats = attr.ib(default=attr.Factory(list))  # type: List[bytes]

    # 0: no follow, 1: follow first, 2: follow both parents
    follow = attr.ib(default=0)  # type: int

    # do not attempt filelog-based traversal, which may be fast but cannot
    # include revisions where files were removed
    force_changelog_traversal = attr.ib(default=False)  # type: bool

    # filter revisions by file patterns, which should be disabled only if
    # you want to include revisions where files were unmodified
    filter_revisions_by_pats = attr.ib(default=True)  # type: bool

    # sort revisions prior to traversal: 'desc', 'topo', or None
    sort_revisions = attr.ib(default=None)  # type: Optional[bytes]

    # limit number of changes displayed; None means unlimited
    limit = attr.ib(default=None)  # type: Optional[int]
723 723
724 724
def parseopts(ui, pats, opts):
    # type: (Any, List[bytes], Dict[bytes, Any]) -> walkopts
    """Parse log command options into walkopts

    The returned walkopts will be passed in to getrevs() or makewalker().
    """
    # --follow-first wins over --follow
    if opts.get(b'follow_first'):
        follow = 1
    else:
        follow = 2 if opts.get(b'follow') else 0

    sort_revisions = None
    if opts.get(b'graph'):
        # graph output needs a deterministic ordering
        if ui.configbool(b'experimental', b'log.topo'):
            sort_revisions = b'topo'
        else:
            sort_revisions = b'desc'

    return walkopts(
        pats=pats,
        opts=opts,
        revspec=opts.get(b'rev', []),
        # branch and only_branch are really aliases and must be handled at
        # the same time
        branches=opts.get(b'branch', []) + opts.get(b'only_branch', []),
        date=opts.get(b'date'),
        keywords=opts.get(b'keyword', []),
        no_merges=bool(opts.get(b'no_merges')),
        only_merges=bool(opts.get(b'only_merges')),
        prune_ancestors=opts.get(b'prune', []),
        users=opts.get(b'user', []),
        include_pats=opts.get(b'include', []),
        exclude_pats=opts.get(b'exclude', []),
        follow=follow,
        force_changelog_traversal=bool(opts.get(b'removed')),
        sort_revisions=sort_revisions,
        limit=getlimit(opts),
    )
766 766
767 767
def _makematcher(repo, revs, wopts):
    """Build matcher and expanded patterns from log options

    If --follow, revs are the revisions to follow from.

    Returns (match, pats, slowpath) where
    - match: a matcher built from the given pats and -I/-X opts
    - pats: patterns used (globs are expanded on Windows)
    - slowpath: True if patterns aren't as simple as scanning filelogs
    """
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset, but a log-like command should build its matcher
    # with scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, wopts.pats, wopts.opts)
    slowpath = match.anypats() or (
        not match.always() and wopts.force_changelog_traversal
    )
    if not slowpath:
        if wopts.follow and wopts.revspec:
            # There may be the case that a path doesn't exist in some (but
            # not all) of the specified start revisions, but let's consider
            # the path is valid. Missing files will be warned by the matcher.
            startctxs = [repo[r] for r in revs]
            for f in match.files():
                found = False
                for c in startctxs:
                    if f in c:
                        found = True
                    elif c.hasdir(f):
                        # If a directory exists in any of the start revisions,
                        # take the slow path.
                        found = slowpath = True
                if not found:
                    raise error.Abort(
                        _(
                            b'cannot follow file not in any of the specified '
                            b'revisions: "%s"'
                        )
                        % f
                    )
        elif wopts.follow:
            # --follow with no explicit start revisions: check the files
            # against the working-directory parent
            for f in match.files():
                if f not in wctx:
                    # If the file exists, it may be a directory, so let it
                    # take the slow path.
                    if os.path.exists(repo.wjoin(f)):
                        slowpath = True
                        continue
                    else:
                        raise error.Abort(
                            _(
                                b'cannot follow file not in parent '
                                b'revision: "%s"'
                            )
                            % f
                        )
                filelog = repo.file(f)
                if not filelog:
                    # A file exists in wdir but not in history, which means
                    # the file isn't committed yet.
                    raise error.Abort(
                        _(b'cannot follow nonexistent file: "%s"') % f
                    )
        else:
            for f in match.files():
                filelog = repo.file(f)
                if not filelog:
                    # A zero count may be a directory or deleted file, so
                    # try to find matching entries on the slow path.
                    slowpath = True

        # We decided to fall back to the slowpath because at least one
        # of the paths was not a file. Check to see if at least one of them
        # existed in history - in that case, we'll continue down the
        # slowpath; otherwise, we can turn off the slowpath
        if slowpath:
            for path in match.files():
                if path == b'.' or path in repo.store:
                    break
            else:
                slowpath = False

    return match, pats, slowpath
853 853
854 854
def _fileancestors(repo, revs, match, followfirst):
    """Build (revs, filematcher) by walking ancestors of the matched files

    Returns a generatorset of ancestor revisions (iterated descending) and
    a per-ctx matcher limited to the files traced at that revision.
    """
    fctxs = []
    for r in revs:
        ctx = repo[r]
        fctxs.extend(ctx[f].introfilectx() for f in ctx.walk(match))

    # When displaying a revision with --patch --follow FILE, we have
    # to know which file of the revision must be diffed. With
    # --follow, we want the names of the ancestors of FILE in the
    # revision, stored in "fcache". "fcache" is populated as a side effect
    # of the graph traversal.
    fcache = {}

    def filematcher(ctx):
        # only revisions already visited by revgen() have an entry
        return scmutil.matchfiles(repo, fcache.get(scmutil.intrev(ctx), []))

    def revgen():
        for rev, cs in dagop.filectxancestors(fctxs, followfirst=followfirst):
            fcache[rev] = [c.path() for c in cs]
            yield rev

    return smartset.generatorset(revgen(), iterasc=False), filematcher
877 877
878 878
879 879 def _makenofollowfilematcher(repo, pats, opts):
880 880 '''hook for extensions to override the filematcher for non-follow cases'''
881 881 return None
882 882
883 883
# Map of log filter option name -> (itemspec, listspec) used by
# _makerevset():
# - itemspec with no '%' is appended verbatim; with '%s' it formats a
#   single value (or each value of a list when listspec is set)
# - listspec (e.g. b'%lr', b'not %lr') combines the formatted values
_opt2logrevset = {
    b'no_merges': (b'not merge()', None),
    b'only_merges': (b'merge()', None),
    b'_matchfiles': (None, b'_matchfiles(%ps)'),
    b'date': (b'date(%s)', None),
    b'branch': (b'branch(%s)', b'%lr'),
    b'_patslog': (b'filelog(%s)', b'%lr'),
    b'keyword': (b'keyword(%s)', b'%lr'),
    b'prune': (b'ancestors(%s)', b'not %lr'),
    b'user': (b'user(%s)', b'%lr'),
}
895 895
896 896
def _makerevset(repo, wopts, slowpath):
    """Return a revset string built from log options and file patterns

    Returns None when no filtering expression is needed.
    """
    opts = {
        b'branch': [repo.lookupbranch(b) for b in wopts.branches],
        b'date': wopts.date,
        b'keyword': wopts.keywords,
        b'no_merges': wopts.no_merges,
        b'only_merges': wopts.only_merges,
        b'prune': wopts.prune_ancestors,
        b'user': wopts.users,
    }

    if wopts.filter_revisions_by_pats and slowpath:
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X b" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = [b'r:', b'd:relpath']
        for p in wopts.pats:
            matchargs.append(b'p:' + p)
        for p in wopts.include_pats:
            matchargs.append(b'i:' + p)
        for p in wopts.exclude_pats:
            matchargs.append(b'x:' + p)
        opts[b'_matchfiles'] = matchargs
    elif wopts.filter_revisions_by_pats and not wopts.follow:
        opts[b'_patslog'] = list(wopts.pats)

    expr = []
    for op, val in sorted(pycompat.iteritems(opts)):
        if not val:
            continue
        revop, listop = _opt2logrevset[op]
        if revop and b'%' not in revop:
            # fixed predicate with no value interpolation
            expr.append(revop)
        elif not listop:
            expr.append(revsetlang.formatspec(revop, val))
        else:
            # format each value, then combine them with the list template
            if revop:
                val = [revsetlang.formatspec(revop, v) for v in val]
            expr.append(revsetlang.formatspec(listop, val))

    if expr:
        expr = b'(' + b' and '.join(expr) + b')'
    else:
        expr = None
    return expr
948 946
949 947
def _initialrevs(repo, wopts):
    """Return the initial set of revisions to be filtered or followed"""
    if wopts.revspec:
        # explicit -r/--rev takes precedence
        return scmutil.revrange(repo, wopts.revspec)
    if wopts.follow:
        if repo.dirstate.p1() == nullid:
            # empty working directory: nothing to follow from
            return smartset.baseset()
        return repo.revs(b'.')
    allrevs = smartset.spanset(repo)
    allrevs.reverse()
    return allrevs
962 960
963 961
def makewalker(repo, wopts):
    # type: (Any, walkopts) -> Tuple[smartset.abstractsmartset, Optional[Callable[[Any], matchmod.basematcher]]]
    """Build (revs, makefilematcher) to scan revision/file history

    - revs is the smartset to be traversed.
    - makefilematcher is a function to map ctx to a matcher for that revision
    """
    revs = _initialrevs(repo, wopts)
    if not revs:
        return smartset.baseset(), None
    # TODO: might want to merge slowpath with wopts.force_changelog_traversal
    match, pats, slowpath = _makematcher(repo, revs, wopts)
    # keep the expanded patterns for later revset/matcher construction
    wopts = attr.evolve(wopts, pats=pats)

    filematcher = None
    if wopts.follow:
        if slowpath or match.always():
            # follow through the changelog when file-level tracing is not
            # possible (or unnecessary)
            revs = dagop.revancestors(repo, revs, followfirst=wopts.follow == 1)
        else:
            assert not wopts.force_changelog_traversal
            revs, filematcher = _fileancestors(
                repo, revs, match, followfirst=wopts.follow == 1
            )
        revs.reverse()
    if filematcher is None:
        filematcher = _makenofollowfilematcher(repo, wopts.pats, wopts.opts)
    if filematcher is None:
        # default: the same matcher for every revision

        def filematcher(ctx):
            return match

    expr = _makerevset(repo, wopts, slowpath)
    if wopts.sort_revisions:
        assert wopts.sort_revisions in {b'topo', b'desc'}
        if wopts.sort_revisions == b'topo':
            if not revs.istopo():
                revs = dagop.toposort(revs, repo.changelog.parentrevs)
                # TODO: try to iterate the set lazily
                revs = revset.baseset(list(revs), istopo=True)
        elif not (revs.isdescending() or revs.istopo()):
            # User-specified revs might be unsorted
            revs.sort(reverse=True)
    if expr:
        # apply the option-derived filter expression
        matcher = revset.match(None, expr)
        revs = matcher(repo, revs)
    if wopts.limit is not None:
        revs = revs.slice(0, wopts.limit)

    return revs, filematcher
1013 1011
1014 1012
def getrevs(repo, wopts):
    # type: (Any, walkopts) -> Tuple[smartset.abstractsmartset, Optional[changesetdiffer]]
    """Return (revs, differ) where revs is a smartset

    differ is a changesetdiffer with pre-configured file matcher.
    """
    revs, makefilematcher = makewalker(repo, wopts)
    differ = None
    if revs:
        differ = changesetdiffer()
        differ._makefilematcher = makefilematcher
    return revs, differ
1027 1025
1028 1026
def _parselinerangeopt(repo, opts):
    """Parse --line-range log option and return a list of tuples (filename,
    (fromline, toline)).

    Aborts if a pattern is malformed, if the line range is not of the form
    FROMLINE:TOLINE, or if the pattern does not match exactly one file.
    """
    linerangebyfname = []
    for pat in opts.get(b'line_range', []):
        try:
            # the option value is "PATTERN,FROMLINE:TOLINE"
            pat, linerange = pat.rsplit(b',', 1)
        except ValueError:
            raise error.Abort(_(b'malformatted line-range pattern %s') % pat)
        try:
            fromline, toline = map(int, linerange.split(b':'))
        except ValueError:
            raise error.Abort(_(b"invalid line range for %s") % pat)
        msg = _(b"line range pattern '%s' must match exactly one file") % pat
        fname = scmutil.parsefollowlinespattern(repo, None, pat, msg)
        linerangebyfname.append(
            (fname, util.processlinerange(fromline, toline))
        )
    return linerangebyfname
1049 1047
1050 1048
def getlinerangerevs(repo, userrevs, opts):
    """Return (revs, differ).

    "revs" are revisions obtained by processing "line-range" log options and
    walking block ancestors of each specified file/line-range.

    "differ" is a changesetdiffer with pre-configured file matcher and hunks
    filter.
    """
    wctx = repo[None]

    # Two-levels map of "rev -> file ctx -> [line range]".
    linerangesbyrev = {}
    for fname, (fromline, toline) in _parselinerangeopt(repo, opts):
        if fname not in wctx:
            raise error.Abort(
                _(b'cannot follow file not in parent revision: "%s"') % fname
            )
        fctx = wctx.filectx(fname)
        for fctx, linerange in dagop.blockancestors(fctx, fromline, toline):
            rev = fctx.introrev()
            if rev is None:
                # working-directory revision
                rev = wdirrev
            if rev not in userrevs:
                # only keep revisions the user asked for
                continue
            linerangesbyrev.setdefault(rev, {}).setdefault(
                fctx.path(), []
            ).append(linerange)

    def nofilterhunksfn(fctx, hunks):
        # identity filter for revisions with no recorded line ranges
        return hunks

    def hunksfilter(ctx):
        fctxlineranges = linerangesbyrev.get(scmutil.intrev(ctx))
        if fctxlineranges is None:
            return nofilterhunksfn

        def filterfn(fctx, hunks):
            lineranges = fctxlineranges.get(fctx.path())
            if lineranges is not None:
                for hr, lines in hunks:
                    if hr is None:  # binary
                        yield hr, lines
                        continue
                    # keep hunks overlapping one of the requested ranges
                    if any(mdiff.hunkinrange(hr[2:], lr) for lr in lineranges):
                        yield hr, lines
            else:
                for hunk in hunks:
                    yield hunk

        return filterfn

    def filematcher(ctx):
        files = list(linerangesbyrev.get(scmutil.intrev(ctx), []))
        return scmutil.matchfiles(repo, files)

    revs = sorted(linerangesbyrev, reverse=True)

    differ = changesetdiffer()
    differ._makefilematcher = filematcher
    differ._makehunksfilter = hunksfilter
    return smartset.baseset(revs), differ
1113 1111
1114 1112
def _graphnodeformatter(ui, displayer):
    """Return a ``(repo, ctx, cache) -> bytes`` callable rendering the
    graph node character for a changeset.

    Honors the ``ui.graphnodetemplate`` config; without it, the built-in
    "{graphnode}" keyword is returned directly as a fast path.
    """
    spec = ui.config(b'ui', b'graphnodetemplate')
    if not spec:
        return templatekw.getgraphnode  # fast path for "{graphnode}"

    spec = templater.unquotestring(spec)
    # Reuse the displayer's template resources when available so slow
    # template data is cached across both renderers.
    if isinstance(displayer, changesettemplater):
        resources = displayer._tresources
    else:
        resources = formatter.templateresources(ui)
    tmpl = formatter.maketemplater(
        ui, spec, defaults=templatekw.keywords, resources=resources
    )

    def formatnode(repo, ctx, cache):
        return tmpl.renderdefault({b'ctx': ctx, b'repo': repo})

    return formatnode
1135 1133
1136 1134
def displaygraph(ui, repo, dag, displayer, edgefn, getcopies=None, props=None):
    """Render ``dag`` as an ASCII revision graph through ``displayer``.

    ``dag`` yields (rev, type, ctx, parents) tuples and ``edgefn``
    computes the graph edges for each node. ``getcopies``, if given, is
    called per changeset to obtain copy information. ``props`` holds
    extra template properties forwarded to the displayer.
    """
    props = props or {}
    formatnode = _graphnodeformatter(ui, displayer)
    state = graphmod.asciistate()
    styles = state.styles

    # only set graph styling if HGPLAIN is not set.
    if ui.plain(b'graph'):
        # set all edge styles to |, the default pre-3.8 behaviour
        styles.update(dict.fromkeys(styles, b'|'))
    else:
        edgetypes = {
            b'parent': graphmod.PARENT,
            b'grandparent': graphmod.GRANDPARENT,
            b'missing': graphmod.MISSINGPARENT,
        }
        for name, key in edgetypes.items():
            # experimental config: experimental.graphstyle.*
            styles[key] = ui.config(
                b'experimental', b'graphstyle.%s' % name, styles[key]
            )
            if not styles[key]:
                # Empty config value falls back to the default edge glyph.
                styles[key] = None

    # experimental config: experimental.graphshorten
    state.graphshorten = ui.configbool(b'experimental', b'graphshorten')

    formatnode_cache = {}
    for rev, type, ctx, parents in dag:
        char = formatnode(repo, ctx, formatnode_cache)
        copies = getcopies(ctx) if getcopies else None
        edges = edgefn(type, char, state, rev, parents)
        # The first edge's width is needed before the changeset text can
        # be rendered, so consume it eagerly and re-chain it below.
        firstedge = next(edges)
        width = firstedge[2]
        displayer.show(
            ctx, copies=copies, graphwidth=width, **pycompat.strkwargs(props)
        )
        # The displayer buffered its output keyed by ``rev``; split it
        # into lines to lay out next to the graph columns.
        lines = displayer.hunk.pop(rev).split(b'\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(ctx)
        for type, char, width, coldata in itertools.chain([firstedge], edges):
            graphmod.ascii(ui, state, type, char, lines, coldata)
            # Only the first edge carries the changeset text.
            lines = []
    displayer.close()
1182 1180
1183 1181
def displaygraphrevs(ui, repo, revs, displayer, getrenamed):
    """Show the revisions in ``revs`` as an ASCII graph."""
    displaygraph(
        ui,
        repo,
        graphmod.dagwalker(repo, revs),
        displayer,
        graphmod.asciiedges,
        getrenamed,
    )
1187 1185
1188 1186
def displayrevs(ui, repo, revs, displayer, getcopies):
    """Show each revision in ``revs`` through ``displayer`` (no graph)."""
    for rev in revs:
        ctx = repo[rev]
        if getcopies:
            copies = getcopies(ctx)
        else:
            copies = None
        displayer.show(ctx, copies=copies)
        displayer.flush(ctx)
    displayer.close()
1196 1194
1197 1195
def checkunsupportedgraphflags(pats, opts):
    """Abort if ``opts`` enables a flag that -G/--graph cannot honor."""
    unsupported = [b"newest_first"]
    for op in unsupported:
        if opts.get(op):
            raise error.Abort(
                _(b"-G/--graph option is incompatible with --%s")
                % op.replace(b"_", b"-")
            )
1205 1203
1206 1204
def graphrevs(repo, nodes, opts):
    """Return a graphmod node iterator over ``nodes``, newest first,
    honoring the --limit option.

    Note: ``nodes`` is reversed in place as a side effect.
    """
    nodes.reverse()
    limit = getlimit(opts)
    if limit is None:
        selected = nodes
    else:
        selected = nodes[:limit]
    return graphmod.nodes(repo, selected)
General Comments 0
You need to be logged in to leave comments. Login now