merge: pass commitinfo to applyupdates() and get it stored in mergestate...
Pulkit Goyal, r45833:cb6a72dc (default branch)
@@ -1,1285 +1,1283 b''
1 1 # __init__.py - remotefilelog extension
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)
8 8
9 9 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
10 10 GUARANTEES. This means that repositories created with this extension may
11 11 only be usable with the exact version of this extension/Mercurial that was
12 12 used. The extension attempts to enforce this in order to prevent repository
13 13 corruption.
14 14
15 15 remotefilelog works by fetching file contents lazily and storing them
16 16 in a cache on the client rather than in revlogs. This allows enormous
17 17 histories to be transferred only partially, making them easier to
18 18 operate on.
19 19
20 20 Configs:
21 21
22 22 ``packs.maxchainlen`` specifies the maximum delta chain length in pack files
23 23
24 24 ``packs.maxpacksize`` specifies the maximum pack file size
25 25
26 26 ``packs.maxpackfilecount`` specifies the maximum number of packs in the
27 27 shared cache (trees only for now)
28 28
29 29 ``remotefilelog.backgroundprefetch`` runs prefetch in background when True
30 30
31 31 ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
32 32 update, and on other commands that use them. Different from pullprefetch.
33 33
34 34 ``remotefilelog.gcrepack`` does garbage collection during repack when True
35 35
36 36 ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
37 37 it is garbage collected
38 38
39 39 ``remotefilelog.repackonhggc`` runs repack on hg gc when True
40 40
41 41 ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
42 42 days after which it is no longer prefetched.
43 43
44 44 ``remotefilelog.prefetchdelay`` specifies delay between background
45 45 prefetches in seconds after operations that change the working copy parent
46 46
47 47 ``remotefilelog.data.gencountlimit`` constrains the minimum number of data
48 48 pack files required to be considered part of a generation. In particular,
49 49 the minimum number of pack files must exceed gencountlimit.
50 50
51 51 ``remotefilelog.data.generations`` list for specifying the lower bound of
52 52 each generation of the data pack files. For example, the list
53 53 ['100MB', '1MB'] (in either order) leads to three generations:
54 54 [0, 1MB), [1MB, 100MB) and [100MB, infinity).
55 55
56 56 ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
57 57 include in an incremental data repack.
58 58
59 59 ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
60 60 it to be considered for an incremental data repack.
61 61
62 62 ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
63 63 to include in an incremental data repack.
64 64
65 65 ``remotefilelog.history.gencountlimit`` constrains the minimum number of
66 66 history pack files required to be considered part of a generation. In
67 67 particular, the minimum number of pack files must exceed gencountlimit.
68 68
69 69 ``remotefilelog.history.generations`` list for specifying the lower bound of
70 70 each generation of the history pack files. For example, the list
71 71 ['100MB', '1MB'] (in either order) leads to three generations:
72 72 [0, 1MB), [1MB, 100MB) and [100MB, infinity).
73 73
74 74 ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
75 75 include in an incremental history repack.
76 76
77 77 ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
78 78 for it to be considered for an incremental history repack.
79 79
80 80 ``remotefilelog.history.repacksizelimit`` the maximum total size of pack
81 81 files to include in an incremental history repack.
82 82
83 83 ``remotefilelog.backgroundrepack`` automatically consolidate packs in the
84 84 background
85 85
86 86 ``remotefilelog.cachepath`` path to cache
87 87
88 88 ``remotefilelog.cachegroup`` if set, make cache directory sgid to this
89 89 group
90 90
91 91 ``remotefilelog.cacheprocess`` binary to invoke for fetching file data
92 92
93 93 ``remotefilelog.debug`` turn on remotefilelog-specific debug output
94 94
95 95 ``remotefilelog.excludepattern`` pattern of files to exclude from pulls
96 96
97 97 ``remotefilelog.includepattern`` pattern of files to include in pulls
98 98
99 99 ``remotefilelog.fetchwarning`` message to print when too many
100 100 single-file fetches occur
101 101
102 102 ``remotefilelog.getfilesstep`` number of files to request in a single RPC
103 103
104 104 ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
105 105 files, otherwise use optimistic fetching
106 106
107 107 ``remotefilelog.pullprefetch`` revset for selecting files that should be
108 108 eagerly downloaded rather than lazily
109 109
110 110 ``remotefilelog.reponame`` name of the repo. If set, used to partition
111 111 data from other repos in a shared store.
112 112
113 113 ``remotefilelog.server`` if true, enable server-side functionality
114 114
115 115 ``remotefilelog.servercachepath`` path for caching blobs on the server
116 116
117 117 ``remotefilelog.serverexpiration`` number of days to keep cached server
118 118 blobs
119 119
120 120 ``remotefilelog.validatecache`` if set, check cache entries for corruption
121 121 before returning blobs
122 122
123 123 ``remotefilelog.validatecachelog`` if set, check cache entries for
124 124 corruption before returning metadata
125 125
126 126 """
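
Editorial aside (not part of the diff): the options documented above are read at runtime through Mercurial's ui API, mirroring the configitem() registrations further down in this file. A minimal sketch, assuming a configured ui object; the helper name is invented:

    def _summarize_config(ui):
        # each call matches a configitem() registration below
        return {
            b'cachepath': ui.config(b'remotefilelog', b'cachepath'),
            b'pullprefetch': ui.config(b'remotefilelog', b'pullprefetch'),
            b'backgroundrepack': ui.configbool(b'remotefilelog', b'backgroundrepack'),
            b'prefetchdays': ui.configint(b'remotefilelog', b'prefetchdays'),
        }
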
127 127 from __future__ import absolute_import
128 128
129 129 import os
130 130 import time
131 131 import traceback
132 132
133 133 from mercurial.node import hex
134 134 from mercurial.i18n import _
135 135 from mercurial.pycompat import open
136 136 from mercurial import (
137 137 changegroup,
138 138 changelog,
139 139 cmdutil,
140 140 commands,
141 141 configitems,
142 142 context,
143 143 copies,
144 144 debugcommands as hgdebugcommands,
145 145 dispatch,
146 146 error,
147 147 exchange,
148 148 extensions,
149 149 hg,
150 150 localrepo,
151 151 match as matchmod,
152 152 merge,
153 153 node as nodemod,
154 154 patch,
155 155 pycompat,
156 156 registrar,
157 157 repair,
158 158 repoview,
159 159 revset,
160 160 scmutil,
161 161 smartset,
162 162 streamclone,
163 163 util,
164 164 )
165 165 from . import (
166 166 constants,
167 167 debugcommands,
168 168 fileserverclient,
169 169 remotefilectx,
170 170 remotefilelog,
171 171 remotefilelogserver,
172 172 repack as repackmod,
173 173 shallowbundle,
174 174 shallowrepo,
175 175 shallowstore,
176 176 shallowutil,
177 177 shallowverifier,
178 178 )
179 179
180 180 # ensures debug commands are registered
181 181 hgdebugcommands.command
182 182
183 183 cmdtable = {}
184 184 command = registrar.command(cmdtable)
185 185
186 186 configtable = {}
187 187 configitem = registrar.configitem(configtable)
188 188
189 189 configitem(b'remotefilelog', b'debug', default=False)
190 190
191 191 configitem(b'remotefilelog', b'reponame', default=b'')
192 192 configitem(b'remotefilelog', b'cachepath', default=None)
193 193 configitem(b'remotefilelog', b'cachegroup', default=None)
194 194 configitem(b'remotefilelog', b'cacheprocess', default=None)
195 195 configitem(b'remotefilelog', b'cacheprocess.includepath', default=None)
196 196 configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB")
197 197
198 198 configitem(
199 199 b'remotefilelog',
200 200 b'fallbackpath',
201 201 default=configitems.dynamicdefault,
202 202 alias=[(b'remotefilelog', b'fallbackrepo')],
203 203 )
204 204
205 205 configitem(b'remotefilelog', b'validatecachelog', default=None)
206 206 configitem(b'remotefilelog', b'validatecache', default=b'on')
207 207 configitem(b'remotefilelog', b'server', default=None)
208 208 configitem(b'remotefilelog', b'servercachepath', default=None)
209 209 configitem(b"remotefilelog", b"serverexpiration", default=30)
210 210 configitem(b'remotefilelog', b'backgroundrepack', default=False)
211 211 configitem(b'remotefilelog', b'bgprefetchrevs', default=None)
212 212 configitem(b'remotefilelog', b'pullprefetch', default=None)
213 213 configitem(b'remotefilelog', b'backgroundprefetch', default=False)
214 214 configitem(b'remotefilelog', b'prefetchdelay', default=120)
215 215 configitem(b'remotefilelog', b'prefetchdays', default=14)
216 216
217 217 configitem(b'remotefilelog', b'getfilesstep', default=10000)
218 218 configitem(b'remotefilelog', b'getfilestype', default=b'optimistic')
219 219 configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault)
220 220 configitem(b'remotefilelog', b'fetchwarning', default=b'')
221 221
222 222 configitem(b'remotefilelog', b'includepattern', default=None)
223 223 configitem(b'remotefilelog', b'excludepattern', default=None)
224 224
225 225 configitem(b'remotefilelog', b'gcrepack', default=False)
226 226 configitem(b'remotefilelog', b'repackonhggc', default=False)
227 227 configitem(b'repack', b'chainorphansbysize', default=True, experimental=True)
228 228
229 229 configitem(b'packs', b'maxpacksize', default=0)
230 230 configitem(b'packs', b'maxchainlen', default=1000)
231 231
232 232 configitem(b'devel', b'remotefilelog.bg-wait', default=False)
233 233
234 234 # default TTL limit is 30 days
235 235 _defaultlimit = 60 * 60 * 24 * 30
236 236 configitem(b'remotefilelog', b'nodettl', default=_defaultlimit)
237 237
238 238 configitem(b'remotefilelog', b'data.gencountlimit', default=2),
239 239 configitem(
240 240 b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB']
241 241 )
242 242 configitem(b'remotefilelog', b'data.maxrepackpacks', default=50)
243 243 configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB')
244 244 configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB')
245 245
246 246 configitem(b'remotefilelog', b'history.gencountlimit', default=2),
247 247 configitem(b'remotefilelog', b'history.generations', default=[b'100MB'])
248 248 configitem(b'remotefilelog', b'history.maxrepackpacks', default=50)
249 249 configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB')
250 250 configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB')
251 251
252 252 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
253 253 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
254 254 # be specifying the version(s) of Mercurial they are tested with, or
255 255 # leave the attribute unspecified.
256 256 testedwith = b'ships-with-hg-core'
257 257
258 258 repoclass = localrepo.localrepository
259 259 repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
260 260
261 261 isenabled = shallowutil.isenabled
262 262
263 263
264 264 def uisetup(ui):
265 265 """Wraps user facing Mercurial commands to swap them out with shallow
266 266 versions.
267 267 """
268 268 hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
269 269
270 270 entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow)
271 271 entry[1].append(
272 272 (
273 273 b'',
274 274 b'shallow',
275 275 None,
276 276 _(b"create a shallow clone which uses remote file history"),
277 277 )
278 278 )
279 279
280 280 extensions.wrapcommand(
281 281 commands.table, b'debugindex', debugcommands.debugindex
282 282 )
283 283 extensions.wrapcommand(
284 284 commands.table, b'debugindexdot', debugcommands.debugindexdot
285 285 )
286 286 extensions.wrapcommand(commands.table, b'log', log)
287 287 extensions.wrapcommand(commands.table, b'pull', pull)
288 288
289 289 # Prevent 'hg manifest --all'
290 290 def _manifest(orig, ui, repo, *args, **opts):
291 291 if isenabled(repo) and opts.get('all'):
292 292 raise error.Abort(_(b"--all is not supported in a shallow repo"))
293 293
294 294 return orig(ui, repo, *args, **opts)
295 295
296 296 extensions.wrapcommand(commands.table, b"manifest", _manifest)
297 297
298 298 # Wrap remotefilelog with lfs code
299 299 def _lfsloaded(loaded=False):
300 300 lfsmod = None
301 301 try:
302 302 lfsmod = extensions.find(b'lfs')
303 303 except KeyError:
304 304 pass
305 305 if lfsmod:
306 306 lfsmod.wrapfilelog(remotefilelog.remotefilelog)
307 307 fileserverclient._lfsmod = lfsmod
308 308
309 309 extensions.afterloaded(b'lfs', _lfsloaded)
310 310
311 311 # debugdata needs remotefilelog.len to work
312 312 extensions.wrapcommand(commands.table, b'debugdata', debugdatashallow)
313 313
314 314 changegroup.cgpacker = shallowbundle.shallowcg1packer
315 315
316 316 extensions.wrapfunction(
317 317 changegroup, b'_addchangegroupfiles', shallowbundle.addchangegroupfiles
318 318 )
319 319 extensions.wrapfunction(
320 320 changegroup, b'makechangegroup', shallowbundle.makechangegroup
321 321 )
322 322 extensions.wrapfunction(localrepo, b'makestore', storewrapper)
323 323 extensions.wrapfunction(exchange, b'pull', exchangepull)
324 324 extensions.wrapfunction(merge, b'applyupdates', applyupdates)
325 325 extensions.wrapfunction(merge, b'_checkunknownfiles', checkunknownfiles)
326 326 extensions.wrapfunction(context.workingctx, b'_checklookup', checklookup)
327 327 extensions.wrapfunction(scmutil, b'_findrenames', findrenames)
328 328 extensions.wrapfunction(
329 329 copies, b'_computeforwardmissing', computeforwardmissing
330 330 )
331 331 extensions.wrapfunction(dispatch, b'runcommand', runcommand)
332 332 extensions.wrapfunction(repair, b'_collectbrokencsets', _collectbrokencsets)
333 333 extensions.wrapfunction(context.changectx, b'filectx', filectx)
334 334 extensions.wrapfunction(context.workingctx, b'filectx', workingfilectx)
335 335 extensions.wrapfunction(patch, b'trydiff', trydiff)
336 336 extensions.wrapfunction(hg, b'verify', _verify)
337 337 scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook)
338 338
339 339 # disappointing hacks below
340 340 extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn)
341 341 extensions.wrapfunction(revset, b'filelog', filelogrevset)
342 342 revset.symbols[b'filelog'] = revset.filelog
343 343 extensions.wrapfunction(cmdutil, b'walkfilerevs', walkfilerevs)
344 344
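
Editorial aside (not part of the diff): every wrapper installed by uisetup() above follows the same extensions convention, in which the wrapper receives the original callable as its first argument and is expected to delegate to it. A minimal sketch with a hypothetical command name:

    def _mywrapper(orig, ui, repo, *args, **opts):
        ui.note(b'before the wrapped command\n')  # extension work goes here
        return orig(ui, repo, *args, **opts)      # always delegate

    # extensions.wrapcommand(commands.table, b'status', _mywrapper)
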
345 345
346 346 def cloneshallow(orig, ui, repo, *args, **opts):
347 347 if opts.get('shallow'):
348 348 repos = []
349 349
350 350 def pull_shallow(orig, self, *args, **kwargs):
351 351 if not isenabled(self):
352 352 repos.append(self.unfiltered())
353 353 # set up the client hooks so the post-clone update works
354 354 setupclient(self.ui, self.unfiltered())
355 355
356 356 # setupclient fixed the class on the repo itself
357 357 # but we also need to fix it on the repoview
358 358 if isinstance(self, repoview.repoview):
359 359 self.__class__.__bases__ = (
360 360 self.__class__.__bases__[0],
361 361 self.unfiltered().__class__,
362 362 )
363 363 self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
364 364 scmutil.writereporequirements(self)
365 365
366 366 # Since setupclient hadn't been called, exchange.pull was not
367 367 # wrapped. So we need to manually invoke our version of it.
368 368 return exchangepull(orig, self, *args, **kwargs)
369 369 else:
370 370 return orig(self, *args, **kwargs)
371 371
372 372 extensions.wrapfunction(exchange, b'pull', pull_shallow)
373 373
374 374 # Wrap the stream logic to add requirements and to pass include/exclude
375 375 # patterns around.
376 376 def setup_streamout(repo, remote):
377 377 # Replace remote.stream_out with a version that sends file
378 378 # patterns.
379 379 def stream_out_shallow(orig):
380 380 caps = remote.capabilities()
381 381 if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
382 382 opts = {}
383 383 if repo.includepattern:
384 384 opts['includepattern'] = b'\0'.join(repo.includepattern)
385 385 if repo.excludepattern:
386 386 opts['excludepattern'] = b'\0'.join(repo.excludepattern)
387 387 return remote._callstream(b'stream_out_shallow', **opts)
388 388 else:
389 389 return orig()
390 390
391 391 extensions.wrapfunction(remote, b'stream_out', stream_out_shallow)
392 392
393 393 def stream_wrap(orig, op):
394 394 setup_streamout(op.repo, op.remote)
395 395 return orig(op)
396 396
397 397 extensions.wrapfunction(
398 398 streamclone, b'maybeperformlegacystreamclone', stream_wrap
399 399 )
400 400
401 401 def canperformstreamclone(orig, pullop, bundle2=False):
402 402 # remotefilelog is currently incompatible with the
403 403 # bundle2 flavor of streamclones, so force us to use
404 404 # v1 instead.
405 405 if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
406 406 pullop.remotebundle2caps[b'stream'] = [
407 407 c for c in pullop.remotebundle2caps[b'stream'] if c != b'v2'
408 408 ]
409 409 if bundle2:
410 410 return False, None
411 411 supported, requirements = orig(pullop, bundle2=bundle2)
412 412 if requirements is not None:
413 413 requirements.add(constants.SHALLOWREPO_REQUIREMENT)
414 414 return supported, requirements
415 415
416 416 extensions.wrapfunction(
417 417 streamclone, b'canperformstreamclone', canperformstreamclone
418 418 )
419 419
420 420 try:
421 421 orig(ui, repo, *args, **opts)
422 422 finally:
423 423 if opts.get('shallow'):
424 424 for r in repos:
425 425 if util.safehasattr(r, b'fileservice'):
426 426 r.fileservice.close()
427 427
428 428
429 429 def debugdatashallow(orig, *args, **kwds):
430 430 oldlen = remotefilelog.remotefilelog.__len__
431 431 try:
432 432 remotefilelog.remotefilelog.__len__ = lambda x: 1
433 433 return orig(*args, **kwds)
434 434 finally:
435 435 remotefilelog.remotefilelog.__len__ = oldlen
436 436
437 437
438 438 def reposetup(ui, repo):
439 439 if not repo.local():
440 440 return
441 441
442 442 # put here intentionally because this doesn't work in uisetup
443 443 ui.setconfig(b'hooks', b'update.prefetch', wcpprefetch)
444 444 ui.setconfig(b'hooks', b'commit.prefetch', wcpprefetch)
445 445
446 446 isserverenabled = ui.configbool(b'remotefilelog', b'server')
447 447 isshallowclient = isenabled(repo)
448 448
449 449 if isserverenabled and isshallowclient:
450 450 raise RuntimeError(b"Cannot be both a server and shallow client.")
451 451
452 452 if isshallowclient:
453 453 setupclient(ui, repo)
454 454
455 455 if isserverenabled:
456 456 remotefilelogserver.setupserver(ui, repo)
457 457
458 458
459 459 def setupclient(ui, repo):
460 460 if not isinstance(repo, localrepo.localrepository):
461 461 return
462 462
463 463 # Even clients get the server setup since they need to have the
464 464 # wireprotocol endpoints registered.
465 465 remotefilelogserver.onetimesetup(ui)
466 466 onetimeclientsetup(ui)
467 467
468 468 shallowrepo.wraprepo(repo)
469 469 repo.store = shallowstore.wrapstore(repo.store)
470 470
471 471
472 472 def storewrapper(orig, requirements, path, vfstype):
473 473 s = orig(requirements, path, vfstype)
474 474 if constants.SHALLOWREPO_REQUIREMENT in requirements:
475 475 s = shallowstore.wrapstore(s)
476 476
477 477 return s
478 478
479 479
480 480 # prefetch files before update
481 481 def applyupdates(
482 orig, repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
482 orig, repo, actions, wctx, mctx, overwrite, wantfiledata, **opts
483 483 ):
484 484 if isenabled(repo):
485 485 manifest = mctx.manifest()
486 486 files = []
487 487 for f, args, msg in actions[b'g']:
488 488 files.append((f, hex(manifest[f])))
489 489 # batch fetch the needed files from the server
490 490 repo.fileservice.prefetch(files)
491 return orig(
492 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=labels
493 )
491 return orig(repo, actions, wctx, mctx, overwrite, wantfiledata, **opts)
494 492
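
This hunk is the remotefilelog side of the change described in the commit message: instead of naming labels= explicitly, the wrapper now forwards arbitrary keyword arguments, so it keeps working as merge.applyupdates() grows new keywords (such as the commitinfo the commit message mentions). A generic sketch of the pass-through pattern, with hypothetical names:

    def wrapper(orig, *args, **opts):
        # extension-specific work happens here; afterwards everything,
        # including keywords this wrapper has never heard of, is
        # forwarded to the wrapped function unchanged
        return orig(*args, **opts)
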
495 493
496 494 # Prefetch merge checkunknownfiles
497 495 def checkunknownfiles(orig, repo, wctx, mctx, force, actions, *args, **kwargs):
498 496 if isenabled(repo):
499 497 files = []
500 498 sparsematch = repo.maybesparsematch(mctx.rev())
501 499 for f, (m, actionargs, msg) in pycompat.iteritems(actions):
502 500 if sparsematch and not sparsematch(f):
503 501 continue
504 502 if m in (b'c', b'dc', b'cm'):
505 503 files.append((f, hex(mctx.filenode(f))))
506 504 elif m == b'dg':
507 505 f2 = actionargs[0]
508 506 files.append((f2, hex(mctx.filenode(f2))))
509 507 # batch fetch the needed files from the server
510 508 repo.fileservice.prefetch(files)
511 509 return orig(repo, wctx, mctx, force, actions, *args, **kwargs)
512 510
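
Editorial aside (not part of the diff): the prefetch hooks in this file all share one shape, collecting (path, hex(filenode)) pairs and then issuing a single batched request. A condensed sketch, assuming an enabled remotefilelog repo; the helper name is invented:

    def _prefetchpaths(repo, ctx, paths):
        mf = ctx.manifest()
        pairs = [(p, hex(mf[p])) for p in paths if p in mf]
        repo.fileservice.prefetch(pairs)  # one request covers many files
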
513 511
514 512 # Prefetch files before status attempts to look at their size and contents
515 513 def checklookup(orig, self, files):
516 514 repo = self._repo
517 515 if isenabled(repo):
518 516 prefetchfiles = []
519 517 for parent in self._parents:
520 518 for f in files:
521 519 if f in parent:
522 520 prefetchfiles.append((f, hex(parent.filenode(f))))
523 521 # batch fetch the needed files from the server
524 522 repo.fileservice.prefetch(prefetchfiles)
525 523 return orig(self, files)
526 524
527 525
529 527 # Prefetch files for the logic that compares added and removed files for renames
529 527 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
530 528 if isenabled(repo):
531 529 files = []
532 530 pmf = repo[b'.'].manifest()
533 531 for f in removed:
534 532 if f in pmf:
535 533 files.append((f, hex(pmf[f])))
536 534 # batch fetch the needed files from the server
537 535 repo.fileservice.prefetch(files)
538 536 return orig(repo, matcher, added, removed, *args, **kwargs)
539 537
540 538
541 539 # prefetch files before pathcopies check
542 540 def computeforwardmissing(orig, a, b, match=None):
543 541 missing = orig(a, b, match=match)
544 542 repo = a._repo
545 543 if isenabled(repo):
546 544 mb = b.manifest()
547 545
548 546 files = []
549 547 sparsematch = repo.maybesparsematch(b.rev())
550 548 if sparsematch:
551 549 sparsemissing = set()
552 550 for f in missing:
553 551 if sparsematch(f):
554 552 files.append((f, hex(mb[f])))
555 553 sparsemissing.add(f)
556 554 missing = sparsemissing
557 555
558 556 # batch fetch the needed files from the server
559 557 repo.fileservice.prefetch(files)
560 558 return missing
561 559
562 560
563 561 # close cache miss server connection after the command has finished
564 562 def runcommand(orig, lui, repo, *args, **kwargs):
565 563 fileservice = None
566 564 # repo can be None when running in chg:
567 565 # - at startup, reposetup was called because serve is not norepo
568 566 # - a norepo command like "help" is called
569 567 if repo and isenabled(repo):
570 568 fileservice = repo.fileservice
571 569 try:
572 570 return orig(lui, repo, *args, **kwargs)
573 571 finally:
574 572 if fileservice:
575 573 fileservice.close()
576 574
577 575
578 576 # prevent strip from stripping remotefilelogs
579 577 def _collectbrokencsets(orig, repo, files, striprev):
580 578 if isenabled(repo):
581 579 files = list([f for f in files if not repo.shallowmatch(f)])
582 580 return orig(repo, files, striprev)
583 581
584 582
585 583 # changectx wrappers
586 584 def filectx(orig, self, path, fileid=None, filelog=None):
587 585 if fileid is None:
588 586 fileid = self.filenode(path)
589 587 if isenabled(self._repo) and self._repo.shallowmatch(path):
590 588 return remotefilectx.remotefilectx(
591 589 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
592 590 )
593 591 return orig(self, path, fileid=fileid, filelog=filelog)
594 592
595 593
596 594 def workingfilectx(orig, self, path, filelog=None):
597 595 if isenabled(self._repo) and self._repo.shallowmatch(path):
598 596 return remotefilectx.remoteworkingfilectx(
599 597 self._repo, path, workingctx=self, filelog=filelog
600 598 )
601 599 return orig(self, path, filelog=filelog)
602 600
603 601
604 602 # prefetch required revisions before a diff
605 603 def trydiff(
606 604 orig,
607 605 repo,
608 606 revs,
609 607 ctx1,
610 608 ctx2,
611 609 modified,
612 610 added,
613 611 removed,
614 612 copy,
615 613 getfilectx,
616 614 *args,
617 615 **kwargs
618 616 ):
619 617 if isenabled(repo):
620 618 prefetch = []
621 619 mf1 = ctx1.manifest()
622 620 for fname in modified + added + removed:
623 621 if fname in mf1:
624 622 fnode = getfilectx(fname, ctx1).filenode()
625 623 # fnode can be None if it's an edited working ctx file
626 624 if fnode:
627 625 prefetch.append((fname, hex(fnode)))
628 626 if fname not in removed:
629 627 fnode = getfilectx(fname, ctx2).filenode()
630 628 if fnode:
631 629 prefetch.append((fname, hex(fnode)))
632 630
633 631 repo.fileservice.prefetch(prefetch)
634 632
635 633 return orig(
636 634 repo,
637 635 revs,
638 636 ctx1,
639 637 ctx2,
640 638 modified,
641 639 added,
642 640 removed,
643 641 copy,
644 642 getfilectx,
645 643 *args,
646 644 **kwargs
647 645 )
648 646
649 647
650 648 # Prevent verify from processing files
651 649 # a stub for mercurial.hg.verify()
652 650 def _verify(orig, repo, level=None):
653 651 lock = repo.lock()
654 652 try:
655 653 return shallowverifier.shallowverifier(repo).verify()
656 654 finally:
657 655 lock.release()
658 656
659 657
660 658 clientonetime = False
661 659
662 660
663 661 def onetimeclientsetup(ui):
664 662 global clientonetime
665 663 if clientonetime:
666 664 return
667 665 clientonetime = True
668 666
669 667 # Don't commit filelogs until we know the commit hash, since the hash
670 668 # is present in the filelog blob.
671 669 # This violates Mercurial's filelog->manifest->changelog write order,
672 670 # but is generally fine for client repos.
673 671 pendingfilecommits = []
674 672
675 673 def addrawrevision(
676 674 orig,
677 675 self,
678 676 rawtext,
679 677 transaction,
680 678 link,
681 679 p1,
682 680 p2,
683 681 node,
684 682 flags,
685 683 cachedelta=None,
686 684 _metatuple=None,
687 685 ):
688 686 if isinstance(link, int):
689 687 pendingfilecommits.append(
690 688 (
691 689 self,
692 690 rawtext,
693 691 transaction,
694 692 link,
695 693 p1,
696 694 p2,
697 695 node,
698 696 flags,
699 697 cachedelta,
700 698 _metatuple,
701 699 )
702 700 )
703 701 return node
704 702 else:
705 703 return orig(
706 704 self,
707 705 rawtext,
708 706 transaction,
709 707 link,
710 708 p1,
711 709 p2,
712 710 node,
713 711 flags,
714 712 cachedelta,
715 713 _metatuple=_metatuple,
716 714 )
717 715
718 716 extensions.wrapfunction(
719 717 remotefilelog.remotefilelog, b'addrawrevision', addrawrevision
720 718 )
721 719
722 720 def changelogadd(orig, self, *args, **kwargs):
723 721 oldlen = len(self)
724 722 node = orig(self, *args, **kwargs)
725 723 newlen = len(self)
726 724 if oldlen != newlen:
727 725 for oldargs in pendingfilecommits:
728 726 log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
729 727 linknode = self.node(link)
730 728 if linknode == node:
731 729 log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
732 730 else:
733 731 raise error.ProgrammingError(
734 732 b'pending multiple integer revisions are not supported'
735 733 )
736 734 else:
737 735 # "link" is actually wrong here (it is set to len(changelog))
738 736 # if changelog remains unchanged, skip writing file revisions
739 737 # but still do a sanity check about pending multiple revisions
740 738 if len({x[3] for x in pendingfilecommits}) > 1:
741 739 raise error.ProgrammingError(
742 740 b'pending multiple integer revisions are not supported'
743 741 )
744 742 del pendingfilecommits[:]
745 743 return node
746 744
747 745 extensions.wrapfunction(changelog.changelog, b'add', changelogadd)
748 746
749 747
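
Editorial aside (not part of the diff): the pendingfilecommits machinery above is a deferred-write queue. Filelog revisions whose link is still an integer (the commit hash is not known yet) are parked, then flushed once changelog.add() produces the real node. A stripped-down sketch of the same idea; every name here is hypothetical:

    pending = []

    def add_revision(payload, link):
        if isinstance(link, int):   # linknode unknown until the commit lands
            pending.append((payload, link))
        else:
            really_write(payload, link)

    def on_changelog_add(linknode):
        for payload, _link in pending:
            really_write(payload, linknode)
        del pending[:]
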
750 748 def getrenamedfn(orig, repo, endrev=None):
751 749 if not isenabled(repo) or copies.usechangesetcentricalgo(repo):
752 750 return orig(repo, endrev)
753 751
754 752 rcache = {}
755 753
756 754 def getrenamed(fn, rev):
757 755 '''looks up all renames for a file (up to endrev) the first
758 756 time the file is given. It indexes on the changerev and only
759 757 parses the manifest if linkrev != changerev.
760 758 Returns rename info for fn at changerev rev.'''
761 759 if rev in rcache.setdefault(fn, {}):
762 760 return rcache[fn][rev]
763 761
764 762 try:
765 763 fctx = repo[rev].filectx(fn)
766 764 for ancestor in fctx.ancestors():
767 765 if ancestor.path() == fn:
768 766 renamed = ancestor.renamed()
769 767 rcache[fn][ancestor.rev()] = renamed and renamed[0]
770 768
771 769 renamed = fctx.renamed()
772 770 return renamed and renamed[0]
773 771 except error.LookupError:
774 772 return None
775 773
776 774 return getrenamed
777 775
778 776
779 777 def walkfilerevs(orig, repo, match, follow, revs, fncache):
780 778 if not isenabled(repo):
781 779 return orig(repo, match, follow, revs, fncache)
782 780
783 781 # remotefilelogs can't be walked in rev order, so throw.
784 782 # The caller will see the exception and walk the commit tree instead.
785 783 if not follow:
786 784 raise cmdutil.FileWalkError(b"Cannot walk via filelog")
787 785
788 786 wanted = set()
789 787 minrev, maxrev = min(revs), max(revs)
790 788
791 789 pctx = repo[b'.']
792 790 for filename in match.files():
793 791 if filename not in pctx:
794 792 raise error.Abort(
795 793 _(b'cannot follow file not in parent revision: "%s"') % filename
796 794 )
797 795 fctx = pctx[filename]
798 796
799 797 linkrev = fctx.linkrev()
800 798 if linkrev >= minrev and linkrev <= maxrev:
801 799 fncache.setdefault(linkrev, []).append(filename)
802 800 wanted.add(linkrev)
803 801
804 802 for ancestor in fctx.ancestors():
805 803 linkrev = ancestor.linkrev()
806 804 if linkrev >= minrev and linkrev <= maxrev:
807 805 fncache.setdefault(linkrev, []).append(ancestor.path())
808 806 wanted.add(linkrev)
809 807
810 808 return wanted
811 809
812 810
813 811 def filelogrevset(orig, repo, subset, x):
814 812 """``filelog(pattern)``
815 813 Changesets connected to the specified filelog.
816 814
817 815 For performance reasons, ``filelog()`` does not show every changeset
818 816 that affects the requested file(s). See :hg:`help log` for details. For
819 817 a slower, more accurate result, use ``file()``.
820 818 """
821 819
822 820 if not isenabled(repo):
823 821 return orig(repo, subset, x)
824 822
825 823 # i18n: "filelog" is a keyword
826 824 pat = revset.getstring(x, _(b"filelog requires a pattern"))
827 825 m = matchmod.match(
828 826 repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None]
829 827 )
830 828 s = set()
831 829
832 830 if not matchmod.patkind(pat):
833 831 # slow
834 832 for r in subset:
835 833 ctx = repo[r]
836 834 cfiles = ctx.files()
837 835 for f in m.files():
838 836 if f in cfiles:
839 837 s.add(ctx.rev())
840 838 break
841 839 else:
842 840 # partial
843 841 files = (f for f in repo[None] if m(f))
844 842 for f in files:
845 843 fctx = repo[None].filectx(f)
846 844 s.add(fctx.linkrev())
847 845 for actx in fctx.ancestors():
848 846 s.add(actx.linkrev())
849 847
850 848 return smartset.baseset([r for r in subset if r in s])
851 849
852 850
853 851 @command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True)
854 852 def gc(ui, *args, **opts):
855 853 '''garbage collect the client and server filelog caches
856 854 '''
857 855 cachepaths = set()
858 856
859 857 # get the system client cache
860 858 systemcache = shallowutil.getcachepath(ui, allowempty=True)
861 859 if systemcache:
862 860 cachepaths.add(systemcache)
863 861
864 862 # get repo client and server cache
865 863 repopaths = []
866 864 pwd = ui.environ.get(b'PWD')
867 865 if pwd:
868 866 repopaths.append(pwd)
869 867
870 868 repopaths.extend(args)
871 869 repos = []
872 870 for repopath in repopaths:
873 871 try:
874 872 repo = hg.peer(ui, {}, repopath)
875 873 repos.append(repo)
876 874
877 875 repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
878 876 if repocache:
879 877 cachepaths.add(repocache)
880 878 except error.RepoError:
881 879 pass
882 880
883 881 # gc client cache
884 882 for cachepath in cachepaths:
885 883 gcclient(ui, cachepath)
886 884
887 885 # gc server cache
888 886 for repo in repos:
889 887 remotefilelogserver.gcserver(ui, repo._repo)
890 888
891 889
892 890 def gcclient(ui, cachepath):
893 891 # get list of repos that use this cache
894 892 repospath = os.path.join(cachepath, b'repos')
895 893 if not os.path.exists(repospath):
896 894 ui.warn(_(b"no known cache at %s\n") % cachepath)
897 895 return
898 896
899 897 reposfile = open(repospath, b'rb')
900 898 repos = {r[:-1] for r in reposfile.readlines()}
901 899 reposfile.close()
902 900
903 901 # build list of useful files
904 902 validrepos = []
905 903 keepkeys = set()
906 904
907 905 sharedcache = None
908 906 filesrepacked = False
909 907
910 908 count = 0
911 909 progress = ui.makeprogress(
912 910 _(b"analyzing repositories"), unit=b"repos", total=len(repos)
913 911 )
914 912 for path in repos:
915 913 progress.update(count)
916 914 count += 1
917 915 try:
918 916 path = ui.expandpath(os.path.normpath(path))
919 917 except TypeError as e:
920 918 ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e))
921 919 traceback.print_exc()
922 920 continue
923 921 try:
924 922 peer = hg.peer(ui, {}, path)
925 923 repo = peer._repo
926 924 except error.RepoError:
927 925 continue
928 926
929 927 validrepos.append(path)
930 928
931 929 # Protect against any repo or config changes that have happened since
932 930 # this repo was added to the repos file. We'd rather this loop succeed
933 931 # and too much be deleted, than the loop fail and nothing gets deleted.
934 932 if not isenabled(repo):
935 933 continue
936 934
937 935 if not util.safehasattr(repo, b'name'):
938 936 ui.warn(
939 937 _(b"repo %s is a misconfigured remotefilelog repo\n") % path
940 938 )
941 939 continue
942 940
943 941 # If garbage collection on repack and repack on hg gc are enabled
944 942 # then loose files are repacked and garbage collected.
945 943 # Otherwise regular garbage collection is performed.
946 944 repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc')
947 945 gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack')
948 946 if repackonhggc and gcrepack:
949 947 try:
950 948 repackmod.incrementalrepack(repo)
951 949 filesrepacked = True
952 950 continue
953 951 except (IOError, repackmod.RepackAlreadyRunning):
954 952 # If repack cannot be performed due to not enough disk space
955 953 # continue doing garbage collection of loose files w/o repack
956 954 pass
957 955
958 956 reponame = repo.name
959 957 if not sharedcache:
960 958 sharedcache = repo.sharedstore
961 959
962 960 # Compute a keepset which is not garbage collected
963 961 def keyfn(fname, fnode):
964 962 return fileserverclient.getcachekey(reponame, fname, hex(fnode))
965 963
966 964 keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
967 965
968 966 progress.complete()
969 967
970 968 # write list of valid repos back
971 969 oldumask = os.umask(0o002)
972 970 try:
973 971 reposfile = open(repospath, b'wb')
974 972 reposfile.writelines([(b"%s\n" % r) for r in validrepos])
975 973 reposfile.close()
976 974 finally:
977 975 os.umask(oldumask)
978 976
979 977 # prune cache
980 978 if sharedcache is not None:
981 979 sharedcache.gc(keepkeys)
982 980 elif not filesrepacked:
983 981 ui.warn(_(b"warning: no valid repos in repofile\n"))
984 982
985 983
986 984 def log(orig, ui, repo, *pats, **opts):
987 985 if not isenabled(repo):
988 986 return orig(ui, repo, *pats, **opts)
989 987
990 988 follow = opts.get('follow')
991 989 revs = opts.get('rev')
992 990 if pats:
993 991 # Force slowpath for non-follow patterns and follows that start from
994 992 # non-working-copy-parent revs.
995 993 if not follow or revs:
996 994 # This forces the slowpath
997 995 opts['removed'] = True
998 996
999 997 # If this is a non-follow log without any revs specified, recommend that
1000 998 # the user add -f to speed it up.
1001 999 if not follow and not revs:
1002 1000 match = scmutil.match(repo[b'.'], pats, pycompat.byteskwargs(opts))
1003 1001 isfile = not match.anypats()
1004 1002 if isfile:
1005 1003 for file in match.files():
1006 1004 if not os.path.isfile(repo.wjoin(file)):
1007 1005 isfile = False
1008 1006 break
1009 1007
1010 1008 if isfile:
1011 1009 ui.warn(
1012 1010 _(
1013 1011 b"warning: file log can be slow on large repos - "
1014 1012 + b"use -f to speed it up\n"
1015 1013 )
1016 1014 )
1017 1015
1018 1016 return orig(ui, repo, *pats, **opts)
1019 1017
1020 1018
1021 1019 def revdatelimit(ui, revset):
1022 1020 """Update revset so that only changesets no older than 'prefetchdays' days
1023 1021 are included. The default value is 14 days. If 'prefetchdays' is set
1024 1022 to zero or a negative value, the date restriction is not applied.
1025 1023 """
1026 1024 days = ui.configint(b'remotefilelog', b'prefetchdays')
1027 1025 if days > 0:
1028 1026 revset = b'(%s) & date(-%s)' % (revset, days)
1029 1027 return revset
1030 1028
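
For example (illustrative value): with remotefilelog.prefetchdays left at its default of 14,

    revdatelimit(ui, b'draft()')   # -> b'(draft()) & date(-14)'

restricting the revset to changesets from the last 14 days.
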
1031 1029
1032 1030 def readytofetch(repo):
1033 1031 """Check that enough time has passed since the last background prefetch.
1034 1032 This only relates to prefetches after operations that change the working
1035 1033 copy parent. Default delay between background prefetches is 2 minutes.
1036 1034 """
1037 1035 timeout = repo.ui.configint(b'remotefilelog', b'prefetchdelay')
1038 1036 fname = repo.vfs.join(b'lastprefetch')
1039 1037
1040 1038 ready = False
1041 1039 with open(fname, b'a'):
1042 1040 # the with construct above is used to avoid race conditions
1043 1041 modtime = os.path.getmtime(fname)
1044 1042 if (time.time() - modtime) > timeout:
1045 1043 os.utime(fname, None)
1046 1044 ready = True
1047 1045
1048 1046 return ready
1049 1047
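
Editorial aside (not part of the diff): readytofetch() is an mtime-based throttle; the marker file's mtime records the last background prefetch, and opening it in append mode guarantees it exists before the mtime is read. A standalone sketch with hypothetical names:

    import os
    import time

    def ready(marker, timeout):
        with open(marker, 'a'):                  # ensure the file exists
            if time.time() - os.path.getmtime(marker) > timeout:
                os.utime(marker, None)           # record this prefetch
                return True
        return False
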
1050 1048
1051 1049 def wcpprefetch(ui, repo, **kwargs):
1052 1050 """Prefetches in background revisions specified by bgprefetchrevs revset.
1053 1051 Does background repack if backgroundrepack flag is set in config.
1054 1052 """
1055 1053 shallow = isenabled(repo)
1056 1054 bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs')
1057 1055 isready = readytofetch(repo)
1058 1056
1059 1057 if not (shallow and bgprefetchrevs and isready):
1060 1058 return
1061 1059
1062 1060 bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
1063 1061 # update a revset with a date limit
1064 1062 bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)
1065 1063
1066 1064 def anon(unused_success):
1067 1065 if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch:
1068 1066 return
1069 1067 repo.ranprefetch = True
1070 1068 repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)
1071 1069
1072 1070 repo._afterlock(anon)
1073 1071
1074 1072
1075 1073 def pull(orig, ui, repo, *pats, **opts):
1076 1074 result = orig(ui, repo, *pats, **opts)
1077 1075
1078 1076 if isenabled(repo):
1079 1077 # prefetch if it's configured
1080 1078 prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch')
1081 1079 bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
1082 1080 bgprefetch = repo.ui.configbool(b'remotefilelog', b'backgroundprefetch')
1083 1081
1084 1082 if prefetchrevset:
1085 1083 ui.status(_(b"prefetching file contents\n"))
1086 1084 revs = scmutil.revrange(repo, [prefetchrevset])
1087 1085 base = repo[b'.'].rev()
1088 1086 if bgprefetch:
1089 1087 repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
1090 1088 else:
1091 1089 repo.prefetch(revs, base=base)
1092 1090 if bgrepack:
1093 1091 repackmod.backgroundrepack(repo, incremental=True)
1094 1092 elif bgrepack:
1095 1093 repackmod.backgroundrepack(repo, incremental=True)
1096 1094
1097 1095 return result
1098 1096
1099 1097
1100 1098 def exchangepull(orig, repo, remote, *args, **kwargs):
1101 1099 # Hook into the callstream/getbundle to insert bundle capabilities
1102 1100 # during a pull.
1103 1101 def localgetbundle(
1104 1102 orig, source, heads=None, common=None, bundlecaps=None, **kwargs
1105 1103 ):
1106 1104 if not bundlecaps:
1107 1105 bundlecaps = set()
1108 1106 bundlecaps.add(constants.BUNDLE2_CAPABLITY)
1109 1107 return orig(
1110 1108 source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs
1111 1109 )
1112 1110
1113 1111 if util.safehasattr(remote, b'_callstream'):
1114 1112 remote._localrepo = repo
1115 1113 elif util.safehasattr(remote, b'getbundle'):
1116 1114 extensions.wrapfunction(remote, b'getbundle', localgetbundle)
1117 1115
1118 1116 return orig(repo, remote, *args, **kwargs)
1119 1117
1120 1118
1121 1119 def _fileprefetchhook(repo, revmatches):
1122 1120 if isenabled(repo):
1123 1121 allfiles = []
1124 1122 for rev, match in revmatches:
1125 1123 if rev == nodemod.wdirrev or rev is None:
1126 1124 continue
1127 1125 ctx = repo[rev]
1128 1126 mf = ctx.manifest()
1129 1127 sparsematch = repo.maybesparsematch(ctx.rev())
1130 1128 for path in ctx.walk(match):
1131 1129 if (not sparsematch or sparsematch(path)) and path in mf:
1132 1130 allfiles.append((path, hex(mf[path])))
1133 1131 repo.fileservice.prefetch(allfiles)
1134 1132
1135 1133
1136 1134 @command(
1137 1135 b'debugremotefilelog',
1138 1136 [(b'd', b'decompress', None, _(b'decompress the filelog first')),],
1139 1137 _(b'hg debugremotefilelog <path>'),
1140 1138 norepo=True,
1141 1139 )
1142 1140 def debugremotefilelog(ui, path, **opts):
1143 1141 return debugcommands.debugremotefilelog(ui, path, **opts)
1144 1142
1145 1143
1146 1144 @command(
1147 1145 b'verifyremotefilelog',
1148 1146 [(b'd', b'decompress', None, _(b'decompress the filelogs first')),],
1149 1147 _(b'hg verifyremotefilelogs <directory>'),
1150 1148 norepo=True,
1151 1149 )
1152 1150 def verifyremotefilelog(ui, path, **opts):
1153 1151 return debugcommands.verifyremotefilelog(ui, path, **opts)
1154 1152
1155 1153
1156 1154 @command(
1157 1155 b'debugdatapack',
1158 1156 [
1159 1157 (b'', b'long', None, _(b'print the long hashes')),
1160 1158 (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'),
1161 1159 ],
1162 1160 _(b'hg debugdatapack <paths>'),
1163 1161 norepo=True,
1164 1162 )
1165 1163 def debugdatapack(ui, *paths, **opts):
1166 1164 return debugcommands.debugdatapack(ui, *paths, **opts)
1167 1165
1168 1166
1169 1167 @command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True)
1170 1168 def debughistorypack(ui, path, **opts):
1171 1169 return debugcommands.debughistorypack(ui, path)
1172 1170
1173 1171
1174 1172 @command(b'debugkeepset', [], _(b'hg debugkeepset'))
1175 1173 def debugkeepset(ui, repo, **opts):
1176 1174 # The command is used to measure keepset computation time
1177 1175 def keyfn(fname, fnode):
1178 1176 return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
1179 1177
1180 1178 repackmod.keepset(repo, keyfn)
1181 1179 return
1182 1180
1183 1181
1184 1182 @command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack'))
1185 1183 def debugwaitonrepack(ui, repo, **opts):
1186 1184 return debugcommands.debugwaitonrepack(repo)
1187 1185
1188 1186
1189 1187 @command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch'))
1190 1188 def debugwaitonprefetch(ui, repo, **opts):
1191 1189 return debugcommands.debugwaitonprefetch(repo)
1192 1190
1193 1191
1194 1192 def resolveprefetchopts(ui, opts):
1195 1193 if not opts.get(b'rev'):
1196 1194 revset = [b'.', b'draft()']
1197 1195
1198 1196 prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch', None)
1199 1197 if prefetchrevset:
1200 1198 revset.append(b'(%s)' % prefetchrevset)
1201 1199 bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs', None)
1202 1200 if bgprefetchrevs:
1203 1201 revset.append(b'(%s)' % bgprefetchrevs)
1204 1202 revset = b'+'.join(revset)
1205 1203
1206 1204 # update a revset with a date limit
1207 1205 revset = revdatelimit(ui, revset)
1208 1206
1209 1207 opts[b'rev'] = [revset]
1210 1208
1211 1209 if not opts.get(b'base'):
1212 1210 opts[b'base'] = None
1213 1211
1214 1212 return opts
1215 1213
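
Putting the pieces together (hypothetical config values): with pullprefetch = master and bgprefetchrevs = stable, and prefetchdays at its default of 14, resolveprefetchopts() ends up with

    opts[b'rev'] = [b'(.+draft()+(master)+(stable)) & date(-14)']
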
1216 1214
1217 1215 @command(
1218 1216 b'prefetch',
1219 1217 [
1220 1218 (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')),
1221 1219 (b'', b'repack', False, _(b'run repack after prefetch')),
1222 1220 (b'b', b'base', b'', _(b"rev that is assumed to already be local")),
1223 1221 ]
1224 1222 + commands.walkopts,
1225 1223 _(b'hg prefetch [OPTIONS] [FILE...]'),
1226 1224 helpcategory=command.CATEGORY_MAINTENANCE,
1227 1225 )
1228 1226 def prefetch(ui, repo, *pats, **opts):
1229 1227 """prefetch file revisions from the server
1230 1228
1231 1229 Prefetches file revisions for the specified revs and stores them in the
1232 1230 local remotefilelog cache. If no rev is specified, the default rev is
1233 1231 used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
1234 1232 File names or patterns can be used to limit which files are downloaded.
1235 1233
1236 1234 Return 0 on success.
1237 1235 """
1238 1236 opts = pycompat.byteskwargs(opts)
1239 1237 if not isenabled(repo):
1240 1238 raise error.Abort(_(b"repo is not shallow"))
1241 1239
1242 1240 opts = resolveprefetchopts(ui, opts)
1243 1241 revs = scmutil.revrange(repo, opts.get(b'rev'))
1244 1242 repo.prefetch(revs, opts.get(b'base'), pats, opts)
1245 1243
1246 1244 # Run repack in background
1247 1245 if opts.get(b'repack'):
1248 1246 repackmod.backgroundrepack(repo, incremental=True)
1249 1247
1250 1248
1251 1249 @command(
1252 1250 b'repack',
1253 1251 [
1254 1252 (b'', b'background', None, _(b'run in a background process'), None),
1255 1253 (b'', b'incremental', None, _(b'do an incremental repack'), None),
1256 1254 (
1257 1255 b'',
1258 1256 b'packsonly',
1259 1257 None,
1260 1258 _(b'only repack packs (skip loose objects)'),
1261 1259 None,
1262 1260 ),
1263 1261 ],
1264 1262 _(b'hg repack [OPTIONS]'),
1265 1263 )
1266 1264 def repack_(ui, repo, *pats, **opts):
1267 1265 if opts.get('background'):
1268 1266 repackmod.backgroundrepack(
1269 1267 repo,
1270 1268 incremental=opts.get('incremental'),
1271 1269 packsonly=opts.get('packsonly', False),
1272 1270 )
1273 1271 return
1274 1272
1275 1273 options = {b'packsonly': opts.get('packsonly')}
1276 1274
1277 1275 try:
1278 1276 if opts.get('incremental'):
1279 1277 repackmod.incrementalrepack(repo, options=options)
1280 1278 else:
1281 1279 repackmod.fullrepack(repo, options=options)
1282 1280 except repackmod.RepackAlreadyRunning as ex:
1283 1281 # Don't propagate the exception if the repack is already in
1284 1282 # progress, since we want the command to exit 0.
1285 1283 repo.ui.warn(b'%s\n' % ex)
@@ -1,2159 +1,2184 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import stat
12 12 import struct
13 13
14 14 from .i18n import _
15 15 from .node import (
16 16 addednodeid,
17 17 modifiednodeid,
18 18 nullid,
19 19 nullrev,
20 20 )
21 21 from .thirdparty import attr
22 22 from . import (
23 23 copies,
24 24 encoding,
25 25 error,
26 26 filemerge,
27 27 match as matchmod,
28 28 mergestate as mergestatemod,
29 29 obsutil,
30 30 pathutil,
31 31 pycompat,
32 32 scmutil,
33 33 subrepoutil,
34 34 util,
35 35 worker,
36 36 )
37 37
38 38 _pack = struct.pack
39 39 _unpack = struct.unpack
40 40
41 41
42 42 def _getcheckunknownconfig(repo, section, name):
43 43 config = repo.ui.config(section, name)
44 44 valid = [b'abort', b'ignore', b'warn']
45 45 if config not in valid:
46 46 validstr = b', '.join([b"'" + v + b"'" for v in valid])
47 47 raise error.ConfigError(
48 48 _(b"%s.%s not valid ('%s' is none of %s)")
49 49 % (section, name, config, validstr)
50 50 )
51 51 return config
52 52
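
Concretely (illustrative value): _getcheckunknownconfig(repo, b'merge', b'checkunknown') returns one of b'abort', b'ignore' or b'warn'; any other value in the user's configuration, say checkunknown = delete, raises ConfigError with the message built above.
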
53 53
54 54 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
55 55 if wctx.isinmemory():
56 56 # Nothing to do in IMM because nothing in the "working copy" can be an
57 57 # unknown file.
58 58 #
59 59 # Note that we should bail out here, not in ``_checkunknownfiles()``,
60 60 # because that function does other useful work.
61 61 return False
62 62
63 63 if f2 is None:
64 64 f2 = f
65 65 return (
66 66 repo.wvfs.audit.check(f)
67 67 and repo.wvfs.isfileorlink(f)
68 68 and repo.dirstate.normalize(f) not in repo.dirstate
69 69 and mctx[f2].cmp(wctx[f])
70 70 )
71 71
72 72
73 73 class _unknowndirschecker(object):
74 74 """
75 75 Look for any unknown files or directories that may have a path conflict
76 76 with a file. If any path prefix of the file exists as a file or link,
77 77 then it conflicts. If the file itself is a directory that contains any
78 78 file that is not tracked, then it conflicts.
79 79
80 80 Returns the shortest path at which a conflict occurs, or None if there is
81 81 no conflict.
82 82 """
83 83
84 84 def __init__(self):
85 85 # A set of paths known to be good. This prevents repeated checking of
86 86 # dirs. It will be updated with any new dirs that are checked and found
87 87 # to be safe.
88 88 self._unknowndircache = set()
89 89
90 90 # A set of paths that are known to be absent. This prevents repeated
91 91 # checking of subdirectories that are known not to exist. It will be
92 92 # updated with any new dirs that are checked and found to be absent.
93 93 self._missingdircache = set()
94 94
95 95 def __call__(self, repo, wctx, f):
96 96 if wctx.isinmemory():
97 97 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
98 98 return False
99 99
100 100 # Check for path prefixes that exist as unknown files.
101 101 for p in reversed(list(pathutil.finddirs(f))):
102 102 if p in self._missingdircache:
103 103 return
104 104 if p in self._unknowndircache:
105 105 continue
106 106 if repo.wvfs.audit.check(p):
107 107 if (
108 108 repo.wvfs.isfileorlink(p)
109 109 and repo.dirstate.normalize(p) not in repo.dirstate
110 110 ):
111 111 return p
112 112 if not repo.wvfs.lexists(p):
113 113 self._missingdircache.add(p)
114 114 return
115 115 self._unknowndircache.add(p)
116 116
117 117 # Check if the file conflicts with a directory containing unknown files.
118 118 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
119 119 # Does the directory contain any files that are not in the dirstate?
120 120 for p, dirs, files in repo.wvfs.walk(f):
121 121 for fn in files:
122 122 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
123 123 relf = repo.dirstate.normalize(relf, isknown=True)
124 124 if relf not in repo.dirstate:
125 125 return f
126 126 return None
127 127
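
Worked example (hypothetical paths): if the merge wants to create b'a/b' and the working directory contains an untracked file at b'a', the checker returns b'a', the shortest conflicting prefix; if b'a/b' itself exists as a directory containing any untracked file, the checker returns b'a/b'.
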
128 128
129 129 def _checkunknownfiles(repo, wctx, mctx, force, actions, mergeforce):
130 130 """
131 131 Considers any actions that care about the presence of conflicting unknown
132 132 files. For some actions, the result is to abort; for others, it is to
133 133 choose a different action.
134 134 """
135 135 fileconflicts = set()
136 136 pathconflicts = set()
137 137 warnconflicts = set()
138 138 abortconflicts = set()
139 139 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
140 140 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
141 141 pathconfig = repo.ui.configbool(
142 142 b'experimental', b'merge.checkpathconflicts'
143 143 )
144 144 if not force:
145 145
146 146 def collectconflicts(conflicts, config):
147 147 if config == b'abort':
148 148 abortconflicts.update(conflicts)
149 149 elif config == b'warn':
150 150 warnconflicts.update(conflicts)
151 151
152 152 checkunknowndirs = _unknowndirschecker()
153 153 for f, (m, args, msg) in pycompat.iteritems(actions):
154 154 if m in (
155 155 mergestatemod.ACTION_CREATED,
156 156 mergestatemod.ACTION_DELETED_CHANGED,
157 157 ):
158 158 if _checkunknownfile(repo, wctx, mctx, f):
159 159 fileconflicts.add(f)
160 160 elif pathconfig and f not in wctx:
161 161 path = checkunknowndirs(repo, wctx, f)
162 162 if path is not None:
163 163 pathconflicts.add(path)
164 164 elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
165 165 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
166 166 fileconflicts.add(f)
167 167
168 168 allconflicts = fileconflicts | pathconflicts
169 169 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
170 170 unknownconflicts = allconflicts - ignoredconflicts
171 171 collectconflicts(ignoredconflicts, ignoredconfig)
172 172 collectconflicts(unknownconflicts, unknownconfig)
173 173 else:
174 174 for f, (m, args, msg) in pycompat.iteritems(actions):
175 175 if m == mergestatemod.ACTION_CREATED_MERGE:
176 176 fl2, anc = args
177 177 different = _checkunknownfile(repo, wctx, mctx, f)
178 178 if repo.dirstate._ignore(f):
179 179 config = ignoredconfig
180 180 else:
181 181 config = unknownconfig
182 182
183 183 # The behavior when force is True is described by this table:
184 184 # config different mergeforce | action backup
185 185 # * n * | get n
186 186 # * y y | merge -
187 187 # abort y n | merge - (1)
188 188 # warn y n | warn + get y
189 189 # ignore y n | get y
190 190 #
191 191 # (1) this is probably the wrong behavior here -- we should
192 192 # probably abort, but some actions like rebases currently
193 193 # don't like an abort happening in the middle of
194 194 # merge.update.
195 195 if not different:
196 196 actions[f] = (
197 197 mergestatemod.ACTION_GET,
198 198 (fl2, False),
199 199 b'remote created',
200 200 )
201 201 elif mergeforce or config == b'abort':
202 202 actions[f] = (
203 203 mergestatemod.ACTION_MERGE,
204 204 (f, f, None, False, anc),
205 205 b'remote differs from untracked local',
206 206 )
207 207 elif config == b'abort':
208 208 abortconflicts.add(f)
209 209 else:
210 210 if config == b'warn':
211 211 warnconflicts.add(f)
212 212 actions[f] = (
213 213 mergestatemod.ACTION_GET,
214 214 (fl2, True),
215 215 b'remote created',
216 216 )
217 217
218 218 for f in sorted(abortconflicts):
219 219 warn = repo.ui.warn
220 220 if f in pathconflicts:
221 221 if repo.wvfs.isfileorlink(f):
222 222 warn(_(b"%s: untracked file conflicts with directory\n") % f)
223 223 else:
224 224 warn(_(b"%s: untracked directory conflicts with file\n") % f)
225 225 else:
226 226 warn(_(b"%s: untracked file differs\n") % f)
227 227 if abortconflicts:
228 228 raise error.Abort(
229 229 _(
230 230 b"untracked files in working directory "
231 231 b"differ from files in requested revision"
232 232 )
233 233 )
234 234
235 235 for f in sorted(warnconflicts):
236 236 if repo.wvfs.isfileorlink(f):
237 237 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
238 238 else:
239 239 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
240 240
241 241 for f, (m, args, msg) in pycompat.iteritems(actions):
242 242 if m == mergestatemod.ACTION_CREATED:
243 243 backup = (
244 244 f in fileconflicts
245 245 or f in pathconflicts
246 246 or any(p in pathconflicts for p in pathutil.finddirs(f))
247 247 )
248 248 (flags,) = args
249 249 actions[f] = (mergestatemod.ACTION_GET, (flags, backup), msg)
250 250
251 251
252 252 def _forgetremoved(wctx, mctx, branchmerge):
253 253 """
254 254 Forget removed files
255 255
256 256 If we're jumping between revisions (as opposed to merging), and if
257 257 neither the working directory nor the target rev has the file,
258 258 then we need to remove it from the dirstate, to prevent the
259 259 dirstate from listing the file when it is no longer in the
260 260 manifest.
261 261
262 262 If we're merging, and the other revision has removed a file
263 263 that is not present in the working directory, we need to mark it
264 264 as removed.
265 265 """
266 266
267 267 actions = {}
268 268 m = mergestatemod.ACTION_FORGET
269 269 if branchmerge:
270 270 m = mergestatemod.ACTION_REMOVE
271 271 for f in wctx.deleted():
272 272 if f not in mctx:
273 273 actions[f] = m, None, b"forget deleted"
274 274
275 275 if not branchmerge:
276 276 for f in wctx.removed():
277 277 if f not in mctx:
278 278 actions[f] = (
279 279 mergestatemod.ACTION_FORGET,
280 280 None,
281 281 b"forget removed",
282 282 )
283 283
284 284 return actions
285 285
286 286
287 287 def _checkcollision(repo, wmf, actions):
288 288 """
289 289 Check for case-folding collisions.
290 290 """
291 291 # If the repo is narrowed, filter out files outside the narrowspec.
292 292 narrowmatch = repo.narrowmatch()
293 293 if not narrowmatch.always():
294 294 pmmf = set(wmf.walk(narrowmatch))
295 295 if actions:
296 296 narrowactions = {}
297 297 for m, actionsfortype in pycompat.iteritems(actions):
298 298 narrowactions[m] = []
299 299 for (f, args, msg) in actionsfortype:
300 300 if narrowmatch(f):
301 301 narrowactions[m].append((f, args, msg))
302 302 actions = narrowactions
303 303 else:
304 304 # build provisional merged manifest up
305 305 pmmf = set(wmf)
306 306
307 307 if actions:
308 308 # KEEP and EXEC are no-op
309 309 for m in (
310 310 mergestatemod.ACTION_ADD,
311 311 mergestatemod.ACTION_ADD_MODIFIED,
312 312 mergestatemod.ACTION_FORGET,
313 313 mergestatemod.ACTION_GET,
314 314 mergestatemod.ACTION_CHANGED_DELETED,
315 315 mergestatemod.ACTION_DELETED_CHANGED,
316 316 ):
317 317 for f, args, msg in actions[m]:
318 318 pmmf.add(f)
319 319 for f, args, msg in actions[mergestatemod.ACTION_REMOVE]:
320 320 pmmf.discard(f)
321 321 for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
322 322 f2, flags = args
323 323 pmmf.discard(f2)
324 324 pmmf.add(f)
325 325 for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
326 326 pmmf.add(f)
327 327 for f, args, msg in actions[mergestatemod.ACTION_MERGE]:
328 328 f1, f2, fa, move, anc = args
329 329 if move:
330 330 pmmf.discard(f1)
331 331 pmmf.add(f)
332 332
333 333 # check case-folding collision in provisional merged manifest
334 334 foldmap = {}
335 335 for f in pmmf:
336 336 fold = util.normcase(f)
337 337 if fold in foldmap:
338 338 raise error.Abort(
339 339 _(b"case-folding collision between %s and %s")
340 340 % (f, foldmap[fold])
341 341 )
342 342 foldmap[fold] = f
343 343
344 344 # check case-folding of directories
345 345 foldprefix = unfoldprefix = lastfull = b''
346 346 for fold, f in sorted(foldmap.items()):
347 347 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
348 348 # the folded prefix matches but actual casing is different
349 349 raise error.Abort(
350 350 _(b"case-folding collision between %s and directory of %s")
351 351 % (lastfull, f)
352 352 )
353 353 foldprefix = fold + b'/'
354 354 unfoldprefix = f + b'/'
355 355 lastfull = f
356 356
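# Sketch of the failure mode guarded against above (hypothetical names): on
# a case-insensitive filesystem, a provisional merged manifest containing
# both b'README' and b'readme' folds to the same key and aborts:
#
#   _checkcollision(repo, wmf, actions)
#   # error.Abort: case-folding collision between README and readme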
357 357
358 358 def driverpreprocess(repo, ms, wctx, labels=None):
359 359 """run the preprocess step of the merge driver, if any
360 360
361 361 This is currently not implemented -- it's an extension point."""
362 362 return True
363 363
364 364
365 365 def driverconclude(repo, ms, wctx, labels=None):
366 366 """run the conclude step of the merge driver, if any
367 367
368 368 This is currently not implemented -- it's an extension point."""
369 369 return True
370 370
371 371
372 372 def _filesindirs(repo, manifest, dirs):
373 373 """
374 374 Generator that yields pairs of all the files in the manifest that are found
375 375 inside the directories listed in dirs, and which directory they are found
376 376 in.
377 377 """
378 378 for f in manifest:
379 379 for p in pathutil.finddirs(f):
380 380 if p in dirs:
381 381 yield f, p
382 382 break
383 383
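# Minimal sketch (hypothetical manifest contents): for a manifest holding
# b'a/b' and b'c', with dirs = {b'a'}, the generator yields one pair:
#
#   list(_filesindirs(repo, manifest, {b'a'}))  # -> [(b'a/b', b'a')]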
384 384
385 385 def checkpathconflicts(repo, wctx, mctx, actions):
386 386 """
387 387 Check if any actions introduce path conflicts in the repository, updating
388 388 actions to record or handle the path conflict accordingly.
389 389 """
390 390 mf = wctx.manifest()
391 391
392 392 # The set of local files that conflict with a remote directory.
393 393 localconflicts = set()
394 394
395 395 # The set of directories that conflict with a remote file, and so may cause
396 396 # conflicts if they still contain any files after the merge.
397 397 remoteconflicts = set()
398 398
399 399 # The set of directories that appear as both a file and a directory in the
400 400 # remote manifest. These indicate an invalid remote manifest, which
401 401 # can't be cleanly updated to.
402 402 invalidconflicts = set()
403 403
404 404 # The set of directories that contain files that are being created.
405 405 createdfiledirs = set()
406 406
407 407 # The set of files deleted by all the actions.
408 408 deletedfiles = set()
409 409
410 410 for f, (m, args, msg) in actions.items():
411 411 if m in (
412 412 mergestatemod.ACTION_CREATED,
413 413 mergestatemod.ACTION_DELETED_CHANGED,
414 414 mergestatemod.ACTION_MERGE,
415 415 mergestatemod.ACTION_CREATED_MERGE,
416 416 ):
417 417 # This action may create a new local file.
418 418 createdfiledirs.update(pathutil.finddirs(f))
419 419 if mf.hasdir(f):
420 420 # The file aliases a local directory. This might be ok if all
421 421 # the files in the local directory are being deleted. This
422 422 # will be checked once we know what all the deleted files are.
423 423 remoteconflicts.add(f)
424 424 # Track the names of all deleted files.
425 425 if m == mergestatemod.ACTION_REMOVE:
426 426 deletedfiles.add(f)
427 427 if m == mergestatemod.ACTION_MERGE:
428 428 f1, f2, fa, move, anc = args
429 429 if move:
430 430 deletedfiles.add(f1)
431 431 if m == mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL:
432 432 f2, flags = args
433 433 deletedfiles.add(f2)
434 434
435 435 # Check all directories that contain created files for path conflicts.
436 436 for p in createdfiledirs:
437 437 if p in mf:
438 438 if p in mctx:
439 439 # A file is in a directory which aliases both a local
440 440 # and a remote file. This is an internal inconsistency
441 441 # within the remote manifest.
442 442 invalidconflicts.add(p)
443 443 else:
444 444 # A file is in a directory which aliases a local file.
445 445 # We will need to rename the local file.
446 446 localconflicts.add(p)
447 447 if p in actions and actions[p][0] in (
448 448 mergestatemod.ACTION_CREATED,
449 449 mergestatemod.ACTION_DELETED_CHANGED,
450 450 mergestatemod.ACTION_MERGE,
451 451 mergestatemod.ACTION_CREATED_MERGE,
452 452 ):
453 453 # The file is in a directory which aliases a remote file.
454 454 # This is an internal inconsistency within the remote
455 455 # manifest.
456 456 invalidconflicts.add(p)
457 457
458 458 # Rename all local conflicting files that have not been deleted.
459 459 for p in localconflicts:
460 460 if p not in deletedfiles:
461 461 ctxname = bytes(wctx).rstrip(b'+')
462 462 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
463 463 porig = wctx[p].copysource() or p
464 464 actions[pnew] = (
465 465 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
466 466 (p, porig),
467 467 b'local path conflict',
468 468 )
469 469 actions[p] = (
470 470 mergestatemod.ACTION_PATH_CONFLICT,
471 471 (pnew, b'l'),
472 472 b'path conflict',
473 473 )
474 474
475 475 if remoteconflicts:
476 476 # Check if all files in the conflicting directories have been removed.
477 477 ctxname = bytes(mctx).rstrip(b'+')
478 478 for f, p in _filesindirs(repo, mf, remoteconflicts):
479 479 if f not in deletedfiles:
480 480 m, args, msg = actions[p]
481 481 pnew = util.safename(p, ctxname, wctx, set(actions.keys()))
482 482 if m in (
483 483 mergestatemod.ACTION_DELETED_CHANGED,
484 484 mergestatemod.ACTION_MERGE,
485 485 ):
486 486 # Action was merge, just update target.
487 487 actions[pnew] = (m, args, msg)
488 488 else:
489 489 # Action was create, change to renamed get action.
490 490 fl = args[0]
491 491 actions[pnew] = (
492 492 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
493 493 (p, fl),
494 494 b'remote path conflict',
495 495 )
496 496 actions[p] = (
497 497 mergestatemod.ACTION_PATH_CONFLICT,
498 498 (pnew, mergestatemod.ACTION_REMOVE),
499 499 b'path conflict',
500 500 )
501 501 remoteconflicts.remove(p)
502 502 break
503 503
504 504 if invalidconflicts:
505 505 for p in invalidconflicts:
506 506 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
507 507 raise error.Abort(_(b"destination manifest contains path conflicts"))
508 508
509 509
510 510 def _filternarrowactions(narrowmatch, branchmerge, actions):
511 511 """
512 512 Filters out actions that can be ignored because the repo is narrowed.
513 513
514 514 Raise an exception if the merge cannot be completed because the repo is
515 515 narrowed.
516 516 """
517 517 nooptypes = {b'k'} # TODO: handle with nonconflicttypes
518 518 nonconflicttypes = set(b'a am c cm f g gs r e'.split())
519 519 # We mutate the items in the dict during iteration, so iterate
520 520 # over a copy.
521 521 for f, action in list(actions.items()):
522 522 if narrowmatch(f):
523 523 pass
524 524 elif not branchmerge:
525 525 del actions[f] # just updating, ignore changes outside clone
526 526 elif action[0] in nooptypes:
527 527 del actions[f] # merge does not affect file
528 528 elif action[0] in nonconflicttypes:
529 529 raise error.Abort(
530 530 _(
531 531 b'merge affects file \'%s\' outside narrow, '
532 532 b'which is not yet supported'
533 533 )
534 534 % f,
535 535 hint=_(b'merging in the other direction may work'),
536 536 )
537 537 else:
538 538 raise error.Abort(
539 539 _(b'conflict in file \'%s\' is outside narrow clone') % f
540 540 )
541 541
542 542
543 543 class mergeresult(object):
544 544 '''An object representing the result of merging manifests.
545 545
546 546 It has information about what actions need to be performed on the dirstate,
547 547 the mapping of divergent renames, and other such cases. '''
548 548
549 549 def __init__(self, actions, diverge, renamedelete, commitinfo):
550 550 """
551 551 actions: dict with filenames as keys and action-related info as values
552 552 diverge: mapping of source name -> list of dest name for
553 553 divergent renames
554 554 renamedelete: mapping of source name -> list of destinations for files
555 555 deleted on one side and renamed on other.
556 556 commitinfo: dict containing data which should be used on commit;
557 557 it contains a filename -> info mapping
558 558 """
559 559
560 560 self._actions = actions
561 561 self._diverge = diverge
562 562 self._renamedelete = renamedelete
563 563 self._commitinfo = commitinfo
564 564
565 565 @property
566 566 def actions(self):
567 567 return self._actions
568 568
569 569 @property
570 570 def diverge(self):
571 571 return self._diverge
572 572
573 573 @property
574 574 def renamedelete(self):
575 575 return self._renamedelete
576 576
577 577 @property
578 578 def commitinfo(self):
579 579 return self._commitinfo
580 580
581 581 def setactions(self, actions):
582 582 self._actions = actions
583 583
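# Minimal consumer sketch (assuming an already-computed mresult): callers
# such as calculateupdates() read the properties and may install a filtered
# action mapping afterwards:
#
#   for f, (m, args, msg) in pycompat.iteritems(mresult.actions):
#       ...
#   mresult.setactions(prunedactions)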
584 584
585 585 def manifestmerge(
586 586 repo,
587 587 wctx,
588 588 p2,
589 589 pa,
590 590 branchmerge,
591 591 force,
592 592 matcher,
593 593 acceptremote,
594 594 followcopies,
595 595 forcefulldiff=False,
596 596 ):
597 597 """
598 598 Merge wctx and p2 with ancestor pa and generate merge action list
599 599
600 600 branchmerge and force are as passed in to update
601 601 matcher = matcher to filter file lists
602 602 acceptremote = accept the incoming changes without prompting
603 603
604 604 Returns an object of the mergeresult class
605 605 """
606 606 if matcher is not None and matcher.always():
607 607 matcher = None
608 608
609 609 # manifests fetched in order are going to be faster, so prime the caches
610 610 [
611 611 x.manifest()
612 612 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
613 613 ]
614 614
615 615 branch_copies1 = copies.branch_copies()
616 616 branch_copies2 = copies.branch_copies()
617 617 diverge = {}
618 618 # information from merge which is needed at commit time
619 619 # for example choosing filelog of which parent to commit
620 620 # TODO: use specific constants in future for this mapping
621 621 commitinfo = {}
622 622 if followcopies:
623 623 branch_copies1, branch_copies2, diverge = copies.mergecopies(
624 624 repo, wctx, p2, pa
625 625 )
626 626
627 627 boolbm = pycompat.bytestr(bool(branchmerge))
628 628 boolf = pycompat.bytestr(bool(force))
629 629 boolm = pycompat.bytestr(bool(matcher))
630 630 repo.ui.note(_(b"resolving manifests\n"))
631 631 repo.ui.debug(
632 632 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
633 633 )
634 634 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
635 635
636 636 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
637 637 copied1 = set(branch_copies1.copy.values())
638 638 copied1.update(branch_copies1.movewithdir.values())
639 639 copied2 = set(branch_copies2.copy.values())
640 640 copied2.update(branch_copies2.movewithdir.values())
641 641
642 642 if b'.hgsubstate' in m1 and wctx.rev() is None:
643 643 # Check whether sub state is modified, and overwrite the manifest
644 644 # to flag the change. If wctx is a committed revision, we shouldn't
645 645 # care for the dirty state of the working directory.
646 646 if any(wctx.sub(s).dirty() for s in wctx.substate):
647 647 m1[b'.hgsubstate'] = modifiednodeid
648 648
649 649 # Don't use m2-vs-ma optimization if:
650 650 # - ma is the same as m1 or m2, which we're just going to diff again later
651 651 # - The caller specifically asks for a full diff, which is useful during bid
652 652 # merge.
653 653 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
654 654 # Identify which files are relevant to the merge, so we can limit the
655 655 # total m1-vs-m2 diff to just those files. This has significant
656 656 # performance benefits in large repositories.
657 657 relevantfiles = set(ma.diff(m2).keys())
658 658
659 659 # For copied and moved files, we need to add the source file too.
660 660 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
661 661 if copyvalue in relevantfiles:
662 662 relevantfiles.add(copykey)
663 663 for movedirkey in branch_copies1.movewithdir:
664 664 relevantfiles.add(movedirkey)
665 665 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
666 666 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
667 667
668 668 diff = m1.diff(m2, match=matcher)
669 669
670 670 actions = {}
671 671 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
672 672 if n1 and n2: # file exists on both local and remote side
673 673 if f not in ma:
674 674 # TODO: what if they're renamed from different sources?
675 675 fa = branch_copies1.copy.get(
676 676 f, None
677 677 ) or branch_copies2.copy.get(f, None)
678 678 if fa is not None:
679 679 actions[f] = (
680 680 mergestatemod.ACTION_MERGE,
681 681 (f, f, fa, False, pa.node()),
682 682 b'both renamed from %s' % fa,
683 683 )
684 684 else:
685 685 actions[f] = (
686 686 mergestatemod.ACTION_MERGE,
687 687 (f, f, None, False, pa.node()),
688 688 b'both created',
689 689 )
690 690 else:
691 691 a = ma[f]
692 692 fla = ma.flags(f)
693 693 nol = b'l' not in fl1 + fl2 + fla
694 694 if n2 == a and fl2 == fla:
695 695 actions[f] = (
696 696 mergestatemod.ACTION_KEEP,
697 697 (),
698 698 b'remote unchanged',
699 699 )
700 700 elif n1 == a and fl1 == fla: # local unchanged - use remote
701 701 if n1 == n2: # optimization: keep local content
702 702 actions[f] = (
703 703 mergestatemod.ACTION_EXEC,
704 704 (fl2,),
705 705 b'update permissions',
706 706 )
707 707 else:
708 708 actions[f] = (
709 709 mergestatemod.ACTION_GET_OTHER_AND_STORE
710 710 if branchmerge
711 711 else mergestatemod.ACTION_GET,
712 712 (fl2, False),
713 713 b'remote is newer',
714 714 )
715 715 if branchmerge:
716 716 commitinfo[f] = b'other'
717 717 elif nol and n2 == a: # remote only changed 'x'
718 718 actions[f] = (
719 719 mergestatemod.ACTION_EXEC,
720 720 (fl2,),
721 721 b'update permissions',
722 722 )
723 723 elif nol and n1 == a: # local only changed 'x'
724 724 actions[f] = (
725 725 mergestatemod.ACTION_GET_OTHER_AND_STORE
726 726 if branchmerge
727 727 else mergestatemod.ACTION_GET,
728 728 (fl1, False),
729 729 b'remote is newer',
730 730 )
731 731 if branchmerge:
732 732 commitinfo[f] = b'other'
733 733 else: # both changed something
734 734 actions[f] = (
735 735 mergestatemod.ACTION_MERGE,
736 736 (f, f, f, False, pa.node()),
737 737 b'versions differ',
738 738 )
739 739 elif n1: # file exists only on local side
740 740 if f in copied2:
741 741 pass # we'll deal with it on m2 side
742 742 elif (
743 743 f in branch_copies1.movewithdir
744 744 ): # directory rename, move local
745 745 f2 = branch_copies1.movewithdir[f]
746 746 if f2 in m2:
747 747 actions[f2] = (
748 748 mergestatemod.ACTION_MERGE,
749 749 (f, f2, None, True, pa.node()),
750 750 b'remote directory rename, both created',
751 751 )
752 752 else:
753 753 actions[f2] = (
754 754 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
755 755 (f, fl1),
756 756 b'remote directory rename - move from %s' % f,
757 757 )
758 758 elif f in branch_copies1.copy:
759 759 f2 = branch_copies1.copy[f]
760 760 actions[f] = (
761 761 mergestatemod.ACTION_MERGE,
762 762 (f, f2, f2, False, pa.node()),
763 763 b'local copied/moved from %s' % f2,
764 764 )
765 765 elif f in ma: # clean, a different, no remote
766 766 if n1 != ma[f]:
767 767 if acceptremote:
768 768 actions[f] = (
769 769 mergestatemod.ACTION_REMOVE,
770 770 None,
771 771 b'remote delete',
772 772 )
773 773 else:
774 774 actions[f] = (
775 775 mergestatemod.ACTION_CHANGED_DELETED,
776 776 (f, None, f, False, pa.node()),
777 777 b'prompt changed/deleted',
778 778 )
779 779 elif n1 == addednodeid:
780 780 # This file was locally added. We should forget it instead of
781 781 # deleting it.
782 782 actions[f] = (
783 783 mergestatemod.ACTION_FORGET,
784 784 None,
785 785 b'remote deleted',
786 786 )
787 787 else:
788 788 actions[f] = (
789 789 mergestatemod.ACTION_REMOVE,
790 790 None,
791 791 b'other deleted',
792 792 )
793 793 elif n2: # file exists only on remote side
794 794 if f in copied1:
795 795 pass # we'll deal with it on m1 side
796 796 elif f in branch_copies2.movewithdir:
797 797 f2 = branch_copies2.movewithdir[f]
798 798 if f2 in m1:
799 799 actions[f2] = (
800 800 mergestatemod.ACTION_MERGE,
801 801 (f2, f, None, False, pa.node()),
802 802 b'local directory rename, both created',
803 803 )
804 804 else:
805 805 actions[f2] = (
806 806 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
807 807 (f, fl2),
808 808 b'local directory rename - get from %s' % f,
809 809 )
810 810 elif f in branch_copies2.copy:
811 811 f2 = branch_copies2.copy[f]
812 812 if f2 in m2:
813 813 actions[f] = (
814 814 mergestatemod.ACTION_MERGE,
815 815 (f2, f, f2, False, pa.node()),
816 816 b'remote copied from %s' % f2,
817 817 )
818 818 else:
819 819 actions[f] = (
820 820 mergestatemod.ACTION_MERGE,
821 821 (f2, f, f2, True, pa.node()),
822 822 b'remote moved from %s' % f2,
823 823 )
824 824 elif f not in ma:
825 825 # local unknown, remote created: the logic is described by the
826 826 # following table:
827 827 #
828 828 # force branchmerge different | action
829 829 # n * * | create
830 830 # y n * | create
831 831 # y y n | create
832 832 # y y y | merge
833 833 #
834 834 # Checking whether the files are different is expensive, so we
835 835 # don't do that when we can avoid it.
836 836 if not force:
837 837 actions[f] = (
838 838 mergestatemod.ACTION_CREATED,
839 839 (fl2,),
840 840 b'remote created',
841 841 )
842 842 elif not branchmerge:
843 843 actions[f] = (
844 844 mergestatemod.ACTION_CREATED,
845 845 (fl2,),
846 846 b'remote created',
847 847 )
848 848 else:
849 849 actions[f] = (
850 850 mergestatemod.ACTION_CREATED_MERGE,
851 851 (fl2, pa.node()),
852 852 b'remote created, get or merge',
853 853 )
854 854 elif n2 != ma[f]:
855 855 df = None
856 856 for d in branch_copies1.dirmove:
857 857 if f.startswith(d):
858 858 # new file added in a directory that was moved
859 859 df = branch_copies1.dirmove[d] + f[len(d) :]
860 860 break
861 861 if df is not None and df in m1:
862 862 actions[df] = (
863 863 mergestatemod.ACTION_MERGE,
864 864 (df, f, f, False, pa.node()),
865 865 b'local directory rename - respect move '
866 866 b'from %s' % f,
867 867 )
868 868 elif acceptremote:
869 869 actions[f] = (
870 870 mergestatemod.ACTION_CREATED,
871 871 (fl2,),
872 872 b'remote recreating',
873 873 )
874 874 else:
875 875 actions[f] = (
876 876 mergestatemod.ACTION_DELETED_CHANGED,
877 877 (None, f, f, False, pa.node()),
878 878 b'prompt deleted/changed',
879 879 )
880 880
881 881 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
882 882 # If we are merging, look for path conflicts.
883 883 checkpathconflicts(repo, wctx, p2, actions)
884 884
885 885 narrowmatch = repo.narrowmatch()
886 886 if not narrowmatch.always():
887 887 # Updates "actions" in place
888 888 _filternarrowactions(narrowmatch, branchmerge, actions)
889 889
890 890 renamedelete = branch_copies1.renamedelete
891 891 renamedelete.update(branch_copies2.renamedelete)
892 892
893 893 return mergeresult(actions, diverge, renamedelete, commitinfo)
894 894
895 895
896 896 def _resolvetrivial(repo, wctx, mctx, ancestor, actions):
897 897 """Resolves false conflicts where the nodeid changed but the content
898 898 remained the same."""
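# Illustrative effect (hypothetical file name): a changed/deleted prompt
# whose local content still matches the ancestor degrades to a plain
# remove, while the symmetric deleted/changed case is dropped entirely:
#
#   _resolvetrivial(repo, wctx, mctx, ancestor, actions)
#   # actions[b'f'] == (mergestatemod.ACTION_REMOVE, None, b'prompt same')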
899 899 # We force a copy of actions.items() because we're going to mutate
900 900 # actions as we resolve trivial conflicts.
901 901 for f, (m, args, msg) in list(actions.items()):
902 902 if (
903 903 m == mergestatemod.ACTION_CHANGED_DELETED
904 904 and f in ancestor
905 905 and not wctx[f].cmp(ancestor[f])
906 906 ):
907 907 # local did change but ended up with same content
908 908 actions[f] = mergestatemod.ACTION_REMOVE, None, b'prompt same'
909 909 elif (
910 910 m == mergestatemod.ACTION_DELETED_CHANGED
911 911 and f in ancestor
912 912 and not mctx[f].cmp(ancestor[f])
913 913 ):
914 914 # remote did change but ended up with same content
915 915 del actions[f] # don't get = keep local deleted
916 916
917 917
918 918 def calculateupdates(
919 919 repo,
920 920 wctx,
921 921 mctx,
922 922 ancestors,
923 923 branchmerge,
924 924 force,
925 925 acceptremote,
926 926 followcopies,
927 927 matcher=None,
928 928 mergeforce=False,
929 929 ):
930 930 """
931 931 Calculate the actions needed to merge mctx into wctx using ancestors
932 932
933 933 Uses manifestmerge() to merge manifests and get the list of actions required
934 934 for merging two manifests. If there are multiple ancestors, uses bid
935 935 merge if enabled.
936 936
937 937 Also filters out actions which are not required if the repository is sparse.
938 938
939 939 Returns a mergeresult object, same as manifestmerge().
940 940 """
941 941 # Avoid cycle.
942 942 from . import sparse
943 943
944 944 if len(ancestors) == 1: # default
945 945 mresult = manifestmerge(
946 946 repo,
947 947 wctx,
948 948 mctx,
949 949 ancestors[0],
950 950 branchmerge,
951 951 force,
952 952 matcher,
953 953 acceptremote,
954 954 followcopies,
955 955 )
956 956 _checkunknownfiles(repo, wctx, mctx, force, mresult.actions, mergeforce)
957 957
958 958 else: # only when merge.preferancestor=* - the default
959 959 repo.ui.note(
960 960 _(b"note: merging %s and %s using bids from ancestors %s\n")
961 961 % (
962 962 wctx,
963 963 mctx,
964 964 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
965 965 )
966 966 )
967 967
968 968 # Call for bids
969 969 fbids = (
970 970 {}
971 971 ) # mapping filename to bids (action method to list of actions)
972 972 diverge, renamedelete = None, None
973 973 for ancestor in ancestors:
974 974 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
975 975 mresult1 = manifestmerge(
976 976 repo,
977 977 wctx,
978 978 mctx,
979 979 ancestor,
980 980 branchmerge,
981 981 force,
982 982 matcher,
983 983 acceptremote,
984 984 followcopies,
985 985 forcefulldiff=True,
986 986 )
987 987 _checkunknownfiles(
988 988 repo, wctx, mctx, force, mresult1.actions, mergeforce
989 989 )
990 990
991 991 # Track the shortest set of warnings on the theory that bid
992 992 # merge will correctly incorporate more information
993 993 if diverge is None or len(mresult1.diverge) < len(diverge):
994 994 diverge = mresult1.diverge
995 995 if renamedelete is None or len(renamedelete) < len(
996 996 mresult1.renamedelete
997 997 ):
998 998 renamedelete = mresult1.renamedelete
999 999
1000 1000 for f, a in sorted(pycompat.iteritems(mresult1.actions)):
1001 1001 m, args, msg = a
1002 1002 if m == mergestatemod.ACTION_GET_OTHER_AND_STORE:
1003 1003 m = mergestatemod.ACTION_GET
1004 1004 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1005 1005 if f in fbids:
1006 1006 d = fbids[f]
1007 1007 if m in d:
1008 1008 d[m].append(a)
1009 1009 else:
1010 1010 d[m] = [a]
1011 1011 else:
1012 1012 fbids[f] = {m: [a]}
1013 1013
1014 1014 # Pick the best bid for each file
1015 1015 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1016 1016 actions = {}
1017 1017 for f, bids in sorted(fbids.items()):
1018 1018 # bids is a mapping from action method to list of actions
1019 1019 # Consensus?
1020 1020 if len(bids) == 1: # all bids are the same kind of method
1021 1021 m, l = list(bids.items())[0]
1022 1022 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1023 1023 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1024 1024 actions[f] = l[0]
1025 1025 continue
1026 1026 # If keep is an option, just do it.
1027 1027 if mergestatemod.ACTION_KEEP in bids:
1028 1028 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1029 1029 actions[f] = bids[mergestatemod.ACTION_KEEP][0]
1030 1030 continue
1031 1031 # If there are gets and they all agree [how could they not?], do it.
1032 1032 if mergestatemod.ACTION_GET in bids:
1033 1033 ga0 = bids[mergestatemod.ACTION_GET][0]
1034 1034 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
1035 1035 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1036 1036 actions[f] = ga0
1037 1037 continue
1038 1038 # TODO: Consider other simple actions such as mode changes
1039 1039 # Handle inefficient democrazy.
1040 1040 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1041 1041 for m, l in sorted(bids.items()):
1042 1042 for _f, args, msg in l:
1043 1043 repo.ui.note(b' %s -> %s\n' % (msg, m))
1044 1044 # Pick an arbitrary action. TODO: Instead, prompt user when resolving
1045 1045 m, l = list(bids.items())[0]
1046 1046 repo.ui.warn(
1047 1047 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1048 1048 )
1049 1049 actions[f] = l[0]
1050 1050 continue
1051 1051 repo.ui.note(_(b'end of auction\n\n'))
1052 1052 # TODO: think about commitinfo when bid merge is used
1053 1053 mresult = mergeresult(actions, diverge, renamedelete, {})
1054 1054
1055 1055 if wctx.rev() is None:
1056 1056 fractions = _forgetremoved(wctx, mctx, branchmerge)
1057 1057 mresult.actions.update(fractions)
1058 1058
1059 1059 prunedactions = sparse.filterupdatesactions(
1060 1060 repo, wctx, mctx, branchmerge, mresult.actions
1061 1061 )
1062 1062 _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult.actions)
1063 1063
1064 1064 mresult.setactions(prunedactions)
1065 1065 return mresult
1066 1066
1067 1067
1068 1068 def _getcwd():
1069 1069 try:
1070 1070 return encoding.getcwd()
1071 1071 except OSError as err:
1072 1072 if err.errno == errno.ENOENT:
1073 1073 return None
1074 1074 raise
1075 1075
1076 1076
1077 1077 def batchremove(repo, wctx, actions):
1078 1078 """apply removes to the working directory
1079 1079
1080 1080 yields tuples for progress updates
1081 1081 """
1082 1082 verbose = repo.ui.verbose
1083 1083 cwd = _getcwd()
1084 1084 i = 0
1085 1085 for f, args, msg in actions:
1086 1086 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1087 1087 if verbose:
1088 1088 repo.ui.note(_(b"removing %s\n") % f)
1089 1089 wctx[f].audit()
1090 1090 try:
1091 1091 wctx[f].remove(ignoremissing=True)
1092 1092 except OSError as inst:
1093 1093 repo.ui.warn(
1094 1094 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1095 1095 )
1096 1096 if i == 100:
1097 1097 yield i, f
1098 1098 i = 0
1099 1099 i += 1
1100 1100 if i > 0:
1101 1101 yield i, f
1102 1102
1103 1103 if cwd and not _getcwd():
1104 1104 # cwd was removed in the course of removing files; print a helpful
1105 1105 # warning.
1106 1106 repo.ui.warn(
1107 1107 _(
1108 1108 b"current directory was removed\n"
1109 1109 b"(consider changing to repo root: %s)\n"
1110 1110 )
1111 1111 % repo.root
1112 1112 )
1113 1113
1114 1114
1115 1115 def batchget(repo, mctx, wctx, wantfiledata, actions):
1116 1116 """apply gets to the working directory
1117 1117
1118 1118 mctx is the context to get from
1119 1119
1120 1120 Yields arbitrarily many (False, tuple) for progress updates, followed by
1121 1121 exactly one (True, filedata). When wantfiledata is false, filedata is an
1122 1122 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1123 1123 mtime) of the file f written for each action.
1124 1124 """
1125 1125 filedata = {}
1126 1126 verbose = repo.ui.verbose
1127 1127 fctx = mctx.filectx
1128 1128 ui = repo.ui
1129 1129 i = 0
1130 1130 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1131 1131 for f, (flags, backup), msg in actions:
1132 1132 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1133 1133 if verbose:
1134 1134 repo.ui.note(_(b"getting %s\n") % f)
1135 1135
1136 1136 if backup:
1137 1137 # If a file or directory exists with the same name, back that
1138 1138 # up. Otherwise, look to see if there is a file that conflicts
1139 1139 # with a directory this file is in, and if so, back that up.
1140 1140 conflicting = f
1141 1141 if not repo.wvfs.lexists(f):
1142 1142 for p in pathutil.finddirs(f):
1143 1143 if repo.wvfs.isfileorlink(p):
1144 1144 conflicting = p
1145 1145 break
1146 1146 if repo.wvfs.lexists(conflicting):
1147 1147 orig = scmutil.backuppath(ui, repo, conflicting)
1148 1148 util.rename(repo.wjoin(conflicting), orig)
1149 1149 wfctx = wctx[f]
1150 1150 wfctx.clearunknown()
1151 1151 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1152 1152 size = wfctx.write(
1153 1153 fctx(f).data(),
1154 1154 flags,
1155 1155 backgroundclose=True,
1156 1156 atomictemp=atomictemp,
1157 1157 )
1158 1158 if wantfiledata:
1159 1159 s = wfctx.lstat()
1160 1160 mode = s.st_mode
1161 1161 mtime = s[stat.ST_MTIME]
1162 1162 filedata[f] = (mode, size, mtime) # for dirstate.normal
1163 1163 if i == 100:
1164 1164 yield False, (i, f)
1165 1165 i = 0
1166 1166 i += 1
1167 1167 if i > 0:
1168 1168 yield False, (i, f)
1169 1169 yield True, filedata
1170 1170
1171 1171
1172 1172 def _prefetchfiles(repo, ctx, actions):
1173 1173 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1174 1174 of merge actions. ``ctx`` is the context being merged in."""
1175 1175
1176 1176 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1177 1177 # don't touch the context to be merged in. 'cd' is skipped, because
1178 1178 # changed/deleted never resolves to something from the remote side.
1179 1179 oplist = [
1180 1180 actions[a]
1181 1181 for a in (
1182 1182 mergestatemod.ACTION_GET,
1183 1183 mergestatemod.ACTION_DELETED_CHANGED,
1184 1184 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1185 1185 mergestatemod.ACTION_MERGE,
1186 1186 )
1187 1187 ]
1188 1188 prefetch = scmutil.prefetchfiles
1189 1189 matchfiles = scmutil.matchfiles
1190 1190 prefetch(
1191 1191 repo,
1192 1192 [
1193 1193 (
1194 1194 ctx.rev(),
1195 1195 matchfiles(
1196 1196 repo, [f for sublist in oplist for f, args, msg in sublist]
1197 1197 ),
1198 1198 )
1199 1199 ],
1200 1200 )
1201 1201
1202 1202
1203 1203 @attr.s(frozen=True)
1204 1204 class updateresult(object):
1205 1205 updatedcount = attr.ib()
1206 1206 mergedcount = attr.ib()
1207 1207 removedcount = attr.ib()
1208 1208 unresolvedcount = attr.ib()
1209 1209
1210 1210 def isempty(self):
1211 1211 return not (
1212 1212 self.updatedcount
1213 1213 or self.mergedcount
1214 1214 or self.removedcount
1215 1215 or self.unresolvedcount
1216 1216 )
1217 1217
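# Usage sketch: the four counts are attrs, and isempty() reports a no-op
# update:
#
#   stats = updateresult(1, 0, 0, 0)
#   stats.isempty()  # -> False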
1218 1218
1219 1219 def emptyactions():
1220 1220 """create an actions dict, to be populated and passed to applyupdates()"""
1221 1221 return {
1222 1222 m: []
1223 1223 for m in (
1224 1224 mergestatemod.ACTION_ADD,
1225 1225 mergestatemod.ACTION_ADD_MODIFIED,
1226 1226 mergestatemod.ACTION_FORGET,
1227 1227 mergestatemod.ACTION_GET,
1228 1228 mergestatemod.ACTION_CHANGED_DELETED,
1229 1229 mergestatemod.ACTION_DELETED_CHANGED,
1230 1230 mergestatemod.ACTION_REMOVE,
1231 1231 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1232 1232 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1233 1233 mergestatemod.ACTION_MERGE,
1234 1234 mergestatemod.ACTION_EXEC,
1235 1235 mergestatemod.ACTION_KEEP,
1236 1236 mergestatemod.ACTION_PATH_CONFLICT,
1237 1237 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
1238 1238 mergestatemod.ACTION_GET_OTHER_AND_STORE,
1239 1239 )
1240 1240 }
1241 1241
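# Usage sketch (hypothetical file): populate one of the pre-created lists
# and hand the dict to applyupdates():
#
#   actions = emptyactions()
#   actions[mergestatemod.ACTION_GET].append(
#       (b'f', (b'', False), b'remote created')
#   )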
1242 1242
1243 1243 def applyupdates(
1244 repo, actions, wctx, mctx, overwrite, wantfiledata, labels=None
1244 repo,
1245 actions,
1246 wctx,
1247 mctx,
1248 overwrite,
1249 wantfiledata,
1250 labels=None,
1251 commitinfo=None,
1245 1252 ):
1246 1253 """apply the merge action list to the working directory
1247 1254
1248 1255 wctx is the working copy context
1249 1256 mctx is the context to be merged into the working copy
1257 commitinfo is a mapping of information which needs to be stored somewhere
1258 (probably mergestate) so that it can be used at commit time.
1250 1259
1251 1260 Return a tuple of (counts, filedata), where counts is a tuple
1252 1261 (updated, merged, removed, unresolved) that describes how many
1253 1262 files were affected by the update, and filedata is as described in
1254 1263 batchget.
1255 1264 """
1256 1265
1257 1266 _prefetchfiles(repo, mctx, actions)
1258 1267
1259 1268 updated, merged, removed = 0, 0, 0
1260 1269 ms = mergestatemod.mergestate.clean(
1261 1270 repo, wctx.p1().node(), mctx.node(), labels
1262 1271 )
1263 1272
1273 if commitinfo is None:
1274 commitinfo = {}
1275
1276 for f, op in pycompat.iteritems(commitinfo):
1277 # the other side of the filenode was chosen while merging, store this in
1278 # mergestate so that it can be reused on commit
1279 if op == b'other':
1280 ms.addmergedother(f)
1281
1264 1282 # add ACTION_GET_OTHER_AND_STORE to mergestate
1265 1283 for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]:
1266 1284 ms.addmergedother(e[0])
1267 1285
1268 1286 moves = []
1269 1287 for m, l in actions.items():
1270 1288 l.sort()
1271 1289
1272 1290 # 'cd' and 'dc' actions are treated like other merge conflicts
1273 1291 mergeactions = sorted(actions[mergestatemod.ACTION_CHANGED_DELETED])
1274 1292 mergeactions.extend(sorted(actions[mergestatemod.ACTION_DELETED_CHANGED]))
1275 1293 mergeactions.extend(actions[mergestatemod.ACTION_MERGE])
1276 1294 for f, args, msg in mergeactions:
1277 1295 f1, f2, fa, move, anc = args
1278 1296 if f == b'.hgsubstate': # merged internally
1279 1297 continue
1280 1298 if f1 is None:
1281 1299 fcl = filemerge.absentfilectx(wctx, fa)
1282 1300 else:
1283 1301 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1284 1302 fcl = wctx[f1]
1285 1303 if f2 is None:
1286 1304 fco = filemerge.absentfilectx(mctx, fa)
1287 1305 else:
1288 1306 fco = mctx[f2]
1289 1307 actx = repo[anc]
1290 1308 if fa in actx:
1291 1309 fca = actx[fa]
1292 1310 else:
1293 1311 # TODO: move to absentfilectx
1294 1312 fca = repo.filectx(f1, fileid=nullrev)
1295 1313 ms.add(fcl, fco, fca, f)
1296 1314 if f1 != f and move:
1297 1315 moves.append(f1)
1298 1316
1299 1317 # remove renamed files after safely stored
1300 1318 for f in moves:
1301 1319 if wctx[f].lexists():
1302 1320 repo.ui.debug(b"removing %s\n" % f)
1303 1321 wctx[f].audit()
1304 1322 wctx[f].remove()
1305 1323
1306 1324 numupdates = sum(
1307 1325 len(l) for m, l in actions.items() if m != mergestatemod.ACTION_KEEP
1308 1326 )
1309 1327 progress = repo.ui.makeprogress(
1310 1328 _(b'updating'), unit=_(b'files'), total=numupdates
1311 1329 )
1312 1330
1313 1331 if [
1314 1332 a
1315 1333 for a in actions[mergestatemod.ACTION_REMOVE]
1316 1334 if a[0] == b'.hgsubstate'
1317 1335 ]:
1318 1336 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1319 1337
1320 1338 # record path conflicts
1321 1339 for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT]:
1322 1340 f1, fo = args
1323 1341 s = repo.ui.status
1324 1342 s(
1325 1343 _(
1326 1344 b"%s: path conflict - a file or link has the same name as a "
1327 1345 b"directory\n"
1328 1346 )
1329 1347 % f
1330 1348 )
1331 1349 if fo == b'l':
1332 1350 s(_(b"the local file has been renamed to %s\n") % f1)
1333 1351 else:
1334 1352 s(_(b"the remote file has been renamed to %s\n") % f1)
1335 1353 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1336 1354 ms.addpathconflict(f, f1, fo)
1337 1355 progress.increment(item=f)
1338 1356
1339 1357 # When merging in-memory, we can't support worker processes, so set the
1340 1358 # per-item cost at 0 in that case.
1341 1359 cost = 0 if wctx.isinmemory() else 0.001
1342 1360
1343 1361 # remove in parallel (must come before resolving path conflicts and getting)
1344 1362 prog = worker.worker(
1345 1363 repo.ui,
1346 1364 cost,
1347 1365 batchremove,
1348 1366 (repo, wctx),
1349 1367 actions[mergestatemod.ACTION_REMOVE],
1350 1368 )
1351 1369 for i, item in prog:
1352 1370 progress.increment(step=i, item=item)
1353 1371 removed = len(actions[mergestatemod.ACTION_REMOVE])
1354 1372
1355 1373 # resolve path conflicts (must come before getting)
1356 1374 for f, args, msg in actions[mergestatemod.ACTION_PATH_CONFLICT_RESOLVE]:
1357 1375 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1358 1376 (f0, origf0) = args
1359 1377 if wctx[f0].lexists():
1360 1378 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1361 1379 wctx[f].audit()
1362 1380 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1363 1381 wctx[f0].remove()
1364 1382 progress.increment(item=f)
1365 1383
1366 1384 # get in parallel.
1367 1385 threadsafe = repo.ui.configbool(
1368 1386 b'experimental', b'worker.wdir-get-thread-safe'
1369 1387 )
1370 1388 prog = worker.worker(
1371 1389 repo.ui,
1372 1390 cost,
1373 1391 batchget,
1374 1392 (repo, mctx, wctx, wantfiledata),
1375 1393 actions[mergestatemod.ACTION_GET],
1376 1394 threadsafe=threadsafe,
1377 1395 hasretval=True,
1378 1396 )
1379 1397 getfiledata = {}
1380 1398 for final, res in prog:
1381 1399 if final:
1382 1400 getfiledata = res
1383 1401 else:
1384 1402 i, item = res
1385 1403 progress.increment(step=i, item=item)
1386 1404 updated = len(actions[mergestatemod.ACTION_GET])
1387 1405
1388 1406 if [a for a in actions[mergestatemod.ACTION_GET] if a[0] == b'.hgsubstate']:
1389 1407 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1390 1408
1391 1409 # forget (manifest only, just log it) (must come first)
1392 1410 for f, args, msg in actions[mergestatemod.ACTION_FORGET]:
1393 1411 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1394 1412 progress.increment(item=f)
1395 1413
1396 1414 # re-add (manifest only, just log it)
1397 1415 for f, args, msg in actions[mergestatemod.ACTION_ADD]:
1398 1416 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1399 1417 progress.increment(item=f)
1400 1418
1401 1419 # re-add/mark as modified (manifest only, just log it)
1402 1420 for f, args, msg in actions[mergestatemod.ACTION_ADD_MODIFIED]:
1403 1421 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1404 1422 progress.increment(item=f)
1405 1423
1406 1424 # keep (noop, just log it)
1407 1425 for f, args, msg in actions[mergestatemod.ACTION_KEEP]:
1408 1426 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1409 1427 # no progress
1410 1428
1411 1429 # directory rename, move local
1412 1430 for f, args, msg in actions[mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]:
1413 1431 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1414 1432 progress.increment(item=f)
1415 1433 f0, flags = args
1416 1434 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1417 1435 wctx[f].audit()
1418 1436 wctx[f].write(wctx.filectx(f0).data(), flags)
1419 1437 wctx[f0].remove()
1420 1438 updated += 1
1421 1439
1422 1440 # local directory rename, get
1423 1441 for f, args, msg in actions[mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]:
1424 1442 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1425 1443 progress.increment(item=f)
1426 1444 f0, flags = args
1427 1445 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1428 1446 wctx[f].write(mctx.filectx(f0).data(), flags)
1429 1447 updated += 1
1430 1448
1431 1449 # exec
1432 1450 for f, args, msg in actions[mergestatemod.ACTION_EXEC]:
1433 1451 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1434 1452 progress.increment(item=f)
1435 1453 (flags,) = args
1436 1454 wctx[f].audit()
1437 1455 wctx[f].setflags(b'l' in flags, b'x' in flags)
1438 1456 updated += 1
1439 1457
1440 1458 # the ordering is important here -- ms.mergedriver will raise if the merge
1441 1459 # driver has changed, and we want to be able to bypass it when overwrite is
1442 1460 # True
1443 1461 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1444 1462
1445 1463 if usemergedriver:
1446 1464 if wctx.isinmemory():
1447 1465 raise error.InMemoryMergeConflictsError(
1448 1466 b"in-memory merge does not support mergedriver"
1449 1467 )
1450 1468 ms.commit()
1451 1469 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1452 1470 # the driver might leave some files unresolved
1453 1471 unresolvedf = set(ms.unresolved())
1454 1472 if not proceed:
1455 1473 # XXX setting unresolved to at least 1 is a hack to make sure we
1456 1474 # error out
1457 1475 return updateresult(
1458 1476 updated, merged, removed, max(len(unresolvedf), 1)
1459 1477 )
1460 1478 newactions = []
1461 1479 for f, args, msg in mergeactions:
1462 1480 if f in unresolvedf:
1463 1481 newactions.append((f, args, msg))
1464 1482 mergeactions = newactions
1465 1483
1466 1484 try:
1467 1485 # premerge
1468 1486 tocomplete = []
1469 1487 for f, args, msg in mergeactions:
1470 1488 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
1471 1489 progress.increment(item=f)
1472 1490 if f == b'.hgsubstate': # subrepo states need updating
1473 1491 subrepoutil.submerge(
1474 1492 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1475 1493 )
1476 1494 continue
1477 1495 wctx[f].audit()
1478 1496 complete, r = ms.preresolve(f, wctx)
1479 1497 if not complete:
1480 1498 numupdates += 1
1481 1499 tocomplete.append((f, args, msg))
1482 1500
1483 1501 # merge
1484 1502 for f, args, msg in tocomplete:
1485 1503 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
1486 1504 progress.increment(item=f, total=numupdates)
1487 1505 ms.resolve(f, wctx)
1488 1506
1489 1507 finally:
1490 1508 ms.commit()
1491 1509
1492 1510 unresolved = ms.unresolvedcount()
1493 1511
1494 1512 if (
1495 1513 usemergedriver
1496 1514 and not unresolved
1497 1515 and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
1498 1516 ):
1499 1517 if not driverconclude(repo, ms, wctx, labels=labels):
1500 1518 # XXX setting unresolved to at least 1 is a hack to make sure we
1501 1519 # error out
1502 1520 unresolved = max(unresolved, 1)
1503 1521
1504 1522 ms.commit()
1505 1523
1506 1524 msupdated, msmerged, msremoved = ms.counts()
1507 1525 updated += msupdated
1508 1526 merged += msmerged
1509 1527 removed += msremoved
1510 1528
1511 1529 extraactions = ms.actions()
1512 1530 if extraactions:
1513 1531 mfiles = {a[0] for a in actions[mergestatemod.ACTION_MERGE]}
1514 1532 for k, acts in pycompat.iteritems(extraactions):
1515 1533 actions[k].extend(acts)
1516 1534 if k == mergestatemod.ACTION_GET and wantfiledata:
1517 1535 # no filedata until mergestate is updated to provide it
1518 1536 for a in acts:
1519 1537 getfiledata[a[0]] = None
1520 1538 # Remove these files from actions[ACTION_MERGE] as well. This is
1521 1539 # important because in recordupdates, files in actions[ACTION_MERGE]
1522 1540 # are processed after files in other actions, and the merge driver
1523 1541 # might add files to those actions via extraactions above. This can
1524 1542 # lead to a file being recorded twice, with poor results. This is
1525 1543 # especially problematic for actions[ACTION_REMOVE] (currently only
1526 1544 # possible with the merge driver in the initial merge process;
1527 1545 # interrupted merges don't go through this flow).
1528 1546 #
1529 1547 # The real fix here is to have indexes by both file and action so
1530 1548 # that when the action for a file is changed it is automatically
1531 1549 # reflected in the other action lists. But that involves a more
1532 1550 # complex data structure, so this will do for now.
1533 1551 #
1534 1552 # We don't need to do the same operation for 'dc' and 'cd' because
1535 1553 # those lists aren't consulted again.
1536 1554 mfiles.difference_update(a[0] for a in acts)
1537 1555
1538 1556 actions[mergestatemod.ACTION_MERGE] = [
1539 1557 a for a in actions[mergestatemod.ACTION_MERGE] if a[0] in mfiles
1540 1558 ]
1541 1559
1542 1560 progress.complete()
1543 1561 assert len(getfiledata) == (
1544 1562 len(actions[mergestatemod.ACTION_GET]) if wantfiledata else 0
1545 1563 )
1546 1564 return updateresult(updated, merged, removed, unresolved), getfiledata
1547 1565
1548 1566
1549 1567 def _advertisefsmonitor(repo, num_gets, p1node):
1550 1568 # Advertise fsmonitor when its presence could be useful.
1551 1569 #
1552 1570 # We only advertise when performing an update from an empty working
1553 1571 # directory. This typically only occurs during initial clone.
1554 1572 #
1555 1573 # We give users a mechanism to disable the warning in case it is
1556 1574 # annoying.
1557 1575 #
1558 1576 # We only allow on Linux and MacOS because that's where fsmonitor is
1559 1577 # considered stable.
1560 1578 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1561 1579 fsmonitorthreshold = repo.ui.configint(
1562 1580 b'fsmonitor', b'warn_update_file_count'
1563 1581 )
1564 1582 try:
1565 1583 # avoid cycle: extensions -> cmdutil -> merge
1566 1584 from . import extensions
1567 1585
1568 1586 extensions.find(b'fsmonitor')
1569 1587 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1570 1588 # We intentionally don't look at whether fsmonitor has disabled
1571 1589 # itself because a) fsmonitor may have already printed a warning
1572 1590 # b) we only care about the config state here.
1573 1591 except KeyError:
1574 1592 fsmonitorenabled = False
1575 1593
1576 1594 if (
1577 1595 fsmonitorwarning
1578 1596 and not fsmonitorenabled
1579 1597 and p1node == nullid
1580 1598 and num_gets >= fsmonitorthreshold
1581 1599 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1582 1600 ):
1583 1601 repo.ui.warn(
1584 1602 _(
1585 1603 b'(warning: large working directory being used without '
1586 1604 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1587 1605 b'see "hg help -e fsmonitor")\n'
1588 1606 )
1589 1607 )
1590 1608
1591 1609
1592 1610 UPDATECHECK_ABORT = b'abort' # handled at higher layers
1593 1611 UPDATECHECK_NONE = b'none'
1594 1612 UPDATECHECK_LINEAR = b'linear'
1595 1613 UPDATECHECK_NO_CONFLICT = b'noconflict'
1596 1614
1597 1615
1598 1616 def update(
1599 1617 repo,
1600 1618 node,
1601 1619 branchmerge,
1602 1620 force,
1603 1621 ancestor=None,
1604 1622 mergeancestor=False,
1605 1623 labels=None,
1606 1624 matcher=None,
1607 1625 mergeforce=False,
1608 1626 updatedirstate=True,
1609 1627 updatecheck=None,
1610 1628 wc=None,
1611 1629 ):
1612 1630 """
1613 1631 Perform a merge between the working directory and the given node
1614 1632
1615 1633 node = the node to update to
1616 1634 branchmerge = whether to merge between branches
1617 1635 force = whether to force branch merging or file overwriting
1618 1636 matcher = a matcher to filter file lists (dirstate not updated)
1619 1637 mergeancestor = whether it is merging with an ancestor. If true,
1620 1638 we should accept the incoming changes for any prompts that occur.
1621 1639 If false, merging with an ancestor (fast-forward) is only allowed
1622 1640 between different named branches. This flag is used by the rebase extension
1623 1641 as a temporary fix and should be avoided in general.
1624 1642 labels = labels to use for base, local and other
1625 1643 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1626 1644 this is True, then 'force' should be True as well.
1627 1645
1628 1646 The table below shows all the behaviors of the update command given the
1629 1647 -c/--check and -C/--clean or no options, whether the working directory is
1630 1648 dirty, whether a revision is specified, and the relationship of the parent
1631 1649 rev to the target rev (linear or not). Match from top first. The -n
1632 1650 option doesn't exist on the command line, but represents the
1633 1651 experimental.updatecheck=noconflict option.
1634 1652
1635 1653 This logic is tested by test-update-branches.t.
1636 1654
1637 1655 -c -C -n -m dirty rev linear | result
1638 1656 y y * * * * * | (1)
1639 1657 y * y * * * * | (1)
1640 1658 y * * y * * * | (1)
1641 1659 * y y * * * * | (1)
1642 1660 * y * y * * * | (1)
1643 1661 * * y y * * * | (1)
1644 1662 * * * * * n n | x
1645 1663 * * * * n * * | ok
1646 1664 n n n n y * y | merge
1647 1665 n n n n y y n | (2)
1648 1666 n n n y y * * | merge
1649 1667 n n y n y * * | merge if no conflict
1650 1668 n y n n y * * | discard
1651 1669 y n n n y * * | (3)
1652 1670
1653 1671 x = can't happen
1654 1672 * = don't-care
1655 1673 1 = incompatible options (checked in commands.py)
1656 1674 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1657 1675 3 = abort: uncommitted changes (checked in commands.py)
1658 1676
1659 1677 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1660 1678 to repo[None] if None is passed.
1661 1679
1662 1680 Return the same tuple as applyupdates().
1663 1681 """
1664 1682 # Avoid cycle.
1665 1683 from . import sparse
1666 1684
1667 1685 # This function used to find the default destination if node was None, but
1668 1686 # that's now in destutil.py.
1669 1687 assert node is not None
1670 1688 if not branchmerge and not force:
1671 1689 # TODO: remove the default once all callers that pass branchmerge=False
1672 1690 # and force=False pass a value for updatecheck. We may want to allow
1673 1691 # updatecheck='abort' to better support some of these callers.
1674 1692 if updatecheck is None:
1675 1693 updatecheck = UPDATECHECK_LINEAR
1676 1694 if updatecheck not in (
1677 1695 UPDATECHECK_NONE,
1678 1696 UPDATECHECK_LINEAR,
1679 1697 UPDATECHECK_NO_CONFLICT,
1680 1698 ):
1681 1699 raise ValueError(
1682 1700 r'Invalid updatecheck %r (can accept %r)'
1683 1701 % (
1684 1702 updatecheck,
1685 1703 (
1686 1704 UPDATECHECK_NONE,
1687 1705 UPDATECHECK_LINEAR,
1688 1706 UPDATECHECK_NO_CONFLICT,
1689 1707 ),
1690 1708 )
1691 1709 )
1692 1710 if wc is not None and wc.isinmemory():
1693 1711 maybe_wlock = util.nullcontextmanager()
1694 1712 else:
1695 1713 maybe_wlock = repo.wlock()
1696 1714 with maybe_wlock:
1697 1715 if wc is None:
1698 1716 wc = repo[None]
1699 1717 pl = wc.parents()
1700 1718 p1 = pl[0]
1701 1719 p2 = repo[node]
1702 1720 if ancestor is not None:
1703 1721 pas = [repo[ancestor]]
1704 1722 else:
1705 1723 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1706 1724 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1707 1725 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1708 1726 else:
1709 1727 pas = [p1.ancestor(p2, warn=branchmerge)]
1710 1728
1711 1729 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1712 1730
1713 1731 overwrite = force and not branchmerge
1714 1732 ### check phase
1715 1733 if not overwrite:
1716 1734 if len(pl) > 1:
1717 1735 raise error.Abort(_(b"outstanding uncommitted merge"))
1718 1736 ms = mergestatemod.mergestate.read(repo)
1719 1737 if list(ms.unresolved()):
1720 1738 raise error.Abort(
1721 1739 _(b"outstanding merge conflicts"),
1722 1740 hint=_(b"use 'hg resolve' to resolve"),
1723 1741 )
1724 1742 if branchmerge:
1725 1743 if pas == [p2]:
1726 1744 raise error.Abort(
1727 1745 _(
1728 1746 b"merging with a working directory ancestor"
1729 1747 b" has no effect"
1730 1748 )
1731 1749 )
1732 1750 elif pas == [p1]:
1733 1751 if not mergeancestor and wc.branch() == p2.branch():
1734 1752 raise error.Abort(
1735 1753 _(b"nothing to merge"),
1736 1754 hint=_(b"use 'hg update' or check 'hg heads'"),
1737 1755 )
1738 1756 if not force and (wc.files() or wc.deleted()):
1739 1757 raise error.Abort(
1740 1758 _(b"uncommitted changes"),
1741 1759 hint=_(b"use 'hg status' to list changes"),
1742 1760 )
1743 1761 if not wc.isinmemory():
1744 1762 for s in sorted(wc.substate):
1745 1763 wc.sub(s).bailifchanged()
1746 1764
1747 1765 elif not overwrite:
1748 1766 if p1 == p2: # no-op update
1749 1767 # call the hooks and exit early
1750 1768 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1751 1769 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1752 1770 return updateresult(0, 0, 0, 0)
1753 1771
1754 1772 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1755 1773 [p1],
1756 1774 [p2],
1757 1775 ): # nonlinear
1758 1776 dirty = wc.dirty(missing=True)
1759 1777 if dirty:
1760 1778 # Branching is a bit strange to ensure we do the minimal
1761 1779 # number of calls to obsutil.foreground.
1762 1780 foreground = obsutil.foreground(repo, [p1.node()])
1763 1781 # note: the <node> variable contains a random identifier
1764 1782 if repo[node].node() in foreground:
1765 1783 pass # allow updating to successors
1766 1784 else:
1767 1785 msg = _(b"uncommitted changes")
1768 1786 hint = _(b"commit or update --clean to discard changes")
1769 1787 raise error.UpdateAbort(msg, hint=hint)
1770 1788 else:
1771 1789 # Allow jumping branches if clean and specific rev given
1772 1790 pass
1773 1791
1774 1792 if overwrite:
1775 1793 pas = [wc]
1776 1794 elif not branchmerge:
1777 1795 pas = [p1]
1778 1796
1779 1797 # deprecated config: merge.followcopies
1780 1798 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1781 1799 if overwrite:
1782 1800 followcopies = False
1783 1801 elif not pas[0]:
1784 1802 followcopies = False
1785 1803 if not branchmerge and not wc.dirty(missing=True):
1786 1804 followcopies = False
1787 1805
1788 1806 ### calculate phase
1789 1807 mresult = calculateupdates(
1790 1808 repo,
1791 1809 wc,
1792 1810 p2,
1793 1811 pas,
1794 1812 branchmerge,
1795 1813 force,
1796 1814 mergeancestor,
1797 1815 followcopies,
1798 1816 matcher=matcher,
1799 1817 mergeforce=mergeforce,
1800 1818 )
1801 1819
1802 1820 actionbyfile = mresult.actions
1803 1821
1804 1822 if updatecheck == UPDATECHECK_NO_CONFLICT:
1805 1823 for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
1806 1824 if m not in (
1807 1825 mergestatemod.ACTION_GET,
1808 1826 mergestatemod.ACTION_KEEP,
1809 1827 mergestatemod.ACTION_EXEC,
1810 1828 mergestatemod.ACTION_REMOVE,
1811 1829 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
1812 1830 mergestatemod.ACTION_GET_OTHER_AND_STORE,
1813 1831 ):
1814 1832 msg = _(b"conflicting changes")
1815 1833 hint = _(b"commit or update --clean to discard changes")
1816 1834 raise error.Abort(msg, hint=hint)
1817 1835
1818 1836 # Prompt and create actions. Most of this is in the resolve phase
1819 1837 # already, but we can't handle .hgsubstate in filemerge or
1820 1838 # subrepoutil.submerge yet so we have to keep prompting for it.
1821 1839 if b'.hgsubstate' in actionbyfile:
1822 1840 f = b'.hgsubstate'
1823 1841 m, args, msg = actionbyfile[f]
1824 1842 prompts = filemerge.partextras(labels)
1825 1843 prompts[b'f'] = f
1826 1844 if m == mergestatemod.ACTION_CHANGED_DELETED:
1827 1845 if repo.ui.promptchoice(
1828 1846 _(
1829 1847 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
1830 1848 b"use (c)hanged version or (d)elete?"
1831 1849 b"$$ &Changed $$ &Delete"
1832 1850 )
1833 1851 % prompts,
1834 1852 0,
1835 1853 ):
1836 1854 actionbyfile[f] = (
1837 1855 mergestatemod.ACTION_REMOVE,
1838 1856 None,
1839 1857 b'prompt delete',
1840 1858 )
1841 1859 elif f in p1:
1842 1860 actionbyfile[f] = (
1843 1861 mergestatemod.ACTION_ADD_MODIFIED,
1844 1862 None,
1845 1863 b'prompt keep',
1846 1864 )
1847 1865 else:
1848 1866 actionbyfile[f] = (
1849 1867 mergestatemod.ACTION_ADD,
1850 1868 None,
1851 1869 b'prompt keep',
1852 1870 )
1853 1871 elif m == mergestatemod.ACTION_DELETED_CHANGED:
1854 1872 f1, f2, fa, move, anc = args
1855 1873 flags = p2[f2].flags()
1856 1874 if (
1857 1875 repo.ui.promptchoice(
1858 1876 _(
1859 1877 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
1860 1878 b"use (c)hanged version or leave (d)eleted?"
1861 1879 b"$$ &Changed $$ &Deleted"
1862 1880 )
1863 1881 % prompts,
1864 1882 0,
1865 1883 )
1866 1884 == 0
1867 1885 ):
1868 1886 actionbyfile[f] = (
1869 1887 mergestatemod.ACTION_GET,
1870 1888 (flags, False),
1871 1889 b'prompt recreating',
1872 1890 )
1873 1891 else:
1874 1892 del actionbyfile[f]
1875 1893
1876 1894 # Convert to dictionary-of-lists format
        actions = emptyactions()
        for f, (m, args, msg) in pycompat.iteritems(actionbyfile):
            if m not in actions:
                actions[m] = []
            actions[m].append((f, args, msg))

        # ACTION_GET_OTHER_AND_STORE is a mergestatemod.ACTION_GET + store in mergestate
        for e in actions[mergestatemod.ACTION_GET_OTHER_AND_STORE]:
            actions[mergestatemod.ACTION_GET].append(e)

        if not util.fscasesensitive(repo.path):
            # check collision between files only in p2 for clean update
            if not branchmerge and (
                force or not wc.dirty(missing=True, branch=False)
            ):
                _checkcollision(repo, p2.manifest(), None)
            else:
                _checkcollision(repo, wc.manifest(), actions)

        # divergent renames
        for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was renamed "
                    b"multiple times to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        # rename and delete
        for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
            repo.ui.warn(
                _(
                    b"note: possible conflict - %s was deleted "
                    b"and renamed to:\n"
                )
                % f
            )
            for nf in sorted(fl):
                repo.ui.warn(b" %s\n" % nf)

        ### apply phase
        if not branchmerge:  # just jump to the new rev
            fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
        # If we're doing a partial update, we need to skip updating
        # the dirstate.
        always = matcher is None or matcher.always()
        updatedirstate = updatedirstate and always and not wc.isinmemory()
        if updatedirstate:
            repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
            # note that we're in the middle of an update
            repo.vfs.write(b'updatestate', p2.hex())
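            # (a later hg process uses the presence of this file to detect an
            # interrupted update; it is removed below once the dirstate has
            # been written)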

        _advertisefsmonitor(
            repo, len(actions[mergestatemod.ACTION_GET]), p1.node()
        )

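        # commitinfo collected during the merge calculation is forwarded to
        # applyupdates() so it can be stored in the merge state for use at
        # commit time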
        wantfiledata = updatedirstate and not branchmerge
        stats, getfiledata = applyupdates(
            repo,
            actions,
            wc,
            p2,
            overwrite,
            wantfiledata,
            labels=labels,
            commitinfo=mresult.commitinfo,
        )

        if updatedirstate:
            with repo.dirstate.parentchange():
                repo.setparents(fp1, fp2)
                mergestatemod.recordupdates(
                    repo, actions, branchmerge, getfiledata
                )
                # update completed, clear state
                util.unlink(repo.vfs.join(b'updatestate'))

                if not branchmerge:
                    repo.dirstate.setbranch(p2.branch())

    # If we're updating to a location, clean up any stale temporary includes
    # (ex: this happens during hg rebase --abort).
    if not branchmerge:
        sparse.prunetemporaryincludes(repo)

    if updatedirstate:
        repo.hook(
            b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
        )
    return stats


def merge(ctx, labels=None, force=False, wc=None):
    """Merge another topological branch into the working copy.

    force = whether the merge was run with 'merge --force' (deprecated)
    """

    return update(
        ctx.repo(),
        ctx.rev(),
        labels=labels,
        branchmerge=True,
        force=force,
        mergeforce=force,
        wc=wc,
    )

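# A minimal sketch (not part of the original module) of how an extension
# might drive merge(); `repo` and `rev` are assumed to be a repository
# object and a revision identifier supplied by the caller.
def _example_merge_usage(repo, rev):
    stats = merge(repo[rev], labels=[b'working copy', b'merge rev'])
    if stats.unresolvedcount:
        repo.ui.warn(_(b"unresolved conflicts (see hg resolve)\n"))
    return stats
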

def clean_update(ctx, wc=None):
    """Do a clean update to the given commit.

    This involves updating to the commit and discarding any changes in the
    working copy.
    """
    return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)


def revert_to(ctx, matcher=None, wc=None):
    """Revert the working copy to the given commit.

    The working copy will keep its current parent(s) but its content will
    be the same as in the given commit.
    """

    return update(
        ctx.repo(),
        ctx.rev(),
        branchmerge=False,
        force=True,
        updatedirstate=False,
        matcher=matcher,
        wc=wc,
    )

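# Illustrative sketch (not part of the original module) contrasting the two
# helpers above; `repo` and `rev` are assumed caller-supplied values.
def _example_discard_vs_revert(repo, rev, discard=True):
    ctx = repo[rev]
    if discard:
        # make ctx the new working copy parent, dropping local changes
        return clean_update(ctx)
    # keep the current parent(s) but make the content match ctx
    return revert_to(ctx)
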

def graft(
    repo,
    ctx,
    base=None,
    labels=None,
    keepparent=False,
    keepconflictparent=False,
    wctx=None,
):
    """Do a graft-like merge.

    This is a merge where the merge ancestor is chosen such that one
    or more changesets are grafted onto the current changeset. In
    addition to the merge, this fixes up the dirstate to include only
    a single parent (if keepparent is False) and tries to duplicate any
    renames/copies appropriately.

    ctx - changeset to rebase
    base - merge base, or ctx.p1() if not specified
    labels - merge labels, e.g. ['local', 'graft']
    keepparent - keep second parent if any
    keepconflictparent - if unresolved, keep parent used for the merge

    """
    # If we're grafting a descendant onto an ancestor, be sure to pass
    # mergeancestor=True to update. This does two things: 1) allows the merge if
    # the destination is the same as the parent of the ctx (so we can use graft
    # to copy commits), and 2) informs update that the incoming changes are
    # newer than the destination so it doesn't prompt about "remote changed foo
    # which local deleted".
    # We also pass mergeancestor=True when base is the same revision as p1. 2)
    # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
    wctx = wctx or repo[None]
    pctx = wctx.p1()
    base = base or ctx.p1()
    mergeancestor = (
        repo.changelog.isancestor(pctx.node(), ctx.node())
        or pctx.rev() == base.rev()
    )

    stats = update(
        repo,
        ctx.node(),
        True,
        True,
        base.node(),
        mergeancestor=mergeancestor,
        labels=labels,
        wc=wctx,
    )

    if keepconflictparent and stats.unresolvedcount:
        pother = ctx.node()
    else:
        pother = nullid
        parents = ctx.parents()
        if keepparent and len(parents) == 2 and base in parents:
            parents.remove(base)
            pother = parents[0].node()
    # Never set both parents equal to each other
    if pother == pctx.node():
        pother = nullid

    if wctx.isinmemory():
        wctx.setparents(pctx.node(), pother)
        # fix up dirstate for copies and renames
        copies.graftcopies(wctx, ctx, base)
    else:
        with repo.dirstate.parentchange():
            repo.setparents(pctx.node(), pother)
            repo.dirstate.write(repo.currenttransaction())
            # fix up dirstate for copies and renames
            copies.graftcopies(wctx, ctx, base)
    return stats

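# A rough sketch (not part of the original module) of a driver loop in the
# spirit of `hg graft`; `repo` and `revs` are assumed inputs and all error
# handling is elided.
def _example_graft_revs(repo, revs):
    stats = None
    for rev in revs:
        ctx = repo[rev]
        stats = graft(repo, ctx, base=ctx.p1(), labels=[b'local', b'graft'])
        if stats.unresolvedcount:
            break  # stop and let the user resolve before continuing
    return stats
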

def purge(
    repo,
    matcher,
    unknown=True,
    ignored=False,
    removeemptydirs=True,
    removefiles=True,
    abortonerror=False,
    noop=False,
):
    """Purge the working directory of untracked files.

    ``matcher`` is a matcher configured to scan the working directory -
    potentially a subset.

    ``unknown`` controls whether unknown files should be purged.

    ``ignored`` controls whether ignored files should be purged.

    ``removeemptydirs`` controls whether empty directories should be removed.

    ``removefiles`` controls whether files are removed.

    ``abortonerror`` causes an exception to be raised if an error occurs
    deleting a file or directory.

    ``noop`` controls whether to actually remove files. If True, nothing is
    removed and the function only reports what would have been removed.

    Returns an iterable of relative paths in the working directory that were
    or would be removed.
    """

    def remove(removefn, path):
        try:
            removefn(path)
        except OSError:
            m = _(b'%s cannot be removed') % path
            if abortonerror:
                raise error.Abort(m)
            else:
                repo.ui.warn(_(b'warning: %s\n') % m)

    # There's no API to copy a matcher. So mutate the passed matcher and
    # restore it when we're done.
    oldtraversedir = matcher.traversedir

    res = []

    try:
        if removeemptydirs:
            directories = []
            matcher.traversedir = directories.append

        status = repo.status(match=matcher, ignored=ignored, unknown=unknown)

        if removefiles:
            for f in sorted(status.unknown + status.ignored):
                if not noop:
                    repo.ui.note(_(b'removing file %s\n') % f)
                    remove(repo.wvfs.unlink, f)
                res.append(f)

        if removeemptydirs:
            for f in sorted(directories, reverse=True):
                if matcher(f) and not repo.wvfs.listdir(f):
                    if not noop:
                        repo.ui.note(_(b'removing directory %s\n') % f)
                        remove(repo.wvfs.rmdir, f)
                    res.append(f)

        return res

    finally:
        matcher.traversedir = oldtraversedir
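
# Hedged usage sketch (not part of the original module): a dry run of
# purge() over one subdirectory, then the real removal. The subdirectory
# name and the use of mercurial.match here are illustrative assumptions.
def _example_purge_subdir(repo, subdir=b'build'):
    from mercurial import match as matchmod

    m = matchmod.match(repo.root, repo.getcwd(), [b'path:' + subdir])
    # noop=True only reports what would be removed
    for p in purge(repo, m, unknown=True, ignored=False, noop=True):
        repo.ui.status(_(b'would remove %s\n') % p)
    return purge(repo, m, unknown=True, ignored=False)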