mergeresult: introduce filemap() which yields filename based mapping...
Pulkit Goyal
r45906:3c783ff0 default
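This revision migrates two callers from iterating the raw actions dict via
pycompat.iteritems(mresult.actions) to the new mergeresult.filemap()
accessor, which yields (filename, (action, args, message)) pairs. A minimal
sketch of the migration, assuming a populated mresult (illustrative, not
part of this changeset):

    # before: raw dict access through the py2/py3 compat shim
    for f, (action, args, msg) in pycompat.iteritems(mresult.actions):
        ...  # handle the action for file f

    # after: the dedicated accessor introduced by this revision
    for f, (action, args, msg) in mresult.filemap():
        ...  # identical handling; only the iteration API changes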
@@ -1,730 +1,730 @@
1 1 # hg.py - hg backend for convert extension
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # Notes for hg->hg conversion:
9 9 #
10 10 # * Old versions of Mercurial didn't trim the whitespace from the ends
11 11 # of commit messages, but new versions do. Changesets created by
12 12 # those older versions, then converted, may thus have different
13 13 # hashes for changesets that are otherwise identical.
14 14 #
15 15 # * Using "--config convert.hg.saverev=true" stores the source
16 16 # identifier in the converted revision. This causes the
17 17 # converted revision to have a different identity than the
18 18 # source.
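# For example (illustrative invocation, not part of this change), running
#
#   hg convert --config convert.hg.saverev=true SOURCE DEST
#
# records each source node in the converted revisions, which is why the
# converted changesets end up with identities different from the source.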
19 19 from __future__ import absolute_import
20 20
21 21 import os
22 22 import re
23 23 import time
24 24
25 25 from mercurial.i18n import _
26 26 from mercurial.pycompat import open
27 27 from mercurial import (
28 28 bookmarks,
29 29 context,
30 30 error,
31 31 exchange,
32 32 hg,
33 33 lock as lockmod,
34 34 merge as mergemod,
35 35 node as nodemod,
36 36 phases,
37 37 pycompat,
38 38 scmutil,
39 39 util,
40 40 )
41 41 from mercurial.utils import dateutil
42 42
43 43 stringio = util.stringio
44 44
45 45 from . import common
46 46
47 47 mapfile = common.mapfile
48 48 NoRepo = common.NoRepo
49 49
50 50 sha1re = re.compile(br'\b[0-9a-f]{12,40}\b')
51 51
52 52
53 53 class mercurial_sink(common.converter_sink):
54 54 def __init__(self, ui, repotype, path):
55 55 common.converter_sink.__init__(self, ui, repotype, path)
56 56 self.branchnames = ui.configbool(b'convert', b'hg.usebranchnames')
57 57 self.clonebranches = ui.configbool(b'convert', b'hg.clonebranches')
58 58 self.tagsbranch = ui.config(b'convert', b'hg.tagsbranch')
59 59 self.lastbranch = None
60 60 if os.path.isdir(path) and len(os.listdir(path)) > 0:
61 61 try:
62 62 self.repo = hg.repository(self.ui, path)
63 63 if not self.repo.local():
64 64 raise NoRepo(
65 65 _(b'%s is not a local Mercurial repository') % path
66 66 )
67 67 except error.RepoError as err:
68 68 ui.traceback()
69 69 raise NoRepo(err.args[0])
70 70 else:
71 71 try:
72 72 ui.status(_(b'initializing destination %s repository\n') % path)
73 73 self.repo = hg.repository(self.ui, path, create=True)
74 74 if not self.repo.local():
75 75 raise NoRepo(
76 76 _(b'%s is not a local Mercurial repository') % path
77 77 )
78 78 self.created.append(path)
79 79 except error.RepoError:
80 80 ui.traceback()
81 81 raise NoRepo(
82 82 _(b"could not create hg repository %s as sink") % path
83 83 )
84 84 self.lock = None
85 85 self.wlock = None
86 86 self.filemapmode = False
87 87 self.subrevmaps = {}
88 88
89 89 def before(self):
90 90 self.ui.debug(b'run hg sink pre-conversion action\n')
91 91 self.wlock = self.repo.wlock()
92 92 self.lock = self.repo.lock()
93 93
94 94 def after(self):
95 95 self.ui.debug(b'run hg sink post-conversion action\n')
96 96 if self.lock:
97 97 self.lock.release()
98 98 if self.wlock:
99 99 self.wlock.release()
100 100
101 101 def revmapfile(self):
102 102 return self.repo.vfs.join(b"shamap")
103 103
104 104 def authorfile(self):
105 105 return self.repo.vfs.join(b"authormap")
106 106
107 107 def setbranch(self, branch, pbranches):
108 108 if not self.clonebranches:
109 109 return
110 110
111 111 setbranch = branch != self.lastbranch
112 112 self.lastbranch = branch
113 113 if not branch:
114 114 branch = b'default'
115 115 pbranches = [(b[0], b[1] and b[1] or b'default') for b in pbranches]
116 116
117 117 branchpath = os.path.join(self.path, branch)
118 118 if setbranch:
119 119 self.after()
120 120 try:
121 121 self.repo = hg.repository(self.ui, branchpath)
122 122 except Exception:
123 123 self.repo = hg.repository(self.ui, branchpath, create=True)
124 124 self.before()
125 125
126 126 # pbranches may bring revisions from other branches (merge parents)
127 127 # Make sure we have them, or pull them.
128 128 missings = {}
129 129 for b in pbranches:
130 130 try:
131 131 self.repo.lookup(b[0])
132 132 except Exception:
133 133 missings.setdefault(b[1], []).append(b[0])
134 134
135 135 if missings:
136 136 self.after()
137 137 for pbranch, heads in sorted(pycompat.iteritems(missings)):
138 138 pbranchpath = os.path.join(self.path, pbranch)
139 139 prepo = hg.peer(self.ui, {}, pbranchpath)
140 140 self.ui.note(
141 141 _(b'pulling from %s into %s\n') % (pbranch, branch)
142 142 )
143 143 exchange.pull(
144 144 self.repo, prepo, [prepo.lookup(h) for h in heads]
145 145 )
146 146 self.before()
147 147
148 148 def _rewritetags(self, source, revmap, data):
149 149 fp = stringio()
150 150 for line in data.splitlines():
151 151 s = line.split(b' ', 1)
152 152 if len(s) != 2:
153 153 self.ui.warn(_(b'invalid tag entry: "%s"\n') % line)
154 154 fp.write(b'%s\n' % line) # Bogus, but keep for hash stability
155 155 continue
156 156 revid = revmap.get(source.lookuprev(s[0]))
157 157 if not revid:
158 158 if s[0] == nodemod.nullhex:
159 159 revid = s[0]
160 160 else:
161 161 # missing, but keep for hash stability
162 162 self.ui.warn(_(b'missing tag entry: "%s"\n') % line)
163 163 fp.write(b'%s\n' % line)
164 164 continue
165 165 fp.write(b'%s %s\n' % (revid, s[1]))
166 166 return fp.getvalue()
167 167
168 168 def _rewritesubstate(self, source, data):
169 169 fp = stringio()
170 170 for line in data.splitlines():
171 171 s = line.split(b' ', 1)
172 172 if len(s) != 2:
173 173 continue
174 174
175 175 revid = s[0]
176 176 subpath = s[1]
177 177 if revid != nodemod.nullhex:
178 178 revmap = self.subrevmaps.get(subpath)
179 179 if revmap is None:
180 180 revmap = mapfile(
181 181 self.ui, self.repo.wjoin(subpath, b'.hg/shamap')
182 182 )
183 183 self.subrevmaps[subpath] = revmap
184 184
185 185 # It is reasonable that one or more of the subrepos don't
186 186 # need to be converted, in which case they can be cloned
187 187 # into place instead of converted. Therefore, only warn
188 188 # once.
189 189 msg = _(b'no ".hgsubstate" updates will be made for "%s"\n')
190 190 if len(revmap) == 0:
191 191 sub = self.repo.wvfs.reljoin(subpath, b'.hg')
192 192
193 193 if self.repo.wvfs.exists(sub):
194 194 self.ui.warn(msg % subpath)
195 195
196 196 newid = revmap.get(revid)
197 197 if not newid:
198 198 if len(revmap) > 0:
199 199 self.ui.warn(
200 200 _(b"%s is missing from %s/.hg/shamap\n")
201 201 % (revid, subpath)
202 202 )
203 203 else:
204 204 revid = newid
205 205
206 206 fp.write(b'%s %s\n' % (revid, subpath))
207 207
208 208 return fp.getvalue()
209 209
210 210 def _calculatemergedfiles(self, source, p1ctx, p2ctx):
211 211 """Calculates the files from p2 that we need to pull in when merging p1
212 212 and p2, given that the merge is coming from the given source.
213 213
214 214 This prevents us from losing files that only exist in the target p2 and
215 215 that don't come from the source repo (like if you're merging multiple
216 216 repositories together).
217 217 """
218 218 anc = [p1ctx.ancestor(p2ctx)]
219 219 # Calculate what files are coming from p2
220 220 # TODO: mresult.commitinfo might be able to get that info
221 221 mresult = mergemod.calculateupdates(
222 222 self.repo,
223 223 p1ctx,
224 224 p2ctx,
225 225 anc,
226 226 branchmerge=True,
227 227 force=True,
228 228 acceptremote=False,
229 229 followcopies=False,
230 230 )
231 231
232 for file, (action, info, msg) in pycompat.iteritems(mresult.actions):
232 for file, (action, info, msg) in mresult.filemap():
233 233 if source.targetfilebelongstosource(file):
234 234 # If the file belongs to the source repo, ignore the p2
235 235 # since it will be covered by the existing fileset.
236 236 continue
237 237
238 238 # If the file requires actual merging, abort. We don't have enough
239 239 # context to resolve merges correctly.
240 240 if action in [b'm', b'dm', b'cd', b'dc']:
241 241 raise error.Abort(
242 242 _(
243 243 b"unable to convert merge commit "
244 244 b"since target parents do not merge cleanly (file "
245 245 b"%s, parents %s and %s)"
246 246 )
247 247 % (file, p1ctx, p2ctx)
248 248 )
249 249 elif action == b'k':
250 250 # 'keep' means nothing changed from p1
251 251 continue
252 252 else:
253 253 # Any other change means we want to take the p2 version
254 254 yield file
255 255
256 256 def putcommit(
257 257 self, files, copies, parents, commit, source, revmap, full, cleanp2
258 258 ):
259 259 files = dict(files)
260 260
261 261 def getfilectx(repo, memctx, f):
262 262 if p2ctx and f in p2files and f not in copies:
263 263 self.ui.debug(b'reusing %s from p2\n' % f)
264 264 try:
265 265 return p2ctx[f]
266 266 except error.ManifestLookupError:
267 267 # If the file doesn't exist in p2, then we're syncing a
268 268 # delete, so just return None.
269 269 return None
270 270 try:
271 271 v = files[f]
272 272 except KeyError:
273 273 return None
274 274 data, mode = source.getfile(f, v)
275 275 if data is None:
276 276 return None
277 277 if f == b'.hgtags':
278 278 data = self._rewritetags(source, revmap, data)
279 279 if f == b'.hgsubstate':
280 280 data = self._rewritesubstate(source, data)
281 281 return context.memfilectx(
282 282 self.repo,
283 283 memctx,
284 284 f,
285 285 data,
286 286 b'l' in mode,
287 287 b'x' in mode,
288 288 copies.get(f),
289 289 )
290 290
291 291 pl = []
292 292 for p in parents:
293 293 if p not in pl:
294 294 pl.append(p)
295 295 parents = pl
296 296 nparents = len(parents)
297 297 if self.filemapmode and nparents == 1:
298 298 m1node = self.repo.changelog.read(nodemod.bin(parents[0]))[0]
299 299 parent = parents[0]
300 300
301 301 if len(parents) < 2:
302 302 parents.append(nodemod.nullid)
303 303 if len(parents) < 2:
304 304 parents.append(nodemod.nullid)
305 305 p2 = parents.pop(0)
306 306
307 307 text = commit.desc
308 308
309 309 sha1s = re.findall(sha1re, text)
310 310 for sha1 in sha1s:
311 311 oldrev = source.lookuprev(sha1)
312 312 newrev = revmap.get(oldrev)
313 313 if newrev is not None:
314 314 text = text.replace(sha1, newrev[: len(sha1)])
315 315
316 316 extra = commit.extra.copy()
317 317
318 318 sourcename = self.repo.ui.config(b'convert', b'hg.sourcename')
319 319 if sourcename:
320 320 extra[b'convert_source'] = sourcename
321 321
322 322 for label in (
323 323 b'source',
324 324 b'transplant_source',
325 325 b'rebase_source',
326 326 b'intermediate-source',
327 327 ):
328 328 node = extra.get(label)
329 329
330 330 if node is None:
331 331 continue
332 332
333 333 # Only transplant stores its reference in binary
334 334 if label == b'transplant_source':
335 335 node = nodemod.hex(node)
336 336
337 337 newrev = revmap.get(node)
338 338 if newrev is not None:
339 339 if label == b'transplant_source':
340 340 newrev = nodemod.bin(newrev)
341 341
342 342 extra[label] = newrev
343 343
344 344 if self.branchnames and commit.branch:
345 345 extra[b'branch'] = commit.branch
346 346 if commit.rev and commit.saverev:
347 347 extra[b'convert_revision'] = commit.rev
348 348
349 349 while parents:
350 350 p1 = p2
351 351 p2 = parents.pop(0)
352 352 p1ctx = self.repo[p1]
353 353 p2ctx = None
354 354 if p2 != nodemod.nullid:
355 355 p2ctx = self.repo[p2]
356 356 fileset = set(files)
357 357 if full:
358 358 fileset.update(self.repo[p1])
359 359 fileset.update(self.repo[p2])
360 360
361 361 if p2ctx:
362 362 p2files = set(cleanp2)
363 363 for file in self._calculatemergedfiles(source, p1ctx, p2ctx):
364 364 p2files.add(file)
365 365 fileset.add(file)
366 366
367 367 ctx = context.memctx(
368 368 self.repo,
369 369 (p1, p2),
370 370 text,
371 371 fileset,
372 372 getfilectx,
373 373 commit.author,
374 374 commit.date,
375 375 extra,
376 376 )
377 377
378 378 # We won't know if the conversion changes the node until after the
379 379 # commit, so copy the source's phase for now.
380 380 self.repo.ui.setconfig(
381 381 b'phases',
382 382 b'new-commit',
383 383 phases.phasenames[commit.phase],
384 384 b'convert',
385 385 )
386 386
387 387 with self.repo.transaction(b"convert") as tr:
388 388 if self.repo.ui.config(b'convert', b'hg.preserve-hash'):
389 389 origctx = commit.ctx
390 390 else:
391 391 origctx = None
392 392 node = nodemod.hex(self.repo.commitctx(ctx, origctx=origctx))
393 393
394 394 # If the node value has changed, but the phase is lower than
395 395 # draft, set it back to draft since it hasn't been exposed
396 396 # anywhere.
397 397 if commit.rev != node:
398 398 ctx = self.repo[node]
399 399 if ctx.phase() < phases.draft:
400 400 phases.registernew(
401 401 self.repo, tr, phases.draft, [ctx.node()]
402 402 )
403 403
404 404 text = b"(octopus merge fixup)\n"
405 405 p2 = node
406 406
407 407 if self.filemapmode and nparents == 1:
408 408 man = self.repo.manifestlog.getstorage(b'')
409 409 mnode = self.repo.changelog.read(nodemod.bin(p2))[0]
410 410 closed = b'close' in commit.extra
411 411 if not closed and not man.cmp(m1node, man.revision(mnode)):
412 412 self.ui.status(_(b"filtering out empty revision\n"))
413 413 self.repo.rollback(force=True)
414 414 return parent
415 415 return p2
416 416
417 417 def puttags(self, tags):
418 418 tagparent = self.repo.branchtip(self.tagsbranch, ignoremissing=True)
419 419 tagparent = tagparent or nodemod.nullid
420 420
421 421 oldlines = set()
422 422 for branch, heads in pycompat.iteritems(self.repo.branchmap()):
423 423 for h in heads:
424 424 if b'.hgtags' in self.repo[h]:
425 425 oldlines.update(
426 426 set(self.repo[h][b'.hgtags'].data().splitlines(True))
427 427 )
428 428 oldlines = sorted(list(oldlines))
429 429
430 430 newlines = sorted([(b"%s %s\n" % (tags[tag], tag)) for tag in tags])
431 431 if newlines == oldlines:
432 432 return None, None
433 433
434 434 # if the old and new tags match, then there is nothing to update
435 435 oldtags = set()
436 436 newtags = set()
437 437 for line in oldlines:
438 438 s = line.strip().split(b' ', 1)
439 439 if len(s) != 2:
440 440 continue
441 441 oldtags.add(s[1])
442 442 for line in newlines:
443 443 s = line.strip().split(b' ', 1)
444 444 if len(s) != 2:
445 445 continue
446 446 if s[1] not in oldtags:
447 447 newtags.add(s[1].strip())
448 448
449 449 if not newtags:
450 450 return None, None
451 451
452 452 data = b"".join(newlines)
453 453
454 454 def getfilectx(repo, memctx, f):
455 455 return context.memfilectx(repo, memctx, f, data, False, False, None)
456 456
457 457 self.ui.status(_(b"updating tags\n"))
458 458 date = b"%d 0" % int(time.mktime(time.gmtime()))
459 459 extra = {b'branch': self.tagsbranch}
460 460 ctx = context.memctx(
461 461 self.repo,
462 462 (tagparent, None),
463 463 b"update tags",
464 464 [b".hgtags"],
465 465 getfilectx,
466 466 b"convert-repo",
467 467 date,
468 468 extra,
469 469 )
470 470 node = self.repo.commitctx(ctx)
471 471 return nodemod.hex(node), nodemod.hex(tagparent)
472 472
473 473 def setfilemapmode(self, active):
474 474 self.filemapmode = active
475 475
476 476 def putbookmarks(self, updatedbookmark):
477 477 if not len(updatedbookmark):
478 478 return
479 479 wlock = lock = tr = None
480 480 try:
481 481 wlock = self.repo.wlock()
482 482 lock = self.repo.lock()
483 483 tr = self.repo.transaction(b'bookmark')
484 484 self.ui.status(_(b"updating bookmarks\n"))
485 485 destmarks = self.repo._bookmarks
486 486 changes = [
487 487 (bookmark, nodemod.bin(updatedbookmark[bookmark]))
488 488 for bookmark in updatedbookmark
489 489 ]
490 490 destmarks.applychanges(self.repo, tr, changes)
491 491 tr.close()
492 492 finally:
493 493 lockmod.release(lock, wlock, tr)
494 494
495 495 def hascommitfrommap(self, rev):
496 496 # the exact semantics of clonebranches is unclear so we can't say no
497 497 return rev in self.repo or self.clonebranches
498 498
499 499 def hascommitforsplicemap(self, rev):
500 500 if rev not in self.repo and self.clonebranches:
501 501 raise error.Abort(
502 502 _(
503 503 b'revision %s not found in destination '
504 504 b'repository (lookups with clonebranches=true '
505 505 b'are not implemented)'
506 506 )
507 507 % rev
508 508 )
509 509 return rev in self.repo
510 510
511 511
512 512 class mercurial_source(common.converter_source):
513 513 def __init__(self, ui, repotype, path, revs=None):
514 514 common.converter_source.__init__(self, ui, repotype, path, revs)
515 515 self.ignoreerrors = ui.configbool(b'convert', b'hg.ignoreerrors')
516 516 self.ignored = set()
517 517 self.saverev = ui.configbool(b'convert', b'hg.saverev')
518 518 try:
519 519 self.repo = hg.repository(self.ui, path)
520 520 # try to provoke an exception if this isn't really a hg
521 521 # repo, but some other bogus compatible-looking url
522 522 if not self.repo.local():
523 523 raise error.RepoError
524 524 except error.RepoError:
525 525 ui.traceback()
526 526 raise NoRepo(_(b"%s is not a local Mercurial repository") % path)
527 527 self.lastrev = None
528 528 self.lastctx = None
529 529 self._changescache = None, None
530 530 self.convertfp = None
531 531 # Restrict converted revisions to startrev descendants
532 532 startnode = ui.config(b'convert', b'hg.startrev')
533 533 hgrevs = ui.config(b'convert', b'hg.revs')
534 534 if hgrevs is None:
535 535 if startnode is not None:
536 536 try:
537 537 startnode = self.repo.lookup(startnode)
538 538 except error.RepoError:
539 539 raise error.Abort(
540 540 _(b'%s is not a valid start revision') % startnode
541 541 )
542 542 startrev = self.repo.changelog.rev(startnode)
543 543 children = {startnode: 1}
544 544 for r in self.repo.changelog.descendants([startrev]):
545 545 children[self.repo.changelog.node(r)] = 1
546 546 self.keep = children.__contains__
547 547 else:
548 548 self.keep = util.always
549 549 if revs:
550 550 self._heads = [self.repo.lookup(r) for r in revs]
551 551 else:
552 552 self._heads = self.repo.heads()
553 553 else:
554 554 if revs or startnode is not None:
555 555 raise error.Abort(
556 556 _(
557 557 b'hg.revs cannot be combined with '
558 558 b'hg.startrev or --rev'
559 559 )
560 560 )
561 561 nodes = set()
562 562 parents = set()
563 563 for r in scmutil.revrange(self.repo, [hgrevs]):
564 564 ctx = self.repo[r]
565 565 nodes.add(ctx.node())
566 566 parents.update(p.node() for p in ctx.parents())
567 567 self.keep = nodes.__contains__
568 568 self._heads = nodes - parents
569 569
570 570 def _changectx(self, rev):
571 571 if self.lastrev != rev:
572 572 self.lastctx = self.repo[rev]
573 573 self.lastrev = rev
574 574 return self.lastctx
575 575
576 576 def _parents(self, ctx):
577 577 return [p for p in ctx.parents() if p and self.keep(p.node())]
578 578
579 579 def getheads(self):
580 580 return [nodemod.hex(h) for h in self._heads if self.keep(h)]
581 581
582 582 def getfile(self, name, rev):
583 583 try:
584 584 fctx = self._changectx(rev)[name]
585 585 return fctx.data(), fctx.flags()
586 586 except error.LookupError:
587 587 return None, None
588 588
589 589 def _changedfiles(self, ctx1, ctx2):
590 590 ma, r = [], []
591 591 maappend = ma.append
592 592 rappend = r.append
593 593 d = ctx1.manifest().diff(ctx2.manifest())
594 594 for f, ((node1, flag1), (node2, flag2)) in pycompat.iteritems(d):
595 595 if node2 is None:
596 596 rappend(f)
597 597 else:
598 598 maappend(f)
599 599 return ma, r
600 600
601 601 def getchanges(self, rev, full):
602 602 ctx = self._changectx(rev)
603 603 parents = self._parents(ctx)
604 604 if full or not parents:
605 605 files = copyfiles = ctx.manifest()
606 606 if parents:
607 607 if self._changescache[0] == rev:
608 608 ma, r = self._changescache[1]
609 609 else:
610 610 ma, r = self._changedfiles(parents[0], ctx)
611 611 if not full:
612 612 files = ma + r
613 613 copyfiles = ma
614 614 # _getcopies() is also run for roots and before filtering so missing
615 615 # revlogs are detected early
616 616 copies = self._getcopies(ctx, parents, copyfiles)
617 617 cleanp2 = set()
618 618 if len(parents) == 2:
619 619 d = parents[1].manifest().diff(ctx.manifest(), clean=True)
620 620 for f, value in pycompat.iteritems(d):
621 621 if value is None:
622 622 cleanp2.add(f)
623 623 changes = [(f, rev) for f in files if f not in self.ignored]
624 624 changes.sort()
625 625 return changes, copies, cleanp2
626 626
627 627 def _getcopies(self, ctx, parents, files):
628 628 copies = {}
629 629 for name in files:
630 630 if name in self.ignored:
631 631 continue
632 632 try:
633 633 copysource = ctx.filectx(name).copysource()
634 634 if copysource in self.ignored:
635 635 continue
636 636 # Ignore copy sources not in parent revisions
637 637 if not any(copysource in p for p in parents):
638 638 continue
639 639 copies[name] = copysource
640 640 except TypeError:
641 641 pass
642 642 except error.LookupError as e:
643 643 if not self.ignoreerrors:
644 644 raise
645 645 self.ignored.add(name)
646 646 self.ui.warn(_(b'ignoring: %s\n') % e)
647 647 return copies
648 648
649 649 def getcommit(self, rev):
650 650 ctx = self._changectx(rev)
651 651 _parents = self._parents(ctx)
652 652 parents = [p.hex() for p in _parents]
653 653 optparents = [p.hex() for p in ctx.parents() if p and p not in _parents]
654 654 crev = rev
655 655
656 656 return common.commit(
657 657 author=ctx.user(),
658 658 date=dateutil.datestr(ctx.date(), b'%Y-%m-%d %H:%M:%S %1%2'),
659 659 desc=ctx.description(),
660 660 rev=crev,
661 661 parents=parents,
662 662 optparents=optparents,
663 663 branch=ctx.branch(),
664 664 extra=ctx.extra(),
665 665 sortkey=ctx.rev(),
666 666 saverev=self.saverev,
667 667 phase=ctx.phase(),
668 668 ctx=ctx,
669 669 )
670 670
671 671 def numcommits(self):
672 672 return len(self.repo)
673 673
674 674 def gettags(self):
675 675 # This will get written to .hgtags, filter non global tags out.
676 676 tags = [
677 677 t
678 678 for t in self.repo.tagslist()
679 679 if self.repo.tagtype(t[0]) == b'global'
680 680 ]
681 681 return {
682 682 name: nodemod.hex(node) for name, node in tags if self.keep(node)
683 683 }
684 684
685 685 def getchangedfiles(self, rev, i):
686 686 ctx = self._changectx(rev)
687 687 parents = self._parents(ctx)
688 688 if not parents and i is None:
689 689 i = 0
690 690 ma, r = ctx.manifest().keys(), []
691 691 else:
692 692 i = i or 0
693 693 ma, r = self._changedfiles(parents[i], ctx)
694 694 ma, r = [[f for f in l if f not in self.ignored] for l in (ma, r)]
695 695
696 696 if i == 0:
697 697 self._changescache = (rev, (ma, r))
698 698
699 699 return ma + r
700 700
701 701 def converted(self, rev, destrev):
702 702 if self.convertfp is None:
703 703 self.convertfp = open(self.repo.vfs.join(b'shamap'), b'ab')
704 704 self.convertfp.write(util.tonativeeol(b'%s %s\n' % (destrev, rev)))
705 705 self.convertfp.flush()
706 706
707 707 def before(self):
708 708 self.ui.debug(b'run hg source pre-conversion action\n')
709 709
710 710 def after(self):
711 711 self.ui.debug(b'run hg source post-conversion action\n')
712 712
713 713 def hasnativeorder(self):
714 714 return True
715 715
716 716 def hasnativeclose(self):
717 717 return True
718 718
719 719 def lookuprev(self, rev):
720 720 try:
721 721 return nodemod.hex(self.repo.lookup(rev))
722 722 except (error.RepoError, error.LookupError):
723 723 return None
724 724
725 725 def getbookmarks(self):
726 726 return bookmarks.listbookmarks(self.repo)
727 727
728 728 def checkrevformat(self, revstr, mapname=b'splicemap'):
729 729 """ Mercurial, revision string is a 40 byte hex """
730 730 self.checkhexformat(revstr, mapname)
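For reference, the action codes tested in _calculatemergedfiles above decide
whether a p2 file can be taken verbatim. A brief gloss of the byte codes used
in that method (the descriptions are a summary, not part of this changeset):

    # b'm'  - merge required                        -> abort (cannot resolve)
    # b'dm' - move as part of a directory rename    -> abort
    # b'cd' - changed on one side, deleted on other -> abort
    # b'dc' - deleted on one side, changed on other -> abort
    # b'k'  - keep: nothing changed from p1         -> skip the file
    # other - any other change                      -> yield the p2 version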
@@ -1,1288 +1,1288 @@
1 1 # __init__.py - remotefilelog extension
2 2 #
3 3 # Copyright 2013 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """remotefilelog causes Mercurial to lazilly fetch file contents (EXPERIMENTAL)
8 8
9 9 This extension is HIGHLY EXPERIMENTAL. There are NO BACKWARDS COMPATIBILITY
10 10 GUARANTEES. This means that repositories created with this extension may
11 11 only be usable with the exact version of this extension/Mercurial that was
12 12 used. The extension attempts to enforce this in order to prevent repository
13 13 corruption.
14 14
15 15 remotefilelog works by fetching file contents lazily and storing them
16 16 in a cache on the client rather than in revlogs. This allows enormous
17 17 histories to be transferred only partially, making them easier to
18 18 operate on.
19 19
20 20 Configs:
21 21
22 22 ``packs.maxchainlen`` specifies the maximum delta chain length in pack files
23 23
24 24 ``packs.maxpacksize`` specifies the maximum pack file size
25 25
26 26 ``packs.maxpackfilecount`` specifies the maximum number of packs in the
27 27 shared cache (trees only for now)
28 28
29 29 ``remotefilelog.backgroundprefetch`` runs prefetch in background when True
30 30
31 31 ``remotefilelog.bgprefetchrevs`` specifies revisions to fetch on commit and
32 32 update, and on other commands that use them. Different from pullprefetch.
33 33
34 34 ``remotefilelog.gcrepack`` does garbage collection during repack when True
35 35
36 36 ``remotefilelog.nodettl`` specifies maximum TTL of a node in seconds before
37 37 it is garbage collected
38 38
39 39 ``remotefilelog.repackonhggc`` runs repack on hg gc when True
40 40
41 41 ``remotefilelog.prefetchdays`` specifies the maximum age of a commit in
42 42 days after which it is no longer prefetched.
43 43
44 44 ``remotefilelog.prefetchdelay`` specifies delay between background
45 45 prefetches in seconds after operations that change the working copy parent
46 46
47 47 ``remotefilelog.data.gencountlimit`` constrains the minimum number of data
48 48 pack files required to be considered part of a generation. In particular,
49 49 minimum number of pack files > gencountlimit.
50 50
51 51 ``remotefilelog.data.generations`` list for specifying the lower bound of
52 52 each generation of the data pack files. For example, list ['100MB','1MB']
53 53 or ['1MB', '100MB'] will lead to three generations: [0, 1MB),
54 54 [1MB, 100MB) and [100MB, infinity).
55 55
56 56 ``remotefilelog.data.maxrepackpacks`` the maximum number of pack files to
57 57 include in an incremental data repack.
58 58
59 59 ``remotefilelog.data.repackmaxpacksize`` the maximum size of a pack file for
60 60 it to be considered for an incremental data repack.
61 61
62 62 ``remotefilelog.data.repacksizelimit`` the maximum total size of pack files
63 63 to include in an incremental data repack.
64 64
65 65 ``remotefilelog.history.gencountlimit`` constrains the minimum number of
66 66 history pack files required to be considered part of a generation. In
67 67 particular, minimum number of pack files > gencountlimit.
68 68
69 69 ``remotefilelog.history.generations`` list for specifying the lower bound of
70 70 each generation of the history pack files. For example, list
71 71 ['100MB', '1MB'] or ['1MB', '100MB'] will lead to three generations:
72 72 [0, 1MB), [1MB, 100MB) and [100MB, infinity).
73 73
74 74 ``remotefilelog.history.maxrepackpacks`` the maximum number of pack files to
75 75 include in an incremental history repack.
76 76
77 77 ``remotefilelog.history.repackmaxpacksize`` the maximum size of a pack file
78 78 for it to be considered for an incremental history repack.
79 79
80 80 ``remotefilelog.history.repacksizelimit`` the maximum total size of pack
81 81 files to include in an incremental history repack.
82 82
83 83 ``remotefilelog.backgroundrepack`` automatically consolidate packs in the
84 84 background
85 85
86 86 ``remotefilelog.cachepath`` path to cache
87 87
88 88 ``remotefilelog.cachegroup`` if set, make cache directory sgid to this
89 89 group
90 90
91 91 ``remotefilelog.cacheprocess`` binary to invoke for fetching file data
92 92
93 93 ``remotefilelog.debug`` turn on remotefilelog-specific debug output
94 94
95 95 ``remotefilelog.excludepattern`` pattern of files to exclude from pulls
96 96
97 97 ``remotefilelog.includepattern`` pattern of files to include in pulls
98 98
99 99 ``remotefilelog.fetchwarning``: message to print when too many
100 100 single-file fetches occur
101 101
102 102 ``remotefilelog.getfilesstep`` number of files to request in a single RPC
103 103
104 104 ``remotefilelog.getfilestype`` if set to 'threaded' use threads to fetch
105 105 files, otherwise use optimistic fetching
106 106
107 107 ``remotefilelog.pullprefetch`` revset for selecting files that should be
108 108 eagerly downloaded rather than lazily
109 109
110 110 ``remotefilelog.reponame`` name of the repo. If set, used to partition
111 111 data from other repos in a shared store.
112 112
113 113 ``remotefilelog.server`` if true, enable server-side functionality
114 114
115 115 ``remotefilelog.servercachepath`` path for caching blobs on the server
116 116
117 117 ``remotefilelog.serverexpiration`` number of days to keep cached server
118 118 blobs
119 119
120 120 ``remotefilelog.validatecache`` if set, check cache entries for corruption
121 121 before returning blobs
122 122
123 123 ``remotefilelog.validatecachelog`` if set, check cache entries for
124 124 corruption before returning metadata
125 125
126 126 """
127 127 from __future__ import absolute_import
128 128
129 129 import os
130 130 import time
131 131 import traceback
132 132
133 133 from mercurial.node import hex
134 134 from mercurial.i18n import _
135 135 from mercurial.pycompat import open
136 136 from mercurial import (
137 137 changegroup,
138 138 changelog,
139 139 cmdutil,
140 140 commands,
141 141 configitems,
142 142 context,
143 143 copies,
144 144 debugcommands as hgdebugcommands,
145 145 dispatch,
146 146 error,
147 147 exchange,
148 148 extensions,
149 149 hg,
150 150 localrepo,
151 151 match as matchmod,
152 152 merge,
153 153 mergestate as mergestatemod,
154 154 node as nodemod,
155 155 patch,
156 156 pycompat,
157 157 registrar,
158 158 repair,
159 159 repoview,
160 160 revset,
161 161 scmutil,
162 162 smartset,
163 163 streamclone,
164 164 util,
165 165 )
166 166 from . import (
167 167 constants,
168 168 debugcommands,
169 169 fileserverclient,
170 170 remotefilectx,
171 171 remotefilelog,
172 172 remotefilelogserver,
173 173 repack as repackmod,
174 174 shallowbundle,
175 175 shallowrepo,
176 176 shallowstore,
177 177 shallowutil,
178 178 shallowverifier,
179 179 )
180 180
181 181 # ensures debug commands are registered
182 182 hgdebugcommands.command
183 183
184 184 cmdtable = {}
185 185 command = registrar.command(cmdtable)
186 186
187 187 configtable = {}
188 188 configitem = registrar.configitem(configtable)
189 189
190 190 configitem(b'remotefilelog', b'debug', default=False)
191 191
192 192 configitem(b'remotefilelog', b'reponame', default=b'')
193 193 configitem(b'remotefilelog', b'cachepath', default=None)
194 194 configitem(b'remotefilelog', b'cachegroup', default=None)
195 195 configitem(b'remotefilelog', b'cacheprocess', default=None)
196 196 configitem(b'remotefilelog', b'cacheprocess.includepath', default=None)
197 197 configitem(b"remotefilelog", b"cachelimit", default=b"1000 GB")
198 198
199 199 configitem(
200 200 b'remotefilelog',
201 201 b'fallbackpath',
202 202 default=configitems.dynamicdefault,
203 203 alias=[(b'remotefilelog', b'fallbackrepo')],
204 204 )
205 205
206 206 configitem(b'remotefilelog', b'validatecachelog', default=None)
207 207 configitem(b'remotefilelog', b'validatecache', default=b'on')
208 208 configitem(b'remotefilelog', b'server', default=None)
209 209 configitem(b'remotefilelog', b'servercachepath', default=None)
210 210 configitem(b"remotefilelog", b"serverexpiration", default=30)
211 211 configitem(b'remotefilelog', b'backgroundrepack', default=False)
212 212 configitem(b'remotefilelog', b'bgprefetchrevs', default=None)
213 213 configitem(b'remotefilelog', b'pullprefetch', default=None)
214 214 configitem(b'remotefilelog', b'backgroundprefetch', default=False)
215 215 configitem(b'remotefilelog', b'prefetchdelay', default=120)
216 216 configitem(b'remotefilelog', b'prefetchdays', default=14)
217 217
218 218 configitem(b'remotefilelog', b'getfilesstep', default=10000)
219 219 configitem(b'remotefilelog', b'getfilestype', default=b'optimistic')
220 220 configitem(b'remotefilelog', b'batchsize', configitems.dynamicdefault)
221 221 configitem(b'remotefilelog', b'fetchwarning', default=b'')
222 222
223 223 configitem(b'remotefilelog', b'includepattern', default=None)
224 224 configitem(b'remotefilelog', b'excludepattern', default=None)
225 225
226 226 configitem(b'remotefilelog', b'gcrepack', default=False)
227 227 configitem(b'remotefilelog', b'repackonhggc', default=False)
228 228 configitem(b'repack', b'chainorphansbysize', default=True, experimental=True)
229 229
230 230 configitem(b'packs', b'maxpacksize', default=0)
231 231 configitem(b'packs', b'maxchainlen', default=1000)
232 232
233 233 configitem(b'devel', b'remotefilelog.bg-wait', default=False)
234 234
235 235 # default TTL limit is 30 days
236 236 _defaultlimit = 60 * 60 * 24 * 30
237 237 configitem(b'remotefilelog', b'nodettl', default=_defaultlimit)
238 238
239 239 configitem(b'remotefilelog', b'data.gencountlimit', default=2)
240 240 configitem(
241 241 b'remotefilelog', b'data.generations', default=[b'1GB', b'100MB', b'1MB']
242 242 )
243 243 configitem(b'remotefilelog', b'data.maxrepackpacks', default=50)
244 244 configitem(b'remotefilelog', b'data.repackmaxpacksize', default=b'4GB')
245 245 configitem(b'remotefilelog', b'data.repacksizelimit', default=b'100MB')
246 246
247 247 configitem(b'remotefilelog', b'history.gencountlimit', default=2)
248 248 configitem(b'remotefilelog', b'history.generations', default=[b'100MB'])
249 249 configitem(b'remotefilelog', b'history.maxrepackpacks', default=50)
250 250 configitem(b'remotefilelog', b'history.repackmaxpacksize', default=b'400MB')
251 251 configitem(b'remotefilelog', b'history.repacksizelimit', default=b'100MB')
252 252
253 253 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
254 254 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
255 255 # be specifying the version(s) of Mercurial they are tested with, or
256 256 # leave the attribute unspecified.
257 257 testedwith = b'ships-with-hg-core'
258 258
259 259 repoclass = localrepo.localrepository
260 260 repoclass._basesupported.add(constants.SHALLOWREPO_REQUIREMENT)
261 261
262 262 isenabled = shallowutil.isenabled
263 263
264 264
265 265 def uisetup(ui):
266 266 """Wraps user facing Mercurial commands to swap them out with shallow
267 267 versions.
268 268 """
269 269 hg.wirepeersetupfuncs.append(fileserverclient.peersetup)
270 270
271 271 entry = extensions.wrapcommand(commands.table, b'clone', cloneshallow)
272 272 entry[1].append(
273 273 (
274 274 b'',
275 275 b'shallow',
276 276 None,
277 277 _(b"create a shallow clone which uses remote file history"),
278 278 )
279 279 )
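# e.g. "hg clone --shallow ssh://server/repo" would create a
# remotefilelog-backed shallow clone (illustrative invocation for the
# flag registered above).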
280 280
281 281 extensions.wrapcommand(
282 282 commands.table, b'debugindex', debugcommands.debugindex
283 283 )
284 284 extensions.wrapcommand(
285 285 commands.table, b'debugindexdot', debugcommands.debugindexdot
286 286 )
287 287 extensions.wrapcommand(commands.table, b'log', log)
288 288 extensions.wrapcommand(commands.table, b'pull', pull)
289 289
290 290 # Prevent 'hg manifest --all'
291 291 def _manifest(orig, ui, repo, *args, **opts):
292 292 if isenabled(repo) and opts.get('all'):
293 293 raise error.Abort(_(b"--all is not supported in a shallow repo"))
294 294
295 295 return orig(ui, repo, *args, **opts)
296 296
297 297 extensions.wrapcommand(commands.table, b"manifest", _manifest)
298 298
299 299 # Wrap remotefilelog with lfs code
300 300 def _lfsloaded(loaded=False):
301 301 lfsmod = None
302 302 try:
303 303 lfsmod = extensions.find(b'lfs')
304 304 except KeyError:
305 305 pass
306 306 if lfsmod:
307 307 lfsmod.wrapfilelog(remotefilelog.remotefilelog)
308 308 fileserverclient._lfsmod = lfsmod
309 309
310 310 extensions.afterloaded(b'lfs', _lfsloaded)
311 311
312 312 # debugdata needs remotefilelog.len to work
313 313 extensions.wrapcommand(commands.table, b'debugdata', debugdatashallow)
314 314
315 315 changegroup.cgpacker = shallowbundle.shallowcg1packer
316 316
317 317 extensions.wrapfunction(
318 318 changegroup, b'_addchangegroupfiles', shallowbundle.addchangegroupfiles
319 319 )
320 320 extensions.wrapfunction(
321 321 changegroup, b'makechangegroup', shallowbundle.makechangegroup
322 322 )
323 323 extensions.wrapfunction(localrepo, b'makestore', storewrapper)
324 324 extensions.wrapfunction(exchange, b'pull', exchangepull)
325 325 extensions.wrapfunction(merge, b'applyupdates', applyupdates)
326 326 extensions.wrapfunction(merge, b'_checkunknownfiles', checkunknownfiles)
327 327 extensions.wrapfunction(context.workingctx, b'_checklookup', checklookup)
328 328 extensions.wrapfunction(scmutil, b'_findrenames', findrenames)
329 329 extensions.wrapfunction(
330 330 copies, b'_computeforwardmissing', computeforwardmissing
331 331 )
332 332 extensions.wrapfunction(dispatch, b'runcommand', runcommand)
333 333 extensions.wrapfunction(repair, b'_collectbrokencsets', _collectbrokencsets)
334 334 extensions.wrapfunction(context.changectx, b'filectx', filectx)
335 335 extensions.wrapfunction(context.workingctx, b'filectx', workingfilectx)
336 336 extensions.wrapfunction(patch, b'trydiff', trydiff)
337 337 extensions.wrapfunction(hg, b'verify', _verify)
338 338 scmutil.fileprefetchhooks.add(b'remotefilelog', _fileprefetchhook)
339 339
340 340 # disappointing hacks below
341 341 extensions.wrapfunction(scmutil, b'getrenamedfn', getrenamedfn)
342 342 extensions.wrapfunction(revset, b'filelog', filelogrevset)
343 343 revset.symbols[b'filelog'] = revset.filelog
344 344 extensions.wrapfunction(cmdutil, b'walkfilerevs', walkfilerevs)
345 345
346 346
347 347 def cloneshallow(orig, ui, repo, *args, **opts):
348 348 if opts.get('shallow'):
349 349 repos = []
350 350
351 351 def pull_shallow(orig, self, *args, **kwargs):
352 352 if not isenabled(self):
353 353 repos.append(self.unfiltered())
354 354 # set up the client hooks so the post-clone update works
355 355 setupclient(self.ui, self.unfiltered())
356 356
357 357 # setupclient fixed the class on the repo itself
358 358 # but we also need to fix it on the repoview
359 359 if isinstance(self, repoview.repoview):
360 360 self.__class__.__bases__ = (
361 361 self.__class__.__bases__[0],
362 362 self.unfiltered().__class__,
363 363 )
364 364 self.requirements.add(constants.SHALLOWREPO_REQUIREMENT)
365 365 scmutil.writereporequirements(self)
366 366
367 367 # Since setupclient hadn't been called, exchange.pull was not
368 368 # wrapped. So we need to manually invoke our version of it.
369 369 return exchangepull(orig, self, *args, **kwargs)
370 370 else:
371 371 return orig(self, *args, **kwargs)
372 372
373 373 extensions.wrapfunction(exchange, b'pull', pull_shallow)
374 374
375 375 # Wrap the stream logic to add requirements and to pass include/exclude
376 376 # patterns around.
377 377 def setup_streamout(repo, remote):
378 378 # Replace remote.stream_out with a version that sends file
379 379 # patterns.
380 380 def stream_out_shallow(orig):
381 381 caps = remote.capabilities()
382 382 if constants.NETWORK_CAP_LEGACY_SSH_GETFILES in caps:
383 383 opts = {}
384 384 if repo.includepattern:
385 385 opts['includepattern'] = b'\0'.join(repo.includepattern)
386 386 if repo.excludepattern:
387 387 opts['excludepattern'] = b'\0'.join(repo.excludepattern)
388 388 return remote._callstream(b'stream_out_shallow', **opts)
389 389 else:
390 390 return orig()
391 391
392 392 extensions.wrapfunction(remote, b'stream_out', stream_out_shallow)
393 393
394 394 def stream_wrap(orig, op):
395 395 setup_streamout(op.repo, op.remote)
396 396 return orig(op)
397 397
398 398 extensions.wrapfunction(
399 399 streamclone, b'maybeperformlegacystreamclone', stream_wrap
400 400 )
401 401
402 402 def canperformstreamclone(orig, pullop, bundle2=False):
403 403 # remotefilelog is currently incompatible with the
404 404 # bundle2 flavor of streamclones, so force us to use
405 405 # v1 instead.
406 406 if b'v2' in pullop.remotebundle2caps.get(b'stream', []):
407 407 pullop.remotebundle2caps[b'stream'] = [
408 408 c for c in pullop.remotebundle2caps[b'stream'] if c != b'v2'
409 409 ]
410 410 if bundle2:
411 411 return False, None
412 412 supported, requirements = orig(pullop, bundle2=bundle2)
413 413 if requirements is not None:
414 414 requirements.add(constants.SHALLOWREPO_REQUIREMENT)
415 415 return supported, requirements
416 416
417 417 extensions.wrapfunction(
418 418 streamclone, b'canperformstreamclone', canperformstreamclone
419 419 )
420 420
421 421 try:
422 422 orig(ui, repo, *args, **opts)
423 423 finally:
424 424 if opts.get('shallow'):
425 425 for r in repos:
426 426 if util.safehasattr(r, b'fileservice'):
427 427 r.fileservice.close()
428 428
429 429
430 430 def debugdatashallow(orig, *args, **kwds):
431 431 oldlen = remotefilelog.remotefilelog.__len__
432 432 try:
433 433 remotefilelog.remotefilelog.__len__ = lambda x: 1
434 434 return orig(*args, **kwds)
435 435 finally:
436 436 remotefilelog.remotefilelog.__len__ = oldlen
437 437
438 438
439 439 def reposetup(ui, repo):
440 440 if not repo.local():
441 441 return
442 442
443 443 # put here intentionally because it doesn't work in uisetup
444 444 ui.setconfig(b'hooks', b'update.prefetch', wcpprefetch)
445 445 ui.setconfig(b'hooks', b'commit.prefetch', wcpprefetch)
446 446
447 447 isserverenabled = ui.configbool(b'remotefilelog', b'server')
448 448 isshallowclient = isenabled(repo)
449 449
450 450 if isserverenabled and isshallowclient:
451 451 raise RuntimeError(b"Cannot be both a server and shallow client.")
452 452
453 453 if isshallowclient:
454 454 setupclient(ui, repo)
455 455
456 456 if isserverenabled:
457 457 remotefilelogserver.setupserver(ui, repo)
458 458
459 459
460 460 def setupclient(ui, repo):
461 461 if not isinstance(repo, localrepo.localrepository):
462 462 return
463 463
464 464 # Even clients get the server setup since they need to have the
465 465 # wireprotocol endpoints registered.
466 466 remotefilelogserver.onetimesetup(ui)
467 467 onetimeclientsetup(ui)
468 468
469 469 shallowrepo.wraprepo(repo)
470 470 repo.store = shallowstore.wrapstore(repo.store)
471 471
472 472
473 473 def storewrapper(orig, requirements, path, vfstype):
474 474 s = orig(requirements, path, vfstype)
475 475 if constants.SHALLOWREPO_REQUIREMENT in requirements:
476 476 s = shallowstore.wrapstore(s)
477 477
478 478 return s
479 479
480 480
481 481 # prefetch files before update
482 482 def applyupdates(
483 483 orig, repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts
484 484 ):
485 485 if isenabled(repo):
486 486 manifest = mctx.manifest()
487 487 files = []
488 488 for f, args, msg in mresult.getactions([mergestatemod.ACTION_GET]):
489 489 files.append((f, hex(manifest[f])))
490 490 # batch fetch the needed files from the server
491 491 repo.fileservice.prefetch(files)
492 492 return orig(repo, mresult, wctx, mctx, overwrite, wantfiledata, **opts)
493 493
494 494
495 495 # Prefetch merge checkunknownfiles
496 496 def checkunknownfiles(orig, repo, wctx, mctx, force, mresult, *args, **kwargs):
497 497 if isenabled(repo):
498 498 files = []
499 499 sparsematch = repo.maybesparsematch(mctx.rev())
500 for f, (m, actionargs, msg) in pycompat.iteritems(mresult.actions):
500 for f, (m, actionargs, msg) in mresult.filemap():
501 501 if sparsematch and not sparsematch(f):
502 502 continue
503 503 if m in (
504 504 mergestatemod.ACTION_CREATED,
505 505 mergestatemod.ACTION_DELETED_CHANGED,
506 506 mergestatemod.ACTION_CREATED_MERGE,
507 507 ):
508 508 files.append((f, hex(mctx.filenode(f))))
509 509 elif m == mergestatemod.ACTION_LOCAL_DIR_RENAME_GET:
510 510 f2 = actionargs[0]
511 511 files.append((f2, hex(mctx.filenode(f2))))
512 512 # batch fetch the needed files from the server
513 513 repo.fileservice.prefetch(files)
514 514 return orig(repo, wctx, mctx, force, mresult, *args, **kwargs)
515 515
516 516
517 517 # Prefetch files before status attempts to look at their size and contents
518 518 def checklookup(orig, self, files):
519 519 repo = self._repo
520 520 if isenabled(repo):
521 521 prefetchfiles = []
522 522 for parent in self._parents:
523 523 for f in files:
524 524 if f in parent:
525 525 prefetchfiles.append((f, hex(parent.filenode(f))))
526 526 # batch fetch the needed files from the server
527 527 repo.fileservice.prefetch(prefetchfiles)
528 528 return orig(self, files)
529 529
530 530
531 531 # Prefetch the logic that compares added and removed files for renames
532 532 def findrenames(orig, repo, matcher, added, removed, *args, **kwargs):
533 533 if isenabled(repo):
534 534 files = []
535 535 pmf = repo[b'.'].manifest()
536 536 for f in removed:
537 537 if f in pmf:
538 538 files.append((f, hex(pmf[f])))
539 539 # batch fetch the needed files from the server
540 540 repo.fileservice.prefetch(files)
541 541 return orig(repo, matcher, added, removed, *args, **kwargs)
542 542
543 543
544 544 # prefetch files before pathcopies check
545 545 def computeforwardmissing(orig, a, b, match=None):
546 546 missing = orig(a, b, match=match)
547 547 repo = a._repo
548 548 if isenabled(repo):
549 549 mb = b.manifest()
550 550
551 551 files = []
552 552 sparsematch = repo.maybesparsematch(b.rev())
553 553 if sparsematch:
554 554 sparsemissing = set()
555 555 for f in missing:
556 556 if sparsematch(f):
557 557 files.append((f, hex(mb[f])))
558 558 sparsemissing.add(f)
559 559 missing = sparsemissing
560 560
561 561 # batch fetch the needed files from the server
562 562 repo.fileservice.prefetch(files)
563 563 return missing
564 564
565 565
566 566 # close cache miss server connection after the command has finished
567 567 def runcommand(orig, lui, repo, *args, **kwargs):
568 568 fileservice = None
569 569 # repo can be None when running in chg:
570 570 # - at startup, reposetup was called because serve is not norepo
571 571 # - a norepo command like "help" is called
572 572 if repo and isenabled(repo):
573 573 fileservice = repo.fileservice
574 574 try:
575 575 return orig(lui, repo, *args, **kwargs)
576 576 finally:
577 577 if fileservice:
578 578 fileservice.close()
579 579
580 580
581 581 # prevent strip from stripping remotefilelogs
582 582 def _collectbrokencsets(orig, repo, files, striprev):
583 583 if isenabled(repo):
584 584 files = [f for f in files if not repo.shallowmatch(f)]
585 585 return orig(repo, files, striprev)
586 586
587 587
588 588 # changectx wrappers
589 589 def filectx(orig, self, path, fileid=None, filelog=None):
590 590 if fileid is None:
591 591 fileid = self.filenode(path)
592 592 if isenabled(self._repo) and self._repo.shallowmatch(path):
593 593 return remotefilectx.remotefilectx(
594 594 self._repo, path, fileid=fileid, changectx=self, filelog=filelog
595 595 )
596 596 return orig(self, path, fileid=fileid, filelog=filelog)
597 597
598 598
599 599 def workingfilectx(orig, self, path, filelog=None):
600 600 if isenabled(self._repo) and self._repo.shallowmatch(path):
601 601 return remotefilectx.remoteworkingfilectx(
602 602 self._repo, path, workingctx=self, filelog=filelog
603 603 )
604 604 return orig(self, path, filelog=filelog)
605 605
606 606
607 607 # prefetch required revisions before a diff
608 608 def trydiff(
609 609 orig,
610 610 repo,
611 611 revs,
612 612 ctx1,
613 613 ctx2,
614 614 modified,
615 615 added,
616 616 removed,
617 617 copy,
618 618 getfilectx,
619 619 *args,
620 620 **kwargs
621 621 ):
622 622 if isenabled(repo):
623 623 prefetch = []
624 624 mf1 = ctx1.manifest()
625 625 for fname in modified + added + removed:
626 626 if fname in mf1:
627 627 fnode = getfilectx(fname, ctx1).filenode()
628 628 # fnode can be None if it's an edited working ctx file
629 629 if fnode:
630 630 prefetch.append((fname, hex(fnode)))
631 631 if fname not in removed:
632 632 fnode = getfilectx(fname, ctx2).filenode()
633 633 if fnode:
634 634 prefetch.append((fname, hex(fnode)))
635 635
636 636 repo.fileservice.prefetch(prefetch)
637 637
638 638 return orig(
639 639 repo,
640 640 revs,
641 641 ctx1,
642 642 ctx2,
643 643 modified,
644 644 added,
645 645 removed,
646 646 copy,
647 647 getfilectx,
648 648 *args,
649 649 **kwargs
650 650 )
651 651
652 652
653 653 # Prevent verify from processing files
654 654 # a stub for mercurial.hg.verify()
655 655 def _verify(orig, repo, level=None):
656 656 lock = repo.lock()
657 657 try:
658 658 return shallowverifier.shallowverifier(repo).verify()
659 659 finally:
660 660 lock.release()
661 661
662 662
663 663 clientonetime = False
664 664
665 665
666 666 def onetimeclientsetup(ui):
667 667 global clientonetime
668 668 if clientonetime:
669 669 return
670 670 clientonetime = True
671 671
672 672 # Don't commit filelogs until we know the commit hash, since the hash
673 673 # is present in the filelog blob.
674 674 # This violates Mercurial's filelog->manifest->changelog write order,
675 675 # but is generally fine for client repos.
676 676 pendingfilecommits = []
677 677
678 678 def addrawrevision(
679 679 orig,
680 680 self,
681 681 rawtext,
682 682 transaction,
683 683 link,
684 684 p1,
685 685 p2,
686 686 node,
687 687 flags,
688 688 cachedelta=None,
689 689 _metatuple=None,
690 690 ):
691 691 if isinstance(link, int):
692 692 pendingfilecommits.append(
693 693 (
694 694 self,
695 695 rawtext,
696 696 transaction,
697 697 link,
698 698 p1,
699 699 p2,
700 700 node,
701 701 flags,
702 702 cachedelta,
703 703 _metatuple,
704 704 )
705 705 )
706 706 return node
707 707 else:
708 708 return orig(
709 709 self,
710 710 rawtext,
711 711 transaction,
712 712 link,
713 713 p1,
714 714 p2,
715 715 node,
716 716 flags,
717 717 cachedelta,
718 718 _metatuple=_metatuple,
719 719 )
720 720
721 721 extensions.wrapfunction(
722 722 remotefilelog.remotefilelog, b'addrawrevision', addrawrevision
723 723 )
724 724
725 725 def changelogadd(orig, self, *args, **kwargs):
726 726 oldlen = len(self)
727 727 node = orig(self, *args, **kwargs)
728 728 newlen = len(self)
729 729 if oldlen != newlen:
730 730 for oldargs in pendingfilecommits:
731 731 log, rt, tr, link, p1, p2, n, fl, c, m = oldargs
732 732 linknode = self.node(link)
733 733 if linknode == node:
734 734 log.addrawrevision(rt, tr, linknode, p1, p2, n, fl, c, m)
735 735 else:
736 736 raise error.ProgrammingError(
737 737 b'pending multiple integer revisions are not supported'
738 738 )
739 739 else:
740 740 # "link" is actually wrong here (it is set to len(changelog))
741 741 # if changelog remains unchanged, skip writing file revisions
742 742 # but still do a sanity check about pending multiple revisions
743 743 if len({x[3] for x in pendingfilecommits}) > 1:
744 744 raise error.ProgrammingError(
745 745 b'pending multiple integer revisions are not supported'
746 746 )
747 747 del pendingfilecommits[:]
748 748 return node
749 749
750 750 extensions.wrapfunction(changelog.changelog, b'add', changelogadd)
751 751
752 752
753 753 def getrenamedfn(orig, repo, endrev=None):
754 754 if not isenabled(repo) or copies.usechangesetcentricalgo(repo):
755 755 return orig(repo, endrev)
756 756
757 757 rcache = {}
758 758
759 759 def getrenamed(fn, rev):
760 760 '''looks up all renames for a file (up to endrev) the first
761 761 time the file is given. It indexes on the changerev and only
762 762 parses the manifest if linkrev != changerev.
763 763 Returns rename info for fn at changerev rev.'''
764 764 if rev in rcache.setdefault(fn, {}):
765 765 return rcache[fn][rev]
766 766
767 767 try:
768 768 fctx = repo[rev].filectx(fn)
769 769 for ancestor in fctx.ancestors():
770 770 if ancestor.path() == fn:
771 771 renamed = ancestor.renamed()
772 772 rcache[fn][ancestor.rev()] = renamed and renamed[0]
773 773
774 774 renamed = fctx.renamed()
775 775 return renamed and renamed[0]
776 776 except error.LookupError:
777 777 return None
778 778
779 779 return getrenamed
780 780
781 781
782 782 def walkfilerevs(orig, repo, match, follow, revs, fncache):
783 783 if not isenabled(repo):
784 784 return orig(repo, match, follow, revs, fncache)
785 785
786 786 # remotefilelogs can't be walked in rev order, so throw.
787 787 # The caller will see the exception and walk the commit tree instead.
788 788 if not follow:
789 789 raise cmdutil.FileWalkError(b"Cannot walk via filelog")
790 790
791 791 wanted = set()
792 792 minrev, maxrev = min(revs), max(revs)
793 793
794 794 pctx = repo[b'.']
795 795 for filename in match.files():
796 796 if filename not in pctx:
797 797 raise error.Abort(
798 798 _(b'cannot follow file not in parent revision: "%s"') % filename
799 799 )
800 800 fctx = pctx[filename]
801 801
802 802 linkrev = fctx.linkrev()
803 803 if linkrev >= minrev and linkrev <= maxrev:
804 804 fncache.setdefault(linkrev, []).append(filename)
805 805 wanted.add(linkrev)
806 806
807 807 for ancestor in fctx.ancestors():
808 808 linkrev = ancestor.linkrev()
809 809 if linkrev >= minrev and linkrev <= maxrev:
810 810 fncache.setdefault(linkrev, []).append(ancestor.path())
811 811 wanted.add(linkrev)
812 812
813 813 return wanted
814 814
815 815
816 816 def filelogrevset(orig, repo, subset, x):
817 817 """``filelog(pattern)``
818 818 Changesets connected to the specified filelog.
819 819
820 820 For performance reasons, ``filelog()`` does not show every changeset
821 821 that affects the requested file(s). See :hg:`help log` for details. For
822 822 a slower, more accurate result, use ``file()``.
823 823 """
824 824
825 825 if not isenabled(repo):
826 826 return orig(repo, subset, x)
827 827
828 828 # i18n: "filelog" is a keyword
829 829 pat = revset.getstring(x, _(b"filelog requires a pattern"))
830 830 m = matchmod.match(
831 831 repo.root, repo.getcwd(), [pat], default=b'relpath', ctx=repo[None]
832 832 )
833 833 s = set()
834 834
835 835 if not matchmod.patkind(pat):
836 836 # slow
837 837 for r in subset:
838 838 ctx = repo[r]
839 839 cfiles = ctx.files()
840 840 for f in m.files():
841 841 if f in cfiles:
842 842 s.add(ctx.rev())
843 843 break
844 844 else:
845 845 # partial
846 846 files = (f for f in repo[None] if m(f))
847 847 for f in files:
848 848 fctx = repo[None].filectx(f)
849 849 s.add(fctx.linkrev())
850 850 for actx in fctx.ancestors():
851 851 s.add(actx.linkrev())
852 852
853 853 return smartset.baseset([r for r in subset if r in s])
854 854
855 855
856 856 @command(b'gc', [], _(b'hg gc [REPO...]'), norepo=True)
857 857 def gc(ui, *args, **opts):
858 858 '''garbage collect the client and server filelog caches
859 859 '''
860 860 cachepaths = set()
861 861
862 862 # get the system client cache
863 863 systemcache = shallowutil.getcachepath(ui, allowempty=True)
864 864 if systemcache:
865 865 cachepaths.add(systemcache)
866 866
867 867 # get repo client and server cache
868 868 repopaths = []
869 869 pwd = ui.environ.get(b'PWD')
870 870 if pwd:
871 871 repopaths.append(pwd)
872 872
873 873 repopaths.extend(args)
874 874 repos = []
875 875 for repopath in repopaths:
876 876 try:
877 877 repo = hg.peer(ui, {}, repopath)
878 878 repos.append(repo)
879 879
880 880 repocache = shallowutil.getcachepath(repo.ui, allowempty=True)
881 881 if repocache:
882 882 cachepaths.add(repocache)
883 883 except error.RepoError:
884 884 pass
885 885
886 886 # gc client cache
887 887 for cachepath in cachepaths:
888 888 gcclient(ui, cachepath)
889 889
890 890 # gc server cache
891 891 for repo in repos:
892 892 remotefilelogserver.gcserver(ui, repo._repo)
893 893
894 894
895 895 def gcclient(ui, cachepath):
896 896 # get list of repos that use this cache
897 897 repospath = os.path.join(cachepath, b'repos')
898 898 if not os.path.exists(repospath):
899 899 ui.warn(_(b"no known cache at %s\n") % cachepath)
900 900 return
901 901
902 902 reposfile = open(repospath, b'rb')
903 903 repos = {r[:-1] for r in reposfile.readlines()}
904 904 reposfile.close()
905 905
906 906 # build list of useful files
907 907 validrepos = []
908 908 keepkeys = set()
909 909
910 910 sharedcache = None
911 911 filesrepacked = False
912 912
913 913 count = 0
914 914 progress = ui.makeprogress(
915 915 _(b"analyzing repositories"), unit=b"repos", total=len(repos)
916 916 )
917 917 for path in repos:
918 918 progress.update(count)
919 919 count += 1
920 920 try:
921 921 path = ui.expandpath(os.path.normpath(path))
922 922 except TypeError as e:
923 923 ui.warn(_(b"warning: malformed path: %r:%s\n") % (path, e))
924 924 traceback.print_exc()
925 925 continue
926 926 try:
927 927 peer = hg.peer(ui, {}, path)
928 928 repo = peer._repo
929 929 except error.RepoError:
930 930 continue
931 931
932 932 validrepos.append(path)
933 933
934 934 # Protect against any repo or config changes that have happened since
935 935 # this repo was added to the repos file. We'd rather this loop succeed
936 936 # and too much be deleted, than the loop fail and nothing gets deleted.
937 937 if not isenabled(repo):
938 938 continue
939 939
940 940 if not util.safehasattr(repo, b'name'):
941 941 ui.warn(
942 942 _(b"repo %s is a misconfigured remotefilelog repo\n") % path
943 943 )
944 944 continue
945 945
946 946 # If garbage collection on repack and repack on hg gc are enabled
947 947 # then loose files are repacked and garbage collected.
948 948 # Otherwise regular garbage collection is performed.
949 949 repackonhggc = repo.ui.configbool(b'remotefilelog', b'repackonhggc')
950 950 gcrepack = repo.ui.configbool(b'remotefilelog', b'gcrepack')
951 951 if repackonhggc and gcrepack:
952 952 try:
953 953 repackmod.incrementalrepack(repo)
954 954 filesrepacked = True
955 955 continue
956 956 except (IOError, repackmod.RepackAlreadyRunning):
957 957 # If repack cannot be performed due to not enough disk space
958 958 # continue doing garbage collection of loose files w/o repack
959 959 pass
960 960
961 961 reponame = repo.name
962 962 if not sharedcache:
963 963 sharedcache = repo.sharedstore
964 964
965 965 # Compute a keepset which is not garbage collected
966 966 def keyfn(fname, fnode):
967 967 return fileserverclient.getcachekey(reponame, fname, hex(fnode))
968 968
969 969 keepkeys = repackmod.keepset(repo, keyfn=keyfn, lastkeepkeys=keepkeys)
970 970
971 971 progress.complete()
972 972
973 973 # write list of valid repos back
974 974 oldumask = os.umask(0o002)
975 975 try:
976 976 reposfile = open(repospath, b'wb')
977 977 reposfile.writelines([(b"%s\n" % r) for r in validrepos])
978 978 reposfile.close()
979 979 finally:
980 980 os.umask(oldumask)
981 981
982 982 # prune cache
983 983 if sharedcache is not None:
984 984 sharedcache.gc(keepkeys)
985 985 elif not filesrepacked:
986 986 ui.warn(_(b"warning: no valid repos in repofile\n"))
987 987
988 988
989 989 def log(orig, ui, repo, *pats, **opts):
990 990 if not isenabled(repo):
991 991 return orig(ui, repo, *pats, **opts)
992 992
993 993 follow = opts.get('follow')
994 994 revs = opts.get('rev')
995 995 if pats:
996 996 # Force slowpath for non-follow patterns and follows that start from
997 997 # non-working-copy-parent revs.
998 998 if not follow or revs:
999 999 # This forces the slowpath
1000 1000 opts['removed'] = True
1001 1001
1002 1002 # If this is a non-follow log without any revs specified, recommend that
1003 1003 # the user add -f to speed it up.
1004 1004 if not follow and not revs:
1005 1005 match = scmutil.match(repo[b'.'], pats, pycompat.byteskwargs(opts))
1006 1006 isfile = not match.anypats()
1007 1007 if isfile:
1008 1008 for file in match.files():
1009 1009 if not os.path.isfile(repo.wjoin(file)):
1010 1010 isfile = False
1011 1011 break
1012 1012
1013 1013 if isfile:
1014 1014 ui.warn(
1015 1015 _(
1016 1016 b"warning: file log can be slow on large repos - "
1017 1017 + b"use -f to speed it up\n"
1018 1018 )
1019 1019 )
1020 1020
1021 1021 return orig(ui, repo, *pats, **opts)
1022 1022
1023 1023
1024 1024 def revdatelimit(ui, revset):
1025 1025 """Update revset so that only changesets no older than 'prefetchdays' days
1026 1026 are included. The default value is set to 14 days. If 'prefetchdays' is set
1027 1027 to zero or a negative value, the date restriction is not applied.
1028 1028 """
1029 1029 days = ui.configint(b'remotefilelog', b'prefetchdays')
1030 1030 if days > 0:
1031 1031 revset = b'(%s) & date(-%s)' % (revset, days)
1032 1032 return revset
1033 1033
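# Illustrative expansion: with the default prefetchdays of 14,
#   revdatelimit(ui, b'master')
# returns b'(master) & date(-14)'; with prefetchdays <= 0 the revset is
# returned unchanged.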
1034 1034
1035 1035 def readytofetch(repo):
1036 1036 """Check that enough time has passed since the last background prefetch.
1037 1037 This only relates to prefetches after operations that change the working
1038 1038 copy parent. Default delay between background prefetches is 2 minutes.
1039 1039 """
1040 1040 timeout = repo.ui.configint(b'remotefilelog', b'prefetchdelay')
1041 1041 fname = repo.vfs.join(b'lastprefetch')
1042 1042
1043 1043 ready = False
1044 1044 with open(fname, b'a'):
1045 1045 # the with construct above is used to avoid race conditions
1046 1046 modtime = os.path.getmtime(fname)
1047 1047 if (time.time() - modtime) > timeout:
1048 1048 os.utime(fname, None)
1049 1049 ready = True
1050 1050
1051 1051 return ready
1052 1052
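# A sketch of the throttle above: opening the marker file in append mode
# creates it on first use, getmtime() reads the time of the last prefetch,
# and os.utime() refreshes the timestamp only when a prefetch is actually
# due, so repeated calls within 'prefetchdelay' report not ready.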
1053 1053
1054 1054 def wcpprefetch(ui, repo, **kwargs):
1055 1055 """Prefetches in background revisions specified by bgprefetchrevs revset.
1056 1056 Does background repack if backgroundrepack flag is set in config.
1057 1057 """
1058 1058 shallow = isenabled(repo)
1059 1059 bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs')
1060 1060 isready = readytofetch(repo)
1061 1061
1062 1062 if not (shallow and bgprefetchrevs and isready):
1063 1063 return
1064 1064
1065 1065 bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
1066 1066 # update a revset with a date limit
1067 1067 bgprefetchrevs = revdatelimit(ui, bgprefetchrevs)
1068 1068
1069 1069 def anon(unused_success):
1070 1070 if util.safehasattr(repo, b'ranprefetch') and repo.ranprefetch:
1071 1071 return
1072 1072 repo.ranprefetch = True
1073 1073 repo.backgroundprefetch(bgprefetchrevs, repack=bgrepack)
1074 1074
1075 1075 repo._afterlock(anon)
1076 1076
1077 1077
1078 1078 def pull(orig, ui, repo, *pats, **opts):
1079 1079 result = orig(ui, repo, *pats, **opts)
1080 1080
1081 1081 if isenabled(repo):
1082 1082 # prefetch if it's configured
1083 1083 prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch')
1084 1084 bgrepack = repo.ui.configbool(b'remotefilelog', b'backgroundrepack')
1085 1085 bgprefetch = repo.ui.configbool(b'remotefilelog', b'backgroundprefetch')
1086 1086
1087 1087 if prefetchrevset:
1088 1088 ui.status(_(b"prefetching file contents\n"))
1089 1089 revs = scmutil.revrange(repo, [prefetchrevset])
1090 1090 base = repo[b'.'].rev()
1091 1091 if bgprefetch:
1092 1092 repo.backgroundprefetch(prefetchrevset, repack=bgrepack)
1093 1093 else:
1094 1094 repo.prefetch(revs, base=base)
1095 1095 if bgrepack:
1096 1096 repackmod.backgroundrepack(repo, incremental=True)
1097 1097 elif bgrepack:
1098 1098 repackmod.backgroundrepack(repo, incremental=True)
1099 1099
1100 1100 return result
1101 1101
1102 1102
1103 1103 def exchangepull(orig, repo, remote, *args, **kwargs):
1104 1104 # Hook into the callstream/getbundle to insert bundle capabilities
1105 1105 # during a pull.
1106 1106 def localgetbundle(
1107 1107 orig, source, heads=None, common=None, bundlecaps=None, **kwargs
1108 1108 ):
1109 1109 if not bundlecaps:
1110 1110 bundlecaps = set()
1111 1111 bundlecaps.add(constants.BUNDLE2_CAPABLITY)
1112 1112 return orig(
1113 1113 source, heads=heads, common=common, bundlecaps=bundlecaps, **kwargs
1114 1114 )
1115 1115
1116 1116 if util.safehasattr(remote, b'_callstream'):
1117 1117 remote._localrepo = repo
1118 1118 elif util.safehasattr(remote, b'getbundle'):
1119 1119 extensions.wrapfunction(remote, b'getbundle', localgetbundle)
1120 1120
1121 1121 return orig(repo, remote, *args, **kwargs)
1122 1122
1123 1123
1124 1124 def _fileprefetchhook(repo, revmatches):
1125 1125 if isenabled(repo):
1126 1126 allfiles = []
1127 1127 for rev, match in revmatches:
1128 1128 if rev == nodemod.wdirrev or rev is None:
1129 1129 continue
1130 1130 ctx = repo[rev]
1131 1131 mf = ctx.manifest()
1132 1132 sparsematch = repo.maybesparsematch(ctx.rev())
1133 1133 for path in ctx.walk(match):
1134 1134 if (not sparsematch or sparsematch(path)) and path in mf:
1135 1135 allfiles.append((path, hex(mf[path])))
1136 1136 repo.fileservice.prefetch(allfiles)
1137 1137
1138 1138
1139 1139 @command(
1140 1140 b'debugremotefilelog',
1141 1141 [(b'd', b'decompress', None, _(b'decompress the filelog first')),],
1142 1142 _(b'hg debugremotefilelog <path>'),
1143 1143 norepo=True,
1144 1144 )
1145 1145 def debugremotefilelog(ui, path, **opts):
1146 1146 return debugcommands.debugremotefilelog(ui, path, **opts)
1147 1147
1148 1148
1149 1149 @command(
1150 1150 b'verifyremotefilelog',
1151 1151 [(b'd', b'decompress', None, _(b'decompress the filelogs first')),],
1152 1152 _(b'hg verifyremotefilelog <directory>'),
1153 1153 norepo=True,
1154 1154 )
1155 1155 def verifyremotefilelog(ui, path, **opts):
1156 1156 return debugcommands.verifyremotefilelog(ui, path, **opts)
1157 1157
1158 1158
1159 1159 @command(
1160 1160 b'debugdatapack',
1161 1161 [
1162 1162 (b'', b'long', None, _(b'print the long hashes')),
1163 1163 (b'', b'node', b'', _(b'dump the contents of node'), b'NODE'),
1164 1164 ],
1165 1165 _(b'hg debugdatapack <paths>'),
1166 1166 norepo=True,
1167 1167 )
1168 1168 def debugdatapack(ui, *paths, **opts):
1169 1169 return debugcommands.debugdatapack(ui, *paths, **opts)
1170 1170
1171 1171
1172 1172 @command(b'debughistorypack', [], _(b'hg debughistorypack <path>'), norepo=True)
1173 1173 def debughistorypack(ui, path, **opts):
1174 1174 return debugcommands.debughistorypack(ui, path)
1175 1175
1176 1176
1177 1177 @command(b'debugkeepset', [], _(b'hg debugkeepset'))
1178 1178 def debugkeepset(ui, repo, **opts):
1179 1179 # The command is used to measure keepset computation time
1180 1180 def keyfn(fname, fnode):
1181 1181 return fileserverclient.getcachekey(repo.name, fname, hex(fnode))
1182 1182
1183 1183 repackmod.keepset(repo, keyfn)
1184 1184 return
1185 1185
1186 1186
1187 1187 @command(b'debugwaitonrepack', [], _(b'hg debugwaitonrepack'))
1188 1188 def debugwaitonrepack(ui, repo, **opts):
1189 1189 return debugcommands.debugwaitonrepack(repo)
1190 1190
1191 1191
1192 1192 @command(b'debugwaitonprefetch', [], _(b'hg debugwaitonprefetch'))
1193 1193 def debugwaitonprefetch(ui, repo, **opts):
1194 1194 return debugcommands.debugwaitonprefetch(repo)
1195 1195
1196 1196
1197 1197 def resolveprefetchopts(ui, opts):
1198 1198 if not opts.get(b'rev'):
1199 1199 revset = [b'.', b'draft()']
1200 1200
1201 1201 prefetchrevset = ui.config(b'remotefilelog', b'pullprefetch', None)
1202 1202 if prefetchrevset:
1203 1203 revset.append(b'(%s)' % prefetchrevset)
1204 1204 bgprefetchrevs = ui.config(b'remotefilelog', b'bgprefetchrevs', None)
1205 1205 if bgprefetchrevs:
1206 1206 revset.append(b'(%s)' % bgprefetchrevs)
1207 1207 revset = b'+'.join(revset)
1208 1208
1209 1209 # update a revset with a date limit
1210 1210 revset = revdatelimit(ui, revset)
1211 1211
1212 1212 opts[b'rev'] = [revset]
1213 1213
1214 1214 if not opts.get(b'base'):
1215 1215 opts[b'base'] = None
1216 1216
1217 1217 return opts
1218 1218
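# Illustrative result: with neither pullprefetch nor bgprefetchrevs
# configured and prefetchdays at its default, opts[b'rev'] becomes
# [b'(.+draft()) & date(-14)']; each configured revset is appended as a
# further '+'-joined term before the date limit is applied.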
1219 1219
1220 1220 @command(
1221 1221 b'prefetch',
1222 1222 [
1223 1223 (b'r', b'rev', [], _(b'prefetch the specified revisions'), _(b'REV')),
1224 1224 (b'', b'repack', False, _(b'run repack after prefetch')),
1225 1225 (b'b', b'base', b'', _(b"rev that is assumed to already be local")),
1226 1226 ]
1227 1227 + commands.walkopts,
1228 1228 _(b'hg prefetch [OPTIONS] [FILE...]'),
1229 1229 helpcategory=command.CATEGORY_MAINTENANCE,
1230 1230 )
1231 1231 def prefetch(ui, repo, *pats, **opts):
1232 1232 """prefetch file revisions from the server
1233 1233
1234 1234 Prefetches file revisions for the specified revs and stores them in the
1235 1235 local remotefilelog cache. If no rev is specified, the default rev is
1236 1236 used, which is the union of dot, draft, pullprefetch and bgprefetchrevs.
1237 1237 File names or patterns can be used to limit which files are downloaded.
1238 1238
1239 1239 Return 0 on success.
1240 1240 """
1241 1241 opts = pycompat.byteskwargs(opts)
1242 1242 if not isenabled(repo):
1243 1243 raise error.Abort(_(b"repo is not shallow"))
1244 1244
1245 1245 opts = resolveprefetchopts(ui, opts)
1246 1246 revs = scmutil.revrange(repo, opts.get(b'rev'))
1247 1247 repo.prefetch(revs, opts.get(b'base'), pats, opts)
1248 1248
1249 1249 # Run repack in background
1250 1250 if opts.get(b'repack'):
1251 1251 repackmod.backgroundrepack(repo, incremental=True)
1252 1252
1253 1253
1254 1254 @command(
1255 1255 b'repack',
1256 1256 [
1257 1257 (b'', b'background', None, _(b'run in a background process'), None),
1258 1258 (b'', b'incremental', None, _(b'do an incremental repack'), None),
1259 1259 (
1260 1260 b'',
1261 1261 b'packsonly',
1262 1262 None,
1263 1263 _(b'only repack packs (skip loose objects)'),
1264 1264 None,
1265 1265 ),
1266 1266 ],
1267 1267 _(b'hg repack [OPTIONS]'),
1268 1268 )
1269 1269 def repack_(ui, repo, *pats, **opts):
1270 1270 if opts.get('background'):
1271 1271 repackmod.backgroundrepack(
1272 1272 repo,
1273 1273 incremental=opts.get('incremental'),
1274 1274 packsonly=opts.get('packsonly', False),
1275 1275 )
1276 1276 return
1277 1277
1278 1278 options = {b'packsonly': opts.get('packsonly')}
1279 1279
1280 1280 try:
1281 1281 if opts.get('incremental'):
1282 1282 repackmod.incrementalrepack(repo, options=options)
1283 1283 else:
1284 1284 repackmod.fullrepack(repo, options=options)
1285 1285 except repackmod.RepackAlreadyRunning as ex:
1286 1286 # Don't propagate the exception if the repack is already in
1287 1287 # progress, since we want the command to exit 0.
1288 1288 repo.ui.warn(b'%s\n' % ex)
@@ -1,2311 +1,2315 b''
1 1 # merge.py - directory-level update/merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import errno
12 12 import stat
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 modifiednodeid,
19 19 nullid,
20 20 nullrev,
21 21 )
22 22 from .thirdparty import attr
23 23 from . import (
24 24 copies,
25 25 encoding,
26 26 error,
27 27 filemerge,
28 28 match as matchmod,
29 29 mergestate as mergestatemod,
30 30 obsutil,
31 31 pathutil,
32 32 pycompat,
33 33 scmutil,
34 34 subrepoutil,
35 35 util,
36 36 worker,
37 37 )
38 38
39 39 _pack = struct.pack
40 40 _unpack = struct.unpack
41 41
42 42
43 43 def _getcheckunknownconfig(repo, section, name):
44 44 config = repo.ui.config(section, name)
45 45 valid = [b'abort', b'ignore', b'warn']
46 46 if config not in valid:
47 47 validstr = b', '.join([b"'" + v + b"'" for v in valid])
48 48 raise error.ConfigError(
49 49 _(b"%s.%s not valid ('%s' is none of %s)")
50 50 % (section, name, config, validstr)
51 51 )
52 52 return config
53 53
54 54
55 55 def _checkunknownfile(repo, wctx, mctx, f, f2=None):
56 56 if wctx.isinmemory():
57 57 # Nothing to do in IMM because nothing in the "working copy" can be an
58 58 # unknown file.
59 59 #
60 60 # Note that we should bail out here, not in ``_checkunknownfiles()``,
61 61 # because that function does other useful work.
62 62 return False
63 63
64 64 if f2 is None:
65 65 f2 = f
66 66 return (
67 67 repo.wvfs.audit.check(f)
68 68 and repo.wvfs.isfileorlink(f)
69 69 and repo.dirstate.normalize(f) not in repo.dirstate
70 70 and mctx[f2].cmp(wctx[f])
71 71 )
72 72
73 73
74 74 class _unknowndirschecker(object):
75 75 """
76 76 Look for any unknown files or directories that may have a path conflict
77 77 with a file. If any path prefix of the file exists as a file or link,
78 78 then it conflicts. If the file itself is a directory that contains any
79 79 file that is not tracked, then it conflicts.
80 80
81 81 Returns the shortest path at which a conflict occurs, or None if there is
82 82 no conflict.
83 83 """
84 84
85 85 def __init__(self):
86 86 # A set of paths known to be good. This prevents repeated checking of
87 87 # dirs. It will be updated with any new dirs that are checked and found
88 88 # to be safe.
89 89 self._unknowndircache = set()
90 90
91 91 # A set of paths that are known to be absent. This prevents repeated
92 92 # checking of subdirectories that are known not to exist. It will be
93 93 # updated with any new dirs that are checked and found to be absent.
94 94 self._missingdircache = set()
95 95
96 96 def __call__(self, repo, wctx, f):
97 97 if wctx.isinmemory():
98 98 # Nothing to do in IMM for the same reason as ``_checkunknownfile``.
99 99 return False
100 100
101 101 # Check for path prefixes that exist as unknown files.
102 102 for p in reversed(list(pathutil.finddirs(f))):
103 103 if p in self._missingdircache:
104 104 return
105 105 if p in self._unknowndircache:
106 106 continue
107 107 if repo.wvfs.audit.check(p):
108 108 if (
109 109 repo.wvfs.isfileorlink(p)
110 110 and repo.dirstate.normalize(p) not in repo.dirstate
111 111 ):
112 112 return p
113 113 if not repo.wvfs.lexists(p):
114 114 self._missingdircache.add(p)
115 115 return
116 116 self._unknowndircache.add(p)
117 117
118 118 # Check if the file conflicts with a directory containing unknown files.
119 119 if repo.wvfs.audit.check(f) and repo.wvfs.isdir(f):
120 120 # Does the directory contain any files that are not in the dirstate?
121 121 for p, dirs, files in repo.wvfs.walk(f):
122 122 for fn in files:
123 123 relf = util.pconvert(repo.wvfs.reljoin(p, fn))
124 124 relf = repo.dirstate.normalize(relf, isknown=True)
125 125 if relf not in repo.dirstate:
126 126 return f
127 127 return None
128 128
129 129
130 130 def _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce):
131 131 """
132 132 Considers any actions that care about the presence of conflicting unknown
133 133 files. For some actions, the result is to abort; for others, it is to
134 134 choose a different action.
135 135 """
136 136 fileconflicts = set()
137 137 pathconflicts = set()
138 138 warnconflicts = set()
139 139 abortconflicts = set()
140 140 unknownconfig = _getcheckunknownconfig(repo, b'merge', b'checkunknown')
141 141 ignoredconfig = _getcheckunknownconfig(repo, b'merge', b'checkignored')
142 142 pathconfig = repo.ui.configbool(
143 143 b'experimental', b'merge.checkpathconflicts'
144 144 )
145 145 if not force:
146 146
147 147 def collectconflicts(conflicts, config):
148 148 if config == b'abort':
149 149 abortconflicts.update(conflicts)
150 150 elif config == b'warn':
151 151 warnconflicts.update(conflicts)
152 152
153 153 checkunknowndirs = _unknowndirschecker()
154 154 for f in mresult.files(
155 155 (
156 156 mergestatemod.ACTION_CREATED,
157 157 mergestatemod.ACTION_DELETED_CHANGED,
158 158 )
159 159 ):
160 160 if _checkunknownfile(repo, wctx, mctx, f):
161 161 fileconflicts.add(f)
162 162 elif pathconfig and f not in wctx:
163 163 path = checkunknowndirs(repo, wctx, f)
164 164 if path is not None:
165 165 pathconflicts.add(path)
166 166 for f, args, msg in mresult.getactions(
167 167 [mergestatemod.ACTION_LOCAL_DIR_RENAME_GET]
168 168 ):
169 169 if _checkunknownfile(repo, wctx, mctx, f, args[0]):
170 170 fileconflicts.add(f)
171 171
172 172 allconflicts = fileconflicts | pathconflicts
173 173 ignoredconflicts = {c for c in allconflicts if repo.dirstate._ignore(c)}
174 174 unknownconflicts = allconflicts - ignoredconflicts
175 175 collectconflicts(ignoredconflicts, ignoredconfig)
176 176 collectconflicts(unknownconflicts, unknownconfig)
177 177 else:
178 178 for f, args, msg in list(
179 179 mresult.getactions([mergestatemod.ACTION_CREATED_MERGE])
180 180 ):
181 181 fl2, anc = args
182 182 different = _checkunknownfile(repo, wctx, mctx, f)
183 183 if repo.dirstate._ignore(f):
184 184 config = ignoredconfig
185 185 else:
186 186 config = unknownconfig
187 187
188 188 # The behavior when force is True is described by this table:
189 189 # config different mergeforce | action backup
190 190 # * n * | get n
191 191 # * y y | merge -
192 192 # abort y n | merge - (1)
193 193 # warn y n | warn + get y
194 194 # ignore y n | get y
195 195 #
196 196 # (1) this is probably the wrong behavior here -- we should
197 197 # probably abort, but some actions like rebases currently
198 198 # don't like an abort happening in the middle of
199 199 # merge.update.
200 200 if not different:
201 201 mresult.addfile(
202 202 f,
203 203 mergestatemod.ACTION_GET,
204 204 (fl2, False),
205 205 b'remote created',
206 206 )
207 207 elif mergeforce or config == b'abort':
208 208 mresult.addfile(
209 209 f,
210 210 mergestatemod.ACTION_MERGE,
211 211 (f, f, None, False, anc),
212 212 b'remote differs from untracked local',
213 213 )
214 214 elif config == b'abort':
215 215 abortconflicts.add(f)
216 216 else:
217 217 if config == b'warn':
218 218 warnconflicts.add(f)
219 219 mresult.addfile(
220 220 f, mergestatemod.ACTION_GET, (fl2, True), b'remote created',
221 221 )
222 222
223 223 for f in sorted(abortconflicts):
224 224 warn = repo.ui.warn
225 225 if f in pathconflicts:
226 226 if repo.wvfs.isfileorlink(f):
227 227 warn(_(b"%s: untracked file conflicts with directory\n") % f)
228 228 else:
229 229 warn(_(b"%s: untracked directory conflicts with file\n") % f)
230 230 else:
231 231 warn(_(b"%s: untracked file differs\n") % f)
232 232 if abortconflicts:
233 233 raise error.Abort(
234 234 _(
235 235 b"untracked files in working directory "
236 236 b"differ from files in requested revision"
237 237 )
238 238 )
239 239
240 240 for f in sorted(warnconflicts):
241 241 if repo.wvfs.isfileorlink(f):
242 242 repo.ui.warn(_(b"%s: replacing untracked file\n") % f)
243 243 else:
244 244 repo.ui.warn(_(b"%s: replacing untracked files in directory\n") % f)
245 245
246 246 for f, args, msg in list(
247 247 mresult.getactions([mergestatemod.ACTION_CREATED])
248 248 ):
249 249 backup = (
250 250 f in fileconflicts
251 251 or f in pathconflicts
252 252 or any(p in pathconflicts for p in pathutil.finddirs(f))
253 253 )
254 254 (flags,) = args
255 255 mresult.addfile(f, mergestatemod.ACTION_GET, (flags, backup), msg)
256 256
257 257
258 258 def _forgetremoved(wctx, mctx, branchmerge):
259 259 """
260 260 Forget removed files
261 261
262 262 If we're jumping between revisions (as opposed to merging), and if
263 263 neither the working directory nor the target rev has the file,
264 264 then we need to remove it from the dirstate, to prevent the
265 265 dirstate from listing the file when it is no longer in the
266 266 manifest.
267 267
268 268 If we're merging, and the other revision has removed a file
269 269 that is not present in the working directory, we need to mark it
270 270 as removed.
271 271 """
272 272
273 273 actions = {}
274 274 m = mergestatemod.ACTION_FORGET
275 275 if branchmerge:
276 276 m = mergestatemod.ACTION_REMOVE
277 277 for f in wctx.deleted():
278 278 if f not in mctx:
279 279 actions[f] = m, None, b"forget deleted"
280 280
281 281 if not branchmerge:
282 282 for f in wctx.removed():
283 283 if f not in mctx:
284 284 actions[f] = (
285 285 mergestatemod.ACTION_FORGET,
286 286 None,
287 287 b"forget removed",
288 288 )
289 289
290 290 return actions
291 291
292 292
293 293 def _checkcollision(repo, wmf, mresult):
294 294 """
295 295 Check for case-folding collisions.
296 296 """
297 297 # If the repo is narrowed, filter out files outside the narrowspec.
298 298 narrowmatch = repo.narrowmatch()
299 299 if not narrowmatch.always():
300 300 pmmf = set(wmf.walk(narrowmatch))
301 301 if mresult:
302 302 for f in list(mresult.files()):
303 303 if not narrowmatch(f):
304 304 mresult.removefile(f)
305 305 else:
306 306 # build provisional merged manifest up
307 307 pmmf = set(wmf)
308 308
309 309 if mresult:
310 310 # KEEP and EXEC are no-op
311 311 for f in mresult.files(
312 312 (
313 313 mergestatemod.ACTION_ADD,
314 314 mergestatemod.ACTION_ADD_MODIFIED,
315 315 mergestatemod.ACTION_FORGET,
316 316 mergestatemod.ACTION_GET,
317 317 mergestatemod.ACTION_CHANGED_DELETED,
318 318 mergestatemod.ACTION_DELETED_CHANGED,
319 319 )
320 320 ):
321 321 pmmf.add(f)
322 322 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
323 323 pmmf.discard(f)
324 324 for f, args, msg in mresult.getactions(
325 325 [mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL]
326 326 ):
327 327 f2, flags = args
328 328 pmmf.discard(f2)
329 329 pmmf.add(f)
330 330 for f in mresult.files((mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,)):
331 331 pmmf.add(f)
332 332 for f, args, msg in mresult.getactions([mergestatemod.ACTION_MERGE]):
333 333 f1, f2, fa, move, anc = args
334 334 if move:
335 335 pmmf.discard(f1)
336 336 pmmf.add(f)
337 337
338 338 # check case-folding collision in provisional merged manifest
339 339 foldmap = {}
340 340 for f in pmmf:
341 341 fold = util.normcase(f)
342 342 if fold in foldmap:
343 343 raise error.Abort(
344 344 _(b"case-folding collision between %s and %s")
345 345 % (f, foldmap[fold])
346 346 )
347 347 foldmap[fold] = f
348 348
349 349 # check case-folding of directories
350 350 foldprefix = unfoldprefix = lastfull = b''
351 351 for fold, f in sorted(foldmap.items()):
352 352 if fold.startswith(foldprefix) and not f.startswith(unfoldprefix):
353 353 # the folded prefix matches but actual casing is different
354 354 raise error.Abort(
355 355 _(b"case-folding collision between %s and directory of %s")
356 356 % (lastfull, f)
357 357 )
358 358 foldprefix = fold + b'/'
359 359 unfoldprefix = f + b'/'
360 360 lastfull = f
361 361
362 362
363 363 def driverpreprocess(repo, ms, wctx, labels=None):
364 364 """run the preprocess step of the merge driver, if any
365 365
366 366 This is currently not implemented -- it's an extension point."""
367 367 return True
368 368
369 369
370 370 def driverconclude(repo, ms, wctx, labels=None):
371 371 """run the conclude step of the merge driver, if any
372 372
373 373 This is currently not implemented -- it's an extension point."""
374 374 return True
375 375
376 376
377 377 def _filesindirs(repo, manifest, dirs):
378 378 """
379 379 Generator that yields pairs of all the files in the manifest that are found
380 380 inside the directories listed in dirs, and which directory they are found
381 381 in.
382 382 """
383 383 for f in manifest:
384 384 for p in pathutil.finddirs(f):
385 385 if p in dirs:
386 386 yield f, p
387 387 break
388 388
389 389
390 390 def checkpathconflicts(repo, wctx, mctx, mresult):
391 391 """
392 392 Check if any actions introduce path conflicts in the repository, updating
393 393 actions to record or handle the path conflict accordingly.
394 394 """
395 395 mf = wctx.manifest()
396 396
397 397 # The set of local files that conflict with a remote directory.
398 398 localconflicts = set()
399 399
400 400 # The set of directories that conflict with a remote file, and so may cause
401 401 # conflicts if they still contain any files after the merge.
402 402 remoteconflicts = set()
403 403
404 404 # The set of directories that appear as both a file and a directory in the
405 405 # remote manifest. These indicate an invalid remote manifest, which
406 406 # can't be updated to cleanly.
407 407 invalidconflicts = set()
408 408
409 409 # The set of directories that contain files that are being created.
410 410 createdfiledirs = set()
411 411
412 412 # The set of files deleted by all the actions.
413 413 deletedfiles = set()
414 414
415 415 for f in mresult.files(
416 416 (
417 417 mergestatemod.ACTION_CREATED,
418 418 mergestatemod.ACTION_DELETED_CHANGED,
419 419 mergestatemod.ACTION_MERGE,
420 420 mergestatemod.ACTION_CREATED_MERGE,
421 421 )
422 422 ):
423 423 # This action may create a new local file.
424 424 createdfiledirs.update(pathutil.finddirs(f))
425 425 if mf.hasdir(f):
426 426 # The file aliases a local directory. This might be ok if all
427 427 # the files in the local directory are being deleted. This
428 428 # will be checked once we know what all the deleted files are.
429 429 remoteconflicts.add(f)
430 430 # Track the names of all deleted files.
431 431 for f in mresult.files((mergestatemod.ACTION_REMOVE,)):
432 432 deletedfiles.add(f)
433 433 for (f, args, msg) in mresult.getactions((mergestatemod.ACTION_MERGE,)):
434 434 f1, f2, fa, move, anc = args
435 435 if move:
436 436 deletedfiles.add(f1)
437 437 for (f, args, msg) in mresult.getactions(
438 438 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,)
439 439 ):
440 440 f2, flags = args
441 441 deletedfiles.add(f2)
442 442
443 443 # Check all directories that contain created files for path conflicts.
444 444 for p in createdfiledirs:
445 445 if p in mf:
446 446 if p in mctx:
447 447 # A file is in a directory which aliases both a local
448 448 # and a remote file. This is an internal inconsistency
449 449 # within the remote manifest.
450 450 invalidconflicts.add(p)
451 451 else:
452 452 # A file is in a directory which aliases a local file.
453 453 # We will need to rename the local file.
454 454 localconflicts.add(p)
455 455 pd = mresult.getfile(p)
456 456 if pd and pd[0] in (
457 457 mergestatemod.ACTION_CREATED,
458 458 mergestatemod.ACTION_DELETED_CHANGED,
459 459 mergestatemod.ACTION_MERGE,
460 460 mergestatemod.ACTION_CREATED_MERGE,
461 461 ):
462 462 # The file is in a directory which aliases a remote file.
463 463 # This is an internal inconsistency within the remote
464 464 # manifest.
465 465 invalidconflicts.add(p)
466 466
467 467 # Rename all local conflicting files that have not been deleted.
468 468 for p in localconflicts:
469 469 if p not in deletedfiles:
470 470 ctxname = bytes(wctx).rstrip(b'+')
471 471 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
472 472 porig = wctx[p].copysource() or p
473 473 mresult.addfile(
474 474 pnew,
475 475 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
476 476 (p, porig),
477 477 b'local path conflict',
478 478 )
479 479 mresult.addfile(
480 480 p,
481 481 mergestatemod.ACTION_PATH_CONFLICT,
482 482 (pnew, b'l'),
483 483 b'path conflict',
484 484 )
485 485
486 486 if remoteconflicts:
487 487 # Check if all files in the conflicting directories have been removed.
488 488 ctxname = bytes(mctx).rstrip(b'+')
489 489 for f, p in _filesindirs(repo, mf, remoteconflicts):
490 490 if f not in deletedfiles:
491 491 m, args, msg = mresult.getfile(p)
492 492 pnew = util.safename(p, ctxname, wctx, set(mresult.files()))
493 493 if m in (
494 494 mergestatemod.ACTION_DELETED_CHANGED,
495 495 mergestatemod.ACTION_MERGE,
496 496 ):
497 497 # Action was merge, just update target.
498 498 mresult.addfile(pnew, m, args, msg)
499 499 else:
500 500 # Action was create, change to renamed get action.
501 501 fl = args[0]
502 502 mresult.addfile(
503 503 pnew,
504 504 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
505 505 (p, fl),
506 506 b'remote path conflict',
507 507 )
508 508 mresult.addfile(
509 509 p,
510 510 mergestatemod.ACTION_PATH_CONFLICT,
511 511 (pnew, mergestatemod.ACTION_REMOVE),
512 512 b'path conflict',
513 513 )
514 514 remoteconflicts.remove(p)
515 515 break
516 516
517 517 if invalidconflicts:
518 518 for p in invalidconflicts:
519 519 repo.ui.warn(_(b"%s: is both a file and a directory\n") % p)
520 520 raise error.Abort(_(b"destination manifest contains path conflicts"))
521 521
522 522
523 523 def _filternarrowactions(narrowmatch, branchmerge, mresult):
524 524 """
524 524 Filters out actions that can be ignored because the repo is narrowed.
526 526
527 527 Raise an exception if the merge cannot be completed because the repo is
528 528 narrowed.
529 529 """
530 530 # TODO: handle with nonconflicttypes
531 531 nooptypes = {mergestatemod.ACTION_KEEP}
532 532 nonconflicttypes = {
533 533 mergestatemod.ACTION_ADD,
534 534 mergestatemod.ACTION_ADD_MODIFIED,
535 535 mergestatemod.ACTION_CREATED,
536 536 mergestatemod.ACTION_CREATED_MERGE,
537 537 mergestatemod.ACTION_FORGET,
538 538 mergestatemod.ACTION_GET,
539 539 mergestatemod.ACTION_REMOVE,
540 540 mergestatemod.ACTION_EXEC,
541 541 }
542 542 # We mutate the items in the dict during iteration, so iterate
543 543 # over a copy.
544 for f, action in list(mresult.actions.items()):
544 for f, action in list(mresult.filemap()):
545 545 if narrowmatch(f):
546 546 pass
547 547 elif not branchmerge:
548 548 mresult.removefile(f) # just updating, ignore changes outside clone
549 549 elif action[0] in nooptypes:
550 550 mresult.removefile(f) # merge does not affect file
551 551 elif action[0] in nonconflicttypes:
552 552 raise error.Abort(
553 553 _(
554 554 b'merge affects file \'%s\' outside narrow, '
555 555 b'which is not yet supported'
556 556 )
557 557 % f,
558 558 hint=_(b'merging in the other direction may work'),
559 559 )
560 560 else:
561 561 raise error.Abort(
562 562 _(b'conflict in file \'%s\' is outside narrow clone') % f
563 563 )
564 564
565 565
566 566 class mergeresult(object):
567 567 '''An object representing the result of merging manifests.
568 568
569 569 It has information about what actions need to be performed on the
570 570 dirstate, mapping of divergent renames and other such cases. '''
571 571
572 572 def __init__(self):
573 573 """
574 574 filemapping: dict with filenames as keys and action-related info as values
575 575 diverge: mapping of source name -> list of dest names for
576 576 divergent renames
577 577 renamedelete: mapping of source name -> list of destinations for files
578 578 deleted on one side and renamed on the other
579 579 commitinfo: dict containing data which should be used on commit,
580 580 contains a filename -> info mapping
581 581 actionmapping: dict with action names as keys; each value is a dict
582 582 mapping filename to related data
583 583 """
584 584 self._filemapping = {}
585 585 self._diverge = {}
586 586 self._renamedelete = {}
587 587 self._commitinfo = {}
588 588 self._actionmapping = collections.defaultdict(dict)
589 589
590 590 def updatevalues(self, diverge, renamedelete, commitinfo):
591 591 self._diverge = diverge
592 592 self._renamedelete = renamedelete
593 593 self._commitinfo = commitinfo
594 594
595 595 def addfile(self, filename, action, data, message):
596 596 """ adds a new file to the mergeresult object
597 597
598 598 filename: file which we are adding
599 599 action: one of mergestatemod.ACTION_*
600 600 data: a tuple of information like fctx and ctx related to this merge
601 601 message: a message about the merge
602 602 """
603 603 # if the file already existed, we need to delete its old
604 604 # entry from _actionmapping too
605 605 if filename in self._filemapping:
606 606 a, d, m = self._filemapping[filename]
607 607 del self._actionmapping[a][filename]
608 608
609 609 self._filemapping[filename] = (action, data, message)
610 610 self._actionmapping[action][filename] = (data, message)
611 611
612 612 def getfile(self, filename, default_return=None):
613 613 """ returns (action, args, msg) about this file
614 614
615 615 returns default_return if the file is not present """
616 616 if filename in self._filemapping:
617 617 return self._filemapping[filename]
618 618 return default_return
619 619
620 620 def files(self, actions=None):
621 621 """ returns files on which provided action needs to perfromed
622 622
623 623 If actions is None, all files are returned
624 624 """
625 625 # TODO: think whether we should return renamedelete and
626 626 # diverge filenames also
627 627 if actions is None:
628 628 for f in self._filemapping:
629 629 yield f
630 630
631 631 else:
632 632 for a in actions:
633 633 for f in self._actionmapping[a]:
634 634 yield f
635 635
636 636 def removefile(self, filename):
637 637 """ removes a file from the mergeresult object as the file might
638 638 not merging anymore """
639 639 action, data, message = self._filemapping[filename]
640 640 del self._filemapping[filename]
641 641 del self._actionmapping[action][filename]
642 642
643 643 def getactions(self, actions, sort=False):
644 644 """ get list of files which are marked with these actions
645 645 if sort is true, files for each action is sorted and then added
646 646
647 647 Returns a list of tuple of form (filename, data, message)
648 648 """
649 649 for a in actions:
650 650 if sort:
651 651 for f in sorted(self._actionmapping[a]):
652 652 args, msg = self._actionmapping[a][f]
653 653 yield f, args, msg
654 654 else:
655 655 for f, (args, msg) in pycompat.iteritems(
656 656 self._actionmapping[a]
657 657 ):
658 658 yield f, args, msg
659 659
660 660 def len(self, actions=None):
661 661 """ returns number of files which needs actions
662 662
663 663 if actions is passed, total of number of files in that action
664 664 only is returned """
665 665
666 666 if actions is None:
667 667 return len(self._filemapping)
668 668
669 669 return sum(len(self._actionmapping[a]) for a in actions)
670 670
671 @property
672 def actions(self):
673 return self._filemapping
671 def filemap(self, sort=False):
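""" yields (filename, (action, data, message)) pairs for every file
tracked by this mergeresult, sorted by filename when sort is True """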
672 if sort:
673 for key, val in sorted(pycompat.iteritems(self._filemapping)):
674 yield key, val
675 else:
676 for key, val in pycompat.iteritems(self._filemapping):
677 yield key, val
674 678
675 679 @property
676 680 def diverge(self):
677 681 return self._diverge
678 682
679 683 @property
680 684 def renamedelete(self):
681 685 return self._renamedelete
682 686
683 687 @property
684 688 def commitinfo(self):
685 689 return self._commitinfo
686 690
687 691 @property
688 692 def actionsdict(self):
689 693 """ returns a dictionary of actions to be perfomed with action as key
690 694 and a list of files and related arguments as values """
691 695 res = emptyactions()
692 696 for a, d in pycompat.iteritems(self._actionmapping):
693 697 for f, (args, msg) in pycompat.iteritems(d):
694 698 res[a].append((f, args, msg))
695 699 return res
696 700
697 701 def setactions(self, actions):
698 702 self._filemapping = actions
699 703 self._actionmapping = collections.defaultdict(dict)
700 704 for f, (act, data, msg) in pycompat.iteritems(self._filemapping):
701 705 self._actionmapping[act][f] = data, msg
702 706
703 707 def updateactions(self, updates):
704 708 for f, (a, data, msg) in pycompat.iteritems(updates):
705 709 self.addfile(f, a, data, msg)
706 710
707 711 def hasconflicts(self):
708 712 """ tells whether this merge resulted in some actions which can
709 713 result in conflicts or not """
710 714 for a in self._actionmapping.keys():
711 715 if (
712 716 a
713 717 not in (
714 718 mergestatemod.ACTION_GET,
715 719 mergestatemod.ACTION_KEEP,
716 720 mergestatemod.ACTION_EXEC,
717 721 mergestatemod.ACTION_REMOVE,
718 722 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
719 723 )
720 724 and self._actionmapping[a]
721 725 ):
722 726 return True
723 727
724 728 return False
725 729
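# Minimal usage sketch of the mergeresult API (file name, flags and
# messages below are hypothetical):
#
#   mresult = mergeresult()
#   mresult.addfile(b'a.txt', mergestatemod.ACTION_GET, (b'', False),
#                   b'remote is newer')
#   mresult.getfile(b'a.txt')    # (ACTION_GET, (b'', False), b'remote ...')
#   list(mresult.files((mergestatemod.ACTION_GET,)))    # [b'a.txt']
#   for f, (act, data, msg) in mresult.filemap(sort=True):
#       pass    # filename-based iteration, as in _filternarrowactions()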
726 730
727 731 def manifestmerge(
728 732 repo,
729 733 wctx,
730 734 p2,
731 735 pa,
732 736 branchmerge,
733 737 force,
734 738 matcher,
735 739 acceptremote,
736 740 followcopies,
737 741 forcefulldiff=False,
738 742 ):
739 743 """
740 744 Merge wctx and p2 with ancestor pa and generate merge action list
741 745
742 746 branchmerge and force are as passed in to update
743 747 matcher = matcher to filter file lists
744 748 acceptremote = accept the incoming changes without prompting
745 749
746 750 Returns an object of mergeresult class
747 751 """
748 752 mresult = mergeresult()
749 753 if matcher is not None and matcher.always():
750 754 matcher = None
751 755
752 756 # manifests fetched in order are going to be faster, so prime the caches
753 757 [
754 758 x.manifest()
755 759 for x in sorted(wctx.parents() + [p2, pa], key=scmutil.intrev)
756 760 ]
757 761
758 762 branch_copies1 = copies.branch_copies()
759 763 branch_copies2 = copies.branch_copies()
760 764 diverge = {}
761 765 # information from merge which is needed at commit time
762 766 # for example choosing filelog of which parent to commit
763 767 # TODO: use specific constants in future for this mapping
764 768 commitinfo = {}
765 769 if followcopies:
766 770 branch_copies1, branch_copies2, diverge = copies.mergecopies(
767 771 repo, wctx, p2, pa
768 772 )
769 773
770 774 boolbm = pycompat.bytestr(bool(branchmerge))
771 775 boolf = pycompat.bytestr(bool(force))
772 776 boolm = pycompat.bytestr(bool(matcher))
773 777 repo.ui.note(_(b"resolving manifests\n"))
774 778 repo.ui.debug(
775 779 b" branchmerge: %s, force: %s, partial: %s\n" % (boolbm, boolf, boolm)
776 780 )
777 781 repo.ui.debug(b" ancestor: %s, local: %s, remote: %s\n" % (pa, wctx, p2))
778 782
779 783 m1, m2, ma = wctx.manifest(), p2.manifest(), pa.manifest()
780 784 copied1 = set(branch_copies1.copy.values())
781 785 copied1.update(branch_copies1.movewithdir.values())
782 786 copied2 = set(branch_copies2.copy.values())
783 787 copied2.update(branch_copies2.movewithdir.values())
784 788
785 789 if b'.hgsubstate' in m1 and wctx.rev() is None:
786 790 # Check whether sub state is modified, and overwrite the manifest
787 791 # to flag the change. If wctx is a committed revision, we shouldn't
788 792 # care for the dirty state of the working directory.
789 793 if any(wctx.sub(s).dirty() for s in wctx.substate):
790 794 m1[b'.hgsubstate'] = modifiednodeid
791 795
792 796 # Don't use m2-vs-ma optimization if:
793 797 # - ma is the same as m1 or m2, which we're just going to diff again later
794 798 # - The caller specifically asks for a full diff, which is useful during bid
795 799 # merge.
796 800 if pa not in ([wctx, p2] + wctx.parents()) and not forcefulldiff:
797 801 # Identify which files are relevant to the merge, so we can limit the
798 802 # total m1-vs-m2 diff to just those files. This has significant
799 803 # performance benefits in large repositories.
800 804 relevantfiles = set(ma.diff(m2).keys())
801 805
802 806 # For copied and moved files, we need to add the source file too.
803 807 for copykey, copyvalue in pycompat.iteritems(branch_copies1.copy):
804 808 if copyvalue in relevantfiles:
805 809 relevantfiles.add(copykey)
806 810 for movedirkey in branch_copies1.movewithdir:
807 811 relevantfiles.add(movedirkey)
808 812 filesmatcher = scmutil.matchfiles(repo, relevantfiles)
809 813 matcher = matchmod.intersectmatchers(matcher, filesmatcher)
810 814
811 815 diff = m1.diff(m2, match=matcher)
812 816
813 817 for f, ((n1, fl1), (n2, fl2)) in pycompat.iteritems(diff):
814 818 if n1 and n2: # file exists on both local and remote side
815 819 if f not in ma:
816 820 # TODO: what if they're renamed from different sources?
817 821 fa = branch_copies1.copy.get(
818 822 f, None
819 823 ) or branch_copies2.copy.get(f, None)
820 824 args, msg = None, None
821 825 if fa is not None:
822 826 args = (f, f, fa, False, pa.node())
823 827 msg = b'both renamed from %s' % fa
824 828 else:
825 829 args = (f, f, None, False, pa.node())
826 830 msg = b'both created'
827 831 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
828 832 else:
829 833 a = ma[f]
830 834 fla = ma.flags(f)
831 835 nol = b'l' not in fl1 + fl2 + fla
832 836 if n2 == a and fl2 == fla:
833 837 mresult.addfile(
834 838 f, mergestatemod.ACTION_KEEP, (), b'remote unchanged',
835 839 )
836 840 elif n1 == a and fl1 == fla: # local unchanged - use remote
837 841 if n1 == n2: # optimization: keep local content
838 842 mresult.addfile(
839 843 f,
840 844 mergestatemod.ACTION_EXEC,
841 845 (fl2,),
842 846 b'update permissions',
843 847 )
844 848 else:
845 849 mresult.addfile(
846 850 f,
847 851 mergestatemod.ACTION_GET,
848 852 (fl2, False),
849 853 b'remote is newer',
850 854 )
851 855 if branchmerge:
852 856 commitinfo[f] = b'other'
853 857 elif nol and n2 == a: # remote only changed 'x'
854 858 mresult.addfile(
855 859 f,
856 860 mergestatemod.ACTION_EXEC,
857 861 (fl2,),
858 862 b'update permissions',
859 863 )
860 864 elif nol and n1 == a: # local only changed 'x'
861 865 mresult.addfile(
862 866 f,
863 867 mergestatemod.ACTION_GET,
864 868 (fl1, False),
865 869 b'remote is newer',
866 870 )
867 871 if branchmerge:
868 872 commitinfo[f] = b'other'
869 873 else: # both changed something
870 874 mresult.addfile(
871 875 f,
872 876 mergestatemod.ACTION_MERGE,
873 877 (f, f, f, False, pa.node()),
874 878 b'versions differ',
875 879 )
876 880 elif n1: # file exists only on local side
877 881 if f in copied2:
878 882 pass # we'll deal with it on m2 side
879 883 elif (
880 884 f in branch_copies1.movewithdir
881 885 ): # directory rename, move local
882 886 f2 = branch_copies1.movewithdir[f]
883 887 if f2 in m2:
884 888 mresult.addfile(
885 889 f2,
886 890 mergestatemod.ACTION_MERGE,
887 891 (f, f2, None, True, pa.node()),
888 892 b'remote directory rename, both created',
889 893 )
890 894 else:
891 895 mresult.addfile(
892 896 f2,
893 897 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
894 898 (f, fl1),
895 899 b'remote directory rename - move from %s' % f,
896 900 )
897 901 elif f in branch_copies1.copy:
898 902 f2 = branch_copies1.copy[f]
899 903 mresult.addfile(
900 904 f,
901 905 mergestatemod.ACTION_MERGE,
902 906 (f, f2, f2, False, pa.node()),
903 907 b'local copied/moved from %s' % f2,
904 908 )
905 909 elif f in ma: # clean, a different, no remote
906 910 if n1 != ma[f]:
907 911 if acceptremote:
908 912 mresult.addfile(
909 913 f,
910 914 mergestatemod.ACTION_REMOVE,
911 915 None,
912 916 b'remote delete',
913 917 )
914 918 else:
915 919 mresult.addfile(
916 920 f,
917 921 mergestatemod.ACTION_CHANGED_DELETED,
918 922 (f, None, f, False, pa.node()),
919 923 b'prompt changed/deleted',
920 924 )
921 925 elif n1 == addednodeid:
922 926 # This file was locally added. We should forget it instead of
923 927 # deleting it.
924 928 mresult.addfile(
925 929 f, mergestatemod.ACTION_FORGET, None, b'remote deleted',
926 930 )
927 931 else:
928 932 mresult.addfile(
929 933 f, mergestatemod.ACTION_REMOVE, None, b'other deleted',
930 934 )
931 935 elif n2: # file exists only on remote side
932 936 if f in copied1:
933 937 pass # we'll deal with it on m1 side
934 938 elif f in branch_copies2.movewithdir:
935 939 f2 = branch_copies2.movewithdir[f]
936 940 if f2 in m1:
937 941 mresult.addfile(
938 942 f2,
939 943 mergestatemod.ACTION_MERGE,
940 944 (f2, f, None, False, pa.node()),
941 945 b'local directory rename, both created',
942 946 )
943 947 else:
944 948 mresult.addfile(
945 949 f2,
946 950 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
947 951 (f, fl2),
948 952 b'local directory rename - get from %s' % f,
949 953 )
950 954 elif f in branch_copies2.copy:
951 955 f2 = branch_copies2.copy[f]
952 956 msg, args = None, None
953 957 if f2 in m2:
954 958 args = (f2, f, f2, False, pa.node())
955 959 msg = b'remote copied from %s' % f2
956 960 else:
957 961 args = (f2, f, f2, True, pa.node())
958 962 msg = b'remote moved from %s' % f2
959 963 mresult.addfile(f, mergestatemod.ACTION_MERGE, args, msg)
960 964 elif f not in ma:
961 965 # local unknown, remote created: the logic is described by the
962 966 # following table:
963 967 #
964 968 # force branchmerge different | action
965 969 # n * * | create
966 970 # y n * | create
967 971 # y y n | create
968 972 # y y y | merge
969 973 #
970 974 # Checking whether the files are different is expensive, so we
971 975 # don't do that when we can avoid it.
972 976 if not force:
973 977 mresult.addfile(
974 978 f,
975 979 mergestatemod.ACTION_CREATED,
976 980 (fl2,),
977 981 b'remote created',
978 982 )
979 983 elif not branchmerge:
980 984 mresult.addfile(
981 985 f,
982 986 mergestatemod.ACTION_CREATED,
983 987 (fl2,),
984 988 b'remote created',
985 989 )
986 990 else:
987 991 mresult.addfile(
988 992 f,
989 993 mergestatemod.ACTION_CREATED_MERGE,
990 994 (fl2, pa.node()),
991 995 b'remote created, get or merge',
992 996 )
993 997 elif n2 != ma[f]:
994 998 df = None
995 999 for d in branch_copies1.dirmove:
996 1000 if f.startswith(d):
997 1001 # new file added in a directory that was moved
998 1002 df = branch_copies1.dirmove[d] + f[len(d) :]
999 1003 break
1000 1004 if df is not None and df in m1:
1001 1005 mresult.addfile(
1002 1006 df,
1003 1007 mergestatemod.ACTION_MERGE,
1004 1008 (df, f, f, False, pa.node()),
1005 1009 b'local directory rename - respect move '
1006 1010 b'from %s' % f,
1007 1011 )
1008 1012 elif acceptremote:
1009 1013 mresult.addfile(
1010 1014 f,
1011 1015 mergestatemod.ACTION_CREATED,
1012 1016 (fl2,),
1013 1017 b'remote recreating',
1014 1018 )
1015 1019 else:
1016 1020 mresult.addfile(
1017 1021 f,
1018 1022 mergestatemod.ACTION_DELETED_CHANGED,
1019 1023 (None, f, f, False, pa.node()),
1020 1024 b'prompt deleted/changed',
1021 1025 )
1022 1026
1023 1027 if repo.ui.configbool(b'experimental', b'merge.checkpathconflicts'):
1024 1028 # If we are merging, look for path conflicts.
1025 1029 checkpathconflicts(repo, wctx, p2, mresult)
1026 1030
1027 1031 narrowmatch = repo.narrowmatch()
1028 1032 if not narrowmatch.always():
1029 1033 # Updates "actions" in place
1030 1034 _filternarrowactions(narrowmatch, branchmerge, mresult)
1031 1035
1032 1036 renamedelete = branch_copies1.renamedelete
1033 1037 renamedelete.update(branch_copies2.renamedelete)
1034 1038
1035 1039 mresult.updatevalues(diverge, renamedelete, commitinfo)
1036 1040 return mresult
1037 1041
1038 1042
1039 1043 def _resolvetrivial(repo, wctx, mctx, ancestor, mresult):
1040 1044 """Resolves false conflicts where the nodeid changed but the content
1041 1045 remained the same."""
1042 1046 # We force a copy of the file lists because we're going to mutate
1043 1047 # mresult as we resolve trivial conflicts.
1044 1048 for f in list(mresult.files((mergestatemod.ACTION_CHANGED_DELETED,))):
1045 1049 if f in ancestor and not wctx[f].cmp(ancestor[f]):
1046 1050 # local did change but ended up with same content
1047 1051 mresult.addfile(
1048 1052 f, mergestatemod.ACTION_REMOVE, None, b'prompt same'
1049 1053 )
1050 1054
1051 1055 for f in list(mresult.files((mergestatemod.ACTION_DELETED_CHANGED,))):
1052 1056 if f in ancestor and not mctx[f].cmp(ancestor[f]):
1053 1057 # remote did change but ended up with same content
1054 1058 mresult.removefile(f) # don't get = keep local deleted
1055 1059
1056 1060
1057 1061 def calculateupdates(
1058 1062 repo,
1059 1063 wctx,
1060 1064 mctx,
1061 1065 ancestors,
1062 1066 branchmerge,
1063 1067 force,
1064 1068 acceptremote,
1065 1069 followcopies,
1066 1070 matcher=None,
1067 1071 mergeforce=False,
1068 1072 ):
1069 1073 """
1070 1074 Calculate the actions needed to merge mctx into wctx using ancestors
1071 1075
1072 1076 Uses manifestmerge() to merge manifests and get the list of actions
1073 1077 required for merging them. If there are multiple ancestors, uses bid
1074 1078 merge if enabled.
1075 1079
1076 1080 Also filters out actions which are not required if the repository is sparse.
1077 1081
1078 1082 Returns mergeresult object same as manifestmerge().
1079 1083 """
1080 1084 # Avoid cycle.
1081 1085 from . import sparse
1082 1086
1083 1087 mresult = None
1084 1088 if len(ancestors) == 1: # default
1085 1089 mresult = manifestmerge(
1086 1090 repo,
1087 1091 wctx,
1088 1092 mctx,
1089 1093 ancestors[0],
1090 1094 branchmerge,
1091 1095 force,
1092 1096 matcher,
1093 1097 acceptremote,
1094 1098 followcopies,
1095 1099 )
1096 1100 _checkunknownfiles(repo, wctx, mctx, force, mresult, mergeforce)
1097 1101
1098 1102 else: # only when merge.preferancestor=* - the default
1099 1103 repo.ui.note(
1100 1104 _(b"note: merging %s and %s using bids from ancestors %s\n")
1101 1105 % (
1102 1106 wctx,
1103 1107 mctx,
1104 1108 _(b' and ').join(pycompat.bytestr(anc) for anc in ancestors),
1105 1109 )
1106 1110 )
1107 1111
1108 1112 # mapping filename to bids (action method to list of actions)
1109 1113 # {FILENAME1 : BID1, FILENAME2 : BID2}
1110 1114 # BID is another dictionary which contains
1111 1115 # mapping of the following form:
1112 1116 # {ACTION_X : [info, ..], ACTION_Y : [info, ..]}
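# e.g. (illustrative) two ancestors bidding differently on one file:
# {b'f.txt': {ACTION_GET: [(ACTION_GET, (b'', False), b'remote is newer')],
#             ACTION_KEEP: [(ACTION_KEEP, (), b'remote unchanged')]}}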
1113 1117 fbids = {}
1114 1118 diverge, renamedelete = None, None
1115 1119 for ancestor in ancestors:
1116 1120 repo.ui.note(_(b'\ncalculating bids for ancestor %s\n') % ancestor)
1117 1121 mresult1 = manifestmerge(
1118 1122 repo,
1119 1123 wctx,
1120 1124 mctx,
1121 1125 ancestor,
1122 1126 branchmerge,
1123 1127 force,
1124 1128 matcher,
1125 1129 acceptremote,
1126 1130 followcopies,
1127 1131 forcefulldiff=True,
1128 1132 )
1129 1133 _checkunknownfiles(repo, wctx, mctx, force, mresult1, mergeforce)
1130 1134
1131 1135 # Track the shortest set of warnings on the theory that bid
1132 1136 # merge will correctly incorporate more information
1133 1137 if diverge is None or len(mresult1.diverge) < len(diverge):
1134 1138 diverge = mresult1.diverge
1135 1139 if renamedelete is None or len(renamedelete) < len(
1136 1140 mresult1.renamedelete
1137 1141 ):
1138 1142 renamedelete = mresult1.renamedelete
1139 1143
1140 for f, a in sorted(pycompat.iteritems(mresult1.actions)):
1144 for f, a in mresult1.filemap(sort=True):
1141 1145 m, args, msg = a
1142 1146 repo.ui.debug(b' %s: %s -> %s\n' % (f, msg, m))
1143 1147 if f in fbids:
1144 1148 d = fbids[f]
1145 1149 if m in d:
1146 1150 d[m].append(a)
1147 1151 else:
1148 1152 d[m] = [a]
1149 1153 else:
1150 1154 fbids[f] = {m: [a]}
1151 1155
1152 1156 # Call for bids
1153 1157 # Pick the best bid for each file
1154 1158 repo.ui.note(_(b'\nauction for merging merge bids\n'))
1155 1159 mresult = mergeresult()
1156 1160 for f, bids in sorted(fbids.items()):
1157 1161 # bids is a mapping from action method to list of actions
1158 1162 # Consensus?
1159 1163 if len(bids) == 1: # all bids are the same kind of method
1160 1164 m, l = list(bids.items())[0]
1161 1165 if all(a == l[0] for a in l[1:]): # len(bids) is > 1
1162 1166 repo.ui.note(_(b" %s: consensus for %s\n") % (f, m))
1163 1167 mresult.addfile(f, *l[0])
1164 1168 continue
1165 1169 # If keep is an option, just do it.
1166 1170 if mergestatemod.ACTION_KEEP in bids:
1167 1171 repo.ui.note(_(b" %s: picking 'keep' action\n") % f)
1168 1172 mresult.addfile(f, *bids[mergestatemod.ACTION_KEEP][0])
1169 1173 continue
1170 1174 # If there are gets and they all agree [how could they not?], do it.
1171 1175 if mergestatemod.ACTION_GET in bids:
1172 1176 ga0 = bids[mergestatemod.ACTION_GET][0]
1173 1177 if all(a == ga0 for a in bids[mergestatemod.ACTION_GET][1:]):
1174 1178 repo.ui.note(_(b" %s: picking 'get' action\n") % f)
1175 1179 mresult.addfile(f, *ga0)
1176 1180 continue
1177 1181 # TODO: Consider other simple actions such as mode changes
1178 1182 # Handle inefficient democrazy.
1179 1183 repo.ui.note(_(b' %s: multiple bids for merge action:\n') % f)
1180 1184 for m, l in sorted(bids.items()):
1181 1185 for _f, args, msg in l:
1182 1186 repo.ui.note(b' %s -> %s\n' % (msg, m))
1183 1187 # Pick random action. TODO: Instead, prompt user when resolving
1184 1188 m, l = list(bids.items())[0]
1185 1189 repo.ui.warn(
1186 1190 _(b' %s: ambiguous merge - picked %s action\n') % (f, m)
1187 1191 )
1188 1192 mresult.addfile(f, *l[0])
1189 1193 continue
1190 1194 repo.ui.note(_(b'end of auction\n\n'))
1191 1195 # TODO: think about commitinfo when bid merge is used
1192 1196 mresult.updatevalues(diverge, renamedelete, {})
1193 1197
1194 1198 if wctx.rev() is None:
1195 1199 fractions = _forgetremoved(wctx, mctx, branchmerge)
1196 1200 mresult.updateactions(fractions)
1197 1201
1198 1202 sparse.filterupdatesactions(repo, wctx, mctx, branchmerge, mresult)
1199 1203 _resolvetrivial(repo, wctx, mctx, ancestors[0], mresult)
1200 1204
1201 1205 return mresult
1202 1206
1203 1207
1204 1208 def _getcwd():
1205 1209 try:
1206 1210 return encoding.getcwd()
1207 1211 except OSError as err:
1208 1212 if err.errno == errno.ENOENT:
1209 1213 return None
1210 1214 raise
1211 1215
1212 1216
1213 1217 def batchremove(repo, wctx, actions):
1214 1218 """apply removes to the working directory
1215 1219
1216 1220 yields tuples for progress updates
1217 1221 """
1218 1222 verbose = repo.ui.verbose
1219 1223 cwd = _getcwd()
1220 1224 i = 0
1221 1225 for f, args, msg in actions:
1222 1226 repo.ui.debug(b" %s: %s -> r\n" % (f, msg))
1223 1227 if verbose:
1224 1228 repo.ui.note(_(b"removing %s\n") % f)
1225 1229 wctx[f].audit()
1226 1230 try:
1227 1231 wctx[f].remove(ignoremissing=True)
1228 1232 except OSError as inst:
1229 1233 repo.ui.warn(
1230 1234 _(b"update failed to remove %s: %s!\n") % (f, inst.strerror)
1231 1235 )
1232 1236 if i == 100:
1233 1237 yield i, f
1234 1238 i = 0
1235 1239 i += 1
1236 1240 if i > 0:
1237 1241 yield i, f
1238 1242
1239 1243 if cwd and not _getcwd():
1240 1244 # cwd was removed in the course of removing files; print a helpful
1241 1245 # warning.
1242 1246 repo.ui.warn(
1243 1247 _(
1244 1248 b"current directory was removed\n"
1245 1249 b"(consider changing to repo root: %s)\n"
1246 1250 )
1247 1251 % repo.root
1248 1252 )
1249 1253
1250 1254
1251 1255 def batchget(repo, mctx, wctx, wantfiledata, actions):
1252 1256 """apply gets to the working directory
1253 1257
1254 1258 mctx is the context to get from
1255 1259
1256 1260 Yields arbitrarily many (False, tuple) for progress updates, followed by
1257 1261 exactly one (True, filedata). When wantfiledata is false, filedata is an
1258 1262 empty dict. When wantfiledata is true, filedata[f] is a triple (mode, size,
1259 1263 mtime) of the file f written for each action.
1260 1264 """
1261 1265 filedata = {}
1262 1266 verbose = repo.ui.verbose
1263 1267 fctx = mctx.filectx
1264 1268 ui = repo.ui
1265 1269 i = 0
1266 1270 with repo.wvfs.backgroundclosing(ui, expectedcount=len(actions)):
1267 1271 for f, (flags, backup), msg in actions:
1268 1272 repo.ui.debug(b" %s: %s -> g\n" % (f, msg))
1269 1273 if verbose:
1270 1274 repo.ui.note(_(b"getting %s\n") % f)
1271 1275
1272 1276 if backup:
1273 1277 # If a file or directory exists with the same name, back that
1274 1278 # up. Otherwise, look to see if there is a file that conflicts
1275 1279 # with a directory this file is in, and if so, back that up.
1276 1280 conflicting = f
1277 1281 if not repo.wvfs.lexists(f):
1278 1282 for p in pathutil.finddirs(f):
1279 1283 if repo.wvfs.isfileorlink(p):
1280 1284 conflicting = p
1281 1285 break
1282 1286 if repo.wvfs.lexists(conflicting):
1283 1287 orig = scmutil.backuppath(ui, repo, conflicting)
1284 1288 util.rename(repo.wjoin(conflicting), orig)
1285 1289 wfctx = wctx[f]
1286 1290 wfctx.clearunknown()
1287 1291 atomictemp = ui.configbool(b"experimental", b"update.atomic-file")
1288 1292 size = wfctx.write(
1289 1293 fctx(f).data(),
1290 1294 flags,
1291 1295 backgroundclose=True,
1292 1296 atomictemp=atomictemp,
1293 1297 )
1294 1298 if wantfiledata:
1295 1299 s = wfctx.lstat()
1296 1300 mode = s.st_mode
1297 1301 mtime = s[stat.ST_MTIME]
1298 1302 filedata[f] = (mode, size, mtime) # for dirstate.normal
1299 1303 if i == 100:
1300 1304 yield False, (i, f)
1301 1305 i = 0
1302 1306 i += 1
1303 1307 if i > 0:
1304 1308 yield False, (i, f)
1305 1309 yield True, filedata
1306 1310
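# Editorial note: a hedged sketch (not part of merge.py) of consuming the
# batchget() protocol described in the docstring above: progress items
# arrive as (False, (count, path)) and the final item as (True, filedata).
# It mirrors the consumption loop inside applyupdates() below.
def _example_batchget(repo, mctx, wctx, actions):
    filedata = {}
    for final, res in batchget(repo, mctx, wctx, True, actions):
        if final:
            filedata = res  # {path: (mode, size, mtime)}
        else:
            count, path = res
            repo.ui.debug(b'wrote %d files, last %s\n' % (count, path))
    return filedata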
1307 1311
1308 1312 def _prefetchfiles(repo, ctx, mresult):
1309 1313 """Invoke ``scmutil.prefetchfiles()`` for the files relevant to the dict
1310 1314 of merge actions. ``ctx`` is the context being merged in."""
1311 1315
1312 1316 # Skipping 'a', 'am', 'f', 'r', 'dm', 'e', 'k', 'p' and 'pr', because they
1313 1317 # don't touch the context to be merged in. 'cd' is skipped, because
1314 1318 # changed/deleted never resolves to something from the remote side.
1315 1319 files = mresult.files(
1316 1320 [
1317 1321 mergestatemod.ACTION_GET,
1318 1322 mergestatemod.ACTION_DELETED_CHANGED,
1319 1323 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1320 1324 mergestatemod.ACTION_MERGE,
1321 1325 ]
1322 1326 )
1323 1327
1324 1328 prefetch = scmutil.prefetchfiles
1325 1329 matchfiles = scmutil.matchfiles
1326 1330 prefetch(
1327 1331 repo, [(ctx.rev(), matchfiles(repo, files),)],
1328 1332 )
1329 1333
1330 1334
1331 1335 @attr.s(frozen=True)
1332 1336 class updateresult(object):
1333 1337 updatedcount = attr.ib()
1334 1338 mergedcount = attr.ib()
1335 1339 removedcount = attr.ib()
1336 1340 unresolvedcount = attr.ib()
1337 1341
1338 1342 def isempty(self):
1339 1343 return not (
1340 1344 self.updatedcount
1341 1345 or self.mergedcount
1342 1346 or self.removedcount
1343 1347 or self.unresolvedcount
1344 1348 )
1345 1349
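# Editorial note: a hedged illustration (not part of merge.py) of how a
# caller might summarize the updateresult returned by update() below.
def _example_summarize(stats):
    if stats.isempty():
        return b'nothing changed\n'
    return b'%d updated, %d merged, %d removed, %d unresolved\n' % (
        stats.updatedcount,
        stats.mergedcount,
        stats.removedcount,
        stats.unresolvedcount,
    )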
1346 1350
1347 1351 def emptyactions():
1348 1352 """create an actions dict, to be populated and passed to applyupdates()"""
1349 1353 return {
1350 1354 m: []
1351 1355 for m in (
1352 1356 mergestatemod.ACTION_ADD,
1353 1357 mergestatemod.ACTION_ADD_MODIFIED,
1354 1358 mergestatemod.ACTION_FORGET,
1355 1359 mergestatemod.ACTION_GET,
1356 1360 mergestatemod.ACTION_CHANGED_DELETED,
1357 1361 mergestatemod.ACTION_DELETED_CHANGED,
1358 1362 mergestatemod.ACTION_REMOVE,
1359 1363 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1360 1364 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1361 1365 mergestatemod.ACTION_MERGE,
1362 1366 mergestatemod.ACTION_EXEC,
1363 1367 mergestatemod.ACTION_KEEP,
1364 1368 mergestatemod.ACTION_PATH_CONFLICT,
1365 1369 mergestatemod.ACTION_PATH_CONFLICT_RESOLVE,
1366 1370 )
1367 1371 }
1368 1372
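# Editorial note: a hedged illustration (not part of merge.py) of populating
# the dict returned by emptyactions(); each action key maps to a list of
# (filename, args, message) tuples.
def _example_schedule_get(f, flags):
    actions = emptyactions()
    actions[mergestatemod.ACTION_GET].append((f, (flags, False), b'example'))
    return actions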
1369 1373
1370 1374 def applyupdates(
1371 1375 repo,
1372 1376 mresult,
1373 1377 wctx,
1374 1378 mctx,
1375 1379 overwrite,
1376 1380 wantfiledata,
1377 1381 labels=None,
1378 1382 commitinfo=None,
1379 1383 ):
1380 1384 """apply the merge action list to the working directory
1381 1385
1382 1386 mresult is a mergeresult object representing result of the merge
1383 1387 wctx is the working copy context
1384 1388 mctx is the context to be merged into the working copy
1385 1389 commitinfo is a mapping of information which needs to be stored somewhere
1386 1390 (probably mergestate) so that it can be used at commit time.
1387 1391
1388 1392 Return a tuple of (counts, filedata), where counts is a tuple
1389 1393 (updated, merged, removed, unresolved) that describes how many
1390 1394 files were affected by the update, and filedata is as described in
1391 1395 batchget.
1392 1396 """
1393 1397
1394 1398 _prefetchfiles(repo, mctx, mresult)
1395 1399
1396 1400 updated, merged, removed = 0, 0, 0
1397 1401 ms = mergestatemod.mergestate.clean(
1398 1402 repo, wctx.p1().node(), mctx.node(), labels
1399 1403 )
1400 1404
1401 1405 if commitinfo is None:
1402 1406 commitinfo = {}
1403 1407
1404 1408 for f, op in pycompat.iteritems(commitinfo):
1405 1409 # the other side of the filenode was chosen while merging; store this in
1406 1410 # mergestate so that it can be reused on commit
1407 1411 if op == b'other':
1408 1412 ms.addmergedother(f)
1409 1413
1410 1414 moves = []
1411 1415
1412 1416 # 'cd' and 'dc' actions are treated like other merge conflicts
1413 1417 mergeactions = list(
1414 1418 mresult.getactions(
1415 1419 [
1416 1420 mergestatemod.ACTION_CHANGED_DELETED,
1417 1421 mergestatemod.ACTION_DELETED_CHANGED,
1418 1422 mergestatemod.ACTION_MERGE,
1419 1423 ],
1420 1424 sort=True,
1421 1425 )
1422 1426 )
1423 1427 for f, args, msg in mergeactions:
1424 1428 f1, f2, fa, move, anc = args
1425 1429 if f == b'.hgsubstate': # merged internally
1426 1430 continue
1427 1431 if f1 is None:
1428 1432 fcl = filemerge.absentfilectx(wctx, fa)
1429 1433 else:
1430 1434 repo.ui.debug(b" preserving %s for resolve of %s\n" % (f1, f))
1431 1435 fcl = wctx[f1]
1432 1436 if f2 is None:
1433 1437 fco = filemerge.absentfilectx(mctx, fa)
1434 1438 else:
1435 1439 fco = mctx[f2]
1436 1440 actx = repo[anc]
1437 1441 if fa in actx:
1438 1442 fca = actx[fa]
1439 1443 else:
1440 1444 # TODO: move to absentfilectx
1441 1445 fca = repo.filectx(f1, fileid=nullrev)
1442 1446 ms.add(fcl, fco, fca, f)
1443 1447 if f1 != f and move:
1444 1448 moves.append(f1)
1445 1449
1446 1450 # remove renamed files once they are safely stored
1447 1451 for f in moves:
1448 1452 if wctx[f].lexists():
1449 1453 repo.ui.debug(b"removing %s\n" % f)
1450 1454 wctx[f].audit()
1451 1455 wctx[f].remove()
1452 1456
1453 1457 numupdates = mresult.len() - mresult.len((mergestatemod.ACTION_KEEP,))
1454 1458 progress = repo.ui.makeprogress(
1455 1459 _(b'updating'), unit=_(b'files'), total=numupdates
1456 1460 )
1457 1461
1458 1462 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_REMOVE]:
1459 1463 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1460 1464
1461 1465 # record path conflicts
1462 1466 for f, args, msg in mresult.getactions(
1463 1467 [mergestatemod.ACTION_PATH_CONFLICT], sort=True
1464 1468 ):
1465 1469 f1, fo = args
1466 1470 s = repo.ui.status
1467 1471 s(
1468 1472 _(
1469 1473 b"%s: path conflict - a file or link has the same name as a "
1470 1474 b"directory\n"
1471 1475 )
1472 1476 % f
1473 1477 )
1474 1478 if fo == b'l':
1475 1479 s(_(b"the local file has been renamed to %s\n") % f1)
1476 1480 else:
1477 1481 s(_(b"the remote file has been renamed to %s\n") % f1)
1478 1482 s(_(b"resolve manually then use 'hg resolve --mark %s'\n") % f)
1479 1483 ms.addpathconflict(f, f1, fo)
1480 1484 progress.increment(item=f)
1481 1485
1482 1486 # When merging in-memory, we can't support worker processes, so set the
1483 1487 # per-item cost at 0 in that case.
1484 1488 cost = 0 if wctx.isinmemory() else 0.001
1485 1489
1486 1490 # remove in parallel (must come before resolving path conflicts and getting)
1487 1491 prog = worker.worker(
1488 1492 repo.ui,
1489 1493 cost,
1490 1494 batchremove,
1491 1495 (repo, wctx),
1492 1496 list(mresult.getactions([mergestatemod.ACTION_REMOVE], sort=True)),
1493 1497 )
1494 1498 for i, item in prog:
1495 1499 progress.increment(step=i, item=item)
1496 1500 removed = mresult.len((mergestatemod.ACTION_REMOVE,))
1497 1501
1498 1502 # resolve path conflicts (must come before getting)
1499 1503 for f, args, msg in mresult.getactions(
1500 1504 [mergestatemod.ACTION_PATH_CONFLICT_RESOLVE], sort=True
1501 1505 ):
1502 1506 repo.ui.debug(b" %s: %s -> pr\n" % (f, msg))
1503 1507 (f0, origf0) = args
1504 1508 if wctx[f0].lexists():
1505 1509 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1506 1510 wctx[f].audit()
1507 1511 wctx[f].write(wctx.filectx(f0).data(), wctx.filectx(f0).flags())
1508 1512 wctx[f0].remove()
1509 1513 progress.increment(item=f)
1510 1514
1511 1515 # get in parallel.
1512 1516 threadsafe = repo.ui.configbool(
1513 1517 b'experimental', b'worker.wdir-get-thread-safe'
1514 1518 )
1515 1519 prog = worker.worker(
1516 1520 repo.ui,
1517 1521 cost,
1518 1522 batchget,
1519 1523 (repo, mctx, wctx, wantfiledata),
1520 1524 list(mresult.getactions([mergestatemod.ACTION_GET], sort=True)),
1521 1525 threadsafe=threadsafe,
1522 1526 hasretval=True,
1523 1527 )
1524 1528 getfiledata = {}
1525 1529 for final, res in prog:
1526 1530 if final:
1527 1531 getfiledata = res
1528 1532 else:
1529 1533 i, item = res
1530 1534 progress.increment(step=i, item=item)
1531 1535
1532 1536 if b'.hgsubstate' in mresult._actionmapping[mergestatemod.ACTION_GET]:
1533 1537 subrepoutil.submerge(repo, wctx, mctx, wctx, overwrite, labels)
1534 1538
1535 1539 # forget (manifest only, just log it) (must come first)
1536 1540 for f, args, msg in mresult.getactions(
1537 1541 (mergestatemod.ACTION_FORGET,), sort=True
1538 1542 ):
1539 1543 repo.ui.debug(b" %s: %s -> f\n" % (f, msg))
1540 1544 progress.increment(item=f)
1541 1545
1542 1546 # re-add (manifest only, just log it)
1543 1547 for f, args, msg in mresult.getactions(
1544 1548 (mergestatemod.ACTION_ADD,), sort=True
1545 1549 ):
1546 1550 repo.ui.debug(b" %s: %s -> a\n" % (f, msg))
1547 1551 progress.increment(item=f)
1548 1552
1549 1553 # re-add/mark as modified (manifest only, just log it)
1550 1554 for f, args, msg in mresult.getactions(
1551 1555 (mergestatemod.ACTION_ADD_MODIFIED,), sort=True
1552 1556 ):
1553 1557 repo.ui.debug(b" %s: %s -> am\n" % (f, msg))
1554 1558 progress.increment(item=f)
1555 1559
1556 1560 # keep (noop, just log it)
1557 1561 for f, args, msg in mresult.getactions(
1558 1562 (mergestatemod.ACTION_KEEP,), sort=True
1559 1563 ):
1560 1564 repo.ui.debug(b" %s: %s -> k\n" % (f, msg))
1561 1565 # no progress
1562 1566
1563 1567 # directory rename, move local
1564 1568 for f, args, msg in mresult.getactions(
1565 1569 (mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,), sort=True
1566 1570 ):
1567 1571 repo.ui.debug(b" %s: %s -> dm\n" % (f, msg))
1568 1572 progress.increment(item=f)
1569 1573 f0, flags = args
1570 1574 repo.ui.note(_(b"moving %s to %s\n") % (f0, f))
1571 1575 wctx[f].audit()
1572 1576 wctx[f].write(wctx.filectx(f0).data(), flags)
1573 1577 wctx[f0].remove()
1574 1578
1575 1579 # local directory rename, get
1576 1580 for f, args, msg in mresult.getactions(
1577 1581 (mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,), sort=True
1578 1582 ):
1579 1583 repo.ui.debug(b" %s: %s -> dg\n" % (f, msg))
1580 1584 progress.increment(item=f)
1581 1585 f0, flags = args
1582 1586 repo.ui.note(_(b"getting %s to %s\n") % (f0, f))
1583 1587 wctx[f].write(mctx.filectx(f0).data(), flags)
1584 1588
1585 1589 # exec
1586 1590 for f, args, msg in mresult.getactions(
1587 1591 (mergestatemod.ACTION_EXEC,), sort=True
1588 1592 ):
1589 1593 repo.ui.debug(b" %s: %s -> e\n" % (f, msg))
1590 1594 progress.increment(item=f)
1591 1595 (flags,) = args
1592 1596 wctx[f].audit()
1593 1597 wctx[f].setflags(b'l' in flags, b'x' in flags)
1594 1598
1595 1599 # these actions update the file
1596 1600 updated = mresult.len(
1597 1601 (
1598 1602 mergestatemod.ACTION_GET,
1599 1603 mergestatemod.ACTION_EXEC,
1600 1604 mergestatemod.ACTION_LOCAL_DIR_RENAME_GET,
1601 1605 mergestatemod.ACTION_DIR_RENAME_MOVE_LOCAL,
1602 1606 )
1603 1607 )
1604 1608 # the ordering is important here -- ms.mergedriver will raise if the merge
1605 1609 # driver has changed, and we want to be able to bypass it when overwrite is
1606 1610 # True
1607 1611 usemergedriver = not overwrite and mergeactions and ms.mergedriver
1608 1612
1609 1613 if usemergedriver:
1610 1614 if wctx.isinmemory():
1611 1615 raise error.InMemoryMergeConflictsError(
1612 1616 b"in-memory merge does not support mergedriver"
1613 1617 )
1614 1618 ms.commit()
1615 1619 proceed = driverpreprocess(repo, ms, wctx, labels=labels)
1616 1620 # the driver might leave some files unresolved
1617 1621 unresolvedf = set(ms.unresolved())
1618 1622 if not proceed:
1619 1623 # XXX setting unresolved to at least 1 is a hack to make sure we
1620 1624 # error out
1621 1625 return updateresult(
1622 1626 updated, merged, removed, max(len(unresolvedf), 1)
1623 1627 )
1624 1628 newactions = []
1625 1629 for f, args, msg in mergeactions:
1626 1630 if f in unresolvedf:
1627 1631 newactions.append((f, args, msg))
1628 1632 mergeactions = newactions
1629 1633
1630 1634 try:
1631 1635 # premerge
1632 1636 tocomplete = []
1633 1637 for f, args, msg in mergeactions:
1634 1638 repo.ui.debug(b" %s: %s -> m (premerge)\n" % (f, msg))
1635 1639 progress.increment(item=f)
1636 1640 if f == b'.hgsubstate': # subrepo states need updating
1637 1641 subrepoutil.submerge(
1638 1642 repo, wctx, mctx, wctx.ancestor(mctx), overwrite, labels
1639 1643 )
1640 1644 continue
1641 1645 wctx[f].audit()
1642 1646 complete, r = ms.preresolve(f, wctx)
1643 1647 if not complete:
1644 1648 numupdates += 1
1645 1649 tocomplete.append((f, args, msg))
1646 1650
1647 1651 # merge
1648 1652 for f, args, msg in tocomplete:
1649 1653 repo.ui.debug(b" %s: %s -> m (merge)\n" % (f, msg))
1650 1654 progress.increment(item=f, total=numupdates)
1651 1655 ms.resolve(f, wctx)
1652 1656
1653 1657 finally:
1654 1658 ms.commit()
1655 1659
1656 1660 unresolved = ms.unresolvedcount()
1657 1661
1658 1662 if (
1659 1663 usemergedriver
1660 1664 and not unresolved
1661 1665 and ms.mdstate() != mergestatemod.MERGE_DRIVER_STATE_SUCCESS
1662 1666 ):
1663 1667 if not driverconclude(repo, ms, wctx, labels=labels):
1664 1668 # XXX setting unresolved to at least 1 is a hack to make sure we
1665 1669 # error out
1666 1670 unresolved = max(unresolved, 1)
1667 1671
1668 1672 ms.commit()
1669 1673
1670 1674 msupdated, msmerged, msremoved = ms.counts()
1671 1675 updated += msupdated
1672 1676 merged += msmerged
1673 1677 removed += msremoved
1674 1678
1675 1679 extraactions = ms.actions()
1676 1680 if extraactions:
1677 1681 mfiles = {
1678 1682 a[0] for a in mresult.getactions((mergestatemod.ACTION_MERGE,))
1679 1683 }
1680 1684 for k, acts in pycompat.iteritems(extraactions):
1681 1685 for a in acts:
1682 1686 mresult.addfile(a[0], k, *a[1:])
1683 1687 if k == mergestatemod.ACTION_GET and wantfiledata:
1684 1688 # no filedata until mergestate is updated to provide it
1685 1689 for a in acts:
1686 1690 getfiledata[a[0]] = None
1687 1691 # Remove these files from actions[ACTION_MERGE] as well. This is
1688 1692 # important because in recordupdates, files in actions[ACTION_MERGE]
1689 1693 # are processed after files in other actions, and the merge driver
1690 1694 # might add files to those actions via extraactions above. This can
1691 1695 # lead to a file being recorded twice, with poor results. This is
1692 1696 # especially problematic for actions[ACTION_REMOVE] (currently only
1693 1697 # possible with the merge driver in the initial merge process;
1694 1698 # interrupted merges don't go through this flow).
1695 1699 #
1696 1700 # The real fix here is to have indexes by both file and action so
1697 1701 # that when the action for a file is changed it is automatically
1698 1702 # reflected in the other action lists. But that involves a more
1699 1703 # complex data structure, so this will do for now.
1700 1704 #
1701 1705 # We don't need to do the same operation for 'dc' and 'cd' because
1702 1706 # those lists aren't consulted again.
1703 1707 mfiles.difference_update(a[0] for a in acts)
1704 1708
1705 1709 for a in list(mresult.getactions((mergestatemod.ACTION_MERGE,))):
1706 1710 if a[0] not in mfiles:
1707 1711 mresult.removefile(a[0])
1708 1712
1709 1713 progress.complete()
1710 1714 assert len(getfiledata) == (
1711 1715 mresult.len((mergestatemod.ACTION_GET,)) if wantfiledata else 0
1712 1716 )
1713 1717 return updateresult(updated, merged, removed, unresolved), getfiledata
1714 1718
1715 1719
1716 1720 def _advertisefsmonitor(repo, num_gets, p1node):
1717 1721 # Advertise fsmonitor when its presence could be useful.
1718 1722 #
1719 1723 # We only advertise when performing an update from an empty working
1720 1724 # directory. This typically only occurs during initial clone.
1721 1725 #
1722 1726 # We give users a mechanism to disable the warning in case it is
1723 1727 # annoying.
1724 1728 #
1725 1729 # We only advertise on Linux and macOS, because that's where fsmonitor is
1726 1730 # considered stable.
1727 1731 fsmonitorwarning = repo.ui.configbool(b'fsmonitor', b'warn_when_unused')
1728 1732 fsmonitorthreshold = repo.ui.configint(
1729 1733 b'fsmonitor', b'warn_update_file_count'
1730 1734 )
1731 1735 try:
1732 1736 # avoid cycle: extensions -> cmdutil -> merge
1733 1737 from . import extensions
1734 1738
1735 1739 extensions.find(b'fsmonitor')
1736 1740 fsmonitorenabled = repo.ui.config(b'fsmonitor', b'mode') != b'off'
1737 1741 # We intentionally don't look at whether fsmonitor has disabled
1738 1742 # itself because a) fsmonitor may have already printed a warning
1739 1743 # b) we only care about the config state here.
1740 1744 except KeyError:
1741 1745 fsmonitorenabled = False
1742 1746
1743 1747 if (
1744 1748 fsmonitorwarning
1745 1749 and not fsmonitorenabled
1746 1750 and p1node == nullid
1747 1751 and num_gets >= fsmonitorthreshold
1748 1752 and pycompat.sysplatform.startswith((b'linux', b'darwin'))
1749 1753 ):
1750 1754 repo.ui.warn(
1751 1755 _(
1752 1756 b'(warning: large working directory being used without '
1753 1757 b'fsmonitor enabled; enable fsmonitor to improve performance; '
1754 1758 b'see "hg help -e fsmonitor")\n'
1755 1759 )
1756 1760 )
1757 1761
1758 1762
1759 1763 UPDATECHECK_ABORT = b'abort' # handled at higher layers
1760 1764 UPDATECHECK_NONE = b'none'
1761 1765 UPDATECHECK_LINEAR = b'linear'
1762 1766 UPDATECHECK_NO_CONFLICT = b'noconflict'
1763 1767
1764 1768
1765 1769 def update(
1766 1770 repo,
1767 1771 node,
1768 1772 branchmerge,
1769 1773 force,
1770 1774 ancestor=None,
1771 1775 mergeancestor=False,
1772 1776 labels=None,
1773 1777 matcher=None,
1774 1778 mergeforce=False,
1775 1779 updatedirstate=True,
1776 1780 updatecheck=None,
1777 1781 wc=None,
1778 1782 ):
1779 1783 """
1780 1784 Perform a merge between the working directory and the given node
1781 1785
1782 1786 node = the node to update to
1783 1787 branchmerge = whether to merge between branches
1784 1788 force = whether to force branch merging or file overwriting
1785 1789 matcher = a matcher to filter file lists (dirstate not updated)
1786 1790 mergeancestor = whether it is merging with an ancestor. If true,
1787 1791 we should accept the incoming changes for any prompts that occur.
1788 1792 If false, merging with an ancestor (fast-forward) is only allowed
1789 1793 between different named branches. This flag is used by rebase extension
1790 1794 as a temporary fix and should be avoided in general.
1791 1795 labels = labels to use for base, local and other
1792 1796 mergeforce = whether the merge was run with 'merge --force' (deprecated): if
1793 1797 this is True, then 'force' should be True as well.
1794 1798
1795 1799 The table below shows all the behaviors of the update command given the
1796 1800 -c/--check and -C/--clean or no options, whether the working directory is
1797 1801 dirty, whether a revision is specified, and the relationship of the parent
1798 1802 rev to the target rev (linear or not). Match from top first. The -n
1799 1803 option doesn't exist on the command line, but represents the
1800 1804 experimental.updatecheck=noconflict option.
1801 1805
1802 1806 This logic is tested by test-update-branches.t.
1803 1807
1804 1808 -c -C -n -m dirty rev linear | result
1805 1809 y y * * * * * | (1)
1806 1810 y * y * * * * | (1)
1807 1811 y * * y * * * | (1)
1808 1812 * y y * * * * | (1)
1809 1813 * y * y * * * | (1)
1810 1814 * * y y * * * | (1)
1811 1815 * * * * * n n | x
1812 1816 * * * * n * * | ok
1813 1817 n n n n y * y | merge
1814 1818 n n n n y y n | (2)
1815 1819 n n n y y * * | merge
1816 1820 n n y n y * * | merge if no conflict
1817 1821 n y n n y * * | discard
1818 1822 y n n n y * * | (3)
1819 1823
1820 1824 x = can't happen
1821 1825 * = don't-care
1822 1826 1 = incompatible options (checked in commands.py)
1823 1827 2 = abort: uncommitted changes (commit or update --clean to discard changes)
1824 1828 3 = abort: uncommitted changes (checked in commands.py)
1825 1829
1826 1830 The merge is performed inside ``wc``, a workingctx-like object. It defaults
1827 1831 to repo[None] if None is passed.
1828 1832
1829 1833 Return the same tuple as applyupdates().
1830 1834 """
1831 1835 # Avoid cycle.
1832 1836 from . import sparse
1833 1837
1834 1838 # This function used to find the default destination if node was None, but
1835 1839 # that's now in destutil.py.
1836 1840 assert node is not None
1837 1841 if not branchmerge and not force:
1838 1842 # TODO: remove the default once all callers that pass branchmerge=False
1839 1843 # and force=False pass a value for updatecheck. We may want to allow
1840 1844 # updatecheck='abort' to better support some of these callers.
1841 1845 if updatecheck is None:
1842 1846 updatecheck = UPDATECHECK_LINEAR
1843 1847 if updatecheck not in (
1844 1848 UPDATECHECK_NONE,
1845 1849 UPDATECHECK_LINEAR,
1846 1850 UPDATECHECK_NO_CONFLICT,
1847 1851 ):
1848 1852 raise ValueError(
1849 1853 r'Invalid updatecheck %r (can accept %r)'
1850 1854 % (
1851 1855 updatecheck,
1852 1856 (
1853 1857 UPDATECHECK_NONE,
1854 1858 UPDATECHECK_LINEAR,
1855 1859 UPDATECHECK_NO_CONFLICT,
1856 1860 ),
1857 1861 )
1858 1862 )
1859 1863 if wc is not None and wc.isinmemory():
1860 1864 maybe_wlock = util.nullcontextmanager()
1861 1865 else:
1862 1866 maybe_wlock = repo.wlock()
1863 1867 with maybe_wlock:
1864 1868 if wc is None:
1865 1869 wc = repo[None]
1866 1870 pl = wc.parents()
1867 1871 p1 = pl[0]
1868 1872 p2 = repo[node]
1869 1873 if ancestor is not None:
1870 1874 pas = [repo[ancestor]]
1871 1875 else:
1872 1876 if repo.ui.configlist(b'merge', b'preferancestor') == [b'*']:
1873 1877 cahs = repo.changelog.commonancestorsheads(p1.node(), p2.node())
1874 1878 pas = [repo[anc] for anc in (sorted(cahs) or [nullid])]
1875 1879 else:
1876 1880 pas = [p1.ancestor(p2, warn=branchmerge)]
1877 1881
1878 1882 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), bytes(p1), bytes(p2)
1879 1883
1880 1884 overwrite = force and not branchmerge
1881 1885 ### check phase
1882 1886 if not overwrite:
1883 1887 if len(pl) > 1:
1884 1888 raise error.Abort(_(b"outstanding uncommitted merge"))
1885 1889 ms = mergestatemod.mergestate.read(repo)
1886 1890 if list(ms.unresolved()):
1887 1891 raise error.Abort(
1888 1892 _(b"outstanding merge conflicts"),
1889 1893 hint=_(b"use 'hg resolve' to resolve"),
1890 1894 )
1891 1895 if branchmerge:
1892 1896 if pas == [p2]:
1893 1897 raise error.Abort(
1894 1898 _(
1895 1899 b"merging with a working directory ancestor"
1896 1900 b" has no effect"
1897 1901 )
1898 1902 )
1899 1903 elif pas == [p1]:
1900 1904 if not mergeancestor and wc.branch() == p2.branch():
1901 1905 raise error.Abort(
1902 1906 _(b"nothing to merge"),
1903 1907 hint=_(b"use 'hg update' or check 'hg heads'"),
1904 1908 )
1905 1909 if not force and (wc.files() or wc.deleted()):
1906 1910 raise error.Abort(
1907 1911 _(b"uncommitted changes"),
1908 1912 hint=_(b"use 'hg status' to list changes"),
1909 1913 )
1910 1914 if not wc.isinmemory():
1911 1915 for s in sorted(wc.substate):
1912 1916 wc.sub(s).bailifchanged()
1913 1917
1914 1918 elif not overwrite:
1915 1919 if p1 == p2: # no-op update
1916 1920 # call the hooks and exit early
1917 1921 repo.hook(b'preupdate', throw=True, parent1=xp2, parent2=b'')
1918 1922 repo.hook(b'update', parent1=xp2, parent2=b'', error=0)
1919 1923 return updateresult(0, 0, 0, 0)
1920 1924
1921 1925 if updatecheck == UPDATECHECK_LINEAR and pas not in (
1922 1926 [p1],
1923 1927 [p2],
1924 1928 ): # nonlinear
1925 1929 dirty = wc.dirty(missing=True)
1926 1930 if dirty:
1927 1931 # The branching is a bit strange in order to make the minimal
1928 1932 # number of calls to obsutil.foreground.
1929 1933 foreground = obsutil.foreground(repo, [p1.node()])
1930 1934 # note: the <node> variable contains a random identifier
1931 1935 if repo[node].node() in foreground:
1932 1936 pass # allow updating to successors
1933 1937 else:
1934 1938 msg = _(b"uncommitted changes")
1935 1939 hint = _(b"commit or update --clean to discard changes")
1936 1940 raise error.UpdateAbort(msg, hint=hint)
1937 1941 else:
1938 1942 # Allow jumping branches if clean and specific rev given
1939 1943 pass
1940 1944
1941 1945 if overwrite:
1942 1946 pas = [wc]
1943 1947 elif not branchmerge:
1944 1948 pas = [p1]
1945 1949
1946 1950 # deprecated config: merge.followcopies
1947 1951 followcopies = repo.ui.configbool(b'merge', b'followcopies')
1948 1952 if overwrite:
1949 1953 followcopies = False
1950 1954 elif not pas[0]:
1951 1955 followcopies = False
1952 1956 if not branchmerge and not wc.dirty(missing=True):
1953 1957 followcopies = False
1954 1958
1955 1959 ### calculate phase
1956 1960 mresult = calculateupdates(
1957 1961 repo,
1958 1962 wc,
1959 1963 p2,
1960 1964 pas,
1961 1965 branchmerge,
1962 1966 force,
1963 1967 mergeancestor,
1964 1968 followcopies,
1965 1969 matcher=matcher,
1966 1970 mergeforce=mergeforce,
1967 1971 )
1968 1972
1969 1973 if updatecheck == UPDATECHECK_NO_CONFLICT:
1970 1974 if mresult.hasconflicts():
1971 1975 msg = _(b"conflicting changes")
1972 1976 hint = _(b"commit or update --clean to discard changes")
1973 1977 raise error.Abort(msg, hint=hint)
1974 1978
1975 1979 # Prompt and create actions. Most of this is in the resolve phase
1976 1980 # already, but we can't handle .hgsubstate in filemerge or
1977 1981 # subrepoutil.submerge yet so we have to keep prompting for it.
1978 1982 vals = mresult.getfile(b'.hgsubstate')
1979 1983 if vals:
1980 1984 f = b'.hgsubstate'
1981 1985 m, args, msg = vals
1982 1986 prompts = filemerge.partextras(labels)
1983 1987 prompts[b'f'] = f
1984 1988 if m == mergestatemod.ACTION_CHANGED_DELETED:
1985 1989 if repo.ui.promptchoice(
1986 1990 _(
1987 1991 b"local%(l)s changed %(f)s which other%(o)s deleted\n"
1988 1992 b"use (c)hanged version or (d)elete?"
1989 1993 b"$$ &Changed $$ &Delete"
1990 1994 )
1991 1995 % prompts,
1992 1996 0,
1993 1997 ):
1994 1998 mresult.addfile(
1995 1999 f, mergestatemod.ACTION_REMOVE, None, b'prompt delete',
1996 2000 )
1997 2001 elif f in p1:
1998 2002 mresult.addfile(
1999 2003 f,
2000 2004 mergestatemod.ACTION_ADD_MODIFIED,
2001 2005 None,
2002 2006 b'prompt keep',
2003 2007 )
2004 2008 else:
2005 2009 mresult.addfile(
2006 2010 f, mergestatemod.ACTION_ADD, None, b'prompt keep',
2007 2011 )
2008 2012 elif m == mergestatemod.ACTION_DELETED_CHANGED:
2009 2013 f1, f2, fa, move, anc = args
2010 2014 flags = p2[f2].flags()
2011 2015 if (
2012 2016 repo.ui.promptchoice(
2013 2017 _(
2014 2018 b"other%(o)s changed %(f)s which local%(l)s deleted\n"
2015 2019 b"use (c)hanged version or leave (d)eleted?"
2016 2020 b"$$ &Changed $$ &Deleted"
2017 2021 )
2018 2022 % prompts,
2019 2023 0,
2020 2024 )
2021 2025 == 0
2022 2026 ):
2023 2027 mresult.addfile(
2024 2028 f,
2025 2029 mergestatemod.ACTION_GET,
2026 2030 (flags, False),
2027 2031 b'prompt recreating',
2028 2032 )
2029 2033 else:
2030 2034 mresult.removefile(f)
2031 2035
2032 2036 if not util.fscasesensitive(repo.path):
2033 2037 # check collision between files only in p2 for clean update
2034 2038 if not branchmerge and (
2035 2039 force or not wc.dirty(missing=True, branch=False)
2036 2040 ):
2037 2041 _checkcollision(repo, p2.manifest(), None)
2038 2042 else:
2039 2043 _checkcollision(repo, wc.manifest(), mresult)
2040 2044
2041 2045 # divergent renames
2042 2046 for f, fl in sorted(pycompat.iteritems(mresult.diverge)):
2043 2047 repo.ui.warn(
2044 2048 _(
2045 2049 b"note: possible conflict - %s was renamed "
2046 2050 b"multiple times to:\n"
2047 2051 )
2048 2052 % f
2049 2053 )
2050 2054 for nf in sorted(fl):
2051 2055 repo.ui.warn(b" %s\n" % nf)
2052 2056
2053 2057 # rename and delete
2054 2058 for f, fl in sorted(pycompat.iteritems(mresult.renamedelete)):
2055 2059 repo.ui.warn(
2056 2060 _(
2057 2061 b"note: possible conflict - %s was deleted "
2058 2062 b"and renamed to:\n"
2059 2063 )
2060 2064 % f
2061 2065 )
2062 2066 for nf in sorted(fl):
2063 2067 repo.ui.warn(b" %s\n" % nf)
2064 2068
2065 2069 ### apply phase
2066 2070 if not branchmerge: # just jump to the new rev
2067 2071 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, b''
2068 2072 # If we're doing a partial update, we need to skip updating
2069 2073 # the dirstate.
2070 2074 always = matcher is None or matcher.always()
2071 2075 updatedirstate = updatedirstate and always and not wc.isinmemory()
2072 2076 if updatedirstate:
2073 2077 repo.hook(b'preupdate', throw=True, parent1=xp1, parent2=xp2)
2074 2078 # note that we're in the middle of an update
2075 2079 repo.vfs.write(b'updatestate', p2.hex())
2076 2080
2077 2081 _advertisefsmonitor(
2078 2082 repo, mresult.len((mergestatemod.ACTION_GET,)), p1.node()
2079 2083 )
2080 2084
2081 2085 wantfiledata = updatedirstate and not branchmerge
2082 2086 stats, getfiledata = applyupdates(
2083 2087 repo,
2084 2088 mresult,
2085 2089 wc,
2086 2090 p2,
2087 2091 overwrite,
2088 2092 wantfiledata,
2089 2093 labels=labels,
2090 2094 commitinfo=mresult.commitinfo,
2091 2095 )
2092 2096
2093 2097 if updatedirstate:
2094 2098 with repo.dirstate.parentchange():
2095 2099 repo.setparents(fp1, fp2)
2096 2100 mergestatemod.recordupdates(
2097 2101 repo, mresult.actionsdict, branchmerge, getfiledata
2098 2102 )
2099 2103 # update completed, clear state
2100 2104 util.unlink(repo.vfs.join(b'updatestate'))
2101 2105
2102 2106 if not branchmerge:
2103 2107 repo.dirstate.setbranch(p2.branch())
2104 2108
2105 2109 # If we're updating to a location, clean up any stale temporary includes
2106 2110 # (ex: this happens during hg rebase --abort).
2107 2111 if not branchmerge:
2108 2112 sparse.prunetemporaryincludes(repo)
2109 2113
2110 2114 if updatedirstate:
2111 2115 repo.hook(
2112 2116 b'update', parent1=xp1, parent2=xp2, error=stats.unresolvedcount
2113 2117 )
2114 2118 return stats
2115 2119
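# Editorial note: a hedged usage sketch (not part of merge.py). This is
# roughly what a plain `hg update REV` boils down to once the destination
# revision has been resolved; hook wiring and error handling are elided.
def _example_update(repo, rev):
    ctx = scmutil.revsingle(repo, rev)
    return update(repo, ctx.node(), branchmerge=False, force=False)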
2116 2120
2117 2121 def merge(ctx, labels=None, force=False, wc=None):
2118 2122 """Merge another topological branch into the working copy.
2119 2123
2120 2124 force = whether the merge was run with 'merge --force' (deprecated)
2121 2125 """
2122 2126
2123 2127 return update(
2124 2128 ctx.repo(),
2125 2129 ctx.rev(),
2126 2130 labels=labels,
2127 2131 branchmerge=True,
2128 2132 force=force,
2129 2133 mergeforce=force,
2130 2134 wc=wc,
2131 2135 )
2132 2136
2133 2137
2134 2138 def clean_update(ctx, wc=None):
2135 2139 """Do a clean update to the given commit.
2136 2140
2137 2141 This involves updating to the commit and discarding any changes in the
2138 2142 working copy.
2139 2143 """
2140 2144 return update(ctx.repo(), ctx.rev(), branchmerge=False, force=True, wc=wc)
2141 2145
2142 2146
2143 2147 def revert_to(ctx, matcher=None, wc=None):
2144 2148 """Revert the working copy to the given commit.
2145 2149
2146 2150 The working copy will keep its current parent(s) but its content will
2147 2151 be the same as in the given commit.
2148 2152 """
2149 2153
2150 2154 return update(
2151 2155 ctx.repo(),
2152 2156 ctx.rev(),
2153 2157 branchmerge=False,
2154 2158 force=True,
2155 2159 updatedirstate=False,
2156 2160 matcher=matcher,
2157 2161 wc=wc,
2158 2162 )
2159 2163
2160 2164
2161 2165 def graft(
2162 2166 repo,
2163 2167 ctx,
2164 2168 base=None,
2165 2169 labels=None,
2166 2170 keepparent=False,
2167 2171 keepconflictparent=False,
2168 2172 wctx=None,
2169 2173 ):
2170 2174 """Do a graft-like merge.
2171 2175
2172 2176 This is a merge where the merge ancestor is chosen such that one
2173 2177 or more changesets are grafted onto the current changeset. In
2174 2178 addition to the merge, this fixes up the dirstate to include only
2175 2179 a single parent (if keepparent is False) and tries to duplicate any
2176 2180 renames/copies appropriately.
2177 2181
2178 2182 ctx - changeset to rebase
2179 2183 base - merge base, or ctx.p1() if not specified
2180 2184 labels - merge labels eg ['local', 'graft']
2181 2185 keepparent - keep second parent if any
2182 2186 keepconflictparent - if unresolved, keep parent used for the merge
2183 2187
2184 2188 """
2185 2189 # If we're grafting a descendant onto an ancestor, be sure to pass
2186 2190 # mergeancestor=True to update. This does two things: 1) allows the merge if
2187 2191 # the destination is the same as the parent of the ctx (so we can use graft
2188 2192 # to copy commits), and 2) informs update that the incoming changes are
2189 2193 # newer than the destination so it doesn't prompt about "remote changed foo
2190 2194 # which local deleted".
2191 2195 # We also pass mergeancestor=True when base is the same revision as p1. 2)
2192 2196 # doesn't matter as there can't possibly be conflicts, but 1) is necessary.
2193 2197 wctx = wctx or repo[None]
2194 2198 pctx = wctx.p1()
2195 2199 base = base or ctx.p1()
2196 2200 mergeancestor = (
2197 2201 repo.changelog.isancestor(pctx.node(), ctx.node())
2198 2202 or pctx.rev() == base.rev()
2199 2203 )
2200 2204
2201 2205 stats = update(
2202 2206 repo,
2203 2207 ctx.node(),
2204 2208 True,
2205 2209 True,
2206 2210 base.node(),
2207 2211 mergeancestor=mergeancestor,
2208 2212 labels=labels,
2209 2213 wc=wctx,
2210 2214 )
2211 2215
2212 2216 if keepconflictparent and stats.unresolvedcount:
2213 2217 pother = ctx.node()
2214 2218 else:
2215 2219 pother = nullid
2216 2220 parents = ctx.parents()
2217 2221 if keepparent and len(parents) == 2 and base in parents:
2218 2222 parents.remove(base)
2219 2223 pother = parents[0].node()
2220 2224 # Never set both parents equal to each other
2221 2225 if pother == pctx.node():
2222 2226 pother = nullid
2223 2227
2224 2228 if wctx.isinmemory():
2225 2229 wctx.setparents(pctx.node(), pother)
2226 2230 # fix up dirstate for copies and renames
2227 2231 copies.graftcopies(wctx, ctx, base)
2228 2232 else:
2229 2233 with repo.dirstate.parentchange():
2230 2234 repo.setparents(pctx.node(), pother)
2231 2235 repo.dirstate.write(repo.currenttransaction())
2232 2236 # fix up dirstate for copies and renames
2233 2237 copies.graftcopies(wctx, ctx, base)
2234 2238 return stats
2235 2239
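# Editorial note: a hedged usage sketch (not part of merge.py) of grafting a
# single changeset onto the working directory, one step of what the graft
# command does per revision.
def _example_graft(repo, rev):
    ctx = repo[rev]
    return graft(repo, ctx, base=ctx.p1(), labels=[b'local', b'graft'])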
2236 2240
2237 2241 def purge(
2238 2242 repo,
2239 2243 matcher,
2240 2244 unknown=True,
2241 2245 ignored=False,
2242 2246 removeemptydirs=True,
2243 2247 removefiles=True,
2244 2248 abortonerror=False,
2245 2249 noop=False,
2246 2250 ):
2247 2251 """Purge the working directory of untracked files.
2248 2252
2249 2253 ``matcher`` is a matcher configured to scan the working directory -
2250 2254 potentially a subset.
2251 2255
2252 2256 ``unknown`` controls whether unknown files should be purged.
2253 2257
2254 2258 ``ignored`` controls whether ignored files should be purged.
2255 2259
2256 2260 ``removeemptydirs`` controls whether empty directories should be removed.
2257 2261
2258 2262 ``removefiles`` controls whether files are removed.
2259 2263
2260 2264 ``abortonerror`` causes an exception to be raised if an error occurs
2261 2265 deleting a file or directory.
2262 2266
2263 2267 ``noop`` controls whether files are actually removed. When set, the paths
2264 2268 that would be removed are computed and reported, but nothing is deleted.
2265 2269
2266 2270 Returns an iterable of relative paths in the working directory that were
2267 2271 or would be removed.
2268 2272 """
2269 2273
2270 2274 def remove(removefn, path):
2271 2275 try:
2272 2276 removefn(path)
2273 2277 except OSError:
2274 2278 m = _(b'%s cannot be removed') % path
2275 2279 if abortonerror:
2276 2280 raise error.Abort(m)
2277 2281 else:
2278 2282 repo.ui.warn(_(b'warning: %s\n') % m)
2279 2283
2280 2284 # There's no API to copy a matcher. So mutate the passed matcher and
2281 2285 # restore it when we're done.
2282 2286 oldtraversedir = matcher.traversedir
2283 2287
2284 2288 res = []
2285 2289
2286 2290 try:
2287 2291 if removeemptydirs:
2288 2292 directories = []
2289 2293 matcher.traversedir = directories.append
2290 2294
2291 2295 status = repo.status(match=matcher, ignored=ignored, unknown=unknown)
2292 2296
2293 2297 if removefiles:
2294 2298 for f in sorted(status.unknown + status.ignored):
2295 2299 if not noop:
2296 2300 repo.ui.note(_(b'removing file %s\n') % f)
2297 2301 remove(repo.wvfs.unlink, f)
2298 2302 res.append(f)
2299 2303
2300 2304 if removeemptydirs:
2301 2305 for f in sorted(directories, reverse=True):
2302 2306 if matcher(f) and not repo.wvfs.listdir(f):
2303 2307 if not noop:
2304 2308 repo.ui.note(_(b'removing directory %s\n') % f)
2305 2309 remove(repo.wvfs.rmdir, f)
2306 2310 res.append(f)
2307 2311
2308 2312 return res
2309 2313
2310 2314 finally:
2311 2315 matcher.traversedir = oldtraversedir
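# Editorial note: a hedged usage sketch (not part of merge.py) of removing
# all unknown files and empty directories, similar to a bare `hg purge`.
# matchmod refers to mercurial.match, imported here only for the sketch.
def _example_purge(repo):
    from mercurial import match as matchmod
    return purge(repo, matchmod.always(), unknown=True, ignored=False)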
@@ -1,831 +1,831 @@
1 1 # sparse.py - functionality for sparse checkouts
2 2 #
3 3 # Copyright 2014 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import os
11 11
12 12 from .i18n import _
13 13 from .node import (
14 14 hex,
15 15 nullid,
16 16 )
17 17 from . import (
18 18 error,
19 19 match as matchmod,
20 20 merge as mergemod,
21 21 mergestate as mergestatemod,
22 22 pathutil,
23 23 pycompat,
24 24 scmutil,
25 25 util,
26 26 )
27 27 from .utils import hashutil
28 28
29 29 # Whether sparse features are enabled. This variable is intended to be
30 30 # temporary to facilitate porting sparse to core. It should eventually be
31 31 # a per-repo option, possibly a repo requirement.
32 32 enabled = False
33 33
34 34
35 35 def parseconfig(ui, raw, action):
36 36 """Parse sparse config file content.
37 37
38 38 action is the command which is triggering this read; can be narrow or sparse
39 39
40 40 Returns a tuple of includes, excludes, and profiles.
41 41 """
42 42 includes = set()
43 43 excludes = set()
44 44 profiles = set()
45 45 current = None
46 46 havesection = False
47 47
48 48 for line in raw.split(b'\n'):
49 49 line = line.strip()
50 50 if not line or line.startswith(b'#'):
51 51 # empty or comment line, skip
52 52 continue
53 53 elif line.startswith(b'%include '):
54 54 line = line[9:].strip()
55 55 if line:
56 56 profiles.add(line)
57 57 elif line == b'[include]':
58 58 if havesection and current != includes:
59 59 # TODO pass filename into this API so we can report it.
60 60 raise error.Abort(
61 61 _(
62 62 b'%(action)s config cannot have includes '
63 63 b'after excludes'
64 64 )
65 65 % {b'action': action}
66 66 )
67 67 havesection = True
68 68 current = includes
69 69 continue
70 70 elif line == b'[exclude]':
71 71 havesection = True
72 72 current = excludes
73 73 elif line:
74 74 if current is None:
75 75 raise error.Abort(
76 76 _(
77 77 b'%(action)s config entry outside of '
78 78 b'section: %(line)s'
79 79 )
80 80 % {b'action': action, b'line': line},
81 81 hint=_(
82 82 b'add an [include] or [exclude] line '
83 83 b'to declare the entry type'
84 84 ),
85 85 )
86 86
87 87 if line.strip().startswith(b'/'):
88 88 ui.warn(
89 89 _(
90 90 b'warning: %(action)s profile cannot use'
91 91 b' paths starting with /, ignoring %(line)s\n'
92 92 )
93 93 % {b'action': action, b'line': line}
94 94 )
95 95 continue
96 96 current.add(line)
97 97
98 98 return includes, excludes, profiles
99 99
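# Editorial note: a hedged illustration (not part of sparse.py) of input
# accepted by parseconfig() above. Parsing this raw content yields
# includes={b'tools/'}, excludes={b'tools/tests/'}, profiles={b'base.sparse'}.
_EXAMPLE_SPARSE_CONFIG = (
    b"%include base.sparse\n"
    b"[include]\n"
    b"tools/\n"
    b"[exclude]\n"
    b"tools/tests/\n"
)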
100 100
101 101 # Exists as separate function to facilitate monkeypatching.
102 102 def readprofile(repo, profile, changeid):
103 103 """Resolve the raw content of a sparse profile file."""
104 104 # TODO add some kind of cache here because this incurs a manifest
105 105 # resolve and can be slow.
106 106 return repo.filectx(profile, changeid=changeid).data()
107 107
108 108
109 109 def patternsforrev(repo, rev):
110 110 """Obtain sparse checkout patterns for the given rev.
111 111
112 112 Returns a tuple of iterables representing includes, excludes, and
113 113 profiles.
114 114 """
115 115 # Feature isn't enabled. No-op.
116 116 if not enabled:
117 117 return set(), set(), set()
118 118
119 119 raw = repo.vfs.tryread(b'sparse')
120 120 if not raw:
121 121 return set(), set(), set()
122 122
123 123 if rev is None:
124 124 raise error.Abort(
125 125 _(b'cannot parse sparse patterns from working directory')
126 126 )
127 127
128 128 includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
129 129 ctx = repo[rev]
130 130
131 131 if profiles:
132 132 visited = set()
133 133 while profiles:
134 134 profile = profiles.pop()
135 135 if profile in visited:
136 136 continue
137 137
138 138 visited.add(profile)
139 139
140 140 try:
141 141 raw = readprofile(repo, profile, rev)
142 142 except error.ManifestLookupError:
143 143 msg = (
144 144 b"warning: sparse profile '%s' not found "
145 145 b"in rev %s - ignoring it\n" % (profile, ctx)
146 146 )
147 147 # experimental config: sparse.missingwarning
148 148 if repo.ui.configbool(b'sparse', b'missingwarning'):
149 149 repo.ui.warn(msg)
150 150 else:
151 151 repo.ui.debug(msg)
152 152 continue
153 153
154 154 pincludes, pexcludes, subprofs = parseconfig(
155 155 repo.ui, raw, b'sparse'
156 156 )
157 157 includes.update(pincludes)
158 158 excludes.update(pexcludes)
159 159 profiles.update(subprofs)
160 160
161 161 profiles = visited
162 162
163 163 if includes:
164 164 includes.add(b'.hg*')
165 165
166 166 return includes, excludes, profiles
167 167
168 168
169 169 def activeconfig(repo):
170 170 """Determine the active sparse config rules.
171 171
172 172 Rules are constructed by reading the current sparse config and bringing in
173 173 referenced profiles from parents of the working directory.
174 174 """
175 175 revs = [
176 176 repo.changelog.rev(node)
177 177 for node in repo.dirstate.parents()
178 178 if node != nullid
179 179 ]
180 180
181 181 allincludes = set()
182 182 allexcludes = set()
183 183 allprofiles = set()
184 184
185 185 for rev in revs:
186 186 includes, excludes, profiles = patternsforrev(repo, rev)
187 187 allincludes |= includes
188 188 allexcludes |= excludes
189 189 allprofiles |= profiles
190 190
191 191 return allincludes, allexcludes, allprofiles
192 192
193 193
194 194 def configsignature(repo, includetemp=True):
195 195 """Obtain the signature string for the current sparse configuration.
196 196
197 197 This is used to construct a cache key for matchers.
198 198 """
199 199 cache = repo._sparsesignaturecache
200 200
201 201 signature = cache.get(b'signature')
202 202
203 203 if includetemp:
204 204 tempsignature = cache.get(b'tempsignature')
205 205 else:
206 206 tempsignature = b'0'
207 207
208 208 if signature is None or (includetemp and tempsignature is None):
209 209 signature = hex(hashutil.sha1(repo.vfs.tryread(b'sparse')).digest())
210 210 cache[b'signature'] = signature
211 211
212 212 if includetemp:
213 213 raw = repo.vfs.tryread(b'tempsparse')
214 214 tempsignature = hex(hashutil.sha1(raw).digest())
215 215 cache[b'tempsignature'] = tempsignature
216 216
217 217 return b'%s %s' % (signature, tempsignature)
218 218
219 219
220 220 def writeconfig(repo, includes, excludes, profiles):
221 221 """Write the sparse config file given a sparse configuration."""
222 222 with repo.vfs(b'sparse', b'wb') as fh:
223 223 for p in sorted(profiles):
224 224 fh.write(b'%%include %s\n' % p)
225 225
226 226 if includes:
227 227 fh.write(b'[include]\n')
228 228 for i in sorted(includes):
229 229 fh.write(i)
230 230 fh.write(b'\n')
231 231
232 232 if excludes:
233 233 fh.write(b'[exclude]\n')
234 234 for e in sorted(excludes):
235 235 fh.write(e)
236 236 fh.write(b'\n')
237 237
238 238 repo._sparsesignaturecache.clear()
239 239
240 240
241 241 def readtemporaryincludes(repo):
242 242 raw = repo.vfs.tryread(b'tempsparse')
243 243 if not raw:
244 244 return set()
245 245
246 246 return set(raw.split(b'\n'))
247 247
248 248
249 249 def writetemporaryincludes(repo, includes):
250 250 repo.vfs.write(b'tempsparse', b'\n'.join(sorted(includes)))
251 251 repo._sparsesignaturecache.clear()
252 252
253 253
254 254 def addtemporaryincludes(repo, additional):
255 255 includes = readtemporaryincludes(repo)
256 256 for i in additional:
257 257 includes.add(i)
258 258 writetemporaryincludes(repo, includes)
259 259
260 260
261 261 def prunetemporaryincludes(repo):
262 262 if not enabled or not repo.vfs.exists(b'tempsparse'):
263 263 return
264 264
265 265 s = repo.status()
266 266 if s.modified or s.added or s.removed or s.deleted:
267 267 # Still have pending changes. Don't bother trying to prune.
268 268 return
269 269
270 270 sparsematch = matcher(repo, includetemp=False)
271 271 dirstate = repo.dirstate
272 272 mresult = mergemod.mergeresult()
273 273 dropped = []
274 274 tempincludes = readtemporaryincludes(repo)
275 275 for file in tempincludes:
276 276 if file in dirstate and not sparsematch(file):
277 277 message = _(b'dropping temporarily included sparse files')
278 278 mresult.addfile(file, mergestatemod.ACTION_REMOVE, None, message)
279 279 dropped.append(file)
280 280
281 281 mergemod.applyupdates(
282 282 repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
283 283 )
284 284
285 285 # Fix dirstate
286 286 for file in dropped:
287 287 dirstate.drop(file)
288 288
289 289 repo.vfs.unlink(b'tempsparse')
290 290 repo._sparsesignaturecache.clear()
291 291 msg = _(
292 292 b'cleaned up %d temporarily added file(s) from the '
293 293 b'sparse checkout\n'
294 294 )
295 295 repo.ui.status(msg % len(tempincludes))
296 296
297 297
298 298 def forceincludematcher(matcher, includes):
299 299 """Returns a matcher that returns true for any of the forced includes
300 300 before testing against the actual matcher."""
301 301 kindpats = [(b'path', include, b'') for include in includes]
302 302 includematcher = matchmod.includematcher(b'', kindpats)
303 303 return matchmod.unionmatcher([includematcher, matcher])
304 304
305 305
306 306 def matcher(repo, revs=None, includetemp=True):
307 307 """Obtain a matcher for sparse working directories for the given revs.
308 308
309 309 If multiple revisions are specified, the matcher is the union of all
310 310 revs.
311 311
312 312 ``includetemp`` indicates whether to use the temporary sparse profile.
313 313 """
314 314 # If sparse isn't enabled, sparse matcher matches everything.
315 315 if not enabled:
316 316 return matchmod.always()
317 317
318 318 if not revs or revs == [None]:
319 319 revs = [
320 320 repo.changelog.rev(node)
321 321 for node in repo.dirstate.parents()
322 322 if node != nullid
323 323 ]
324 324
325 325 signature = configsignature(repo, includetemp=includetemp)
326 326
327 327 key = b'%s %s' % (signature, b' '.join(map(pycompat.bytestr, revs)))
328 328
329 329 result = repo._sparsematchercache.get(key)
330 330 if result:
331 331 return result
332 332
333 333 matchers = []
334 334 for rev in revs:
335 335 try:
336 336 includes, excludes, profiles = patternsforrev(repo, rev)
337 337
338 338 if includes or excludes:
339 339 matcher = matchmod.match(
340 340 repo.root,
341 341 b'',
342 342 [],
343 343 include=includes,
344 344 exclude=excludes,
345 345 default=b'relpath',
346 346 )
347 347 matchers.append(matcher)
348 348 except IOError:
349 349 pass
350 350
351 351 if not matchers:
352 352 result = matchmod.always()
353 353 elif len(matchers) == 1:
354 354 result = matchers[0]
355 355 else:
356 356 result = matchmod.unionmatcher(matchers)
357 357
358 358 if includetemp:
359 359 tempincludes = readtemporaryincludes(repo)
360 360 result = forceincludematcher(result, tempincludes)
361 361
362 362 repo._sparsematchercache[key] = result
363 363
364 364 return result
365 365
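# Editorial note: a hedged usage sketch (not part of sparse.py): checking
# which paths fall inside the active sparse checkout of the working
# directory parents.
def _example_insparse(repo, paths):
    sparsematch = matcher(repo)
    return [p for p in paths if sparsematch(p)]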
366 366
367 367 def filterupdatesactions(repo, wctx, mctx, branchmerge, mresult):
368 368 """Filter updates to only lay out files that match the sparse rules."""
369 369 if not enabled:
370 370 return
371 371
372 372 oldrevs = [pctx.rev() for pctx in wctx.parents()]
373 373 oldsparsematch = matcher(repo, oldrevs)
374 374
375 375 if oldsparsematch.always():
376 376 return
377 377
378 378 files = set()
379 379 prunedactions = {}
380 380
381 381 if branchmerge:
382 382 # If we're merging, use the wctx filter, since we're merging into
383 383 # the wctx.
384 384 sparsematch = matcher(repo, [wctx.p1().rev()])
385 385 else:
386 386 # If we're updating, use the target context's filter, since we're
387 387 # moving to the target context.
388 388 sparsematch = matcher(repo, [mctx.rev()])
389 389
390 390 temporaryfiles = []
391 for file, action in pycompat.iteritems(mresult.actions):
391 for file, action in mresult.filemap():
392 392 type, args, msg = action
393 393 files.add(file)
394 394 if sparsematch(file):
395 395 prunedactions[file] = action
396 396 elif type == mergestatemod.ACTION_MERGE:
397 397 temporaryfiles.append(file)
398 398 prunedactions[file] = action
399 399 elif branchmerge:
400 400 if type != mergestatemod.ACTION_KEEP:
401 401 temporaryfiles.append(file)
402 402 prunedactions[file] = action
403 403 elif type == mergestatemod.ACTION_FORGET:
404 404 prunedactions[file] = action
405 405 elif file in wctx:
406 406 prunedactions[file] = (mergestatemod.ACTION_REMOVE, args, msg)
407 407
408 408 # in case of a rename on one side, it is possible that f1 might not
409 409 # be present in the sparse checkout; we should include it
410 410 # TODO: should we do the same for f2?
411 411 # this exists as a separate check because the file can be in sparse and
412 412 # hence, if we tried to fold this condition into the
413 413 # `elif type == ACTION_MERGE` above, it would not be triggered
414 414 if branchmerge and type == mergestatemod.ACTION_MERGE:
415 415 f1, f2, fa, move, anc = args
416 416 if not sparsematch(f1):
417 417 temporaryfiles.append(f1)
418 418
419 419 if len(temporaryfiles) > 0:
420 420 repo.ui.status(
421 421 _(
422 422 b'temporarily included %d file(s) in the sparse '
423 423 b'checkout for merging\n'
424 424 )
425 425 % len(temporaryfiles)
426 426 )
427 427 addtemporaryincludes(repo, temporaryfiles)
428 428
429 429 # Add the new files to the working copy so they can be merged, etc
430 430 tmresult = mergemod.mergeresult()
431 431 message = b'temporarily adding to sparse checkout'
432 432 wctxmanifest = repo[None].manifest()
433 433 for file in temporaryfiles:
434 434 if file in wctxmanifest:
435 435 fctx = repo[None][file]
436 436 tmresult.addfile(
437 437 file,
438 438 mergestatemod.ACTION_GET,
439 439 (fctx.flags(), False),
440 440 message,
441 441 )
442 442
443 443 mergemod.applyupdates(
444 444 repo, tmresult, repo[None], repo[b'.'], False, wantfiledata=False
445 445 )
446 446
447 447 dirstate = repo.dirstate
448 448 for file, flags, msg in tmresult.getactions([mergestatemod.ACTION_GET]):
449 449 dirstate.normal(file)
450 450
451 451 profiles = activeconfig(repo)[2]
452 452 changedprofiles = profiles & files
453 453 # If an active profile changed during the update, refresh the checkout.
454 454 # Don't do this during a branch merge, since all incoming changes should
455 455 # have been handled by the temporary includes above.
456 456 if changedprofiles and not branchmerge:
457 457 mf = mctx.manifest()
458 458 for file in mf:
459 459 old = oldsparsematch(file)
460 460 new = sparsematch(file)
461 461 if not old and new:
462 462 flags = mf.flags(file)
463 463 prunedactions[file] = (
464 464 mergestatemod.ACTION_GET,
465 465 (flags, False),
466 466 b'',
467 467 )
468 468 elif old and not new:
469 469 prunedactions[file] = (mergestatemod.ACTION_REMOVE, [], b'')
470 470
471 471 mresult.setactions(prunedactions)
472 472
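# Editorial note: a hedged sketch (not part of sparse.py) of the
# mergeresult.filemap() API this change switches to above; it is assumed to
# yield (filename, (action_type, args, message)) pairs for every action.
def _example_mergefiles(mresult):
    return sorted(f for f, (t, args, msg) in mresult.filemap())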
473 473
474 474 def refreshwdir(repo, origstatus, origsparsematch, force=False):
475 475 """Refreshes working directory by taking sparse config into account.
476 476
477 477 The old status and sparse matcher are compared against the current sparse
478 478 matcher.
479 479
480 480 Will abort if a file with pending changes is being excluded or included
481 481 unless ``force`` is True.
482 482 """
483 483 # Verify there are no pending changes
484 484 pending = set()
485 485 pending.update(origstatus.modified)
486 486 pending.update(origstatus.added)
487 487 pending.update(origstatus.removed)
488 488 sparsematch = matcher(repo)
489 489 abort = False
490 490
491 491 for f in pending:
492 492 if not sparsematch(f):
493 493 repo.ui.warn(_(b"pending changes to '%s'\n") % f)
494 494 abort = not force
495 495
496 496 if abort:
497 497 raise error.Abort(
498 498 _(b'could not update sparseness due to pending changes')
499 499 )
500 500
501 501 # Calculate merge result
502 502 dirstate = repo.dirstate
503 503 ctx = repo[b'.']
504 504 added = []
505 505 lookup = []
506 506 dropped = []
507 507 mf = ctx.manifest()
508 508 files = set(mf)
509 509 mresult = mergemod.mergeresult()
510 510
511 511 for file in files:
512 512 old = origsparsematch(file)
513 513 new = sparsematch(file)
514 514 # Add files that are newly included, or that don't exist in
515 515 # the dirstate yet.
516 516 if (new and not old) or (old and new and file not in dirstate):
517 517 fl = mf.flags(file)
518 518 if repo.wvfs.exists(file):
519 519 mresult.addfile(file, mergestatemod.ACTION_EXEC, (fl,), b'')
520 520 lookup.append(file)
521 521 else:
522 522 mresult.addfile(
523 523 file, mergestatemod.ACTION_GET, (fl, False), b''
524 524 )
525 525 added.append(file)
526 526 # Drop files that are newly excluded, or that still exist in
527 527 # the dirstate.
528 528 elif (old and not new) or (not old and not new and file in dirstate):
529 529 dropped.append(file)
530 530 if file not in pending:
531 531 mresult.addfile(file, mergestatemod.ACTION_REMOVE, [], b'')
532 532
533 533 # Verify there are no pending changes in newly included files
534 534 abort = False
535 535 for file in lookup:
536 536 repo.ui.warn(_(b"pending changes to '%s'\n") % file)
537 537 abort = not force
538 538 if abort:
539 539 raise error.Abort(
540 540 _(
541 541 b'cannot change sparseness due to pending '
542 542 b'changes (delete the files or use '
543 543 b'--force to bring them back dirty)'
544 544 )
545 545 )
546 546
547 547 # Check for files that were only in the dirstate.
548 548 for file, state in pycompat.iteritems(dirstate):
549 549 if file not in files:
550 550 old = origsparsematch(file)
551 551 new = sparsematch(file)
552 552 if old and not new:
553 553 dropped.append(file)
554 554
555 555 mergemod.applyupdates(
556 556 repo, mresult, repo[None], repo[b'.'], False, wantfiledata=False
557 557 )
558 558
559 559 # Fix dirstate
560 560 for file in added:
561 561 dirstate.normal(file)
562 562
563 563 for file in dropped:
564 564 dirstate.drop(file)
565 565
566 566 for file in lookup:
567 567 # File exists on disk, and we're bringing it back in an unknown state.
568 568 dirstate.normallookup(file)
569 569
570 570 return added, dropped, lookup
571 571
572 572
573 573 def aftercommit(repo, node):
574 574 """Perform actions after a working directory commit."""
575 575 # This function is called unconditionally, even if sparse isn't
576 576 # enabled.
577 577 ctx = repo[node]
578 578
579 579 profiles = patternsforrev(repo, ctx.rev())[2]
580 580
581 581 # profiles will only have data if sparse is enabled.
582 582 if profiles & set(ctx.files()):
583 583 origstatus = repo.status()
584 584 origsparsematch = matcher(repo)
585 585 refreshwdir(repo, origstatus, origsparsematch, force=True)
586 586
587 587 prunetemporaryincludes(repo)


def _updateconfigandrefreshwdir(
    repo, includes, excludes, profiles, force=False, removing=False
):
    """Update the sparse config and working directory state."""
    raw = repo.vfs.tryread(b'sparse')
    oldincludes, oldexcludes, oldprofiles = parseconfig(repo.ui, raw, b'sparse')

    oldstatus = repo.status()
    oldmatch = matcher(repo)
    oldrequires = set(repo.requirements)

    # TODO remove this try..except once the matcher integrates better
    # with dirstate. We currently have to write the updated config
    # because that will invalidate the matcher cache and force a
    # re-read. We ideally want to update the cached matcher on the
    # repo instance then flush the new config to disk once wdir is
    # updated. But this requires massive rework to matcher() and its
    # consumers.

    if b'exp-sparse' in oldrequires and removing:
        repo.requirements.discard(b'exp-sparse')
        scmutil.writereporequirements(repo)
    elif b'exp-sparse' not in oldrequires:
        repo.requirements.add(b'exp-sparse')
        scmutil.writereporequirements(repo)

    try:
        writeconfig(repo, includes, excludes, profiles)
        return refreshwdir(repo, oldstatus, oldmatch, force=force)
    except Exception:
        if repo.requirements != oldrequires:
            repo.requirements.clear()
            repo.requirements |= oldrequires
            scmutil.writereporequirements(repo)
        writeconfig(repo, oldincludes, oldexcludes, oldprofiles)
        raise
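

# A minimal hedged sketch of calling the transactional helper above to
# enable a profile programmatically. `_enableprofile` is a hypothetical
# name, but parseconfig() and _updateconfigandrefreshwdir() are this
# module's own functions; on failure the helper restores the previous
# requirements and config before re-raising.
def _enableprofile(repo, profile):
    with repo.wlock():
        raw = repo.vfs.tryread(b'sparse')
        includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
        profiles = set(profiles) | {profile}
        return _updateconfigandrefreshwdir(repo, includes, excludes, profiles)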


def clearrules(repo, force=False):
    """Clears include/exclude rules from the sparse config.

    The remaining sparse config only has profiles, if defined. The working
    directory is refreshed, as needed.
    """
    with repo.wlock():
        raw = repo.vfs.tryread(b'sparse')
        includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')

        if not includes and not excludes:
            return

        _updateconfigandrefreshwdir(repo, set(), set(), profiles, force=force)
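

# Hedged usage note: a direct call drops only the local rules while keeping
# any enabled profiles; in the sparse extension this is assumed to back a
# command flag along the lines of `hg debugsparse --clear-rules`.
#
#     clearrules(repo, force=False)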


def importfromfiles(repo, opts, paths, force=False):
    """Import sparse config rules from files.

    The updated sparse config is written out and the working directory
    is refreshed, as needed.
    """
    with repo.wlock():
        # read current configuration
        raw = repo.vfs.tryread(b'sparse')
        includes, excludes, profiles = parseconfig(repo.ui, raw, b'sparse')
        aincludes, aexcludes, aprofiles = activeconfig(repo)

        # Import rules on top; only take in rules that are not yet
        # part of the active rules.
        changed = False
        for p in paths:
            with util.posixfile(util.expandpath(p), mode=b'rb') as fh:
                raw = fh.read()

            iincludes, iexcludes, iprofiles = parseconfig(
                repo.ui, raw, b'sparse'
            )
            oldsize = len(includes) + len(excludes) + len(profiles)
            includes.update(iincludes - aincludes)
            excludes.update(iexcludes - aexcludes)
            profiles.update(iprofiles - aprofiles)
            if len(includes) + len(excludes) + len(profiles) > oldsize:
                changed = True

        profilecount = includecount = excludecount = 0
        fcounts = (0, 0, 0)

        if changed:
            profilecount = len(profiles - aprofiles)
            includecount = len(includes - aincludes)
            excludecount = len(excludes - aexcludes)

            fcounts = map(
                len,
                _updateconfigandrefreshwdir(
                    repo, includes, excludes, profiles, force=force
                ),
            )

        printchanges(
            repo.ui, opts, profilecount, includecount, excludecount, *fcounts
        )
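

# Hedged sketch of a rules file in the format parseconfig() is assumed to
# accept (profile references plus [include]/[exclude] sections); the path
# b'team.sparse' is illustrative only:
#
#     %include tools/base.sparse
#     [include]
#     src/
#     [exclude]
#     src/tests/
#
# importfromfiles(repo, {}, [b'team.sparse'], force=False)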


def updateconfig(
    repo,
    pats,
    opts,
    include=False,
    exclude=False,
    reset=False,
    delete=False,
    enableprofile=False,
    disableprofile=False,
    force=False,
    usereporootpaths=False,
):
    """Perform a sparse config update.

    Only one of the actions may be performed.

    The new config is written out and a working directory refresh is performed.
    """
    with repo.wlock():
        raw = repo.vfs.tryread(b'sparse')
        oldinclude, oldexclude, oldprofiles = parseconfig(
            repo.ui, raw, b'sparse'
        )

        if reset:
            newinclude = set()
            newexclude = set()
            newprofiles = set()
        else:
            newinclude = set(oldinclude)
            newexclude = set(oldexclude)
            newprofiles = set(oldprofiles)

        if any(os.path.isabs(pat) for pat in pats):
            raise error.Abort(_(b'paths cannot be absolute'))

        if not usereporootpaths:
            # let's treat paths as relative to cwd
            root, cwd = repo.root, repo.getcwd()
            abspats = []
            for kindpat in pats:
                kind, pat = matchmod._patsplit(kindpat, None)
                if kind in matchmod.cwdrelativepatternkinds or kind is None:
                    ap = (kind + b':' if kind else b'') + pathutil.canonpath(
                        root, cwd, pat
                    )
                    abspats.append(ap)
                else:
                    abspats.append(kindpat)
            pats = abspats

        if include:
            newinclude.update(pats)
        elif exclude:
            newexclude.update(pats)
        elif enableprofile:
            newprofiles.update(pats)
        elif disableprofile:
            newprofiles.difference_update(pats)
        elif delete:
            newinclude.difference_update(pats)
            newexclude.difference_update(pats)

        profilecount = len(newprofiles - oldprofiles) - len(
            oldprofiles - newprofiles
        )
        includecount = len(newinclude - oldinclude) - len(
            oldinclude - newinclude
        )
        excludecount = len(newexclude - oldexclude) - len(
            oldexclude - newexclude
        )

        fcounts = map(
            len,
            _updateconfigandrefreshwdir(
                repo,
                newinclude,
                newexclude,
                newprofiles,
                force=force,
                removing=reset,
            ),
        )

        printchanges(
            repo.ui, opts, profilecount, includecount, excludecount, *fcounts
        )
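

# Hedged usage sketch: how a command implementation might route a user
# request through updateconfig(); exactly one action keyword should be
# truthy per call. `_addincluderules` is a hypothetical helper name, and
# the counts reported afterwards are net deltas (rules added minus rules
# removed).
def _addincluderules(repo, pats, opts, force=False):
    # Roughly equivalent in spirit to `hg debugsparse --include PATTERN`
    # (flag name assumed from the sparse extension).
    updateconfig(repo, pats, opts, include=True, force=force)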


def printchanges(
    ui,
    opts,
    profilecount=0,
    includecount=0,
    excludecount=0,
    added=0,
    dropped=0,
    conflicting=0,
):
    """Print output summarizing sparse config changes."""
    with ui.formatter(b'sparse', opts) as fm:
        fm.startitem()
        fm.condwrite(
            ui.verbose,
            b'profiles_added',
            _(b'Profiles changed: %d\n'),
            profilecount,
        )
        fm.condwrite(
            ui.verbose,
            b'include_rules_added',
            _(b'Include rules changed: %d\n'),
            includecount,
        )
        fm.condwrite(
            ui.verbose,
            b'exclude_rules_added',
            _(b'Exclude rules changed: %d\n'),
            excludecount,
        )

        # In 'plain' verbose mode, mergemod.applyupdates already outputs what
        # files are added or removed outside of the templating formatter
        # framework. No point in repeating ourselves in that case.
        if not fm.isplain():
            fm.condwrite(
                ui.verbose, b'files_added', _(b'Files added: %d\n'), added
            )
            fm.condwrite(
                ui.verbose, b'files_dropped', _(b'Files dropped: %d\n'), dropped
            )
            fm.condwrite(
                ui.verbose,
                b'files_conflicting',
                _(b'Files conflicting: %d\n'),
                conflicting,
            )
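

# Hedged illustration of the verbose, non-plain output produced by the
# formatter calls above (the counts are invented for the example):
#
#     Profiles changed: 1
#     Include rules changed: 2
#     Exclude rules changed: 0
#     Files added: 120
#     Files dropped: 7
#     Files conflicting: 0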