sparse: move post commit actions into core...
Gregory Szorc
r33353:160efb55 default
@@ -1,567 +1,542
1 1 # sparse.py - allow sparse checkouts of the working directory
2 2 #
3 3 # Copyright 2014 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """allow sparse checkouts of the working directory (EXPERIMENTAL)
9 9
10 10 (This extension is not yet protected by backwards compatibility
11 11 guarantees. Any aspect may break in future releases until this
12 12 notice is removed.)
13 13
14 14 This extension allows the working directory to only consist of a
15 15 subset of files for the revision. This allows specific files or
16 16 directories to be explicitly included or excluded. Many repository
17 17 operations have performance proportional to the number of files in
18 18 the working directory. So only realizing a subset of files in the
19 19 working directory can improve performance.
20 20
21 21 Sparse Config Files
22 22 -------------------
23 23
24 24 The set of files that are part of a sparse checkout are defined by
25 25 a sparse config file. The file defines 3 things: includes (files to
26 26 include in the sparse checkout), excludes (files to exclude from the
27 27 sparse checkout), and profiles (links to other config files).
28 28
29 29 The file format is newline delimited. Empty lines and lines beginning
30 30 with ``#`` are ignored.
31 31
32 32 Lines beginning with ``%include `` denote another sparse config file
33 33 to include. e.g. ``%include tests.sparse``. The filename is relative
34 34 to the repository root.
35 35
36 36 The special lines ``[include]`` and ``[exclude]`` denote the section
37 37 for includes and excludes that follow, respectively. It is illegal to
38 38 have ``[include]`` after ``[exclude]``. If no sections are defined,
39 39 entries are assumed to be in the ``[include]`` section.
40 40
41 41 Non-special lines resemble file patterns to be added to either includes
42 42 or excludes. The syntax of these lines is documented by :hg:`help patterns`.
43 43 Patterns are interpreted as ``glob:`` by default and match against the
44 44 root of the repository.
45 45
46 46 Exclusion patterns take precedence over inclusion patterns. So even
47 47 if a file is explicitly included, an ``[exclude]`` entry can remove it.
48 48
49 49 For example, say you have a repository with 3 directories, ``frontend/``,
50 50 ``backend/``, and ``tools/``. ``frontend/`` and ``backend/`` correspond
51 51 to different projects and it is uncommon for someone working on one
52 52 to need the files for the other. But ``tools/`` contains files shared
53 53 between both projects. Your sparse config files may resemble::
54 54
55 55 # frontend.sparse
56 56 frontend/**
57 57 tools/**
58 58
59 59 # backend.sparse
60 60 backend/**
61 61 tools/**
62 62
63 63 Say the backend grows in size. Or there's a directory with thousands
64 64 of files you wish to exclude. You can modify the profile to exclude
65 65 certain files::
66 66
67 67 [include]
68 68 backend/**
69 69 tools/**
70 70
71 71 [exclude]
72 72 tools/tests/**
73 73 """
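To make the format concrete: a hypothetical profile combining all three
constructs documented above (the filenames are invented for illustration)
could look like::

    # base.sparse - hypothetical profile shared by both teams
    %include tools.sparse

    [include]
    backend/**
    docs/*.txt

    [exclude]
    backend/vendored/**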
74 74
75 75 from __future__ import absolute_import
76 76
77 77 from mercurial.i18n import _
78 78 from mercurial.node import nullid
79 79 from mercurial import (
80 80 cmdutil,
81 81 commands,
82 context,
83 82 dirstate,
84 83 error,
85 84 extensions,
86 85 hg,
87 86 localrepo,
88 87 match as matchmod,
89 88 registrar,
90 89 sparse,
91 90 util,
92 91 )
93 92
94 93 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
95 94 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
96 95 # be specifying the version(s) of Mercurial they are tested with, or
97 96 # leave the attribute unspecified.
98 97 testedwith = 'ships-with-hg-core'
99 98
100 99 cmdtable = {}
101 100 command = registrar.command(cmdtable)
102 101
103 def uisetup(ui):
104 _setupcommit(ui)
105
106 102 def extsetup(ui):
107 103 sparse.enabled = True
108 104
109 105 _setupclone(ui)
110 106 _setuplog(ui)
111 107 _setupadd(ui)
112 108 _setupdirstate(ui)
113 109
114 110 def reposetup(ui, repo):
115 111 if not util.safehasattr(repo, 'dirstate'):
116 112 return
117 113
118 114 if 'dirstate' in repo._filecache:
119 115 repo.dirstate.repo = repo
120 116
121 117 def replacefilecache(cls, propname, replacement):
122 118 """Replace a filecache property with a new class. This allows changing the
123 119 cache invalidation condition."""
124 120 origcls = cls
125 121 assert callable(replacement)
126 122 while cls is not object:
127 123 if propname in cls.__dict__:
128 124 orig = cls.__dict__[propname]
129 125 setattr(cls, propname, replacement(orig))
130 126 break
131 127 cls = cls.__bases__[0]
132 128
133 129 if cls is object:
134 130 raise AttributeError(_("type '%s' has no property '%s'") % (origcls,
135 131 propname))
136 132
137 def _setupcommit(ui):
138 def _refreshoncommit(orig, self, node):
139 """Refresh the checkout when commits touch .hgsparse
140 """
141 orig(self, node)
142 repo = self._repo
143
144 ctx = repo[node]
145 profiles = sparse.patternsforrev(repo, ctx.rev())[2]
146
147 # profiles will only have data if sparse is enabled.
148 if set(profiles) & set(ctx.files()):
149 origstatus = repo.status()
150 origsparsematch = sparse.matcher(repo)
151 sparse.refreshwdir(repo, origstatus, origsparsematch, force=True)
152
153 sparse.prunetemporaryincludes(repo)
154
155 extensions.wrapfunction(context.committablectx, 'markcommitted',
156 _refreshoncommit)
157
158 133 def _setuplog(ui):
159 134 entry = commands.table['^log|history']
160 135 entry[1].append(('', 'sparse', None,
161 136 "limit to changesets affecting the sparse checkout"))
162 137
163 138 def _logrevs(orig, repo, opts):
164 139 revs = orig(repo, opts)
165 140 if opts.get('sparse'):
166 141 sparsematch = sparse.matcher(repo)
167 142 def ctxmatch(rev):
168 143 ctx = repo[rev]
169 144 return any(f for f in ctx.files() if sparsematch(f))
170 145 revs = revs.filter(ctxmatch)
171 146 return revs
172 147 extensions.wrapfunction(cmdutil, '_logrevs', _logrevs)
173 148
174 149 def _clonesparsecmd(orig, ui, repo, *args, **opts):
175 150 include_pat = opts.get('include')
176 151 exclude_pat = opts.get('exclude')
177 152 enableprofile_pat = opts.get('enable_profile')
178 153 include = exclude = enableprofile = False
179 154 if include_pat:
180 155 pat = include_pat
181 156 include = True
182 157 if exclude_pat:
183 158 pat = exclude_pat
184 159 exclude = True
185 160 if enableprofile_pat:
186 161 pat = enableprofile_pat
187 162 enableprofile = True
188 163 if sum([include, exclude, enableprofile]) > 1:
189 164 raise error.Abort(_("too many flags specified."))
190 165 if include or exclude or enableprofile:
191 166 def clonesparse(orig, self, node, overwrite, *args, **kwargs):
192 167 _config(self.ui, self.unfiltered(), pat, {}, include=include,
193 168 exclude=exclude, enableprofile=enableprofile)
194 169 return orig(self, node, overwrite, *args, **kwargs)
195 170 extensions.wrapfunction(hg, 'updaterepo', clonesparse)
196 171 return orig(ui, repo, *args, **opts)
197 172
198 173 def _setupclone(ui):
199 174 entry = commands.table['^clone']
200 175 entry[1].append(('', 'enable-profile', [],
201 176 'enable a sparse profile'))
202 177 entry[1].append(('', 'include', [],
203 178 'include sparse pattern'))
204 179 entry[1].append(('', 'exclude', [],
205 180 'exclude sparse pattern'))
206 181 extensions.wrapcommand(commands.table, 'clone', _clonesparsecmd)
207 182
208 183 def _setupadd(ui):
209 184 entry = commands.table['^add']
210 185 entry[1].append(('s', 'sparse', None,
211 186 'also include directories of added files in sparse config'))
212 187
213 188 def _add(orig, ui, repo, *pats, **opts):
214 189 if opts.get('sparse'):
215 190 dirs = set()
216 191 for pat in pats:
217 192 dirname, basename = util.split(pat)
218 193 dirs.add(dirname)
219 194 _config(ui, repo, list(dirs), opts, include=True)
220 195 return orig(ui, repo, *pats, **opts)
221 196
222 197 extensions.wrapcommand(commands.table, 'add', _add)
223 198
224 199 def _setupdirstate(ui):
225 200 """Modify the dirstate to prevent stat'ing excluded files,
226 201 and to prevent modifications to files outside the checkout.
227 202 """
228 203
229 204 def _dirstate(orig, repo):
230 205 dirstate = orig(repo)
231 206 dirstate.repo = repo
232 207 return dirstate
233 208 extensions.wrapfunction(
234 209 localrepo.localrepository.dirstate, 'func', _dirstate)
235 210
236 211 # The atrocity below is needed to wrap dirstate._ignore. It is a cached
237 212 # property, which means normal function wrapping doesn't work.
238 213 class ignorewrapper(object):
239 214 def __init__(self, orig):
240 215 self.orig = orig
241 216 self.origignore = None
242 217 self.func = None
243 218 self.sparsematch = None
244 219
245 220 def __get__(self, obj, type=None):
246 221 repo = obj.repo
247 222 origignore = self.orig.__get__(obj)
248 223
249 224 sparsematch = sparse.matcher(repo)
250 225 if sparsematch.always():
251 226 return origignore
252 227
253 228 if self.sparsematch != sparsematch or self.origignore != origignore:
254 229 self.func = matchmod.unionmatcher([
255 230 origignore, matchmod.negatematcher(sparsematch)])
256 231 self.sparsematch = sparsematch
257 232 self.origignore = origignore
258 233 return self.func
259 234
260 235 def __set__(self, obj, value):
261 236 return self.orig.__set__(obj, value)
262 237
263 238 def __delete__(self, obj):
264 239 return self.orig.__delete__(obj)
265 240
266 241 replacefilecache(dirstate.dirstate, '_ignore', ignorewrapper)
267 242
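The descriptor trick above can be reduced to a standalone sketch. None of
the names below are Mercurial API; this is a minimal illustration of how a
cached property is swapped for an object whose ``__get__`` delegates to the
original descriptor and augments its result::

    class cachedprop(object):
        """Toy stand-in for the filecache descriptor (hypothetical)."""
        def __init__(self, func):
            self.func = func
        def __get__(self, obj, type=None):
            return self.func(obj)

    class toydirstate(object):
        @cachedprop
        def _ignore(self):
            return ['.orig']            # the "original" ignore result

    orig = toydirstate.__dict__['_ignore']   # grab the raw descriptor

    class toywrapper(object):
        def __get__(self, obj, type=None):
            # augment the original result, much as ignorewrapper unions
            # the ignore matcher with the negated sparse matcher
            return orig.__get__(obj) + ['outside-sparse']

    toydirstate._ignore = toywrapper()
    print(toydirstate()._ignore)        # ['.orig', 'outside-sparse']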
268 243 # dirstate.rebuild should not add non-matching files
269 244 def _rebuild(orig, self, parent, allfiles, changedfiles=None):
270 245 matcher = sparse.matcher(self.repo)
271 246 if not matcher.always():
272 247 allfiles = allfiles.matches(matcher)
273 248 if changedfiles:
274 249 changedfiles = [f for f in changedfiles if matcher(f)]
275 250
276 251 if changedfiles is not None:
277 252 # In _rebuild, these files will be deleted from the dirstate
278 253 # when they are not found to be in allfiles
279 254 dirstatefilestoremove = set(f for f in self if not matcher(f))
280 255 changedfiles = dirstatefilestoremove.union(changedfiles)
281 256
282 257 return orig(self, parent, allfiles, changedfiles)
283 258 extensions.wrapfunction(dirstate.dirstate, 'rebuild', _rebuild)
284 259
285 260 # Prevent adding files that are outside the sparse checkout
286 261 editfuncs = ['normal', 'add', 'normallookup', 'copy', 'remove', 'merge']
287 262 hint = _('include file with `hg debugsparse --include <pattern>` or use ' +
288 263 '`hg add -s <file>` to include file directory while adding')
289 264 for func in editfuncs:
290 265 def _wrapper(orig, self, *args):
291 266 repo = self.repo
292 267 sparsematch = sparse.matcher(repo)
293 268 if not sparsematch.always():
294 269 dirstate = repo.dirstate
295 270 for f in args:
296 271 if (f is not None and not sparsematch(f) and
297 272 f not in dirstate):
298 273 raise error.Abort(_("cannot add '%s' - it is outside "
299 274 "the sparse checkout") % f,
300 275 hint=hint)
301 276 return orig(self, *args)
302 277 extensions.wrapfunction(dirstate.dirstate, func, _wrapper)
303 278
304 279 @command('^debugsparse', [
305 280 ('I', 'include', False, _('include files in the sparse checkout')),
306 281 ('X', 'exclude', False, _('exclude files in the sparse checkout')),
307 282 ('d', 'delete', False, _('delete an include/exclude rule')),
308 283 ('f', 'force', False, _('allow changing rules even with pending changes')),
309 284 ('', 'enable-profile', False, _('enables the specified profile')),
310 285 ('', 'disable-profile', False, _('disables the specified profile')),
311 286 ('', 'import-rules', False, _('imports rules from a file')),
312 287 ('', 'clear-rules', False, _('clears local include/exclude rules')),
313 288 ('', 'refresh', False, _('updates the working directory after sparseness changes')),
314 289 ('', 'reset', False, _('makes the repo full again')),
315 290 ] + commands.templateopts,
316 291 _('[--OPTION] PATTERN...'))
317 292 def debugsparse(ui, repo, *pats, **opts):
318 293 """make the current checkout sparse, or edit the existing checkout
319 294
320 295 The sparse command is used to make the current checkout sparse.
321 296 This means files that don't meet the sparse condition will not be
322 297 written to disk, or show up in any working copy operations. It does
323 298 not affect files in history in any way.
324 299
325 300 Passing no arguments prints the currently applied sparse rules.
326 301
327 302 --include and --exclude are used to add and remove files from the sparse
328 303 checkout. The effects of adding an include or exclude rule are applied
329 304 immediately. If applying the new rule would cause a file with pending
330 305 changes to be added or removed, the command will fail. Pass --force to
331 306 force a rule change even with pending changes (the changes on disk will
332 307 be preserved).
333 308
334 309 --delete removes an existing include/exclude rule. The effects are
335 310 immediate.
336 311
337 312 --refresh refreshes the files on disk based on the sparse rules. This is
338 313 only necessary if .hg/sparse was changed by hand.
339 314
340 315 --enable-profile and --disable-profile accept a path to a .hgsparse file.
341 316 This allows defining sparse checkouts and tracking them inside the
342 317 repository. This is useful for defining commonly used sparse checkouts for
343 318 many people to use. As the profile definition changes over time, the sparse
344 319 checkout will automatically be updated appropriately, depending on which
345 320 changeset is checked out. Changes to .hgsparse are not applied until they
346 321 have been committed.
347 322
348 323 --import-rules accepts a path to a file containing rules in the .hgsparse
349 324 format, allowing you to add --include, --exclude and --enable-profile rules
350 325 in bulk. Like the --include, --exclude and --enable-profile switches, the
351 326 changes are applied immediately.
352 327
353 328 --clear-rules removes all local include and exclude rules, while leaving
354 329 any enabled profiles in place.
355 330
356 331 Returns 0 if editing the sparse checkout succeeds.
357 332 """
358 333 include = opts.get('include')
359 334 exclude = opts.get('exclude')
360 335 force = opts.get('force')
361 336 enableprofile = opts.get('enable_profile')
362 337 disableprofile = opts.get('disable_profile')
363 338 importrules = opts.get('import_rules')
364 339 clearrules = opts.get('clear_rules')
365 340 delete = opts.get('delete')
366 341 refresh = opts.get('refresh')
367 342 reset = opts.get('reset')
368 343 count = sum([include, exclude, enableprofile, disableprofile, delete,
369 344 importrules, refresh, clearrules, reset])
370 345 if count > 1:
371 346 raise error.Abort(_("too many flags specified"))
372 347
373 348 if count == 0:
374 349 if repo.vfs.exists('sparse'):
375 350 ui.status(repo.vfs.read("sparse") + "\n")
376 351 temporaryincludes = sparse.readtemporaryincludes(repo)
377 352 if temporaryincludes:
378 353 ui.status(_("Temporarily Included Files (for merge/rebase):\n"))
379 354 ui.status(("\n".join(temporaryincludes) + "\n"))
380 355 else:
381 356 ui.status(_('repo is not sparse\n'))
382 357 return
383 358
384 359 if include or exclude or delete or reset or enableprofile or disableprofile:
385 360 _config(ui, repo, pats, opts, include=include, exclude=exclude,
386 361 reset=reset, delete=delete, enableprofile=enableprofile,
387 362 disableprofile=disableprofile, force=force)
388 363
389 364 if importrules:
390 365 _import(ui, repo, pats, opts, force=force)
391 366
392 367 if clearrules:
393 368 _clear(ui, repo, pats, force=force)
394 369
395 370 if refresh:
396 371 try:
397 372 wlock = repo.wlock()
398 373 fcounts = map(
399 374 len,
400 375 sparse.refreshwdir(repo, repo.status(), sparse.matcher(repo),
401 376 force=force))
402 377 _verbose_output(ui, opts, 0, 0, 0, *fcounts)
403 378 finally:
404 379 wlock.release()
405 380
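A hypothetical session with this command, using only the flags defined
above (the output is abbreviated and invented for illustration)::

    $ hg debugsparse --include 'backend/**'
    $ hg debugsparse --enable-profile webteam.sparse
    $ hg debugsparse                  # no flags: print the current rules
    %include webteam.sparse
    [include]
    backend/**
    $ hg debugsparse --reset          # make the checkout full again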
406 381 def _config(ui, repo, pats, opts, include=False, exclude=False, reset=False,
407 382 delete=False, enableprofile=False, disableprofile=False,
408 383 force=False):
409 384 """
410 385 Perform a sparse config update. Only one of the kwargs may be specified.
411 386 """
412 387 wlock = repo.wlock()
413 388 try:
414 389 oldsparsematch = sparse.matcher(repo)
415 390
416 391 raw = repo.vfs.tryread('sparse')
417 392 if raw:
418 393 oldinclude, oldexclude, oldprofiles = map(
419 394 set, sparse.parseconfig(ui, raw))
420 395 else:
421 396 oldinclude = set()
422 397 oldexclude = set()
423 398 oldprofiles = set()
424 399
425 400 try:
426 401 if reset:
427 402 newinclude = set()
428 403 newexclude = set()
429 404 newprofiles = set()
430 405 else:
431 406 newinclude = set(oldinclude)
432 407 newexclude = set(oldexclude)
433 408 newprofiles = set(oldprofiles)
434 409
435 410 oldstatus = repo.status()
436 411
437 412 if any(pat.startswith('/') for pat in pats):
438 413 ui.warn(_('warning: paths cannot start with /, ignoring: %s\n')
439 414 % ([pat for pat in pats if pat.startswith('/')]))
440 415 elif include:
441 416 newinclude.update(pats)
442 417 elif exclude:
443 418 newexclude.update(pats)
444 419 elif enableprofile:
445 420 newprofiles.update(pats)
446 421 elif disableprofile:
447 422 newprofiles.difference_update(pats)
448 423 elif delete:
449 424 newinclude.difference_update(pats)
450 425 newexclude.difference_update(pats)
451 426
452 427 sparse.writeconfig(repo, newinclude, newexclude, newprofiles)
453 428
454 429 fcounts = map(
455 430 len,
456 431 sparse.refreshwdir(repo, oldstatus, oldsparsematch,
457 432 force=force))
458 433
459 434 profilecount = (len(newprofiles - oldprofiles) -
460 435 len(oldprofiles - newprofiles))
461 436 includecount = (len(newinclude - oldinclude) -
462 437 len(oldinclude - newinclude))
463 438 excludecount = (len(newexclude - oldexclude) -
464 439 len(oldexclude - newexclude))
465 440 _verbose_output(
466 441 ui, opts, profilecount, includecount, excludecount, *fcounts)
467 442 except Exception:
468 443 sparse.writeconfig(repo, oldinclude, oldexclude, oldprofiles)
469 444 raise
470 445 finally:
471 446 wlock.release()
472 447
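The profile/include/exclude counts passed to ``_verbose_output`` above are
plain set differences: net additions minus net removals. A minimal
standalone illustration::

    oldinclude = {'a/**', 'b/**'}
    newinclude = {'b/**', 'c/**', 'd/**'}

    includecount = (len(newinclude - oldinclude) -
                    len(oldinclude - newinclude))
    print(includecount)   # 1: two rules added, one rule dropped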
473 448 def _import(ui, repo, files, opts, force=False):
474 449 with repo.wlock():
475 450 # load union of currently active profiles
476 451 revs = [repo.changelog.rev(node) for node in
477 452 repo.dirstate.parents() if node != nullid]
478 453
479 454 # read current configuration
480 455 raw = repo.vfs.tryread('sparse')
481 456 oincludes, oexcludes, oprofiles = sparse.parseconfig(ui, raw)
482 457 includes, excludes, profiles = map(
483 458 set, (oincludes, oexcludes, oprofiles))
484 459
485 460 # all active rules
486 461 aincludes, aexcludes, aprofiles = set(), set(), set()
487 462 for rev in revs:
488 463 rincludes, rexcludes, rprofiles = sparse.patternsforrev(repo, rev)
489 464 aincludes.update(rincludes)
490 465 aexcludes.update(rexcludes)
491 466 aprofiles.update(rprofiles)
492 467
493 468 # import rules on top; only take in rules that are not yet
494 469 # part of the active rules.
495 470 changed = False
496 471 for file in files:
497 472 with util.posixfile(util.expandpath(file)) as importfile:
498 473 iincludes, iexcludes, iprofiles = sparse.parseconfig(
499 474 ui, importfile.read())
500 475 oldsize = len(includes) + len(excludes) + len(profiles)
501 476 includes.update(iincludes - aincludes)
502 477 excludes.update(iexcludes - aexcludes)
503 478 profiles.update(set(iprofiles) - aprofiles)
504 479 if len(includes) + len(excludes) + len(profiles) > oldsize:
505 480 changed = True
506 481
507 482 profilecount = includecount = excludecount = 0
508 483 fcounts = (0, 0, 0)
509 484
510 485 if changed:
511 486 profilecount = len(profiles - aprofiles)
512 487 includecount = len(includes - aincludes)
513 488 excludecount = len(excludes - aexcludes)
514 489
515 490 oldstatus = repo.status()
516 491 oldsparsematch = sparse.matcher(repo)
517 492 sparse.writeconfig(repo, includes, excludes, profiles)
518 493
519 494 try:
520 495 fcounts = map(
521 496 len,
522 497 sparse.refreshwdir(repo, oldstatus, oldsparsematch,
523 498 force=force))
524 499 except Exception:
525 500 sparse.writeconfig(repo, oincludes, oexcludes, oprofiles)
526 501 raise
527 502
528 503 _verbose_output(ui, opts, profilecount, includecount, excludecount,
529 504 *fcounts)
530 505
531 506 def _clear(ui, repo, files, force=False):
532 507 with repo.wlock():
533 508 raw = repo.vfs.tryread('sparse')
534 509 includes, excludes, profiles = sparse.parseconfig(ui, raw)
535 510
536 511 if includes or excludes:
537 512 oldstatus = repo.status()
538 513 oldsparsematch = sparse.matcher(repo)
539 514 sparse.writeconfig(repo, set(), set(), profiles)
540 515 sparse.refreshwdir(repo, oldstatus, oldsparsematch, force)
541 516
542 517 def _verbose_output(ui, opts, profilecount, includecount, excludecount, added,
543 518 dropped, lookup):
544 519 """Produce --verbose and templatable output
545 520
546 521 This specifically enables -Tjson, providing machine-readable stats on how
547 522 the sparse profile changed.
548 523
549 524 """
550 525 with ui.formatter('sparse', opts) as fm:
551 526 fm.startitem()
552 527 fm.condwrite(ui.verbose, 'profiles_added', 'Profile # change: %d\n',
553 528 profilecount)
554 529 fm.condwrite(ui.verbose, 'include_rules_added',
555 530 'Include rule # change: %d\n', includecount)
556 531 fm.condwrite(ui.verbose, 'exclude_rules_added',
557 532 'Exclude rule # change: %d\n', excludecount)
558 533 # In 'plain' verbose mode, mergemod.applyupdates already outputs what
559 534 # files are added or removed outside of the templating formatter
560 535 # framework. No point in repeating ourselves in that case.
561 536 if not fm.isplain():
562 537 fm.condwrite(ui.verbose, 'files_added', 'Files added: %d\n',
563 538 added)
564 539 fm.condwrite(ui.verbose, 'files_dropped', 'Files dropped: %d\n',
565 540 dropped)
566 541 fm.condwrite(ui.verbose, 'files_conflicting',
567 542 'Files conflicting: %d\n', lookup)
@@ -1,2316 +1,2322
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import os
12 12 import re
13 13 import stat
14 14
15 15 from .i18n import _
16 16 from .node import (
17 17 addednodeid,
18 18 bin,
19 19 hex,
20 20 modifiednodeid,
21 21 nullid,
22 22 nullrev,
23 23 short,
24 24 wdirid,
25 25 wdirnodes,
26 26 wdirrev,
27 27 )
28 28 from . import (
29 29 encoding,
30 30 error,
31 31 fileset,
32 32 match as matchmod,
33 33 mdiff,
34 34 obsolete as obsmod,
35 35 patch,
36 36 phases,
37 37 pycompat,
38 38 repoview,
39 39 revlog,
40 40 scmutil,
41 sparse,
41 42 subrepo,
42 43 util,
43 44 )
44 45
45 46 propertycache = util.propertycache
46 47
47 48 nonascii = re.compile(r'[^\x21-\x7f]').search
48 49
49 50 class basectx(object):
50 51 """A basectx object represents the common logic for its children:
51 52 changectx: read-only context that is already present in the repo,
52 53 workingctx: a context that represents the working directory and can
53 54 be committed,
54 55 memctx: a context that represents changes in-memory and can also
55 56 be committed."""
56 57 def __new__(cls, repo, changeid='', *args, **kwargs):
57 58 if isinstance(changeid, basectx):
58 59 return changeid
59 60
60 61 o = super(basectx, cls).__new__(cls)
61 62
62 63 o._repo = repo
63 64 o._rev = nullrev
64 65 o._node = nullid
65 66
66 67 return o
67 68
68 69 def __bytes__(self):
69 70 return short(self.node())
70 71
71 72 __str__ = encoding.strmethod(__bytes__)
72 73
73 74 def __int__(self):
74 75 return self.rev()
75 76
76 77 def __repr__(self):
77 78 return r"<%s %s>" % (type(self).__name__, str(self))
78 79
79 80 def __eq__(self, other):
80 81 try:
81 82 return type(self) == type(other) and self._rev == other._rev
82 83 except AttributeError:
83 84 return False
84 85
85 86 def __ne__(self, other):
86 87 return not (self == other)
87 88
88 89 def __contains__(self, key):
89 90 return key in self._manifest
90 91
91 92 def __getitem__(self, key):
92 93 return self.filectx(key)
93 94
94 95 def __iter__(self):
95 96 return iter(self._manifest)
96 97
97 98 def _buildstatusmanifest(self, status):
98 99 """Builds a manifest that includes the given status results, if this is
99 100 a working copy context. For non-working copy contexts, it just returns
100 101 the normal manifest."""
101 102 return self.manifest()
102 103
103 104 def _matchstatus(self, other, match):
104 105 """return match.always if match is none
105 106
106 107 This internal method provides a way for child objects to override the
107 108 match operator.
108 109 """
109 110 return match or matchmod.always(self._repo.root, self._repo.getcwd())
110 111
111 112 def _buildstatus(self, other, s, match, listignored, listclean,
112 113 listunknown):
113 114 """build a status with respect to another context"""
114 115 # Load earliest manifest first for caching reasons. More specifically,
115 116 # if you have revisions 1000 and 1001, 1001 is probably stored as a
116 117 # delta against 1000. Thus, if you read 1000 first, we'll reconstruct
117 118 # 1000 and cache it so that when you read 1001, we just need to apply a
118 119 # delta to what's in the cache. So that's one full reconstruction + one
119 120 # delta application.
120 121 mf2 = None
121 122 if self.rev() is not None and self.rev() < other.rev():
122 123 mf2 = self._buildstatusmanifest(s)
123 124 mf1 = other._buildstatusmanifest(s)
124 125 if mf2 is None:
125 126 mf2 = self._buildstatusmanifest(s)
126 127
127 128 modified, added = [], []
128 129 removed = []
129 130 clean = []
130 131 deleted, unknown, ignored = s.deleted, s.unknown, s.ignored
131 132 deletedset = set(deleted)
132 133 d = mf1.diff(mf2, match=match, clean=listclean)
133 134 for fn, value in d.iteritems():
134 135 if fn in deletedset:
135 136 continue
136 137 if value is None:
137 138 clean.append(fn)
138 139 continue
139 140 (node1, flag1), (node2, flag2) = value
140 141 if node1 is None:
141 142 added.append(fn)
142 143 elif node2 is None:
143 144 removed.append(fn)
144 145 elif flag1 != flag2:
145 146 modified.append(fn)
146 147 elif node2 not in wdirnodes:
147 148 # When comparing files between two commits, we save time by
148 149 # not comparing the file contents when the nodeids differ.
149 150 # Note that this means we incorrectly report a reverted change
150 151 # to a file as a modification.
151 152 modified.append(fn)
152 153 elif self[fn].cmp(other[fn]):
153 154 modified.append(fn)
154 155 else:
155 156 clean.append(fn)
156 157
157 158 if removed:
158 159 # need to filter files if they are already reported as removed
159 160 unknown = [fn for fn in unknown if fn not in mf1 and
160 161 (not match or match(fn))]
161 162 ignored = [fn for fn in ignored if fn not in mf1 and
162 163 (not match or match(fn))]
163 164 # if they're deleted, don't report them as removed
164 165 removed = [fn for fn in removed if fn not in deletedset]
165 166
166 167 return scmutil.status(modified, added, removed, deleted, unknown,
167 168 ignored, clean)
168 169
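The shape consumed by the classification loop above can be shown with plain
dicts standing in for manifests (the nodeids are made up)::

    # A diff value of None means "clean"; otherwise it is
    # ((node1, flag1), (node2, flag2)).
    d = {
        'new.py':  ((None, ''), ('n2', '')),    # only in mf2 -> added
        'gone.py': (('n1', ''), (None, '')),    # only in mf1 -> removed
        'x.py':    (('n1', ''), ('n1', 'x')),   # flags differ -> modified
        'same.py': None,                        # unchanged -> clean
    }
    modified, added, removed, clean = [], [], [], []
    for fn, value in sorted(d.items()):
        if value is None:
            clean.append(fn)
            continue
        (node1, flag1), (node2, flag2) = value
        if node1 is None:
            added.append(fn)
        elif node2 is None:
            removed.append(fn)
        elif flag1 != flag2 or node1 != node2:
            modified.append(fn)
        else:
            clean.append(fn)
    print(modified, added, removed, clean)
    # ['x.py'] ['new.py'] ['gone.py'] ['same.py']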
169 170 @propertycache
170 171 def substate(self):
171 172 return subrepo.state(self, self._repo.ui)
172 173
173 174 def subrev(self, subpath):
174 175 return self.substate[subpath][1]
175 176
176 177 def rev(self):
177 178 return self._rev
178 179 def node(self):
179 180 return self._node
180 181 def hex(self):
181 182 return hex(self.node())
182 183 def manifest(self):
183 184 return self._manifest
184 185 def manifestctx(self):
185 186 return self._manifestctx
186 187 def repo(self):
187 188 return self._repo
188 189 def phasestr(self):
189 190 return phases.phasenames[self.phase()]
190 191 def mutable(self):
191 192 return self.phase() > phases.public
192 193
193 194 def getfileset(self, expr):
194 195 return fileset.getfileset(self, expr)
195 196
196 197 def obsolete(self):
197 198 """True if the changeset is obsolete"""
198 199 return self.rev() in obsmod.getrevs(self._repo, 'obsolete')
199 200
200 201 def extinct(self):
201 202 """True if the changeset is extinct"""
202 203 return self.rev() in obsmod.getrevs(self._repo, 'extinct')
203 204
204 205 def unstable(self):
205 206 """True if the changeset is not obsolete but its ancestors are"""
206 207 return self.rev() in obsmod.getrevs(self._repo, 'unstable')
207 208
208 209 def bumped(self):
209 210 """True if the changeset tries to be a successor of a public changeset
210 211
211 212 Only non-public and non-obsolete changesets may be bumped.
212 213 """
213 214 return self.rev() in obsmod.getrevs(self._repo, 'bumped')
214 215
215 216 def divergent(self):
216 217 """Is a successor of a changeset with multiple possible successors
217 218
218 219 Only non-public and non-obsolete changesets may be divergent.
219 220 """
220 221 return self.rev() in obsmod.getrevs(self._repo, 'divergent')
221 222
222 223 def troubled(self):
223 224 """True if the changeset is either unstable, bumped or divergent"""
224 225 return self.unstable() or self.bumped() or self.divergent()
225 226
226 227 def troubles(self):
227 228 """return the list of troubles affecting this changeset.
228 229
229 230 Troubles are returned as strings. Possible values are:
230 231 - unstable,
231 232 - bumped,
232 233 - divergent.
233 234 """
234 235 troubles = []
235 236 if self.unstable():
236 237 troubles.append('unstable')
237 238 if self.bumped():
238 239 troubles.append('bumped')
239 240 if self.divergent():
240 241 troubles.append('divergent')
241 242 return troubles
242 243
243 244 def parents(self):
244 245 """return contexts for each parent changeset"""
245 246 return self._parents
246 247
247 248 def p1(self):
248 249 return self._parents[0]
249 250
250 251 def p2(self):
251 252 parents = self._parents
252 253 if len(parents) == 2:
253 254 return parents[1]
254 255 return changectx(self._repo, nullrev)
255 256
256 257 def _fileinfo(self, path):
257 258 if r'_manifest' in self.__dict__:
258 259 try:
259 260 return self._manifest[path], self._manifest.flags(path)
260 261 except KeyError:
261 262 raise error.ManifestLookupError(self._node, path,
262 263 _('not found in manifest'))
263 264 if r'_manifestdelta' in self.__dict__ or path in self.files():
264 265 if path in self._manifestdelta:
265 266 return (self._manifestdelta[path],
266 267 self._manifestdelta.flags(path))
267 268 mfl = self._repo.manifestlog
268 269 try:
269 270 node, flag = mfl[self._changeset.manifest].find(path)
270 271 except KeyError:
271 272 raise error.ManifestLookupError(self._node, path,
272 273 _('not found in manifest'))
273 274
274 275 return node, flag
275 276
276 277 def filenode(self, path):
277 278 return self._fileinfo(path)[0]
278 279
279 280 def flags(self, path):
280 281 try:
281 282 return self._fileinfo(path)[1]
282 283 except error.LookupError:
283 284 return ''
284 285
285 286 def sub(self, path, allowcreate=True):
286 287 '''return a subrepo for the stored revision of path, never wdir()'''
287 288 return subrepo.subrepo(self, path, allowcreate=allowcreate)
288 289
289 290 def nullsub(self, path, pctx):
290 291 return subrepo.nullsubrepo(self, path, pctx)
291 292
292 293 def workingsub(self, path):
293 294 '''return a subrepo for the stored revision, or wdir if this is a wdir
294 295 context.
295 296 '''
296 297 return subrepo.subrepo(self, path, allowwdir=True)
297 298
298 299 def match(self, pats=None, include=None, exclude=None, default='glob',
299 300 listsubrepos=False, badfn=None):
300 301 r = self._repo
301 302 return matchmod.match(r.root, r.getcwd(), pats,
302 303 include, exclude, default,
303 304 auditor=r.nofsauditor, ctx=self,
304 305 listsubrepos=listsubrepos, badfn=badfn)
305 306
306 307 def diff(self, ctx2=None, match=None, **opts):
307 308 """Returns a diff generator for the given contexts and matcher"""
308 309 if ctx2 is None:
309 310 ctx2 = self.p1()
310 311 if ctx2 is not None:
311 312 ctx2 = self._repo[ctx2]
312 313 diffopts = patch.diffopts(self._repo.ui, opts)
313 314 return patch.diff(self._repo, ctx2, self, match=match, opts=diffopts)
314 315
315 316 def dirs(self):
316 317 return self._manifest.dirs()
317 318
318 319 def hasdir(self, dir):
319 320 return self._manifest.hasdir(dir)
320 321
321 322 def status(self, other=None, match=None, listignored=False,
322 323 listclean=False, listunknown=False, listsubrepos=False):
323 324 """return status of files between two nodes or node and working
324 325 directory.
325 326
326 327 If other is None, compare this node with working directory.
327 328
328 329 returns (modified, added, removed, deleted, unknown, ignored, clean)
329 330 """
330 331
331 332 ctx1 = self
332 333 ctx2 = self._repo[other]
333 334
334 335 # This next code block is, admittedly, fragile logic that tests for
335 336 # reversing the contexts and wouldn't need to exist if it weren't for
336 337 # the fast (and common) code path of comparing the working directory
337 338 # with its first parent.
338 339 #
339 340 # What we're aiming for here is the ability to call:
340 341 #
341 342 # workingctx.status(parentctx)
342 343 #
343 344 # If we always built the manifest for each context and compared those,
344 345 # then we'd be done. But the special case of the above call means we
345 346 # just copy the manifest of the parent.
346 347 reversed = False
347 348 if (not isinstance(ctx1, changectx)
348 349 and isinstance(ctx2, changectx)):
349 350 reversed = True
350 351 ctx1, ctx2 = ctx2, ctx1
351 352
352 353 match = ctx2._matchstatus(ctx1, match)
353 354 r = scmutil.status([], [], [], [], [], [], [])
354 355 r = ctx2._buildstatus(ctx1, r, match, listignored, listclean,
355 356 listunknown)
356 357
357 358 if reversed:
358 359 # Reverse added and removed. Clear deleted, unknown and ignored as
359 360 # these make no sense to reverse.
360 361 r = scmutil.status(r.modified, r.removed, r.added, [], [], [],
361 362 r.clean)
362 363
363 364 if listsubrepos:
364 365 for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
365 366 try:
366 367 rev2 = ctx2.subrev(subpath)
367 368 except KeyError:
368 369 # A subrepo that existed in node1 was deleted between
369 370 # node1 and node2 (inclusive). Thus, ctx2's substate
370 371 # won't contain that subpath. The best we can do is ignore it.
371 372 rev2 = None
372 373 submatch = matchmod.subdirmatcher(subpath, match)
373 374 s = sub.status(rev2, match=submatch, ignored=listignored,
374 375 clean=listclean, unknown=listunknown,
375 376 listsubrepos=True)
376 377 for rfiles, sfiles in zip(r, s):
377 378 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
378 379
379 380 for l in r:
380 381 l.sort()
381 382
382 383 return r
383 384
384 385 def _filterederror(repo, changeid):
385 386 """build an exception to be raised about a filtered changeid
386 387
387 388 This is extracted in a function to help extensions (eg: evolve) to
388 389 experiment with various message variants."""
389 390 if repo.filtername.startswith('visible'):
390 391 msg = _("hidden revision '%s'") % changeid
391 392 hint = _('use --hidden to access hidden revisions')
392 393 return error.FilteredRepoLookupError(msg, hint=hint)
393 394 msg = _("filtered revision '%s' (not in '%s' subset)")
394 395 msg %= (changeid, repo.filtername)
395 396 return error.FilteredRepoLookupError(msg)
396 397
397 398 class changectx(basectx):
398 399 """A changecontext object makes access to data related to a particular
399 400 changeset convenient. It represents a read-only context already present in
400 401 the repo."""
401 402 def __init__(self, repo, changeid=''):
402 403 """changeid is a revision number, node, or tag"""
403 404
404 405 # since basectx.__new__ already took care of copying the object, we
405 406 # don't need to do anything in __init__, so we just exit here
406 407 if isinstance(changeid, basectx):
407 408 return
408 409
409 410 if changeid == '':
410 411 changeid = '.'
411 412 self._repo = repo
412 413
413 414 try:
414 415 if isinstance(changeid, int):
415 416 self._node = repo.changelog.node(changeid)
416 417 self._rev = changeid
417 418 return
418 419 if not pycompat.ispy3 and isinstance(changeid, long):
419 420 changeid = str(changeid)
420 421 if changeid == 'null':
421 422 self._node = nullid
422 423 self._rev = nullrev
423 424 return
424 425 if changeid == 'tip':
425 426 self._node = repo.changelog.tip()
426 427 self._rev = repo.changelog.rev(self._node)
427 428 return
428 429 if changeid == '.' or changeid == repo.dirstate.p1():
429 430 # this is a hack to delay/avoid loading obsmarkers
430 431 # when we know that '.' won't be hidden
431 432 self._node = repo.dirstate.p1()
432 433 self._rev = repo.unfiltered().changelog.rev(self._node)
433 434 return
434 435 if len(changeid) == 20:
435 436 try:
436 437 self._node = changeid
437 438 self._rev = repo.changelog.rev(changeid)
438 439 return
439 440 except error.FilteredRepoLookupError:
440 441 raise
441 442 except LookupError:
442 443 pass
443 444
444 445 try:
445 446 r = int(changeid)
446 447 if '%d' % r != changeid:
447 448 raise ValueError
448 449 l = len(repo.changelog)
449 450 if r < 0:
450 451 r += l
451 452 if r < 0 or r >= l and r != wdirrev:
452 453 raise ValueError
453 454 self._rev = r
454 455 self._node = repo.changelog.node(r)
455 456 return
456 457 except error.FilteredIndexError:
457 458 raise
458 459 except (ValueError, OverflowError, IndexError):
459 460 pass
460 461
461 462 if len(changeid) == 40:
462 463 try:
463 464 self._node = bin(changeid)
464 465 self._rev = repo.changelog.rev(self._node)
465 466 return
466 467 except error.FilteredLookupError:
467 468 raise
468 469 except (TypeError, LookupError):
469 470 pass
470 471
471 472 # lookup bookmarks through the name interface
472 473 try:
473 474 self._node = repo.names.singlenode(repo, changeid)
474 475 self._rev = repo.changelog.rev(self._node)
475 476 return
476 477 except KeyError:
477 478 pass
478 479 except error.FilteredRepoLookupError:
479 480 raise
480 481 except error.RepoLookupError:
481 482 pass
482 483
483 484 self._node = repo.unfiltered().changelog._partialmatch(changeid)
484 485 if self._node is not None:
485 486 self._rev = repo.changelog.rev(self._node)
486 487 return
487 488
488 489 # lookup failed
489 490 # check if it might have come from damaged dirstate
490 491 #
491 492 # XXX we could avoid the unfiltered if we had a recognizable
492 493 # exception for filtered changeset access
493 494 if changeid in repo.unfiltered().dirstate.parents():
494 495 msg = _("working directory has unknown parent '%s'!")
495 496 raise error.Abort(msg % short(changeid))
496 497 try:
497 498 if len(changeid) == 20 and nonascii(changeid):
498 499 changeid = hex(changeid)
499 500 except TypeError:
500 501 pass
501 502 except (error.FilteredIndexError, error.FilteredLookupError,
502 503 error.FilteredRepoLookupError):
503 504 raise _filterederror(repo, changeid)
504 505 except IndexError:
505 506 pass
506 507 raise error.RepoLookupError(
507 508 _("unknown revision '%s'") % changeid)
508 509
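The long ``try`` block above is essentially a resolver cascade; the sketch
below (a hypothetical helper, not Mercurial API) captures its control flow::

    def resolve(changeid, resolvers):
        """Try each resolver in order: integer rev, 'null'/'tip'/'.',
        20-byte node, decimal string, 40-hex string, names/bookmarks,
        and finally a unique hex prefix."""
        for resolver in resolvers:
            try:
                node = resolver(changeid)
            except (ValueError, OverflowError, IndexError, KeyError):
                continue              # this form didn't match; try the next
            if node is not None:
                return node
        raise LookupError("unknown revision '%s'" % changeid)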
509 510 def __hash__(self):
510 511 try:
511 512 return hash(self._rev)
512 513 except AttributeError:
513 514 return id(self)
514 515
515 516 def __nonzero__(self):
516 517 return self._rev != nullrev
517 518
518 519 __bool__ = __nonzero__
519 520
520 521 @propertycache
521 522 def _changeset(self):
522 523 return self._repo.changelog.changelogrevision(self.rev())
523 524
524 525 @propertycache
525 526 def _manifest(self):
526 527 return self._manifestctx.read()
527 528
528 529 @property
529 530 def _manifestctx(self):
530 531 return self._repo.manifestlog[self._changeset.manifest]
531 532
532 533 @propertycache
533 534 def _manifestdelta(self):
534 535 return self._manifestctx.readdelta()
535 536
536 537 @propertycache
537 538 def _parents(self):
538 539 repo = self._repo
539 540 p1, p2 = repo.changelog.parentrevs(self._rev)
540 541 if p2 == nullrev:
541 542 return [changectx(repo, p1)]
542 543 return [changectx(repo, p1), changectx(repo, p2)]
543 544
544 545 def changeset(self):
545 546 c = self._changeset
546 547 return (
547 548 c.manifest,
548 549 c.user,
549 550 c.date,
550 551 c.files,
551 552 c.description,
552 553 c.extra,
553 554 )
554 555 def manifestnode(self):
555 556 return self._changeset.manifest
556 557
557 558 def user(self):
558 559 return self._changeset.user
559 560 def date(self):
560 561 return self._changeset.date
561 562 def files(self):
562 563 return self._changeset.files
563 564 def description(self):
564 565 return self._changeset.description
565 566 def branch(self):
566 567 return encoding.tolocal(self._changeset.extra.get("branch"))
567 568 def closesbranch(self):
568 569 return 'close' in self._changeset.extra
569 570 def extra(self):
570 571 return self._changeset.extra
571 572 def tags(self):
572 573 return self._repo.nodetags(self._node)
573 574 def bookmarks(self):
574 575 return self._repo.nodebookmarks(self._node)
575 576 def phase(self):
576 577 return self._repo._phasecache.phase(self._repo, self._rev)
577 578 def hidden(self):
578 579 return self._rev in repoview.filterrevs(self._repo, 'visible')
579 580
580 581 def children(self):
581 582 """return contexts for each child changeset"""
582 583 c = self._repo.changelog.children(self._node)
583 584 return [changectx(self._repo, x) for x in c]
584 585
585 586 def ancestors(self):
586 587 for a in self._repo.changelog.ancestors([self._rev]):
587 588 yield changectx(self._repo, a)
588 589
589 590 def descendants(self):
590 591 for d in self._repo.changelog.descendants([self._rev]):
591 592 yield changectx(self._repo, d)
592 593
593 594 def filectx(self, path, fileid=None, filelog=None):
594 595 """get a file context from this changeset"""
595 596 if fileid is None:
596 597 fileid = self.filenode(path)
597 598 return filectx(self._repo, path, fileid=fileid,
598 599 changectx=self, filelog=filelog)
599 600
600 601 def ancestor(self, c2, warn=False):
601 602 """return the "best" ancestor context of self and c2
602 603
603 604 If there are multiple candidates, it will show a message and check
604 605 merge.preferancestor configuration before falling back to the
605 606 revlog ancestor."""
606 607 # deal with workingctxs
607 608 n2 = c2._node
608 609 if n2 is None:
609 610 n2 = c2._parents[0]._node
610 611 cahs = self._repo.changelog.commonancestorsheads(self._node, n2)
611 612 if not cahs:
612 613 anc = nullid
613 614 elif len(cahs) == 1:
614 615 anc = cahs[0]
615 616 else:
616 617 # experimental config: merge.preferancestor
617 618 for r in self._repo.ui.configlist('merge', 'preferancestor', ['*']):
618 619 try:
619 620 ctx = changectx(self._repo, r)
620 621 except error.RepoLookupError:
621 622 continue
622 623 anc = ctx.node()
623 624 if anc in cahs:
624 625 break
625 626 else:
626 627 anc = self._repo.changelog.ancestor(self._node, n2)
627 628 if warn:
628 629 self._repo.ui.status(
629 630 (_("note: using %s as ancestor of %s and %s\n") %
630 631 (short(anc), short(self._node), short(n2))) +
631 632 ''.join(_(" alternatively, use --config "
632 633 "merge.preferancestor=%s\n") %
633 634 short(n) for n in sorted(cahs) if n != anc))
634 635 return changectx(self._repo, anc)
635 636
636 637 def descendant(self, other):
637 638 """True if other is descendant of this changeset"""
638 639 return self._repo.changelog.descendant(self._rev, other._rev)
639 640
640 641 def walk(self, match):
641 642 '''Generates matching file names.'''
642 643
643 644 # Wrap match.bad method to have message with nodeid
644 645 def bad(fn, msg):
645 646 # The manifest doesn't know about subrepos, so don't complain about
646 647 # paths into valid subrepos.
647 648 if any(fn == s or fn.startswith(s + '/')
648 649 for s in self.substate):
649 650 return
650 651 match.bad(fn, _('no such file in rev %s') % self)
651 652
652 653 m = matchmod.badmatch(match, bad)
653 654 return self._manifest.walk(m)
654 655
655 656 def matches(self, match):
656 657 return self.walk(match)
657 658
658 659 class basefilectx(object):
659 660 """A filecontext object represents the common logic for its children:
660 661 filectx: read-only access to a filerevision that is already present
661 662 in the repo,
662 663 workingfilectx: a filecontext that represents files from the working
663 664 directory,
664 665 memfilectx: a filecontext that represents files in-memory,
665 666 overlayfilectx: duplicate another filecontext with some fields overridden.
666 667 """
667 668 @propertycache
668 669 def _filelog(self):
669 670 return self._repo.file(self._path)
670 671
671 672 @propertycache
672 673 def _changeid(self):
673 674 if r'_changeid' in self.__dict__:
674 675 return self._changeid
675 676 elif r'_changectx' in self.__dict__:
676 677 return self._changectx.rev()
677 678 elif r'_descendantrev' in self.__dict__:
678 679 # this file context was created from a revision with a known
679 680 # descendant, we can (lazily) correct for linkrev aliases
680 681 return self._adjustlinkrev(self._descendantrev)
681 682 else:
682 683 return self._filelog.linkrev(self._filerev)
683 684
684 685 @propertycache
685 686 def _filenode(self):
686 687 if r'_fileid' in self.__dict__:
687 688 return self._filelog.lookup(self._fileid)
688 689 else:
689 690 return self._changectx.filenode(self._path)
690 691
691 692 @propertycache
692 693 def _filerev(self):
693 694 return self._filelog.rev(self._filenode)
694 695
695 696 @propertycache
696 697 def _repopath(self):
697 698 return self._path
698 699
699 700 def __nonzero__(self):
700 701 try:
701 702 self._filenode
702 703 return True
703 704 except error.LookupError:
704 705 # file is missing
705 706 return False
706 707
707 708 __bool__ = __nonzero__
708 709
709 710 def __bytes__(self):
710 711 try:
711 712 return "%s@%s" % (self.path(), self._changectx)
712 713 except error.LookupError:
713 714 return "%s@???" % self.path()
714 715
715 716 __str__ = encoding.strmethod(__bytes__)
716 717
717 718 def __repr__(self):
718 719 return "<%s %s>" % (type(self).__name__, str(self))
719 720
720 721 def __hash__(self):
721 722 try:
722 723 return hash((self._path, self._filenode))
723 724 except AttributeError:
724 725 return id(self)
725 726
726 727 def __eq__(self, other):
727 728 try:
728 729 return (type(self) == type(other) and self._path == other._path
729 730 and self._filenode == other._filenode)
730 731 except AttributeError:
731 732 return False
732 733
733 734 def __ne__(self, other):
734 735 return not (self == other)
735 736
736 737 def filerev(self):
737 738 return self._filerev
738 739 def filenode(self):
739 740 return self._filenode
740 741 @propertycache
741 742 def _flags(self):
742 743 return self._changectx.flags(self._path)
743 744 def flags(self):
744 745 return self._flags
745 746 def filelog(self):
746 747 return self._filelog
747 748 def rev(self):
748 749 return self._changeid
749 750 def linkrev(self):
750 751 return self._filelog.linkrev(self._filerev)
751 752 def node(self):
752 753 return self._changectx.node()
753 754 def hex(self):
754 755 return self._changectx.hex()
755 756 def user(self):
756 757 return self._changectx.user()
757 758 def date(self):
758 759 return self._changectx.date()
759 760 def files(self):
760 761 return self._changectx.files()
761 762 def description(self):
762 763 return self._changectx.description()
763 764 def branch(self):
764 765 return self._changectx.branch()
765 766 def extra(self):
766 767 return self._changectx.extra()
767 768 def phase(self):
768 769 return self._changectx.phase()
769 770 def phasestr(self):
770 771 return self._changectx.phasestr()
771 772 def manifest(self):
772 773 return self._changectx.manifest()
773 774 def changectx(self):
774 775 return self._changectx
775 776 def renamed(self):
776 777 return self._copied
777 778 def repo(self):
778 779 return self._repo
779 780 def size(self):
780 781 return len(self.data())
781 782
782 783 def path(self):
783 784 return self._path
784 785
785 786 def isbinary(self):
786 787 try:
787 788 return util.binary(self.data())
788 789 except IOError:
789 790 return False
790 791 def isexec(self):
791 792 return 'x' in self.flags()
792 793 def islink(self):
793 794 return 'l' in self.flags()
794 795
795 796 def isabsent(self):
796 797 """whether this filectx represents a file not in self._changectx
797 798
798 799 This is mainly for merge code to detect change/delete conflicts. This is
799 800 expected to be True for all subclasses of basectx."""
800 801 return False
801 802
802 803 _customcmp = False
803 804 def cmp(self, fctx):
804 805 """compare with other file context
805 806
806 807 returns True if different than fctx.
807 808 """
808 809 if fctx._customcmp:
809 810 return fctx.cmp(self)
810 811
811 812 if (fctx._filenode is None
812 813 and (self._repo._encodefilterpats
813 814 # if file data starts with '\1\n', empty metadata block is
814 815 # prepended, which adds 4 bytes to filelog.size().
815 816 or self.size() - 4 == fctx.size())
816 817 or self.size() == fctx.size()):
817 818 return self._filelog.cmp(self._filenode, fctx.data())
818 819
819 820 return True
820 821
821 822 def _adjustlinkrev(self, srcrev, inclusive=False):
822 823 """return the first ancestor of <srcrev> introducing <fnode>
823 824
824 825 If the linkrev of the file revision does not point to an ancestor of
825 826 srcrev, we'll walk down the ancestors until we find one introducing
826 827 this file revision.
827 828
828 829 :srcrev: the changeset revision we search ancestors from
829 830 :inclusive: if true, the src revision will also be checked
830 831 """
831 832 repo = self._repo
832 833 cl = repo.unfiltered().changelog
833 834 mfl = repo.manifestlog
834 835 # fetch the linkrev
835 836 lkr = self.linkrev()
836 837 # hack to reuse ancestor computation when searching for renames
837 838 memberanc = getattr(self, '_ancestrycontext', None)
838 839 iteranc = None
839 840 if srcrev is None:
840 841 # wctx case, used by workingfilectx during mergecopy
841 842 revs = [p.rev() for p in self._repo[None].parents()]
842 843 inclusive = True # we skipped the real (revless) source
843 844 else:
844 845 revs = [srcrev]
845 846 if memberanc is None:
846 847 memberanc = iteranc = cl.ancestors(revs, lkr,
847 848 inclusive=inclusive)
848 849 # check if this linkrev is an ancestor of srcrev
849 850 if lkr not in memberanc:
850 851 if iteranc is None:
851 852 iteranc = cl.ancestors(revs, lkr, inclusive=inclusive)
852 853 fnode = self._filenode
853 854 path = self._path
854 855 for a in iteranc:
855 856 ac = cl.read(a) # get changeset data (we avoid object creation)
856 857 if path in ac[3]: # checking the 'files' field.
857 858 # The file has been touched, check if the content is
858 859 # similar to the one we search for.
859 860 if fnode == mfl[ac[0]].readfast().get(path):
860 861 return a
861 862 # In theory, we should never get out of that loop without a result.
862 863 # But if the manifest uses a buggy file revision (not a child of the
863 864 # one it replaces) we could. Such a buggy situation will likely
864 865 # result in a crash somewhere else at some point.
865 866 return lkr
866 867
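Stripped of the caching and the wctx special case, the walk reduces to the
sketch below, where the callables are hypothetical stand-ins for the
changelog and manifestlog::

    def adjustlinkrev(lkr, srcrev, path, fnode, ancestors, filesof,
                      manifestof):
        """Return the first ancestor of srcrev whose manifest records
        fnode for path, falling back to the raw linkrev."""
        ancs = set(ancestors(srcrev))
        if lkr in ancs:
            return lkr                # fast path: the linkrev is valid
        for a in sorted(ancs, reverse=True):
            # only changesets touching the path can introduce the revision
            if path in filesof(a) and manifestof(a).get(path) == fnode:
                return a
        return lkr                    # buggy-history fallback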
867 868 def introrev(self):
868 869 """return the rev of the changeset which introduced this file revision
869 870
870 871 This method is different from linkrev because it takes into account the
871 872 changeset the filectx was created from. It ensures the returned
872 873 revision is one of its ancestors. This prevents bugs from
873 874 'linkrev-shadowing' when a file revision is used by multiple
874 875 changesets.
875 876 """
876 877 lkr = self.linkrev()
877 878 attrs = vars(self)
878 879 noctx = not ('_changeid' in attrs or '_changectx' in attrs)
879 880 if noctx or self.rev() == lkr:
880 881 return self.linkrev()
881 882 return self._adjustlinkrev(self.rev(), inclusive=True)
882 883
883 884 def _parentfilectx(self, path, fileid, filelog):
884 885 """create parent filectx keeping ancestry info for _adjustlinkrev()"""
885 886 fctx = filectx(self._repo, path, fileid=fileid, filelog=filelog)
886 887 if '_changeid' in vars(self) or '_changectx' in vars(self):
887 888 # If self is associated with a changeset (probably explicitly
888 889 # fed), ensure the created filectx is associated with a
889 890 # changeset that is an ancestor of self.changectx.
890 891 # This lets us later use _adjustlinkrev to get a correct link.
891 892 fctx._descendantrev = self.rev()
892 893 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
893 894 elif '_descendantrev' in vars(self):
894 895 # Otherwise propagate _descendantrev if we have one associated.
895 896 fctx._descendantrev = self._descendantrev
896 897 fctx._ancestrycontext = getattr(self, '_ancestrycontext', None)
897 898 return fctx
898 899
899 900 def parents(self):
900 901 _path = self._path
901 902 fl = self._filelog
902 903 parents = self._filelog.parents(self._filenode)
903 904 pl = [(_path, node, fl) for node in parents if node != nullid]
904 905
905 906 r = fl.renamed(self._filenode)
906 907 if r:
907 908 # - In the simple rename case, both parents are nullid, pl is empty.
908 909 # - In case of merge, only one of the parents is nullid and should
909 910 # be replaced with the rename information. This parent is -always-
910 911 # the first one.
911 912 #
912 913 # As nullid parents have always been filtered out in the previous list
913 914 # comprehension, inserting at 0 will always result in replacing the
914 915 # first nullid parent with the rename information.
915 916 pl.insert(0, (r[0], r[1], self._repo.file(r[0])))
916 917
917 918 return [self._parentfilectx(path, fnode, l) for path, fnode, l in pl]
918 919
919 920 def p1(self):
920 921 return self.parents()[0]
921 922
922 923 def p2(self):
923 924 p = self.parents()
924 925 if len(p) == 2:
925 926 return p[1]
926 927 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
927 928
928 929 def annotate(self, follow=False, linenumber=False, skiprevs=None,
929 930 diffopts=None):
930 931 '''returns a list of tuples of ((ctx, number), line) for each line
931 932 in the file, where ctx is the filectx of the node where
932 933 that line was last changed; if linenumber parameter is true, number is
933 934 the line number at the first appearance in the managed file, otherwise,
934 935 number has a fixed value of False.
935 936 '''
936 937
937 938 def lines(text):
938 939 if text.endswith("\n"):
939 940 return text.count("\n")
940 941 return text.count("\n") + int(bool(text))
941 942
942 943 if linenumber:
943 944 def decorate(text, rev):
944 945 return ([(rev, i) for i in xrange(1, lines(text) + 1)], text)
945 946 else:
946 947 def decorate(text, rev):
947 948 return ([(rev, False)] * lines(text), text)
948 949
949 950 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
950 951
951 952 def parents(f):
952 953 # Cut _descendantrev here to mitigate the penalty of lazy linkrev
953 954 # adjustment. Otherwise, p._adjustlinkrev() would walk changelog
954 955 # from the topmost introrev (= srcrev) down to p.linkrev() if it
955 956 # isn't an ancestor of the srcrev.
956 957 f._changeid
957 958 pl = f.parents()
958 959
959 960 # Don't return renamed parents if we aren't following.
960 961 if not follow:
961 962 pl = [p for p in pl if p.path() == f.path()]
962 963
963 964 # renamed filectx won't have a filelog yet, so set it
964 965 # from the cache to save time
965 966 for p in pl:
966 967 if not '_filelog' in p.__dict__:
967 968 p._filelog = getlog(p.path())
968 969
969 970 return pl
970 971
971 972 # use linkrev to find the first changeset where self appeared
972 973 base = self
973 974 introrev = self.introrev()
974 975 if self.rev() != introrev:
975 976 base = self.filectx(self.filenode(), changeid=introrev)
976 977 if getattr(base, '_ancestrycontext', None) is None:
977 978 cl = self._repo.changelog
978 979 if introrev is None:
979 980 # wctx is not inclusive, but works because _ancestrycontext
980 981 # is used to test filelog revisions
981 982 ac = cl.ancestors([p.rev() for p in base.parents()],
982 983 inclusive=True)
983 984 else:
984 985 ac = cl.ancestors([introrev], inclusive=True)
985 986 base._ancestrycontext = ac
986 987
987 988 # This algorithm would prefer to be recursive, but Python is a
988 989 # bit recursion-hostile. Instead we do an iterative
989 990 # depth-first search.
990 991
991 992 # 1st DFS pre-calculates pcache and needed
992 993 visit = [base]
993 994 pcache = {}
994 995 needed = {base: 1}
995 996 while visit:
996 997 f = visit.pop()
997 998 if f in pcache:
998 999 continue
999 1000 pl = parents(f)
1000 1001 pcache[f] = pl
1001 1002 for p in pl:
1002 1003 needed[p] = needed.get(p, 0) + 1
1003 1004 if p not in pcache:
1004 1005 visit.append(p)
1005 1006
1006 1007 # 2nd DFS does the actual annotate
1007 1008 visit[:] = [base]
1008 1009 hist = {}
1009 1010 while visit:
1010 1011 f = visit[-1]
1011 1012 if f in hist:
1012 1013 visit.pop()
1013 1014 continue
1014 1015
1015 1016 ready = True
1016 1017 pl = pcache[f]
1017 1018 for p in pl:
1018 1019 if p not in hist:
1019 1020 ready = False
1020 1021 visit.append(p)
1021 1022 if ready:
1022 1023 visit.pop()
1023 1024 curr = decorate(f.data(), f)
1024 1025 skipchild = False
1025 1026 if skiprevs is not None:
1026 1027 skipchild = f._changeid in skiprevs
1027 1028 curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild,
1028 1029 diffopts)
1029 1030 for p in pl:
1030 1031 if needed[p] == 1:
1031 1032 del hist[p]
1032 1033 del needed[p]
1033 1034 else:
1034 1035 needed[p] -= 1
1035 1036
1036 1037 hist[f] = curr
1037 1038 del pcache[f]
1038 1039
1039 1040 return zip(hist[base][0], hist[base][1].splitlines(True))
1040 1041
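For orientation, a minimal usage sketch of the annotate API above; `repo` and the file name `a.txt` are hypothetical, and the repository is assumed to be already loaded:

fctx = repo['.']['a.txt']
for (actx, lineno), line in fctx.annotate(linenumber=True):
    # actx is the filectx that last changed the line; lineno is the
    # line number at its first appearance (False when linenumber=False)
    repo.ui.write('%d:%s: %s' % (actx.rev(), lineno, line))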
1041 1042 def ancestors(self, followfirst=False):
1042 1043 visit = {}
1043 1044 c = self
1044 1045 if followfirst:
1045 1046 cut = 1
1046 1047 else:
1047 1048 cut = None
1048 1049
1049 1050 while True:
1050 1051 for parent in c.parents()[:cut]:
1051 1052 visit[(parent.linkrev(), parent.filenode())] = parent
1052 1053 if not visit:
1053 1054 break
1054 1055 c = visit.pop(max(visit))
1055 1056 yield c
1056 1057
1057 1058 def _annotatepair(parents, childfctx, child, skipchild, diffopts):
1058 1059 r'''
1059 1060 Given parent and child fctxes and annotate data for parents, for all lines
1060 1061 in either parent that match the child, annotate the child with the parent's
1061 1062 data.
1062 1063
1063 1064 Additionally, if `skipchild` is True, replace all other lines with parent
1064 1065 annotate data as well such that child is never blamed for any lines.
1065 1066
1066 1067 >>> oldfctx = 'old'
1067 1068 >>> p1fctx, p2fctx, childfctx = 'p1', 'p2', 'c'
1068 1069 >>> olddata = 'a\nb\n'
1069 1070 >>> p1data = 'a\nb\nc\n'
1070 1071 >>> p2data = 'a\nc\nd\n'
1071 1072 >>> childdata = 'a\nb2\nc\nc2\nd\n'
1072 1073 >>> diffopts = mdiff.diffopts()
1073 1074
1074 1075 >>> def decorate(text, rev):
1075 1076 ... return ([(rev, i) for i in xrange(1, text.count('\n') + 1)], text)
1076 1077
1077 1078 Basic usage:
1078 1079
1079 1080 >>> oldann = decorate(olddata, oldfctx)
1080 1081 >>> p1ann = decorate(p1data, p1fctx)
1081 1082 >>> p1ann = _annotatepair([oldann], p1fctx, p1ann, False, diffopts)
1082 1083 >>> p1ann[0]
1083 1084 [('old', 1), ('old', 2), ('p1', 3)]
1084 1085 >>> p2ann = decorate(p2data, p2fctx)
1085 1086 >>> p2ann = _annotatepair([oldann], p2fctx, p2ann, False, diffopts)
1086 1087 >>> p2ann[0]
1087 1088 [('old', 1), ('p2', 2), ('p2', 3)]
1088 1089
1089 1090 Test with multiple parents (note the difference caused by ordering):
1090 1091
1091 1092 >>> childann = decorate(childdata, childfctx)
1092 1093 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, False,
1093 1094 ... diffopts)
1094 1095 >>> childann[0]
1095 1096 [('old', 1), ('c', 2), ('p2', 2), ('c', 4), ('p2', 3)]
1096 1097
1097 1098 >>> childann = decorate(childdata, childfctx)
1098 1099 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, False,
1099 1100 ... diffopts)
1100 1101 >>> childann[0]
1101 1102 [('old', 1), ('c', 2), ('p1', 3), ('c', 4), ('p2', 3)]
1102 1103
1103 1104 Test with skipchild (note the difference caused by ordering):
1104 1105
1105 1106 >>> childann = decorate(childdata, childfctx)
1106 1107 >>> childann = _annotatepair([p1ann, p2ann], childfctx, childann, True,
1107 1108 ... diffopts)
1108 1109 >>> childann[0]
1109 1110 [('old', 1), ('old', 2), ('p2', 2), ('p2', 2), ('p2', 3)]
1110 1111
1111 1112 >>> childann = decorate(childdata, childfctx)
1112 1113 >>> childann = _annotatepair([p2ann, p1ann], childfctx, childann, True,
1113 1114 ... diffopts)
1114 1115 >>> childann[0]
1115 1116 [('old', 1), ('old', 2), ('p1', 3), ('p1', 3), ('p2', 3)]
1116 1117 '''
1117 1118 pblocks = [(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
1118 1119 for parent in parents]
1119 1120
1120 1121 if skipchild:
1121 1122 # Need to iterate over the blocks twice -- make it a list
1122 1123 pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
1123 1124 # Mercurial currently prefers p2 over p1 for annotate.
1124 1125 # TODO: change this?
1125 1126 for parent, blocks in pblocks:
1126 1127 for (a1, a2, b1, b2), t in blocks:
1127 1128 # Changed blocks ('!') or blocks made only of blank lines ('~')
1128 1129 # belong to the child.
1129 1130 if t == '=':
1130 1131 child[0][b1:b2] = parent[0][a1:a2]
1131 1132
1132 1133 if skipchild:
1133 1134 # Now try and match up anything that couldn't be matched.
1134 1135 # Reversing pblocks maintains the bias towards p2, matching the
1135 1136 # behavior above.
1136 1137 pblocks.reverse()
1137 1138
1138 1139 # The heuristics are:
1139 1140 # * Work on blocks of changed lines (effectively diff hunks with -U0).
1140 1141 # This could potentially be smarter but works well enough.
1141 1142 # * For a non-matching section, do a best-effort fit. Match lines in
1142 1143 # diff hunks 1:1, dropping lines as necessary.
1143 1144 # * Repeat the last line as a last resort.
1144 1145
1145 1146 # First, replace as much as possible without repeating the last line.
1146 1147 remaining = [(parent, []) for parent, _blocks in pblocks]
1147 1148 for idx, (parent, blocks) in enumerate(pblocks):
1148 1149 for (a1, a2, b1, b2), _t in blocks:
1149 1150 if a2 - a1 >= b2 - b1:
1150 1151 for bk in xrange(b1, b2):
1151 1152 if child[0][bk][0] == childfctx:
1152 1153 ak = min(a1 + (bk - b1), a2 - 1)
1153 1154 child[0][bk] = parent[0][ak]
1154 1155 else:
1155 1156 remaining[idx][1].append((a1, a2, b1, b2))
1156 1157
1157 1158 # Then, look at anything left, which might involve repeating the last
1158 1159 # line.
1159 1160 for parent, blocks in remaining:
1160 1161 for a1, a2, b1, b2 in blocks:
1161 1162 for bk in xrange(b1, b2):
1162 1163 if child[0][bk][0] == childfctx:
1163 1164 ak = min(a1 + (bk - b1), a2 - 1)
1164 1165 child[0][bk] = parent[0][ak]
1165 1166 return child
1166 1167
1167 1168 class filectx(basefilectx):
1168 1169 """A filecontext object makes access to data related to a particular
1169 1170 filerevision convenient."""
1170 1171 def __init__(self, repo, path, changeid=None, fileid=None,
1171 1172 filelog=None, changectx=None):
1172 1173 """changeid can be a changeset revision, node, or tag.
1173 1174 fileid can be a file revision or node."""
1174 1175 self._repo = repo
1175 1176 self._path = path
1176 1177
1177 1178 assert (changeid is not None
1178 1179 or fileid is not None
1179 1180 or changectx is not None), \
1180 1181 ("bad args: changeid=%r, fileid=%r, changectx=%r"
1181 1182 % (changeid, fileid, changectx))
1182 1183
1183 1184 if filelog is not None:
1184 1185 self._filelog = filelog
1185 1186
1186 1187 if changeid is not None:
1187 1188 self._changeid = changeid
1188 1189 if changectx is not None:
1189 1190 self._changectx = changectx
1190 1191 if fileid is not None:
1191 1192 self._fileid = fileid
1192 1193
1193 1194 @propertycache
1194 1195 def _changectx(self):
1195 1196 try:
1196 1197 return changectx(self._repo, self._changeid)
1197 1198 except error.FilteredRepoLookupError:
1198 1199 # Linkrev may point to any revision in the repository. When the
1199 1200 # repository is filtered, this may lead to `filectx` trying to build
1200 1201 # a `changectx` for a filtered revision. In such a case we fall back
1201 1202 # to creating the `changectx` on the unfiltered version of the
1202 1203 # repository. This fallback should not be an issue because
1203 1204 # `changectx` objects from `filectx` are not used in complex
1204 1205 # operations that care about filtering.
1205 1206 #
1206 1207 # This fallback is a cheap and dirty fix that prevents several
1207 1208 # crashes. It does not ensure the behavior is correct. However, the
1208 1209 # behavior was not correct before filtering either, and "incorrect
1209 1210 # behavior" is seen as better than a "crash".
1210 1211 #
1211 1212 # Linkrevs have several serious problems with filtering that are
1212 1213 # complicated to solve. Proper handling of the issue here should be
1213 1214 # considered once solving the linkrev issues is on the table.
1214 1215 return changectx(self._repo.unfiltered(), self._changeid)
1215 1216
1216 1217 def filectx(self, fileid, changeid=None):
1217 1218 '''opens an arbitrary revision of the file without
1218 1219 opening a new filelog'''
1219 1220 return filectx(self._repo, self._path, fileid=fileid,
1220 1221 filelog=self._filelog, changeid=changeid)
1221 1222
1222 1223 def rawdata(self):
1223 1224 return self._filelog.revision(self._filenode, raw=True)
1224 1225
1225 1226 def rawflags(self):
1226 1227 """low-level revlog flags"""
1227 1228 return self._filelog.flags(self._filerev)
1228 1229
1229 1230 def data(self):
1230 1231 try:
1231 1232 return self._filelog.read(self._filenode)
1232 1233 except error.CensoredNodeError:
1233 1234 if self._repo.ui.config("censor", "policy", "abort") == "ignore":
1234 1235 return ""
1235 1236 raise error.Abort(_("censored node: %s") % short(self._filenode),
1236 1237 hint=_("set censor.policy to ignore errors"))
1237 1238
1238 1239 def size(self):
1239 1240 return self._filelog.size(self._filerev)
1240 1241
1241 1242 @propertycache
1242 1243 def _copied(self):
1243 1244 """check if file was actually renamed in this changeset revision
1244 1245
1245 1246 If a rename is logged in the file revision, we report the copy for
1246 1247 the changeset only if the file revision's linkrev points back to the
1247 1248 changeset in question or both parents contain different file revisions.
1248 1249 """
1249 1250
1250 1251 renamed = self._filelog.renamed(self._filenode)
1251 1252 if not renamed:
1252 1253 return renamed
1253 1254
1254 1255 if self.rev() == self.linkrev():
1255 1256 return renamed
1256 1257
1257 1258 name = self.path()
1258 1259 fnode = self._filenode
1259 1260 for p in self._changectx.parents():
1260 1261 try:
1261 1262 if fnode == p.filenode(name):
1262 1263 return None
1263 1264 except error.LookupError:
1264 1265 pass
1265 1266 return renamed
1266 1267
1267 1268 def children(self):
1268 1269 # hard for renames
1269 1270 c = self._filelog.children(self._filenode)
1270 1271 return [filectx(self._repo, self._path, fileid=x,
1271 1272 filelog=self._filelog) for x in c]
1272 1273
1273 1274 class committablectx(basectx):
1274 1275 """A committablectx object provides common functionality for a context that
1275 1276 wants the ability to commit, e.g. workingctx or memctx."""
1276 1277 def __init__(self, repo, text="", user=None, date=None, extra=None,
1277 1278 changes=None):
1278 1279 self._repo = repo
1279 1280 self._rev = None
1280 1281 self._node = None
1281 1282 self._text = text
1282 1283 if date:
1283 1284 self._date = util.parsedate(date)
1284 1285 if user:
1285 1286 self._user = user
1286 1287 if changes:
1287 1288 self._status = changes
1288 1289
1289 1290 self._extra = {}
1290 1291 if extra:
1291 1292 self._extra = extra.copy()
1292 1293 if 'branch' not in self._extra:
1293 1294 try:
1294 1295 branch = encoding.fromlocal(self._repo.dirstate.branch())
1295 1296 except UnicodeDecodeError:
1296 1297 raise error.Abort(_('branch name not in UTF-8!'))
1297 1298 self._extra['branch'] = branch
1298 1299 if self._extra['branch'] == '':
1299 1300 self._extra['branch'] = 'default'
1300 1301
1301 1302 def __bytes__(self):
1302 1303 return bytes(self._parents[0]) + "+"
1303 1304
1304 1305 __str__ = encoding.strmethod(__bytes__)
1305 1306
1306 1307 def __nonzero__(self):
1307 1308 return True
1308 1309
1309 1310 __bool__ = __nonzero__
1310 1311
1311 1312 def _buildflagfunc(self):
1312 1313 # Create a fallback function for getting file flags when the
1313 1314 # filesystem doesn't support them
1314 1315
1315 1316 copiesget = self._repo.dirstate.copies().get
1316 1317 parents = self.parents()
1317 1318 if len(parents) < 2:
1318 1319 # when we have one parent, it's easy: copy from parent
1319 1320 man = parents[0].manifest()
1320 1321 def func(f):
1321 1322 f = copiesget(f, f)
1322 1323 return man.flags(f)
1323 1324 else:
1324 1325 # merges are tricky: we try to reconstruct the unstored
1325 1326 # result from the merge (issue1802)
1326 1327 p1, p2 = parents
1327 1328 pa = p1.ancestor(p2)
1328 1329 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
1329 1330
1330 1331 def func(f):
1331 1332 f = copiesget(f, f) # may be wrong for merges with copies
1332 1333 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
1333 1334 if fl1 == fl2:
1334 1335 return fl1
1335 1336 if fl1 == fla:
1336 1337 return fl2
1337 1338 if fl2 == fla:
1338 1339 return fl1
1339 1340 return '' # punt for conflicts
1340 1341
1341 1342 return func
1342 1343
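A worked illustration of the merge branch above (values hypothetical): with fl1/fl2 the parents' flags and fla the ancestor's flag for the same file, func behaves like this:

# fl1 == 'x', fl2 == 'x'             -> 'x'  parents agree; keep it
# fl1 == '',  fl2 == 'x', fla == ''  -> 'x'  only p2 changed the flag
# fl1 == 'x', fl2 == '',  fla == ''  -> 'x'  only p1 changed the flag
# fl1 == 'l', fl2 == 'x', fla == ''  -> ''   real conflict: punt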
1343 1344 @propertycache
1344 1345 def _flagfunc(self):
1345 1346 return self._repo.dirstate.flagfunc(self._buildflagfunc)
1346 1347
1347 1348 @propertycache
1348 1349 def _status(self):
1349 1350 return self._repo.status()
1350 1351
1351 1352 @propertycache
1352 1353 def _user(self):
1353 1354 return self._repo.ui.username()
1354 1355
1355 1356 @propertycache
1356 1357 def _date(self):
1357 1358 ui = self._repo.ui
1358 1359 date = ui.configdate('devel', 'default-date')
1359 1360 if date is None:
1360 1361 date = util.makedate()
1361 1362 return date
1362 1363
1363 1364 def subrev(self, subpath):
1364 1365 return None
1365 1366
1366 1367 def manifestnode(self):
1367 1368 return None
1368 1369 def user(self):
1369 1370 return self._user or self._repo.ui.username()
1370 1371 def date(self):
1371 1372 return self._date
1372 1373 def description(self):
1373 1374 return self._text
1374 1375 def files(self):
1375 1376 return sorted(self._status.modified + self._status.added +
1376 1377 self._status.removed)
1377 1378
1378 1379 def modified(self):
1379 1380 return self._status.modified
1380 1381 def added(self):
1381 1382 return self._status.added
1382 1383 def removed(self):
1383 1384 return self._status.removed
1384 1385 def deleted(self):
1385 1386 return self._status.deleted
1386 1387 def branch(self):
1387 1388 return encoding.tolocal(self._extra['branch'])
1388 1389 def closesbranch(self):
1389 1390 return 'close' in self._extra
1390 1391 def extra(self):
1391 1392 return self._extra
1392 1393
1393 1394 def tags(self):
1394 1395 return []
1395 1396
1396 1397 def bookmarks(self):
1397 1398 b = []
1398 1399 for p in self.parents():
1399 1400 b.extend(p.bookmarks())
1400 1401 return b
1401 1402
1402 1403 def phase(self):
1403 1404 phase = phases.draft # default phase to draft
1404 1405 for p in self.parents():
1405 1406 phase = max(phase, p.phase())
1406 1407 return phase
1407 1408
1408 1409 def hidden(self):
1409 1410 return False
1410 1411
1411 1412 def children(self):
1412 1413 return []
1413 1414
1414 1415 def flags(self, path):
1415 1416 if r'_manifest' in self.__dict__:
1416 1417 try:
1417 1418 return self._manifest.flags(path)
1418 1419 except KeyError:
1419 1420 return ''
1420 1421
1421 1422 try:
1422 1423 return self._flagfunc(path)
1423 1424 except OSError:
1424 1425 return ''
1425 1426
1426 1427 def ancestor(self, c2):
1427 1428 """return the "best" ancestor context of self and c2"""
1428 1429 return self._parents[0].ancestor(c2) # punt on two parents for now
1429 1430
1430 1431 def walk(self, match):
1431 1432 '''Generates matching file names.'''
1432 1433 return sorted(self._repo.dirstate.walk(match, sorted(self.substate),
1433 1434 True, False))
1434 1435
1435 1436 def matches(self, match):
1436 1437 return sorted(self._repo.dirstate.matches(match))
1437 1438
1438 1439 def ancestors(self):
1439 1440 for p in self._parents:
1440 1441 yield p
1441 1442 for a in self._repo.changelog.ancestors(
1442 1443 [p.rev() for p in self._parents]):
1443 1444 yield changectx(self._repo, a)
1444 1445
1445 1446 def markcommitted(self, node):
1446 1447 """Perform post-commit cleanup necessary after committing this ctx
1447 1448
1448 1449 Specifically, this updates the backing stores this working context
1449 1450 wraps to reflect the fact that the changes reflected by this
1450 1451 workingctx have been committed. For example, it marks
1451 1452 modified and added files as normal in the dirstate.
1452 1453
1453 1454 """
1454 1455
1455 1456 with self._repo.dirstate.parentchange():
1456 1457 for f in self.modified() + self.added():
1457 1458 self._repo.dirstate.normal(f)
1458 1459 for f in self.removed():
1459 1460 self._repo.dirstate.drop(f)
1460 1461 self._repo.dirstate.setparents(node)
1461 1462
1462 1463 # write changes out explicitly, because nesting wlock at
1463 1464 # runtime may prevent 'wlock.release()' in 'repo.commit()'
1464 1465 # from immediately doing so for subsequent changing files
1465 1466 self._repo.dirstate.write(self._repo.currenttransaction())
1466 1467
1467 1468 def dirty(self, missing=False, merge=True, branch=True):
1468 1469 return False
1469 1470
1470 1471 class workingctx(committablectx):
1471 1472 """A workingctx object makes access to data related to
1472 1473 the current working directory convenient.
1473 1474 date - any valid date string or (unixtime, offset), or None.
1474 1475 user - username string, or None.
1475 1476 extra - a dictionary of extra values, or None.
1476 1477 changes - a list of file lists as returned by localrepo.status()
1477 1478 or None to use the repository status.
1478 1479 """
1479 1480 def __init__(self, repo, text="", user=None, date=None, extra=None,
1480 1481 changes=None):
1481 1482 super(workingctx, self).__init__(repo, text, user, date, extra, changes)
1482 1483
1483 1484 def __iter__(self):
1484 1485 d = self._repo.dirstate
1485 1486 for f in d:
1486 1487 if d[f] != 'r':
1487 1488 yield f
1488 1489
1489 1490 def __contains__(self, key):
1490 1491 return self._repo.dirstate[key] not in "?r"
1491 1492
1492 1493 def hex(self):
1493 1494 return hex(wdirid)
1494 1495
1495 1496 @propertycache
1496 1497 def _parents(self):
1497 1498 p = self._repo.dirstate.parents()
1498 1499 if p[1] == nullid:
1499 1500 p = p[:-1]
1500 1501 return [changectx(self._repo, x) for x in p]
1501 1502
1502 1503 def filectx(self, path, filelog=None):
1503 1504 """get a file context from the working directory"""
1504 1505 return workingfilectx(self._repo, path, workingctx=self,
1505 1506 filelog=filelog)
1506 1507
1507 1508 def dirty(self, missing=False, merge=True, branch=True):
1508 1509 "check whether a working directory is modified"
1509 1510 # check subrepos first
1510 1511 for s in sorted(self.substate):
1511 1512 if self.sub(s).dirty():
1512 1513 return True
1513 1514 # check current working dir
1514 1515 return ((merge and self.p2()) or
1515 1516 (branch and self.branch() != self.p1().branch()) or
1516 1517 self.modified() or self.added() or self.removed() or
1517 1518 (missing and self.deleted()))
1518 1519
1519 1520 def add(self, list, prefix=""):
1520 1521 join = lambda f: os.path.join(prefix, f)
1521 1522 with self._repo.wlock():
1522 1523 ui, ds = self._repo.ui, self._repo.dirstate
1523 1524 rejected = []
1524 1525 lstat = self._repo.wvfs.lstat
1525 1526 for f in list:
1526 1527 scmutil.checkportable(ui, join(f))
1527 1528 try:
1528 1529 st = lstat(f)
1529 1530 except OSError:
1530 1531 ui.warn(_("%s does not exist!\n") % join(f))
1531 1532 rejected.append(f)
1532 1533 continue
1533 1534 if st.st_size > 10000000:
1534 1535 ui.warn(_("%s: up to %d MB of RAM may be required "
1535 1536 "to manage this file\n"
1536 1537 "(use 'hg revert %s' to cancel the "
1537 1538 "pending addition)\n")
1538 1539 % (f, 3 * st.st_size // 1000000, join(f)))
1539 1540 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1540 1541 ui.warn(_("%s not added: only files and symlinks "
1541 1542 "supported currently\n") % join(f))
1542 1543 rejected.append(f)
1543 1544 elif ds[f] in 'amn':
1544 1545 ui.warn(_("%s already tracked!\n") % join(f))
1545 1546 elif ds[f] == 'r':
1546 1547 ds.normallookup(f)
1547 1548 else:
1548 1549 ds.add(f)
1549 1550 return rejected
1550 1551
1551 1552 def forget(self, files, prefix=""):
1552 1553 join = lambda f: os.path.join(prefix, f)
1553 1554 with self._repo.wlock():
1554 1555 rejected = []
1555 1556 for f in files:
1556 1557 if f not in self._repo.dirstate:
1557 1558 self._repo.ui.warn(_("%s not tracked!\n") % join(f))
1558 1559 rejected.append(f)
1559 1560 elif self._repo.dirstate[f] != 'a':
1560 1561 self._repo.dirstate.remove(f)
1561 1562 else:
1562 1563 self._repo.dirstate.drop(f)
1563 1564 return rejected
1564 1565
1565 1566 def undelete(self, list):
1566 1567 pctxs = self.parents()
1567 1568 with self._repo.wlock():
1568 1569 for f in list:
1569 1570 if self._repo.dirstate[f] != 'r':
1570 1571 self._repo.ui.warn(_("%s not removed!\n") % f)
1571 1572 else:
1572 1573 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
1573 1574 t = fctx.data()
1574 1575 self._repo.wwrite(f, t, fctx.flags())
1575 1576 self._repo.dirstate.normal(f)
1576 1577
1577 1578 def copy(self, source, dest):
1578 1579 try:
1579 1580 st = self._repo.wvfs.lstat(dest)
1580 1581 except OSError as err:
1581 1582 if err.errno != errno.ENOENT:
1582 1583 raise
1583 1584 self._repo.ui.warn(_("%s does not exist!\n") % dest)
1584 1585 return
1585 1586 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1586 1587 self._repo.ui.warn(_("copy failed: %s is not a file or a "
1587 1588 "symbolic link\n") % dest)
1588 1589 else:
1589 1590 with self._repo.wlock():
1590 1591 if self._repo.dirstate[dest] in '?':
1591 1592 self._repo.dirstate.add(dest)
1592 1593 elif self._repo.dirstate[dest] in 'r':
1593 1594 self._repo.dirstate.normallookup(dest)
1594 1595 self._repo.dirstate.copy(source, dest)
1595 1596
1596 1597 def match(self, pats=None, include=None, exclude=None, default='glob',
1597 1598 listsubrepos=False, badfn=None):
1598 1599 r = self._repo
1599 1600
1600 1601 # Only a case-insensitive filesystem needs magic to translate user
1601 1602 # input to the actual case in the filesystem.
1602 1603 icasefs = not util.fscasesensitive(r.root)
1603 1604 return matchmod.match(r.root, r.getcwd(), pats, include, exclude,
1604 1605 default, auditor=r.auditor, ctx=self,
1605 1606 listsubrepos=listsubrepos, badfn=badfn,
1606 1607 icasefs=icasefs)
1607 1608
1608 1609 def _filtersuspectsymlink(self, files):
1609 1610 if not files or self._repo.dirstate._checklink:
1610 1611 return files
1611 1612
1612 1613 # Symlink placeholders may get non-symlink-like contents
1613 1614 # via user error or dereferencing by NFS or Samba servers,
1614 1615 # so we filter out any placeholders that don't look like a
1615 1616 # symlink
1616 1617 sane = []
1617 1618 for f in files:
1618 1619 if self.flags(f) == 'l':
1619 1620 d = self[f].data()
1620 1621 if d == '' or len(d) >= 1024 or '\n' in d or util.binary(d):
1621 1622 self._repo.ui.debug('ignoring suspect symlink placeholder'
1622 1623 ' "%s"\n' % f)
1623 1624 continue
1624 1625 sane.append(f)
1625 1626 return sane
1626 1627
1627 1628 def _checklookup(self, files):
1628 1629 # check for any possibly clean files
1629 1630 if not files:
1630 1631 return [], [], []
1631 1632
1632 1633 modified = []
1633 1634 deleted = []
1634 1635 fixup = []
1635 1636 pctx = self._parents[0]
1636 1637 # do a full compare of any files that might have changed
1637 1638 for f in sorted(files):
1638 1639 try:
1639 1640 # This will return True for a file that got replaced by a
1640 1641 # directory in the interim, but fixing that is pretty hard.
1641 1642 if (f not in pctx or self.flags(f) != pctx.flags(f)
1642 1643 or pctx[f].cmp(self[f])):
1643 1644 modified.append(f)
1644 1645 else:
1645 1646 fixup.append(f)
1646 1647 except (IOError, OSError):
1647 1648 # A file became inaccessible in between? Mark it as deleted,
1648 1649 # matching dirstate behavior (issue5584).
1649 1650 # The dirstate has more complex behavior around whether a
1650 1651 # missing file matches a directory, etc, but we don't need to
1651 1652 # bother with that: if f has made it to this point, we're sure
1652 1653 # it's in the dirstate.
1653 1654 deleted.append(f)
1654 1655
1655 1656 return modified, deleted, fixup
1656 1657
1657 1658 def _poststatusfixup(self, status, fixup):
1658 1659 """update dirstate for files that are actually clean"""
1659 1660 poststatus = self._repo.postdsstatus()
1660 1661 if fixup or poststatus:
1661 1662 try:
1662 1663 oldid = self._repo.dirstate.identity()
1663 1664
1664 1665 # updating the dirstate is optional
1665 1666 # so we don't wait on the lock
1666 1667 # wlock can invalidate the dirstate, so cache normal _after_
1667 1668 # taking the lock
1668 1669 with self._repo.wlock(False):
1669 1670 if self._repo.dirstate.identity() == oldid:
1670 1671 if fixup:
1671 1672 normal = self._repo.dirstate.normal
1672 1673 for f in fixup:
1673 1674 normal(f)
1674 1675 # write changes out explicitly, because nesting
1675 1676 # wlock at runtime may prevent 'wlock.release()'
1676 1677 # after this block from doing so for subsequent
1677 1678 # changing files
1678 1679 tr = self._repo.currenttransaction()
1679 1680 self._repo.dirstate.write(tr)
1680 1681
1681 1682 if poststatus:
1682 1683 for ps in poststatus:
1683 1684 ps(self, status)
1684 1685 else:
1685 1686 # in this case, writing changes out breaks
1686 1687 # consistency, because .hg/dirstate was
1687 1688 # already changed simultaneously after last
1688 1689 # caching (see also issue5584 for detail)
1689 1690 self._repo.ui.debug('skip updating dirstate: '
1690 1691 'identity mismatch\n')
1691 1692 except error.LockError:
1692 1693 pass
1693 1694 finally:
1694 1695 # Even if the wlock couldn't be grabbed, clear out the list.
1695 1696 self._repo.clearpostdsstatus()
1696 1697
1697 1698 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1698 1699 unknown=False):
1699 1700 '''Gets the status from the dirstate -- internal use only.'''
1700 1701 listignored, listclean, listunknown = ignored, clean, unknown
1701 1702 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1702 1703 subrepos = []
1703 1704 if '.hgsub' in self:
1704 1705 subrepos = sorted(self.substate)
1705 1706 cmp, s = self._repo.dirstate.status(match, subrepos, listignored,
1706 1707 listclean, listunknown)
1707 1708
1708 1709 # check for any possibly clean files
1709 1710 fixup = []
1710 1711 if cmp:
1711 1712 modified2, deleted2, fixup = self._checklookup(cmp)
1712 1713 s.modified.extend(modified2)
1713 1714 s.deleted.extend(deleted2)
1714 1715
1715 1716 if fixup and listclean:
1716 1717 s.clean.extend(fixup)
1717 1718
1718 1719 self._poststatusfixup(s, fixup)
1719 1720
1720 1721 if match.always():
1721 1722 # cache for performance
1722 1723 if s.unknown or s.ignored or s.clean:
1723 1724 # "_status" is cached with list*=False in the normal route
1724 1725 self._status = scmutil.status(s.modified, s.added, s.removed,
1725 1726 s.deleted, [], [], [])
1726 1727 else:
1727 1728 self._status = s
1728 1729
1729 1730 return s
1730 1731
1731 1732 @propertycache
1732 1733 def _manifest(self):
1733 1734 """generate a manifest corresponding to the values in self._status
1734 1735
1735 1736 This reuses the file nodeids from the parent, but uses special node
1736 1737 identifiers for added and modified files. This is used by the manifest
1737 1738 merge to see that files are different and by the update logic to avoid
1738 1739 deleting newly added files.
1739 1740 """
1740 1741 return self._buildstatusmanifest(self._status)
1741 1742
1742 1743 def _buildstatusmanifest(self, status):
1743 1744 """Builds a manifest that includes the given status results."""
1744 1745 parents = self.parents()
1745 1746
1746 1747 man = parents[0].manifest().copy()
1747 1748
1748 1749 ff = self._flagfunc
1749 1750 for i, l in ((addednodeid, status.added),
1750 1751 (modifiednodeid, status.modified)):
1751 1752 for f in l:
1752 1753 man[f] = i
1753 1754 try:
1754 1755 man.setflag(f, ff(f))
1755 1756 except OSError:
1756 1757 pass
1757 1758
1758 1759 for f in status.deleted + status.removed:
1759 1760 if f in man:
1760 1761 del man[f]
1761 1762
1762 1763 return man
1763 1764
1764 1765 def _buildstatus(self, other, s, match, listignored, listclean,
1765 1766 listunknown):
1766 1767 """build a status with respect to another context
1767 1768
1768 1769 This includes logic for maintaining the fast path of status when
1769 1770 comparing the working directory against its parent, which is to skip
1770 1771 building a new manifest if self (the working directory) is not being
1771 1772 compared against its parent (repo['.']).
1772 1773 """
1773 1774 s = self._dirstatestatus(match, listignored, listclean, listunknown)
1774 1775 # Filter out symlinks that, in the case of FAT32 and NTFS filesystems,
1775 1776 # might have accidentally ended up with the entire contents of the file
1776 1777 # they are supposed to be linking to.
1777 1778 s.modified[:] = self._filtersuspectsymlink(s.modified)
1778 1779 if other != self._repo['.']:
1779 1780 s = super(workingctx, self)._buildstatus(other, s, match,
1780 1781 listignored, listclean,
1781 1782 listunknown)
1782 1783 return s
1783 1784
1784 1785 def _matchstatus(self, other, match):
1785 1786 """override the match method with a filter for directory patterns
1786 1787
1787 1788 We use inheritance to customize the match.bad method only in cases of
1788 1789 workingctx since it belongs only to the working directory when
1789 1790 comparing against the parent changeset.
1790 1791
1791 1792 If we aren't comparing against the working directory's parent, then we
1792 1793 just use the default match object sent to us.
1793 1794 """
1794 1795 superself = super(workingctx, self)
1795 1796 match = superself._matchstatus(other, match)
1796 1797 if other != self._repo['.']:
1797 1798 def bad(f, msg):
1798 1799 # 'f' may be a directory pattern from 'match.files()',
1799 1800 # so 'f not in ctx1' is not enough
1800 1801 if f not in other and not other.hasdir(f):
1801 1802 self._repo.ui.warn('%s: %s\n' %
1802 1803 (self._repo.dirstate.pathto(f), msg))
1803 1804 match.bad = bad
1804 1805 return match
1805 1806
1807 def markcommitted(self, node):
1808 super(workingctx, self).markcommitted(node)
1809
1810 sparse.aftercommit(self._repo, node)
1811
1806 1812 class committablefilectx(basefilectx):
1807 1813 """A committablefilectx provides common functionality for a file context
1808 1814 that wants the ability to commit, e.g. workingfilectx or memfilectx."""
1809 1815 def __init__(self, repo, path, filelog=None, ctx=None):
1810 1816 self._repo = repo
1811 1817 self._path = path
1812 1818 self._changeid = None
1813 1819 self._filerev = self._filenode = None
1814 1820
1815 1821 if filelog is not None:
1816 1822 self._filelog = filelog
1817 1823 if ctx:
1818 1824 self._changectx = ctx
1819 1825
1820 1826 def __nonzero__(self):
1821 1827 return True
1822 1828
1823 1829 __bool__ = __nonzero__
1824 1830
1825 1831 def linkrev(self):
1826 1832 # linked to self._changectx no matter if file is modified or not
1827 1833 return self.rev()
1828 1834
1829 1835 def parents(self):
1830 1836 '''return parent filectxs, following copies if necessary'''
1831 1837 def filenode(ctx, path):
1832 1838 return ctx._manifest.get(path, nullid)
1833 1839
1834 1840 path = self._path
1835 1841 fl = self._filelog
1836 1842 pcl = self._changectx._parents
1837 1843 renamed = self.renamed()
1838 1844
1839 1845 if renamed:
1840 1846 pl = [renamed + (None,)]
1841 1847 else:
1842 1848 pl = [(path, filenode(pcl[0], path), fl)]
1843 1849
1844 1850 for pc in pcl[1:]:
1845 1851 pl.append((path, filenode(pc, path), fl))
1846 1852
1847 1853 return [self._parentfilectx(p, fileid=n, filelog=l)
1848 1854 for p, n, l in pl if n != nullid]
1849 1855
1850 1856 def children(self):
1851 1857 return []
1852 1858
1853 1859 class workingfilectx(committablefilectx):
1854 1860 """A workingfilectx object makes access to data related to a particular
1855 1861 file in the working directory convenient."""
1856 1862 def __init__(self, repo, path, filelog=None, workingctx=None):
1857 1863 super(workingfilectx, self).__init__(repo, path, filelog, workingctx)
1858 1864
1859 1865 @propertycache
1860 1866 def _changectx(self):
1861 1867 return workingctx(self._repo)
1862 1868
1863 1869 def data(self):
1864 1870 return self._repo.wread(self._path)
1865 1871 def renamed(self):
1866 1872 rp = self._repo.dirstate.copied(self._path)
1867 1873 if not rp:
1868 1874 return None
1869 1875 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
1870 1876
1871 1877 def size(self):
1872 1878 return self._repo.wvfs.lstat(self._path).st_size
1873 1879 def date(self):
1874 1880 t, tz = self._changectx.date()
1875 1881 try:
1876 1882 return (self._repo.wvfs.lstat(self._path).st_mtime, tz)
1877 1883 except OSError as err:
1878 1884 if err.errno != errno.ENOENT:
1879 1885 raise
1880 1886 return (t, tz)
1881 1887
1882 1888 def exists(self):
1883 1889 return self._repo.wvfs.exists(self._path)
1884 1890
1885 1891 def lexists(self):
1886 1892 return self._repo.wvfs.lexists(self._path)
1887 1893
1888 1894 def audit(self):
1889 1895 return self._repo.wvfs.audit(self._path)
1890 1896
1891 1897 def cmp(self, fctx):
1892 1898 """compare with other file context
1893 1899
1894 1900 returns True if different than fctx.
1895 1901 """
1896 1902 # fctx should be a filectx (not a workingfilectx)
1897 1903 # invert comparison to reuse the same code path
1898 1904 return fctx.cmp(self)
1899 1905
1900 1906 def remove(self, ignoremissing=False):
1901 1907 """wraps unlink for a repo's working directory"""
1902 1908 self._repo.wvfs.unlinkpath(self._path, ignoremissing=ignoremissing)
1903 1909
1904 1910 def write(self, data, flags, backgroundclose=False):
1905 1911 """wraps repo.wwrite"""
1906 1912 self._repo.wwrite(self._path, data, flags,
1907 1913 backgroundclose=backgroundclose)
1908 1914
1909 1915 def setflags(self, l, x):
1910 1916 self._repo.wvfs.setflags(self._path, l, x)
1911 1917
1912 1918 class workingcommitctx(workingctx):
1913 1919 """A workingcommitctx object makes access to data related to
1914 1920 the revision being committed convenient.
1915 1921
1916 1922 This hides changes in the working directory if they aren't
1917 1923 committed in this context.
1918 1924 """
1919 1925 def __init__(self, repo, changes,
1920 1926 text="", user=None, date=None, extra=None):
1921 1927 super(workingctx, self).__init__(repo, text, user, date, extra,
1922 1928 changes)
1923 1929
1924 1930 def _dirstatestatus(self, match=None, ignored=False, clean=False,
1925 1931 unknown=False):
1926 1932 """Return matched files only in ``self._status``
1927 1933
1928 1934 Uncommitted files appear "clean" via this context, even if
1929 1935 they aren't actually so in the working directory.
1930 1936 """
1931 1937 match = match or matchmod.always(self._repo.root, self._repo.getcwd())
1932 1938 if clean:
1933 1939 clean = [f for f in self._manifest if f not in self._changedset]
1934 1940 else:
1935 1941 clean = []
1936 1942 return scmutil.status([f for f in self._status.modified if match(f)],
1937 1943 [f for f in self._status.added if match(f)],
1938 1944 [f for f in self._status.removed if match(f)],
1939 1945 [], [], [], clean)
1940 1946
1941 1947 @propertycache
1942 1948 def _changedset(self):
1943 1949 """Return the set of files changed in this context
1944 1950 """
1945 1951 changed = set(self._status.modified)
1946 1952 changed.update(self._status.added)
1947 1953 changed.update(self._status.removed)
1948 1954 return changed
1949 1955
1950 1956 def makecachingfilectxfn(func):
1951 1957 """Create a filectxfn that caches based on the path.
1952 1958
1953 1959 We can't use util.cachefunc because it uses all arguments as the cache
1954 1960 key and this creates a cycle since the arguments include the repo and
1955 1961 memctx.
1956 1962 """
1957 1963 cache = {}
1958 1964
1959 1965 def getfilectx(repo, memctx, path):
1960 1966 if path not in cache:
1961 1967 cache[path] = func(repo, memctx, path)
1962 1968 return cache[path]
1963 1969
1964 1970 return getfilectx
1965 1971
1966 1972 def memfilefromctx(ctx):
1967 1973 """Given a context return a memfilectx for ctx[path]
1968 1974
1969 1975 This is a convenience method for building a memctx based on another
1970 1976 context.
1971 1977 """
1972 1978 def getfilectx(repo, memctx, path):
1973 1979 fctx = ctx[path]
1974 1980 # this is weird but apparently we only keep track of one parent
1975 1981 # (why not only store that instead of a tuple?)
1976 1982 copied = fctx.renamed()
1977 1983 if copied:
1978 1984 copied = copied[0]
1979 1985 return memfilectx(repo, path, fctx.data(),
1980 1986 islink=fctx.islink(), isexec=fctx.isexec(),
1981 1987 copied=copied, memctx=memctx)
1982 1988
1983 1989 return getfilectx
1984 1990
1985 1991 def memfilefrompatch(patchstore):
1986 1992 """Given a patch (e.g. patchstore object) return a memfilectx
1987 1993
1988 1994 This is a convenience method for building a memctx based on a patchstore.
1989 1995 """
1990 1996 def getfilectx(repo, memctx, path):
1991 1997 data, mode, copied = patchstore.getfile(path)
1992 1998 if data is None:
1993 1999 return None
1994 2000 islink, isexec = mode
1995 2001 return memfilectx(repo, path, data, islink=islink,
1996 2002 isexec=isexec, copied=copied,
1997 2003 memctx=memctx)
1998 2004
1999 2005 return getfilectx
2000 2006
2001 2007 class memctx(committablectx):
2002 2008 """Use memctx to perform in-memory commits via localrepo.commitctx().
2003 2009
2004 2010 Revision information is supplied at initialization time, while
2005 2011 related file data is made available through a callback
2006 2012 mechanism. 'repo' is the current localrepo, 'parents' is a
2007 2013 sequence of two parent revisions identifiers (pass None for every
2008 2014 missing parent), 'text' is the commit message and 'files' lists
2009 2015 names of files touched by the revision (normalized and relative to
2010 2016 repository root).
2011 2017
2012 2018 filectxfn(repo, memctx, path) is a callable receiving the
2013 2019 repository, the current memctx object and the normalized path of
2014 2020 requested file, relative to repository root. It is fired by the
2015 2021 commit function for every file in 'files', but calls order is
2016 2022 undefined. If the file is available in the revision being
2017 2023 committed (updated or added), filectxfn returns a memfilectx
2018 2024 object. If the file was removed, filectxfn returns None in recent
2019 2025 Mercurial. Moved files are represented by marking the source file
2020 2026 removed and the new file added with copy information (see
2021 2027 memfilectx).
2022 2028
2023 2029 user receives the committer name and defaults to the current
2024 2030 repository username. date is the commit date in any format
2025 2031 supported by util.parsedate() and defaults to the current date.
2026 2032 extra is a dictionary of metadata or is left empty.
2027 2033 """
2028 2034
2029 2035 # Mercurial <= 3.1 expects the filectxfn to raise IOError for missing files.
2030 2036 # Extensions that need to retain compatibility across Mercurial 3.1 can use
2031 2037 # this field to determine what to do in filectxfn.
2032 2038 _returnnoneformissingfiles = True
2033 2039
2034 2040 def __init__(self, repo, parents, text, files, filectxfn, user=None,
2035 2041 date=None, extra=None, branch=None, editor=False):
2036 2042 super(memctx, self).__init__(repo, text, user, date, extra)
2037 2043 self._rev = None
2038 2044 self._node = None
2039 2045 parents = [(p or nullid) for p in parents]
2040 2046 p1, p2 = parents
2041 2047 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
2042 2048 files = sorted(set(files))
2043 2049 self._files = files
2044 2050 if branch is not None:
2045 2051 self._extra['branch'] = encoding.fromlocal(branch)
2046 2052 self.substate = {}
2047 2053
2048 2054 if isinstance(filectxfn, patch.filestore):
2049 2055 filectxfn = memfilefrompatch(filectxfn)
2050 2056 elif not callable(filectxfn):
2051 2057 # if store is not callable, wrap it in a function
2052 2058 filectxfn = memfilefromctx(filectxfn)
2053 2059
2054 2060 # memoizing increases performance for e.g. vcs convert scenarios.
2055 2061 self._filectxfn = makecachingfilectxfn(filectxfn)
2056 2062
2057 2063 if editor:
2058 2064 self._text = editor(self._repo, self, [])
2059 2065 self._repo.savecommitmessage(self._text)
2060 2066
2061 2067 def filectx(self, path, filelog=None):
2062 2068 """get a file context from the working directory
2063 2069
2064 2070 Returns None if file doesn't exist and should be removed."""
2065 2071 return self._filectxfn(self._repo, self, path)
2066 2072
2067 2073 def commit(self):
2068 2074 """commit context to the repo"""
2069 2075 return self._repo.commitctx(self)
2070 2076
2071 2077 @propertycache
2072 2078 def _manifest(self):
2073 2079 """generate a manifest based on the return values of filectxfn"""
2074 2080
2075 2081 # keep this simple for now; just worry about p1
2076 2082 pctx = self._parents[0]
2077 2083 man = pctx.manifest().copy()
2078 2084
2079 2085 for f in self._status.modified:
2080 2086 p1node = nullid
2081 2087 p2node = nullid
2082 2088 p = pctx[f].parents() # if file isn't in pctx, check p2?
2083 2089 if len(p) > 0:
2084 2090 p1node = p[0].filenode()
2085 2091 if len(p) > 1:
2086 2092 p2node = p[1].filenode()
2087 2093 man[f] = revlog.hash(self[f].data(), p1node, p2node)
2088 2094
2089 2095 for f in self._status.added:
2090 2096 man[f] = revlog.hash(self[f].data(), nullid, nullid)
2091 2097
2092 2098 for f in self._status.removed:
2093 2099 if f in man:
2094 2100 del man[f]
2095 2101
2096 2102 return man
2097 2103
2098 2104 @propertycache
2099 2105 def _status(self):
2100 2106 """Calculate exact status from ``files`` specified at construction
2101 2107 """
2102 2108 man1 = self.p1().manifest()
2103 2109 p2 = self._parents[1]
2104 2110 # "1 < len(self._parents)" can't be used for checking
2105 2111 # existence of the 2nd parent, because "memctx._parents" is
2106 2112 # explicitly initialized by the list, of which length is 2.
2107 2113 if p2.node() != nullid:
2108 2114 man2 = p2.manifest()
2109 2115 managing = lambda f: f in man1 or f in man2
2110 2116 else:
2111 2117 managing = lambda f: f in man1
2112 2118
2113 2119 modified, added, removed = [], [], []
2114 2120 for f in self._files:
2115 2121 if not managing(f):
2116 2122 added.append(f)
2117 2123 elif self[f]:
2118 2124 modified.append(f)
2119 2125 else:
2120 2126 removed.append(f)
2121 2127
2122 2128 return scmutil.status(modified, added, removed, [], [], [], [])
2123 2129
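The filectxfn protocol documented in the memctx docstring lends itself to a short sketch; everything here (`repo`, the path, the content, the user) is hypothetical and assumes an already-loaded localrepo:

from mercurial import context

def getfilectx(repo, memctx, path):
    # return a memfilectx for files present in the commit; return
    # None instead to mark the file as removed
    return context.memfilectx(repo, path, 'new contents\n',
                              islink=False, isexec=False, memctx=memctx)

mctx = context.memctx(repo, (repo['.'].node(), None),
                      'commit created entirely in memory',
                      ['a.txt'], getfilectx,
                      user='someone <someone@example.com>')
node = mctx.commit()   # goes through localrepo.commitctx()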
2124 2130 class memfilectx(committablefilectx):
2125 2131 """memfilectx represents an in-memory file to commit.
2126 2132
2127 2133 See memctx and committablefilectx for more details.
2128 2134 """
2129 2135 def __init__(self, repo, path, data, islink=False,
2130 2136 isexec=False, copied=None, memctx=None):
2131 2137 """
2132 2138 path is the normalized file path relative to repository root.
2133 2139 data is the file content as a string.
2134 2140 islink is True if the file is a symbolic link.
2135 2141 isexec is True if the file is executable.
2136 2142 copied is the source file path if current file was copied in the
2137 2143 revision being committed, or None."""
2138 2144 super(memfilectx, self).__init__(repo, path, None, memctx)
2139 2145 self._data = data
2140 2146 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
2141 2147 self._copied = None
2142 2148 if copied:
2143 2149 self._copied = (copied, nullid)
2144 2150
2145 2151 def data(self):
2146 2152 return self._data
2147 2153
2148 2154 def remove(self, ignoremissing=False):
2149 2155 """wraps unlink for a repo's working directory"""
2150 2156 # need to figure out what to do here
2151 2157 del self._changectx[self._path]
2152 2158
2153 2159 def write(self, data, flags):
2154 2160 """wraps repo.wwrite"""
2155 2161 self._data = data
2156 2162
2157 2163 class overlayfilectx(committablefilectx):
2158 2164 """Like memfilectx but take an original filectx and optional parameters to
2159 2165 override parts of it. This is useful when fctx.data() is expensive (i.e.
2160 2166 flag processor is expensive) and raw data, flags, and filenode could be
2161 2167 reused (ex. rebase or mode-only amend a REVIDX_EXTSTORED file).
2162 2168 """
2163 2169
2164 2170 def __init__(self, originalfctx, datafunc=None, path=None, flags=None,
2165 2171 copied=None, ctx=None):
2166 2172 """originalfctx: filecontext to duplicate
2167 2173
2168 2174 datafunc: None or a function to override data (file content). It is a
2169 2175 function so the data can be computed lazily. path, flags, copied, ctx: None or an overridden value
2170 2176
2171 2177 copied could be (path, rev), or False. copied could also be just path,
2172 2178 and will be converted to (path, nullid). This simplifies some callers.
2173 2179 """
2174 2180
2175 2181 if path is None:
2176 2182 path = originalfctx.path()
2177 2183 if ctx is None:
2178 2184 ctx = originalfctx.changectx()
2179 2185 ctxmatch = lambda: True
2180 2186 else:
2181 2187 ctxmatch = lambda: ctx == originalfctx.changectx()
2182 2188
2183 2189 repo = originalfctx.repo()
2184 2190 flog = originalfctx.filelog()
2185 2191 super(overlayfilectx, self).__init__(repo, path, flog, ctx)
2186 2192
2187 2193 if copied is None:
2188 2194 copied = originalfctx.renamed()
2189 2195 copiedmatch = lambda: True
2190 2196 else:
2191 2197 if copied and not isinstance(copied, tuple):
2192 2198 # repo._filecommit will recalculate copyrev so nullid is okay
2193 2199 copied = (copied, nullid)
2194 2200 copiedmatch = lambda: copied == originalfctx.renamed()
2195 2201
2196 2202 # When data, copied (could affect data), ctx (could affect filelog
2197 2203 # parents) are not overridden, rawdata, rawflags, and filenode may be
2198 2204 # reused (repo._filecommit should double check filelog parents).
2199 2205 #
2200 2206 # path, flags are not hashed in the filelog (but in the manifestlog)
2201 2207 # so they do not affect reusability here.
2202 2208 #
2203 2209 # If ctx or copied is overridden to the same value as originalfctx,
2204 2210 # still consider it reusable. originalfctx.renamed() may be a bit
2205 2211 # expensive so it's not called unless necessary. Assuming datafunc is
2206 2212 # always expensive, do not call it for this "reusable" test.
2207 2213 reusable = datafunc is None and ctxmatch() and copiedmatch()
2208 2214
2209 2215 if datafunc is None:
2210 2216 datafunc = originalfctx.data
2211 2217 if flags is None:
2212 2218 flags = originalfctx.flags()
2213 2219
2214 2220 self._datafunc = datafunc
2215 2221 self._flags = flags
2216 2222 self._copied = copied
2217 2223
2218 2224 if reusable:
2219 2225 # copy extra fields from originalfctx
2220 2226 attrs = ['rawdata', 'rawflags', '_filenode', '_filerev']
2221 2227 for attr in attrs:
2222 2228 if util.safehasattr(originalfctx, attr):
2223 2229 setattr(self, attr, getattr(originalfctx, attr))
2224 2230
2225 2231 def data(self):
2226 2232 return self._datafunc()
2227 2233
2228 2234 class metadataonlyctx(committablectx):
2229 2235 """Like memctx but it's reusing the manifest of different commit.
2230 2236 Intended to be used by lightweight operations that are creating
2231 2237 metadata-only changes.
2232 2238
2233 2239 Revision information is supplied at initialization time. 'repo' is the
2234 2240 current localrepo, 'ctx' is the original revision whose manifest we're
2235 2241 reusing, 'parents' is a sequence of two parent revision identifiers
2236 2242 (pass None for every missing parent), and 'text' is the commit message.
2237 2243
2238 2244 user receives the committer name and defaults to the current
2239 2245 repository username. date is the commit date in any format supported
2240 2246 by util.parsedate() and defaults to the current date. extra is a
2241 2247 dictionary of metadata or is left empty.
2242 2248 """
2243 2249 def __new__(cls, repo, originalctx, *args, **kwargs):
2244 2250 return super(metadataonlyctx, cls).__new__(cls, repo)
2245 2251
2246 2252 def __init__(self, repo, originalctx, parents, text, user=None, date=None,
2247 2253 extra=None, editor=False):
2248 2254 super(metadataonlyctx, self).__init__(repo, text, user, date, extra)
2249 2255 self._rev = None
2250 2256 self._node = None
2251 2257 self._originalctx = originalctx
2252 2258 self._manifestnode = originalctx.manifestnode()
2253 2259 parents = [(p or nullid) for p in parents]
2254 2260 p1, p2 = self._parents = [changectx(self._repo, p) for p in parents]
2255 2261
2256 2262 # sanity check to ensure that the reused manifest parents are
2257 2263 # manifests of our commit parents
2258 2264 mp1, mp2 = self.manifestctx().parents
2259 2265 if p1 != nullid and p1.manifestnode() != mp1:
2260 2266 raise RuntimeError('can\'t reuse the manifest: '
2261 2267 'its p1 doesn\'t match the new ctx p1')
2262 2268 if p2 != nullid and p2.manifestnode() != mp2:
2263 2269 raise RuntimeError('can\'t reuse the manifest: '
2264 2270 'its p2 doesn\'t match the new ctx p2')
2265 2271
2266 2272 self._files = originalctx.files()
2267 2273 self.substate = {}
2268 2274
2269 2275 if editor:
2270 2276 self._text = editor(self._repo, self, [])
2271 2277 self._repo.savecommitmessage(self._text)
2272 2278
2273 2279 def manifestnode(self):
2274 2280 return self._manifestnode
2275 2281
2276 2282 @property
2277 2283 def _manifestctx(self):
2278 2284 return self._repo.manifestlog[self._manifestnode]
2279 2285
2280 2286 def filectx(self, path, filelog=None):
2281 2287 return self._originalctx.filectx(path, filelog=filelog)
2282 2288
2283 2289 def commit(self):
2284 2290 """commit context to the repo"""
2285 2291 return self._repo.commitctx(self)
2286 2292
2287 2293 @property
2288 2294 def _manifest(self):
2289 2295 return self._originalctx.manifest()
2290 2296
2291 2297 @propertycache
2292 2298 def _status(self):
2293 2299 """Calculate exact status from ``files`` specified in the ``origctx``
2294 2300 and parents manifests.
2295 2301 """
2296 2302 man1 = self.p1().manifest()
2297 2303 p2 = self._parents[1]
2298 2304 # "1 < len(self._parents)" can't be used for checking
2299 2305 # existence of the 2nd parent, because "metadataonlyctx._parents" is
2300 2306 # explicitly initialized by the list, of which length is 2.
2301 2307 if p2.node() != nullid:
2302 2308 man2 = p2.manifest()
2303 2309 managing = lambda f: f in man1 or f in man2
2304 2310 else:
2305 2311 managing = lambda f: f in man1
2306 2312
2307 2313 modified, added, removed = [], [], []
2308 2314 for f in self._files:
2309 2315 if not managing(f):
2310 2316 added.append(f)
2311 2317 elif self[f]:
2312 2318 modified.append(f)
2313 2319 else:
2314 2320 removed.append(f)
2315 2321
2316 2322 return scmutil.status(modified, added, removed, [], [], [], [])
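As a hedged sketch of the intended use, rewriting only the metadata of an existing changectx `old` (hypothetical) while keeping its parents, and therefore its manifest, intact:

from mercurial import context

new = context.metadataonlyctx(repo, old,
                              parents=(old.p1().node(), old.p2().node()),
                              text='reworded commit message',
                              user=old.user(), date=old.date(),
                              extra=old.extra())
newnode = new.commit()   # the manifest sanity checks above must pass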
@@ -1,480 +1,496
1 1 # sparse.py - functionality for sparse checkouts
2 2 #
3 3 # Copyright 2014 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import collections
11 11 import hashlib
12 12 import os
13 13
14 14 from .i18n import _
15 15 from .node import nullid
16 16 from . import (
17 17 error,
18 18 match as matchmod,
19 19 merge as mergemod,
20 20 pycompat,
21 21 )
22 22
23 23 # Whether sparse features are enabled. This variable is intended to be
24 24 # temporary to facilitate porting sparse to core. It should eventually be
25 25 # a per-repo option, possibly a repo requirement.
26 26 enabled = False
27 27
28 28 def parseconfig(ui, raw):
29 29 """Parse sparse config file content.
30 30
31 31 Returns a tuple of includes, excludes, and profiles.
32 32 """
33 33 includes = set()
34 34 excludes = set()
35 35 current = includes
36 36 profiles = []
37 37 for line in raw.split('\n'):
38 38 line = line.strip()
39 39 if not line or line.startswith('#'):
40 40 # empty or comment line, skip
41 41 continue
42 42 elif line.startswith('%include '):
43 43 line = line[9:].strip()
44 44 if line:
45 45 profiles.append(line)
46 46 elif line == '[include]':
47 47 if current != includes:
48 48 # TODO pass filename into this API so we can report it.
49 49 raise error.Abort(_('sparse config cannot have includes ' +
50 50 'after excludes'))
51 51 continue
52 52 elif line == '[exclude]':
53 53 current = excludes
54 54 elif line:
55 55 if line.strip().startswith('/'):
56 56 ui.warn(_('warning: sparse profile cannot use' +
57 57 ' paths starting with /, ignoring %s\n') % line)
58 58 continue
59 59 current.add(line)
60 60
61 61 return includes, excludes, profiles
62 62
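A quick sketch of the parser contract (the `ui` object and the profile name are hypothetical):

raw = '\n'.join([
    '%include base.sparse',
    '[include]',
    'frontend/**',
    '[exclude]',
    'frontend/tests/**',
])
includes, excludes, profiles = parseconfig(ui, raw)
# includes == {'frontend/**'}, excludes == {'frontend/tests/**'},
# profiles == ['base.sparse']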
63 63 # Exists as separate function to facilitate monkeypatching.
64 64 def readprofile(repo, profile, changeid):
65 65 """Resolve the raw content of a sparse profile file."""
66 66 # TODO add some kind of cache here because this incurs a manifest
67 67 # resolve and can be slow.
68 68 return repo.filectx(profile, changeid=changeid).data()
69 69
70 70 def patternsforrev(repo, rev):
71 71 """Obtain sparse checkout patterns for the given rev.
72 72
73 73 Returns a tuple of iterables representing includes, excludes, and
74 74 profiles.
75 75 """
76 76 # Feature isn't enabled. No-op.
77 77 if not enabled:
78 78 return set(), set(), []
79 79
80 80 raw = repo.vfs.tryread('sparse')
81 81 if not raw:
82 82 return set(), set(), []
83 83
84 84 if rev is None:
85 85 raise error.Abort(_('cannot parse sparse patterns from working '
86 86 'directory'))
87 87
88 88 includes, excludes, profiles = parseconfig(repo.ui, raw)
89 89 ctx = repo[rev]
90 90
91 91 if profiles:
92 92 visited = set()
93 93 while profiles:
94 94 profile = profiles.pop()
95 95 if profile in visited:
96 96 continue
97 97
98 98 visited.add(profile)
99 99
100 100 try:
101 101 raw = readprofile(repo, profile, rev)
102 102 except error.ManifestLookupError:
103 103 msg = (
104 104 "warning: sparse profile '%s' not found "
105 105 "in rev %s - ignoring it\n" % (profile, ctx))
106 106 # experimental config: sparse.missingwarning
107 107 if repo.ui.configbool(
108 108 'sparse', 'missingwarning', True):
109 109 repo.ui.warn(msg)
110 110 else:
111 111 repo.ui.debug(msg)
112 112 continue
113 113
114 114 pincludes, pexcludes, subprofs = parseconfig(repo.ui, raw)
115 115 includes.update(pincludes)
116 116 excludes.update(pexcludes)
117 117 for subprofile in subprofs:
118 118 profiles.append(subprofile)
119 119
120 120 profiles = visited
121 121
122 122 if includes:
123 123 includes.add('.hg*')
124 124
125 125 return includes, excludes, profiles
126 126
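A usage sketch, assuming a hypothetical `repo` with sparse enabled:

includes, excludes, profiles = patternsforrev(repo, repo['.'].rev())
# when any profiles were listed, 'profiles' is the set of every
# profile visited transitively, and '.hg*' has been added to
# 'includes' if any include pattern was present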
127 127 def activeprofiles(repo):
128 128 revs = [repo.changelog.rev(node) for node in
129 129 repo.dirstate.parents() if node != nullid]
130 130
131 131 profiles = set()
132 132 for rev in revs:
133 133 profiles.update(patternsforrev(repo, rev)[2])
134 134
135 135 return profiles
136 136
137 137 def configsignature(repo, includetemp=True):
138 138 """Obtain the signature string for the current sparse configuration.
139 139
140 140 This is used to construct a cache key for matchers.
141 141 """
142 142 cache = repo._sparsesignaturecache
143 143
144 144 signature = cache.get('signature')
145 145
146 146 if includetemp:
147 147 tempsignature = cache.get('tempsignature')
148 148 else:
149 149 tempsignature = '0'
150 150
151 151 if signature is None or (includetemp and tempsignature is None):
152 152 signature = hashlib.sha1(repo.vfs.tryread('sparse')).hexdigest()
153 153 cache['signature'] = signature
154 154
155 155 if includetemp:
156 156 raw = repo.vfs.tryread('tempsparse')
157 157 tempsignature = hashlib.sha1(raw).hexdigest()
158 158 cache['tempsignature'] = tempsignature
159 159
160 160 return '%s %s' % (signature, tempsignature)
161 161
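The invalidation property this buys, as a sketch (hypothetical `repo`; writeconfig() is defined just below):

sig1 = configsignature(repo)
writeconfig(repo, {'x/**'}, set(), [])  # clears the signature cache
sig2 = configsignature(repo)
assert sig2 != sig1  # assuming the on-disk config actually changed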
162 162 def writeconfig(repo, includes, excludes, profiles):
163 163 """Write the sparse config file given a sparse configuration."""
164 164 with repo.vfs('sparse', 'wb') as fh:
165 165 for p in sorted(profiles):
166 166 fh.write('%%include %s\n' % p)
167 167
168 168 if includes:
169 169 fh.write('[include]\n')
170 170 for i in sorted(includes):
171 171 fh.write(i)
172 172 fh.write('\n')
173 173
174 174 if excludes:
175 175 fh.write('[exclude]\n')
176 176 for e in sorted(excludes):
177 177 fh.write(e)
178 178 fh.write('\n')
179 179
180 180 repo._sparsesignaturecache.clear()
181 181
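The format written here is exactly what parseconfig() reads back, so a round trip is lossless; a sketch with a hypothetical `repo`:

writeconfig(repo, {'frontend/**'}, {'frontend/tests/**'},
            ['base.sparse'])
raw = repo.vfs.tryread('sparse')
assert parseconfig(repo.ui, raw) == ({'frontend/**'},
                                     {'frontend/tests/**'},
                                     ['base.sparse'])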
182 182 def readtemporaryincludes(repo):
183 183 raw = repo.vfs.tryread('tempsparse')
184 184 if not raw:
185 185 return set()
186 186
187 187 return set(raw.split('\n'))
188 188
189 189 def writetemporaryincludes(repo, includes):
190 190 repo.vfs.write('tempsparse', '\n'.join(sorted(includes)))
191 191 repo._sparsesignaturecache.clear()
192 192
193 193 def addtemporaryincludes(repo, additional):
194 194 includes = readtemporaryincludes(repo)
195 195 for i in additional:
196 196 includes.add(i)
197 197 writetemporaryincludes(repo, includes)
198 198
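A sketch of the temporary include lifecycle (hypothetical `repo` and path):

addtemporaryincludes(repo, ['generated/schema.py'])
assert 'generated/schema.py' in readtemporaryincludes(repo)
# matcher(repo) now force-includes the path until
# prunetemporaryincludes() drops it again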
def prunetemporaryincludes(repo):
    if not enabled or not repo.vfs.exists('tempsparse'):
        return

    origstatus = repo.status()
    modified, added, removed, deleted, a, b, c = origstatus
    if modified or added or removed or deleted:
        # Still have pending changes. Don't bother trying to prune.
        return

    sparsematch = matcher(repo, includetemp=False)
    dirstate = repo.dirstate
    actions = []
    dropped = []
    tempincludes = readtemporaryincludes(repo)
    for file in tempincludes:
        if file in dirstate and not sparsematch(file):
            message = _('dropping temporarily included sparse files')
            actions.append((file, None, message))
            dropped.append(file)

    typeactions = collections.defaultdict(list)
    typeactions['r'] = actions
    mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False)

    # Fix dirstate
    for file in dropped:
        dirstate.drop(file)

    repo.vfs.unlink('tempsparse')
    repo._sparsesignaturecache.clear()
    msg = _('cleaned up %d temporarily added file(s) from the '
            'sparse checkout\n')
    repo.ui.status(msg % len(tempincludes))

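Only files that are both tracked and no longer matched by the permanent
config are dropped; everything else is left in place. Condensed into a
single predicate, with ``tracked`` standing in for dirstate membership and
``sparsematch`` for the ``includetemp=False`` matcher above::

  def droppable(tempincludes, tracked, sparsematch):
      # A temporary include can be pruned once the permanent sparse
      # config stops matching it; untracked entries need no removal.
      return [f for f in tempincludes
              if f in tracked and not sparsematch(f)]
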
def matcher(repo, revs=None, includetemp=True):
    """Obtain a matcher for sparse working directories for the given revs.

    If multiple revisions are specified, the matcher is the union of the
    matchers for all revs.

    ``includetemp`` indicates whether to use the temporary sparse profile.
    """
    # If sparse isn't enabled, the sparse matcher matches everything.
    if not enabled:
        return matchmod.always(repo.root, '')

    if not revs or revs == [None]:
        revs = [repo.changelog.rev(node)
                for node in repo.dirstate.parents() if node != nullid]

    signature = configsignature(repo, includetemp=includetemp)

    key = '%s %s' % (signature, ' '.join(map(pycompat.bytestr, revs)))

    result = repo._sparsematchercache.get(key)
    if result:
        return result

    matchers = []
    for rev in revs:
        try:
            includes, excludes, profiles = patternsforrev(repo, rev)

            if includes or excludes:
                # Explicitly include subdirectories of includes so
                # status will walk them down to the actual include.
                subdirs = set()
                for include in includes:
                    # TODO consider using posix path functions here so Windows
                    # \ directory separators don't come into play.
                    dirname = os.path.dirname(include)
                    # basename is used to avoid issues with absolute
                    # paths (which on Windows can include the drive).
                    while os.path.basename(dirname):
                        subdirs.add(dirname)
                        dirname = os.path.dirname(dirname)

                matcher = matchmod.match(repo.root, '', [],
                                         include=includes, exclude=excludes,
                                         default='relpath')
                if subdirs:
                    matcher = matchmod.forceincludematcher(matcher, subdirs)
                matchers.append(matcher)
        except IOError:
            pass

    if not matchers:
        result = matchmod.always(repo.root, '')
    elif len(matchers) == 1:
        result = matchers[0]
    else:
        result = matchmod.unionmatcher(matchers)

    if includetemp:
        tempincludes = readtemporaryincludes(repo)
        result = matchmod.forceincludematcher(result, tempincludes)

    repo._sparsematchercache[key] = result

    return result

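The subdirectory expansion above ensures a status walk descends from the
repository root all the way to each include. In isolation, the ancestor
computation looks like this (same ``os.path`` logic and the same Windows
caveat as above)::

  import os

  def parentdirs(includes):
      subdirs = set()
      for include in includes:
          dirname = os.path.dirname(include)
          # basename() guards against absolute paths, whose dirname
          # never becomes empty (e.g. drive letters on Windows).
          while os.path.basename(dirname):
              subdirs.add(dirname)
              dirname = os.path.dirname(dirname)
      return subdirs

  # parentdirs({'frontend/src/app/**'})
  # -> {'frontend', 'frontend/src', 'frontend/src/app'}
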
def filterupdatesactions(repo, wctx, mctx, branchmerge, actions):
    """Filter updates to only lay out files that match the sparse rules."""
    if not enabled:
        return actions

    oldrevs = [pctx.rev() for pctx in wctx.parents()]
    oldsparsematch = matcher(repo, oldrevs)

    if oldsparsematch.always():
        return actions

    files = set()
    prunedactions = {}

    if branchmerge:
        # If we're merging, use the wctx filter, since we're merging into
        # the wctx.
        sparsematch = matcher(repo, [wctx.parents()[0].rev()])
    else:
        # If we're updating, use the target context's filter, since we're
        # moving to the target context.
        sparsematch = matcher(repo, [mctx.rev()])

    temporaryfiles = []
    for file, action in actions.iteritems():
        type, args, msg = action
        files.add(file)
        if sparsematch(file):
            prunedactions[file] = action
        elif type == 'm':
            temporaryfiles.append(file)
            prunedactions[file] = action
        elif branchmerge:
            if type != 'k':
                temporaryfiles.append(file)
                prunedactions[file] = action
        elif type == 'f':
            prunedactions[file] = action
        elif file in wctx:
            prunedactions[file] = ('r', args, msg)

    if temporaryfiles:
        repo.ui.status(_('temporarily included %d file(s) in the sparse '
                         'checkout for merging\n') % len(temporaryfiles))
        addtemporaryincludes(repo, temporaryfiles)

        # Add the new files to the working copy so they can be merged, etc
        actions = []
        message = 'temporarily adding to sparse checkout'
        wctxmanifest = repo[None].manifest()
        for file in temporaryfiles:
            if file in wctxmanifest:
                fctx = repo[None][file]
                actions.append((file, (fctx.flags(), False), message))

        typeactions = collections.defaultdict(list)
        typeactions['g'] = actions
        mergemod.applyupdates(repo, typeactions, repo[None], repo['.'],
                              False)

        dirstate = repo.dirstate
        for file, flags, msg in actions:
            dirstate.normal(file)

    profiles = activeprofiles(repo)
    changedprofiles = profiles & files
    # If an active profile changed during the update, refresh the checkout.
    # Don't do this during a branch merge, since all incoming changes should
    # have been handled by the temporary includes above.
    if changedprofiles and not branchmerge:
        mf = mctx.manifest()
        for file in mf:
            old = oldsparsematch(file)
            new = sparsematch(file)
            if not old and new:
                flags = mf.flags(file)
                prunedactions[file] = ('g', (flags, False), '')
            elif old and not new:
                prunedactions[file] = ('r', [], '')

    return prunedactions

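Condensed, the per-file rules above read as follows. This is only a
restatement for clarity, not Mercurial API; ``kind`` is the merge action
type and the return value is (keep action, needs temporary include,
convert to removal)::

  def pruneaction(kind, branchmerge, insparse, inwctx):
      if insparse:
          return True, False, False   # inside the sparse profile
      if kind == 'm':
          return True, True, False    # merges need the file on disk
      if branchmerge:
          keep = kind != 'k'          # 'k' (keep) needs no file on disk
          return keep, keep, False
      if kind == 'f':
          return True, False, False   # forgets are always safe
      if inwctx:
          return True, False, True    # present but now out of scope
      return False, False, False      # nothing to do
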
def refreshwdir(repo, origstatus, origsparsematch, force=False):
    """Refreshes working directory by taking sparse config into account.

    The old status and sparse matcher are compared against the current
    sparse matcher.

    Will abort if a file with pending changes is being excluded or included
    unless ``force`` is True.
    """
    modified, added, removed, deleted, unknown, ignored, clean = origstatus

    # Verify there are no pending changes
    pending = set()
    pending.update(modified)
    pending.update(added)
    pending.update(removed)
    sparsematch = matcher(repo)
    abort = False

    for f in pending:
        if not sparsematch(f):
            repo.ui.warn(_("pending changes to '%s'\n") % f)
            abort = not force

    if abort:
        raise error.Abort(_('could not update sparseness due to pending '
                            'changes'))

    # Calculate actions
    dirstate = repo.dirstate
    ctx = repo['.']
    added = []
    lookup = []
    dropped = []
    mf = ctx.manifest()
    files = set(mf)

    actions = {}

    for file in files:
        old = origsparsematch(file)
        new = sparsematch(file)
        # Add files that are newly included, or that don't exist in
        # the dirstate yet.
        if (new and not old) or (old and new and file not in dirstate):
            fl = mf.flags(file)
            if repo.wvfs.exists(file):
                actions[file] = ('e', (fl,), '')
                lookup.append(file)
            else:
                actions[file] = ('g', (fl, False), '')
                added.append(file)
        # Drop files that are newly excluded, or that still exist in
        # the dirstate.
        elif (old and not new) or (not old and not new and file in dirstate):
            dropped.append(file)
            if file not in pending:
                actions[file] = ('r', [], '')

    # Verify there are no pending changes in newly included files
    abort = False
    for file in lookup:
        repo.ui.warn(_("pending changes to '%s'\n") % file)
        abort = not force
    if abort:
        raise error.Abort(_('cannot change sparseness due to pending '
                            'changes (delete the files or use '
                            '--force to bring them back dirty)'))

    # Check for files that were only in the dirstate.
    for file, state in dirstate.iteritems():
        if file not in files:
            old = origsparsematch(file)
            new = sparsematch(file)
            if old and not new:
                dropped.append(file)

    # Apply changes to disk
    typeactions = dict((m, []) for m in 'a f g am cd dc r dm dg m e k'.split())
    for f, (m, args, msg) in actions.iteritems():
        if m not in typeactions:
            typeactions[m] = []
        typeactions[m].append((f, args, msg))

    mergemod.applyupdates(repo, typeactions, repo[None], repo['.'], False)

    # Fix dirstate
    for file in added:
        dirstate.normal(file)

    for file in dropped:
        dirstate.drop(file)

    for file in lookup:
        # File exists on disk, and we're bringing it back in an unknown state.
        dirstate.normallookup(file)

    return added, dropped, lookup

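The add/drop classification above depends only on the two match results
and dirstate membership. Isolated, it reads::

  def classify(old, new, indirstate):
      # 'add': newly included, or included but absent from the dirstate.
      if (new and not old) or (new and old and not indirstate):
          return 'add'
      # 'drop': newly excluded, or excluded but still in the dirstate.
      if (old and not new) or (not old and not new and indirstate):
          return 'drop'
      return 'keep'
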
def aftercommit(repo, node):
    """Perform actions after a working directory commit."""
    # This function is called unconditionally, even if sparse isn't
    # enabled.
    ctx = repo[node]

    profiles = patternsforrev(repo, ctx.rev())[2]

    # profiles will only have data if sparse is enabled.
    if set(profiles) & set(ctx.files()):
        origstatus = repo.status()
        origsparsematch = matcher(repo)
        refreshwdir(repo, origstatus, origsparsematch, force=True)

    prunetemporaryincludes(repo)