windows: recompute flags when committing a merge (issue1802)...
Matt Mackall
r15337:cf5f9df6 stable
@@ -0,0 +1,69 @@
1 Create extension that can disable exec checks:
2
3 $ cat > noexec.py <<EOF
4 > from mercurial import extensions, util
5 > def setflags(orig, f, l, x):
6 > pass
7 > def checkexec(orig, path):
8 > return False
9 > def extsetup(ui):
10 > extensions.wrapfunction(util, 'setflags', setflags)
11 > extensions.wrapfunction(util, 'checkexec', checkexec)
12 > EOF
13
14 $ hg init unix-repo
15 $ cd unix-repo
16 $ touch a
17 $ hg add a
18 $ hg commit -m 'unix: add a'
19 $ hg clone . ../win-repo
20 updating to branch default
21 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
22 $ chmod +x a
23 $ hg commit -m 'unix: chmod a'
24 $ hg manifest -v
25 755 * a
26
27 $ cd ../win-repo
28
29 $ touch b
30 $ hg add b
31 $ hg commit -m 'win: add b'
32
33 $ hg manifest -v
34 644 a
35 644 b
36
37 $ hg pull
38 pulling from $TESTTMP/unix-repo
39 searching for changes
40 adding changesets
41 adding manifests
42 adding file changes
43 added 1 changesets with 0 changes to 0 files (+1 heads)
44 (run 'hg heads' to see heads, 'hg merge' to merge)
45
46 $ hg manifest -v -r tip
47 755 * a
48
49 Simulate a Windows merge:
50
51 $ hg --config extensions.n=$TESTTMP/noexec.py merge --debug
52 searching for copies back to rev 1
53 unmatched files in local:
54 b
55 resolving manifests
56 overwrite None partial False
57 ancestor a03b0deabf2b local d6fa54f68ae1+ remote 2d8bcf2dda39
58 a: update permissions -> e
59 updating: a 1/1 files (100.00%)
60 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
61 (branch merge, don't forget to commit)
62
63 Simulate a Windows commit:
64
65 $ hg --config extensions.n=$TESTTMP/noexec.py commit -m 'win: merge'
66
67 $ hg manifest -v
68 755 * a
69 644 b
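
A note on the wrapping in noexec.py above: extensions.wrapfunction passes the
original function as the wrapper's first argument, which is why setflags and
checkexec take an 'orig' parameter they never call -- the extension swallows
setflags entirely and forces checkexec to report False, mimicking a filesystem
with no exec bit. A rough sketch of the wrapping mechanism (simplified for
illustration; the real mercurial.extensions.wrapfunction differs in detail):

    import functools

    def wrapfunction(container, funcname, wrapper):
        # replace container.funcname with a shim that hands the original
        # function to the wrapper as its first argument
        origfn = getattr(container, funcname)
        @functools.wraps(origfn)
        def wrapped(*args, **kwargs):
            return wrapper(origfn, *args, **kwargs)
        setattr(container, funcname, wrapped)
        return origfn
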
@@ -1,1116 +1,1137 @@
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid, nullrev, short, hex
9 9 from i18n import _
10 10 import ancestor, bdiff, error, util, scmutil, subrepo, patch, encoding
11 11 import match as matchmod
12 12 import os, errno, stat
13 13
14 14 propertycache = util.propertycache
15 15
16 16 class changectx(object):
17 17 """A changecontext object makes access to data related to a particular
18 18 changeset convenient."""
19 19 def __init__(self, repo, changeid=''):
20 20 """changeid is a revision number, node, or tag"""
21 21 if changeid == '':
22 22 changeid = '.'
23 23 self._repo = repo
24 24 if isinstance(changeid, (long, int)):
25 25 self._rev = changeid
26 26 self._node = self._repo.changelog.node(changeid)
27 27 else:
28 28 self._node = self._repo.lookup(changeid)
29 29 self._rev = self._repo.changelog.rev(self._node)
30 30
31 31 def __str__(self):
32 32 return short(self.node())
33 33
34 34 def __int__(self):
35 35 return self.rev()
36 36
37 37 def __repr__(self):
38 38 return "<changectx %s>" % str(self)
39 39
40 40 def __hash__(self):
41 41 try:
42 42 return hash(self._rev)
43 43 except AttributeError:
44 44 return id(self)
45 45
46 46 def __eq__(self, other):
47 47 try:
48 48 return self._rev == other._rev
49 49 except AttributeError:
50 50 return False
51 51
52 52 def __ne__(self, other):
53 53 return not (self == other)
54 54
55 55 def __nonzero__(self):
56 56 return self._rev != nullrev
57 57
58 58 @propertycache
59 59 def _changeset(self):
60 60 return self._repo.changelog.read(self.node())
61 61
62 62 @propertycache
63 63 def _manifest(self):
64 64 return self._repo.manifest.read(self._changeset[0])
65 65
66 66 @propertycache
67 67 def _manifestdelta(self):
68 68 return self._repo.manifest.readdelta(self._changeset[0])
69 69
70 70 @propertycache
71 71 def _parents(self):
72 72 p = self._repo.changelog.parentrevs(self._rev)
73 73 if p[1] == nullrev:
74 74 p = p[:-1]
75 75 return [changectx(self._repo, x) for x in p]
76 76
77 77 @propertycache
78 78 def substate(self):
79 79 return subrepo.state(self, self._repo.ui)
80 80
81 81 def __contains__(self, key):
82 82 return key in self._manifest
83 83
84 84 def __getitem__(self, key):
85 85 return self.filectx(key)
86 86
87 87 def __iter__(self):
88 88 for f in sorted(self._manifest):
89 89 yield f
90 90
91 91 def changeset(self):
92 92 return self._changeset
93 93 def manifest(self):
94 94 return self._manifest
95 95 def manifestnode(self):
96 96 return self._changeset[0]
97 97
98 98 def rev(self):
99 99 return self._rev
100 100 def node(self):
101 101 return self._node
102 102 def hex(self):
103 103 return hex(self._node)
104 104 def user(self):
105 105 return self._changeset[1]
106 106 def date(self):
107 107 return self._changeset[2]
108 108 def files(self):
109 109 return self._changeset[3]
110 110 def description(self):
111 111 return self._changeset[4]
112 112 def branch(self):
113 113 return encoding.tolocal(self._changeset[5].get("branch"))
114 114 def extra(self):
115 115 return self._changeset[5]
116 116 def tags(self):
117 117 return self._repo.nodetags(self._node)
118 118 def bookmarks(self):
119 119 return self._repo.nodebookmarks(self._node)
120 120 def hidden(self):
121 121 return self._rev in self._repo.changelog.hiddenrevs
122 122
123 123 def parents(self):
124 124 """return contexts for each parent changeset"""
125 125 return self._parents
126 126
127 127 def p1(self):
128 128 return self._parents[0]
129 129
130 130 def p2(self):
131 131 if len(self._parents) == 2:
132 132 return self._parents[1]
133 133 return changectx(self._repo, -1)
134 134
135 135 def children(self):
136 136 """return contexts for each child changeset"""
137 137 c = self._repo.changelog.children(self._node)
138 138 return [changectx(self._repo, x) for x in c]
139 139
140 140 def ancestors(self):
141 141 for a in self._repo.changelog.ancestors(self._rev):
142 142 yield changectx(self._repo, a)
143 143
144 144 def descendants(self):
145 145 for d in self._repo.changelog.descendants(self._rev):
146 146 yield changectx(self._repo, d)
147 147
148 148 def _fileinfo(self, path):
149 149 if '_manifest' in self.__dict__:
150 150 try:
151 151 return self._manifest[path], self._manifest.flags(path)
152 152 except KeyError:
153 153 raise error.LookupError(self._node, path,
154 154 _('not found in manifest'))
155 155 if '_manifestdelta' in self.__dict__ or path in self.files():
156 156 if path in self._manifestdelta:
157 157 return self._manifestdelta[path], self._manifestdelta.flags(path)
158 158 node, flag = self._repo.manifest.find(self._changeset[0], path)
159 159 if not node:
160 160 raise error.LookupError(self._node, path,
161 161 _('not found in manifest'))
162 162
163 163 return node, flag
164 164
165 165 def filenode(self, path):
166 166 return self._fileinfo(path)[0]
167 167
168 168 def flags(self, path):
169 169 try:
170 170 return self._fileinfo(path)[1]
171 171 except error.LookupError:
172 172 return ''
173 173
174 174 def filectx(self, path, fileid=None, filelog=None):
175 175 """get a file context from this changeset"""
176 176 if fileid is None:
177 177 fileid = self.filenode(path)
178 178 return filectx(self._repo, path, fileid=fileid,
179 179 changectx=self, filelog=filelog)
180 180
181 181 def ancestor(self, c2):
182 182 """
183 183 return the ancestor context of self and c2
184 184 """
185 185 # deal with workingctxs
186 186 n2 = c2._node
187 187 if n2 is None:
188 188 n2 = c2._parents[0]._node
189 189 n = self._repo.changelog.ancestor(self._node, n2)
190 190 return changectx(self._repo, n)
191 191
192 192 def walk(self, match):
193 193 fset = set(match.files())
194 194 # for dirstate.walk, files=['.'] means "walk the whole tree".
195 195 # follow that here, too
196 196 fset.discard('.')
197 197 for fn in self:
198 198 for ffn in fset:
199 199 # match if the file is the exact name or a directory
200 200 if ffn == fn or fn.startswith("%s/" % ffn):
201 201 fset.remove(ffn)
202 202 break
203 203 if match(fn):
204 204 yield fn
205 205 for fn in sorted(fset):
206 206 if match.bad(fn, _('no such file in rev %s') % self) and match(fn):
207 207 yield fn
208 208
209 209 def sub(self, path):
210 210 return subrepo.subrepo(self, path)
211 211
212 212 def match(self, pats=[], include=None, exclude=None, default='glob'):
213 213 r = self._repo
214 214 return matchmod.match(r.root, r.getcwd(), pats,
215 215 include, exclude, default,
216 216 auditor=r.auditor, ctx=self)
217 217
218 218 def diff(self, ctx2=None, match=None, **opts):
219 219 """Returns a diff generator for the given contexts and matcher"""
220 220 if ctx2 is None:
221 221 ctx2 = self.p1()
222 222 if ctx2 is not None and not isinstance(ctx2, changectx):
223 223 ctx2 = self._repo[ctx2]
224 224 diffopts = patch.diffopts(self._repo.ui, opts)
225 225 return patch.diff(self._repo, ctx2.node(), self.node(),
226 226 match=match, opts=diffopts)
227 227
228 228 class filectx(object):
229 229 """A filecontext object makes access to data related to a particular
230 230 filerevision convenient."""
231 231 def __init__(self, repo, path, changeid=None, fileid=None,
232 232 filelog=None, changectx=None):
233 233 """changeid can be a changeset revision, node, or tag.
234 234 fileid can be a file revision or node."""
235 235 self._repo = repo
236 236 self._path = path
237 237
238 238 assert (changeid is not None
239 239 or fileid is not None
240 240 or changectx is not None), \
241 241 ("bad args: changeid=%r, fileid=%r, changectx=%r"
242 242 % (changeid, fileid, changectx))
243 243
244 244 if filelog:
245 245 self._filelog = filelog
246 246
247 247 if changeid is not None:
248 248 self._changeid = changeid
249 249 if changectx is not None:
250 250 self._changectx = changectx
251 251 if fileid is not None:
252 252 self._fileid = fileid
253 253
254 254 @propertycache
255 255 def _changectx(self):
256 256 return changectx(self._repo, self._changeid)
257 257
258 258 @propertycache
259 259 def _filelog(self):
260 260 return self._repo.file(self._path)
261 261
262 262 @propertycache
263 263 def _changeid(self):
264 264 if '_changectx' in self.__dict__:
265 265 return self._changectx.rev()
266 266 else:
267 267 return self._filelog.linkrev(self._filerev)
268 268
269 269 @propertycache
270 270 def _filenode(self):
271 271 if '_fileid' in self.__dict__:
272 272 return self._filelog.lookup(self._fileid)
273 273 else:
274 274 return self._changectx.filenode(self._path)
275 275
276 276 @propertycache
277 277 def _filerev(self):
278 278 return self._filelog.rev(self._filenode)
279 279
280 280 @propertycache
281 281 def _repopath(self):
282 282 return self._path
283 283
284 284 def __nonzero__(self):
285 285 try:
286 286 self._filenode
287 287 return True
288 288 except error.LookupError:
289 289 # file is missing
290 290 return False
291 291
292 292 def __str__(self):
293 293 return "%s@%s" % (self.path(), short(self.node()))
294 294
295 295 def __repr__(self):
296 296 return "<filectx %s>" % str(self)
297 297
298 298 def __hash__(self):
299 299 try:
300 300 return hash((self._path, self._filenode))
301 301 except AttributeError:
302 302 return id(self)
303 303
304 304 def __eq__(self, other):
305 305 try:
306 306 return (self._path == other._path
307 307 and self._filenode == other._filenode)
308 308 except AttributeError:
309 309 return False
310 310
311 311 def __ne__(self, other):
312 312 return not (self == other)
313 313
314 314 def filectx(self, fileid):
315 315 '''opens an arbitrary revision of the file without
316 316 opening a new filelog'''
317 317 return filectx(self._repo, self._path, fileid=fileid,
318 318 filelog=self._filelog)
319 319
320 320 def filerev(self):
321 321 return self._filerev
322 322 def filenode(self):
323 323 return self._filenode
324 324 def flags(self):
325 325 return self._changectx.flags(self._path)
326 326 def filelog(self):
327 327 return self._filelog
328 328
329 329 def rev(self):
330 330 if '_changectx' in self.__dict__:
331 331 return self._changectx.rev()
332 332 if '_changeid' in self.__dict__:
333 333 return self._changectx.rev()
334 334 return self._filelog.linkrev(self._filerev)
335 335
336 336 def linkrev(self):
337 337 return self._filelog.linkrev(self._filerev)
338 338 def node(self):
339 339 return self._changectx.node()
340 340 def hex(self):
341 341 return hex(self.node())
342 342 def user(self):
343 343 return self._changectx.user()
344 344 def date(self):
345 345 return self._changectx.date()
346 346 def files(self):
347 347 return self._changectx.files()
348 348 def description(self):
349 349 return self._changectx.description()
350 350 def branch(self):
351 351 return self._changectx.branch()
352 352 def extra(self):
353 353 return self._changectx.extra()
354 354 def manifest(self):
355 355 return self._changectx.manifest()
356 356 def changectx(self):
357 357 return self._changectx
358 358
359 359 def data(self):
360 360 return self._filelog.read(self._filenode)
361 361 def path(self):
362 362 return self._path
363 363 def size(self):
364 364 return self._filelog.size(self._filerev)
365 365
366 366 def cmp(self, fctx):
367 367 """compare with other file context
368 368
369 369 returns True if different than fctx.
370 370 """
371 371 if (fctx._filerev is None and self._repo._encodefilterpats
372 372 or self.size() == fctx.size()):
373 373 return self._filelog.cmp(self._filenode, fctx.data())
374 374
375 375 return True
376 376
377 377 def renamed(self):
378 378 """check if file was actually renamed in this changeset revision
379 379
380 380 If rename logged in file revision, we report copy for changeset only
381 381 if file revisions linkrev points back to the changeset in question
382 382 or both changeset parents contain different file revisions.
383 383 """
384 384
385 385 renamed = self._filelog.renamed(self._filenode)
386 386 if not renamed:
387 387 return renamed
388 388
389 389 if self.rev() == self.linkrev():
390 390 return renamed
391 391
392 392 name = self.path()
393 393 fnode = self._filenode
394 394 for p in self._changectx.parents():
395 395 try:
396 396 if fnode == p.filenode(name):
397 397 return None
398 398 except error.LookupError:
399 399 pass
400 400 return renamed
401 401
402 402 def parents(self):
403 403 p = self._path
404 404 fl = self._filelog
405 405 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
406 406
407 407 r = self._filelog.renamed(self._filenode)
408 408 if r:
409 409 pl[0] = (r[0], r[1], None)
410 410
411 411 return [filectx(self._repo, p, fileid=n, filelog=l)
412 412 for p, n, l in pl if n != nullid]
413 413
414 414 def p1(self):
415 415 return self.parents()[0]
416 416
417 417 def p2(self):
418 418 p = self.parents()
419 419 if len(p) == 2:
420 420 return p[1]
421 421 return filectx(self._repo, self._path, fileid=-1, filelog=self._filelog)
422 422
423 423 def children(self):
424 424 # hard for renames
425 425 c = self._filelog.children(self._filenode)
426 426 return [filectx(self._repo, self._path, fileid=x,
427 427 filelog=self._filelog) for x in c]
428 428
429 429 def annotate(self, follow=False, linenumber=None):
430 430 '''returns a list of tuples of (ctx, line) for each line
431 431 in the file, where ctx is the filectx of the node where
432 432 that line was last changed.
433 433 This returns tuples of ((ctx, linenumber), line) for each line,
434 434 if "linenumber" parameter is NOT "None".
435 435 In such tuples, linenumber means one at the first appearance
436 436 in the managed file.
437 437 To reduce annotation cost,
438 438 this returns fixed value(False is used) as linenumber,
439 439 if "linenumber" parameter is "False".'''
440 440
441 441 def decorate_compat(text, rev):
442 442 return ([rev] * len(text.splitlines()), text)
443 443
444 444 def without_linenumber(text, rev):
445 445 return ([(rev, False)] * len(text.splitlines()), text)
446 446
447 447 def with_linenumber(text, rev):
448 448 size = len(text.splitlines())
449 449 return ([(rev, i) for i in xrange(1, size + 1)], text)
450 450
451 451 decorate = (((linenumber is None) and decorate_compat) or
452 452 (linenumber and with_linenumber) or
453 453 without_linenumber)
454 454
455 455 def pair(parent, child):
456 456 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
457 457 child[0][b1:b2] = parent[0][a1:a2]
458 458 return child
459 459
460 460 getlog = util.lrucachefunc(lambda x: self._repo.file(x))
461 461 def getctx(path, fileid):
462 462 log = path == self._path and self._filelog or getlog(path)
463 463 return filectx(self._repo, path, fileid=fileid, filelog=log)
464 464 getctx = util.lrucachefunc(getctx)
465 465
466 466 def parents(f):
467 467 # we want to reuse filectx objects as much as possible
468 468 p = f._path
469 469 if f._filerev is None: # working dir
470 470 pl = [(n.path(), n.filerev()) for n in f.parents()]
471 471 else:
472 472 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
473 473
474 474 if follow:
475 475 r = f.renamed()
476 476 if r:
477 477 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
478 478
479 479 return [getctx(p, n) for p, n in pl if n != nullrev]
480 480
481 481 # use linkrev to find the first changeset where self appeared
482 482 if self.rev() != self.linkrev():
483 483 base = self.filectx(self.filerev())
484 484 else:
485 485 base = self
486 486
487 487 # This algorithm would prefer to be recursive, but Python is a
488 488 # bit recursion-hostile. Instead we do an iterative
489 489 # depth-first search.
490 490
491 491 visit = [base]
492 492 hist = {}
493 493 pcache = {}
494 494 needed = {base: 1}
495 495 while visit:
496 496 f = visit[-1]
497 497 if f not in pcache:
498 498 pcache[f] = parents(f)
499 499
500 500 ready = True
501 501 pl = pcache[f]
502 502 for p in pl:
503 503 if p not in hist:
504 504 ready = False
505 505 visit.append(p)
506 506 needed[p] = needed.get(p, 0) + 1
507 507 if ready:
508 508 visit.pop()
509 509 curr = decorate(f.data(), f)
510 510 for p in pl:
511 511 curr = pair(hist[p], curr)
512 512 if needed[p] == 1:
513 513 del hist[p]
514 514 else:
515 515 needed[p] -= 1
516 516
517 517 hist[f] = curr
518 518 pcache[f] = []
519 519
520 520 return zip(hist[base][0], hist[base][1].splitlines(True))
521 521
522 522 def ancestor(self, fc2, actx=None):
523 523 """
524 524 find the common ancestor file context, if any, of self, and fc2
525 525
526 526 If actx is given, it must be the changectx of the common ancestor
527 527 of self's and fc2's respective changesets.
528 528 """
529 529
530 530 if actx is None:
531 531 actx = self.changectx().ancestor(fc2.changectx())
532 532
533 533 # the trivial case: changesets are unrelated, files must be too
534 534 if not actx:
535 535 return None
536 536
537 537 # the easy case: no (relevant) renames
538 538 if fc2.path() == self.path() and self.path() in actx:
539 539 return actx[self.path()]
540 540 acache = {}
541 541
542 542 # prime the ancestor cache for the working directory
543 543 for c in (self, fc2):
544 544 if c._filerev is None:
545 545 pl = [(n.path(), n.filenode()) for n in c.parents()]
546 546 acache[(c._path, None)] = pl
547 547
548 548 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
549 549 def parents(vertex):
550 550 if vertex in acache:
551 551 return acache[vertex]
552 552 f, n = vertex
553 553 if f not in flcache:
554 554 flcache[f] = self._repo.file(f)
555 555 fl = flcache[f]
556 556 pl = [(f, p) for p in fl.parents(n) if p != nullid]
557 557 re = fl.renamed(n)
558 558 if re:
559 559 pl.append(re)
560 560 acache[vertex] = pl
561 561 return pl
562 562
563 563 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
564 564 v = ancestor.ancestor(a, b, parents)
565 565 if v:
566 566 f, n = v
567 567 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
568 568
569 569 return None
570 570
571 571 def ancestors(self):
572 572 visit = {}
573 573 c = self
574 574 while True:
575 575 for parent in c.parents():
576 576 visit[(parent.rev(), parent.node())] = parent
577 577 if not visit:
578 578 break
579 579 c = visit.pop(max(visit))
580 580 yield c
581 581
582 582 class workingctx(changectx):
583 583 """A workingctx object makes access to data related to
584 584 the current working directory convenient.
585 585 date - any valid date string or (unixtime, offset), or None.
586 586 user - username string, or None.
587 587 extra - a dictionary of extra values, or None.
588 588 changes - a list of file lists as returned by localrepo.status()
589 589 or None to use the repository status.
590 590 """
591 591 def __init__(self, repo, text="", user=None, date=None, extra=None,
592 592 changes=None):
593 593 self._repo = repo
594 594 self._rev = None
595 595 self._node = None
596 596 self._text = text
597 597 if date:
598 598 self._date = util.parsedate(date)
599 599 if user:
600 600 self._user = user
601 601 if changes:
602 602 self._status = list(changes[:4])
603 603 self._unknown = changes[4]
604 604 self._ignored = changes[5]
605 605 self._clean = changes[6]
606 606 else:
607 607 self._unknown = None
608 608 self._ignored = None
609 609 self._clean = None
610 610
611 611 self._extra = {}
612 612 if extra:
613 613 self._extra = extra.copy()
614 614 if 'branch' not in self._extra:
615 615 try:
616 616 branch = encoding.fromlocal(self._repo.dirstate.branch())
617 617 except UnicodeDecodeError:
618 618 raise util.Abort(_('branch name not in UTF-8!'))
619 619 self._extra['branch'] = branch
620 620 if self._extra['branch'] == '':
621 621 self._extra['branch'] = 'default'
622 622
623 623 def __str__(self):
624 624 return str(self._parents[0]) + "+"
625 625
626 626 def __repr__(self):
627 627 return "<workingctx %s>" % str(self)
628 628
629 629 def __nonzero__(self):
630 630 return True
631 631
632 632 def __contains__(self, key):
633 633 return self._repo.dirstate[key] not in "?r"
634 634
635 def _buildflagfunc(self):
636 # Create a fallback function for getting file flags when the
637 # filesystem doesn't support them
638
639 copiesget = self._repo.dirstate.copies().get
640
641 if len(self._parents) < 2:
642 # when we have one parent, it's easy: copy from parent
643 man = self._parents[0].manifest()
644 def func(f):
645 f = copiesget(f, f)
646 return man.flags(f)
647 else:
648 # merges are tricky: we try to reconstruct the unstored
649 # result from the merge (issue1802)
650 p1, p2 = self._parents
651 pa = p1.ancestor(p2)
652 m1, m2, ma = p1.manifest(), p2.manifest(), pa.manifest()
653
654 def func(f):
655 f = copiesget(f, f) # may be wrong for merges with copies
656 fl1, fl2, fla = m1.flags(f), m2.flags(f), ma.flags(f)
657 if fl1 == fl2:
658 return fl1
659 if fl1 == fla:
660 return fl2
661 if fl2 == fla:
662 return fl1
663 return '' # punt for conflicts
664
665 return func
666
667 @propertycache
668 def _flagfunc(self):
669 return self._repo.dirstate.flagfunc(self._buildflagfunc)
670
635 671 @propertycache
636 672 def _manifest(self):
637 673 """generate a manifest corresponding to the working directory"""
638 674
639 675 if self._unknown is None:
640 676 self.status(unknown=True)
641 677
642 678 man = self._parents[0].manifest().copy()
643 copied = self._repo.dirstate.copies()
644 679 if len(self._parents) > 1:
645 680 man2 = self.p2().manifest()
646 681 def getman(f):
647 682 if f in man:
648 683 return man
649 684 return man2
650 685 else:
651 686 getman = lambda f: man
652 def cf(f):
653 f = copied.get(f, f)
654 return getman(f).flags(f)
655 ff = self._repo.dirstate.flagfunc(cf)
687
688 copied = self._repo.dirstate.copies()
689 ff = self._flagfunc
656 690 modified, added, removed, deleted = self._status
657 691 unknown = self._unknown
658 692 for i, l in (("a", added), ("m", modified), ("u", unknown)):
659 693 for f in l:
660 694 orig = copied.get(f, f)
661 695 man[f] = getman(orig).get(orig, nullid) + i
662 696 try:
663 697 man.set(f, ff(f))
664 698 except OSError:
665 699 pass
666 700
667 701 for f in deleted + removed:
668 702 if f in man:
669 703 del man[f]
670 704
671 705 return man
672 706
673 707 def __iter__(self):
674 708 d = self._repo.dirstate
675 709 for f in d:
676 710 if d[f] != 'r':
677 711 yield f
678 712
679 713 @propertycache
680 714 def _status(self):
681 715 return self._repo.status()[:4]
682 716
683 717 @propertycache
684 718 def _user(self):
685 719 return self._repo.ui.username()
686 720
687 721 @propertycache
688 722 def _date(self):
689 723 return util.makedate()
690 724
691 725 @propertycache
692 726 def _parents(self):
693 727 p = self._repo.dirstate.parents()
694 728 if p[1] == nullid:
695 729 p = p[:-1]
696 730 self._parents = [changectx(self._repo, x) for x in p]
697 731 return self._parents
698 732
699 733 def status(self, ignored=False, clean=False, unknown=False):
700 734 """Explicit status query
701 735 Unless this method is used to query the working copy status, the
702 736 _status property will implicitly read the status using its default
703 737 arguments."""
704 738 stat = self._repo.status(ignored=ignored, clean=clean, unknown=unknown)
705 739 self._unknown = self._ignored = self._clean = None
706 740 if unknown:
707 741 self._unknown = stat[4]
708 742 if ignored:
709 743 self._ignored = stat[5]
710 744 if clean:
711 745 self._clean = stat[6]
712 746 self._status = stat[:4]
713 747 return stat
714 748
715 749 def manifest(self):
716 750 return self._manifest
717 751 def user(self):
718 752 return self._user or self._repo.ui.username()
719 753 def date(self):
720 754 return self._date
721 755 def description(self):
722 756 return self._text
723 757 def files(self):
724 758 return sorted(self._status[0] + self._status[1] + self._status[2])
725 759
726 760 def modified(self):
727 761 return self._status[0]
728 762 def added(self):
729 763 return self._status[1]
730 764 def removed(self):
731 765 return self._status[2]
732 766 def deleted(self):
733 767 return self._status[3]
734 768 def unknown(self):
735 769 assert self._unknown is not None # must call status first
736 770 return self._unknown
737 771 def ignored(self):
738 772 assert self._ignored is not None # must call status first
739 773 return self._ignored
740 774 def clean(self):
741 775 assert self._clean is not None # must call status first
742 776 return self._clean
743 777 def branch(self):
744 778 return encoding.tolocal(self._extra['branch'])
745 779 def extra(self):
746 780 return self._extra
747 781
748 782 def tags(self):
749 783 t = []
750 784 for p in self.parents():
751 785 t.extend(p.tags())
752 786 return t
753 787
754 788 def bookmarks(self):
755 789 b = []
756 790 for p in self.parents():
757 791 b.extend(p.bookmarks())
758 792 return b
759 793
760 794 def children(self):
761 795 return []
762 796
763 797 def flags(self, path):
764 798 if '_manifest' in self.__dict__:
765 799 try:
766 800 return self._manifest.flags(path)
767 801 except KeyError:
768 802 return ''
769 803
770 orig = self._repo.dirstate.copies().get(path, path)
771
772 def findflag(ctx):
773 mnode = ctx.changeset()[0]
774 node, flag = self._repo.manifest.find(mnode, orig)
775 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
776 try:
777 return ff(path)
778 except OSError:
779 pass
780
781 flag = findflag(self._parents[0])
782 if flag is None and len(self.parents()) > 1:
783 flag = findflag(self._parents[1])
784 if flag is None or self._repo.dirstate[path] == 'r':
804 try:
805 return self._flagfunc(path)
806 except OSError:
785 807 return ''
786 return flag
787 808
788 809 def filectx(self, path, filelog=None):
789 810 """get a file context from the working directory"""
790 811 return workingfilectx(self._repo, path, workingctx=self,
791 812 filelog=filelog)
792 813
793 814 def ancestor(self, c2):
794 815 """return the ancestor context of self and c2"""
795 816 return self._parents[0].ancestor(c2) # punt on two parents for now
796 817
797 818 def walk(self, match):
798 819 return sorted(self._repo.dirstate.walk(match, self.substate.keys(),
799 820 True, False))
800 821
801 822 def dirty(self, missing=False):
802 823 "check whether a working directory is modified"
803 824 # check subrepos first
804 825 for s in self.substate:
805 826 if self.sub(s).dirty():
806 827 return True
807 828 # check current working dir
808 829 return (self.p2() or self.branch() != self.p1().branch() or
809 830 self.modified() or self.added() or self.removed() or
810 831 (missing and self.deleted()))
811 832
812 833 def add(self, list, prefix=""):
813 834 join = lambda f: os.path.join(prefix, f)
814 835 wlock = self._repo.wlock()
815 836 ui, ds = self._repo.ui, self._repo.dirstate
816 837 try:
817 838 rejected = []
818 839 for f in list:
819 840 scmutil.checkportable(ui, join(f))
820 841 p = self._repo.wjoin(f)
821 842 try:
822 843 st = os.lstat(p)
823 844 except OSError:
824 845 ui.warn(_("%s does not exist!\n") % join(f))
825 846 rejected.append(f)
826 847 continue
827 848 if st.st_size > 10000000:
828 849 ui.warn(_("%s: up to %d MB of RAM may be required "
829 850 "to manage this file\n"
830 851 "(use 'hg revert %s' to cancel the "
831 852 "pending addition)\n")
832 853 % (f, 3 * st.st_size // 1000000, join(f)))
833 854 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
834 855 ui.warn(_("%s not added: only files and symlinks "
835 856 "supported currently\n") % join(f))
836 857 rejected.append(p)
837 858 elif ds[f] in 'amn':
838 859 ui.warn(_("%s already tracked!\n") % join(f))
839 860 elif ds[f] == 'r':
840 861 ds.normallookup(f)
841 862 else:
842 863 ds.add(f)
843 864 return rejected
844 865 finally:
845 866 wlock.release()
846 867
847 868 def forget(self, files):
848 869 wlock = self._repo.wlock()
849 870 try:
850 871 for f in files:
851 872 if self._repo.dirstate[f] != 'a':
852 873 self._repo.dirstate.remove(f)
853 874 elif f not in self._repo.dirstate:
854 875 self._repo.ui.warn(_("%s not tracked!\n") % f)
855 876 else:
856 877 self._repo.dirstate.drop(f)
857 878 finally:
858 879 wlock.release()
859 880
860 881 def ancestors(self):
861 882 for a in self._repo.changelog.ancestors(
862 883 *[p.rev() for p in self._parents]):
863 884 yield changectx(self._repo, a)
864 885
865 886 def undelete(self, list):
866 887 pctxs = self.parents()
867 888 wlock = self._repo.wlock()
868 889 try:
869 890 for f in list:
870 891 if self._repo.dirstate[f] != 'r':
871 892 self._repo.ui.warn(_("%s not removed!\n") % f)
872 893 else:
873 894 fctx = f in pctxs[0] and pctxs[0][f] or pctxs[1][f]
874 895 t = fctx.data()
875 896 self._repo.wwrite(f, t, fctx.flags())
876 897 self._repo.dirstate.normal(f)
877 898 finally:
878 899 wlock.release()
879 900
880 901 def copy(self, source, dest):
881 902 p = self._repo.wjoin(dest)
882 903 if not os.path.lexists(p):
883 904 self._repo.ui.warn(_("%s does not exist!\n") % dest)
884 905 elif not (os.path.isfile(p) or os.path.islink(p)):
885 906 self._repo.ui.warn(_("copy failed: %s is not a file or a "
886 907 "symbolic link\n") % dest)
887 908 else:
888 909 wlock = self._repo.wlock()
889 910 try:
890 911 if self._repo.dirstate[dest] in '?r':
891 912 self._repo.dirstate.add(dest)
892 913 self._repo.dirstate.copy(source, dest)
893 914 finally:
894 915 wlock.release()
895 916
896 917 class workingfilectx(filectx):
897 918 """A workingfilectx object makes access to data related to a particular
898 919 file in the working directory convenient."""
899 920 def __init__(self, repo, path, filelog=None, workingctx=None):
900 921 """changeid can be a changeset revision, node, or tag.
901 922 fileid can be a file revision or node."""
902 923 self._repo = repo
903 924 self._path = path
904 925 self._changeid = None
905 926 self._filerev = self._filenode = None
906 927
907 928 if filelog:
908 929 self._filelog = filelog
909 930 if workingctx:
910 931 self._changectx = workingctx
911 932
912 933 @propertycache
913 934 def _changectx(self):
914 935 return workingctx(self._repo)
915 936
916 937 def __nonzero__(self):
917 938 return True
918 939
919 940 def __str__(self):
920 941 return "%s@%s" % (self.path(), self._changectx)
921 942
922 943 def __repr__(self):
923 944 return "<workingfilectx %s>" % str(self)
924 945
925 946 def data(self):
926 947 return self._repo.wread(self._path)
927 948 def renamed(self):
928 949 rp = self._repo.dirstate.copied(self._path)
929 950 if not rp:
930 951 return None
931 952 return rp, self._changectx._parents[0]._manifest.get(rp, nullid)
932 953
933 954 def parents(self):
934 955 '''return parent filectxs, following copies if necessary'''
935 956 def filenode(ctx, path):
936 957 return ctx._manifest.get(path, nullid)
937 958
938 959 path = self._path
939 960 fl = self._filelog
940 961 pcl = self._changectx._parents
941 962 renamed = self.renamed()
942 963
943 964 if renamed:
944 965 pl = [renamed + (None,)]
945 966 else:
946 967 pl = [(path, filenode(pcl[0], path), fl)]
947 968
948 969 for pc in pcl[1:]:
949 970 pl.append((path, filenode(pc, path), fl))
950 971
951 972 return [filectx(self._repo, p, fileid=n, filelog=l)
952 973 for p, n, l in pl if n != nullid]
953 974
954 975 def children(self):
955 976 return []
956 977
957 978 def size(self):
958 979 return os.lstat(self._repo.wjoin(self._path)).st_size
959 980 def date(self):
960 981 t, tz = self._changectx.date()
961 982 try:
962 983 return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
963 984 except OSError, err:
964 985 if err.errno != errno.ENOENT:
965 986 raise
966 987 return (t, tz)
967 988
968 989 def cmp(self, fctx):
969 990 """compare with other file context
970 991
971 992 returns True if different than fctx.
972 993 """
973 994 # fctx should be a filectx (not a wfctx)
974 995 # invert comparison to reuse the same code path
975 996 return fctx.cmp(self)
976 997
977 998 class memctx(object):
978 999 """Use memctx to perform in-memory commits via localrepo.commitctx().
979 1000
980 1001 Revision information is supplied at initialization time while
981 1002 related files data and is made available through a callback
982 1003 mechanism. 'repo' is the current localrepo, 'parents' is a
983 1004 sequence of two parent revisions identifiers (pass None for every
984 1005 missing parent), 'text' is the commit message and 'files' lists
985 1006 names of files touched by the revision (normalized and relative to
986 1007 repository root).
987 1008
988 1009 filectxfn(repo, memctx, path) is a callable receiving the
989 1010 repository, the current memctx object and the normalized path of
990 1011 requested file, relative to repository root. It is fired by the
991 1012 commit function for every file in 'files', but calls order is
992 1013 undefined. If the file is available in the revision being
993 1014 committed (updated or added), filectxfn returns a memfilectx
994 1015 object. If the file was removed, filectxfn raises an
995 1016 IOError. Moved files are represented by marking the source file
996 1017 removed and the new file added with copy information (see
997 1018 memfilectx).
998 1019
999 1020 user receives the committer name and defaults to current
1000 1021 repository username, date is the commit date in any format
1001 1022 supported by util.parsedate() and defaults to current date, extra
1002 1023 is a dictionary of metadata or is left empty.
1003 1024 """
1004 1025 def __init__(self, repo, parents, text, files, filectxfn, user=None,
1005 1026 date=None, extra=None):
1006 1027 self._repo = repo
1007 1028 self._rev = None
1008 1029 self._node = None
1009 1030 self._text = text
1010 1031 self._date = date and util.parsedate(date) or util.makedate()
1011 1032 self._user = user
1012 1033 parents = [(p or nullid) for p in parents]
1013 1034 p1, p2 = parents
1014 1035 self._parents = [changectx(self._repo, p) for p in (p1, p2)]
1015 1036 files = sorted(set(files))
1016 1037 self._status = [files, [], [], [], []]
1017 1038 self._filectxfn = filectxfn
1018 1039
1019 1040 self._extra = extra and extra.copy() or {}
1020 1041 if self._extra.get('branch', '') == '':
1021 1042 self._extra['branch'] = 'default'
1022 1043
1023 1044 def __str__(self):
1024 1045 return str(self._parents[0]) + "+"
1025 1046
1026 1047 def __int__(self):
1027 1048 return self._rev
1028 1049
1029 1050 def __nonzero__(self):
1030 1051 return True
1031 1052
1032 1053 def __getitem__(self, key):
1033 1054 return self.filectx(key)
1034 1055
1035 1056 def p1(self):
1036 1057 return self._parents[0]
1037 1058 def p2(self):
1038 1059 return self._parents[1]
1039 1060
1040 1061 def user(self):
1041 1062 return self._user or self._repo.ui.username()
1042 1063 def date(self):
1043 1064 return self._date
1044 1065 def description(self):
1045 1066 return self._text
1046 1067 def files(self):
1047 1068 return self.modified()
1048 1069 def modified(self):
1049 1070 return self._status[0]
1050 1071 def added(self):
1051 1072 return self._status[1]
1052 1073 def removed(self):
1053 1074 return self._status[2]
1054 1075 def deleted(self):
1055 1076 return self._status[3]
1056 1077 def unknown(self):
1057 1078 return self._status[4]
1058 1079 def ignored(self):
1059 1080 return self._status[5]
1060 1081 def clean(self):
1061 1082 return self._status[6]
1062 1083 def branch(self):
1063 1084 return encoding.tolocal(self._extra['branch'])
1064 1085 def extra(self):
1065 1086 return self._extra
1066 1087 def flags(self, f):
1067 1088 return self[f].flags()
1068 1089
1069 1090 def parents(self):
1070 1091 """return contexts for each parent changeset"""
1071 1092 return self._parents
1072 1093
1073 1094 def filectx(self, path, filelog=None):
1074 1095 """get a file context from the working directory"""
1075 1096 return self._filectxfn(self._repo, self, path)
1076 1097
1077 1098 def commit(self):
1078 1099 """commit context to the repo"""
1079 1100 return self._repo.commitctx(self)
1080 1101
1081 1102 class memfilectx(object):
1082 1103 """memfilectx represents an in-memory file to commit.
1083 1104
1084 1105 See memctx for more details.
1085 1106 """
1086 1107 def __init__(self, path, data, islink=False, isexec=False, copied=None):
1087 1108 """
1088 1109 path is the normalized file path relative to repository root.
1089 1110 data is the file content as a string.
1090 1111 islink is True if the file is a symbolic link.
1091 1112 isexec is True if the file is executable.
1092 1113 copied is the source file path if current file was copied in the
1093 1114 revision being committed, or None."""
1094 1115 self._path = path
1095 1116 self._data = data
1096 1117 self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
1097 1118 self._copied = None
1098 1119 if copied:
1099 1120 self._copied = (copied, nullid)
1100 1121
1101 1122 def __nonzero__(self):
1102 1123 return True
1103 1124 def __str__(self):
1104 1125 return "%s@%s" % (self.path(), self._changectx)
1105 1126 def path(self):
1106 1127 return self._path
1107 1128 def data(self):
1108 1129 return self._data
1109 1130 def flags(self):
1110 1131 return self._flags
1111 1132 def isexec(self):
1112 1133 return 'x' in self._flags
1113 1134 def islink(self):
1114 1135 return 'l' in self._flags
1115 1136 def renamed(self):
1116 1137 return self._copied
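
The heart of the fix is in workingctx above: working-directory flags are now
computed by a single _buildflagfunc helper instead of two divergent code paths
in _manifest and flags(). When the working directory has two parents, the
helper reconstructs the flags the merge should have produced by comparing each
parent's flag against their common ancestor (issue1802). The three-way
resolution can be restated in isolation like this (standalone illustration
mirroring the logic inside _buildflagfunc):

    def mergeflags(fl1, fl2, fla):
        # fl1/fl2: flags of the file in parent 1 / parent 2
        # fla: flags of the file in the ancestor of the two parents
        if fl1 == fl2:
            return fl1   # both sides agree
        if fl1 == fla:
            return fl2   # only parent 2 changed the flag, keep its change
        if fl2 == fla:
            return fl1   # only parent 1 changed the flag, keep its change
        return ''        # conflicting flag changes: punt

    # the scenario from the test: p1 has no exec bit, p2 added it,
    # the ancestor had none -> the merge keeps the exec bit
    assert mergeflags('', 'x', '') == 'x'
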
@@ -1,721 +1,724 @@
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid
9 9 from i18n import _
10 10 import scmutil, util, ignore, osutil, parsers, encoding
11 11 import struct, os, stat, errno
12 12 import cStringIO
13 13
14 14 _format = ">cllll"
15 15 propertycache = util.propertycache
16 16
17 17 def _finddirs(path):
18 18 pos = path.rfind('/')
19 19 while pos != -1:
20 20 yield path[:pos]
21 21 pos = path.rfind('/', 0, pos)
22 22
23 23 def _incdirs(dirs, path):
24 24 for base in _finddirs(path):
25 25 if base in dirs:
26 26 dirs[base] += 1
27 27 return
28 28 dirs[base] = 1
29 29
30 30 def _decdirs(dirs, path):
31 31 for base in _finddirs(path):
32 32 if dirs[base] > 1:
33 33 dirs[base] -= 1
34 34 return
35 35 del dirs[base]
36 36
37 37 class dirstate(object):
38 38
39 39 def __init__(self, opener, ui, root, validate):
40 40 '''Create a new dirstate object.
41 41
42 42 opener is an open()-like callable that can be used to open the
43 43 dirstate file; root is the root of the directory tracked by
44 44 the dirstate.
45 45 '''
46 46 self._opener = opener
47 47 self._validate = validate
48 48 self._root = root
49 49 self._rootdir = os.path.join(root, '')
50 50 self._dirty = False
51 51 self._dirtypl = False
52 52 self._lastnormaltime = None
53 53 self._ui = ui
54 54
55 55 @propertycache
56 56 def _map(self):
57 57 '''Return the dirstate contents as a map from filename to
58 58 (state, mode, size, time).'''
59 59 self._read()
60 60 return self._map
61 61
62 62 @propertycache
63 63 def _copymap(self):
64 64 self._read()
65 65 return self._copymap
66 66
67 67 @propertycache
68 68 def _foldmap(self):
69 69 f = {}
70 70 for name in self._map:
71 71 f[os.path.normcase(name)] = name
72 72 return f
73 73
74 74 @propertycache
75 75 def _branch(self):
76 76 try:
77 77 return self._opener.read("branch").strip() or "default"
78 78 except IOError:
79 79 return "default"
80 80
81 81 @propertycache
82 82 def _pl(self):
83 83 try:
84 84 fp = self._opener("dirstate")
85 85 st = fp.read(40)
86 86 fp.close()
87 87 l = len(st)
88 88 if l == 40:
89 89 return st[:20], st[20:40]
90 90 elif l > 0 and l < 40:
91 91 raise util.Abort(_('working directory state appears damaged!'))
92 92 except IOError, err:
93 93 if err.errno != errno.ENOENT:
94 94 raise
95 95 return [nullid, nullid]
96 96
97 97 @propertycache
98 98 def _dirs(self):
99 99 dirs = {}
100 100 for f, s in self._map.iteritems():
101 101 if s[0] != 'r':
102 102 _incdirs(dirs, f)
103 103 return dirs
104 104
105 105 @propertycache
106 106 def _ignore(self):
107 107 files = [self._join('.hgignore')]
108 108 for name, path in self._ui.configitems("ui"):
109 109 if name == 'ignore' or name.startswith('ignore.'):
110 110 files.append(util.expandpath(path))
111 111 return ignore.ignore(self._root, files, self._ui.warn)
112 112
113 113 @propertycache
114 114 def _slash(self):
115 115 return self._ui.configbool('ui', 'slash') and os.sep != '/'
116 116
117 117 @propertycache
118 118 def _checklink(self):
119 119 return util.checklink(self._root)
120 120
121 121 @propertycache
122 122 def _checkexec(self):
123 123 return util.checkexec(self._root)
124 124
125 125 @propertycache
126 126 def _checkcase(self):
127 127 return not util.checkcase(self._join('.hg'))
128 128
129 129 def _join(self, f):
130 130 # much faster than os.path.join()
131 131 # it's safe because f is always a relative path
132 132 return self._rootdir + f
133 133
134 def flagfunc(self, fallback):
134 def flagfunc(self, buildfallback):
135 if self._checklink and self._checkexec:
136 def f(x):
137 p = self._join(x)
138 if os.path.islink(p):
139 return 'l'
140 if util.isexec(p):
141 return 'x'
142 return ''
143 return f
144
145 fallback = buildfallback()
135 146 if self._checklink:
136 if self._checkexec:
137 def f(x):
138 p = self._join(x)
139 if os.path.islink(p):
140 return 'l'
141 if util.isexec(p):
142 return 'x'
143 return ''
144 return f
145 147 def f(x):
146 148 if os.path.islink(self._join(x)):
147 149 return 'l'
148 150 if 'x' in fallback(x):
149 151 return 'x'
150 152 return ''
151 153 return f
152 154 if self._checkexec:
153 155 def f(x):
154 156 if 'l' in fallback(x):
155 157 return 'l'
156 158 if util.isexec(self._join(x)):
157 159 return 'x'
158 160 return ''
159 161 return f
160 return fallback
162 else:
163 return fallback
161 164
162 165 def getcwd(self):
163 166 cwd = os.getcwd()
164 167 if cwd == self._root:
165 168 return ''
166 169 # self._root ends with a path separator if self._root is '/' or 'C:\'
167 170 rootsep = self._root
168 171 if not util.endswithsep(rootsep):
169 172 rootsep += os.sep
170 173 if cwd.startswith(rootsep):
171 174 return cwd[len(rootsep):]
172 175 else:
173 176 # we're outside the repo. return an absolute path.
174 177 return cwd
175 178
176 179 def pathto(self, f, cwd=None):
177 180 if cwd is None:
178 181 cwd = self.getcwd()
179 182 path = util.pathto(self._root, cwd, f)
180 183 if self._slash:
181 184 return util.normpath(path)
182 185 return path
183 186
184 187 def __getitem__(self, key):
185 188 '''Return the current state of key (a filename) in the dirstate.
186 189
187 190 States are:
188 191 n normal
189 192 m needs merging
190 193 r marked for removal
191 194 a marked for addition
192 195 ? not tracked
193 196 '''
194 197 return self._map.get(key, ("?",))[0]
195 198
196 199 def __contains__(self, key):
197 200 return key in self._map
198 201
199 202 def __iter__(self):
200 203 for x in sorted(self._map):
201 204 yield x
202 205
203 206 def parents(self):
204 207 return [self._validate(p) for p in self._pl]
205 208
206 209 def p1(self):
207 210 return self._validate(self._pl[0])
208 211
209 212 def p2(self):
210 213 return self._validate(self._pl[1])
211 214
212 215 def branch(self):
213 216 return encoding.tolocal(self._branch)
214 217
215 218 def setparents(self, p1, p2=nullid):
216 219 self._dirty = self._dirtypl = True
217 220 self._pl = p1, p2
218 221
219 222 def setbranch(self, branch):
220 223 if branch in ['tip', '.', 'null']:
221 224 raise util.Abort(_('the name \'%s\' is reserved') % branch)
222 225 self._branch = encoding.fromlocal(branch)
223 226 self._opener.write("branch", self._branch + '\n')
224 227
225 228 def _read(self):
226 229 self._map = {}
227 230 self._copymap = {}
228 231 try:
229 232 st = self._opener.read("dirstate")
230 233 except IOError, err:
231 234 if err.errno != errno.ENOENT:
232 235 raise
233 236 return
234 237 if not st:
235 238 return
236 239
237 240 p = parsers.parse_dirstate(self._map, self._copymap, st)
238 241 if not self._dirtypl:
239 242 self._pl = p
240 243
241 244 def invalidate(self):
242 245 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
243 246 "_ignore"):
244 247 if a in self.__dict__:
245 248 delattr(self, a)
246 249 self._lastnormaltime = None
247 250 self._dirty = False
248 251
249 252 def copy(self, source, dest):
250 253 """Mark dest as a copy of source. Unmark dest if source is None."""
251 254 if source == dest:
252 255 return
253 256 self._dirty = True
254 257 if source is not None:
255 258 self._copymap[dest] = source
256 259 elif dest in self._copymap:
257 260 del self._copymap[dest]
258 261
259 262 def copied(self, file):
260 263 return self._copymap.get(file, None)
261 264
262 265 def copies(self):
263 266 return self._copymap
264 267
265 268 def _droppath(self, f):
266 269 if self[f] not in "?r" and "_dirs" in self.__dict__:
267 270 _decdirs(self._dirs, f)
268 271
269 272 def _addpath(self, f, check=False):
270 273 oldstate = self[f]
271 274 if check or oldstate == "r":
272 275 scmutil.checkfilename(f)
273 276 if f in self._dirs:
274 277 raise util.Abort(_('directory %r already in dirstate') % f)
275 278 # shadows
276 279 for d in _finddirs(f):
277 280 if d in self._dirs:
278 281 break
279 282 if d in self._map and self[d] != 'r':
280 283 raise util.Abort(
281 284 _('file %r in dirstate clashes with %r') % (d, f))
282 285 if oldstate in "?r" and "_dirs" in self.__dict__:
283 286 _incdirs(self._dirs, f)
284 287
285 288 def normal(self, f):
286 289 '''Mark a file normal and clean.'''
287 290 self._dirty = True
288 291 self._addpath(f)
289 292 s = os.lstat(self._join(f))
290 293 mtime = int(s.st_mtime)
291 294 self._map[f] = ('n', s.st_mode, s.st_size, mtime)
292 295 if f in self._copymap:
293 296 del self._copymap[f]
294 297 if mtime > self._lastnormaltime:
295 298 # Remember the most recent modification timeslot for status(),
296 299 # to make sure we won't miss future size-preserving file content
297 300 # modifications that happen within the same timeslot.
298 301 self._lastnormaltime = mtime
299 302
300 303 def normallookup(self, f):
301 304 '''Mark a file normal, but possibly dirty.'''
302 305 if self._pl[1] != nullid and f in self._map:
303 306 # if there is a merge going on and the file was either
304 307 # in state 'm' (-1) or coming from other parent (-2) before
305 308 # being removed, restore that state.
306 309 entry = self._map[f]
307 310 if entry[0] == 'r' and entry[2] in (-1, -2):
308 311 source = self._copymap.get(f)
309 312 if entry[2] == -1:
310 313 self.merge(f)
311 314 elif entry[2] == -2:
312 315 self.otherparent(f)
313 316 if source:
314 317 self.copy(source, f)
315 318 return
316 319 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
317 320 return
318 321 self._dirty = True
319 322 self._addpath(f)
320 323 self._map[f] = ('n', 0, -1, -1)
321 324 if f in self._copymap:
322 325 del self._copymap[f]
323 326
324 327 def otherparent(self, f):
325 328 '''Mark as coming from the other parent, always dirty.'''
326 329 if self._pl[1] == nullid:
327 330 raise util.Abort(_("setting %r to other parent "
328 331 "only allowed in merges") % f)
329 332 self._dirty = True
330 333 self._addpath(f)
331 334 self._map[f] = ('n', 0, -2, -1)
332 335 if f in self._copymap:
333 336 del self._copymap[f]
334 337
335 338 def add(self, f):
336 339 '''Mark a file added.'''
337 340 self._dirty = True
338 341 self._addpath(f, True)
339 342 self._map[f] = ('a', 0, -1, -1)
340 343 if f in self._copymap:
341 344 del self._copymap[f]
342 345
343 346 def remove(self, f):
344 347 '''Mark a file removed.'''
345 348 self._dirty = True
346 349 self._droppath(f)
347 350 size = 0
348 351 if self._pl[1] != nullid and f in self._map:
349 352 # backup the previous state
350 353 entry = self._map[f]
351 354 if entry[0] == 'm': # merge
352 355 size = -1
353 356 elif entry[0] == 'n' and entry[2] == -2: # other parent
354 357 size = -2
355 358 self._map[f] = ('r', 0, size, 0)
356 359 if size == 0 and f in self._copymap:
357 360 del self._copymap[f]
358 361
359 362 def merge(self, f):
360 363 '''Mark a file merged.'''
361 364 self._dirty = True
362 365 s = os.lstat(self._join(f))
363 366 self._addpath(f)
364 367 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
365 368 if f in self._copymap:
366 369 del self._copymap[f]
367 370
368 371 def drop(self, f):
369 372 '''Drop a file from the dirstate'''
370 373 self._dirty = True
371 374 self._droppath(f)
372 375 del self._map[f]
373 376
374 377 def _normalize(self, path, isknown):
375 378 normed = os.path.normcase(path)
376 379 folded = self._foldmap.get(normed, None)
377 380 if folded is None:
378 381 if isknown or not os.path.lexists(os.path.join(self._root, path)):
379 382 folded = path
380 383 else:
381 384 folded = self._foldmap.setdefault(normed,
382 385 util.fspath(path, self._root))
383 386 return folded
384 387
385 388 def normalize(self, path, isknown=False):
386 389 '''
387 390 normalize the case of a pathname when on a casefolding filesystem
388 391
389 392 isknown specifies whether the filename came from walking the
390 393 disk, to avoid extra filesystem access
391 394
392 395 The normalized case is determined based on the following precedence:
393 396
394 397 - version of name already stored in the dirstate
395 398 - version of name stored on disk
396 399 - version provided via command arguments
397 400 '''
398 401
399 402 if self._checkcase:
400 403 return self._normalize(path, isknown)
401 404 return path
402 405
403 406 def clear(self):
404 407 self._map = {}
405 408 if "_dirs" in self.__dict__:
406 409 delattr(self, "_dirs")
407 410 self._copymap = {}
408 411 self._pl = [nullid, nullid]
409 412 self._lastnormaltime = None
410 413 self._dirty = True
411 414
412 415 def rebuild(self, parent, files):
413 416 self.clear()
414 417 for f in files:
415 418 if 'x' in files.flags(f):
416 419 self._map[f] = ('n', 0777, -1, 0)
417 420 else:
418 421 self._map[f] = ('n', 0666, -1, 0)
419 422 self._pl = (parent, nullid)
420 423 self._dirty = True
421 424
422 425 def write(self):
423 426 if not self._dirty:
424 427 return
425 428 st = self._opener("dirstate", "w", atomictemp=True)
426 429
427 430 # use the modification time of the newly created temporary file as the
428 431 # filesystem's notion of 'now'
429 432 now = int(util.fstat(st).st_mtime)
430 433
431 434 cs = cStringIO.StringIO()
432 435 copymap = self._copymap
433 436 pack = struct.pack
434 437 write = cs.write
435 438 write("".join(self._pl))
436 439 for f, e in self._map.iteritems():
437 440 if e[0] == 'n' and e[3] == now:
438 441 # The file was last modified "simultaneously" with the current
439 442 # write to dirstate (i.e. within the same second for file-
440 443 # systems with a granularity of 1 sec). This commonly happens
441 444 # for at least a couple of files on 'update'.
442 445 # The user could change the file without changing its size
443 446 # within the same second. Invalidate the file's stat data in
444 447 # dirstate, forcing future 'status' calls to compare the
445 448 # contents of the file. This prevents mistakenly treating such
446 449 # files as clean.
447 450 e = (e[0], 0, -1, -1) # mark entry as 'unset'
448 451 self._map[f] = e
449 452
450 453 if f in copymap:
451 454 f = "%s\0%s" % (f, copymap[f])
452 455 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
453 456 write(e)
454 457 write(f)
455 458 st.write(cs.getvalue())
456 459 st.close()
457 460 self._lastnormaltime = None
458 461 self._dirty = self._dirtypl = False
459 462
460 463 def _dirignore(self, f):
461 464 if f == '.':
462 465 return False
463 466 if self._ignore(f):
464 467 return True
465 468 for p in _finddirs(f):
466 469 if self._ignore(p):
467 470 return True
468 471 return False
469 472
470 473 def walk(self, match, subrepos, unknown, ignored):
471 474 '''
472 475 Walk recursively through the directory tree, finding all files
473 476 matched by match.
474 477
475 478 Return a dict mapping filename to stat-like object (either
476 479 mercurial.osutil.stat instance or return value of os.stat()).
477 480 '''
478 481
479 482 def fwarn(f, msg):
480 483 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
481 484 return False
482 485
483 486 def badtype(mode):
484 487 kind = _('unknown')
485 488 if stat.S_ISCHR(mode):
486 489 kind = _('character device')
487 490 elif stat.S_ISBLK(mode):
488 491 kind = _('block device')
489 492 elif stat.S_ISFIFO(mode):
490 493 kind = _('fifo')
491 494 elif stat.S_ISSOCK(mode):
492 495 kind = _('socket')
493 496 elif stat.S_ISDIR(mode):
494 497 kind = _('directory')
495 498 return _('unsupported file type (type is %s)') % kind
496 499
497 500 ignore = self._ignore
498 501 dirignore = self._dirignore
499 502 if ignored:
500 503 ignore = util.never
501 504 dirignore = util.never
502 505 elif not unknown:
503 506 # if unknown and ignored are False, skip step 2
504 507 ignore = util.always
505 508 dirignore = util.always
506 509
507 510 matchfn = match.matchfn
508 511 badfn = match.bad
509 512 dmap = self._map
510 513 normpath = util.normpath
511 514 listdir = osutil.listdir
512 515 lstat = os.lstat
513 516 getkind = stat.S_IFMT
514 517 dirkind = stat.S_IFDIR
515 518 regkind = stat.S_IFREG
516 519 lnkkind = stat.S_IFLNK
517 520 join = self._join
518 521 work = []
519 522 wadd = work.append
520 523
521 524 exact = skipstep3 = False
522 525 if matchfn == match.exact: # match.exact
523 526 exact = True
524 527 dirignore = util.always # skip step 2
525 528 elif match.files() and not match.anypats(): # match.match, no patterns
526 529 skipstep3 = True
527 530
528 531 if self._checkcase:
529 532 normalize = self._normalize
530 533 skipstep3 = False
531 534 else:
532 535 normalize = lambda x, y: x
533 536
534 537 files = sorted(match.files())
535 538 subrepos.sort()
536 539 i, j = 0, 0
537 540 while i < len(files) and j < len(subrepos):
538 541 subpath = subrepos[j] + "/"
539 542 if files[i] < subpath:
540 543 i += 1
541 544 continue
542 545 while i < len(files) and files[i].startswith(subpath):
543 546 del files[i]
544 547 j += 1
545 548
546 549 if not files or '.' in files:
547 550 files = ['']
548 551 results = dict.fromkeys(subrepos)
549 552 results['.hg'] = None
550 553
551 554 # step 1: find all explicit files
552 555 for ff in files:
553 556 nf = normalize(normpath(ff), False)
554 557 if nf in results:
555 558 continue
556 559
557 560 try:
558 561 st = lstat(join(nf))
559 562 kind = getkind(st.st_mode)
560 563 if kind == dirkind:
561 564 skipstep3 = False
562 565 if nf in dmap:
563 566 #file deleted on disk but still in dirstate
564 567 results[nf] = None
565 568 match.dir(nf)
566 569 if not dirignore(nf):
567 570 wadd(nf)
568 571 elif kind == regkind or kind == lnkkind:
569 572 results[nf] = st
570 573 else:
571 574 badfn(ff, badtype(kind))
572 575 if nf in dmap:
573 576 results[nf] = None
574 577 except OSError, inst:
575 578 if nf in dmap: # does it exactly match a file?
576 579 results[nf] = None
577 580 else: # does it match a directory?
578 581 prefix = nf + "/"
579 582 for fn in dmap:
580 583 if fn.startswith(prefix):
581 584 match.dir(nf)
582 585 skipstep3 = False
583 586 break
584 587 else:
585 588 badfn(ff, inst.strerror)
586 589
587 590 # step 2: visit subdirectories
588 591 while work:
589 592 nd = work.pop()
590 593 skip = None
591 594 if nd == '.':
592 595 nd = ''
593 596 else:
594 597 skip = '.hg'
595 598 try:
596 599 entries = listdir(join(nd), stat=True, skip=skip)
597 600 except OSError, inst:
598 601 if inst.errno == errno.EACCES:
599 602 fwarn(nd, inst.strerror)
600 603 continue
601 604 raise
602 605 for f, kind, st in entries:
603 606 nf = normalize(nd and (nd + "/" + f) or f, True)
604 607 if nf not in results:
605 608 if kind == dirkind:
606 609 if not ignore(nf):
607 610 match.dir(nf)
608 611 wadd(nf)
609 612 if nf in dmap and matchfn(nf):
610 613 results[nf] = None
611 614 elif kind == regkind or kind == lnkkind:
612 615 if nf in dmap:
613 616 if matchfn(nf):
614 617 results[nf] = st
615 618 elif matchfn(nf) and not ignore(nf):
616 619 results[nf] = st
617 620 elif nf in dmap and matchfn(nf):
618 621 results[nf] = None
619 622
620 623 # step 3: report unseen items in the dmap hash
621 624 if not skipstep3 and not exact:
622 625 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
623 626 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
624 627 if not st is None and not getkind(st.st_mode) in (regkind, lnkkind):
625 628 st = None
626 629 results[nf] = st
627 630 for s in subrepos:
628 631 del results[s]
629 632 del results['.hg']
630 633 return results
631 634
632 635 def status(self, match, subrepos, ignored, clean, unknown):
633 636 '''Determine the status of the working copy relative to the
634 637 dirstate and return a tuple of lists (unsure, modified, added,
635 638 removed, deleted, unknown, ignored, clean), where:
636 639
637 640 unsure:
638 641 files that might have been modified since the dirstate was
639 642 written, but need to be read to be sure (size is the same
640 643 but mtime differs)
641 644 modified:
642 645 files that have definitely been modified since the dirstate
643 646 was written (different size or mode)
644 647 added:
645 648 files that have been explicitly added with hg add
646 649 removed:
647 650 files that have been explicitly removed with hg remove
648 651 deleted:
649 652 files that have been deleted through other means ("missing")
650 653 unknown:
651 654 files not in the dirstate that are not ignored
652 655 ignored:
653 656 files not in the dirstate that are ignored
654 657 (by _dirignore())
655 658 clean:
656 659 files that have definitely not been modified since the
657 660 dirstate was written
658 661 '''
659 662 listignored, listclean, listunknown = ignored, clean, unknown
660 663 lookup, modified, added, unknown, ignored = [], [], [], [], []
661 664 removed, deleted, clean = [], [], []
662 665
663 666 dmap = self._map
664 667 ladd = lookup.append # aka "unsure"
665 668 madd = modified.append
666 669 aadd = added.append
667 670 uadd = unknown.append
668 671 iadd = ignored.append
669 672 radd = removed.append
670 673 dadd = deleted.append
671 674 cadd = clean.append
672 675
673 676 lnkkind = stat.S_IFLNK
674 677
675 678 for fn, st in self.walk(match, subrepos, listunknown,
676 679 listignored).iteritems():
677 680 if fn not in dmap:
678 681 if (listignored or match.exact(fn)) and self._dirignore(fn):
679 682 if listignored:
680 683 iadd(fn)
681 684 elif listunknown:
682 685 uadd(fn)
683 686 continue
684 687
685 688 state, mode, size, time = dmap[fn]
686 689
687 690 if not st and state in "nma":
688 691 dadd(fn)
689 692 elif state == 'n':
690 693 # The "mode & lnkkind != lnkkind or self._checklink"
691 694 # lines are an expansion of "islink => checklink"
692 695 # where islink means "is this a link?" and checklink
693 696 # means "can we check links?".
694 697 mtime = int(st.st_mtime)
695 698 if (size >= 0 and
696 699 (size != st.st_size
697 700 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
698 701 and (mode & lnkkind != lnkkind or self._checklink)
699 702 or size == -2 # other parent
700 703 or fn in self._copymap):
701 704 madd(fn)
702 705 elif (mtime != time
703 706 and (mode & lnkkind != lnkkind or self._checklink)):
704 707 ladd(fn)
705 708 elif mtime == self._lastnormaltime:
706 709 # fn may have been changed in the same timeslot without
707 710 # changing its size. This can happen if we quickly do
708 711 # multiple commits in a single transaction.
709 712 # Force lookup, so we don't miss such a racy file change.
710 713 ladd(fn)
711 714 elif listclean:
712 715 cadd(fn)
713 716 elif state == 'm':
714 717 madd(fn)
715 718 elif state == 'a':
716 719 aadd(fn)
717 720 elif state == 'r':
718 721 radd(fn)
719 722
720 723 return (lookup, modified, added, removed, deleted, unknown, ignored,
721 724 clean)
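
On the dirstate side, flagfunc's signature changes from flagfunc(fallback) to
flagfunc(buildfallback): the caller now passes a callable that constructs the
fallback flag function, and dirstate only invokes it when the filesystem lacks
symlink or exec support. This keeps the common POSIX case cheap, because
building the fallback (workingctx._buildflagfunc reads the parent manifests,
and on a merge the ancestor manifest as well) never happens there. A toy
illustration of the lazy-fallback shape (not the Mercurial API, just the
pattern the change introduces):

    def flagfunc(checklink, checkexec, buildfallback):
        if checklink and checkexec:
            # the filesystem can answer every flag query itself;
            # the expensive fallback is never constructed
            return lambda path: ''   # stand-in for the os.lstat-based probe
        return buildfallback()       # built once, only when actually needed

    calls = []
    def buildfallback():
        calls.append(1)              # count how often the expensive path runs
        return lambda path: 'x'

    assert flagfunc(True, True, buildfallback)('f') == ''
    assert calls == []               # fallback construction was skipped
    assert flagfunc(False, False, buildfallback)('f') == 'x'
    assert calls == [1]              # and happened exactly once otherwise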