##// END OF EJS Templates
dirstate.walk: push sorting up
Matt Mackall -
r6827:c978d675 default
parent child Browse files
Show More
@@ -1,774 +1,774
1 1 # context.py - changeset and file context objects for mercurial
2 2 #
3 3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import nullid, nullrev, short, hex
9 9 from i18n import _
10 10 import ancestor, bdiff, revlog, util, os, errno
11 11
12 12 class changectx(object):
13 13 """A changecontext object makes access to data related to a particular
14 14 changeset convenient."""
15 15 def __init__(self, repo, changeid=''):
16 16 """changeid is a revision number, node, or tag"""
17 17 if changeid == '':
18 18 changeid = '.'
19 19 self._repo = repo
20 20 self._node = self._repo.lookup(changeid)
21 21 self._rev = self._repo.changelog.rev(self._node)
22 22
23 23 def __str__(self):
24 24 return short(self.node())
25 25
26 26 def __int__(self):
27 27 return self.rev()
28 28
29 29 def __repr__(self):
30 30 return "<changectx %s>" % str(self)
31 31
32 32 def __hash__(self):
33 33 try:
34 34 return hash(self._rev)
35 35 except AttributeError:
36 36 return id(self)
37 37
38 38 def __eq__(self, other):
39 39 try:
40 40 return self._rev == other._rev
41 41 except AttributeError:
42 42 return False
43 43
44 44 def __ne__(self, other):
45 45 return not (self == other)
46 46
47 47 def __nonzero__(self):
48 48 return self._rev != nullrev
49 49
50 50 def __getattr__(self, name):
51 51 if name == '_changeset':
52 52 self._changeset = self._repo.changelog.read(self.node())
53 53 return self._changeset
54 54 elif name == '_manifest':
55 55 self._manifest = self._repo.manifest.read(self._changeset[0])
56 56 return self._manifest
57 57 elif name == '_manifestdelta':
58 58 md = self._repo.manifest.readdelta(self._changeset[0])
59 59 self._manifestdelta = md
60 60 return self._manifestdelta
61 61 elif name == '_parents':
62 62 p = self._repo.changelog.parents(self._node)
63 63 if p[1] == nullid:
64 64 p = p[:-1]
65 65 self._parents = [changectx(self._repo, x) for x in p]
66 66 return self._parents
67 67 else:
68 68 raise AttributeError, name
69 69
70 70 def __contains__(self, key):
71 71 return key in self._manifest
72 72
73 73 def __getitem__(self, key):
74 74 return self.filectx(key)
75 75
76 76 def __iter__(self):
77 77 for f in util.sort(self._manifest):
78 78 yield f
79 79
80 80 def changeset(self): return self._changeset
81 81 def manifest(self): return self._manifest
82 82
83 83 def rev(self): return self._rev
84 84 def node(self): return self._node
85 85 def hex(self): return hex(self._node)
86 86 def user(self): return self._changeset[1]
87 87 def date(self): return self._changeset[2]
88 88 def files(self): return self._changeset[3]
89 89 def description(self): return self._changeset[4]
90 90 def branch(self): return self._changeset[5].get("branch")
91 91 def extra(self): return self._changeset[5]
92 92 def tags(self): return self._repo.nodetags(self._node)
93 93
94 94 def parents(self):
95 95 """return contexts for each parent changeset"""
96 96 return self._parents
97 97
98 98 def children(self):
99 99 """return contexts for each child changeset"""
100 100 c = self._repo.changelog.children(self._node)
101 101 return [changectx(self._repo, x) for x in c]
102 102
103 103 def _fileinfo(self, path):
104 104 if '_manifest' in self.__dict__:
105 105 try:
106 106 return self._manifest[path], self._manifest.flags(path)
107 107 except KeyError:
108 108 raise revlog.LookupError(self._node, path,
109 109 _('not found in manifest'))
110 110 if '_manifestdelta' in self.__dict__ or path in self.files():
111 111 if path in self._manifestdelta:
112 112 return self._manifestdelta[path], self._manifestdelta.flags(path)
113 113 node, flag = self._repo.manifest.find(self._changeset[0], path)
114 114 if not node:
115 115 raise revlog.LookupError(self._node, path,
116 116 _('not found in manifest'))
117 117
118 118 return node, flag
119 119
120 120 def filenode(self, path):
121 121 return self._fileinfo(path)[0]
122 122
123 123 def flags(self, path):
124 124 try:
125 125 return self._fileinfo(path)[1]
126 126 except revlog.LookupError:
127 127 return ''
128 128
129 129 def filectx(self, path, fileid=None, filelog=None):
130 130 """get a file context from this changeset"""
131 131 if fileid is None:
132 132 fileid = self.filenode(path)
133 133 return filectx(self._repo, path, fileid=fileid,
134 134 changectx=self, filelog=filelog)
135 135
136 136 def filectxs(self):
137 137 """generate a file context for each file in this changeset's
138 138 manifest"""
139 139 for f in util.sort(mf):
140 140 yield self.filectx(f, fileid=mf[f])
141 141
142 142 def ancestor(self, c2):
143 143 """
144 144 return the ancestor context of self and c2
145 145 """
146 146 n = self._repo.changelog.ancestor(self._node, c2._node)
147 147 return changectx(self._repo, n)
148 148
149 149 def walk(self, match):
150 150 fdict = dict.fromkeys(match.files())
151 151 # for dirstate.walk, files=['.'] means "walk the whole tree".
152 152 # follow that here, too
153 153 fdict.pop('.', None)
154 154 for fn in self:
155 155 for ffn in fdict:
156 156 # match if the file is the exact name or a directory
157 157 if ffn == fn or fn.startswith("%s/" % ffn):
158 158 del fdict[ffn]
159 159 break
160 160 if match(fn):
161 161 yield fn
162 162 for fn in util.sort(fdict):
163 163 if match.bad(fn, 'No such file in rev ' + str(self)) and match(fn):
164 164 yield fn
165 165
166 166 class filectx(object):
167 167 """A filecontext object makes access to data related to a particular
168 168 filerevision convenient."""
169 169 def __init__(self, repo, path, changeid=None, fileid=None,
170 170 filelog=None, changectx=None):
171 171 """changeid can be a changeset revision, node, or tag.
172 172 fileid can be a file revision or node."""
173 173 self._repo = repo
174 174 self._path = path
175 175
176 176 assert (changeid is not None
177 177 or fileid is not None
178 178 or changectx is not None)
179 179
180 180 if filelog:
181 181 self._filelog = filelog
182 182
183 183 if changeid is not None:
184 184 self._changeid = changeid
185 185 if changectx is not None:
186 186 self._changectx = changectx
187 187 if fileid is not None:
188 188 self._fileid = fileid
189 189
190 190 def __getattr__(self, name):
191 191 if name == '_changectx':
192 192 self._changectx = changectx(self._repo, self._changeid)
193 193 return self._changectx
194 194 elif name == '_filelog':
195 195 self._filelog = self._repo.file(self._path)
196 196 return self._filelog
197 197 elif name == '_changeid':
198 198 if '_changectx' in self.__dict__:
199 199 self._changeid = self._changectx.rev()
200 200 else:
201 201 self._changeid = self._filelog.linkrev(self._filenode)
202 202 return self._changeid
203 203 elif name == '_filenode':
204 204 if '_fileid' in self.__dict__:
205 205 self._filenode = self._filelog.lookup(self._fileid)
206 206 else:
207 207 self._filenode = self._changectx.filenode(self._path)
208 208 return self._filenode
209 209 elif name == '_filerev':
210 210 self._filerev = self._filelog.rev(self._filenode)
211 211 return self._filerev
212 212 elif name == '_repopath':
213 213 self._repopath = self._path
214 214 return self._repopath
215 215 else:
216 216 raise AttributeError, name
217 217
218 218 def __nonzero__(self):
219 219 try:
220 220 n = self._filenode
221 221 return True
222 222 except revlog.LookupError:
223 223 # file is missing
224 224 return False
225 225
226 226 def __str__(self):
227 227 return "%s@%s" % (self.path(), short(self.node()))
228 228
229 229 def __repr__(self):
230 230 return "<filectx %s>" % str(self)
231 231
232 232 def __hash__(self):
233 233 try:
234 234 return hash((self._path, self._fileid))
235 235 except AttributeError:
236 236 return id(self)
237 237
238 238 def __eq__(self, other):
239 239 try:
240 240 return (self._path == other._path
241 241 and self._fileid == other._fileid)
242 242 except AttributeError:
243 243 return False
244 244
245 245 def __ne__(self, other):
246 246 return not (self == other)
247 247
248 248 def filectx(self, fileid):
249 249 '''opens an arbitrary revision of the file without
250 250 opening a new filelog'''
251 251 return filectx(self._repo, self._path, fileid=fileid,
252 252 filelog=self._filelog)
253 253
254 254 def filerev(self): return self._filerev
255 255 def filenode(self): return self._filenode
256 256 def flags(self): return self._changectx.flags(self._path)
257 257 def filelog(self): return self._filelog
258 258
259 259 def rev(self):
260 260 if '_changectx' in self.__dict__:
261 261 return self._changectx.rev()
262 262 if '_changeid' in self.__dict__:
263 263 return self._changectx.rev()
264 264 return self._filelog.linkrev(self._filenode)
265 265
266 266 def linkrev(self): return self._filelog.linkrev(self._filenode)
267 267 def node(self): return self._changectx.node()
268 268 def user(self): return self._changectx.user()
269 269 def date(self): return self._changectx.date()
270 270 def files(self): return self._changectx.files()
271 271 def description(self): return self._changectx.description()
272 272 def branch(self): return self._changectx.branch()
273 273 def manifest(self): return self._changectx.manifest()
274 274 def changectx(self): return self._changectx
275 275
276 276 def data(self): return self._filelog.read(self._filenode)
277 277 def path(self): return self._path
278 278 def size(self): return self._filelog.size(self._filerev)
279 279
280 280 def cmp(self, text): return self._filelog.cmp(self._filenode, text)
281 281
282 282 def renamed(self):
283 283 """check if file was actually renamed in this changeset revision
284 284
285 285 If rename logged in file revision, we report copy for changeset only
286 286 if file revisions linkrev points back to the changeset in question
287 287 or both changeset parents contain different file revisions.
288 288 """
289 289
290 290 renamed = self._filelog.renamed(self._filenode)
291 291 if not renamed:
292 292 return renamed
293 293
294 294 if self.rev() == self.linkrev():
295 295 return renamed
296 296
297 297 name = self.path()
298 298 fnode = self._filenode
299 299 for p in self._changectx.parents():
300 300 try:
301 301 if fnode == p.filenode(name):
302 302 return None
303 303 except revlog.LookupError:
304 304 pass
305 305 return renamed
306 306
307 307 def parents(self):
308 308 p = self._path
309 309 fl = self._filelog
310 310 pl = [(p, n, fl) for n in self._filelog.parents(self._filenode)]
311 311
312 312 r = self._filelog.renamed(self._filenode)
313 313 if r:
314 314 pl[0] = (r[0], r[1], None)
315 315
316 316 return [filectx(self._repo, p, fileid=n, filelog=l)
317 317 for p,n,l in pl if n != nullid]
318 318
319 319 def children(self):
320 320 # hard for renames
321 321 c = self._filelog.children(self._filenode)
322 322 return [filectx(self._repo, self._path, fileid=x,
323 323 filelog=self._filelog) for x in c]
324 324
325 325 def annotate(self, follow=False, linenumber=None):
326 326 '''returns a list of tuples of (ctx, line) for each line
327 327 in the file, where ctx is the filectx of the node where
328 328 that line was last changed.
329 329 This returns tuples of ((ctx, linenumber), line) for each line,
330 330 if "linenumber" parameter is NOT "None".
331 331 In such tuples, linenumber means one at the first appearance
332 332 in the managed file.
333 333 To reduce annotation cost,
334 334 this returns fixed value(False is used) as linenumber,
335 335 if "linenumber" parameter is "False".'''
336 336
337 337 def decorate_compat(text, rev):
338 338 return ([rev] * len(text.splitlines()), text)
339 339
340 340 def without_linenumber(text, rev):
341 341 return ([(rev, False)] * len(text.splitlines()), text)
342 342
343 343 def with_linenumber(text, rev):
344 344 size = len(text.splitlines())
345 345 return ([(rev, i) for i in xrange(1, size + 1)], text)
346 346
347 347 decorate = (((linenumber is None) and decorate_compat) or
348 348 (linenumber and with_linenumber) or
349 349 without_linenumber)
350 350
351 351 def pair(parent, child):
352 352 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
353 353 child[0][b1:b2] = parent[0][a1:a2]
354 354 return child
355 355
356 356 getlog = util.cachefunc(lambda x: self._repo.file(x))
357 357 def getctx(path, fileid):
358 358 log = path == self._path and self._filelog or getlog(path)
359 359 return filectx(self._repo, path, fileid=fileid, filelog=log)
360 360 getctx = util.cachefunc(getctx)
361 361
362 362 def parents(f):
363 363 # we want to reuse filectx objects as much as possible
364 364 p = f._path
365 365 if f._filerev is None: # working dir
366 366 pl = [(n.path(), n.filerev()) for n in f.parents()]
367 367 else:
368 368 pl = [(p, n) for n in f._filelog.parentrevs(f._filerev)]
369 369
370 370 if follow:
371 371 r = f.renamed()
372 372 if r:
373 373 pl[0] = (r[0], getlog(r[0]).rev(r[1]))
374 374
375 375 return [getctx(p, n) for p, n in pl if n != nullrev]
376 376
377 377 # use linkrev to find the first changeset where self appeared
378 378 if self.rev() != self.linkrev():
379 379 base = self.filectx(self.filerev())
380 380 else:
381 381 base = self
382 382
383 383 # find all ancestors
384 384 needed = {base: 1}
385 385 visit = [base]
386 386 files = [base._path]
387 387 while visit:
388 388 f = visit.pop(0)
389 389 for p in parents(f):
390 390 if p not in needed:
391 391 needed[p] = 1
392 392 visit.append(p)
393 393 if p._path not in files:
394 394 files.append(p._path)
395 395 else:
396 396 # count how many times we'll use this
397 397 needed[p] += 1
398 398
399 399 # sort by revision (per file) which is a topological order
400 400 visit = []
401 401 for f in files:
402 402 fn = [(n.rev(), n) for n in needed if n._path == f]
403 403 visit.extend(fn)
404 404
405 405 hist = {}
406 406 for r, f in util.sort(visit):
407 407 curr = decorate(f.data(), f)
408 408 for p in parents(f):
409 409 if p != nullid:
410 410 curr = pair(hist[p], curr)
411 411 # trim the history of unneeded revs
412 412 needed[p] -= 1
413 413 if not needed[p]:
414 414 del hist[p]
415 415 hist[f] = curr
416 416
417 417 return zip(hist[f][0], hist[f][1].splitlines(1))
418 418
419 419 def ancestor(self, fc2):
420 420 """
421 421 find the common ancestor file context, if any, of self, and fc2
422 422 """
423 423
424 424 acache = {}
425 425
426 426 # prime the ancestor cache for the working directory
427 427 for c in (self, fc2):
428 428 if c._filerev == None:
429 429 pl = [(n.path(), n.filenode()) for n in c.parents()]
430 430 acache[(c._path, None)] = pl
431 431
432 432 flcache = {self._repopath:self._filelog, fc2._repopath:fc2._filelog}
433 433 def parents(vertex):
434 434 if vertex in acache:
435 435 return acache[vertex]
436 436 f, n = vertex
437 437 if f not in flcache:
438 438 flcache[f] = self._repo.file(f)
439 439 fl = flcache[f]
440 440 pl = [(f, p) for p in fl.parents(n) if p != nullid]
441 441 re = fl.renamed(n)
442 442 if re:
443 443 pl.append(re)
444 444 acache[vertex] = pl
445 445 return pl
446 446
447 447 a, b = (self._path, self._filenode), (fc2._path, fc2._filenode)
448 448 v = ancestor.ancestor(a, b, parents)
449 449 if v:
450 450 f, n = v
451 451 return filectx(self._repo, f, fileid=n, filelog=flcache[f])
452 452
453 453 return None
454 454
455 455 class workingctx(changectx):
456 456 """A workingctx object makes access to data related to
457 457 the current working directory convenient.
458 458 parents - a pair of parent nodeids, or None to use the dirstate.
459 459 date - any valid date string or (unixtime, offset), or None.
460 460 user - username string, or None.
461 461 extra - a dictionary of extra values, or None.
462 462 changes - a list of file lists as returned by localrepo.status()
463 463 or None to use the repository status.
464 464 """
465 465 def __init__(self, repo, parents=None, text="", user=None, date=None,
466 466 extra=None, changes=None):
467 467 self._repo = repo
468 468 self._rev = None
469 469 self._node = None
470 470 self._text = text
471 471 if date:
472 472 self._date = util.parsedate(date)
473 473 if user:
474 474 self._user = user
475 475 if parents:
476 476 self._parents = [changectx(self._repo, p) for p in parents]
477 477 if changes:
478 478 self._status = list(changes)
479 479
480 480 self._extra = {}
481 481 if extra:
482 482 self._extra = extra.copy()
483 483 if 'branch' not in self._extra:
484 484 branch = self._repo.dirstate.branch()
485 485 try:
486 486 branch = branch.decode('UTF-8').encode('UTF-8')
487 487 except UnicodeDecodeError:
488 488 raise util.Abort(_('branch name not in UTF-8!'))
489 489 self._extra['branch'] = branch
490 490 if self._extra['branch'] == '':
491 491 self._extra['branch'] = 'default'
492 492
493 493 def __str__(self):
494 494 return str(self._parents[0]) + "+"
495 495
496 496 def __nonzero__(self):
497 497 return True
498 498
499 499 def __contains__(self, key):
500 500 return self._dirstate[f] not in "?r"
501 501
502 502 def __getattr__(self, name):
503 503 if name == '_status':
504 504 self._status = self._repo.status(unknown=True)
505 505 return self._status
506 506 elif name == '_user':
507 507 self._user = self._repo.ui.username()
508 508 return self._user
509 509 elif name == '_date':
510 510 self._date = util.makedate()
511 511 return self._date
512 512 if name == '_manifest':
513 513 self._buildmanifest()
514 514 return self._manifest
515 515 elif name == '_parents':
516 516 p = self._repo.dirstate.parents()
517 517 if p[1] == nullid:
518 518 p = p[:-1]
519 519 self._parents = [changectx(self._repo, x) for x in p]
520 520 return self._parents
521 521 else:
522 522 raise AttributeError, name
523 523
524 524 def _buildmanifest(self):
525 525 """generate a manifest corresponding to the working directory"""
526 526
527 527 man = self._parents[0].manifest().copy()
528 528 copied = self._repo.dirstate.copies()
529 529 cf = lambda x: man.flags(copied.get(x, x))
530 530 ff = self._repo.dirstate.flagfunc(cf)
531 531 modified, added, removed, deleted, unknown = self._status[:5]
532 532 for i, l in (("a", added), ("m", modified), ("u", unknown)):
533 533 for f in l:
534 534 man[f] = man.get(copied.get(f, f), nullid) + i
535 535 try:
536 536 man.set(f, ff(f))
537 537 except OSError:
538 538 pass
539 539
540 540 for f in deleted + removed:
541 541 if f in man:
542 542 del man[f]
543 543
544 544 self._manifest = man
545 545
546 546 def manifest(self): return self._manifest
547 547
548 548 def user(self): return self._user or self._repo.ui.username()
549 549 def date(self): return self._date
550 550 def description(self): return self._text
551 551 def files(self):
552 552 return util.sort(self._status[0] + self._status[1] + self._status[2])
553 553
554 554 def modified(self): return self._status[0]
555 555 def added(self): return self._status[1]
556 556 def removed(self): return self._status[2]
557 557 def deleted(self): return self._status[3]
558 558 def unknown(self): return self._status[4]
559 559 def clean(self): return self._status[5]
560 560 def branch(self): return self._extra['branch']
561 561 def extra(self): return self._extra
562 562
563 563 def tags(self):
564 564 t = []
565 565 [t.extend(p.tags()) for p in self.parents()]
566 566 return t
567 567
568 568 def children(self):
569 569 return []
570 570
571 571 def flags(self, path):
572 572 if '_manifest' in self.__dict__:
573 573 try:
574 574 return self._manifest.flags(path)
575 575 except KeyError:
576 576 return ''
577 577
578 578 pnode = self._parents[0].changeset()[0]
579 579 orig = self._repo.dirstate.copies().get(path, path)
580 580 node, flag = self._repo.manifest.find(pnode, orig)
581 581 try:
582 582 ff = self._repo.dirstate.flagfunc(lambda x: flag or '')
583 583 return ff(path)
584 584 except OSError:
585 585 pass
586 586
587 587 if not node or path in self.deleted() or path in self.removed():
588 588 return ''
589 589 return flag
590 590
591 591 def filectx(self, path, filelog=None):
592 592 """get a file context from the working directory"""
593 593 return workingfilectx(self._repo, path, workingctx=self,
594 594 filelog=filelog)
595 595
596 596 def ancestor(self, c2):
597 597 """return the ancestor context of self and c2"""
598 598 return self._parents[0].ancestor(c2) # punt on two parents for now
599 599
600 600 def walk(self, match):
601 for fn, st in self._repo.dirstate.walk(match, True, False):
601 for fn, st in util.sort(self._repo.dirstate.walk(match, True, False)):
602 602 yield fn
603 603
class workingfilectx(filectx):
    """A workingfilectx object makes access to data related to a particular
    file in the working directory convenient."""
    def __init__(self, repo, path, filelog=None, workingctx=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        self._repo = repo
        self._path = path
        # working-directory files have no fixed changeset/file revision
        self._changeid = None
        self._filerev = self._filenode = None

        if filelog:
            self._filelog = filelog
        if workingctx:
            self._changectx = workingctx

    def __getattr__(self, name):
        # lazily derive and cache attributes on first access
        if name == '_changectx':
            self._changectx = workingctx(self._repo)
            return self._changectx
        elif name == '_repopath':
            # the path under which this file's history lives in the repo:
            # the copy source if the file was copied, else its own path
            self._repopath = (self._repo.dirstate.copied(self._path)
                              or self._path)
            return self._repopath
        elif name == '_filelog':
            self._filelog = self._repo.file(self._repopath)
            return self._filelog
        else:
            raise AttributeError, name

    def __nonzero__(self):
        return True

    def __str__(self):
        return "%s@%s" % (self.path(), self._changectx)

    def filectx(self, fileid):
        '''opens an arbitrary revision of the file without
        opening a new filelog'''
        return filectx(self._repo, self._repopath, fileid=fileid,
                       filelog=self._filelog)

    def rev(self):
        if '_changectx' in self.__dict__:
            return self._changectx.rev()
        return self._filelog.linkrev(self._filenode)

    # read the file's current contents from the working directory
    def data(self): return self._repo.wread(self._path)
    def renamed(self):
        """return (source path, source node) if this file was copied,
        else None"""
        rp = self._repopath
        if rp == self._path:
            return None
        return rp, self._changectx._parents[0]._manifest.get(rp, nullid)

    def parents(self):
        '''return parent filectxs, following copies if necessary'''
        p = self._path
        rp = self._repopath
        pcl = self._changectx._parents
        fl = self._filelog
        pl = [(rp, pcl[0]._manifest.get(rp, nullid), fl)]
        if len(pcl) > 1:
            if rp != p:
                # second parent tracks the original path; its filelog
                # differs from the copy source's, so let filectx reopen it
                fl = None
            pl.append((p, pcl[1]._manifest.get(p, nullid), fl))

        return [filectx(self._repo, p, fileid=n, filelog=l)
                for p,n,l in pl if n != nullid]

    def children(self):
        return []

    # size/date come from the working-directory file itself
    def size(self): return os.stat(self._repo.wjoin(self._path)).st_size
    def date(self):
        t, tz = self._changectx.date()
        try:
            return (int(os.lstat(self._repo.wjoin(self._path)).st_mtime), tz)
        except OSError, err:
            if err.errno != errno.ENOENT: raise
            # file is gone: fall back to the changectx date
            return (t, tz)

    # NOTE(review): this returns True when contents are EQUAL, whereas
    # filectx.cmp delegates to filelog.cmp — confirm callers expect the
    # opposite polarity from the two cmp() implementations.
    def cmp(self, text): return self._repo.wread(self._path) == text
686 686
class memctx(object):
    """A memctx is a subset of changectx supposed to be built on memory
    and passed to commit functions.

    NOTE: this interface and the related memfilectx are experimental and
    may change without notice.

    parents - a pair of parent nodeids.
    filectxfn - a callable taking (repo, memctx, path) arguments and
    returning a memctx object.
    date - any valid date string or (unixtime, offset), or None.
    user - username string, or None.
    extra - a dictionary of extra values, or None.
    """
    def __init__(self, repo, parents, text, files, filectxfn, user=None,
                 date=None, extra=None):
        self._repo = repo
        self._rev = None
        self._node = None
        self._text = text
        self._date = date and util.parsedate(date) or util.makedate()
        self._user = user
        parents = [(p or nullid) for p in parents]
        p1, p2 = parents
        self._parents = [changectx(self._repo, p) for p in (p1, p2)]
        files = util.sort(list(files))
        # FIX: clean() reads self._status[5]; the list only had five
        # entries (indices 0-4), so clean() raised IndexError.  Use six
        # lists — modified, added, removed, deleted, unknown, clean —
        # matching the workingctx status layout.
        self._status = [files, [], [], [], [], []]
        self._filectxfn = filectxfn

        self._extra = extra and extra.copy() or {}
        if 'branch' not in self._extra:
            self._extra['branch'] = 'default'
        elif self._extra.get('branch') == '':
            self._extra['branch'] = 'default'

    def __str__(self):
        return str(self._parents[0]) + "+"

    def __int__(self):
        return self._rev

    def __nonzero__(self):
        return True

    def user(self): return self._user or self._repo.ui.username()
    def date(self): return self._date
    def description(self): return self._text
    def files(self): return self.modified()
    def modified(self): return self._status[0]
    def added(self): return self._status[1]
    def removed(self): return self._status[2]
    def deleted(self): return self._status[3]
    def unknown(self): return self._status[4]
    def clean(self): return self._status[5]
    def branch(self): return self._extra['branch']
    def extra(self): return self._extra
    # FIX: this class defines no __getitem__, so the old 'self[f]' raised
    # TypeError; go through filectx() directly.
    def flags(self, f): return self.filectx(f).flags()

    def parents(self):
        """return contexts for each parent changeset"""
        return self._parents

    def filectx(self, path, filelog=None):
        """get a file context from the working directory"""
        return self._filectxfn(self._repo, self, path)
752 752
class memfilectx(object):
    """A memfilectx is a subset of filectx supposed to be built by client
    code and passed to commit functions.
    """
    def __init__(self, path, data, islink, isexec, copied):
        """copied is the source file path, or None."""
        self._path = path
        self._data = data
        # encode mode bits manifest-style: 'l' for symlink, 'x' for exec
        self._flags = (islink and 'l' or '') + (isexec and 'x' or '')
        self._copied = None
        if copied:
            self._copied = (copied, nullid)

    def __nonzero__(self): return True
    def __str__(self):
        # FIX: the old body interpolated self._changectx, an attribute this
        # class never defines, so str() unconditionally raised
        # AttributeError.  Identify the file by its path instead.
        return "memfilectx: %s" % self._path
    def path(self): return self._path
    def data(self): return self._data
    def flags(self): return self._flags
    def isexec(self): return 'x' in self._flags
    def islink(self): return 'l' in self._flags
    def renamed(self): return self._copied
774 774
@@ -1,609 +1,605
1 1 """
2 2 dirstate.py - working directory tracking for mercurial
3 3
4 4 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8 """
9 9
10 10 from node import nullid
11 11 from i18n import _
12 12 import struct, os, bisect, stat, util, errno, ignore
13 13 import cStringIO, osutil, sys
14 14
15 15 _unknown = ('?', 0, 0, 0)
16 16 _format = ">cllll"
17 17
18 18 def _finddirs(path):
19 19 pos = len(path)
20 20 while 1:
21 21 pos = path.rfind('/', 0, pos)
22 22 if pos == -1:
23 23 break
24 24 yield path[:pos]
25 25
26 26 class dirstate(object):
27 27
28 28 def __init__(self, opener, ui, root):
29 29 self._opener = opener
30 30 self._root = root
31 31 self._dirty = False
32 32 self._dirtypl = False
33 33 self._ui = ui
34 34
35 35 def __getattr__(self, name):
36 36 if name == '_map':
37 37 self._read()
38 38 return self._map
39 39 elif name == '_copymap':
40 40 self._read()
41 41 return self._copymap
42 42 elif name == '_foldmap':
43 43 _foldmap = {}
44 44 for name in self._map:
45 45 norm = os.path.normcase(os.path.normpath(name))
46 46 _foldmap[norm] = name
47 47 self._foldmap = _foldmap
48 48 return self._foldmap
49 49 elif name == '_branch':
50 50 try:
51 51 self._branch = (self._opener("branch").read().strip()
52 52 or "default")
53 53 except IOError:
54 54 self._branch = "default"
55 55 return self._branch
56 56 elif name == '_pl':
57 57 self._pl = [nullid, nullid]
58 58 try:
59 59 st = self._opener("dirstate").read(40)
60 60 if len(st) == 40:
61 61 self._pl = st[:20], st[20:40]
62 62 except IOError, err:
63 63 if err.errno != errno.ENOENT: raise
64 64 return self._pl
65 65 elif name == '_dirs':
66 66 dirs = {}
67 67 for f,s in self._map.items():
68 68 if s[0] != 'r':
69 69 for base in _finddirs(f):
70 70 dirs[base] = dirs.get(base, 0) + 1
71 71 self._dirs = dirs
72 72 return self._dirs
73 73 elif name == '_ignore':
74 74 files = [self._join('.hgignore')]
75 75 for name, path in self._ui.configitems("ui"):
76 76 if name == 'ignore' or name.startswith('ignore.'):
77 77 files.append(os.path.expanduser(path))
78 78 self._ignore = ignore.ignore(self._root, files, self._ui.warn)
79 79 return self._ignore
80 80 elif name == '_slash':
81 81 self._slash = self._ui.configbool('ui', 'slash') and os.sep != '/'
82 82 return self._slash
83 83 elif name == '_checklink':
84 84 self._checklink = util.checklink(self._root)
85 85 return self._checklink
86 86 elif name == '_checkexec':
87 87 self._checkexec = util.checkexec(self._root)
88 88 return self._checkexec
89 89 elif name == '_checkcase':
90 90 self._checkcase = not util.checkcase(self._join('.hg'))
91 91 return self._checkcase
92 92 elif name == 'normalize':
93 93 if self._checkcase:
94 94 self.normalize = self._normalize
95 95 else:
96 96 self.normalize = lambda x: x
97 97 return self.normalize
98 98 else:
99 99 raise AttributeError, name
100 100
101 101 def _join(self, f):
102 102 return os.path.join(self._root, f)
103 103
104 104 def flagfunc(self, fallback):
105 105 if self._checklink:
106 106 if self._checkexec:
107 107 def f(x):
108 108 p = os.path.join(self._root, x)
109 109 if os.path.islink(p):
110 110 return 'l'
111 111 if util.is_exec(p):
112 112 return 'x'
113 113 return ''
114 114 return f
115 115 def f(x):
116 116 if os.path.islink(os.path.join(self._root, x)):
117 117 return 'l'
118 118 if 'x' in fallback(x):
119 119 return 'x'
120 120 return ''
121 121 return f
122 122 if self._checkexec:
123 123 def f(x):
124 124 if 'l' in fallback(x):
125 125 return 'l'
126 126 if util.is_exec(os.path.join(self._root, x)):
127 127 return 'x'
128 128 return ''
129 129 return f
130 130 return fallback
131 131
132 132 def getcwd(self):
133 133 cwd = os.getcwd()
134 134 if cwd == self._root: return ''
135 135 # self._root ends with a path separator if self._root is '/' or 'C:\'
136 136 rootsep = self._root
137 137 if not util.endswithsep(rootsep):
138 138 rootsep += os.sep
139 139 if cwd.startswith(rootsep):
140 140 return cwd[len(rootsep):]
141 141 else:
142 142 # we're outside the repo. return an absolute path.
143 143 return cwd
144 144
145 145 def pathto(self, f, cwd=None):
146 146 if cwd is None:
147 147 cwd = self.getcwd()
148 148 path = util.pathto(self._root, cwd, f)
149 149 if self._slash:
150 150 return util.normpath(path)
151 151 return path
152 152
153 153 def __getitem__(self, key):
154 154 ''' current states:
155 155 n normal
156 156 m needs merging
157 157 r marked for removal
158 158 a marked for addition
159 159 ? not tracked'''
160 160 return self._map.get(key, ("?",))[0]
161 161
162 162 def __contains__(self, key):
163 163 return key in self._map
164 164
165 165 def __iter__(self):
166 166 for x in util.sort(self._map):
167 167 yield x
168 168
169 169 def parents(self):
170 170 return self._pl
171 171
172 172 def branch(self):
173 173 return self._branch
174 174
175 175 def setparents(self, p1, p2=nullid):
176 176 self._dirty = self._dirtypl = True
177 177 self._pl = p1, p2
178 178
179 179 def setbranch(self, branch):
180 180 self._branch = branch
181 181 self._opener("branch", "w").write(branch + '\n')
182 182
183 183 def _read(self):
184 184 self._map = {}
185 185 self._copymap = {}
186 186 if not self._dirtypl:
187 187 self._pl = [nullid, nullid]
188 188 try:
189 189 st = self._opener("dirstate").read()
190 190 except IOError, err:
191 191 if err.errno != errno.ENOENT: raise
192 192 return
193 193 if not st:
194 194 return
195 195
196 196 if not self._dirtypl:
197 197 self._pl = [st[:20], st[20: 40]]
198 198
199 199 # deref fields so they will be local in loop
200 200 dmap = self._map
201 201 copymap = self._copymap
202 202 unpack = struct.unpack
203 203 e_size = struct.calcsize(_format)
204 204 pos1 = 40
205 205 l = len(st)
206 206
207 207 # the inner loop
208 208 while pos1 < l:
209 209 pos2 = pos1 + e_size
210 210 e = unpack(">cllll", st[pos1:pos2]) # a literal here is faster
211 211 pos1 = pos2 + e[4]
212 212 f = st[pos2:pos1]
213 213 if '\0' in f:
214 214 f, c = f.split('\0')
215 215 copymap[f] = c
216 216 dmap[f] = e # we hold onto e[4] because making a subtuple is slow
217 217
218 218 def invalidate(self):
219 219 for a in "_map _copymap _foldmap _branch _pl _dirs _ignore".split():
220 220 if a in self.__dict__:
221 221 delattr(self, a)
222 222 self._dirty = False
223 223
224 224 def copy(self, source, dest):
225 225 if source == dest:
226 226 return
227 227 self._dirty = True
228 228 self._copymap[dest] = source
229 229
230 230 def copied(self, file):
231 231 return self._copymap.get(file, None)
232 232
233 233 def copies(self):
234 234 return self._copymap
235 235
236 236 def _droppath(self, f):
237 237 if self[f] not in "?r" and "_dirs" in self.__dict__:
238 238 dirs = self._dirs
239 239 for base in _finddirs(f):
240 240 if dirs[base] == 1:
241 241 del dirs[base]
242 242 else:
243 243 dirs[base] -= 1
244 244
245 245 def _addpath(self, f, check=False):
246 246 oldstate = self[f]
247 247 if check or oldstate == "r":
248 248 if '\r' in f or '\n' in f:
249 249 raise util.Abort(
250 250 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
251 251 if f in self._dirs:
252 252 raise util.Abort(_('directory %r already in dirstate') % f)
253 253 # shadows
254 254 for d in _finddirs(f):
255 255 if d in self._dirs:
256 256 break
257 257 if d in self._map and self[d] != 'r':
258 258 raise util.Abort(
259 259 _('file %r in dirstate clashes with %r') % (d, f))
260 260 if oldstate in "?r" and "_dirs" in self.__dict__:
261 261 dirs = self._dirs
262 262 for base in _finddirs(f):
263 263 dirs[base] = dirs.get(base, 0) + 1
264 264
265 265 def normal(self, f):
266 266 'mark a file normal and clean'
267 267 self._dirty = True
268 268 self._addpath(f)
269 269 s = os.lstat(self._join(f))
270 270 self._map[f] = ('n', s.st_mode, s.st_size, s.st_mtime, 0)
271 271 if f in self._copymap:
272 272 del self._copymap[f]
273 273
274 274 def normallookup(self, f):
275 275 'mark a file normal, but possibly dirty'
276 276 if self._pl[1] != nullid and f in self._map:
277 277 # if there is a merge going on and the file was either
278 278 # in state 'm' or dirty before being removed, restore that state.
279 279 entry = self._map[f]
280 280 if entry[0] == 'r' and entry[2] in (-1, -2):
281 281 source = self._copymap.get(f)
282 282 if entry[2] == -1:
283 283 self.merge(f)
284 284 elif entry[2] == -2:
285 285 self.normaldirty(f)
286 286 if source:
287 287 self.copy(source, f)
288 288 return
289 289 if entry[0] == 'm' or entry[0] == 'n' and entry[2] == -2:
290 290 return
291 291 self._dirty = True
292 292 self._addpath(f)
293 293 self._map[f] = ('n', 0, -1, -1, 0)
294 294 if f in self._copymap:
295 295 del self._copymap[f]
296 296
297 297 def normaldirty(self, f):
298 298 'mark a file normal, but dirty'
299 299 self._dirty = True
300 300 self._addpath(f)
301 301 self._map[f] = ('n', 0, -2, -1, 0)
302 302 if f in self._copymap:
303 303 del self._copymap[f]
304 304
305 305 def add(self, f):
306 306 'mark a file added'
307 307 self._dirty = True
308 308 self._addpath(f, True)
309 309 self._map[f] = ('a', 0, -1, -1, 0)
310 310 if f in self._copymap:
311 311 del self._copymap[f]
312 312
313 313 def remove(self, f):
314 314 'mark a file removed'
315 315 self._dirty = True
316 316 self._droppath(f)
317 317 size = 0
318 318 if self._pl[1] != nullid and f in self._map:
319 319 entry = self._map[f]
320 320 if entry[0] == 'm':
321 321 size = -1
322 322 elif entry[0] == 'n' and entry[2] == -2:
323 323 size = -2
324 324 self._map[f] = ('r', 0, size, 0, 0)
325 325 if size == 0 and f in self._copymap:
326 326 del self._copymap[f]
327 327
328 328 def merge(self, f):
329 329 'mark a file merged'
330 330 self._dirty = True
331 331 s = os.lstat(self._join(f))
332 332 self._addpath(f)
333 333 self._map[f] = ('m', s.st_mode, s.st_size, s.st_mtime, 0)
334 334 if f in self._copymap:
335 335 del self._copymap[f]
336 336
337 337 def forget(self, f):
338 338 'forget a file'
339 339 self._dirty = True
340 340 try:
341 341 self._droppath(f)
342 342 del self._map[f]
343 343 except KeyError:
344 344 self._ui.warn(_("not in dirstate: %s\n") % f)
345 345
346 346 def _normalize(self, path):
347 347 if path not in self._foldmap:
348 348 if not os.path.exists(path):
349 349 return path
350 350 self._foldmap[path] = util.fspath(path, self._root)
351 351 return self._foldmap[path]
352 352
353 353 def clear(self):
354 354 self._map = {}
355 355 if "_dirs" in self.__dict__:
356 356 delattr(self, "_dirs");
357 357 self._copymap = {}
358 358 self._pl = [nullid, nullid]
359 359 self._dirty = True
360 360
361 361 def rebuild(self, parent, files):
362 362 self.clear()
363 363 for f in files:
364 364 if 'x' in files.flags(f):
365 365 self._map[f] = ('n', 0777, -1, 0, 0)
366 366 else:
367 367 self._map[f] = ('n', 0666, -1, 0, 0)
368 368 self._pl = (parent, nullid)
369 369 self._dirty = True
370 370
371 371 def write(self):
372 372 if not self._dirty:
373 373 return
374 374 st = self._opener("dirstate", "w", atomictemp=True)
375 375
376 376 try:
377 377 gran = int(self._ui.config('dirstate', 'granularity', 1))
378 378 except ValueError:
379 379 gran = 1
380 380 limit = sys.maxint
381 381 if gran > 0:
382 382 limit = util.fstat(st).st_mtime - gran
383 383
384 384 cs = cStringIO.StringIO()
385 385 copymap = self._copymap
386 386 pack = struct.pack
387 387 write = cs.write
388 388 write("".join(self._pl))
389 389 for f, e in self._map.iteritems():
390 390 if f in copymap:
391 391 f = "%s\0%s" % (f, copymap[f])
392 392 if e[3] > limit and e[0] == 'n':
393 393 e = (e[0], 0, -1, -1, 0)
394 394 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
395 395 write(e)
396 396 write(f)
397 397 st.write(cs.getvalue())
398 398 st.rename()
399 399 self._dirty = self._dirtypl = False
400 400
401 401 def _supported(self, f, mode, verbose=False):
402 402 if stat.S_ISREG(mode) or stat.S_ISLNK(mode):
403 403 return True
404 404 if verbose:
405 405 kind = 'unknown'
406 406 if stat.S_ISCHR(mode): kind = _('character device')
407 407 elif stat.S_ISBLK(mode): kind = _('block device')
408 408 elif stat.S_ISFIFO(mode): kind = _('fifo')
409 409 elif stat.S_ISSOCK(mode): kind = _('socket')
410 410 elif stat.S_ISDIR(mode): kind = _('directory')
411 411 self._ui.warn(_('%s: unsupported file type (type is %s)\n')
412 412 % (self.pathto(f), kind))
413 413 return False
414 414
415 415 def _dirignore(self, f):
416 416 if f == '.':
417 417 return False
418 418 if self._ignore(f):
419 419 return True
420 420 for p in _finddirs(f):
421 421 if self._ignore(p):
422 422 return True
423 423 return False
424 424
425 425 def walk(self, match, unknown, ignored):
426 426 '''
427 427 walk recursively through the directory tree, finding all files
428 428 matched by the match function
429 429
430 430 results are yielded in a tuple (filename, stat), where stat
431 431 and st is the stat result if the file was found in the directory.
432 432 '''
433 433
434 434 def fwarn(f, msg):
435 435 self._ui.warn('%s: %s\n' % (self.pathto(ff), msg))
436 436 return False
437 437 badfn = fwarn
438 438 if hasattr(match, 'bad'):
439 439 badfn = match.bad
440 440
441 441 files = util.unique(match.files())
442 442 if not files or '.' in files:
443 443 files = ['']
444 444 dmap = self._map
445 445
446 446 def imatch(file_):
447 447 if file_ not in dmap and self._ignore(file_):
448 448 return False
449 449 return match(file_)
450 450
451 451 # TODO: don't walk unknown directories if unknown and ignored are False
452 452 ignore = self._ignore
453 453 dirignore = self._dirignore
454 454 if ignored:
455 455 imatch = match
456 456 ignore = util.never
457 457 dirignore = util.never
458 458
459 459 normpath = util.normpath
460 460 normalize = self.normalize
461 461 listdir = osutil.listdir
462 462 lstat = os.lstat
463 463 bisect_left = bisect.bisect_left
464 464 isdir = os.path.isdir
465 465 pconvert = util.pconvert
466 466 join = os.path.join
467 467 s_isdir = stat.S_ISDIR
468 468 supported = self._supported
469 469 _join = self._join
470 470 work = []
471 471 wadd = work.append
472 found = []
473 add = found.append
474 472
475 473 seen = {'.hg': 1}
476 474
477 475 # step 1: find all explicit files
478 476 for ff in util.sort(files):
479 477 nf = normalize(normpath(ff))
480 478 if nf in seen:
481 479 continue
482 480
483 481 try:
484 482 st = lstat(_join(nf))
485 483 except OSError, inst:
486 484 keep = False
487 485 for fn in dmap:
488 486 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
489 487 keep = True
490 488 break
491 489 if not keep:
492 490 if inst.errno != errno.ENOENT:
493 491 fwarn(ff, inst.strerror)
494 492 elif badfn(ff, inst.strerror) and imatch(nf):
495 493 yield nf, None
496 494 continue
497 495
498 496 if s_isdir(st.st_mode):
499 497 if not dirignore(nf):
500 498 wadd(nf)
501 499 else:
502 500 seen[nf] = 1
503 501 if supported(ff, st.st_mode, verbose=True):
504 502 yield nf, st
505 503 elif nf in dmap:
506 504 yield nf, None
507 505
508 506 # step 2: visit subdirectories
509 507 while work:
510 508 nd = work.pop()
511 509 if hasattr(match, 'dir'):
512 510 match.dir(nd)
513 511 entries = listdir(_join(nd), stat=True)
514 512 # nd is the top of the repository dir tree
515 513 if nd == '.':
516 514 nd = ''
517 515 else:
518 516 # do not recurse into a repo contained in this
519 517 # one. use bisect to find .hg directory so speed
520 518 # is good on big directory.
521 519 hg = bisect_left(entries, ('.hg'))
522 520 if hg < len(entries) and entries[hg][0] == '.hg' \
523 521 and entries[hg][1] == stat.S_IFDIR:
524 522 continue
525 523 for f, kind, st in entries:
526 524 nf = normalize(pconvert(join(nd, f)))
527 525 if nf in seen:
528 526 continue
529 527 seen[nf] = 1
530 528 # don't trip over symlinks
531 529 if kind == stat.S_IFDIR:
532 530 if not ignore(nf):
533 531 wadd(nf)
534 532 if nf in dmap and match(nf):
535 add((nf, None))
533 yield nf, None
536 534 elif imatch(nf):
537 535 if supported(nf, st.st_mode):
538 add((nf, st))
536 yield nf, st
539 537 elif nf in dmap:
540 add((nf, None))
541 for e in util.sort(found):
542 yield e
538 yield nf, None
543 539
544 540 # step 3: report unseen items in the dmap hash
545 541 for f in util.sort(dmap):
546 542 if f in seen or not match(f):
547 543 continue
548 544 seen[f] = 1
549 545 try:
550 546 st = lstat(_join(f))
551 547 if supported(f, st.st_mode):
552 548 yield f, st
553 549 continue
554 550 except OSError, inst:
555 551 if inst.errno not in (errno.ENOENT, errno.ENOTDIR):
556 552 raise
557 553 yield f, None
558 554
559 555 def status(self, match, ignored, clean, unknown):
560 556 listignored, listclean, listunknown = ignored, clean, unknown
561 557 lookup, modified, added, unknown, ignored = [], [], [], [], []
562 558 removed, deleted, clean = [], [], []
563 559
564 560 _join = self._join
565 561 lstat = os.lstat
566 562 cmap = self._copymap
567 563 dmap = self._map
568 564 ladd = lookup.append
569 565 madd = modified.append
570 566 aadd = added.append
571 567 uadd = unknown.append
572 568 iadd = ignored.append
573 569 radd = removed.append
574 570 dadd = deleted.append
575 571 cadd = clean.append
576 572
577 573 for fn, st in self.walk(match, listunknown, listignored):
578 574 if fn not in dmap:
579 575 if (listignored or match.exact(fn)) and self._dirignore(fn):
580 576 if listignored:
581 577 iadd(fn)
582 578 elif listunknown:
583 579 uadd(fn)
584 580 continue
585 581
586 582 state, mode, size, time, foo = dmap[fn]
587 583
588 584 if not st and state in "nma":
589 585 dadd(fn)
590 586 elif state == 'n':
591 587 if (size >= 0 and
592 588 (size != st.st_size
593 589 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
594 590 or size == -2
595 591 or fn in self._copymap):
596 592 madd(fn)
597 593 elif time != int(st.st_mtime):
598 594 ladd(fn)
599 595 elif listclean:
600 596 cadd(fn)
601 597 elif state == 'm':
602 598 madd(fn)
603 599 elif state == 'a':
604 600 aadd(fn)
605 601 elif state == 'r':
606 602 radd(fn)
607 603
608 604 return (lookup, modified, added, removed, deleted, unknown, ignored,
609 605 clean)
@@ -1,2077 +1,2076
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context, weakref
12 12 import lock, transaction, stat, errno, ui
13 13 import os, revlog, time, util, extensions, hook, inspect
14 14 import match as match_
15 15
16 16 class localrepository(repo.repository):
17 17 capabilities = util.set(('lookup', 'changegroupsubset'))
18 18 supported = ('revlogv1', 'store')
19 19
20 20 def __init__(self, parentui, path=None, create=0):
21 21 repo.repository.__init__(self)
22 22 self.root = os.path.realpath(path)
23 23 self.path = os.path.join(self.root, ".hg")
24 24 self.origroot = path
25 25 self.opener = util.opener(self.path)
26 26 self.wopener = util.opener(self.root)
27 27
28 28 if not os.path.isdir(self.path):
29 29 if create:
30 30 if not os.path.exists(path):
31 31 os.mkdir(path)
32 32 os.mkdir(self.path)
33 33 requirements = ["revlogv1"]
34 34 if parentui.configbool('format', 'usestore', True):
35 35 os.mkdir(os.path.join(self.path, "store"))
36 36 requirements.append("store")
37 37 # create an invalid changelog
38 38 self.opener("00changelog.i", "a").write(
39 39 '\0\0\0\2' # represents revlogv2
40 40 ' dummy changelog to prevent using the old repo layout'
41 41 )
42 42 reqfile = self.opener("requires", "w")
43 43 for r in requirements:
44 44 reqfile.write("%s\n" % r)
45 45 reqfile.close()
46 46 else:
47 47 raise repo.RepoError(_("repository %s not found") % path)
48 48 elif create:
49 49 raise repo.RepoError(_("repository %s already exists") % path)
50 50 else:
51 51 # find requirements
52 52 try:
53 53 requirements = self.opener("requires").read().splitlines()
54 54 except IOError, inst:
55 55 if inst.errno != errno.ENOENT:
56 56 raise
57 57 requirements = []
58 58 # check them
59 59 for r in requirements:
60 60 if r not in self.supported:
61 61 raise repo.RepoError(_("requirement '%s' not supported") % r)
62 62
63 63 # setup store
64 64 if "store" in requirements:
65 65 self.encodefn = util.encodefilename
66 66 self.decodefn = util.decodefilename
67 67 self.spath = os.path.join(self.path, "store")
68 68 else:
69 69 self.encodefn = lambda x: x
70 70 self.decodefn = lambda x: x
71 71 self.spath = self.path
72 72
73 73 try:
74 74 # files in .hg/ will be created using this mode
75 75 mode = os.stat(self.spath).st_mode
76 76 # avoid some useless chmods
77 77 if (0777 & ~util._umask) == (0777 & mode):
78 78 mode = None
79 79 except OSError:
80 80 mode = None
81 81
82 82 self._createmode = mode
83 83 self.opener.createmode = mode
84 84 sopener = util.opener(self.spath)
85 85 sopener.createmode = mode
86 86 self.sopener = util.encodedopener(sopener, self.encodefn)
87 87
88 88 self.ui = ui.ui(parentui=parentui)
89 89 try:
90 90 self.ui.readconfig(self.join("hgrc"), self.root)
91 91 extensions.loadall(self.ui)
92 92 except IOError:
93 93 pass
94 94
95 95 self.tagscache = None
96 96 self._tagstypecache = None
97 97 self.branchcache = None
98 98 self._ubranchcache = None # UTF-8 version of branchcache
99 99 self._branchcachetip = None
100 100 self.nodetagscache = None
101 101 self.filterpats = {}
102 102 self._datafilters = {}
103 103 self._transref = self._lockref = self._wlockref = None
104 104
105 105 def __getattr__(self, name):
106 106 if name == 'changelog':
107 107 self.changelog = changelog.changelog(self.sopener)
108 108 self.sopener.defversion = self.changelog.version
109 109 return self.changelog
110 110 if name == 'manifest':
111 111 self.changelog
112 112 self.manifest = manifest.manifest(self.sopener)
113 113 return self.manifest
114 114 if name == 'dirstate':
115 115 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
116 116 return self.dirstate
117 117 else:
118 118 raise AttributeError, name
119 119
120 120 def __getitem__(self, changeid):
121 121 if changeid == None:
122 122 return context.workingctx(self)
123 123 return context.changectx(self, changeid)
124 124
125 125 def __nonzero__(self):
126 126 return True
127 127
128 128 def __len__(self):
129 129 return len(self.changelog)
130 130
131 131 def __iter__(self):
132 132 for i in xrange(len(self)):
133 133 yield i
134 134
135 135 def url(self):
136 136 return 'file:' + self.root
137 137
138 138 def hook(self, name, throw=False, **args):
139 139 return hook.hook(self.ui, self, name, throw, **args)
140 140
141 141 tag_disallowed = ':\r\n'
142 142
143 143 def _tag(self, names, node, message, local, user, date, parent=None,
144 144 extra={}):
145 145 use_dirstate = parent is None
146 146
147 147 if isinstance(names, str):
148 148 allchars = names
149 149 names = (names,)
150 150 else:
151 151 allchars = ''.join(names)
152 152 for c in self.tag_disallowed:
153 153 if c in allchars:
154 154 raise util.Abort(_('%r cannot be used in a tag name') % c)
155 155
156 156 for name in names:
157 157 self.hook('pretag', throw=True, node=hex(node), tag=name,
158 158 local=local)
159 159
160 160 def writetags(fp, names, munge, prevtags):
161 161 fp.seek(0, 2)
162 162 if prevtags and prevtags[-1] != '\n':
163 163 fp.write('\n')
164 164 for name in names:
165 165 m = munge and munge(name) or name
166 166 if self._tagstypecache and name in self._tagstypecache:
167 167 old = self.tagscache.get(name, nullid)
168 168 fp.write('%s %s\n' % (hex(old), m))
169 169 fp.write('%s %s\n' % (hex(node), m))
170 170 fp.close()
171 171
172 172 prevtags = ''
173 173 if local:
174 174 try:
175 175 fp = self.opener('localtags', 'r+')
176 176 except IOError, err:
177 177 fp = self.opener('localtags', 'a')
178 178 else:
179 179 prevtags = fp.read()
180 180
181 181 # local tags are stored in the current charset
182 182 writetags(fp, names, None, prevtags)
183 183 for name in names:
184 184 self.hook('tag', node=hex(node), tag=name, local=local)
185 185 return
186 186
187 187 if use_dirstate:
188 188 try:
189 189 fp = self.wfile('.hgtags', 'rb+')
190 190 except IOError, err:
191 191 fp = self.wfile('.hgtags', 'ab')
192 192 else:
193 193 prevtags = fp.read()
194 194 else:
195 195 try:
196 196 prevtags = self.filectx('.hgtags', parent).data()
197 197 except revlog.LookupError:
198 198 pass
199 199 fp = self.wfile('.hgtags', 'wb')
200 200 if prevtags:
201 201 fp.write(prevtags)
202 202
203 203 # committed tags are stored in UTF-8
204 204 writetags(fp, names, util.fromlocal, prevtags)
205 205
206 206 if use_dirstate and '.hgtags' not in self.dirstate:
207 207 self.add(['.hgtags'])
208 208
209 209 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
210 210 extra=extra)
211 211
212 212 for name in names:
213 213 self.hook('tag', node=hex(node), tag=name, local=local)
214 214
215 215 return tagnode
216 216
217 217 def tag(self, names, node, message, local, user, date):
218 218 '''tag a revision with one or more symbolic names.
219 219
220 220 names is a list of strings or, when adding a single tag, names may be a
221 221 string.
222 222
223 223 if local is True, the tags are stored in a per-repository file.
224 224 otherwise, they are stored in the .hgtags file, and a new
225 225 changeset is committed with the change.
226 226
227 227 keyword arguments:
228 228
229 229 local: whether to store tags in non-version-controlled file
230 230 (default False)
231 231
232 232 message: commit message to use if committing
233 233
234 234 user: name of user to use if committing
235 235
236 236 date: date tuple to use if committing'''
237 237
238 238 for x in self.status()[:5]:
239 239 if '.hgtags' in x:
240 240 raise util.Abort(_('working copy of .hgtags is changed '
241 241 '(please commit .hgtags manually)'))
242 242
243 243 self._tag(names, node, message, local, user, date)
244 244
245 245 def tags(self):
246 246 '''return a mapping of tag to node'''
247 247 if self.tagscache:
248 248 return self.tagscache
249 249
250 250 globaltags = {}
251 251 tagtypes = {}
252 252
253 253 def readtags(lines, fn, tagtype):
254 254 filetags = {}
255 255 count = 0
256 256
257 257 def warn(msg):
258 258 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
259 259
260 260 for l in lines:
261 261 count += 1
262 262 if not l:
263 263 continue
264 264 s = l.split(" ", 1)
265 265 if len(s) != 2:
266 266 warn(_("cannot parse entry"))
267 267 continue
268 268 node, key = s
269 269 key = util.tolocal(key.strip()) # stored in UTF-8
270 270 try:
271 271 bin_n = bin(node)
272 272 except TypeError:
273 273 warn(_("node '%s' is not well formed") % node)
274 274 continue
275 275 if bin_n not in self.changelog.nodemap:
276 276 warn(_("tag '%s' refers to unknown node") % key)
277 277 continue
278 278
279 279 h = []
280 280 if key in filetags:
281 281 n, h = filetags[key]
282 282 h.append(n)
283 283 filetags[key] = (bin_n, h)
284 284
285 285 for k, nh in filetags.items():
286 286 if k not in globaltags:
287 287 globaltags[k] = nh
288 288 tagtypes[k] = tagtype
289 289 continue
290 290
291 291 # we prefer the global tag if:
292 292 # it supercedes us OR
293 293 # mutual supercedes and it has a higher rank
294 294 # otherwise we win because we're tip-most
295 295 an, ah = nh
296 296 bn, bh = globaltags[k]
297 297 if (bn != an and an in bh and
298 298 (bn not in ah or len(bh) > len(ah))):
299 299 an = bn
300 300 ah.extend([n for n in bh if n not in ah])
301 301 globaltags[k] = an, ah
302 302 tagtypes[k] = tagtype
303 303
304 304 # read the tags file from each head, ending with the tip
305 305 f = None
306 306 for rev, node, fnode in self._hgtagsnodes():
307 307 f = (f and f.filectx(fnode) or
308 308 self.filectx('.hgtags', fileid=fnode))
309 309 readtags(f.data().splitlines(), f, "global")
310 310
311 311 try:
312 312 data = util.fromlocal(self.opener("localtags").read())
313 313 # localtags are stored in the local character set
314 314 # while the internal tag table is stored in UTF-8
315 315 readtags(data.splitlines(), "localtags", "local")
316 316 except IOError:
317 317 pass
318 318
319 319 self.tagscache = {}
320 320 self._tagstypecache = {}
321 321 for k,nh in globaltags.items():
322 322 n = nh[0]
323 323 if n != nullid:
324 324 self.tagscache[k] = n
325 325 self._tagstypecache[k] = tagtypes[k]
326 326 self.tagscache['tip'] = self.changelog.tip()
327 327 return self.tagscache
328 328
329 329 def tagtype(self, tagname):
330 330 '''
331 331 return the type of the given tag. result can be:
332 332
333 333 'local' : a local tag
334 334 'global' : a global tag
335 335 None : tag does not exist
336 336 '''
337 337
338 338 self.tags()
339 339
340 340 return self._tagstypecache.get(tagname)
341 341
342 342 def _hgtagsnodes(self):
343 343 heads = self.heads()
344 344 heads.reverse()
345 345 last = {}
346 346 ret = []
347 347 for node in heads:
348 348 c = self[node]
349 349 rev = c.rev()
350 350 try:
351 351 fnode = c.filenode('.hgtags')
352 352 except revlog.LookupError:
353 353 continue
354 354 ret.append((rev, node, fnode))
355 355 if fnode in last:
356 356 ret[last[fnode]] = None
357 357 last[fnode] = len(ret) - 1
358 358 return [item for item in ret if item]
359 359
360 360 def tagslist(self):
361 361 '''return a list of tags ordered by revision'''
362 362 l = []
363 363 for t, n in self.tags().items():
364 364 try:
365 365 r = self.changelog.rev(n)
366 366 except:
367 367 r = -2 # sort to the beginning of the list if unknown
368 368 l.append((r, t, n))
369 369 return [(t, n) for r, t, n in util.sort(l)]
370 370
371 371 def nodetags(self, node):
372 372 '''return the tags associated with a node'''
373 373 if not self.nodetagscache:
374 374 self.nodetagscache = {}
375 375 for t, n in self.tags().items():
376 376 self.nodetagscache.setdefault(n, []).append(t)
377 377 return self.nodetagscache.get(node, [])
378 378
379 379 def _branchtags(self, partial, lrev):
380 380 tiprev = len(self) - 1
381 381 if lrev != tiprev:
382 382 self._updatebranchcache(partial, lrev+1, tiprev+1)
383 383 self._writebranchcache(partial, self.changelog.tip(), tiprev)
384 384
385 385 return partial
386 386
387 387 def branchtags(self):
388 388 tip = self.changelog.tip()
389 389 if self.branchcache is not None and self._branchcachetip == tip:
390 390 return self.branchcache
391 391
392 392 oldtip = self._branchcachetip
393 393 self._branchcachetip = tip
394 394 if self.branchcache is None:
395 395 self.branchcache = {} # avoid recursion in changectx
396 396 else:
397 397 self.branchcache.clear() # keep using the same dict
398 398 if oldtip is None or oldtip not in self.changelog.nodemap:
399 399 partial, last, lrev = self._readbranchcache()
400 400 else:
401 401 lrev = self.changelog.rev(oldtip)
402 402 partial = self._ubranchcache
403 403
404 404 self._branchtags(partial, lrev)
405 405
406 406 # the branch cache is stored on disk as UTF-8, but in the local
407 407 # charset internally
408 408 for k, v in partial.items():
409 409 self.branchcache[util.tolocal(k)] = v
410 410 self._ubranchcache = partial
411 411 return self.branchcache
412 412
    def _readbranchcache(self):
        """Parse .hg/branch.cache into ({branch: node}, tipnode, tiprev).

        Returns ({}, nullid, nullrev) when the file is missing, unreadable,
        or stale relative to the current changelog."""
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line records the tip the cache was valid for:
            # "<hex node> <rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<hex node> <branch label>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # any parse problem just means we rebuild from scratch
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
439 439
440 440 def _writebranchcache(self, branches, tip, tiprev):
441 441 try:
442 442 f = self.opener("branch.cache", "w", atomictemp=True)
443 443 f.write("%s %s\n" % (hex(tip), tiprev))
444 444 for label, node in branches.iteritems():
445 445 f.write("%s %s\n" % (hex(node), label))
446 446 f.rename()
447 447 except (IOError, OSError):
448 448 pass
449 449
450 450 def _updatebranchcache(self, partial, start, end):
451 451 for r in xrange(start, end):
452 452 c = self[r]
453 453 b = c.branch()
454 454 partial[b] = c.node()
455 455
    def lookup(self, key):
        """Resolve key to a changelog node.

        Resolution order: dirstate parent ('.'), null revision, exact
        rev-number/node match, tag, branch, unambiguous node prefix.
        Raises repo.RepoError when nothing matches."""
        if key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            # if a raw binary node slipped through, report it as hex
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)
477 477
478 478 def local(self):
479 479 return True
480 480
481 481 def join(self, f):
482 482 return os.path.join(self.path, f)
483 483
484 484 def sjoin(self, f):
485 485 f = self.encodefn(f)
486 486 return os.path.join(self.spath, f)
487 487
488 488 def wjoin(self, f):
489 489 return os.path.join(self.root, f)
490 490
491 491 def rjoin(self, f):
492 492 return os.path.join(self.root, util.pconvert(f))
493 493
494 494 def file(self, f):
495 495 if f[0] == '/':
496 496 f = f[1:]
497 497 return filelog.filelog(self.sopener, f)
498 498
499 499 def changectx(self, changeid):
500 500 return self[changeid]
501 501
502 502 def parents(self, changeid=None):
503 503 '''get list of changectxs for parents of changeid'''
504 504 return self[changeid].parents()
505 505
506 506 def filectx(self, path, changeid=None, fileid=None):
507 507 """changeid can be a changeset revision, node, or tag.
508 508 fileid can be a file revision or node."""
509 509 return context.filectx(self, path, changeid, fileid)
510 510
511 511 def getcwd(self):
512 512 return self.dirstate.getcwd()
513 513
514 514 def pathto(self, f, cwd=None):
515 515 return self.dirstate.pathto(f, cwd)
516 516
517 517 def wfile(self, f, mode='r'):
518 518 return self.wopener(f, mode)
519 519
520 520 def _link(self, f):
521 521 return os.path.islink(self.wjoin(f))
522 522
    def _filter(self, filter, filename, data):
        """Run data through the configured filter pipeline.

        filter is a config section name ("encode" or "decode" at the
        call sites in this file); the first configured pattern matching
        filename has its command applied.  Compiled (matcher, function,
        params) triples are cached in self.filterpats per section."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                # a command starting with a registered data-filter name
                # uses that in-process filter; the remainder of the
                # command line becomes its parameters
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # otherwise run the command as an external pipe
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        # first matching pattern wins
        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
551 551
552 552 def adddatafilter(self, name, filter):
553 553 self._datafilters[name] = filter
554 554
555 555 def wread(self, filename):
556 556 if self._link(filename):
557 557 data = os.readlink(self.wjoin(filename))
558 558 else:
559 559 data = self.wopener(filename, 'r').read()
560 560 return self._filter("encode", filename, data)
561 561
562 562 def wwrite(self, filename, data, flags):
563 563 data = self._filter("decode", filename, data)
564 564 try:
565 565 os.unlink(self.wjoin(filename))
566 566 except OSError:
567 567 pass
568 568 self.wopener(filename, 'w').write(data)
569 569 util.set_flags(self.wjoin(filename), flags)
570 570
571 571 def wwritedata(self, filename, data):
572 572 return self._filter("decode", filename, data)
573 573
    def transaction(self):
        """Open (or nest into) a store transaction.

        Saves the dirstate and branch under journal.* names so rollback
        can restore them; on successful close the journal files are
        renamed to undo.* via aftertrans."""
        if self._transref and self._transref():
            # a transaction is already live; join it
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self._createmode)
        # hold only a weak reference so an abandoned transaction
        # can be garbage-collected (and abort itself)
        self._transref = weakref.ref(tr)
        return tr
599 599
    def recover(self):
        """Roll back an interrupted transaction, if one exists.

        Returns True when a journal was found and rolled back,
        False otherwise."""
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                # caches may now be stale - drop them
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l
613 613
    def rollback(self):
        """Undo the last completed transaction, restoring the dirstate
        and named branch saved at transaction time.

        Warns (but continues) when the branch cannot be restored."""
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                # drop all in-memory caches made stale by the rollback
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock
636 636
637 637 def invalidate(self):
638 638 for a in "changelog manifest".split():
639 639 if a in self.__dict__:
640 640 delattr(self, a)
641 641 self.tagscache = None
642 642 self._tagstypecache = None
643 643 self.nodetagscache = None
644 644 self.branchcache = None
645 645 self._ubranchcache = None
646 646 self._branchcachetip = None
647 647
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file lockname.

        If it is already held and wait is true, retry with a timeout
        taken from ui.timeout (default 600s); otherwise re-raise
        LockHeld.  acquirefn, when given, runs after acquisition."""
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
662 662
663 663 def lock(self, wait=True):
664 664 if self._lockref and self._lockref():
665 665 return self._lockref()
666 666
667 667 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
668 668 _('repository %s') % self.origroot)
669 669 self._lockref = weakref.ref(l)
670 670 return l
671 671
672 672 def wlock(self, wait=True):
673 673 if self._wlockref and self._wlockref():
674 674 return self._wlockref()
675 675
676 676 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
677 677 self.dirstate.invalidate, _('working directory of %s') %
678 678 self.origroot)
679 679 self._wlockref = weakref.ref(l)
680 680 return l
681 681
    def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Records copy/rename metadata and resolves the filelog parents,
        returning the new filelog node (or the existing parent node when
        the file is unchanged).  The file name is appended to changelist
        when a new filelog revision is created.
        """

        fn = fctx.path()
        t = fctx.data()
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = fctx.renamed()
        if cp and cp[0] != fn:
            cp = cp[0]
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1[cp])
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1[cp])
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2[cp])
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1[cp])
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)
745 745
746 746 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
747 747 if p1 is None:
748 748 p1, p2 = self.dirstate.parents()
749 749 return self.commit(files=files, text=text, user=user, date=date,
750 750 p1=p1, p2=p2, extra=extra, empty_ok=True)
751 751
    def commit(self, files=None, text="", user=None, date=None,
               match=None, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        """Commit the listed files (or all outstanding changes).

        Returns the new changeset node via _commitctx, or None when
        nothing changed.  When p1 is supplied (the rawcommit path) the
        dirstate is not consulted for status."""
        wlock = lock = None
        if files:
            files = util.unique(files)
        try:
            wlock = self.wlock()
            lock = self.lock()
            use_dirstate = (p1 is None) # not rawcommit

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True

                # partial commit of a merge is not meaningful
                if (not force and p2 != nullid and
                    (match and (match.files() or match.anypats()))):
                    raise util.Abort(_('cannot partially commit a merge '
                                       '(do not specify files or patterns)'))

                if files:
                    # classify the explicitly listed files by dirstate state
                    modified, removed = [], []
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            modified.append(f)
                        elif s == 'r':
                            removed.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                    changes = [modified, [], removed, [], []]
                else:
                    changes = self.status(match=match)
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)
                changes = [files, [], [], [], []]

            wctx = context.workingctx(self, (p1, p2), text, user, date,
                                      extra, changes)
            return self._commitctx(wctx, force, force_editor, empty_ok,
                                   use_dirstate, update_dirstate)
        finally:
            del lock, wlock
796 796
797 797 def commitctx(self, ctx):
798 798 wlock = lock = None
799 799 try:
800 800 wlock = self.wlock()
801 801 lock = self.lock()
802 802 return self._commitctx(ctx, force=True, force_editor=False,
803 803 empty_ok=True, use_dirstate=False,
804 804 update_dirstate=False)
805 805 finally:
806 806 del lock, wlock
807 807
    def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
                   use_dirstate=True, update_dirstate=True):
        """Write the changeset described by wctx to the store.

        Commits each changed file, builds the new manifest, runs the
        editor when needed, adds the changelog entry and fires the
        commit hooks.  Returns the new node, or None when there is
        nothing to commit.  Callers hold wlock and lock."""
        tr = None
        valid = 0 # don't save the dirstate if this isn't set
        try:
            commit = util.sort(wctx.modified() + wctx.added())
            remove = wctx.removed()
            extra = wctx.extra().copy()
            branchname = extra['branch']
            user = wctx.user()
            text = wctx.description()

            p1, p2 = [p.node() for p in wctx.parents()]
            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                # a commit with no file changes is only allowed when it
                # changes branch, merges, or force is set
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            changed = []
            linkrev = len(self)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    fctx = wctx.filectx(f)
                    newflags = fctx.flags()
                    new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        if m1.flags(f) != newflags:
                            changed.append(f)
                    m1.set(f, newflags)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    # file vanished: fatal for a dirstate commit,
                    # treated as a removal otherwise
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            removed = []

            for f in util.sort(remove):
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            # strip leading blank lines and trailing whitespace
            lines = [line.rstrip() for line in text.rstrip().splitlines()]
            while lines and not lines[0]:
                del lines[0]
            if not lines and use_dirstate:
                raise util.Abort(_("empty commit message"))
            text = '\n'.join(lines)

            n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
                                   user, wctx.date(), extra)
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            tr.close()

            if self.branchcache:
                self.branchtags()

            if use_dirstate or update_dirstate:
                self.dirstate.setparents(n)
            if use_dirstate:
                for f in removed:
                    self.dirstate.forget(f)
            valid = 1 # our dirstate updates are complete

            self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
            return n
        finally:
            if not valid: # don't save our updated dirstate
                self.dirstate.invalidate()
            del tr
937 937
938 938 def walk(self, match, node=None):
939 939 '''
940 940 walk recursively through the directory tree or a given
941 941 changeset, finding all files matched by the match
942 942 function
943 943 '''
944 944 return self[node].walk(match)
945 945
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns the sorted tuple (modified, added, removed, deleted,
        unknown, ignored, clean); the last three lists are populated
        only when the corresponding flag is set.
        """

        def mfmatches(ctx):
            # manifest of ctx restricted to files accepted by match
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        ctx1 = self[node1]
        ctx2 = self[node2]
        working = ctx2 == self[None]
        parentworking = working and ctx1 == self['.']
        match = match or match_.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        if working: # we need to scan the working dir
            s = self.dirstate.status(match, listignored, listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in cmp:
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f].data())):
                        modified.append(f)
                    else:
                        fixup.append(f)

                if listclean:
                    clean += fixup

                # update dirstate for files that are actually clean
                if fixup:
                    wlock = None
                    try:
                        try:
                            # best-effort: skip if the wlock can't be had
                            wlock = self.wlock(False)
                            for f in fixup:
                                self.dirstate.normal(f)
                        except lock.LockException:
                            pass
                    finally:
                        del wlock

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)
            # whatever is left in mf1 was removed
            removed = mf1.keys()

        # sorting is done here once for all result lists
        r = modified, added, removed, deleted, unknown, ignored, clean
        [l.sort() for l in r]
        return r
1034 1033
1035 1034 def add(self, list):
1036 1035 wlock = self.wlock()
1037 1036 try:
1038 1037 rejected = []
1039 1038 for f in list:
1040 1039 p = self.wjoin(f)
1041 1040 try:
1042 1041 st = os.lstat(p)
1043 1042 except:
1044 1043 self.ui.warn(_("%s does not exist!\n") % f)
1045 1044 rejected.append(f)
1046 1045 continue
1047 1046 if st.st_size > 10000000:
1048 1047 self.ui.warn(_("%s: files over 10MB may cause memory and"
1049 1048 " performance problems\n"
1050 1049 "(use 'hg revert %s' to unadd the file)\n")
1051 1050 % (f, f))
1052 1051 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1053 1052 self.ui.warn(_("%s not added: only files and symlinks "
1054 1053 "supported currently\n") % f)
1055 1054 rejected.append(p)
1056 1055 elif self.dirstate[f] in 'amn':
1057 1056 self.ui.warn(_("%s already tracked!\n") % f)
1058 1057 elif self.dirstate[f] == 'r':
1059 1058 self.dirstate.normallookup(f)
1060 1059 else:
1061 1060 self.dirstate.add(f)
1062 1061 return rejected
1063 1062 finally:
1064 1063 del wlock
1065 1064
1066 1065 def forget(self, list):
1067 1066 wlock = self.wlock()
1068 1067 try:
1069 1068 for f in list:
1070 1069 if self.dirstate[f] != 'a':
1071 1070 self.ui.warn(_("%s not added!\n") % f)
1072 1071 else:
1073 1072 self.dirstate.forget(f)
1074 1073 finally:
1075 1074 del wlock
1076 1075
    def remove(self, list, unlink=False):
        """Schedule files for removal from the repository; with unlink,
        also delete them from the working directory first."""
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        # already missing is fine; anything else is fatal
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    # the unlink above did not take effect
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    # never committed: just drop the pending add
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock
1099 1098
1100 1099 def undelete(self, list):
1101 1100 wlock = None
1102 1101 try:
1103 1102 manifests = [self.manifest.read(self.changelog.read(p)[0])
1104 1103 for p in self.dirstate.parents() if p != nullid]
1105 1104 wlock = self.wlock()
1106 1105 for f in list:
1107 1106 if self.dirstate[f] != 'r':
1108 1107 self.ui.warn("%s not removed!\n" % f)
1109 1108 else:
1110 1109 m = f in manifests[0] and manifests[0] or manifests[1]
1111 1110 t = self.file(f).read(m[f])
1112 1111 self.wwrite(f, t, m.flags(f))
1113 1112 self.dirstate.normal(f)
1114 1113 finally:
1115 1114 del wlock
1116 1115
    def copy(self, source, dest):
        """Record dest as a copy of source in the dirstate.

        dest must already exist in the working directory and be a
        regular file or symlink; otherwise only a warning is issued."""
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock
1133 1132
1134 1133 def heads(self, start=None):
1135 1134 heads = self.changelog.heads(start)
1136 1135 # sort the output in rev descending order
1137 1136 heads = [(-self.changelog.rev(h), h) for h in heads]
1138 1137 return [n for (r, n) in util.sort(heads)]
1139 1138
    def branchheads(self, branch=None, start=None):
        """Return the head nodes of the given branch (default: the
        working directory's branch), optionally limited to heads
        reachable from start."""
        if branch is None:
            branch = self[None].branch()
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the brach tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head.  So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev.  There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self[rev].branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads
1187 1186
1188 1187 def branches(self, nodes):
1189 1188 if not nodes:
1190 1189 nodes = [self.changelog.tip()]
1191 1190 b = []
1192 1191 for n in nodes:
1193 1192 t = n
1194 1193 while 1:
1195 1194 p = self.changelog.parents(n)
1196 1195 if p[1] != nullid or p[0] == nullid:
1197 1196 b.append((t, n, p[0], p[1]))
1198 1197 break
1199 1198 n = p[0]
1200 1199 return b
1201 1200
1202 1201 def between(self, pairs):
1203 1202 r = []
1204 1203
1205 1204 for top, bottom in pairs:
1206 1205 n, l, i = top, [], 0
1207 1206 f = 1
1208 1207
1209 1208 while n != bottom:
1210 1209 p = self.changelog.parents(n)[0]
1211 1210 if i == f:
1212 1211 l.append(n)
1213 1212 f = f * 2
1214 1213 n = p
1215 1214 i += 1
1216 1215
1217 1216 r.append(l)
1218 1217
1219 1218 return r
1220 1219
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything remote has is incoming
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue up unknown parents for the next round trip
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                # batch parent queries ten at a time
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1361 1360
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1409 1408
    def pull(self, remote, heads=None, force=False):
        """Fetch changesets from *remote* and add them to this repo.

        heads: if given, pull only what is needed to reach these heads;
               this requires the remote's 'changegroupsubset' capability.
        force: forwarded to findincoming (allows pulling from a
               presumably unrelated repository).

        Returns 0 when nothing was found, otherwise the result of
        addchangegroup().
        """
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                # remote has nothing in common with us: full pull
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            # dropping the last reference releases the lock
            del lock
1430 1429
1431 1430 def push(self, remote, force=False, revs=None):
1432 1431 # there are two ways to push to remote repo:
1433 1432 #
1434 1433 # addchangegroup assumes local user can lock remote
1435 1434 # repo (local filesystem, old ssh servers).
1436 1435 #
1437 1436 # unbundle assumes local user cannot lock remote repo (new ssh
1438 1437 # servers, http servers).
1439 1438
1440 1439 if remote.capable('unbundle'):
1441 1440 return self.push_unbundle(remote, force, revs)
1442 1441 return self.push_addchangegroup(remote, force, revs)
1443 1442
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push to *remote*.

        Returns a 2-tuple.  On success: (changegroup, remote_heads).
        On failure: (None, code) where code is 1 for "no changes found"
        and 0 for "push would create new remote heads".
        """
        base = {}
        remote_heads = remote.heads()
        # findincoming fills 'base' with nodes common to both sides and
        # tells us whether the remote has changes we lack.
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head
            warn = 0

            if remote_heads == [nullid]:
                # empty remote repo: anything we push is fine
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # remote head not superseded by an outgoing head
                            newheads.append(r)
                    else:
                        # remote head unknown locally: stays a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1499 1498
1500 1499 def push_addchangegroup(self, remote, force, revs):
1501 1500 lock = remote.lock()
1502 1501 try:
1503 1502 ret = self.prepush(remote, force, revs)
1504 1503 if ret[0] is not None:
1505 1504 cg, remote_heads = ret
1506 1505 return remote.addchangegroup(cg, 'push', self.url())
1507 1506 return ret[1]
1508 1507 finally:
1509 1508 del lock
1510 1509
1511 1510 def push_unbundle(self, remote, force, revs):
1512 1511 # local repo finds heads on server, finds out what revs it
1513 1512 # must push. once revs transferred, if server finds it has
1514 1513 # different heads (someone else won commit/push race), server
1515 1514 # aborts.
1516 1515
1517 1516 ret = self.prepush(remote, force, revs)
1518 1517 if ret[0] is not None:
1519 1518 cg, remote_heads = ret
1520 1519 if force: remote_heads = ['force']
1521 1520 return remote.unbundle(cg, remote_heads, 'push')
1522 1521 return ret[1]
1523 1522
1524 1523 def changegroupinfo(self, nodes, source):
1525 1524 if self.ui.verbose or source == 'bundle':
1526 1525 self.ui.status(_("%d changesets found\n") % len(nodes))
1527 1526 if self.ui.debugflag:
1528 1527 self.ui.debug(_("List of changesets:\n"))
1529 1528 for node in nodes:
1530 1529 self.ui.debug("%s\n" % hex(node))
1531 1530
    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.

        NOTE: dicts are used as sets throughout (pre-set-type idiom), and
        the nested helper functions deliberately share and mutate the
        msng_* dicts defined in this scope.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a 1-element list so the closure can rebind it
            # (no 'nonlocal' in this Python version).
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    # key 1 is the manifest, already handled above
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            # Go through all our files in order sorted by name.
            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1826 1825
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revision numbers being sent
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        # a changeset's linknode is itself
        def identity(x):
            return x

        # yield the nodes of 'log' whose linkrev is among the outgoing
        # changesets, in revision (storage) order
        def gennodelst(log):
            for r in log:
                n = log.node(r)
                if log.linkrev(n) in revset:
                    yield n

        # callback that records, per outgoing changeset, which files changed
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a node of 'revlog' back to the changelog node that owns it
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        # emit changelog, then manifest, then one group per changed file
        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in util.sort(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1894 1893
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # linknode callback for the changelog: next rev to be added
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        # linknode callback for manifests/filelogs: changelog rev of the node
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # weak proxy so the transaction's destructor can run even while
            # the addgroup callbacks hold a reference
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1           # rev of tip before adding
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1           # rev of tip after adding
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    # empty chunk marks the end of the file groups
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                             % (changesets, revisions, files, heads))

            if changesets > 0:
                # pretxnchangegroup may still veto the whole transaction
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            # dropping the last reference aborts the transaction if it was
            # not closed above
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1997 1996
1998 1997
1999 1998 def stream_in(self, remote):
2000 1999 fp = remote.stream_out()
2001 2000 l = fp.readline()
2002 2001 try:
2003 2002 resp = int(l)
2004 2003 except ValueError:
2005 2004 raise util.UnexpectedOutput(
2006 2005 _('Unexpected response from remote server:'), l)
2007 2006 if resp == 1:
2008 2007 raise util.Abort(_('operation forbidden by server'))
2009 2008 elif resp == 2:
2010 2009 raise util.Abort(_('locking the remote repository failed'))
2011 2010 elif resp != 0:
2012 2011 raise util.Abort(_('the server sent an unknown error code'))
2013 2012 self.ui.status(_('streaming all changes\n'))
2014 2013 l = fp.readline()
2015 2014 try:
2016 2015 total_files, total_bytes = map(int, l.split(' ', 1))
2017 2016 except (ValueError, TypeError):
2018 2017 raise util.UnexpectedOutput(
2019 2018 _('Unexpected response from remote server:'), l)
2020 2019 self.ui.status(_('%d files to transfer, %s of data\n') %
2021 2020 (total_files, util.bytecount(total_bytes)))
2022 2021 start = time.time()
2023 2022 for i in xrange(total_files):
2024 2023 # XXX doesn't support '\n' or '\r' in filenames
2025 2024 l = fp.readline()
2026 2025 try:
2027 2026 name, size = l.split('\0', 1)
2028 2027 size = int(size)
2029 2028 except ValueError, TypeError:
2030 2029 raise util.UnexpectedOutput(
2031 2030 _('Unexpected response from remote server:'), l)
2032 2031 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
2033 2032 ofp = self.sopener(name, 'w')
2034 2033 for chunk in util.filechunkiter(fp, limit=size):
2035 2034 ofp.write(chunk)
2036 2035 ofp.close()
2037 2036 elapsed = time.time() - start
2038 2037 if elapsed <= 0:
2039 2038 elapsed = 0.001
2040 2039 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2041 2040 (util.bytecount(total_bytes), elapsed,
2042 2041 util.bytecount(total_bytes / elapsed)))
2043 2042 self.invalidate()
2044 2043 return len(self.heads()) + 1
2045 2044
2046 2045 def clone(self, remote, heads=[], stream=False):
2047 2046 '''clone remote repository.
2048 2047
2049 2048 keyword arguments:
2050 2049 heads: list of revs to clone (forces use of pull)
2051 2050 stream: use streaming clone if possible'''
2052 2051
2053 2052 # now, all clients that can request uncompressed clones can
2054 2053 # read repo formats supported by all servers that can serve
2055 2054 # them.
2056 2055
2057 2056 # if revlog format changes, client will have to check version
2058 2057 # and format flags on "stream" capability, and use
2059 2058 # uncompressed only if compatible.
2060 2059
2061 2060 if stream and not heads and remote.capable('stream'):
2062 2061 return self.stream_in(remote)
2063 2062 return self.pull(remote, heads)
2064 2063
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (src, dest) pair via util.rename.

    The pair list is snapshotted up front, so later mutation of *files*
    cannot change what gets renamed.
    """
    pending = [tuple(pair) for pair in files]
    def a():
        for src, dest in pending:
            util.rename(src, dest)
    return a
2072 2071
def instance(ui, path, create):
    """Repository factory: open (or create) the local repo at *path*.

    Any leading 'file:' scheme is stripped from *path* first.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
2075 2074
def islocal(path):
    """A repository reached through this module is always local."""
    return True
@@ -1,20 +1,20
1 1 adding empty-file
2 2 adding large-file
3 3 adding another-file
4 4 removing empty-file
5 5 removing large-file
6 6 recording removal of large-file as rename to another-file (99% similar)
7 7 % comparing two empty files caused ZeroDivisionError in the past
8 8 2 files updated, 0 files merged, 1 files removed, 0 files unresolved
9 9 adding another-empty-file
10 10 removing empty-file
11 11 adding large-file
12 12 adding tiny-file
13 removing large-file
13 14 adding small-file
14 removing large-file
15 15 removing tiny-file
16 16 recording removal of tiny-file as rename to small-file (82% similar)
17 17 % should all fail
18 18 abort: similarity must be a number
19 19 abort: similarity must be between 0 and 100
20 20 abort: similarity must be between 0 and 100
@@ -1,15 +1,15
1 1 adding dir/bar
2 2 adding foo
3 3 dir/bar
4 4 foo
5 5 adding dir/bar_2
6 6 adding foo_2
7 7 dir/bar_2
8 8 foo_2
9 9 adding a
10 10 adding c
11 removing a
11 12 adding b
13 removing c
12 14 adding d
13 removing a
14 removing c
15 15 recording removal of a as rename to b (100% similar)
General Comments 0
You need to be logged in to leave comments. Login now