localrepo.walk: if we're walking a specific revision, sort the files...
Alexis S. L. Carvalho
r4194:8e947b0e default
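The walk code iterated directly over the manifest dict, so filenames came
back in hash order. With this change the file lists are sorted before being
yielded. Roughly (a simplified sketch of the hunk below, not the verbatim
code):

    mdict = self.manifest.read(self.changelog.read(node)[0])
    mfiles = mdict.keys()
    mfiles.sort()
    for fn in mfiles:
        ...  # yield matches in sorted order

The leftover files in fdict get the same treatment.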
@@ -1,2008 +1,2013 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19 supported = ('revlogv1', 'store')
20 20 branchcache_features = ('unnamed',)
21 21
22 22 def __del__(self):
23 23 self.transhandle = None
24 24 def __init__(self, parentui, path=None, create=0):
25 25 repo.repository.__init__(self)
26 26 if not path:
27 27 p = os.getcwd()
28 28 while not os.path.isdir(os.path.join(p, ".hg")):
29 29 oldp = p
30 30 p = os.path.dirname(p)
31 31 if p == oldp:
32 32 raise repo.RepoError(_("There is no Mercurial repository"
33 33 " here (.hg not found)"))
34 34 path = p
35 35
36 36 self.root = os.path.realpath(path)
37 37 self.path = os.path.join(self.root, ".hg")
38 38 self.origroot = path
39 39 self.opener = util.opener(self.path)
40 40 self.wopener = util.opener(self.root)
41 41
42 42 if not os.path.isdir(self.path):
43 43 if create:
44 44 if not os.path.exists(path):
45 45 os.mkdir(path)
46 46 os.mkdir(self.path)
47 47 os.mkdir(os.path.join(self.path, "store"))
48 48 requirements = ("revlogv1", "store")
49 49 reqfile = self.opener("requires", "w")
50 50 for r in requirements:
51 51 reqfile.write("%s\n" % r)
52 52 reqfile.close()
53 53 # create an invalid changelog
54 54 self.opener("00changelog.i", "a").write(
55 55 '\0\0\0\2' # represents revlogv2
56 56 ' dummy changelog to prevent using the old repo layout'
57 57 )
58 58 else:
59 59 raise repo.RepoError(_("repository %s not found") % path)
60 60 elif create:
61 61 raise repo.RepoError(_("repository %s already exists") % path)
62 62 else:
63 63 # find requirements
64 64 try:
65 65 requirements = self.opener("requires").read().splitlines()
66 66 except IOError, inst:
67 67 if inst.errno != errno.ENOENT:
68 68 raise
69 69 requirements = []
70 70 # check them
71 71 for r in requirements:
72 72 if r not in self.supported:
73 73 raise repo.RepoError(_("requirement '%s' not supported") % r)
74 74
75 75 # setup store
76 76 if "store" in requirements:
77 77 self.encodefn = util.encodefilename
78 78 self.decodefn = util.decodefilename
79 79 self.spath = os.path.join(self.path, "store")
80 80 else:
81 81 self.encodefn = lambda x: x
82 82 self.decodefn = lambda x: x
83 83 self.spath = self.path
84 84 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
85 85
86 86 self.ui = ui.ui(parentui=parentui)
87 87 try:
88 88 self.ui.readconfig(self.join("hgrc"), self.root)
89 89 except IOError:
90 90 pass
91 91
92 92 v = self.ui.configrevlog()
93 93 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
94 94 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
95 95 fl = v.get('flags', None)
96 96 flags = 0
97 97 if fl != None:
98 98 for x in fl.split():
99 99 flags |= revlog.flagstr(x)
100 100 elif self.revlogv1:
101 101 flags = revlog.REVLOG_DEFAULT_FLAGS
102 102
103 103 v = self.revlogversion | flags
104 104 self.manifest = manifest.manifest(self.sopener, v)
105 105 self.changelog = changelog.changelog(self.sopener, v)
106 106
107 107 fallback = self.ui.config('ui', 'fallbackencoding')
108 108 if fallback:
109 109 util._fallbackencoding = fallback
110 110
111 111 # the changelog might not have the inline index flag
112 112 # on. If the format of the changelog is the same as found in
113 113 # .hgrc, apply any flags found in the .hgrc as well.
114 114 # Otherwise, just use the version from the changelog.
115 115 v = self.changelog.version
116 116 if v == self.revlogversion:
117 117 v |= flags
118 118 self.revlogversion = v
119 119
120 120 self.tagscache = None
121 121 self.branchcache = None
122 122 self.nodetagscache = None
123 123 self.encodepats = None
124 124 self.decodepats = None
125 125 self.transhandle = None
126 126
127 127 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
128 128
129 129 def url(self):
130 130 return 'file:' + self.root
131 131
132 132 def hook(self, name, throw=False, **args):
133 133 def callhook(hname, funcname):
134 134 '''call python hook. hook is callable object, looked up as
135 135 name in python module. if callable returns "true", hook
136 136 fails, else passes. if hook raises exception, treated as
137 137 hook failure. exception propagates if throw is "true".
138 138
139 139 reason for "true" meaning "hook failed" is so that
140 140 unmodified commands (e.g. mercurial.commands.update) can
141 141 be run as hooks without wrappers to convert return values.'''
142 142
143 143 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
144 144 d = funcname.rfind('.')
145 145 if d == -1:
146 146 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
147 147 % (hname, funcname))
148 148 modname = funcname[:d]
149 149 try:
150 150 obj = __import__(modname)
151 151 except ImportError:
152 152 try:
153 153 # extensions are loaded with hgext_ prefix
154 154 obj = __import__("hgext_%s" % modname)
155 155 except ImportError:
156 156 raise util.Abort(_('%s hook is invalid '
157 157 '(import of "%s" failed)') %
158 158 (hname, modname))
159 159 try:
160 160 for p in funcname.split('.')[1:]:
161 161 obj = getattr(obj, p)
162 162 except AttributeError, err:
163 163 raise util.Abort(_('%s hook is invalid '
164 164 '("%s" is not defined)') %
165 165 (hname, funcname))
166 166 if not callable(obj):
167 167 raise util.Abort(_('%s hook is invalid '
168 168 '("%s" is not callable)') %
169 169 (hname, funcname))
170 170 try:
171 171 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
172 172 except (KeyboardInterrupt, util.SignalInterrupt):
173 173 raise
174 174 except Exception, exc:
175 175 if isinstance(exc, util.Abort):
176 176 self.ui.warn(_('error: %s hook failed: %s\n') %
177 177 (hname, exc.args[0]))
178 178 else:
179 179 self.ui.warn(_('error: %s hook raised an exception: '
180 180 '%s\n') % (hname, exc))
181 181 if throw:
182 182 raise
183 183 self.ui.print_exc()
184 184 return True
185 185 if r:
186 186 if throw:
187 187 raise util.Abort(_('%s hook failed') % hname)
188 188 self.ui.warn(_('warning: %s hook failed\n') % hname)
189 189 return r
190 190
191 191 def runhook(name, cmd):
192 192 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
193 193 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
194 194 r = util.system(cmd, environ=env, cwd=self.root)
195 195 if r:
196 196 desc, r = util.explain_exit(r)
197 197 if throw:
198 198 raise util.Abort(_('%s hook %s') % (name, desc))
199 199 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
200 200 return r
201 201
202 202 r = False
203 203 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
204 204 if hname.split(".", 1)[0] == name and cmd]
205 205 hooks.sort()
206 206 for hname, cmd in hooks:
207 207 if cmd.startswith('python:'):
208 208 r = callhook(hname, cmd[7:].strip()) or r
209 209 else:
210 210 r = runhook(hname, cmd) or r
211 211 return r
212 212
213 213 tag_disallowed = ':\r\n'
214 214
215 215 def tag(self, name, node, message, local, user, date):
216 216 '''tag a revision with a symbolic name.
217 217
218 218 if local is True, the tag is stored in a per-repository file.
219 219 otherwise, it is stored in the .hgtags file, and a new
220 220 changeset is committed with the change.
221 221
222 222 keyword arguments:
223 223
224 224 local: whether to store tag in non-version-controlled file
225 225 (default False)
226 226
227 227 message: commit message to use if committing
228 228
229 229 user: name of user to use if committing
230 230
231 231 date: date tuple to use if committing'''
232 232
233 233 for c in self.tag_disallowed:
234 234 if c in name:
235 235 raise util.Abort(_('%r cannot be used in a tag name') % c)
236 236
237 237 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
238 238
239 239 if local:
240 240 # local tags are stored in the current charset
241 241 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
242 242 self.hook('tag', node=hex(node), tag=name, local=local)
243 243 return
244 244
245 245 for x in self.status()[:5]:
246 246 if '.hgtags' in x:
247 247 raise util.Abort(_('working copy of .hgtags is changed '
248 248 '(please commit .hgtags manually)'))
249 249
250 250 # committed tags are stored in UTF-8
251 251 line = '%s %s\n' % (hex(node), util.fromlocal(name))
252 252 self.wfile('.hgtags', 'ab').write(line)
253 253 if self.dirstate.state('.hgtags') == '?':
254 254 self.add(['.hgtags'])
255 255
256 256 self.commit(['.hgtags'], message, user, date)
257 257 self.hook('tag', node=hex(node), tag=name, local=local)
258 258
259 259 def tags(self):
260 260 '''return a mapping of tag to node'''
261 261 if not self.tagscache:
262 262 self.tagscache = {}
263 263
264 264 def parsetag(line, context):
265 265 if not line:
266 266 return
267 267 s = line.split(" ", 1)
268 268 if len(s) != 2:
269 269 self.ui.warn(_("%s: cannot parse entry\n") % context)
270 270 return
271 271 node, key = s
272 272 key = util.tolocal(key.strip()) # stored in UTF-8
273 273 try:
274 274 bin_n = bin(node)
275 275 except TypeError:
276 276 self.ui.warn(_("%s: node '%s' is not well formed\n") %
277 277 (context, node))
278 278 return
279 279 if bin_n not in self.changelog.nodemap:
280 280 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
281 281 (context, key))
282 282 return
283 283 self.tagscache[key] = bin_n
284 284
285 285 # read the tags file from each head, ending with the tip,
286 286 # and add each tag found to the map, with "newer" ones
287 287 # taking precedence
288 288 f = None
289 289 for rev, node, fnode in self._hgtagsnodes():
290 290 f = (f and f.filectx(fnode) or
291 291 self.filectx('.hgtags', fileid=fnode))
292 292 count = 0
293 293 for l in f.data().splitlines():
294 294 count += 1
295 295 parsetag(l, _("%s, line %d") % (str(f), count))
296 296
297 297 try:
298 298 f = self.opener("localtags")
299 299 count = 0
300 300 for l in f:
301 301 # localtags are stored in the local character set
302 302 # while the internal tag table is stored in UTF-8
303 303 l = util.fromlocal(l)
304 304 count += 1
305 305 parsetag(l, _("localtags, line %d") % count)
306 306 except IOError:
307 307 pass
308 308
309 309 self.tagscache['tip'] = self.changelog.tip()
310 310
311 311 return self.tagscache
312 312
313 313 def _hgtagsnodes(self):
314 314 heads = self.heads()
315 315 heads.reverse()
316 316 last = {}
317 317 ret = []
318 318 for node in heads:
319 319 c = self.changectx(node)
320 320 rev = c.rev()
321 321 try:
322 322 fnode = c.filenode('.hgtags')
323 323 except repo.LookupError:
324 324 continue
325 325 ret.append((rev, node, fnode))
326 326 if fnode in last:
327 327 ret[last[fnode]] = None
328 328 last[fnode] = len(ret) - 1
329 329 return [item for item in ret if item]
330 330
331 331 def tagslist(self):
332 332 '''return a list of tags ordered by revision'''
333 333 l = []
334 334 for t, n in self.tags().items():
335 335 try:
336 336 r = self.changelog.rev(n)
337 337 except:
338 338 r = -2 # sort to the beginning of the list if unknown
339 339 l.append((r, t, n))
340 340 l.sort()
341 341 return [(t, n) for r, t, n in l]
342 342
343 343 def nodetags(self, node):
344 344 '''return the tags associated with a node'''
345 345 if not self.nodetagscache:
346 346 self.nodetagscache = {}
347 347 for t, n in self.tags().items():
348 348 self.nodetagscache.setdefault(n, []).append(t)
349 349 return self.nodetagscache.get(node, [])
350 350
351 351 def _branchtags(self):
352 352 partial, last, lrev = self._readbranchcache()
353 353
354 354 tiprev = self.changelog.count() - 1
355 355 if lrev != tiprev:
356 356 self._updatebranchcache(partial, lrev+1, tiprev+1)
357 357 self._writebranchcache(partial, self.changelog.tip(), tiprev)
358 358
359 359 return partial
360 360
361 361 def branchtags(self):
362 362 if self.branchcache is not None:
363 363 return self.branchcache
364 364
365 365 self.branchcache = {} # avoid recursion in changectx
366 366 partial = self._branchtags()
367 367
368 368 # the branch cache is stored on disk as UTF-8, but in the local
369 369 # charset internally
370 370 for k, v in partial.items():
371 371 self.branchcache[util.tolocal(k)] = v
372 372 return self.branchcache
373 373
374 374 def _readbranchcache(self):
375 375 partial = {}
376 376 try:
377 377 f = self.opener("branches.cache")
378 378 lines = f.read().split('\n')
379 379 f.close()
380 380 features = lines.pop(0).strip()
381 381 if not features.startswith('features: '):
382 382 raise ValueError(_('branch cache: no features specified'))
383 383 features = features.split(' ', 1)[1].split()
384 384 missing_features = []
385 385 for feature in self.branchcache_features:
386 386 try:
387 387 features.remove(feature)
388 388 except ValueError, inst:
389 389 missing_features.append(feature)
390 390 if missing_features:
391 391 raise ValueError(_('branch cache: missing features: %s')
392 392 % ', '.join(missing_features))
393 393 if features:
394 394 raise ValueError(_('branch cache: unknown features: %s')
395 395 % ', '.join(features))
396 396 last, lrev = lines.pop(0).split(" ", 1)
397 397 last, lrev = bin(last), int(lrev)
398 398 if not (lrev < self.changelog.count() and
399 399 self.changelog.node(lrev) == last): # sanity check
400 400 # invalidate the cache
401 401 raise ValueError('Invalid branch cache: unknown tip')
402 402 for l in lines:
403 403 if not l: continue
404 404 node, label = l.split(" ", 1)
405 405 partial[label.strip()] = bin(node)
406 406 except (KeyboardInterrupt, util.SignalInterrupt):
407 407 raise
408 408 except Exception, inst:
409 409 if self.ui.debugflag:
410 410 self.ui.warn(str(inst), '\n')
411 411 partial, last, lrev = {}, nullid, nullrev
412 412 return partial, last, lrev
413 413
414 414 def _writebranchcache(self, branches, tip, tiprev):
415 415 try:
416 416 f = self.opener("branches.cache", "w")
417 417 f.write(" features: %s\n" % ' '.join(self.branchcache_features))
418 418 f.write("%s %s\n" % (hex(tip), tiprev))
419 419 for label, node in branches.iteritems():
420 420 f.write("%s %s\n" % (hex(node), label))
421 421 except IOError:
422 422 pass
423 423
424 424 def _updatebranchcache(self, partial, start, end):
425 425 for r in xrange(start, end):
426 426 c = self.changectx(r)
427 427 b = c.branch()
428 428 partial[b] = c.node()
429 429
430 430 def lookup(self, key):
431 431 if key == '.':
432 432 key = self.dirstate.parents()[0]
433 433 if key == nullid:
434 434 raise repo.RepoError(_("no revision checked out"))
435 435 elif key == 'null':
436 436 return nullid
437 437 n = self.changelog._match(key)
438 438 if n:
439 439 return n
440 440 if key in self.tags():
441 441 return self.tags()[key]
442 442 if key in self.branchtags():
443 443 return self.branchtags()[key]
444 444 n = self.changelog._partialmatch(key)
445 445 if n:
446 446 return n
447 447 raise repo.RepoError(_("unknown revision '%s'") % key)
448 448
449 449 def dev(self):
450 450 return os.lstat(self.path).st_dev
451 451
452 452 def local(self):
453 453 return True
454 454
455 455 def join(self, f):
456 456 return os.path.join(self.path, f)
457 457
458 458 def sjoin(self, f):
459 459 f = self.encodefn(f)
460 460 return os.path.join(self.spath, f)
461 461
462 462 def wjoin(self, f):
463 463 return os.path.join(self.root, f)
464 464
465 465 def file(self, f):
466 466 if f[0] == '/':
467 467 f = f[1:]
468 468 return filelog.filelog(self.sopener, f, self.revlogversion)
469 469
470 470 def changectx(self, changeid=None):
471 471 return context.changectx(self, changeid)
472 472
473 473 def workingctx(self):
474 474 return context.workingctx(self)
475 475
476 476 def parents(self, changeid=None):
477 477 '''
478 478 get list of changectxs for parents of changeid or working directory
479 479 '''
480 480 if changeid is None:
481 481 pl = self.dirstate.parents()
482 482 else:
483 483 n = self.changelog.lookup(changeid)
484 484 pl = self.changelog.parents(n)
485 485 if pl[1] == nullid:
486 486 return [self.changectx(pl[0])]
487 487 return [self.changectx(pl[0]), self.changectx(pl[1])]
488 488
489 489 def filectx(self, path, changeid=None, fileid=None):
490 490 """changeid can be a changeset revision, node, or tag.
491 491 fileid can be a file revision or node."""
492 492 return context.filectx(self, path, changeid, fileid)
493 493
494 494 def getcwd(self):
495 495 return self.dirstate.getcwd()
496 496
497 497 def wfile(self, f, mode='r'):
498 498 return self.wopener(f, mode)
499 499
500 500 def wread(self, filename):
501 501 if self.encodepats == None:
502 502 l = []
503 503 for pat, cmd in self.ui.configitems("encode"):
504 504 mf = util.matcher(self.root, "", [pat], [], [])[1]
505 505 l.append((mf, cmd))
506 506 self.encodepats = l
507 507
508 508 data = self.wopener(filename, 'r').read()
509 509
510 510 for mf, cmd in self.encodepats:
511 511 if mf(filename):
512 512 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
513 513 data = util.filter(data, cmd)
514 514 break
515 515
516 516 return data
517 517
518 518 def wwrite(self, filename, data, fd=None):
519 519 if self.decodepats == None:
520 520 l = []
521 521 for pat, cmd in self.ui.configitems("decode"):
522 522 mf = util.matcher(self.root, "", [pat], [], [])[1]
523 523 l.append((mf, cmd))
524 524 self.decodepats = l
525 525
526 526 for mf, cmd in self.decodepats:
527 527 if mf(filename):
528 528 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
529 529 data = util.filter(data, cmd)
530 530 break
531 531
532 532 if fd:
533 533 return fd.write(data)
534 534 return self.wopener(filename, 'w').write(data)
535 535
536 536 def transaction(self):
537 537 tr = self.transhandle
538 538 if tr != None and tr.running():
539 539 return tr.nest()
540 540
541 541 # save dirstate for rollback
542 542 try:
543 543 ds = self.opener("dirstate").read()
544 544 except IOError:
545 545 ds = ""
546 546 self.opener("journal.dirstate", "w").write(ds)
547 547
548 548 renames = [(self.sjoin("journal"), self.sjoin("undo")),
549 549 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
550 550 tr = transaction.transaction(self.ui.warn, self.sopener,
551 551 self.sjoin("journal"),
552 552 aftertrans(renames))
553 553 self.transhandle = tr
554 554 return tr
555 555
556 556 def recover(self):
557 557 l = self.lock()
558 558 if os.path.exists(self.sjoin("journal")):
559 559 self.ui.status(_("rolling back interrupted transaction\n"))
560 560 transaction.rollback(self.sopener, self.sjoin("journal"))
561 561 self.reload()
562 562 return True
563 563 else:
564 564 self.ui.warn(_("no interrupted transaction available\n"))
565 565 return False
566 566
567 567 def rollback(self, wlock=None):
568 568 if not wlock:
569 569 wlock = self.wlock()
570 570 l = self.lock()
571 571 if os.path.exists(self.sjoin("undo")):
572 572 self.ui.status(_("rolling back last transaction\n"))
573 573 transaction.rollback(self.sopener, self.sjoin("undo"))
574 574 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
575 575 self.reload()
576 576 self.wreload()
577 577 else:
578 578 self.ui.warn(_("no rollback information available\n"))
579 579
580 580 def wreload(self):
581 581 self.dirstate.read()
582 582
583 583 def reload(self):
584 584 self.changelog.load()
585 585 self.manifest.load()
586 586 self.tagscache = None
587 587 self.nodetagscache = None
588 588
589 589 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
590 590 desc=None):
591 591 try:
592 592 l = lock.lock(lockname, 0, releasefn, desc=desc)
593 593 except lock.LockHeld, inst:
594 594 if not wait:
595 595 raise
596 596 self.ui.warn(_("waiting for lock on %s held by %r\n") %
597 597 (desc, inst.locker))
598 598 # default to 600 seconds timeout
599 599 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
600 600 releasefn, desc=desc)
601 601 if acquirefn:
602 602 acquirefn()
603 603 return l
604 604
605 605 def lock(self, wait=1):
606 606 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
607 607 desc=_('repository %s') % self.origroot)
608 608
609 609 def wlock(self, wait=1):
610 610 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
611 611 self.wreload,
612 612 desc=_('working directory of %s') % self.origroot)
613 613
614 614 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
615 615 """
616 616 commit an individual file as part of a larger transaction
617 617 """
618 618
619 619 t = self.wread(fn)
620 620 fl = self.file(fn)
621 621 fp1 = manifest1.get(fn, nullid)
622 622 fp2 = manifest2.get(fn, nullid)
623 623
624 624 meta = {}
625 625 cp = self.dirstate.copied(fn)
626 626 if cp:
627 627 # Mark the new revision of this file as a copy of another
628 628 # file. This copy data will effectively act as a parent
629 629 # of this new revision. If this is a merge, the first
630 630 # parent will be the nullid (meaning "look up the copy data")
631 631 # and the second one will be the other parent. For example:
632 632 #
633 633 # 0 --- 1 --- 3 rev1 changes file foo
634 634 # \ / rev2 renames foo to bar and changes it
635 635 # \- 2 -/ rev3 should have bar with all changes and
636 636 # should record that bar descends from
637 637 # bar in rev2 and foo in rev1
638 638 #
639 639 # this allows this merge to succeed:
640 640 #
641 641 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
642 642 # \ / merging rev3 and rev4 should use bar@rev2
643 643 # \- 2 --- 4 as the merge base
644 644 #
645 645 meta["copy"] = cp
646 646 if not manifest2: # not a branch merge
647 647 meta["copyrev"] = hex(manifest1.get(cp, nullid))
648 648 fp2 = nullid
649 649 elif fp2 != nullid: # copied on remote side
650 650 meta["copyrev"] = hex(manifest1.get(cp, nullid))
651 651 elif fp1 != nullid: # copied on local side, reversed
652 652 meta["copyrev"] = hex(manifest2.get(cp))
653 653 fp2 = fp1
654 654 else: # directory rename
655 655 meta["copyrev"] = hex(manifest1.get(cp, nullid))
656 656 self.ui.debug(_(" %s: copy %s:%s\n") %
657 657 (fn, cp, meta["copyrev"]))
658 658 fp1 = nullid
659 659 elif fp2 != nullid:
660 660 # is one parent an ancestor of the other?
661 661 fpa = fl.ancestor(fp1, fp2)
662 662 if fpa == fp1:
663 663 fp1, fp2 = fp2, nullid
664 664 elif fpa == fp2:
665 665 fp2 = nullid
666 666
667 667 # is the file unmodified from the parent? report existing entry
668 668 if fp2 == nullid and not fl.cmp(fp1, t):
669 669 return fp1
670 670
671 671 changelist.append(fn)
672 672 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
673 673
674 674 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
675 675 if p1 is None:
676 676 p1, p2 = self.dirstate.parents()
677 677 return self.commit(files=files, text=text, user=user, date=date,
678 678 p1=p1, p2=p2, wlock=wlock)
679 679
680 680 def commit(self, files=None, text="", user=None, date=None,
681 681 match=util.always, force=False, lock=None, wlock=None,
682 682 force_editor=False, p1=None, p2=None, extra={}):
683 683
684 684 commit = []
685 685 remove = []
686 686 changed = []
687 687 use_dirstate = (p1 is None) # not rawcommit
688 688 extra = extra.copy()
689 689
690 690 if use_dirstate:
691 691 if files:
692 692 for f in files:
693 693 s = self.dirstate.state(f)
694 694 if s in 'nmai':
695 695 commit.append(f)
696 696 elif s == 'r':
697 697 remove.append(f)
698 698 else:
699 699 self.ui.warn(_("%s not tracked!\n") % f)
700 700 else:
701 701 changes = self.status(match=match)[:5]
702 702 modified, added, removed, deleted, unknown = changes
703 703 commit = modified + added
704 704 remove = removed
705 705 else:
706 706 commit = files
707 707
708 708 if use_dirstate:
709 709 p1, p2 = self.dirstate.parents()
710 710 update_dirstate = True
711 711 else:
712 712 p1, p2 = p1, p2 or nullid
713 713 update_dirstate = (self.dirstate.parents()[0] == p1)
714 714
715 715 c1 = self.changelog.read(p1)
716 716 c2 = self.changelog.read(p2)
717 717 m1 = self.manifest.read(c1[0]).copy()
718 718 m2 = self.manifest.read(c2[0])
719 719
720 720 if use_dirstate:
721 721 branchname = self.workingctx().branch()
722 722 try:
723 723 branchname = branchname.decode('UTF-8').encode('UTF-8')
724 724 except UnicodeDecodeError:
725 725 raise util.Abort(_('branch name not in UTF-8!'))
726 726 else:
727 727 branchname = ""
728 728
729 729 if use_dirstate:
730 730 oldname = c1[5].get("branch", "") # stored in UTF-8
731 731 if not commit and not remove and not force and p2 == nullid and \
732 732 branchname == oldname:
733 733 self.ui.status(_("nothing changed\n"))
734 734 return None
735 735
736 736 xp1 = hex(p1)
737 737 if p2 == nullid: xp2 = ''
738 738 else: xp2 = hex(p2)
739 739
740 740 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
741 741
742 742 if not wlock:
743 743 wlock = self.wlock()
744 744 if not lock:
745 745 lock = self.lock()
746 746 tr = self.transaction()
747 747
748 748 # check in files
749 749 new = {}
750 750 linkrev = self.changelog.count()
751 751 commit.sort()
752 752 for f in commit:
753 753 self.ui.note(f + "\n")
754 754 try:
755 755 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
756 756 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
757 757 except IOError:
758 758 if use_dirstate:
759 759 self.ui.warn(_("trouble committing %s!\n") % f)
760 760 raise
761 761 else:
762 762 remove.append(f)
763 763
764 764 # update manifest
765 765 m1.update(new)
766 766 remove.sort()
767 767
768 768 for f in remove:
769 769 if f in m1:
770 770 del m1[f]
771 771 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
772 772
773 773 # add changeset
774 774 new = new.keys()
775 775 new.sort()
776 776
777 777 user = user or self.ui.username()
778 778 if not text or force_editor:
779 779 edittext = []
780 780 if text:
781 781 edittext.append(text)
782 782 edittext.append("")
783 783 edittext.append("HG: user: %s" % user)
784 784 if p2 != nullid:
785 785 edittext.append("HG: branch merge")
786 786 edittext.extend(["HG: changed %s" % f for f in changed])
787 787 edittext.extend(["HG: removed %s" % f for f in remove])
788 788 if not changed and not remove:
789 789 edittext.append("HG: no files changed")
790 790 edittext.append("")
791 791 # run editor in the repository root
792 792 olddir = os.getcwd()
793 793 os.chdir(self.root)
794 794 text = self.ui.edit("\n".join(edittext), user)
795 795 os.chdir(olddir)
796 796
797 797 lines = [line.rstrip() for line in text.rstrip().splitlines()]
798 798 while lines and not lines[0]:
799 799 del lines[0]
800 800 if not lines:
801 801 return None
802 802 text = '\n'.join(lines)
803 803 if branchname:
804 804 extra["branch"] = branchname
805 805 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
806 806 user, date, extra)
807 807 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
808 808 parent2=xp2)
809 809 tr.close()
810 810
811 811 if use_dirstate or update_dirstate:
812 812 self.dirstate.setparents(n)
813 813 if use_dirstate:
814 814 self.dirstate.update(new, "n")
815 815 self.dirstate.forget(remove)
816 816
817 817 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
818 818 return n
819 819
820 820 def walk(self, node=None, files=[], match=util.always, badmatch=None):
821 821 '''
822 822 walk recursively through the directory tree or a given
823 823 changeset, finding all files matched by the match
824 824 function
825 825
826 826 results are yielded in a tuple (src, filename), where src
827 827 is one of:
828 828 'f' the file was found in the directory tree
829 829 'm' the file was only in the dirstate and not in the tree
830 830 'b' file was not found and matched badmatch
831 831 '''
832 832
833 833 if node:
834 834 fdict = dict.fromkeys(files)
835 for fn in self.manifest.read(self.changelog.read(node)[0]):
835 mdict = self.manifest.read(self.changelog.read(node)[0])
836 mfiles = mdict.keys()
837 mfiles.sort()
838 for fn in mfiles:
836 839 for ffn in fdict:
837 840 # match if the file is the exact name or a directory
838 841 if ffn == fn or fn.startswith("%s/" % ffn):
839 842 del fdict[ffn]
840 843 break
841 844 if match(fn):
842 845 yield 'm', fn
843 for fn in fdict:
846 ffiles = fdict.keys()
847 ffiles.sort()
848 for fn in ffiles:
844 849 if badmatch and badmatch(fn):
845 850 if match(fn):
846 851 yield 'b', fn
847 852 else:
848 853 self.ui.warn(_('%s: No such file in rev %s\n') % (
849 854 util.pathto(self.getcwd(), fn), short(node)))
850 855 else:
851 856 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
852 857 yield src, fn
853 858
854 859 def status(self, node1=None, node2=None, files=[], match=util.always,
855 860 wlock=None, list_ignored=False, list_clean=False):
856 861 """return status of files between two nodes or node and working directory
857 862
858 863 If node1 is None, use the first dirstate parent instead.
859 864 If node2 is None, compare node1 with working directory.
860 865 """
861 866
862 867 def fcmp(fn, mf):
863 868 t1 = self.wread(fn)
864 869 return self.file(fn).cmp(mf.get(fn, nullid), t1)
865 870
866 871 def mfmatches(node):
867 872 change = self.changelog.read(node)
868 873 mf = self.manifest.read(change[0]).copy()
869 874 for fn in mf.keys():
870 875 if not match(fn):
871 876 del mf[fn]
872 877 return mf
873 878
874 879 modified, added, removed, deleted, unknown = [], [], [], [], []
875 880 ignored, clean = [], []
876 881
877 882 compareworking = False
878 883 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
879 884 compareworking = True
880 885
881 886 if not compareworking:
882 887 # read the manifest from node1 before the manifest from node2,
883 888 # so that we'll hit the manifest cache if we're going through
884 889 # all the revisions in parent->child order.
885 890 mf1 = mfmatches(node1)
886 891
887 892 # are we comparing the working directory?
888 893 if not node2:
889 894 if not wlock:
890 895 try:
891 896 wlock = self.wlock(wait=0)
892 897 except lock.LockException:
893 898 wlock = None
894 899 (lookup, modified, added, removed, deleted, unknown,
895 900 ignored, clean) = self.dirstate.status(files, match,
896 901 list_ignored, list_clean)
897 902
898 903 # are we comparing working dir against its parent?
899 904 if compareworking:
900 905 if lookup:
901 906 # do a full compare of any files that might have changed
902 907 mf2 = mfmatches(self.dirstate.parents()[0])
903 908 for f in lookup:
904 909 if fcmp(f, mf2):
905 910 modified.append(f)
906 911 else:
907 912 clean.append(f)
908 913 if wlock is not None:
909 914 self.dirstate.update([f], "n")
910 915 else:
911 916 # we are comparing working dir against non-parent
912 917 # generate a pseudo-manifest for the working dir
913 918 # XXX: create it in dirstate.py ?
914 919 mf2 = mfmatches(self.dirstate.parents()[0])
915 920 for f in lookup + modified + added:
916 921 mf2[f] = ""
917 922 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
918 923 for f in removed:
919 924 if f in mf2:
920 925 del mf2[f]
921 926 else:
922 927 # we are comparing two revisions
923 928 mf2 = mfmatches(node2)
924 929
925 930 if not compareworking:
926 931 # flush lists from dirstate before comparing manifests
927 932 modified, added, clean = [], [], []
928 933
929 934 # make sure to sort the files so we talk to the disk in a
930 935 # reasonable order
931 936 mf2keys = mf2.keys()
932 937 mf2keys.sort()
933 938 for fn in mf2keys:
934 939 if mf1.has_key(fn):
935 940 if mf1.flags(fn) != mf2.flags(fn) or \
936 941 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
937 942 modified.append(fn)
938 943 elif list_clean:
939 944 clean.append(fn)
940 945 del mf1[fn]
941 946 else:
942 947 added.append(fn)
943 948
944 949 removed = mf1.keys()
945 950
946 951 # sort and return results:
947 952 for l in modified, added, removed, deleted, unknown, ignored, clean:
948 953 l.sort()
949 954 return (modified, added, removed, deleted, unknown, ignored, clean)
950 955
951 956 def add(self, list, wlock=None):
952 957 if not wlock:
953 958 wlock = self.wlock()
954 959 for f in list:
955 960 p = self.wjoin(f)
956 961 if not os.path.exists(p):
957 962 self.ui.warn(_("%s does not exist!\n") % f)
958 963 elif not os.path.isfile(p):
959 964 self.ui.warn(_("%s not added: only files supported currently\n")
960 965 % f)
961 966 elif self.dirstate.state(f) in 'an':
962 967 self.ui.warn(_("%s already tracked!\n") % f)
963 968 else:
964 969 self.dirstate.update([f], "a")
965 970
966 971 def forget(self, list, wlock=None):
967 972 if not wlock:
968 973 wlock = self.wlock()
969 974 for f in list:
970 975 if self.dirstate.state(f) not in 'ai':
971 976 self.ui.warn(_("%s not added!\n") % f)
972 977 else:
973 978 self.dirstate.forget([f])
974 979
975 980 def remove(self, list, unlink=False, wlock=None):
976 981 if unlink:
977 982 for f in list:
978 983 try:
979 984 util.unlink(self.wjoin(f))
980 985 except OSError, inst:
981 986 if inst.errno != errno.ENOENT:
982 987 raise
983 988 if not wlock:
984 989 wlock = self.wlock()
985 990 for f in list:
986 991 p = self.wjoin(f)
987 992 if os.path.exists(p):
988 993 self.ui.warn(_("%s still exists!\n") % f)
989 994 elif self.dirstate.state(f) == 'a':
990 995 self.dirstate.forget([f])
991 996 elif f not in self.dirstate:
992 997 self.ui.warn(_("%s not tracked!\n") % f)
993 998 else:
994 999 self.dirstate.update([f], "r")
995 1000
996 1001 def undelete(self, list, wlock=None):
997 1002 p = self.dirstate.parents()[0]
998 1003 mn = self.changelog.read(p)[0]
999 1004 m = self.manifest.read(mn)
1000 1005 if not wlock:
1001 1006 wlock = self.wlock()
1002 1007 for f in list:
1003 1008 if self.dirstate.state(f) not in "r":
1004 1009 self.ui.warn("%s not removed!\n" % f)
1005 1010 else:
1006 1011 t = self.file(f).read(m[f])
1007 1012 self.wwrite(f, t)
1008 1013 util.set_exec(self.wjoin(f), m.execf(f))
1009 1014 self.dirstate.update([f], "n")
1010 1015
1011 1016 def copy(self, source, dest, wlock=None):
1012 1017 p = self.wjoin(dest)
1013 1018 if not os.path.exists(p):
1014 1019 self.ui.warn(_("%s does not exist!\n") % dest)
1015 1020 elif not os.path.isfile(p):
1016 1021 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1017 1022 else:
1018 1023 if not wlock:
1019 1024 wlock = self.wlock()
1020 1025 if self.dirstate.state(dest) == '?':
1021 1026 self.dirstate.update([dest], "a")
1022 1027 self.dirstate.copy(source, dest)
1023 1028
1024 1029 def heads(self, start=None):
1025 1030 heads = self.changelog.heads(start)
1026 1031 # sort the output in rev descending order
1027 1032 heads = [(-self.changelog.rev(h), h) for h in heads]
1028 1033 heads.sort()
1029 1034 return [n for (r, n) in heads]
1030 1035
1031 1036 # branchlookup returns a dict giving a list of branches for
1032 1037 # each head. A branch is defined as the tag of a node or
1033 1038 # the branch of the node's parents. If a node has multiple
1034 1039 # branch tags, tags are eliminated if they are visible from other
1035 1040 # branch tags.
1036 1041 #
1037 1042 # So, for this graph: a->b->c->d->e
1038 1043 # \ /
1039 1044 # aa -----/
1040 1045 # a has tag 2.6.12
1041 1046 # d has tag 2.6.13
1042 1047 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1043 1048 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1044 1049 # from the list.
1045 1050 #
1046 1051 # It is possible that more than one head will have the same branch tag.
1047 1052 # callers need to check the result for multiple heads under the same
1048 1053 # branch tag if that is a problem for them (ie checkout of a specific
1049 1054 # branch).
1050 1055 #
1051 1056 # passing in a specific branch will limit the depth of the search
1052 1057 # through the parents. It won't limit the branches returned in the
1053 1058 # result though.
1054 1059 def branchlookup(self, heads=None, branch=None):
1055 1060 if not heads:
1056 1061 heads = self.heads()
1057 1062 headt = [ h for h in heads ]
1058 1063 chlog = self.changelog
1059 1064 branches = {}
1060 1065 merges = []
1061 1066 seenmerge = {}
1062 1067
1063 1068 # traverse the tree once for each head, recording in the branches
1064 1069 # dict which tags are visible from this head. The branches
1065 1070 # dict also records which tags are visible from each tag
1066 1071 # while we traverse.
1067 1072 while headt or merges:
1068 1073 if merges:
1069 1074 n, found = merges.pop()
1070 1075 visit = [n]
1071 1076 else:
1072 1077 h = headt.pop()
1073 1078 visit = [h]
1074 1079 found = [h]
1075 1080 seen = {}
1076 1081 while visit:
1077 1082 n = visit.pop()
1078 1083 if n in seen:
1079 1084 continue
1080 1085 pp = chlog.parents(n)
1081 1086 tags = self.nodetags(n)
1082 1087 if tags:
1083 1088 for x in tags:
1084 1089 if x == 'tip':
1085 1090 continue
1086 1091 for f in found:
1087 1092 branches.setdefault(f, {})[n] = 1
1088 1093 branches.setdefault(n, {})[n] = 1
1089 1094 break
1090 1095 if n not in found:
1091 1096 found.append(n)
1092 1097 if branch in tags:
1093 1098 continue
1094 1099 seen[n] = 1
1095 1100 if pp[1] != nullid and n not in seenmerge:
1096 1101 merges.append((pp[1], [x for x in found]))
1097 1102 seenmerge[n] = 1
1098 1103 if pp[0] != nullid:
1099 1104 visit.append(pp[0])
1100 1105 # traverse the branches dict, eliminating branch tags from each
1101 1106 # head that are visible from another branch tag for that head.
1102 1107 out = {}
1103 1108 viscache = {}
1104 1109 for h in heads:
1105 1110 def visible(node):
1106 1111 if node in viscache:
1107 1112 return viscache[node]
1108 1113 ret = {}
1109 1114 visit = [node]
1110 1115 while visit:
1111 1116 x = visit.pop()
1112 1117 if x in viscache:
1113 1118 ret.update(viscache[x])
1114 1119 elif x not in ret:
1115 1120 ret[x] = 1
1116 1121 if x in branches:
1117 1122 visit[len(visit):] = branches[x].keys()
1118 1123 viscache[node] = ret
1119 1124 return ret
1120 1125 if h not in branches:
1121 1126 continue
1122 1127 # O(n^2), but somewhat limited. This only searches the
1123 1128 # tags visible from a specific head, not all the tags in the
1124 1129 # whole repo.
1125 1130 for b in branches[h]:
1126 1131 vis = False
1127 1132 for bb in branches[h].keys():
1128 1133 if b != bb:
1129 1134 if b in visible(bb):
1130 1135 vis = True
1131 1136 break
1132 1137 if not vis:
1133 1138 l = out.setdefault(h, [])
1134 1139 l[len(l):] = self.nodetags(b)
1135 1140 return out
1136 1141
1137 1142 def branches(self, nodes):
1138 1143 if not nodes:
1139 1144 nodes = [self.changelog.tip()]
1140 1145 b = []
1141 1146 for n in nodes:
1142 1147 t = n
1143 1148 while 1:
1144 1149 p = self.changelog.parents(n)
1145 1150 if p[1] != nullid or p[0] == nullid:
1146 1151 b.append((t, n, p[0], p[1]))
1147 1152 break
1148 1153 n = p[0]
1149 1154 return b
1150 1155
1151 1156 def between(self, pairs):
1152 1157 r = []
1153 1158
1154 1159 for top, bottom in pairs:
1155 1160 n, l, i = top, [], 0
1156 1161 f = 1
1157 1162
1158 1163 while n != bottom:
1159 1164 p = self.changelog.parents(n)[0]
1160 1165 if i == f:
1161 1166 l.append(n)
1162 1167 f = f * 2
1163 1168 n = p
1164 1169 i += 1
1165 1170
1166 1171 r.append(l)
1167 1172
1168 1173 return r
1169 1174
1170 1175 def findincoming(self, remote, base=None, heads=None, force=False):
1171 1176 """Return list of roots of the subsets of missing nodes from remote
1172 1177
1173 1178 If base dict is specified, assume that these nodes and their parents
1174 1179 exist on the remote side and that no child of a node of base exists
1175 1180 in both remote and self.
1176 1181 Furthermore, base will be updated to include the nodes that exist
1177 1182 in both self and remote but whose children do not exist
1178 1183 in both self and remote.
1179 1184 or ancestors of these heads.
1180 1185
1181 1186 All the ancestors of base are in self and in remote.
1182 1187 All the descendants of the list returned are missing in self.
1183 1188 (and so we know that the rest of the nodes are missing in remote, see
1184 1189 outgoing)
1185 1190 """
1186 1191 m = self.changelog.nodemap
1187 1192 search = []
1188 1193 fetch = {}
1189 1194 seen = {}
1190 1195 seenbranch = {}
1191 1196 if base == None:
1192 1197 base = {}
1193 1198
1194 1199 if not heads:
1195 1200 heads = remote.heads()
1196 1201
1197 1202 if self.changelog.tip() == nullid:
1198 1203 base[nullid] = 1
1199 1204 if heads != [nullid]:
1200 1205 return [nullid]
1201 1206 return []
1202 1207
1203 1208 # assume we're closer to the tip than the root
1204 1209 # and start by examining the heads
1205 1210 self.ui.status(_("searching for changes\n"))
1206 1211
1207 1212 unknown = []
1208 1213 for h in heads:
1209 1214 if h not in m:
1210 1215 unknown.append(h)
1211 1216 else:
1212 1217 base[h] = 1
1213 1218
1214 1219 if not unknown:
1215 1220 return []
1216 1221
1217 1222 req = dict.fromkeys(unknown)
1218 1223 reqcnt = 0
1219 1224
1220 1225 # search through remote branches
1221 1226 # a 'branch' here is a linear segment of history, with four parts:
1222 1227 # head, root, first parent, second parent
1223 1228 # (a branch always has two parents (or none) by definition)
1224 1229 unknown = remote.branches(unknown)
1225 1230 while unknown:
1226 1231 r = []
1227 1232 while unknown:
1228 1233 n = unknown.pop(0)
1229 1234 if n[0] in seen:
1230 1235 continue
1231 1236
1232 1237 self.ui.debug(_("examining %s:%s\n")
1233 1238 % (short(n[0]), short(n[1])))
1234 1239 if n[0] == nullid: # found the end of the branch
1235 1240 pass
1236 1241 elif n in seenbranch:
1237 1242 self.ui.debug(_("branch already found\n"))
1238 1243 continue
1239 1244 elif n[1] and n[1] in m: # do we know the base?
1240 1245 self.ui.debug(_("found incomplete branch %s:%s\n")
1241 1246 % (short(n[0]), short(n[1])))
1242 1247 search.append(n) # schedule branch range for scanning
1243 1248 seenbranch[n] = 1
1244 1249 else:
1245 1250 if n[1] not in seen and n[1] not in fetch:
1246 1251 if n[2] in m and n[3] in m:
1247 1252 self.ui.debug(_("found new changeset %s\n") %
1248 1253 short(n[1]))
1249 1254 fetch[n[1]] = 1 # earliest unknown
1250 1255 for p in n[2:4]:
1251 1256 if p in m:
1252 1257 base[p] = 1 # latest known
1253 1258
1254 1259 for p in n[2:4]:
1255 1260 if p not in req and p not in m:
1256 1261 r.append(p)
1257 1262 req[p] = 1
1258 1263 seen[n[0]] = 1
1259 1264
1260 1265 if r:
1261 1266 reqcnt += 1
1262 1267 self.ui.debug(_("request %d: %s\n") %
1263 1268 (reqcnt, " ".join(map(short, r))))
1264 1269 for p in xrange(0, len(r), 10):
1265 1270 for b in remote.branches(r[p:p+10]):
1266 1271 self.ui.debug(_("received %s:%s\n") %
1267 1272 (short(b[0]), short(b[1])))
1268 1273 unknown.append(b)
1269 1274
1270 1275 # do binary search on the branches we found
1271 1276 while search:
1272 1277 n = search.pop(0)
1273 1278 reqcnt += 1
1274 1279 l = remote.between([(n[0], n[1])])[0]
1275 1280 l.append(n[1])
1276 1281 p = n[0]
1277 1282 f = 1
1278 1283 for i in l:
1279 1284 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1280 1285 if i in m:
1281 1286 if f <= 2:
1282 1287 self.ui.debug(_("found new branch changeset %s\n") %
1283 1288 short(p))
1284 1289 fetch[p] = 1
1285 1290 base[i] = 1
1286 1291 else:
1287 1292 self.ui.debug(_("narrowed branch search to %s:%s\n")
1288 1293 % (short(p), short(i)))
1289 1294 search.append((p, i))
1290 1295 break
1291 1296 p, f = i, f * 2
1292 1297
1293 1298 # sanity check our fetch list
1294 1299 for f in fetch.keys():
1295 1300 if f in m:
1296 1301 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1297 1302
1298 1303 if base.keys() == [nullid]:
1299 1304 if force:
1300 1305 self.ui.warn(_("warning: repository is unrelated\n"))
1301 1306 else:
1302 1307 raise util.Abort(_("repository is unrelated"))
1303 1308
1304 1309 self.ui.debug(_("found new changesets starting at ") +
1305 1310 " ".join([short(f) for f in fetch]) + "\n")
1306 1311
1307 1312 self.ui.debug(_("%d total queries\n") % reqcnt)
1308 1313
1309 1314 return fetch.keys()
1310 1315
1311 1316 def findoutgoing(self, remote, base=None, heads=None, force=False):
1312 1317 """Return list of nodes that are roots of subsets not in remote
1313 1318
1314 1319 If base dict is specified, assume that these nodes and their parents
1315 1320 exist on the remote side.
1316 1321 If a list of heads is specified, return only nodes which are heads
1317 1322 or ancestors of these heads, and return a second element which
1318 1323 contains all remote heads which get new children.
1319 1324 """
1320 1325 if base == None:
1321 1326 base = {}
1322 1327 self.findincoming(remote, base, heads, force=force)
1323 1328
1324 1329 self.ui.debug(_("common changesets up to ")
1325 1330 + " ".join(map(short, base.keys())) + "\n")
1326 1331
1327 1332 remain = dict.fromkeys(self.changelog.nodemap)
1328 1333
1329 1334 # prune everything remote has from the tree
1330 1335 del remain[nullid]
1331 1336 remove = base.keys()
1332 1337 while remove:
1333 1338 n = remove.pop(0)
1334 1339 if n in remain:
1335 1340 del remain[n]
1336 1341 for p in self.changelog.parents(n):
1337 1342 remove.append(p)
1338 1343
1339 1344 # find every node whose parents have been pruned
1340 1345 subset = []
1341 1346 # find every remote head that will get new children
1342 1347 updated_heads = {}
1343 1348 for n in remain:
1344 1349 p1, p2 = self.changelog.parents(n)
1345 1350 if p1 not in remain and p2 not in remain:
1346 1351 subset.append(n)
1347 1352 if heads:
1348 1353 if p1 in heads:
1349 1354 updated_heads[p1] = True
1350 1355 if p2 in heads:
1351 1356 updated_heads[p2] = True
1352 1357
1353 1358 # this is the set of all roots we have to push
1354 1359 if heads:
1355 1360 return subset, updated_heads.keys()
1356 1361 else:
1357 1362 return subset
1358 1363
1359 1364 def pull(self, remote, heads=None, force=False, lock=None):
1360 1365 mylock = False
1361 1366 if not lock:
1362 1367 lock = self.lock()
1363 1368 mylock = True
1364 1369
1365 1370 try:
1366 1371 fetch = self.findincoming(remote, force=force)
1367 1372 if fetch == [nullid]:
1368 1373 self.ui.status(_("requesting all changes\n"))
1369 1374
1370 1375 if not fetch:
1371 1376 self.ui.status(_("no changes found\n"))
1372 1377 return 0
1373 1378
1374 1379 if heads is None:
1375 1380 cg = remote.changegroup(fetch, 'pull')
1376 1381 else:
1377 1382 if 'changegroupsubset' not in remote.capabilities:
1378 1383 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1379 1384 cg = remote.changegroupsubset(fetch, heads, 'pull')
1380 1385 return self.addchangegroup(cg, 'pull', remote.url())
1381 1386 finally:
1382 1387 if mylock:
1383 1388 lock.release()
1384 1389
1385 1390 def push(self, remote, force=False, revs=None):
1386 1391 # there are two ways to push to remote repo:
1387 1392 #
1388 1393 # addchangegroup assumes local user can lock remote
1389 1394 # repo (local filesystem, old ssh servers).
1390 1395 #
1391 1396 # unbundle assumes local user cannot lock remote repo (new ssh
1392 1397 # servers, http servers).
1393 1398
1394 1399 if remote.capable('unbundle'):
1395 1400 return self.push_unbundle(remote, force, revs)
1396 1401 return self.push_addchangegroup(remote, force, revs)
1397 1402
1398 1403 def prepush(self, remote, force, revs):
1399 1404 base = {}
1400 1405 remote_heads = remote.heads()
1401 1406 inc = self.findincoming(remote, base, remote_heads, force=force)
1402 1407
1403 1408 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1404 1409 if revs is not None:
1405 1410 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1406 1411 else:
1407 1412 bases, heads = update, self.changelog.heads()
1408 1413
1409 1414 if not bases:
1410 1415 self.ui.status(_("no changes found\n"))
1411 1416 return None, 1
1412 1417 elif not force:
1413 1418 # check if we're creating new remote heads
1414 1419 # to be a remote head after push, node must be either
1415 1420 # - unknown locally
1416 1421 # - a local outgoing head descended from update
1417 1422 # - a remote head that's known locally and not
1418 1423 # ancestral to an outgoing head
1419 1424
1420 1425 warn = 0
1421 1426
1422 1427 if remote_heads == [nullid]:
1423 1428 warn = 0
1424 1429 elif not revs and len(heads) > len(remote_heads):
1425 1430 warn = 1
1426 1431 else:
1427 1432 newheads = list(heads)
1428 1433 for r in remote_heads:
1429 1434 if r in self.changelog.nodemap:
1430 1435 desc = self.changelog.heads(r, heads)
1431 1436 l = [h for h in heads if h in desc]
1432 1437 if not l:
1433 1438 newheads.append(r)
1434 1439 else:
1435 1440 newheads.append(r)
1436 1441 if len(newheads) > len(remote_heads):
1437 1442 warn = 1
1438 1443
1439 1444 if warn:
1440 1445 self.ui.warn(_("abort: push creates new remote branches!\n"))
1441 1446 self.ui.status(_("(did you forget to merge?"
1442 1447 " use push -f to force)\n"))
1443 1448 return None, 1
1444 1449 elif inc:
1445 1450 self.ui.warn(_("note: unsynced remote changes!\n"))
1446 1451
1447 1452
1448 1453 if revs is None:
1449 1454 cg = self.changegroup(update, 'push')
1450 1455 else:
1451 1456 cg = self.changegroupsubset(update, revs, 'push')
1452 1457 return cg, remote_heads
1453 1458
1454 1459 def push_addchangegroup(self, remote, force, revs):
1455 1460 lock = remote.lock()
1456 1461
1457 1462 ret = self.prepush(remote, force, revs)
1458 1463 if ret[0] is not None:
1459 1464 cg, remote_heads = ret
1460 1465 return remote.addchangegroup(cg, 'push', self.url())
1461 1466 return ret[1]
1462 1467
1463 1468 def push_unbundle(self, remote, force, revs):
1464 1469 # local repo finds heads on server, finds out what revs it
1465 1470 # must push. once revs transferred, if server finds it has
1466 1471 # different heads (someone else won commit/push race), server
1467 1472 # aborts.
1468 1473
1469 1474 ret = self.prepush(remote, force, revs)
1470 1475 if ret[0] is not None:
1471 1476 cg, remote_heads = ret
1472 1477 if force: remote_heads = ['force']
1473 1478 return remote.unbundle(cg, remote_heads, 'push')
1474 1479 return ret[1]
1475 1480
1476 1481 def changegroupinfo(self, nodes):
1477 1482 self.ui.note(_("%d changesets found\n") % len(nodes))
1478 1483 if self.ui.debugflag:
1479 1484 self.ui.debug(_("List of changesets:\n"))
1480 1485 for node in nodes:
1481 1486 self.ui.debug("%s\n" % hex(node))
1482 1487
1483 1488 def changegroupsubset(self, bases, heads, source):
1484 1489 """This function generates a changegroup consisting of all the nodes
1485 1490 that are descendants of any of the bases, and ancestors of any of
1486 1491 the heads.
1487 1492
1488 1493 It is fairly complex as determining which filenodes and which
1489 1494 manifest nodes need to be included for the changeset to be complete
1490 1495 is non-trivial.
1491 1496
1492 1497 Another wrinkle is doing the reverse, figuring out which changeset in
1493 1498 the changegroup a particular filenode or manifestnode belongs to."""
1494 1499
1495 1500 self.hook('preoutgoing', throw=True, source=source)
1496 1501
1497 1502 # Set up some initial variables
1498 1503 # Make it easy to refer to self.changelog
1499 1504 cl = self.changelog
1500 1505 # msng is short for missing - compute the list of changesets in this
1501 1506 # changegroup.
1502 1507 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1503 1508 self.changegroupinfo(msng_cl_lst)
1504 1509 # Some bases may turn out to be superfluous, and some heads may be
1505 1510 # too. nodesbetween will return the minimal set of bases and heads
1506 1511 # necessary to re-create the changegroup.
1507 1512
1508 1513 # Known heads are the list of heads that it is assumed the recipient
1509 1514 # of this changegroup will know about.
1510 1515 knownheads = {}
1511 1516 # We assume that all parents of bases are known heads.
1512 1517 for n in bases:
1513 1518 for p in cl.parents(n):
1514 1519 if p != nullid:
1515 1520 knownheads[p] = 1
1516 1521 knownheads = knownheads.keys()
1517 1522 if knownheads:
1518 1523 # Now that we know what heads are known, we can compute which
1519 1524 # changesets are known. The recipient must know about all
1520 1525 # changesets required to reach the known heads from the null
1521 1526 # changeset.
1522 1527 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1523 1528 junk = None
1524 1529 # Transform the list into an ersatz set.
1525 1530 has_cl_set = dict.fromkeys(has_cl_set)
1526 1531 else:
1527 1532 # If there were no known heads, the recipient cannot be assumed to
1528 1533 # know about any changesets.
1529 1534 has_cl_set = {}
1530 1535
1531 1536 # Make it easy to refer to self.manifest
1532 1537 mnfst = self.manifest
1533 1538 # We don't know which manifests are missing yet
1534 1539 msng_mnfst_set = {}
1535 1540 # Nor do we know which filenodes are missing.
1536 1541 msng_filenode_set = {}
1537 1542
1538 1543 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1539 1544 junk = None
1540 1545
1541 1546 # A changeset always belongs to itself, so the changenode lookup
1542 1547 # function for a changenode is identity.
1543 1548 def identity(x):
1544 1549 return x
1545 1550
1546 1551 # A function generating function. Sets up an environment for the
1547 1552 # inner function.
1548 1553 def cmp_by_rev_func(revlog):
1549 1554 # Compare two nodes by their revision number in the environment's
1550 1555 # revision history. Since the revision number both represents the
1551 1556 # most efficient order to read the nodes in, and represents a
1552 1557 # topological sorting of the nodes, this function is often useful.
1553 1558 def cmp_by_rev(a, b):
1554 1559 return cmp(revlog.rev(a), revlog.rev(b))
1555 1560 return cmp_by_rev
1556 1561
1557 1562 # If we determine that a particular file or manifest node must be a
1558 1563 # node that the recipient of the changegroup will already have, we can
1559 1564 # also assume the recipient will have all the parents. This function
1560 1565 # prunes them from the set of missing nodes.
1561 1566 def prune_parents(revlog, hasset, msngset):
1562 1567 haslst = hasset.keys()
1563 1568 haslst.sort(cmp_by_rev_func(revlog))
1564 1569 for node in haslst:
1565 1570 parentlst = [p for p in revlog.parents(node) if p != nullid]
1566 1571 while parentlst:
1567 1572 n = parentlst.pop()
1568 1573 if n not in hasset:
1569 1574 hasset[n] = 1
1570 1575 p = [p for p in revlog.parents(n) if p != nullid]
1571 1576 parentlst.extend(p)
1572 1577 for n in hasset:
1573 1578 msngset.pop(n, None)
1574 1579
1575 1580 # This is a function generating function used to set up an environment
1576 1581 # for the inner function to execute in.
1577 1582 def manifest_and_file_collector(changedfileset):
1578 1583 # This is an information gathering function that gathers
1579 1584 # information from each changeset node that goes out as part of
1580 1585 # the changegroup. The information gathered is a list of which
1581 1586 # manifest nodes are potentially required (the recipient may
1582 1587 # already have them) and the total list of all files which were
1583 1588 # changed in any changeset in the changegroup.
1584 1589 #
1585 1590 # We also remember the first changenode we saw any manifest
1586 1591 # referenced by so we can later determine which changenode 'owns'
1587 1592 # the manifest.
1588 1593 def collect_manifests_and_files(clnode):
1589 1594 c = cl.read(clnode)
1590 1595 for f in c[3]:
1591 1596 # This is to make sure we only have one instance of each
1592 1597 # filename string for each filename.
1593 1598 changedfileset.setdefault(f, f)
1594 1599 msng_mnfst_set.setdefault(c[0], clnode)
1595 1600 return collect_manifests_and_files
1596 1601
1597 1602 # Figure out which manifest nodes (of the ones we think might be part
1598 1603 # of the changegroup) the recipient must know about and remove them
1599 1604 # from the changegroup.
1600 1605 def prune_manifests():
1601 1606 has_mnfst_set = {}
1602 1607 for n in msng_mnfst_set:
1603 1608 # If a 'missing' manifest thinks it belongs to a changenode
1604 1609 # the recipient is assumed to have, obviously the recipient
1605 1610 # must have that manifest.
1606 1611 linknode = cl.node(mnfst.linkrev(n))
1607 1612 if linknode in has_cl_set:
1608 1613 has_mnfst_set[n] = 1
1609 1614 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1610 1615
1611 1616 # Use the information collected in collect_manifests_and_files to say
1612 1617 # which changenode any manifestnode belongs to.
1613 1618 def lookup_manifest_link(mnfstnode):
1614 1619 return msng_mnfst_set[mnfstnode]
1615 1620
1616 1621 # A function generating function that sets up the initial environment
1617 1622 # for the inner function.
1618 1623 def filenode_collector(changedfiles):
1619 1624 next_rev = [0]
1620 1625 # This gathers information from each manifestnode included in the
1621 1626 # changegroup about which filenodes the manifest node references
1622 1627 # so we can include those in the changegroup too.
1623 1628 #
1624 1629 # It also remembers which changenode each filenode belongs to. It
1625 1630 # does this by assuming that a filenode belongs to the changenode
1626 1631 # that the first manifest referencing it belongs to.
1627 1632 def collect_msng_filenodes(mnfstnode):
1628 1633 r = mnfst.rev(mnfstnode)
1629 1634 if r == next_rev[0]:
1630 1635 # If this rev immediately follows the last one we looked at,
1631 1636 # we only need to examine its delta.
1632 1637 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1633 1638 # For each line in the delta
1634 1639 for dline in delta.splitlines():
1635 1640 # get the filename and filenode for that line
1636 1641 f, fnode = dline.split('\0')
1637 1642 fnode = bin(fnode[:40])
1638 1643 f = changedfiles.get(f, None)
1639 1644 # Proceed only if the file is in the list of files we
1640 1645 # care about.
1641 1646 if f is not None:
1642 1647 # Get the changenode this manifest belongs to
1643 1648 clnode = msng_mnfst_set[mnfstnode]
1644 1649 # Create the set of filenodes for the file if
1645 1650 # there isn't one already.
1646 1651 ndset = msng_filenode_set.setdefault(f, {})
1647 1652 # And set the filenode's changelog node to the
1648 1653 # manifest's if it hasn't been set already.
1649 1654 ndset.setdefault(fnode, clnode)
1650 1655 else:
1651 1656 # Otherwise we need a full manifest.
1652 1657 m = mnfst.read(mnfstnode)
1653 1658 # For every file we care about.
1654 1659 for f in changedfiles:
1655 1660 fnode = m.get(f, None)
1656 1661 # If it's in the manifest
1657 1662 if fnode is not None:
1658 1663 # See comments above.
1659 1664 clnode = msng_mnfst_set[mnfstnode]
1660 1665 ndset = msng_filenode_set.setdefault(f, {})
1661 1666 ndset.setdefault(fnode, clnode)
1662 1667 # Remember the revision we hope to see next.
1663 1668 next_rev[0] = r + 1
1664 1669 return collect_msng_filenodes
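# For reference (a hedged illustration, filename and node made up):
# a manifest delta line split above looks roughly like
#   "dir/file.c\x00<40 hex chars of the filenode><optional flags>"
# which is why fnode[:40] slices off any trailing flag characters.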
1665 1670
1666 1671 # We have a list of filenodes we think we need for a file; let's
1667 1672 # remove all those we know the recipient must have.
1668 1673 def prune_filenodes(f, filerevlog):
1669 1674 msngset = msng_filenode_set[f]
1670 1675 hasset = {}
1671 1676 # If a 'missing' filenode thinks it belongs to a changenode we
1672 1677 # assume the recipient must have, then the recipient must have
1673 1678 # that filenode.
1674 1679 for n in msngset:
1675 1680 clnode = cl.node(filerevlog.linkrev(n))
1676 1681 if clnode in has_cl_set:
1677 1682 hasset[n] = 1
1678 1683 prune_parents(filerevlog, hasset, msngset)
1679 1684
1680 1685 # A function generating function that sets up a context for the
1681 1686 # inner function.
1682 1687 def lookup_filenode_link_func(fname):
1683 1688 msngset = msng_filenode_set[fname]
1684 1689 # Look up the changenode the filenode belongs to.
1685 1690 def lookup_filenode_link(fnode):
1686 1691 return msngset[fnode]
1687 1692 return lookup_filenode_link
1688 1693
1689 1694 # Now that we have all these utility functions to help out and
1690 1695 # logically divide up the task, generate the group.
1691 1696 def gengroup():
1692 1697 # The set of changed files starts empty.
1693 1698 changedfiles = {}
1694 1699 # Create a changenode group generator that will call our functions
1695 1700 # back to look up the owning changenode and collect information.
1696 1701 group = cl.group(msng_cl_lst, identity,
1697 1702 manifest_and_file_collector(changedfiles))
1698 1703 for chnk in group:
1699 1704 yield chnk
1700 1705
1701 1706 # The list of manifests has been collected by the generator
1702 1707 # calling our functions back.
1703 1708 prune_manifests()
1704 1709 msng_mnfst_lst = msng_mnfst_set.keys()
1705 1710 # Sort the manifestnodes by revision number.
1706 1711 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1707 1712 # Create a generator for the manifestnodes that calls our lookup
1708 1713 # and data collection functions back.
1709 1714 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1710 1715 filenode_collector(changedfiles))
1711 1716 for chnk in group:
1712 1717 yield chnk
1713 1718
1714 1719 # These are no longer needed; drop the references so the memory
1715 1720 # can be reclaimed.
1716 1721 msng_mnfst_lst = None
1717 1722 msng_mnfst_set.clear()
1718 1723
1719 1724 changedfiles = changedfiles.keys()
1720 1725 changedfiles.sort()
1721 1726 # Go through all our files in order sorted by name.
1722 1727 for fname in changedfiles:
1723 1728 filerevlog = self.file(fname)
1724 1729 # Toss out the filenodes that the recipient isn't really
1725 1730 # missing.
1726 1731 if msng_filenode_set.has_key(fname):
1727 1732 prune_filenodes(fname, filerevlog)
1728 1733 msng_filenode_lst = msng_filenode_set[fname].keys()
1729 1734 else:
1730 1735 msng_filenode_lst = []
1731 1736 # If any filenodes are left, generate the group for them,
1732 1737 # otherwise don't bother.
1733 1738 if len(msng_filenode_lst) > 0:
1734 1739 yield changegroup.genchunk(fname)
1735 1740 # Sort the filenodes by their revision number.
1736 1741 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1737 1742 # Create a group generator, passing in only a changenode
1738 1743 # lookup function, since we don't need to collect any
1739 1744 # information from filenodes.
1740 1745 group = filerevlog.group(msng_filenode_lst,
1741 1746 lookup_filenode_link_func(fname))
1742 1747 for chnk in group:
1743 1748 yield chnk
1744 1749 if msng_filenode_set.has_key(fname):
1745 1750 # Don't need this anymore, toss it to free memory.
1746 1751 del msng_filenode_set[fname]
1747 1752 # Signal that no more groups are left.
1748 1753 yield changegroup.closechunk()
1749 1754
1750 1755 if msng_cl_lst:
1751 1756 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1752 1757
1753 1758 return util.chunkbuffer(gengroup())
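# util.chunkbuffer wraps the generator in a file-like object, so a
# caller can stream the group with read(). A hedged usage sketch
# (the enclosing method name and call shape are assumptions):
#   cg = repo.changegroupsubset(bases, heads, 'pull')
#   while True:
#       chunk = cg.read(4096)
#       if not chunk:
#           break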
1754 1759
1755 1760 def changegroup(self, basenodes, source):
1756 1761 """Generate a changegroup of all nodes that we have that a recipient
1757 1762 doesn't.
1758 1763
1759 1764 This is much easier than the previous function as we can assume that
1760 1765 the recipient has any changenode we aren't sending them."""
1761 1766
1762 1767 self.hook('preoutgoing', throw=True, source=source)
1763 1768
1764 1769 cl = self.changelog
1765 1770 nodes = cl.nodesbetween(basenodes, None)[0]
1766 1771 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1767 1772 self.changegroupinfo(nodes)
1768 1773
1769 1774 def identity(x):
1770 1775 return x
1771 1776
1772 1777 def gennodelst(revlog):
1773 1778 for r in xrange(0, revlog.count()):
1774 1779 n = revlog.node(r)
1775 1780 if revlog.linkrev(n) in revset:
1776 1781 yield n
1777 1782
1778 1783 def changed_file_collector(changedfileset):
1779 1784 def collect_changed_files(clnode):
1780 1785 c = cl.read(clnode)
1781 1786 for fname in c[3]:
1782 1787 changedfileset[fname] = 1
1783 1788 return collect_changed_files
1784 1789
1785 1790 def lookuprevlink_func(revlog):
1786 1791 def lookuprevlink(n):
1787 1792 return cl.node(revlog.linkrev(n))
1788 1793 return lookuprevlink
1789 1794
1790 1795 def gengroup():
1791 1796 # construct a list of all changed files
1792 1797 changedfiles = {}
1793 1798
1794 1799 for chnk in cl.group(nodes, identity,
1795 1800 changed_file_collector(changedfiles)):
1796 1801 yield chnk
1797 1802 changedfiles = changedfiles.keys()
1798 1803 changedfiles.sort()
1799 1804
1800 1805 mnfst = self.manifest
1801 1806 nodeiter = gennodelst(mnfst)
1802 1807 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1803 1808 yield chnk
1804 1809
1805 1810 for fname in changedfiles:
1806 1811 filerevlog = self.file(fname)
1807 1812 nodeiter = gennodelst(filerevlog)
1808 1813 nodeiter = list(nodeiter)
1809 1814 if nodeiter:
1810 1815 yield changegroup.genchunk(fname)
1811 1816 lookup = lookuprevlink_func(filerevlog)
1812 1817 for chnk in filerevlog.group(nodeiter, lookup):
1813 1818 yield chnk
1814 1819
1815 1820 yield changegroup.closechunk()
1816 1821
1817 1822 if nodes:
1818 1823 self.hook('outgoing', node=hex(nodes[0]), source=source)
1819 1824
1820 1825 return util.chunkbuffer(gengroup())
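# Hedged usage sketch (the call-site shape is an assumption): a push
# hands this buffer straight to the remote side, roughly:
#   cg = self.changegroup(common_bases, 'push')
#   remote.unbundle(cg, remote_heads, 'push')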
1821 1826
1822 1827 def addchangegroup(self, source, srctype, url):
1823 1828 """add changegroup to repo.
1824 1829
1825 1830 return values:
1826 1831 - nothing changed or no source: 0
1827 1832 - more heads than before: 1+added heads (2..n)
1828 1833 - fewer heads than before: -1-removed heads (-2..-n)
1829 1834 - number of heads stays the same: 1
1830 1835 """
1831 1836 def csmap(x):
1832 1837 self.ui.debug(_("add changeset %s\n") % short(x))
1833 1838 return cl.count()
1834 1839
1835 1840 def revmap(x):
1836 1841 return cl.rev(x)
1837 1842
1838 1843 if not source:
1839 1844 return 0
1840 1845
1841 1846 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1842 1847
1843 1848 changesets = files = revisions = 0
1844 1849
1845 1850 tr = self.transaction()
1846 1851
1847 1852 # write changelog data to temp files so concurrent readers will not
1848 1853 # see an inconsistent view
1849 1854 cl = None
1850 1855 try:
1851 1856 cl = appendfile.appendchangelog(self.sopener,
1852 1857 self.changelog.version)
1853 1858
1854 1859 oldheads = len(cl.heads())
1855 1860
1856 1861 # pull off the changeset group
1857 1862 self.ui.status(_("adding changesets\n"))
1858 1863 cor = cl.count() - 1
1859 1864 chunkiter = changegroup.chunkiter(source)
1860 1865 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1861 1866 raise util.Abort(_("received changelog group is empty"))
1862 1867 cnr = cl.count() - 1
1863 1868 changesets = cnr - cor
1864 1869
1865 1870 # pull off the manifest group
1866 1871 self.ui.status(_("adding manifests\n"))
1867 1872 chunkiter = changegroup.chunkiter(source)
1868 1873 # no need to check for empty manifest group here:
1869 1874 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1870 1875 # no new manifest will be created and the manifest group will
1871 1876 # be empty during the pull
1872 1877 self.manifest.addgroup(chunkiter, revmap, tr)
1873 1878
1874 1879 # process the files
1875 1880 self.ui.status(_("adding file changes\n"))
1876 1881 while 1:
1877 1882 f = changegroup.getchunk(source)
1878 1883 if not f:
1879 1884 break
1880 1885 self.ui.debug(_("adding %s revisions\n") % f)
1881 1886 fl = self.file(f)
1882 1887 o = fl.count()
1883 1888 chunkiter = changegroup.chunkiter(source)
1884 1889 if fl.addgroup(chunkiter, revmap, tr) is None:
1885 1890 raise util.Abort(_("received file revlog group is empty"))
1886 1891 revisions += fl.count() - o
1887 1892 files += 1
1888 1893
1889 1894 cl.writedata()
1890 1895 finally:
1891 1896 if cl:
1892 1897 cl.cleanup()
1893 1898
1894 1899 # make changelog see real files again
1895 1900 self.changelog = changelog.changelog(self.sopener,
1896 1901 self.changelog.version)
1897 1902 self.changelog.checkinlinesize(tr)
1898 1903
1899 1904 newheads = len(self.changelog.heads())
1900 1905 heads = ""
1901 1906 if oldheads and newheads != oldheads:
1902 1907 heads = _(" (%+d heads)") % (newheads - oldheads)
1903 1908
1904 1909 self.ui.status(_("added %d changesets"
1905 1910 " with %d changes to %d files%s\n")
1906 1911 % (changesets, revisions, files, heads))
1907 1912
1908 1913 if changesets > 0:
1909 1914 self.hook('pretxnchangegroup', throw=True,
1910 1915 node=hex(self.changelog.node(cor+1)), source=srctype,
1911 1916 url=url)
1912 1917
1913 1918 tr.close()
1914 1919
1915 1920 if changesets > 0:
1916 1921 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1917 1922 source=srctype, url=url)
1918 1923
1919 1924 for i in xrange(cor + 1, cnr + 1):
1920 1925 self.hook("incoming", node=hex(self.changelog.node(i)),
1921 1926 source=srctype, url=url)
1922 1927
1923 1928 # never return 0 here:
1924 1929 if newheads < oldheads:
1925 1930 return newheads - oldheads - 1
1926 1931 else:
1927 1932 return newheads - oldheads + 1
1928 1933
1929 1934
1930 1935 def stream_in(self, remote):
1931 1936 fp = remote.stream_out()
1932 1937 l = fp.readline()
1933 1938 try:
1934 1939 resp = int(l)
1935 1940 except ValueError:
1936 1941 raise util.UnexpectedOutput(
1937 1942 _('Unexpected response from remote server:'), l)
1938 1943 if resp == 1:
1939 1944 raise util.Abort(_('operation forbidden by server'))
1940 1945 elif resp == 2:
1941 1946 raise util.Abort(_('locking the remote repository failed'))
1942 1947 elif resp != 0:
1943 1948 raise util.Abort(_('the server sent an unknown error code'))
1944 1949 self.ui.status(_('streaming all changes\n'))
1945 1950 l = fp.readline()
1946 1951 try:
1947 1952 total_files, total_bytes = map(int, l.split(' ', 1))
1948 1953 except (ValueError, TypeError):
1949 1954 raise util.UnexpectedOutput(
1950 1955 _('Unexpected response from remote server:'), l)
1951 1956 self.ui.status(_('%d files to transfer, %s of data\n') %
1952 1957 (total_files, util.bytecount(total_bytes)))
1953 1958 start = time.time()
1954 1959 for i in xrange(total_files):
1955 1960 # XXX doesn't support '\n' or '\r' in filenames
1956 1961 l = fp.readline()
1957 1962 try:
1958 1963 name, size = l.split('\0', 1)
1959 1964 size = int(size)
1960 1965 except (ValueError, TypeError):
1961 1966 raise util.UnexpectedOutput(
1962 1967 _('Unexpected response from remote server:'), l)
1963 1968 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1964 1969 ofp = self.sopener(name, 'w')
1965 1970 for chunk in util.filechunkiter(fp, limit=size):
1966 1971 ofp.write(chunk)
1967 1972 ofp.close()
1968 1973 elapsed = time.time() - start
1969 1974 if elapsed <= 0:
1970 1975 elapsed = 0.001
1971 1976 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1972 1977 (util.bytecount(total_bytes), elapsed,
1973 1978 util.bytecount(total_bytes / elapsed)))
1974 1979 self.reload()
1975 1980 return len(self.heads()) + 1
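# Sketch of the stream format parsed above (summarized from this
# code, not from a protocol spec):
#   <response code>\n
#   <total files> <total bytes>\n
#   then per file: "<name>\0<size>\n" followed by <size> raw bytes.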
1976 1981
1977 1982 def clone(self, remote, heads=[], stream=False):
1978 1983 '''clone remote repository.
1979 1984
1980 1985 keyword arguments:
1981 1986 heads: list of revs to clone (forces use of pull)
1982 1987 stream: use streaming clone if possible'''
1983 1988
1984 1989 # now, all clients that can request uncompressed clones can
1985 1990 # read repo formats supported by all servers that can serve
1986 1991 # them.
1987 1992
1988 1993 # if revlog format changes, client will have to check version
1989 1994 # and format flags on "stream" capability, and use
1990 1995 # uncompressed only if compatible.
1991 1996
1992 1997 if stream and not heads and remote.capable('stream'):
1993 1998 return self.stream_in(remote)
1994 1999 return self.pull(remote, heads)
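# Hedged usage sketch (caller shape assumed): the generic clone
# machinery ends up here as roughly
#   dest_repo.clone(src_repo, heads=[], stream=True)
# falling back to pull() when streaming is refused or heads are given.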
1995 2000
1996 2001 # used to avoid circular references so destructors work
1997 2002 def aftertrans(files):
1998 2003 renamefiles = [tuple(t) for t in files]
1999 2004 def a():
2000 2005 for src, dest in renamefiles:
2001 2006 util.rename(src, dest)
2002 2007 return a
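# Hedged usage sketch (journal/undo paths assumed): aftertrans is
# meant as a transaction's post-close callback, so the journal only
# becomes the undo file once the transaction commits, e.g.
#   aftertrans([(self.sjoin('journal'), self.sjoin('undo'))])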
2003 2008
2004 2009 def instance(ui, path, create):
2005 2010 return localrepository(ui, util.drop_scheme('file', path), create)
2006 2011
2007 2012 def islocal(path):
2008 2013 return True
@@ -1,27 +1,27 b''
1 1 adding a
2 2 adding b
3 3 adding t.h
4 4 adding t/x
5 5 a
6 6 NONEXISTENT: No such file or directory
7 7 a
8 8 b
9 9 t.h
10 10 t/x
11 11 a: No such file or directory
12 12 NONEXISTENT: No such file or directory
13 13 b
14 14 t.h
15 15 t/x
16 16 a
17 17 NONEXISTENT: No such file in rev ce18e5bc5cd3
18 18 a
19 t/x
20 19 b
21 20 t.h
21 t/x
22 22 % -I/-X with relative path should work
23 23 b
24 24 t.h
25 25 t/x
26 26 t/x
27 27 t/x