Ignore all errors while parsing the branch cache.
Alexis S. L. Carvalho
r3761:9433bdca default
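The change below hardens localrepo._readbranchcache: instead of only handling a missing cache file (IOError) and a stale tip, it now treats any failure while reading or parsing branches.cache as "no cache", so a corrupt cache can never abort the command that consulted it. What follows is a minimal, standalone sketch of that pattern (Python 2, matching the codebase of the era): bin, nullid, and nullrev are simplified stand-ins for their mercurial.node counterparts, SystemExit stands in for util.SignalInterrupt, and opener, changelog, and ui are assumed to behave like the repository's own objects.

    import binascii

    nullid = "\0" * 20            # stand-in for mercurial.node.nullid
    nullrev = -1                  # stand-in for mercurial.node.nullrev

    def bin(node):
        # stand-in for mercurial.node.bin: 40 hex digits -> 20 bytes
        return binascii.unhexlify(node)

    def readbranchcache(opener, changelog, ui):
        partial = {}
        try:
            f = opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # first line: "<hex tip node> <tip rev>"
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < changelog.count() and
                    changelog.node(lrev) == last):
                # cached tip no longer matches the changelog
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines: "<hex node> <branch label>"
            for l in lines:
                if not l:
                    continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except IOError:
            # no cache file: silently start from scratch
            last, lrev = nullid, nullrev
        except (KeyboardInterrupt, SystemExit):
            raise                 # never swallow interrupts
        except Exception, inst:
            # anything else (truncated file, bad hex, stale tip, ...):
            # report only in debug mode, then fall back to an empty
            # cache so it gets rebuilt
            if ui.debugflag:
                ui.warn(str(inst) + '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

Since branchtags() only uses this cache as a starting point for _updatebranchcache, the worst case for a bad cache is a full rebuild, never a failed operation.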
@@ -1,1899 +1,1903 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import gettext as _
10 10 from demandload import *
11 11 import repo
12 12 demandload(globals(), "appendfile changegroup")
13 13 demandload(globals(), "changelog dirstate filelog manifest context")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
15 15 demandload(globals(), "os revlog time util")
16 16
17 17 class localrepository(repo.repository):
18 18 capabilities = ('lookup', 'changegroupsubset')
19 19
20 20 def __del__(self):
21 21 self.transhandle = None
22 22 def __init__(self, parentui, path=None, create=0):
23 23 repo.repository.__init__(self)
24 24 if not path:
25 25 p = os.getcwd()
26 26 while not os.path.isdir(os.path.join(p, ".hg")):
27 27 oldp = p
28 28 p = os.path.dirname(p)
29 29 if p == oldp:
30 30 raise repo.RepoError(_("There is no Mercurial repository"
31 31 " here (.hg not found)"))
32 32 path = p
33 33 self.path = os.path.join(path, ".hg")
34 34
35 35 if not os.path.isdir(self.path):
36 36 if create:
37 37 if not os.path.exists(path):
38 38 os.mkdir(path)
39 39 os.mkdir(self.path)
40 40 else:
41 41 raise repo.RepoError(_("repository %s not found") % path)
42 42 elif create:
43 43 raise repo.RepoError(_("repository %s already exists") % path)
44 44
45 45 self.root = os.path.realpath(path)
46 46 self.origroot = path
47 47 self.ui = ui.ui(parentui=parentui)
48 48 self.opener = util.opener(self.path)
49 49 self.sopener = util.opener(self.path)
50 50 self.wopener = util.opener(self.root)
51 51
52 52 try:
53 53 self.ui.readconfig(self.join("hgrc"), self.root)
54 54 except IOError:
55 55 pass
56 56
57 57 v = self.ui.configrevlog()
58 58 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
59 59 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
60 60 fl = v.get('flags', None)
61 61 flags = 0
62 62 if fl != None:
63 63 for x in fl.split():
64 64 flags |= revlog.flagstr(x)
65 65 elif self.revlogv1:
66 66 flags = revlog.REVLOG_DEFAULT_FLAGS
67 67
68 68 v = self.revlogversion | flags
69 69 self.manifest = manifest.manifest(self.sopener, v)
70 70 self.changelog = changelog.changelog(self.sopener, v)
71 71
72 72 # the changelog might not have the inline index flag
73 73 # on. If the format of the changelog is the same as found in
74 74 # .hgrc, apply any flags found in the .hgrc as well.
75 75 # Otherwise, just use the version from the changelog.
76 76 v = self.changelog.version
77 77 if v == self.revlogversion:
78 78 v |= flags
79 79 self.revlogversion = v
80 80
81 81 self.tagscache = None
82 82 self.branchcache = None
83 83 self.nodetagscache = None
84 84 self.encodepats = None
85 85 self.decodepats = None
86 86 self.transhandle = None
87 87
88 88 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
89 89
90 90 def url(self):
91 91 return 'file:' + self.root
92 92
93 93 def hook(self, name, throw=False, **args):
94 94 def callhook(hname, funcname):
95 95 '''call python hook. hook is callable object, looked up as
96 96 name in python module. if callable returns "true", hook
97 97 fails, else passes. if hook raises exception, treated as
98 98 hook failure. exception propagates if throw is "true".
99 99
100 100 reason for "true" meaning "hook failed" is so that
101 101 unmodified commands (e.g. mercurial.commands.update) can
102 102 be run as hooks without wrappers to convert return values.'''
103 103
104 104 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
105 105 d = funcname.rfind('.')
106 106 if d == -1:
107 107 raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
108 108 % (hname, funcname))
109 109 modname = funcname[:d]
110 110 try:
111 111 obj = __import__(modname)
112 112 except ImportError:
113 113 try:
114 114 # extensions are loaded with hgext_ prefix
115 115 obj = __import__("hgext_%s" % modname)
116 116 except ImportError:
117 117 raise util.Abort(_('%s hook is invalid '
118 118 '(import of "%s" failed)') %
119 119 (hname, modname))
120 120 try:
121 121 for p in funcname.split('.')[1:]:
122 122 obj = getattr(obj, p)
123 123 except AttributeError, err:
124 124 raise util.Abort(_('%s hook is invalid '
125 125 '("%s" is not defined)') %
126 126 (hname, funcname))
127 127 if not callable(obj):
128 128 raise util.Abort(_('%s hook is invalid '
129 129 '("%s" is not callable)') %
130 130 (hname, funcname))
131 131 try:
132 132 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
133 133 except (KeyboardInterrupt, util.SignalInterrupt):
134 134 raise
135 135 except Exception, exc:
136 136 if isinstance(exc, util.Abort):
137 137 self.ui.warn(_('error: %s hook failed: %s\n') %
138 138 (hname, exc.args[0]))
139 139 else:
140 140 self.ui.warn(_('error: %s hook raised an exception: '
141 141 '%s\n') % (hname, exc))
142 142 if throw:
143 143 raise
144 144 self.ui.print_exc()
145 145 return True
146 146 if r:
147 147 if throw:
148 148 raise util.Abort(_('%s hook failed') % hname)
149 149 self.ui.warn(_('warning: %s hook failed\n') % hname)
150 150 return r
151 151
152 152 def runhook(name, cmd):
153 153 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
154 154 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
155 155 r = util.system(cmd, environ=env, cwd=self.root)
156 156 if r:
157 157 desc, r = util.explain_exit(r)
158 158 if throw:
159 159 raise util.Abort(_('%s hook %s') % (name, desc))
160 160 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
161 161 return r
162 162
163 163 r = False
164 164 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
165 165 if hname.split(".", 1)[0] == name and cmd]
166 166 hooks.sort()
167 167 for hname, cmd in hooks:
168 168 if cmd.startswith('python:'):
169 169 r = callhook(hname, cmd[7:].strip()) or r
170 170 else:
171 171 r = runhook(hname, cmd) or r
172 172 return r
173 173
174 174 tag_disallowed = ':\r\n'
175 175
176 176 def tag(self, name, node, message, local, user, date):
177 177 '''tag a revision with a symbolic name.
178 178
179 179 if local is True, the tag is stored in a per-repository file.
180 180 otherwise, it is stored in the .hgtags file, and a new
181 181 changeset is committed with the change.
182 182
183 183 keyword arguments:
184 184
185 185 local: whether to store tag in non-version-controlled file
186 186 (default False)
187 187
188 188 message: commit message to use if committing
189 189
190 190 user: name of user to use if committing
191 191
192 192 date: date tuple to use if committing'''
193 193
194 194 for c in self.tag_disallowed:
195 195 if c in name:
196 196 raise util.Abort(_('%r cannot be used in a tag name') % c)
197 197
198 198 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
199 199
200 200 if local:
201 201 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
202 202 self.hook('tag', node=hex(node), tag=name, local=local)
203 203 return
204 204
205 205 for x in self.status()[:5]:
206 206 if '.hgtags' in x:
207 207 raise util.Abort(_('working copy of .hgtags is changed '
208 208 '(please commit .hgtags manually)'))
209 209
210 210 self.wfile('.hgtags', 'ab').write('%s %s\n' % (hex(node), name))
211 211 if self.dirstate.state('.hgtags') == '?':
212 212 self.add(['.hgtags'])
213 213
214 214 self.commit(['.hgtags'], message, user, date)
215 215 self.hook('tag', node=hex(node), tag=name, local=local)
216 216
217 217 def tags(self):
218 218 '''return a mapping of tag to node'''
219 219 if not self.tagscache:
220 220 self.tagscache = {}
221 221
222 222 def parsetag(line, context):
223 223 if not line:
224 224 return
225 225 s = line.split(" ", 1)
226 226 if len(s) != 2:
227 227 self.ui.warn(_("%s: cannot parse entry\n") % context)
228 228 return
229 229 node, key = s
230 230 key = key.strip()
231 231 try:
232 232 bin_n = bin(node)
233 233 except TypeError:
234 234 self.ui.warn(_("%s: node '%s' is not well formed\n") %
235 235 (context, node))
236 236 return
237 237 if bin_n not in self.changelog.nodemap:
238 238 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
239 239 (context, key))
240 240 return
241 241 self.tagscache[key] = bin_n
242 242
243 243 # read the tags file from each head, ending with the tip,
244 244 # and add each tag found to the map, with "newer" ones
245 245 # taking precedence
246 246 f = None
247 247 for rev, node, fnode in self._hgtagsnodes():
248 248 f = (f and f.filectx(fnode) or
249 249 self.filectx('.hgtags', fileid=fnode))
250 250 count = 0
251 251 for l in f.data().splitlines():
252 252 count += 1
253 253 parsetag(l, _("%s, line %d") % (str(f), count))
254 254
255 255 try:
256 256 f = self.opener("localtags")
257 257 count = 0
258 258 for l in f:
259 259 count += 1
260 260 parsetag(l, _("localtags, line %d") % count)
261 261 except IOError:
262 262 pass
263 263
264 264 self.tagscache['tip'] = self.changelog.tip()
265 265
266 266 return self.tagscache
267 267
268 268 def _hgtagsnodes(self):
269 269 heads = self.heads()
270 270 heads.reverse()
271 271 last = {}
272 272 ret = []
273 273 for node in heads:
274 274 c = self.changectx(node)
275 275 rev = c.rev()
276 276 try:
277 277 fnode = c.filenode('.hgtags')
278 278 except repo.LookupError:
279 279 continue
280 280 ret.append((rev, node, fnode))
281 281 if fnode in last:
282 282 ret[last[fnode]] = None
283 283 last[fnode] = len(ret) - 1
284 284 return [item for item in ret if item]
285 285
286 286 def tagslist(self):
287 287 '''return a list of tags ordered by revision'''
288 288 l = []
289 289 for t, n in self.tags().items():
290 290 try:
291 291 r = self.changelog.rev(n)
292 292 except:
293 293 r = -2 # sort to the beginning of the list if unknown
294 294 l.append((r, t, n))
295 295 l.sort()
296 296 return [(t, n) for r, t, n in l]
297 297
298 298 def nodetags(self, node):
299 299 '''return the tags associated with a node'''
300 300 if not self.nodetagscache:
301 301 self.nodetagscache = {}
302 302 for t, n in self.tags().items():
303 303 self.nodetagscache.setdefault(n, []).append(t)
304 304 return self.nodetagscache.get(node, [])
305 305
306 306 def branchtags(self):
307 307 if self.branchcache != None:
308 308 return self.branchcache
309 309
310 310 self.branchcache = {} # avoid recursion in changectx
311 311
312 312 partial, last, lrev = self._readbranchcache()
313 313
314 314 tiprev = self.changelog.count() - 1
315 315 if lrev != tiprev:
316 316 self._updatebranchcache(partial, lrev+1, tiprev+1)
317 317 self._writebranchcache(partial, self.changelog.tip(), tiprev)
318 318
319 319 self.branchcache = partial
320 320 return self.branchcache
321 321
322 322 def _readbranchcache(self):
323 323 partial = {}
324 324 try:
325 325 f = self.opener("branches.cache")
326 326 lines = f.read().split('\n')
327 327 f.close()
328 328 last, lrev = lines.pop(0).rstrip().split(" ", 1)
329 329 last, lrev = bin(last), int(lrev)
330 if (lrev < self.changelog.count() and
330 if not (lrev < self.changelog.count() and
331 331 self.changelog.node(lrev) == last): # sanity check
332 # invalidate the cache
333 raise ValueError('Invalid branch cache: unknown tip')
332 334 for l in lines:
333 335 if not l: continue
334 336 node, label = l.rstrip().split(" ", 1)
335 337 partial[label] = bin(node)
336 else: # invalidate the cache
337 last, lrev = nullid, nullrev
338 except IOError:
339 last, lrev = nullid, nullrev
338 except (KeyboardInterrupt, util.SignalInterrupt):
339 raise
340 except Exception, inst:
341 if self.ui.debugflag:
342 self.ui.warn(str(inst), '\n')
343 partial, last, lrev = {}, nullid, nullrev
340 344 return partial, last, lrev
341 345
342 346 def _writebranchcache(self, branches, tip, tiprev):
343 347 try:
344 348 f = self.opener("branches.cache", "w")
345 349 f.write("%s %s\n" % (hex(tip), tiprev))
346 350 for label, node in branches.iteritems():
347 351 f.write("%s %s\n" % (hex(node), label))
348 352 except IOError:
349 353 pass
350 354
351 355 def _updatebranchcache(self, partial, start, end):
352 356 for r in xrange(start, end):
353 357 c = self.changectx(r)
354 358 b = c.branch()
355 359 if b:
356 360 partial[b] = c.node()
357 361
358 362 def lookup(self, key):
359 363 if key == '.':
360 364 key = self.dirstate.parents()[0]
361 365 if key == nullid:
362 366 raise repo.RepoError(_("no revision checked out"))
363 367 n = self.changelog._match(key)
364 368 if n:
365 369 return n
366 370 if key in self.tags():
367 371 return self.tags()[key]
368 372 if key in self.branchtags():
369 373 return self.branchtags()[key]
370 374 n = self.changelog._partialmatch(key)
371 375 if n:
372 376 return n
373 377 raise repo.RepoError(_("unknown revision '%s'") % key)
374 378
375 379 def dev(self):
376 380 return os.lstat(self.path).st_dev
377 381
378 382 def local(self):
379 383 return True
380 384
381 385 def join(self, f):
382 386 return os.path.join(self.path, f)
383 387
384 388 def sjoin(self, f):
385 389 return os.path.join(self.path, f)
386 390
387 391 def wjoin(self, f):
388 392 return os.path.join(self.root, f)
389 393
390 394 def file(self, f):
391 395 if f[0] == '/':
392 396 f = f[1:]
393 397 return filelog.filelog(self.sopener, f, self.revlogversion)
394 398
395 399 def changectx(self, changeid=None):
396 400 return context.changectx(self, changeid)
397 401
398 402 def workingctx(self):
399 403 return context.workingctx(self)
400 404
401 405 def parents(self, changeid=None):
402 406 '''
403 407 get list of changectxs for parents of changeid or working directory
404 408 '''
405 409 if changeid is None:
406 410 pl = self.dirstate.parents()
407 411 else:
408 412 n = self.changelog.lookup(changeid)
409 413 pl = self.changelog.parents(n)
410 414 if pl[1] == nullid:
411 415 return [self.changectx(pl[0])]
412 416 return [self.changectx(pl[0]), self.changectx(pl[1])]
413 417
414 418 def filectx(self, path, changeid=None, fileid=None):
415 419 """changeid can be a changeset revision, node, or tag.
416 420 fileid can be a file revision or node."""
417 421 return context.filectx(self, path, changeid, fileid)
418 422
419 423 def getcwd(self):
420 424 return self.dirstate.getcwd()
421 425
422 426 def wfile(self, f, mode='r'):
423 427 return self.wopener(f, mode)
424 428
425 429 def wread(self, filename):
426 430 if self.encodepats == None:
427 431 l = []
428 432 for pat, cmd in self.ui.configitems("encode"):
429 433 mf = util.matcher(self.root, "", [pat], [], [])[1]
430 434 l.append((mf, cmd))
431 435 self.encodepats = l
432 436
433 437 data = self.wopener(filename, 'r').read()
434 438
435 439 for mf, cmd in self.encodepats:
436 440 if mf(filename):
437 441 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
438 442 data = util.filter(data, cmd)
439 443 break
440 444
441 445 return data
442 446
443 447 def wwrite(self, filename, data, fd=None):
444 448 if self.decodepats == None:
445 449 l = []
446 450 for pat, cmd in self.ui.configitems("decode"):
447 451 mf = util.matcher(self.root, "", [pat], [], [])[1]
448 452 l.append((mf, cmd))
449 453 self.decodepats = l
450 454
451 455 for mf, cmd in self.decodepats:
452 456 if mf(filename):
453 457 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
454 458 data = util.filter(data, cmd)
455 459 break
456 460
457 461 if fd:
458 462 return fd.write(data)
459 463 return self.wopener(filename, 'w').write(data)
460 464
461 465 def transaction(self):
462 466 tr = self.transhandle
463 467 if tr != None and tr.running():
464 468 return tr.nest()
465 469
466 470 # save dirstate for rollback
467 471 try:
468 472 ds = self.opener("dirstate").read()
469 473 except IOError:
470 474 ds = ""
471 475 self.opener("journal.dirstate", "w").write(ds)
472 476
473 477 tr = transaction.transaction(self.ui.warn, self.sopener,
474 478 self.sjoin("journal"),
475 479 aftertrans(self.path))
476 480 self.transhandle = tr
477 481 return tr
478 482
479 483 def recover(self):
480 484 l = self.lock()
481 485 if os.path.exists(self.sjoin("journal")):
482 486 self.ui.status(_("rolling back interrupted transaction\n"))
483 487 transaction.rollback(self.sopener, self.sjoin("journal"))
484 488 self.reload()
485 489 return True
486 490 else:
487 491 self.ui.warn(_("no interrupted transaction available\n"))
488 492 return False
489 493
490 494 def rollback(self, wlock=None):
491 495 if not wlock:
492 496 wlock = self.wlock()
493 497 l = self.lock()
494 498 if os.path.exists(self.sjoin("undo")):
495 499 self.ui.status(_("rolling back last transaction\n"))
496 500 transaction.rollback(self.sopener, self.sjoin("undo"))
497 501 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
498 502 self.reload()
499 503 self.wreload()
500 504 else:
501 505 self.ui.warn(_("no rollback information available\n"))
502 506
503 507 def wreload(self):
504 508 self.dirstate.read()
505 509
506 510 def reload(self):
507 511 self.changelog.load()
508 512 self.manifest.load()
509 513 self.tagscache = None
510 514 self.nodetagscache = None
511 515
512 516 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
513 517 desc=None):
514 518 try:
515 519 l = lock.lock(lockname, 0, releasefn, desc=desc)
516 520 except lock.LockHeld, inst:
517 521 if not wait:
518 522 raise
519 523 self.ui.warn(_("waiting for lock on %s held by %r\n") %
520 524 (desc, inst.locker))
521 525 # default to 600 seconds timeout
522 526 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
523 527 releasefn, desc=desc)
524 528 if acquirefn:
525 529 acquirefn()
526 530 return l
527 531
528 532 def lock(self, wait=1):
529 533 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
530 534 desc=_('repository %s') % self.origroot)
531 535
532 536 def wlock(self, wait=1):
533 537 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
534 538 self.wreload,
535 539 desc=_('working directory of %s') % self.origroot)
536 540
537 541 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
538 542 """
539 543 commit an individual file as part of a larger transaction
540 544 """
541 545
542 546 t = self.wread(fn)
543 547 fl = self.file(fn)
544 548 fp1 = manifest1.get(fn, nullid)
545 549 fp2 = manifest2.get(fn, nullid)
546 550
547 551 meta = {}
548 552 cp = self.dirstate.copied(fn)
549 553 if cp:
550 554 meta["copy"] = cp
551 555 if not manifest2: # not a branch merge
552 556 meta["copyrev"] = hex(manifest1.get(cp, nullid))
553 557 fp2 = nullid
554 558 elif fp2 != nullid: # copied on remote side
555 559 meta["copyrev"] = hex(manifest1.get(cp, nullid))
556 560 elif fp1 != nullid: # copied on local side, reversed
557 561 meta["copyrev"] = hex(manifest2.get(cp))
558 562 fp2 = nullid
559 563 else: # directory rename
560 564 meta["copyrev"] = hex(manifest1.get(cp, nullid))
561 565 self.ui.debug(_(" %s: copy %s:%s\n") %
562 566 (fn, cp, meta["copyrev"]))
563 567 fp1 = nullid
564 568 elif fp2 != nullid:
565 569 # is one parent an ancestor of the other?
566 570 fpa = fl.ancestor(fp1, fp2)
567 571 if fpa == fp1:
568 572 fp1, fp2 = fp2, nullid
569 573 elif fpa == fp2:
570 574 fp2 = nullid
571 575
572 576 # is the file unmodified from the parent? report existing entry
573 577 if fp2 == nullid and not fl.cmp(fp1, t):
574 578 return fp1
575 579
576 580 changelist.append(fn)
577 581 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
578 582
579 583 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
580 584 if p1 is None:
581 585 p1, p2 = self.dirstate.parents()
582 586 return self.commit(files=files, text=text, user=user, date=date,
583 587 p1=p1, p2=p2, wlock=wlock)
584 588
585 589 def commit(self, files=None, text="", user=None, date=None,
586 590 match=util.always, force=False, lock=None, wlock=None,
587 591 force_editor=False, p1=None, p2=None, extra={}):
588 592
589 593 commit = []
590 594 remove = []
591 595 changed = []
592 596 use_dirstate = (p1 is None) # not rawcommit
593 597 extra = extra.copy()
594 598
595 599 if use_dirstate:
596 600 if files:
597 601 for f in files:
598 602 s = self.dirstate.state(f)
599 603 if s in 'nmai':
600 604 commit.append(f)
601 605 elif s == 'r':
602 606 remove.append(f)
603 607 else:
604 608 self.ui.warn(_("%s not tracked!\n") % f)
605 609 else:
606 610 changes = self.status(match=match)[:5]
607 611 modified, added, removed, deleted, unknown = changes
608 612 commit = modified + added
609 613 remove = removed
610 614 else:
611 615 commit = files
612 616
613 617 if use_dirstate:
614 618 p1, p2 = self.dirstate.parents()
615 619 update_dirstate = True
616 620 else:
617 621 p1, p2 = p1, p2 or nullid
618 622 update_dirstate = (self.dirstate.parents()[0] == p1)
619 623
620 624 c1 = self.changelog.read(p1)
621 625 c2 = self.changelog.read(p2)
622 626 m1 = self.manifest.read(c1[0]).copy()
623 627 m2 = self.manifest.read(c2[0])
624 628
625 629 if use_dirstate:
626 630 branchname = self.workingctx().branch()
627 631 else:
628 632 branchname = ""
629 633
630 634 if use_dirstate:
631 635 oldname = c1[5].get("branch", "")
632 636 if not commit and not remove and not force and p2 == nullid and \
633 637 branchname == oldname:
634 638 self.ui.status(_("nothing changed\n"))
635 639 return None
636 640
637 641 xp1 = hex(p1)
638 642 if p2 == nullid: xp2 = ''
639 643 else: xp2 = hex(p2)
640 644
641 645 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
642 646
643 647 if not wlock:
644 648 wlock = self.wlock()
645 649 if not lock:
646 650 lock = self.lock()
647 651 tr = self.transaction()
648 652
649 653 # check in files
650 654 new = {}
651 655 linkrev = self.changelog.count()
652 656 commit.sort()
653 657 for f in commit:
654 658 self.ui.note(f + "\n")
655 659 try:
656 660 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
657 661 m1.set(f, util.is_exec(self.wjoin(f), m1.execf(f)))
658 662 except IOError:
659 663 if use_dirstate:
660 664 self.ui.warn(_("trouble committing %s!\n") % f)
661 665 raise
662 666 else:
663 667 remove.append(f)
664 668
665 669 # update manifest
666 670 m1.update(new)
667 671 remove.sort()
668 672
669 673 for f in remove:
670 674 if f in m1:
671 675 del m1[f]
672 676 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, remove))
673 677
674 678 # add changeset
675 679 new = new.keys()
676 680 new.sort()
677 681
678 682 user = user or self.ui.username()
679 683 if not text or force_editor:
680 684 edittext = []
681 685 if text:
682 686 edittext.append(text)
683 687 edittext.append("")
684 688 edittext.append("HG: user: %s" % user)
685 689 if p2 != nullid:
686 690 edittext.append("HG: branch merge")
687 691 edittext.extend(["HG: changed %s" % f for f in changed])
688 692 edittext.extend(["HG: removed %s" % f for f in remove])
689 693 if not changed and not remove:
690 694 edittext.append("HG: no files changed")
691 695 edittext.append("")
692 696 # run editor in the repository root
693 697 olddir = os.getcwd()
694 698 os.chdir(self.root)
695 699 text = self.ui.edit("\n".join(edittext), user)
696 700 os.chdir(olddir)
697 701
698 702 lines = [line.rstrip() for line in text.rstrip().splitlines()]
699 703 while lines and not lines[0]:
700 704 del lines[0]
701 705 if not lines:
702 706 return None
703 707 text = '\n'.join(lines)
704 708 if branchname:
705 709 extra["branch"] = branchname
706 710 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2,
707 711 user, date, extra)
708 712 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
709 713 parent2=xp2)
710 714 tr.close()
711 715
712 716 if use_dirstate or update_dirstate:
713 717 self.dirstate.setparents(n)
714 718 if use_dirstate:
715 719 self.dirstate.update(new, "n")
716 720 self.dirstate.forget(remove)
717 721
718 722 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
719 723 return n
720 724
721 725 def walk(self, node=None, files=[], match=util.always, badmatch=None):
722 726 '''
723 727 walk recursively through the directory tree or a given
724 728 changeset, finding all files matched by the match
725 729 function
726 730
727 731 results are yielded in a tuple (src, filename), where src
728 732 is one of:
729 733 'f' the file was found in the directory tree
730 734 'm' the file was only in the dirstate and not in the tree
731 735 'b' file was not found and matched badmatch
732 736 '''
733 737
734 738 if node:
735 739 fdict = dict.fromkeys(files)
736 740 for fn in self.manifest.read(self.changelog.read(node)[0]):
737 741 for ffn in fdict:
738 742 # match if the file is the exact name or a directory
739 743 if ffn == fn or fn.startswith("%s/" % ffn):
740 744 del fdict[ffn]
741 745 break
742 746 if match(fn):
743 747 yield 'm', fn
744 748 for fn in fdict:
745 749 if badmatch and badmatch(fn):
746 750 if match(fn):
747 751 yield 'b', fn
748 752 else:
749 753 self.ui.warn(_('%s: No such file in rev %s\n') % (
750 754 util.pathto(self.getcwd(), fn), short(node)))
751 755 else:
752 756 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
753 757 yield src, fn
754 758
755 759 def status(self, node1=None, node2=None, files=[], match=util.always,
756 760 wlock=None, list_ignored=False, list_clean=False):
757 761 """return status of files between two nodes or node and working directory
758 762
759 763 If node1 is None, use the first dirstate parent instead.
760 764 If node2 is None, compare node1 with working directory.
761 765 """
762 766
763 767 def fcmp(fn, mf):
764 768 t1 = self.wread(fn)
765 769 return self.file(fn).cmp(mf.get(fn, nullid), t1)
766 770
767 771 def mfmatches(node):
768 772 change = self.changelog.read(node)
769 773 mf = self.manifest.read(change[0]).copy()
770 774 for fn in mf.keys():
771 775 if not match(fn):
772 776 del mf[fn]
773 777 return mf
774 778
775 779 modified, added, removed, deleted, unknown = [], [], [], [], []
776 780 ignored, clean = [], []
777 781
778 782 compareworking = False
779 783 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
780 784 compareworking = True
781 785
782 786 if not compareworking:
783 787 # read the manifest from node1 before the manifest from node2,
784 788 # so that we'll hit the manifest cache if we're going through
785 789 # all the revisions in parent->child order.
786 790 mf1 = mfmatches(node1)
787 791
788 792 # are we comparing the working directory?
789 793 if not node2:
790 794 if not wlock:
791 795 try:
792 796 wlock = self.wlock(wait=0)
793 797 except lock.LockException:
794 798 wlock = None
795 799 (lookup, modified, added, removed, deleted, unknown,
796 800 ignored, clean) = self.dirstate.status(files, match,
797 801 list_ignored, list_clean)
798 802
799 803 # are we comparing working dir against its parent?
800 804 if compareworking:
801 805 if lookup:
802 806 # do a full compare of any files that might have changed
803 807 mf2 = mfmatches(self.dirstate.parents()[0])
804 808 for f in lookup:
805 809 if fcmp(f, mf2):
806 810 modified.append(f)
807 811 else:
808 812 clean.append(f)
809 813 if wlock is not None:
810 814 self.dirstate.update([f], "n")
811 815 else:
812 816 # we are comparing working dir against non-parent
813 817 # generate a pseudo-manifest for the working dir
814 818 # XXX: create it in dirstate.py ?
815 819 mf2 = mfmatches(self.dirstate.parents()[0])
816 820 for f in lookup + modified + added:
817 821 mf2[f] = ""
818 822 mf2.set(f, execf=util.is_exec(self.wjoin(f), mf2.execf(f)))
819 823 for f in removed:
820 824 if f in mf2:
821 825 del mf2[f]
822 826 else:
823 827 # we are comparing two revisions
824 828 mf2 = mfmatches(node2)
825 829
826 830 if not compareworking:
827 831 # flush lists from dirstate before comparing manifests
828 832 modified, added, clean = [], [], []
829 833
830 834 # make sure to sort the files so we talk to the disk in a
831 835 # reasonable order
832 836 mf2keys = mf2.keys()
833 837 mf2keys.sort()
834 838 for fn in mf2keys:
835 839 if mf1.has_key(fn):
836 840 if mf1.flags(fn) != mf2.flags(fn) or \
837 841 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
838 842 modified.append(fn)
839 843 elif list_clean:
840 844 clean.append(fn)
841 845 del mf1[fn]
842 846 else:
843 847 added.append(fn)
844 848
845 849 removed = mf1.keys()
846 850
847 851 # sort and return results:
848 852 for l in modified, added, removed, deleted, unknown, ignored, clean:
849 853 l.sort()
850 854 return (modified, added, removed, deleted, unknown, ignored, clean)
851 855
852 856 def add(self, list, wlock=None):
853 857 if not wlock:
854 858 wlock = self.wlock()
855 859 for f in list:
856 860 p = self.wjoin(f)
857 861 if not os.path.exists(p):
858 862 self.ui.warn(_("%s does not exist!\n") % f)
859 863 elif not os.path.isfile(p):
860 864 self.ui.warn(_("%s not added: only files supported currently\n")
861 865 % f)
862 866 elif self.dirstate.state(f) in 'an':
863 867 self.ui.warn(_("%s already tracked!\n") % f)
864 868 else:
865 869 self.dirstate.update([f], "a")
866 870
867 871 def forget(self, list, wlock=None):
868 872 if not wlock:
869 873 wlock = self.wlock()
870 874 for f in list:
871 875 if self.dirstate.state(f) not in 'ai':
872 876 self.ui.warn(_("%s not added!\n") % f)
873 877 else:
874 878 self.dirstate.forget([f])
875 879
876 880 def remove(self, list, unlink=False, wlock=None):
877 881 if unlink:
878 882 for f in list:
879 883 try:
880 884 util.unlink(self.wjoin(f))
881 885 except OSError, inst:
882 886 if inst.errno != errno.ENOENT:
883 887 raise
884 888 if not wlock:
885 889 wlock = self.wlock()
886 890 for f in list:
887 891 p = self.wjoin(f)
888 892 if os.path.exists(p):
889 893 self.ui.warn(_("%s still exists!\n") % f)
890 894 elif self.dirstate.state(f) == 'a':
891 895 self.dirstate.forget([f])
892 896 elif f not in self.dirstate:
893 897 self.ui.warn(_("%s not tracked!\n") % f)
894 898 else:
895 899 self.dirstate.update([f], "r")
896 900
897 901 def undelete(self, list, wlock=None):
898 902 p = self.dirstate.parents()[0]
899 903 mn = self.changelog.read(p)[0]
900 904 m = self.manifest.read(mn)
901 905 if not wlock:
902 906 wlock = self.wlock()
903 907 for f in list:
904 908 if self.dirstate.state(f) not in "r":
905 909 self.ui.warn(_("%s not removed!\n") % f)
906 910 else:
907 911 t = self.file(f).read(m[f])
908 912 self.wwrite(f, t)
909 913 util.set_exec(self.wjoin(f), m.execf(f))
910 914 self.dirstate.update([f], "n")
911 915
912 916 def copy(self, source, dest, wlock=None):
913 917 p = self.wjoin(dest)
914 918 if not os.path.exists(p):
915 919 self.ui.warn(_("%s does not exist!\n") % dest)
916 920 elif not os.path.isfile(p):
917 921 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
918 922 else:
919 923 if not wlock:
920 924 wlock = self.wlock()
921 925 if self.dirstate.state(dest) == '?':
922 926 self.dirstate.update([dest], "a")
923 927 self.dirstate.copy(source, dest)
924 928
925 929 def heads(self, start=None):
926 930 heads = self.changelog.heads(start)
927 931 # sort the output in rev descending order
928 932 heads = [(-self.changelog.rev(h), h) for h in heads]
929 933 heads.sort()
930 934 return [n for (r, n) in heads]
931 935
932 936 # branchlookup returns a dict giving a list of branches for
933 937 # each head. A branch is defined as the tag of a node or
934 938 # the branch of the node's parents. If a node has multiple
935 939 # branch tags, tags are eliminated if they are visible from other
936 940 # branch tags.
937 941 #
938 942 # So, for this graph: a->b->c->d->e
939 943 # \ /
940 944 # aa -----/
941 945 # a has tag 2.6.12
942 946 # d has tag 2.6.13
943 947 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
944 948 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
945 949 # from the list.
946 950 #
947 951 # It is possible that more than one head will have the same branch tag.
948 952 # callers need to check the result for multiple heads under the same
949 953 # branch tag if that is a problem for them (ie checkout of a specific
950 954 # branch).
951 955 #
952 956 # passing in a specific branch will limit the depth of the search
953 957 # through the parents. It won't limit the branches returned in the
954 958 # result though.
955 959 def branchlookup(self, heads=None, branch=None):
956 960 if not heads:
957 961 heads = self.heads()
958 962 headt = [ h for h in heads ]
959 963 chlog = self.changelog
960 964 branches = {}
961 965 merges = []
962 966 seenmerge = {}
963 967
964 968 # traverse the tree once for each head, recording in the branches
965 969 # dict which tags are visible from this head. The branches
966 970 # dict also records which tags are visible from each tag
967 971 # while we traverse.
968 972 while headt or merges:
969 973 if merges:
970 974 n, found = merges.pop()
971 975 visit = [n]
972 976 else:
973 977 h = headt.pop()
974 978 visit = [h]
975 979 found = [h]
976 980 seen = {}
977 981 while visit:
978 982 n = visit.pop()
979 983 if n in seen:
980 984 continue
981 985 pp = chlog.parents(n)
982 986 tags = self.nodetags(n)
983 987 if tags:
984 988 for x in tags:
985 989 if x == 'tip':
986 990 continue
987 991 for f in found:
988 992 branches.setdefault(f, {})[n] = 1
989 993 branches.setdefault(n, {})[n] = 1
990 994 break
991 995 if n not in found:
992 996 found.append(n)
993 997 if branch in tags:
994 998 continue
995 999 seen[n] = 1
996 1000 if pp[1] != nullid and n not in seenmerge:
997 1001 merges.append((pp[1], [x for x in found]))
998 1002 seenmerge[n] = 1
999 1003 if pp[0] != nullid:
1000 1004 visit.append(pp[0])
1001 1005 # traverse the branches dict, eliminating branch tags from each
1002 1006 # head that are visible from another branch tag for that head.
1003 1007 out = {}
1004 1008 viscache = {}
1005 1009 for h in heads:
1006 1010 def visible(node):
1007 1011 if node in viscache:
1008 1012 return viscache[node]
1009 1013 ret = {}
1010 1014 visit = [node]
1011 1015 while visit:
1012 1016 x = visit.pop()
1013 1017 if x in viscache:
1014 1018 ret.update(viscache[x])
1015 1019 elif x not in ret:
1016 1020 ret[x] = 1
1017 1021 if x in branches:
1018 1022 visit[len(visit):] = branches[x].keys()
1019 1023 viscache[node] = ret
1020 1024 return ret
1021 1025 if h not in branches:
1022 1026 continue
1023 1027 # O(n^2), but somewhat limited. This only searches the
1024 1028 # tags visible from a specific head, not all the tags in the
1025 1029 # whole repo.
1026 1030 for b in branches[h]:
1027 1031 vis = False
1028 1032 for bb in branches[h].keys():
1029 1033 if b != bb:
1030 1034 if b in visible(bb):
1031 1035 vis = True
1032 1036 break
1033 1037 if not vis:
1034 1038 l = out.setdefault(h, [])
1035 1039 l[len(l):] = self.nodetags(b)
1036 1040 return out
1037 1041
1038 1042 def branches(self, nodes):
1039 1043 if not nodes:
1040 1044 nodes = [self.changelog.tip()]
1041 1045 b = []
1042 1046 for n in nodes:
1043 1047 t = n
1044 1048 while 1:
1045 1049 p = self.changelog.parents(n)
1046 1050 if p[1] != nullid or p[0] == nullid:
1047 1051 b.append((t, n, p[0], p[1]))
1048 1052 break
1049 1053 n = p[0]
1050 1054 return b
1051 1055
1052 1056 def between(self, pairs):
1053 1057 r = []
1054 1058
1055 1059 for top, bottom in pairs:
1056 1060 n, l, i = top, [], 0
1057 1061 f = 1
1058 1062
1059 1063 while n != bottom:
1060 1064 p = self.changelog.parents(n)[0]
1061 1065 if i == f:
1062 1066 l.append(n)
1063 1067 f = f * 2
1064 1068 n = p
1065 1069 i += 1
1066 1070
1067 1071 r.append(l)
1068 1072
1069 1073 return r
1070 1074
1071 1075 def findincoming(self, remote, base=None, heads=None, force=False):
1072 1076 """Return list of roots of the subsets of missing nodes from remote
1073 1077
1074 1078 If base dict is specified, assume that these nodes and their parents
1075 1079 exist on the remote side and that no child of a node of base exists
1076 1080 in both remote and self.
1077 1081 Furthermore, base will be updated to include the nodes that exist
1078 1082 in self and remote but whose children exist in neither.
1079 1083 If a list of heads is specified, return only nodes which are heads
1080 1084 or ancestors of these heads.
1081 1085
1082 1086 All the ancestors of base are in self and in remote.
1083 1087 All the descendants of the list returned are missing in self.
1084 1088 (and so we know that the rest of the nodes are missing in remote, see
1085 1089 outgoing)
1086 1090 """
1087 1091 m = self.changelog.nodemap
1088 1092 search = []
1089 1093 fetch = {}
1090 1094 seen = {}
1091 1095 seenbranch = {}
1092 1096 if base == None:
1093 1097 base = {}
1094 1098
1095 1099 if not heads:
1096 1100 heads = remote.heads()
1097 1101
1098 1102 if self.changelog.tip() == nullid:
1099 1103 base[nullid] = 1
1100 1104 if heads != [nullid]:
1101 1105 return [nullid]
1102 1106 return []
1103 1107
1104 1108 # assume we're closer to the tip than the root
1105 1109 # and start by examining the heads
1106 1110 self.ui.status(_("searching for changes\n"))
1107 1111
1108 1112 unknown = []
1109 1113 for h in heads:
1110 1114 if h not in m:
1111 1115 unknown.append(h)
1112 1116 else:
1113 1117 base[h] = 1
1114 1118
1115 1119 if not unknown:
1116 1120 return []
1117 1121
1118 1122 req = dict.fromkeys(unknown)
1119 1123 reqcnt = 0
1120 1124
1121 1125 # search through remote branches
1122 1126 # a 'branch' here is a linear segment of history, with four parts:
1123 1127 # head, root, first parent, second parent
1124 1128 # (a branch always has two parents (or none) by definition)
1125 1129 unknown = remote.branches(unknown)
1126 1130 while unknown:
1127 1131 r = []
1128 1132 while unknown:
1129 1133 n = unknown.pop(0)
1130 1134 if n[0] in seen:
1131 1135 continue
1132 1136
1133 1137 self.ui.debug(_("examining %s:%s\n")
1134 1138 % (short(n[0]), short(n[1])))
1135 1139 if n[0] == nullid: # found the end of the branch
1136 1140 pass
1137 1141 elif n in seenbranch:
1138 1142 self.ui.debug(_("branch already found\n"))
1139 1143 continue
1140 1144 elif n[1] and n[1] in m: # do we know the base?
1141 1145 self.ui.debug(_("found incomplete branch %s:%s\n")
1142 1146 % (short(n[0]), short(n[1])))
1143 1147 search.append(n) # schedule branch range for scanning
1144 1148 seenbranch[n] = 1
1145 1149 else:
1146 1150 if n[1] not in seen and n[1] not in fetch:
1147 1151 if n[2] in m and n[3] in m:
1148 1152 self.ui.debug(_("found new changeset %s\n") %
1149 1153 short(n[1]))
1150 1154 fetch[n[1]] = 1 # earliest unknown
1151 1155 for p in n[2:4]:
1152 1156 if p in m:
1153 1157 base[p] = 1 # latest known
1154 1158
1155 1159 for p in n[2:4]:
1156 1160 if p not in req and p not in m:
1157 1161 r.append(p)
1158 1162 req[p] = 1
1159 1163 seen[n[0]] = 1
1160 1164
1161 1165 if r:
1162 1166 reqcnt += 1
1163 1167 self.ui.debug(_("request %d: %s\n") %
1164 1168 (reqcnt, " ".join(map(short, r))))
1165 1169 for p in xrange(0, len(r), 10):
1166 1170 for b in remote.branches(r[p:p+10]):
1167 1171 self.ui.debug(_("received %s:%s\n") %
1168 1172 (short(b[0]), short(b[1])))
1169 1173 unknown.append(b)
1170 1174
1171 1175 # do binary search on the branches we found
1172 1176 while search:
1173 1177 n = search.pop(0)
1174 1178 reqcnt += 1
1175 1179 l = remote.between([(n[0], n[1])])[0]
1176 1180 l.append(n[1])
1177 1181 p = n[0]
1178 1182 f = 1
1179 1183 for i in l:
1180 1184 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1181 1185 if i in m:
1182 1186 if f <= 2:
1183 1187 self.ui.debug(_("found new branch changeset %s\n") %
1184 1188 short(p))
1185 1189 fetch[p] = 1
1186 1190 base[i] = 1
1187 1191 else:
1188 1192 self.ui.debug(_("narrowed branch search to %s:%s\n")
1189 1193 % (short(p), short(i)))
1190 1194 search.append((p, i))
1191 1195 break
1192 1196 p, f = i, f * 2
1193 1197
1194 1198 # sanity check our fetch list
1195 1199 for f in fetch.keys():
1196 1200 if f in m:
1197 1201 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1198 1202
1199 1203 if base.keys() == [nullid]:
1200 1204 if force:
1201 1205 self.ui.warn(_("warning: repository is unrelated\n"))
1202 1206 else:
1203 1207 raise util.Abort(_("repository is unrelated"))
1204 1208
1205 1209 self.ui.debug(_("found new changesets starting at ") +
1206 1210 " ".join([short(f) for f in fetch]) + "\n")
1207 1211
1208 1212 self.ui.debug(_("%d total queries\n") % reqcnt)
1209 1213
1210 1214 return fetch.keys()
1211 1215
1212 1216 def findoutgoing(self, remote, base=None, heads=None, force=False):
1213 1217 """Return list of nodes that are roots of subsets not in remote
1214 1218
1215 1219 If base dict is specified, assume that these nodes and their parents
1216 1220 exist on the remote side.
1217 1221 If a list of heads is specified, return only nodes which are heads
1218 1222 or ancestors of these heads, and return a second element which
1219 1223 contains all remote heads which get new children.
1220 1224 """
1221 1225 if base == None:
1222 1226 base = {}
1223 1227 self.findincoming(remote, base, heads, force=force)
1224 1228
1225 1229 self.ui.debug(_("common changesets up to ")
1226 1230 + " ".join(map(short, base.keys())) + "\n")
1227 1231
1228 1232 remain = dict.fromkeys(self.changelog.nodemap)
1229 1233
1230 1234 # prune everything remote has from the tree
1231 1235 del remain[nullid]
1232 1236 remove = base.keys()
1233 1237 while remove:
1234 1238 n = remove.pop(0)
1235 1239 if n in remain:
1236 1240 del remain[n]
1237 1241 for p in self.changelog.parents(n):
1238 1242 remove.append(p)
1239 1243
1240 1244 # find every node whose parents have been pruned
1241 1245 subset = []
1242 1246 # find every remote head that will get new children
1243 1247 updated_heads = {}
1244 1248 for n in remain:
1245 1249 p1, p2 = self.changelog.parents(n)
1246 1250 if p1 not in remain and p2 not in remain:
1247 1251 subset.append(n)
1248 1252 if heads:
1249 1253 if p1 in heads:
1250 1254 updated_heads[p1] = True
1251 1255 if p2 in heads:
1252 1256 updated_heads[p2] = True
1253 1257
1254 1258 # this is the set of all roots we have to push
1255 1259 if heads:
1256 1260 return subset, updated_heads.keys()
1257 1261 else:
1258 1262 return subset
1259 1263
1260 1264 def pull(self, remote, heads=None, force=False, lock=None):
1261 1265 mylock = False
1262 1266 if not lock:
1263 1267 lock = self.lock()
1264 1268 mylock = True
1265 1269
1266 1270 try:
1267 1271 fetch = self.findincoming(remote, force=force)
1268 1272 if fetch == [nullid]:
1269 1273 self.ui.status(_("requesting all changes\n"))
1270 1274
1271 1275 if not fetch:
1272 1276 self.ui.status(_("no changes found\n"))
1273 1277 return 0
1274 1278
1275 1279 if heads is None:
1276 1280 cg = remote.changegroup(fetch, 'pull')
1277 1281 else:
1278 1282 if 'changegroupsubset' not in remote.capabilities:
1279 1283 raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1280 1284 cg = remote.changegroupsubset(fetch, heads, 'pull')
1281 1285 return self.addchangegroup(cg, 'pull', remote.url())
1282 1286 finally:
1283 1287 if mylock:
1284 1288 lock.release()
1285 1289
1286 1290 def push(self, remote, force=False, revs=None):
1287 1291 # there are two ways to push to remote repo:
1288 1292 #
1289 1293 # addchangegroup assumes local user can lock remote
1290 1294 # repo (local filesystem, old ssh servers).
1291 1295 #
1292 1296 # unbundle assumes local user cannot lock remote repo (new ssh
1293 1297 # servers, http servers).
1294 1298
1295 1299 if remote.capable('unbundle'):
1296 1300 return self.push_unbundle(remote, force, revs)
1297 1301 return self.push_addchangegroup(remote, force, revs)
1298 1302
1299 1303 def prepush(self, remote, force, revs):
1300 1304 base = {}
1301 1305 remote_heads = remote.heads()
1302 1306 inc = self.findincoming(remote, base, remote_heads, force=force)
1303 1307
1304 1308 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1305 1309 if revs is not None:
1306 1310 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1307 1311 else:
1308 1312 bases, heads = update, self.changelog.heads()
1309 1313
1310 1314 if not bases:
1311 1315 self.ui.status(_("no changes found\n"))
1312 1316 return None, 1
1313 1317 elif not force:
1314 1318 # check if we're creating new remote heads
1315 1319 # to be a remote head after push, node must be either
1316 1320 # - unknown locally
1317 1321 # - a local outgoing head descended from update
1318 1322 # - a remote head that's known locally and not
1319 1323 # ancestral to an outgoing head
1320 1324
1321 1325 warn = 0
1322 1326
1323 1327 if remote_heads == [nullid]:
1324 1328 warn = 0
1325 1329 elif not revs and len(heads) > len(remote_heads):
1326 1330 warn = 1
1327 1331 else:
1328 1332 newheads = list(heads)
1329 1333 for r in remote_heads:
1330 1334 if r in self.changelog.nodemap:
1331 1335 desc = self.changelog.heads(r)
1332 1336 l = [h for h in heads if h in desc]
1333 1337 if not l:
1334 1338 newheads.append(r)
1335 1339 else:
1336 1340 newheads.append(r)
1337 1341 if len(newheads) > len(remote_heads):
1338 1342 warn = 1
1339 1343
1340 1344 if warn:
1341 1345 self.ui.warn(_("abort: push creates new remote branches!\n"))
1342 1346 self.ui.status(_("(did you forget to merge?"
1343 1347 " use push -f to force)\n"))
1344 1348 return None, 1
1345 1349 elif inc:
1346 1350 self.ui.warn(_("note: unsynced remote changes!\n"))
1347 1351
1348 1352
1349 1353 if revs is None:
1350 1354 cg = self.changegroup(update, 'push')
1351 1355 else:
1352 1356 cg = self.changegroupsubset(update, revs, 'push')
1353 1357 return cg, remote_heads
1354 1358
1355 1359 def push_addchangegroup(self, remote, force, revs):
1356 1360 lock = remote.lock()
1357 1361
1358 1362 ret = self.prepush(remote, force, revs)
1359 1363 if ret[0] is not None:
1360 1364 cg, remote_heads = ret
1361 1365 return remote.addchangegroup(cg, 'push', self.url())
1362 1366 return ret[1]
1363 1367
1364 1368 def push_unbundle(self, remote, force, revs):
1365 1369 # local repo finds heads on server, finds out what revs it
1366 1370 # must push. once revs transferred, if server finds it has
1367 1371 # different heads (someone else won commit/push race), server
1368 1372 # aborts.
1369 1373
1370 1374 ret = self.prepush(remote, force, revs)
1371 1375 if ret[0] is not None:
1372 1376 cg, remote_heads = ret
1373 1377 if force: remote_heads = ['force']
1374 1378 return remote.unbundle(cg, remote_heads, 'push')
1375 1379 return ret[1]
1376 1380
1377 1381 def changegroupinfo(self, nodes):
1378 1382 self.ui.note(_("%d changesets found\n") % len(nodes))
1379 1383 if self.ui.debugflag:
1380 1384 self.ui.debug(_("List of changesets:\n"))
1381 1385 for node in nodes:
1382 1386 self.ui.debug("%s\n" % hex(node))
1383 1387
1384 1388 def changegroupsubset(self, bases, heads, source):
1385 1389 """This function generates a changegroup consisting of all the nodes
1386 1390 that are descendants of any of the bases, and ancestors of any of
1387 1391 the heads.
1388 1392
1389 1393 It is fairly complex as determining which filenodes and which
1390 1394 manifest nodes need to be included for the changeset to be complete
1391 1395 is non-trivial.
1392 1396
1393 1397 Another wrinkle is doing the reverse, figuring out which changeset in
1394 1398 the changegroup a particular filenode or manifestnode belongs to."""
1395 1399
1396 1400 self.hook('preoutgoing', throw=True, source=source)
1397 1401
1398 1402 # Set up some initial variables
1399 1403 # Make it easy to refer to self.changelog
1400 1404 cl = self.changelog
1401 1405 # msng is short for missing - compute the list of changesets in this
1402 1406 # changegroup.
1403 1407 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1404 1408 self.changegroupinfo(msng_cl_lst)
1405 1409 # Some bases may turn out to be superfluous, and some heads may be
1406 1410 # too. nodesbetween will return the minimal set of bases and heads
1407 1411 # necessary to re-create the changegroup.
1408 1412
1409 1413 # Known heads are the list of heads that it is assumed the recipient
1410 1414 # of this changegroup will know about.
1411 1415 knownheads = {}
1412 1416 # We assume that all parents of bases are known heads.
1413 1417 for n in bases:
1414 1418 for p in cl.parents(n):
1415 1419 if p != nullid:
1416 1420 knownheads[p] = 1
1417 1421 knownheads = knownheads.keys()
1418 1422 if knownheads:
1419 1423 # Now that we know what heads are known, we can compute which
1420 1424 # changesets are known. The recipient must know about all
1421 1425 # changesets required to reach the known heads from the null
1422 1426 # changeset.
1423 1427 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1424 1428 junk = None
1425 1429 # Transform the list into an ersatz set.
1426 1430 has_cl_set = dict.fromkeys(has_cl_set)
1427 1431 else:
1428 1432 # If there were no known heads, the recipient cannot be assumed to
1429 1433 # know about any changesets.
1430 1434 has_cl_set = {}
1431 1435
1432 1436 # Make it easy to refer to self.manifest
1433 1437 mnfst = self.manifest
1434 1438 # We don't know which manifests are missing yet
1435 1439 msng_mnfst_set = {}
1436 1440 # Nor do we know which filenodes are missing.
1437 1441 msng_filenode_set = {}
1438 1442
1439 1443 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1440 1444 junk = None
1441 1445
1442 1446 # A changeset always belongs to itself, so the changenode lookup
1443 1447 # function for a changenode is identity.
1444 1448 def identity(x):
1445 1449 return x
1446 1450
1447 1451 # A function generating function. Sets up an environment for the
1448 1452 # inner function.
1449 1453 def cmp_by_rev_func(revlog):
1450 1454 # Compare two nodes by their revision number in the environment's
1451 1455 # revision history. Since the revision number both represents the
1452 1456 # most efficient order to read the nodes in, and represents a
1453 1457 # topological sorting of the nodes, this function is often useful.
1454 1458 def cmp_by_rev(a, b):
1455 1459 return cmp(revlog.rev(a), revlog.rev(b))
1456 1460 return cmp_by_rev
1457 1461
1458 1462 # If we determine that a particular file or manifest node must be a
1459 1463 # node that the recipient of the changegroup will already have, we can
1460 1464 # also assume the recipient will have all the parents. This function
1461 1465 # prunes them from the set of missing nodes.
1462 1466 def prune_parents(revlog, hasset, msngset):
1463 1467 haslst = hasset.keys()
1464 1468 haslst.sort(cmp_by_rev_func(revlog))
1465 1469 for node in haslst:
1466 1470 parentlst = [p for p in revlog.parents(node) if p != nullid]
1467 1471 while parentlst:
1468 1472 n = parentlst.pop()
1469 1473 if n not in hasset:
1470 1474 hasset[n] = 1
1471 1475 p = [p for p in revlog.parents(n) if p != nullid]
1472 1476 parentlst.extend(p)
1473 1477 for n in hasset:
1474 1478 msngset.pop(n, None)
1475 1479
1476 1480 # This is a function generating function used to set up an environment
1477 1481 # for the inner function to execute in.
1478 1482 def manifest_and_file_collector(changedfileset):
1479 1483 # This is an information gathering function that gathers
1480 1484 # information from each changeset node that goes out as part of
1481 1485 # the changegroup. The information gathered is a list of which
1482 1486 # manifest nodes are potentially required (the recipient may
1483 1487 # already have them) and the total list of all files which were
1484 1488 # changed in any changeset in the changegroup.
1485 1489 #
1486 1490 # We also remember the first changenode we saw any manifest
1487 1491 # referenced by so we can later determine which changenode 'owns'
1488 1492 # the manifest.
1489 1493 def collect_manifests_and_files(clnode):
1490 1494 c = cl.read(clnode)
1491 1495 for f in c[3]:
1492 1496 # This is to make sure we only have one instance of each
1493 1497 # filename string for each filename.
1494 1498 changedfileset.setdefault(f, f)
1495 1499 msng_mnfst_set.setdefault(c[0], clnode)
1496 1500 return collect_manifests_and_files
1497 1501
1498 1502 # Figure out which manifest nodes (of the ones we think might be part
1499 1503 # of the changegroup) the recipient must know about and remove them
1500 1504 # from the changegroup.
1501 1505 def prune_manifests():
1502 1506 has_mnfst_set = {}
1503 1507 for n in msng_mnfst_set:
1504 1508 # If a 'missing' manifest thinks it belongs to a changenode
1505 1509 # the recipient is assumed to have, obviously the recipient
1506 1510 # must have that manifest.
1507 1511 linknode = cl.node(mnfst.linkrev(n))
1508 1512 if linknode in has_cl_set:
1509 1513 has_mnfst_set[n] = 1
1510 1514 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1511 1515
1512 1516 # Use the information collected in collect_manifests_and_files to say
1513 1517 # which changenode any manifestnode belongs to.
1514 1518 def lookup_manifest_link(mnfstnode):
1515 1519 return msng_mnfst_set[mnfstnode]
1516 1520
1517 1521 # A function generating function that sets up the initial environment
1518 1522 # for the inner function.
1519 1523 def filenode_collector(changedfiles):
1520 1524 next_rev = [0]
1521 1525 # This gathers information from each manifestnode included in the
1522 1526 # changegroup about which filenodes the manifest node references
1523 1527 # so we can include those in the changegroup too.
1524 1528 #
1525 1529 # It also remembers which changenode each filenode belongs to. It
1526 1530 # does this by assuming a filenode belongs to the changenode that
1527 1531 # the first manifest referencing it belongs to.
1528 1532 def collect_msng_filenodes(mnfstnode):
1529 1533 r = mnfst.rev(mnfstnode)
1530 1534 if r == next_rev[0]:
1531 1535 # If the last rev we looked at was the one just previous,
1532 1536 # we only need to see a diff.
1533 1537 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1534 1538 # For each line in the delta
1535 1539 for dline in delta.splitlines():
1536 1540 # get the filename and filenode for that line
1537 1541 f, fnode = dline.split('\0')
1538 1542 fnode = bin(fnode[:40])
1539 1543 f = changedfiles.get(f, None)
1540 1544 # And if the file is in the list of files we care
1541 1545 # about.
1542 1546 if f is not None:
1543 1547 # Get the changenode this manifest belongs to
1544 1548 clnode = msng_mnfst_set[mnfstnode]
1545 1549 # Create the set of filenodes for the file if
1546 1550 # there isn't one already.
1547 1551 ndset = msng_filenode_set.setdefault(f, {})
1548 1552 # And set the filenode's changelog node to the
1549 1553 # manifest's if it hasn't been set already.
1550 1554 ndset.setdefault(fnode, clnode)
1551 1555 else:
1552 1556 # Otherwise we need a full manifest.
1553 1557 m = mnfst.read(mnfstnode)
1554 1558 # For every file we care about.
1555 1559 for f in changedfiles:
1556 1560 fnode = m.get(f, None)
1557 1561 # If it's in the manifest
1558 1562 if fnode is not None:
1559 1563 # See comments above.
1560 1564 clnode = msng_mnfst_set[mnfstnode]
1561 1565 ndset = msng_filenode_set.setdefault(f, {})
1562 1566 ndset.setdefault(fnode, clnode)
1563 1567 # Remember the revision we hope to see next.
1564 1568 next_rev[0] = r + 1
1565 1569 return collect_msng_filenodes
1566 1570
1567 1571 # We have a list of filenodes we think we need for a file, let's remove
1568 1572 # all those we know the recipient must have.
1569 1573 def prune_filenodes(f, filerevlog):
1570 1574 msngset = msng_filenode_set[f]
1571 1575 hasset = {}
1572 1576 # If a 'missing' filenode thinks it belongs to a changenode we
1573 1577 # assume the recipient must have, then the recipient must have
1574 1578 # that filenode.
1575 1579 for n in msngset:
1576 1580 clnode = cl.node(filerevlog.linkrev(n))
1577 1581 if clnode in has_cl_set:
1578 1582 hasset[n] = 1
1579 1583 prune_parents(filerevlog, hasset, msngset)
1580 1584
1581 1585 # A function generating function that sets up a context for the
1582 1586 # inner function.
1583 1587 def lookup_filenode_link_func(fname):
1584 1588 msngset = msng_filenode_set[fname]
1585 1589 # Lookup the changenode the filenode belongs to.
1586 1590 def lookup_filenode_link(fnode):
1587 1591 return msngset[fnode]
1588 1592 return lookup_filenode_link
1589 1593
1590 1594 # Now that we have all these utility functions to help out and
1591 1595 # logically divide up the task, generate the group.
1592 1596 def gengroup():
1593 1597 # The set of changed files starts empty.
1594 1598 changedfiles = {}
1595 1599 # Create a changenode group generator that will call our functions
1596 1600 # back to lookup the owning changenode and collect information.
1597 1601 group = cl.group(msng_cl_lst, identity,
1598 1602 manifest_and_file_collector(changedfiles))
1599 1603 for chnk in group:
1600 1604 yield chnk
1601 1605
1602 1606 # The list of manifests has been collected by the generator
1603 1607 # calling our functions back.
1604 1608 prune_manifests()
1605 1609 msng_mnfst_lst = msng_mnfst_set.keys()
1606 1610 # Sort the manifestnodes by revision number.
1607 1611 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1608 1612 # Create a generator for the manifestnodes that calls our lookup
1609 1613 # and data collection functions back.
1610 1614 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1611 1615 filenode_collector(changedfiles))
1612 1616 for chnk in group:
1613 1617 yield chnk
1614 1618
1615 1619 # These are no longer needed, dereference and toss the memory for
1616 1620 # them.
1617 1621 msng_mnfst_lst = None
1618 1622 msng_mnfst_set.clear()
1619 1623
1620 1624 changedfiles = changedfiles.keys()
1621 1625 changedfiles.sort()
1622 1626 # Go through all our files in order sorted by name.
1623 1627 for fname in changedfiles:
1624 1628 filerevlog = self.file(fname)
1625 1629 # Toss out the filenodes that the recipient isn't really
1626 1630 # missing.
1627 1631 if msng_filenode_set.has_key(fname):
1628 1632 prune_filenodes(fname, filerevlog)
1629 1633 msng_filenode_lst = msng_filenode_set[fname].keys()
1630 1634 else:
1631 1635 msng_filenode_lst = []
1632 1636 # If any filenodes are left, generate the group for them,
1633 1637 # otherwise don't bother.
1634 1638 if len(msng_filenode_lst) > 0:
1635 1639 yield changegroup.genchunk(fname)
1636 1640 # Sort the filenodes by their revision number.
1637 1641 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1638 1642 # Create a group generator and only pass in a changenode
1639 1643 # lookup function as we need to collect no information
1640 1644 # from filenodes.
1641 1645 group = filerevlog.group(msng_filenode_lst,
1642 1646 lookup_filenode_link_func(fname))
1643 1647 for chnk in group:
1644 1648 yield chnk
1645 1649 if msng_filenode_set.has_key(fname):
1646 1650 # Don't need this anymore, toss it to free memory.
1647 1651 del msng_filenode_set[fname]
1648 1652 # Signal that no more groups are left.
1649 1653 yield changegroup.closechunk()
1650 1654
1651 1655 if msng_cl_lst:
1652 1656 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1653 1657
1654 1658 return util.chunkbuffer(gengroup())
1655 1659
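
Both changegroup generators in this file hand their chunk stream to util.chunkbuffer, which presents a generator of strings as a file-like object. A minimal sketch of the idea (illustrative only, not util's actual implementation):

class chunkbuffer(object):
    # Wrap an iterator of strings so callers can read(n) from it,
    # the way util.chunkbuffer is used with gengroup() above.
    def __init__(self, gen):
        self.iter = gen
        self.buf = ''
    def read(self, l):
        # Accumulate chunks until we can satisfy the request.
        while len(self.buf) < l:
            try:
                self.buf += self.iter.next()
            except StopIteration:
                break
        data, self.buf = self.buf[:l], self.buf[l:]
        return data

Buffering this way lets the consumer pull fixed-size reads while the generators above lazily produce chunks of whatever size falls out of the revlog deltas.
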
1656 1660 def changegroup(self, basenodes, source):
1657 1661 """Generate a changegroup of all nodes that we have that a recipient
1658 1662 doesn't.
1659 1663
1660 1664 This is much easier than the previous function as we can assume that
1661 1665 the recipient has any changenode we aren't sending them."""
1662 1666
1663 1667 self.hook('preoutgoing', throw=True, source=source)
1664 1668
1665 1669 cl = self.changelog
1666 1670 nodes = cl.nodesbetween(basenodes, None)[0]
1667 1671 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1668 1672 self.changegroupinfo(nodes)
1669 1673
1670 1674 def identity(x):
1671 1675 return x
1672 1676
1673 1677 def gennodelst(revlog):
1674 1678 for r in xrange(0, revlog.count()):
1675 1679 n = revlog.node(r)
1676 1680 if revlog.linkrev(n) in revset:
1677 1681 yield n
1678 1682
1679 1683 def changed_file_collector(changedfileset):
1680 1684 def collect_changed_files(clnode):
1681 1685 c = cl.read(clnode)
1682 1686 for fname in c[3]:
1683 1687 changedfileset[fname] = 1
1684 1688 return collect_changed_files
1685 1689
1686 1690 def lookuprevlink_func(revlog):
1687 1691 def lookuprevlink(n):
1688 1692 return cl.node(revlog.linkrev(n))
1689 1693 return lookuprevlink
1690 1694
1691 1695 def gengroup():
1692 1696 # construct a list of all changed files
1693 1697 changedfiles = {}
1694 1698
1695 1699 for chnk in cl.group(nodes, identity,
1696 1700 changed_file_collector(changedfiles)):
1697 1701 yield chnk
1698 1702 changedfiles = changedfiles.keys()
1699 1703 changedfiles.sort()
1700 1704
1701 1705 mnfst = self.manifest
1702 1706 nodeiter = gennodelst(mnfst)
1703 1707 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1704 1708 yield chnk
1705 1709
1706 1710 for fname in changedfiles:
1707 1711 filerevlog = self.file(fname)
1708 1712 nodeiter = gennodelst(filerevlog)
1709 1713 nodeiter = list(nodeiter)
1710 1714 if nodeiter:
1711 1715 yield changegroup.genchunk(fname)
1712 1716 lookup = lookuprevlink_func(filerevlog)
1713 1717 for chnk in filerevlog.group(nodeiter, lookup):
1714 1718 yield chnk
1715 1719
1716 1720 yield changegroup.closechunk()
1717 1721
1718 1722 if nodes:
1719 1723 self.hook('outgoing', node=hex(nodes[0]), source=source)
1720 1724
1721 1725 return util.chunkbuffer(gengroup())
1722 1726
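
The chunk framing provided by changegroup.genchunk, closechunk, chunkiter, and getchunk is worth spelling out, since addchangegroup below undoes exactly what gengroup above produces. A minimal sketch, assuming each chunk is a 4-byte big-endian length (counting the length field itself) followed by the payload, with a zero length closing a group; the real helpers in changegroup.py also handle truncated streams:

import struct

def getchunk(source):
    # Read one framed chunk; "" signals the end of the current group.
    d = source.read(4)
    if len(d) < 4:
        return ""
    l = struct.unpack(">l", d)[0]
    if l <= 4:
        return ""
    return source.read(l - 4)

def chunkiter(source):
    # Yield payloads until the empty terminating chunk.
    while 1:
        c = getchunk(source)
        if not c:
            break
        yield c

def genchunk(data):
    # Frame a payload with a length that counts itself.
    return struct.pack(">l", len(data) + 4) + data

def closechunk():
    # A zero-length chunk terminates a group.
    return struct.pack(">l", 0)

With this framing, the full stream is: a changelog group, a manifest group, then for each changed file a chunk carrying the filename followed by that file's group, and finally an empty chunk to say no files remain.
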
1723 1727 def addchangegroup(self, source, srctype, url):
1724 1728 """add changegroup to repo.
1725 1729 returns number of heads modified or added + 1."""
1726 1730
1727 1731 def csmap(x):
1728 1732 self.ui.debug(_("add changeset %s\n") % short(x))
1729 1733 return cl.count()
1730 1734
1731 1735 def revmap(x):
1732 1736 return cl.rev(x)
1733 1737
1734 1738 if not source:
1735 1739 return 0
1736 1740
1737 1741 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1738 1742
1739 1743 changesets = files = revisions = 0
1740 1744
1741 1745 tr = self.transaction()
1742 1746
1743 1747 # write changelog data to temp files so concurrent readers will not see
1744 1748 # an inconsistent view
1745 1749 cl = None
1746 1750 try:
1747 1751 cl = appendfile.appendchangelog(self.sopener,
1748 1752 self.changelog.version)
1749 1753
1750 1754 oldheads = len(cl.heads())
1751 1755
1752 1756 # pull off the changeset group
1753 1757 self.ui.status(_("adding changesets\n"))
1754 1758 cor = cl.count() - 1
1755 1759 chunkiter = changegroup.chunkiter(source)
1756 1760 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1757 1761 raise util.Abort(_("received changelog group is empty"))
1758 1762 cnr = cl.count() - 1
1759 1763 changesets = cnr - cor
1760 1764
1761 1765 # pull off the manifest group
1762 1766 self.ui.status(_("adding manifests\n"))
1763 1767 chunkiter = changegroup.chunkiter(source)
1764 1768 # no need to check for empty manifest group here:
1765 1769 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1766 1770 # no new manifest will be created and the manifest group will
1767 1771 # be empty during the pull
1768 1772 self.manifest.addgroup(chunkiter, revmap, tr)
1769 1773
1770 1774 # process the files
1771 1775 self.ui.status(_("adding file changes\n"))
1772 1776 while 1:
1773 1777 f = changegroup.getchunk(source)
1774 1778 if not f:
1775 1779 break
1776 1780 self.ui.debug(_("adding %s revisions\n") % f)
1777 1781 fl = self.file(f)
1778 1782 o = fl.count()
1779 1783 chunkiter = changegroup.chunkiter(source)
1780 1784 if fl.addgroup(chunkiter, revmap, tr) is None:
1781 1785 raise util.Abort(_("received file revlog group is empty"))
1782 1786 revisions += fl.count() - o
1783 1787 files += 1
1784 1788
1785 1789 cl.writedata()
1786 1790 finally:
1787 1791 if cl:
1788 1792 cl.cleanup()
1789 1793
1790 1794 # make changelog see real files again
1791 1795 self.changelog = changelog.changelog(self.sopener,
1792 1796 self.changelog.version)
1793 1797 self.changelog.checkinlinesize(tr)
1794 1798
1795 1799 newheads = len(self.changelog.heads())
1796 1800 heads = ""
1797 1801 if oldheads and newheads != oldheads:
1798 1802 heads = _(" (%+d heads)") % (newheads - oldheads)
1799 1803
1800 1804 self.ui.status(_("added %d changesets"
1801 1805 " with %d changes to %d files%s\n")
1802 1806 % (changesets, revisions, files, heads))
1803 1807
1804 1808 if changesets > 0:
1805 1809 self.hook('pretxnchangegroup', throw=True,
1806 1810 node=hex(self.changelog.node(cor+1)), source=srctype,
1807 1811 url=url)
1808 1812
1809 1813 tr.close()
1810 1814
1811 1815 if changesets > 0:
1812 1816 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1813 1817 source=srctype, url=url)
1814 1818
1815 1819 for i in xrange(cor + 1, cnr + 1):
1816 1820 self.hook("incoming", node=hex(self.changelog.node(i)),
1817 1821 source=srctype, url=url)
1818 1822
1819 1823 return newheads - oldheads + 1
1820 1824
1821 1825
1822 1826 def stream_in(self, remote):
1823 1827 fp = remote.stream_out()
1824 1828 l = fp.readline()
1825 1829 try:
1826 1830 resp = int(l)
1827 1831 except ValueError:
1828 1832 raise util.UnexpectedOutput(
1829 1833 _('Unexpected response from remote server:'), l)
1830 1834 if resp == 1:
1831 1835 raise util.Abort(_('operation forbidden by server'))
1832 1836 elif resp == 2:
1833 1837 raise util.Abort(_('locking the remote repository failed'))
1834 1838 elif resp != 0:
1835 1839 raise util.Abort(_('the server sent an unknown error code'))
1836 1840 self.ui.status(_('streaming all changes\n'))
1837 1841 l = fp.readline()
1838 1842 try:
1839 1843 total_files, total_bytes = map(int, l.split(' ', 1))
1840 1844 except (ValueError, TypeError):
1841 1845 raise util.UnexpectedOutput(
1842 1846 _('Unexpected response from remote server:'), l)
1843 1847 self.ui.status(_('%d files to transfer, %s of data\n') %
1844 1848 (total_files, util.bytecount(total_bytes)))
1845 1849 start = time.time()
1846 1850 for i in xrange(total_files):
1847 1851 # XXX doesn't support '\n' or '\r' in filenames
1848 1852 l = fp.readline()
1849 1853 try:
1850 1854 name, size = l.split('\0', 1)
1851 1855 size = int(size)
1852 1856 except (ValueError, TypeError):
1853 1857 raise util.UnexpectedOutput(
1854 1858 _('Unexpected response from remote server:'), l)
1855 1859 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1856 1860 ofp = self.sopener(name, 'w')
1857 1861 for chunk in util.filechunkiter(fp, limit=size):
1858 1862 ofp.write(chunk)
1859 1863 ofp.close()
1860 1864 elapsed = time.time() - start
1861 1865 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1862 1866 (util.bytecount(total_bytes), elapsed,
1863 1867 util.bytecount(total_bytes / elapsed)))
1864 1868 self.reload()
1865 1869 return len(self.heads()) + 1
1866 1870
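
For reference, the sending side of the protocol stream_in just consumed can be sketched as follows (a hypothetical emitter written only to mirror what the reader above parses; the real server code lives elsewhere): a status line, a "total_files total_bytes" line, then for each file a name\0size header line followed by exactly size raw bytes.

def emit_stream(fp, entries):
    # entries: list of (name, data) pairs -- hypothetical input shape.
    fp.write('0\n')                          # response code 0: OK
    total = 0
    for name, data in entries:
        total += len(data)
    fp.write('%d %d\n' % (len(entries), total))
    for name, data in entries:
        # the reader splits on '\0' and int()s the rest of the line
        fp.write('%s\0%d\n' % (name, len(data)))
        fp.write(data)

stream_in then drains each payload with util.filechunkiter(fp, limit=size), so the byte counts on the header lines must be exact.
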
1867 1871 def clone(self, remote, heads=[], stream=False):
1868 1872 '''clone remote repository.
1869 1873
1870 1874 keyword arguments:
1871 1875 heads: list of revs to clone (forces use of pull)
1872 1876 stream: use streaming clone if possible'''
1873 1877
1874 1878 # now, all clients that can request uncompressed clones can
1875 1879 # read repo formats supported by all servers that can serve
1876 1880 # them.
1877 1881
1878 1882 # if revlog format changes, client will have to check version
1879 1883 # and format flags on "stream" capability, and use
1880 1884 # uncompressed only if compatible.
1881 1885
1882 1886 if stream and not heads and remote.capable('stream'):
1883 1887 return self.stream_in(remote)
1884 1888 return self.pull(remote, heads)
1885 1889
1886 1890 # used to avoid circular references so destructors work
1887 1891 def aftertrans(base):
1888 1892 p = base
1889 1893 def a():
1890 1894 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1891 1895 util.rename(os.path.join(p, "journal.dirstate"),
1892 1896 os.path.join(p, "undo.dirstate"))
1893 1897 return a
1894 1898
1895 1899 def instance(ui, path, create):
1896 1900 return localrepository(ui, util.drop_scheme('file', path), create)
1897 1901
1898 1902 def islocal(path):
1899 1903 return True
@@ -1,33 +1,40
1 1 #!/bin/sh
2 2
3 3 hg init t
4 4 cd t
5 5 hg branches
6 6
7 7 echo foo > a
8 8 hg add a
9 9 hg ci -m "initial" -d "1000000 0"
10 10 hg branch foo
11 11 hg branch
12 12 hg ci -m "add branch name" -d "1000000 0"
13 13 hg branch bar
14 14 hg ci -m "change branch name" -d "1000000 0"
15 15 hg branch ""
16 16 hg ci -m "clear branch name" -d "1000000 0"
17 17
18 18 hg co foo
19 19 hg branch
20 20 echo bleah > a
21 21 hg ci -m "modify a branch" -d "1000000 0"
22 22
23 23 hg merge
24 24 hg branch
25 25 hg ci -m "merge" -d "1000000 0"
26 26 hg log
27 27
28 28 hg branches
29 29 hg branches -q
30 30
31 31 echo % test for invalid branch cache
32 32 hg rollback
33 cp .hg/branches.cache .hg/bc-invalid
33 34 hg log -r foo
35 cp .hg/bc-invalid .hg/branches.cache
36 hg --debug log -r foo
37 rm .hg/branches.cache
38 echo corrupted > .hg/branches.cache
39 hg log -qr foo
40 cat .hg/branches.cache
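
The tail of this script deliberately feeds hg a stale cache, then an outright corrupted one, and still expects every command to succeed: per the commit message, all errors while parsing the branch cache are ignored and the cache is rebuilt. Reconstructed from what the test expects (written as a free function for illustration; bin, nullid, util, a changelog, and a ui object are assumed to be available as in localrepo.py), the tolerant reader looks roughly like this:

def read_branch_cache(opener, changelog, ui):
    partial, last, lrev = {}, nullid, -1
    try:
        f = opener("branches.cache")
        lines = f.read().split('\n')
        f.close()
        # first line: the tip node and tip rev the cache was valid for
        last, lrev = lines.pop(0).rstrip().split(" ", 1)
        last, lrev = bin(last), int(lrev)
        if not (lrev < changelog.count() and changelog.node(lrev) == last):
            # cache was built against a tip we no longer have
            raise ValueError('Invalid branch cache: unknown tip')
        # remaining lines: "<node> <branchname>"
        for l in lines:
            if not l:
                continue
            node, label = l.rstrip().split(" ", 1)
            partial[label] = bin(node)
    except (KeyboardInterrupt, util.SignalInterrupt):
        raise          # never swallow a user interrupt
    except Exception, inst:
        if ui.debugflag:
            ui.warn(str(inst), '\n')
        partial, last, lrev = {}, nullid, -1
    return partial, last, lrev

This matches the expected output below: the "Invalid branch cache: unknown tip" line appears only in the --debug run, and the corrupted-file case recovers silently.
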
@@ -1,58 +1,77
1 1 foo
2 2 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
3 3 foo
4 4 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
5 5 (branch merge, don't forget to commit)
6 6 foo
7 7 changeset: 5:5f8fb06e083e
8 8 branch: foo
9 9 tag: tip
10 10 parent: 4:4909a3732169
11 11 parent: 3:bf1bc2f45e83
12 12 user: test
13 13 date: Mon Jan 12 13:46:40 1970 +0000
14 14 summary: merge
15 15
16 16 changeset: 4:4909a3732169
17 17 branch: foo
18 18 parent: 1:b699b1cec9c2
19 19 user: test
20 20 date: Mon Jan 12 13:46:40 1970 +0000
21 21 summary: modify a branch
22 22
23 23 changeset: 3:bf1bc2f45e83
24 24 user: test
25 25 date: Mon Jan 12 13:46:40 1970 +0000
26 26 summary: clear branch name
27 27
28 28 changeset: 2:67ec16bde7f1
29 29 branch: bar
30 30 user: test
31 31 date: Mon Jan 12 13:46:40 1970 +0000
32 32 summary: change branch name
33 33
34 34 changeset: 1:b699b1cec9c2
35 35 branch: foo
36 36 user: test
37 37 date: Mon Jan 12 13:46:40 1970 +0000
38 38 summary: add branch name
39 39
40 40 changeset: 0:be8523e69bf8
41 41 user: test
42 42 date: Mon Jan 12 13:46:40 1970 +0000
43 43 summary: initial
44 44
45 45 foo 5:5f8fb06e083e
46 46 bar 2:67ec16bde7f1
47 47 foo
48 48 bar
49 49 % test for invalid branch cache
50 50 rolling back last transaction
51 51 changeset: 4:4909a3732169
52 52 branch: foo
53 53 tag: tip
54 54 parent: 1:b699b1cec9c2
55 55 user: test
56 56 date: Mon Jan 12 13:46:40 1970 +0000
57 57 summary: modify a branch
58 58
59 Invalid branch cache: unknown tip
60 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
61 branch: foo
62 tag: tip
63 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
64 parent: -1:0000000000000000000000000000000000000000
65 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
66 user: test
67 date: Mon Jan 12 13:46:40 1970 +0000
68 files: a
69 extra: branch=foo
70 description:
71 modify a branch
72
73
74 4:4909a3732169
75 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
76 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
77 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
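
The last three output lines above are the rebuilt branches.cache itself: the first line stores the tip node and revision, and each following line maps a branch head node to its branch name. Read back in, that amounts to roughly the following (variable names assumed for illustration; bin() is the hex-to-binary helper from the node module):

last = bin("4909a3732169c0c20011c4f4b8fdff4e3d89b23f")    # cached tip node
lrev = 4                                                  # cached tip rev
partial = {"foo": bin("4909a3732169c0c20011c4f4b8fdff4e3d89b23f"),
           "bar": bin("67ec16bde7f1575d523313b9bca000f6a6f12dca")}
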