##// END OF EJS Templates
Merge branchname changes in localrepo.commit.
Thomas Arendsen Hein -
r4022:bf329bda merge default
parent child Browse files
Show More
@@ -1,1880 +1,1885 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, appendfile, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
15 15 class localrepository(repo.repository):
    # wire-protocol capabilities advertised to clients
    capabilities = ('lookup', 'changegroupsubset')
    # on-disk requirements (from .hg/requires) this version can read
    supported = ('revlogv1', 'store')
18 18
19 19 def __del__(self):
20 20 self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        If path is None, search upward from the cwd for a .hg directory.
        Raises repo.RepoError when no repository is found, when create
        is requested on an existing repository, or when an on-disk
        requirement is not listed in self.supported.
        """
        repo.repository.__init__(self)
        if not path:
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.path = os.path.join(path, ".hg")
        self.root = os.path.realpath(path)
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(os.path.join(self.path, "store"))
                requirements = ("revlogv1", "store")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                # a missing requires file means an old-style repository
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: "store" repositories keep revlogs under .hg/store
        # with encoded filenames; older layouts keep them directly in .hg
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            # a repository without an hgrc is perfectly valid
            pass

        # revlog format version/flags, configurable via [revlog] section
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-filled caches; see tags()/branchtags()/nodetags()
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None

        # symlink test helper; constant False when the fs can't do links
        self._link = lambda x: False
        if util.checklink(self.root):
            r = self.root # avoid circular reference in lambda
            self._link = lambda x: util.is_link(os.path.join(r, x))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
129 129
130 130 def url(self):
131 131 return 'file:' + self.root
132 132
    def hook(self, name, throw=False, **args):
        """Run every configured hook for event `name`.

        args become HG_* environment variables for shell hooks and
        keyword arguments for python hooks.  Returns a truthy value if
        any hook reported failure; with throw=True a failure raises
        util.Abort instead.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                # never swallow user interrupts
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: args are exported as HG_* environment variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # both "name" and "name.suffix" entries match; run in sorted order
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
213 213
    # characters that may not appear in a tag name (enforced by tag())
    tag_disallowed = ':\r\n'
215 215
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # reject characters that would corrupt the tag file format
        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to clobber uncommitted user edits to .hgtags
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        self.wfile('.hgtags', 'ab').write(line)
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
259 259
260 260 def tags(self):
261 261 '''return a mapping of tag to node'''
262 262 if not self.tagscache:
263 263 self.tagscache = {}
264 264
265 265 def parsetag(line, context):
266 266 if not line:
267 267 return
268 268 s = l.split(" ", 1)
269 269 if len(s) != 2:
270 270 self.ui.warn(_("%s: cannot parse entry\n") % context)
271 271 return
272 272 node, key = s
273 273 key = util.tolocal(key.strip()) # stored in UTF-8
274 274 try:
275 275 bin_n = bin(node)
276 276 except TypeError:
277 277 self.ui.warn(_("%s: node '%s' is not well formed\n") %
278 278 (context, node))
279 279 return
280 280 if bin_n not in self.changelog.nodemap:
281 281 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
282 282 (context, key))
283 283 return
284 284 self.tagscache[key] = bin_n
285 285
286 286 # read the tags file from each head, ending with the tip,
287 287 # and add each tag found to the map, with "newer" ones
288 288 # taking precedence
289 289 f = None
290 290 for rev, node, fnode in self._hgtagsnodes():
291 291 f = (f and f.filectx(fnode) or
292 292 self.filectx('.hgtags', fileid=fnode))
293 293 count = 0
294 294 for l in f.data().splitlines():
295 295 count += 1
296 296 parsetag(l, _("%s, line %d") % (str(f), count))
297 297
298 298 try:
299 299 f = self.opener("localtags")
300 300 count = 0
301 301 for l in f:
302 302 # localtags are stored in the local character set
303 303 # while the internal tag table is stored in UTF-8
304 304 l = util.fromlocal(l)
305 305 count += 1
306 306 parsetag(l, _("localtags, line %d") % count)
307 307 except IOError:
308 308 pass
309 309
310 310 self.tagscache['tip'] = self.changelog.tip()
311 311
312 312 return self.tagscache
313 313
    def _hgtagsnodes(self):
        """Return [(rev, node, fnode)] for each head's .hgtags file.

        Heads are processed oldest-first so that later (newer) entries
        win when tags() layers them; duplicate .hgtags filenodes keep
        only their newest occurrence.
        """
        heads = self.heads()
        heads.reverse()
        last = {}   # .hgtags filenode -> index of its latest entry in ret
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # same .hgtags content seen before: drop the older entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
331 331
332 332 def tagslist(self):
333 333 '''return a list of tags ordered by revision'''
334 334 l = []
335 335 for t, n in self.tags().items():
336 336 try:
337 337 r = self.changelog.rev(n)
338 338 except:
339 339 r = -2 # sort to the beginning of the list if unknown
340 340 l.append((r, t, n))
341 341 l.sort()
342 342 return [(t, n) for r, t, n in l]
343 343
344 344 def nodetags(self, node):
345 345 '''return the tags associated with a node'''
346 346 if not self.nodetagscache:
347 347 self.nodetagscache = {}
348 348 for t, n in self.tags().items():
349 349 self.nodetagscache.setdefault(n, []).append(t)
350 350 return self.nodetagscache.get(node, [])
351 351
    def _branchtags(self):
        """Return {branch name: tipmost node}, refreshing the disk cache.

        Reads the cached mapping, folds in revisions added since the
        cache was written, and persists the updated cache.
        """
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            # cache is stale: scan only the revisions it hasn't seen
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
361 361
    def branchtags(self):
        """Return {branch: node} in the local charset, memoized in
        self.branchcache."""
        if self.branchcache is not None:
            return self.branchcache

        self.branchcache = {} # avoid recursion in changectx
        partial = self._branchtags()

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache
374 374
    def _readbranchcache(self):
        """Read branches.cache; return (partial, last, lrev).

        partial maps branch name (UTF-8) -> node; last/lrev identify
        the changelog tip the cache was valid for.  Any parse or sanity
        failure silently discards the cache and returns an empty map
        anchored at nullid/nullrev.
        """
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # header line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # a corrupt cache is not fatal; it will be rebuilt
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
398 398
399 399 def _writebranchcache(self, branches, tip, tiprev):
400 400 try:
401 401 f = self.opener("branches.cache", "w")
402 402 f.write("%s %s\n" % (hex(tip), tiprev))
403 403 for label, node in branches.iteritems():
404 404 f.write("%s %s\n" % (hex(node), label))
405 405 except IOError:
406 406 pass
407 407
408 408 def _updatebranchcache(self, partial, start, end):
409 409 for r in xrange(start, end):
410 410 c = self.changectx(r)
411 411 b = c.branch()
412 412 if b:
413 413 partial[b] = c.node()
414 414
    def lookup(self, key):
        """Resolve key ('.', 'null', rev number, tag, branch, or node
        prefix) to a binary changelog node; raise repo.RepoError if it
        matches nothing."""
        if key == '.':
            # '.' is the first parent of the working directory
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        # last resort: unambiguous node-prefix match
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
433 433
434 434 def dev(self):
435 435 return os.lstat(self.path).st_dev
436 436
437 437 def local(self):
438 438 return True
439 439
440 440 def join(self, f):
441 441 return os.path.join(self.path, f)
442 442
443 443 def sjoin(self, f):
444 444 f = self.encodefn(f)
445 445 return os.path.join(self.spath, f)
446 446
447 447 def wjoin(self, f):
448 448 return os.path.join(self.root, f)
449 449
450 450 def file(self, f):
451 451 if f[0] == '/':
452 452 f = f[1:]
453 453 return filelog.filelog(self.sopener, f, self.revlogversion)
454 454
455 455 def changectx(self, changeid=None):
456 456 return context.changectx(self, changeid)
457 457
458 458 def workingctx(self):
459 459 return context.workingctx(self)
460 460
461 461 def parents(self, changeid=None):
462 462 '''
463 463 get list of changectxs for parents of changeid or working directory
464 464 '''
465 465 if changeid is None:
466 466 pl = self.dirstate.parents()
467 467 else:
468 468 n = self.changelog.lookup(changeid)
469 469 pl = self.changelog.parents(n)
470 470 if pl[1] == nullid:
471 471 return [self.changectx(pl[0])]
472 472 return [self.changectx(pl[0]), self.changectx(pl[1])]
473 473
474 474 def filectx(self, path, changeid=None, fileid=None):
475 475 """changeid can be a changeset revision, node, or tag.
476 476 fileid can be a file revision or node."""
477 477 return context.filectx(self, path, changeid, fileid)
478 478
479 479 def getcwd(self):
480 480 return self.dirstate.getcwd()
481 481
482 482 def wfile(self, f, mode='r'):
483 483 return self.wopener(f, mode)
484 484
    def _filter(self, filter, filename, data):
        """Pass data through the first matching pattern of config
        section `filter` (e.g. [encode]/[decode]); compiled matchers
        are cached in self.filterpats."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                l.append((mf, cmd))
            self.filterpats[filter] = l

        for mf, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = util.filter(data, cmd)
                # only the first matching filter applies
                break

        return data
500 500
501 501 def wread(self, filename):
502 502 if self._link(filename):
503 503 data = os.readlink(self.wjoin(filename))
504 504 else:
505 505 data = self.wopener(filename, 'r').read()
506 506 return self._filter("encode", filename, data)
507 507
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory after the
        "decode" filters.  "l" in flags makes a symlink to data; "x"
        sets the exec bit."""
        data = self._filter("decode", filename, data)
        if "l" in flags:
            try:
                # symlink() fails on an existing path, so clear it first
                os.unlink(self.wjoin(filename))
            except OSError:
                pass
            os.symlink(data, self.wjoin(filename))
        else:
            try:
                # replace an existing symlink with a regular file
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
524 524
525 525 def wwritedata(self, filename, data):
526 526 return self._filter("decode", filename, data)
527 527
    def transaction(self):
        """Return a transaction on the store, nesting into an already
        running one if present.

        The dirstate is snapshotted first so rollback() can restore it;
        on successful close the journal files are renamed to the undo
        files.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # performed after a successful transaction close
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
547 547
    def recover(self):
        """Roll back an interrupted transaction's journal, if present.

        Returns True when a journal was found and rolled back."""
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            # in-memory revlogs are stale after the on-disk rollback
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
558 558
    def rollback(self, wlock=None):
        """Undo the last completed transaction (the undo files),
        restoring the dirstate snapshot taken when it began."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # both store and working-dir state are stale now
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
571 571
572 572 def wreload(self):
573 573 self.dirstate.read()
574 574
575 575 def reload(self):
576 576 self.changelog.load()
577 577 self.manifest.load()
578 578 self.tagscache = None
579 579 self.nodetagscache = None
580 580
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire lockname; with wait, retry with a timeout when it is
        already held.

        releasefn runs when the lock is released, acquirefn right after
        acquisition; desc labels the lock in user messages.  Raises
        lock.LockHeld when not waiting and the lock is taken.
        """
        try:
            # first try: don't wait at all
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
596 596
597 597 def lock(self, wait=1):
598 598 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
599 599 desc=_('repository %s') % self.origroot)
600 600
601 601 def wlock(self, wait=1):
602 602 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
603 603 self.wreload,
604 604 desc=_('working directory of %s') % self.origroot)
605 605
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filenode (or the parent's when unchanged) and
        appends fn to changelist if a new revision was created.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # fn is a copy/rename; how parents and copyrev are wired
            # depends on which side of a merge the copy happened on
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            # the copy source carries the history; drop the direct parent
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
647 647
648 648 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
649 649 if p1 is None:
650 650 p1, p2 = self.dirstate.parents()
651 651 return self.commit(files=files, text=text, user=user, date=date,
652 652 p1=p1, p2=p2, wlock=wlock, extra=extra)
653 653
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset; return its node, or None when there
        is nothing to commit or the edited message is empty.

        With p1 unset the dirstate's pending changes are committed;
        with p1 (and optionally p2) given it behaves as rawcommit.
        An empty text, or force_editor=True, opens the editor.
        """
        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                # commit only the named files, classified by dirstate state
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            # only move the dirstate when it was already on p1
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to validate the branch name is UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # a branch name change alone is enough to commit
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, is_exec(f), is_link(f))
            except OSError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit tolerates missing files: treat as removed
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            # build the HG: commented template shown in the editor
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message; an effectively empty one aborts
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        # keep the in-memory branch cache in step with the new head
        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
797 802
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walking a committed revision: use its manifest
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # names left in fdict were requested but absent from the rev
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            # walking the working directory: defer to the dirstate
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
831 836
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown, ignored,
        clean), each a sorted list of file names.
        """

        def fcmp(fn, mf):
            # does the working-dir content of fn differ from mf's entry?
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    # best effort: without the lock we can still report,
                    # just not record clean files in the dirstate below
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # cache the clean verdict in the dirstate
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 existed only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
930 935
931 936 def add(self, list, wlock=None):
932 937 if not wlock:
933 938 wlock = self.wlock()
934 939 for f in list:
935 940 p = self.wjoin(f)
936 941 islink = os.path.islink(p)
937 942 if not islink and not os.path.exists(p):
938 943 self.ui.warn(_("%s does not exist!\n") % f)
939 944 elif not islink and not os.path.isfile(p):
940 945 self.ui.warn(_("%s not added: only files and symlinks "
941 946 "supported currently\n") % f)
942 947 elif self.dirstate.state(f) in 'an':
943 948 self.ui.warn(_("%s already tracked!\n") % f)
944 949 else:
945 950 self.dirstate.update([f], "a")
946 951
947 952 def forget(self, list, wlock=None):
948 953 if not wlock:
949 954 wlock = self.wlock()
950 955 for f in list:
951 956 if self.dirstate.state(f) not in 'ai':
952 957 self.ui.warn(_("%s not added!\n") % f)
953 958 else:
954 959 self.dirstate.forget([f])
955 960
956 961 def remove(self, list, unlink=False, wlock=None):
957 962 if unlink:
958 963 for f in list:
959 964 try:
960 965 util.unlink(self.wjoin(f))
961 966 except OSError, inst:
962 967 if inst.errno != errno.ENOENT:
963 968 raise
964 969 if not wlock:
965 970 wlock = self.wlock()
966 971 for f in list:
967 972 p = self.wjoin(f)
968 973 if os.path.exists(p):
969 974 self.ui.warn(_("%s still exists!\n") % f)
970 975 elif self.dirstate.state(f) == 'a':
971 976 self.dirstate.forget([f])
972 977 elif f not in self.dirstate:
973 978 self.ui.warn(_("%s not tracked!\n") % f)
974 979 else:
975 980 self.dirstate.update([f], "r")
976 981
977 982 def undelete(self, list, wlock=None):
978 983 p = self.dirstate.parents()[0]
979 984 mn = self.changelog.read(p)[0]
980 985 m = self.manifest.read(mn)
981 986 if not wlock:
982 987 wlock = self.wlock()
983 988 for f in list:
984 989 if self.dirstate.state(f) not in "r":
985 990 self.ui.warn("%s not removed!\n" % f)
986 991 else:
987 992 t = self.file(f).read(m[f])
988 993 self.wwrite(f, t, m.flags(f))
989 994 self.dirstate.update([f], "n")
990 995
991 996 def copy(self, source, dest, wlock=None):
992 997 p = self.wjoin(dest)
993 998 if not os.path.exists(p):
994 999 self.ui.warn(_("%s does not exist!\n") % dest)
995 1000 elif not os.path.isfile(p):
996 1001 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
997 1002 else:
998 1003 if not wlock:
999 1004 wlock = self.wlock()
1000 1005 if self.dirstate.state(dest) == '?':
1001 1006 self.dirstate.update([dest], "a")
1002 1007 self.dirstate.copy(source, dest)
1003 1008
1004 1009 def heads(self, start=None):
1005 1010 heads = self.changelog.heads(start)
1006 1011 # sort the output in rev descending order
1007 1012 heads = [(-self.changelog.rev(h), h) for h in heads]
1008 1013 heads.sort()
1009 1014 return [n for (r, n) in heads]
1010 1015
1011 1016 def branches(self, nodes):
1012 1017 if not nodes:
1013 1018 nodes = [self.changelog.tip()]
1014 1019 b = []
1015 1020 for n in nodes:
1016 1021 t = n
1017 1022 while 1:
1018 1023 p = self.changelog.parents(n)
1019 1024 if p[1] != nullid or p[0] == nullid:
1020 1025 b.append((t, n, p[0], p[1]))
1021 1026 break
1022 1027 n = p[0]
1023 1028 return b
1024 1029
1025 1030 def between(self, pairs):
1026 1031 r = []
1027 1032
1028 1033 for top, bottom in pairs:
1029 1034 n, l, i = top, [], 0
1030 1035 f = 1
1031 1036
1032 1037 while n != bottom:
1033 1038 p = self.changelog.parents(n)[0]
1034 1039 if i == f:
1035 1040 l.append(n)
1036 1041 f = f * 2
1037 1042 n = p
1038 1043 i += 1
1039 1044
1040 1045 r.append(l)
1041 1046
1042 1047 return r
1043 1048
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        # nodemap gives O(1) "do we already have this node" membership tests
        m = self.changelog.nodemap
        search = []       # branch ranges scheduled for binary search
        fetch = {}        # earliest-unknown nodes found so far (the result)
        seen = {}         # branch heads already examined
        seenbranch = {}   # full branch tuples already examined
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        # empty local repo: everything the remote has is missing
        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        # nodes we have already asked (or are about to ask) the remote about
        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                            for p in n[2:4]:
                                if p in m:
                                    base[p] = 1 # latest known

                    # queue unknown parents for the next remote round-trip
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                # batch parent lookups, ten per request
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                            (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            # sample points between head and base come from remote.between
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                          short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                # NOTE(review): f is a full binary node; f[:4] truncates it to
                # four bytes before short() hexlifies it — probably meant
                # short(f).  Display-only, so behavior is otherwise unaffected.
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1184 1189
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node hints given: run the discovery protocol to
            # fill base with the nodes shared with the remote
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        # start from every node we have; prune away what remote has
        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            # BFS over ancestors of the common nodes
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1232 1237
1233 1238 def pull(self, remote, heads=None, force=False, lock=None):
1234 1239 mylock = False
1235 1240 if not lock:
1236 1241 lock = self.lock()
1237 1242 mylock = True
1238 1243
1239 1244 try:
1240 1245 fetch = self.findincoming(remote, force=force)
1241 1246 if fetch == [nullid]:
1242 1247 self.ui.status(_("requesting all changes\n"))
1243 1248
1244 1249 if not fetch:
1245 1250 self.ui.status(_("no changes found\n"))
1246 1251 return 0
1247 1252
1248 1253 if heads is None:
1249 1254 cg = remote.changegroup(fetch, 'pull')
1250 1255 else:
1251 1256 if 'changegroupsubset' not in remote.capabilities:
1252 1257 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1253 1258 cg = remote.changegroupsubset(fetch, heads, 'pull')
1254 1259 return self.addchangegroup(cg, 'pull', remote.url())
1255 1260 finally:
1256 1261 if mylock:
1257 1262 lock.release()
1258 1263
1259 1264 def push(self, remote, force=False, revs=None):
1260 1265 # there are two ways to push to remote repo:
1261 1266 #
1262 1267 # addchangegroup assumes local user can lock remote
1263 1268 # repo (local filesystem, old ssh servers).
1264 1269 #
1265 1270 # unbundle assumes local user cannot lock remote repo (new ssh
1266 1271 # servers, http servers).
1267 1272
1268 1273 if remote.capable('unbundle'):
1269 1274 return self.push_unbundle(remote, force, revs)
1270 1275 return self.push_addchangegroup(remote, force, revs)
1271 1276
    def prepush(self, remote, force, revs):
        """Analyse a push to remote and build the changegroup to send.

        Returns (changegroup, remote_heads) when there is something to
        push, or (None, 1) when there is nothing to send or the push
        would create new remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        # fills base with nodes common to both sides; inc is non-empty
        # when the remote has changes we don't
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote is empty, nothing to warn about
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                # count what the remote's heads would be after the push
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head survives unless one of our outgoing
                        # heads descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        # unknown remote head: it will still be there
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            # forced push over unseen remote changes: warn but continue
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1327 1332
1328 1333 def push_addchangegroup(self, remote, force, revs):
1329 1334 lock = remote.lock()
1330 1335
1331 1336 ret = self.prepush(remote, force, revs)
1332 1337 if ret[0] is not None:
1333 1338 cg, remote_heads = ret
1334 1339 return remote.addchangegroup(cg, 'push', self.url())
1335 1340 return ret[1]
1336 1341
1337 1342 def push_unbundle(self, remote, force, revs):
1338 1343 # local repo finds heads on server, finds out what revs it
1339 1344 # must push. once revs transferred, if server finds it has
1340 1345 # different heads (someone else won commit/push race), server
1341 1346 # aborts.
1342 1347
1343 1348 ret = self.prepush(remote, force, revs)
1344 1349 if ret[0] is not None:
1345 1350 cg, remote_heads = ret
1346 1351 if force: remote_heads = ['force']
1347 1352 return remote.unbundle(cg, remote_heads, 'push')
1348 1353 return ret[1]
1349 1354
1350 1355 def changegroupinfo(self, nodes):
1351 1356 self.ui.note(_("%d changesets found\n") % len(nodes))
1352 1357 if self.ui.debugflag:
1353 1358 self.ui.debug(_("List of changesets:\n"))
1354 1359 for node in nodes:
1355 1360 self.ui.debug("%s\n" % hex(node))
1356 1361
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1628 1633
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # every descendant of basenodes goes into the group
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the changelog revs being sent, for linkrev tests
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        # a changeset's owning changenode is itself
        def identity(x):
            return x

        # yield the nodes of a revlog whose linkrev points into revset,
        # in revision (storage) order
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # record every file touched by an outgoing changeset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a revlog node back to the changenode it was introduced by
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # changesets first ...
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # ... then manifests ...
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # ... then one group per changed file
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1695 1700
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # these closures capture 'cl', which is bound later inside the
        # try block below
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            # cor/cnr: changelog rev counts before/after the group
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                # an empty chunk marks the end of the file groups
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # commit the appendfile data to the real changelog files
            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may raise and thereby abort the transaction
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1802 1807
1803 1808
1804 1809 def stream_in(self, remote):
1805 1810 fp = remote.stream_out()
1806 1811 l = fp.readline()
1807 1812 try:
1808 1813 resp = int(l)
1809 1814 except ValueError:
1810 1815 raise util.UnexpectedOutput(
1811 1816 _('Unexpected response from remote server:'), l)
1812 1817 if resp == 1:
1813 1818 raise util.Abort(_('operation forbidden by server'))
1814 1819 elif resp == 2:
1815 1820 raise util.Abort(_('locking the remote repository failed'))
1816 1821 elif resp != 0:
1817 1822 raise util.Abort(_('the server sent an unknown error code'))
1818 1823 self.ui.status(_('streaming all changes\n'))
1819 1824 l = fp.readline()
1820 1825 try:
1821 1826 total_files, total_bytes = map(int, l.split(' ', 1))
1822 1827 except ValueError, TypeError:
1823 1828 raise util.UnexpectedOutput(
1824 1829 _('Unexpected response from remote server:'), l)
1825 1830 self.ui.status(_('%d files to transfer, %s of data\n') %
1826 1831 (total_files, util.bytecount(total_bytes)))
1827 1832 start = time.time()
1828 1833 for i in xrange(total_files):
1829 1834 # XXX doesn't support '\n' or '\r' in filenames
1830 1835 l = fp.readline()
1831 1836 try:
1832 1837 name, size = l.split('\0', 1)
1833 1838 size = int(size)
1834 1839 except ValueError, TypeError:
1835 1840 raise util.UnexpectedOutput(
1836 1841 _('Unexpected response from remote server:'), l)
1837 1842 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1838 1843 ofp = self.sopener(name, 'w')
1839 1844 for chunk in util.filechunkiter(fp, limit=size):
1840 1845 ofp.write(chunk)
1841 1846 ofp.close()
1842 1847 elapsed = time.time() - start
1843 1848 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1844 1849 (util.bytecount(total_bytes), elapsed,
1845 1850 util.bytecount(total_bytes / elapsed)))
1846 1851 self.reload()
1847 1852 return len(self.heads()) + 1
1848 1853
1849 1854 def clone(self, remote, heads=[], stream=False):
1850 1855 '''clone remote repository.
1851 1856
1852 1857 keyword arguments:
1853 1858 heads: list of revs to clone (forces use of pull)
1854 1859 stream: use streaming clone if possible'''
1855 1860
1856 1861 # now, all clients that can request uncompressed clones can
1857 1862 # read repo formats supported by all servers that can serve
1858 1863 # them.
1859 1864
1860 1865 # if revlog format changes, client will have to check version
1861 1866 # and format flags on "stream" capability, and use
1862 1867 # uncompressed only if compatible.
1863 1868
1864 1869 if stream and not heads and remote.capable('stream'):
1865 1870 return self.stream_in(remote)
1866 1871 return self.pull(remote, heads)
1867 1872
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable performing the queued (src, dest) renames.

    The pairs in *files* are snapshotted as tuples immediately, so the
    returned closure holds no reference back to the caller's structures.
    """
    pending = [tuple(pair) for pair in files]
    def run_renames():
        for source, destination in pending:
            util.rename(source, destination)
    return run_renames
1875 1880
def instance(ui, path, create):
    """Open (or create) a localrepository, stripping any 'file:' scheme."""
    repo_path = util.drop_scheme('file', path)
    return localrepository(ui, repo_path, create)
1878 1883
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now