##// END OF EJS Templates
Commit message: Use local encoding for the "HG: branch" line in the commit editor.
Author: Thomas Arendsen Hein
Changeset r4021:1590558e, on the default branch.
parent child Browse files
Show More
@@ -1,1884 +1,1884
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, appendfile, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
    def __del__(self):
        # Drop the transaction handle on collection so a pending
        # transaction object and the repository do not keep each other
        # alive through their mutual references.
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create=1, initialize) the repository at path.

        parentui: the ui instance this repository's ui is derived from.
        path: repository root; when None, search upward from the current
        directory for one containing ".hg".
        create: when true, create a new repository at path.

        Raises repo.RepoError if no repository is found, if creation is
        requested where one already exists, or if the repository lists a
        requirement this version does not support.
        """
        repo.repository.__init__(self)
        if not path:
            # walk up from the cwd looking for a .hg directory
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.path = os.path.join(path, ".hg")
        self.root = os.path.realpath(path)
        self.origroot = path
        self.opener = util.opener(self.path)   # opens files under .hg
        self.wopener = util.opener(self.root)  # opens working-dir files

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                os.mkdir(os.path.join(self.path, "store"))
                requirements = ("revlogv1", "store")
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # no requires file: old-style repository, no requirements
                requirements = []
        # check them
        for r in requirements:
            if r not in self.supported:
                raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: with the "store" requirement, tracked file names
        # are encoded on disk and the revlogs live under .hg/store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        # revlog format/flags come from the [revlog] config section
        v = self.ui.configrevlog()
        self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
        self.revlogv1 = self.revlogversion != revlog.REVLOGV0
        fl = v.get('flags', None)
        flags = 0
        if fl != None:
            for x in fl.split():
                flags |= revlog.flagstr(x)
        elif self.revlogv1:
            flags = revlog.REVLOG_DEFAULT_FLAGS

        v = self.revlogversion | flags
        self.manifest = manifest.manifest(self.sopener, v)
        self.changelog = changelog.changelog(self.sopener, v)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # the changelog might not have the inline index flag
        # on. If the format of the changelog is the same as found in
        # .hgrc, apply any flags found in the .hgrc as well.
        # Otherwise, just version from the changelog
        v = self.changelog.version
        if v == self.revlogversion:
            v |= flags
        self.revlogversion = v

        # lazily-filled caches, invalidated by reload()
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None

        # symlink detection helper; constant False where unsupported
        self._link = lambda x: False
        if util.checklink(self.root):
            r = self.root # avoid circular reference in lambda
            self._link = lambda x: util.is_link(os.path.join(r, x))

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
129 129
130 130 def url(self):
131 131 return 'file:' + self.root
132 132
    def hook(self, name, throw=False, **args):
        """Run every hook configured for name, in sorted config order.

        "python:mod.func" entries are imported and called with ui, repo,
        hooktype and args as keywords; other entries run as shell
        commands with args exported as HG_* environment variables.
        Returns a true value if any hook failed; with throw=True a
        failure raises util.Abort instead.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            d = funcname.rfind('.')
            if d == -1:
                raise util.Abort(_('%s hook is invalid ("%s" not in a module)')
                                 % (hname, funcname))
            modname = funcname[:d]
            try:
                obj = __import__(modname)
            except ImportError:
                try:
                    # extensions are loaded with hgext_ prefix
                    obj = __import__("hgext_%s" % modname)
                except ImportError:
                    raise util.Abort(_('%s hook is invalid '
                                       '(import of "%s" failed)') %
                                     (hname, modname))
            try:
                # walk the dotted path down to the callable
                for p in funcname.split('.')[1:]:
                    obj = getattr(obj, p)
            except AttributeError, err:
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not defined)') %
                                 (hname, funcname))
            if not callable(obj):
                raise util.Abort(_('%s hook is invalid '
                                   '("%s" is not callable)') %
                                 (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: args become HG_* environment variables
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # hooks fire when the part before any "." matches name
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
213 213
214 214 tag_disallowed = ':\r\n'
215 215
    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing

        raises util.Abort if name contains a disallowed character or if
        .hgtags has uncommitted local changes.'''

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # refuse to commit the tag if .hgtags is locally modified, so we
        # don't sweep unrelated edits into the tag changeset
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        self.wfile('.hgtags', 'ab').write(line)
        if self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        self.commit(['.hgtags'], message, user, date)
        self.hook('tag', node=hex(node), tag=name, local=local)
259 259
260 260 def tags(self):
261 261 '''return a mapping of tag to node'''
262 262 if not self.tagscache:
263 263 self.tagscache = {}
264 264
265 265 def parsetag(line, context):
266 266 if not line:
267 267 return
268 268 s = l.split(" ", 1)
269 269 if len(s) != 2:
270 270 self.ui.warn(_("%s: cannot parse entry\n") % context)
271 271 return
272 272 node, key = s
273 273 key = util.tolocal(key.strip()) # stored in UTF-8
274 274 try:
275 275 bin_n = bin(node)
276 276 except TypeError:
277 277 self.ui.warn(_("%s: node '%s' is not well formed\n") %
278 278 (context, node))
279 279 return
280 280 if bin_n not in self.changelog.nodemap:
281 281 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
282 282 (context, key))
283 283 return
284 284 self.tagscache[key] = bin_n
285 285
286 286 # read the tags file from each head, ending with the tip,
287 287 # and add each tag found to the map, with "newer" ones
288 288 # taking precedence
289 289 f = None
290 290 for rev, node, fnode in self._hgtagsnodes():
291 291 f = (f and f.filectx(fnode) or
292 292 self.filectx('.hgtags', fileid=fnode))
293 293 count = 0
294 294 for l in f.data().splitlines():
295 295 count += 1
296 296 parsetag(l, _("%s, line %d") % (str(f), count))
297 297
298 298 try:
299 299 f = self.opener("localtags")
300 300 count = 0
301 301 for l in f:
302 302 # localtags are stored in the local character set
303 303 # while the internal tag table is stored in UTF-8
304 304 l = util.fromlocal(l)
305 305 count += 1
306 306 parsetag(l, _("localtags, line %d") % count)
307 307 except IOError:
308 308 pass
309 309
310 310 self.tagscache['tip'] = self.changelog.tip()
311 311
312 312 return self.tagscache
313 313
    def _hgtagsnodes(self):
        """Return [(rev, node, fnode)] for each head with a .hgtags
        file, oldest head first, keeping only the newest occurrence of
        each distinct .hgtags file node."""
        heads = self.heads()
        heads.reverse()          # oldest head first
        last = {}                # fnode -> index of its last sighting in ret
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # same .hgtags revision seen before: drop the older entry
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
331 331
332 332 def tagslist(self):
333 333 '''return a list of tags ordered by revision'''
334 334 l = []
335 335 for t, n in self.tags().items():
336 336 try:
337 337 r = self.changelog.rev(n)
338 338 except:
339 339 r = -2 # sort to the beginning of the list if unknown
340 340 l.append((r, t, n))
341 341 l.sort()
342 342 return [(t, n) for r, t, n in l]
343 343
344 344 def nodetags(self, node):
345 345 '''return the tags associated with a node'''
346 346 if not self.nodetagscache:
347 347 self.nodetagscache = {}
348 348 for t, n in self.tags().items():
349 349 self.nodetagscache.setdefault(n, []).append(t)
350 350 return self.nodetagscache.get(node, [])
351 351
    def _branchtags(self):
        """Return the branch name -> tip node map (UTF-8 keys), bringing
        the on-disk branch cache up to date first if it is stale."""
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            # cache is behind the changelog: scan the new revisions and
            # rewrite the cache file
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
361 361
362 362 def branchtags(self):
363 363 if self.branchcache is not None:
364 364 return self.branchcache
365 365
366 366 self.branchcache = {} # avoid recursion in changectx
367 367 partial = self._branchtags()
368 368
369 369 # the branch cache is stored on disk as UTF-8, but in the local
370 370 # charset internally
371 371 for k, v in partial.items():
372 372 self.branchcache[util.tolocal(k)] = v
373 373 return self.branchcache
374 374
    def _readbranchcache(self):
        """Read branches.cache; return (partial, tip node, tip rev).

        Any parse error or disagreement with the current changelog
        invalidates the cache: an empty map with nullid/nullrev is
        returned so the caller rebuilds from scratch.
        """
        partial = {}
        try:
            f = self.opener("branches.cache")
            lines = f.read().split('\n')
            f.close()
            # first line records the tip the cache was built against:
            # "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).rstrip().split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            # remaining lines are "<node hex> <branch name>"
            for l in lines:
                if not l: continue
                node, label = l.rstrip().split(" ", 1)
                partial[label] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # a broken cache is never fatal -- fall back to a rebuild;
            # the broad catch here is deliberate
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
398 398
399 399 def _writebranchcache(self, branches, tip, tiprev):
400 400 try:
401 401 f = self.opener("branches.cache", "w")
402 402 f.write("%s %s\n" % (hex(tip), tiprev))
403 403 for label, node in branches.iteritems():
404 404 f.write("%s %s\n" % (hex(node), label))
405 405 except IOError:
406 406 pass
407 407
408 408 def _updatebranchcache(self, partial, start, end):
409 409 for r in xrange(start, end):
410 410 c = self.changectx(r)
411 411 b = c.branch()
412 412 if b:
413 413 partial[b] = c.node()
414 414
    def lookup(self, key):
        """Resolve key to a binary changelog node.

        key may be '.', 'null', a revision number, a full or partial hex
        node, a tag, or a branch name; raises repo.RepoError when
        nothing matches.
        """
        if key == '.':
            # '.' is the first parent of the working directory
            key = self.dirstate.parents()[0]
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        # last resort: unambiguous hex prefix
        n = self.changelog._partialmatch(key)
        if n:
            return n
        raise repo.RepoError(_("unknown revision '%s'") % key)
433 433
434 434 def dev(self):
435 435 return os.lstat(self.path).st_dev
436 436
    def local(self):
        # this is a local, on-disk repository (as opposed to a remote
        # repository proxy)
        return True
439 439
440 440 def join(self, f):
441 441 return os.path.join(self.path, f)
442 442
443 443 def sjoin(self, f):
444 444 f = self.encodefn(f)
445 445 return os.path.join(self.spath, f)
446 446
447 447 def wjoin(self, f):
448 448 return os.path.join(self.root, f)
449 449
    def file(self, f):
        """Return the filelog for tracked file f (a single leading '/'
        is tolerated and stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f, self.revlogversion)
454 454
    def changectx(self, changeid=None):
        """Return the change context for changeid (default: tip)."""
        return context.changectx(self, changeid)
457 457
    def workingctx(self):
        """Return a context for the working directory."""
        return context.workingctx(self)
460 460
461 461 def parents(self, changeid=None):
462 462 '''
463 463 get list of changectxs for parents of changeid or working directory
464 464 '''
465 465 if changeid is None:
466 466 pl = self.dirstate.parents()
467 467 else:
468 468 n = self.changelog.lookup(changeid)
469 469 pl = self.changelog.parents(n)
470 470 if pl[1] == nullid:
471 471 return [self.changectx(pl[0])]
472 472 return [self.changectx(pl[0]), self.changectx(pl[1])]
473 473
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
478 478
    def getcwd(self):
        """Return the current directory relative to the repo root."""
        return self.dirstate.getcwd()
481 481
    def wfile(self, f, mode='r'):
        """Open file f relative to the working directory."""
        return self.wopener(f, mode)
484 484
485 485 def _filter(self, filter, filename, data):
486 486 if filter not in self.filterpats:
487 487 l = []
488 488 for pat, cmd in self.ui.configitems(filter):
489 489 mf = util.matcher(self.root, "", [pat], [], [])[1]
490 490 l.append((mf, cmd))
491 491 self.filterpats[filter] = l
492 492
493 493 for mf, cmd in self.filterpats[filter]:
494 494 if mf(filename):
495 495 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
496 496 data = util.filter(data, cmd)
497 497 break
498 498
499 499 return data
500 500
501 501 def wread(self, filename):
502 502 if self._link(filename):
503 503 data = os.readlink(self.wjoin(filename))
504 504 else:
505 505 data = self.wopener(filename, 'r').read()
506 506 return self._filter("encode", filename, data)
507 507
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working directory after
        applying "decode" filters; 'l' in flags creates a symlink, 'x'
        sets the executable bit."""
        data = self._filter("decode", filename, data)
        if "l" in flags:
            # remove any existing file first; symlink() fails on
            # an existing path
            try:
                os.unlink(self.wjoin(filename))
            except OSError:
                pass
            os.symlink(data, self.wjoin(filename))
        else:
            try:
                # replace an existing symlink with a regular file
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
524 524
    def wwritedata(self, filename, data):
        """Return data as wwrite would store it (after "decode"
        filters), without touching the filesystem."""
        return self._filter("decode", filename, data)
527 527
    def transaction(self):
        """Return a transaction on the store.

        If one is already running, return a nested handle to it;
        otherwise snapshot the dirstate for rollback and open a new
        journaled transaction.
        """
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        # when the transaction closes, the journal files are renamed to
        # the undo files used by rollback()
        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
547 547
    def recover(self):
        """Roll back an interrupted transaction, if a journal exists.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        l = self.lock()
        if os.path.exists(self.sjoin("journal")):
            self.ui.status(_("rolling back interrupted transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("journal"))
            self.reload()
            return True
        else:
            self.ui.warn(_("no interrupted transaction available\n"))
            return False
558 558
    def rollback(self, wlock=None):
        """Undo the last committed transaction, restoring the store and
        the saved dirstate from the undo files."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            # caches and in-memory state are now stale
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
571 571
    def wreload(self):
        # re-read the dirstate from disk (working-dir state)
        self.dirstate.read()
574 574
    def reload(self):
        """Reload store data from disk and drop derived caches."""
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
580 580
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file lockname and return the lock object.

        With wait true, retry for up to ui.timeout (default 600)
        seconds when the lock is held; acquirefn, if given, runs after
        acquisition (typically a reload).
        """
        try:
            # first try without waiting
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
596 596
    def lock(self, wait=1):
        """Acquire the store lock, reloading store state on acquisition."""
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
600 600
    def wlock(self, wait=1):
        """Acquire the working-directory lock; the dirstate is written
        on release and re-read on acquisition."""
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
605 605
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filelog node for fn, appending fn to changelist
        if a new revision was actually created.  manifest1/manifest2 are
        the manifests of the commit's parents; copy/rename metadata from
        the dirstate is recorded in the filelog entry.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = nullid
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
647 647
    def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
        """Commit files with explicit parents, bypassing the dirstate
        walk; parents default to the dirstate parents when p1 is None."""
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, wlock=wlock, extra=extra)
653 653
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset and return its node (or None if
        nothing changed).

        With p1 None (the normal case) the dirstate supplies the files
        and parents; otherwise this is a rawcommit-style commit of the
        given files against explicit parents.  An empty text (or
        force_editor) launches the commit editor.
        """

        commit = []     # files to check in
        remove = []     # files to drop from the manifest
        changed = []    # files that produced a new filelog revision
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                # round-trip to verify the name is valid UTF-8
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch", "") # stored in UTF-8
            # refuse a no-op commit (nothing changed, same branch)
            if not commit and not remove and not force and p2 == nullid and \
               branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, is_exec(f), is_link(f))
            except OSError:
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    # rawcommit: a missing file becomes a removal
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                # branchname is UTF-8; show it in the local encoding
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # normalize the message; an empty message aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
802 802
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walk a committed revision via its manifest
            fdict = dict.fromkeys(files)
            for fn in self.manifest.read(self.changelog.read(node)[0]):
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # whatever is left in fdict was requested but not found
            for fn in fdict:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
836 836
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a 7-tuple of file lists:
        (modified, added, removed, deleted, unknown, ignored, clean) --
        ignored/clean are only populated when requested.
        """

        def fcmp(fn, mf):
            # true if working-dir contents of fn differ from mf's version
            t1 = self.wread(fn)
            return self.file(fn).cmp(mf.get(fn, nullid), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            # a write lock lets us fix up stat-only lookups below; a
            # failure to take it is not fatal
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mf2 = mfmatches(self.dirstate.parents()[0])
                    for f in lookup:
                        if fcmp(f, mf2):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                # record the verified state to avoid
                                # re-comparing next time
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever is left in mf1 exists only on the node1 side
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
935 935
936 936 def add(self, list, wlock=None):
937 937 if not wlock:
938 938 wlock = self.wlock()
939 939 for f in list:
940 940 p = self.wjoin(f)
941 941 if not os.path.exists(p):
942 942 self.ui.warn(_("%s does not exist!\n") % f)
943 943 elif not os.path.isfile(p):
944 944 self.ui.warn(_("%s not added: only files supported currently\n")
945 945 % f)
946 946 elif self.dirstate.state(f) in 'an':
947 947 self.ui.warn(_("%s already tracked!\n") % f)
948 948 else:
949 949 self.dirstate.update([f], "a")
950 950
951 951 def forget(self, list, wlock=None):
952 952 if not wlock:
953 953 wlock = self.wlock()
954 954 for f in list:
955 955 if self.dirstate.state(f) not in 'ai':
956 956 self.ui.warn(_("%s not added!\n") % f)
957 957 else:
958 958 self.dirstate.forget([f])
959 959
    def remove(self, list, unlink=False, wlock=None):
        # Schedule files for removal at the next commit.  With
        # unlink=True the working copies are deleted first (files that
        # are already gone are tolerated).  Files that still exist in
        # the working dir, were only scheduled for add, or are untracked
        # get a warning instead of being marked removed.
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # an already-missing file is fine when unlinking
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            p = self.wjoin(f)
            if os.path.exists(p):
                # without unlink=True the file must be gone from the
                # working dir before it can be marked removed
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # added but never committed: just forget it
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
980 980
981 981 def undelete(self, list, wlock=None):
982 982 p = self.dirstate.parents()[0]
983 983 mn = self.changelog.read(p)[0]
984 984 m = self.manifest.read(mn)
985 985 if not wlock:
986 986 wlock = self.wlock()
987 987 for f in list:
988 988 if self.dirstate.state(f) not in "r":
989 989 self.ui.warn("%s not removed!\n" % f)
990 990 else:
991 991 t = self.file(f).read(m[f])
992 992 self.wwrite(f, t, m.flags(f))
993 993 self.dirstate.update([f], "n")
994 994
995 995 def copy(self, source, dest, wlock=None):
996 996 p = self.wjoin(dest)
997 997 if not os.path.exists(p):
998 998 self.ui.warn(_("%s does not exist!\n") % dest)
999 999 elif not os.path.isfile(p):
1000 1000 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1001 1001 else:
1002 1002 if not wlock:
1003 1003 wlock = self.wlock()
1004 1004 if self.dirstate.state(dest) == '?':
1005 1005 self.dirstate.update([dest], "a")
1006 1006 self.dirstate.copy(source, dest)
1007 1007
1008 1008 def heads(self, start=None):
1009 1009 heads = self.changelog.heads(start)
1010 1010 # sort the output in rev descending order
1011 1011 heads = [(-self.changelog.rev(h), h) for h in heads]
1012 1012 heads.sort()
1013 1013 return [n for (r, n) in heads]
1014 1014
1015 1015 def branches(self, nodes):
1016 1016 if not nodes:
1017 1017 nodes = [self.changelog.tip()]
1018 1018 b = []
1019 1019 for n in nodes:
1020 1020 t = n
1021 1021 while 1:
1022 1022 p = self.changelog.parents(n)
1023 1023 if p[1] != nullid or p[0] == nullid:
1024 1024 b.append((t, n, p[0], p[1]))
1025 1025 break
1026 1026 n = p[0]
1027 1027 return b
1028 1028
1029 1029 def between(self, pairs):
1030 1030 r = []
1031 1031
1032 1032 for top, bottom in pairs:
1033 1033 n, l, i = top, [], 0
1034 1034 f = 1
1035 1035
1036 1036 while n != bottom:
1037 1037 p = self.changelog.parents(n)[0]
1038 1038 if i == f:
1039 1039 l.append(n)
1040 1040 f = f * 2
1041 1041 n = p
1042 1042 i += 1
1043 1043
1044 1044 r.append(l)
1045 1045
1046 1046 return r
1047 1047
1048 1048 def findincoming(self, remote, base=None, heads=None, force=False):
1049 1049 """Return list of roots of the subsets of missing nodes from remote
1050 1050
1051 1051 If base dict is specified, assume that these nodes and their parents
1052 1052 exist on the remote side and that no child of a node of base exists
1053 1053 in both remote and self.
1054 1054 Furthermore base will be updated to include the nodes that exists
1055 1055 in self and remote but no children exists in self and remote.
1056 1056 If a list of heads is specified, return only nodes which are heads
1057 1057 or ancestors of these heads.
1058 1058
1059 1059 All the ancestors of base are in self and in remote.
1060 1060 All the descendants of the list returned are missing in self.
1061 1061 (and so we know that the rest of the nodes are missing in remote, see
1062 1062 outgoing)
1063 1063 """
1064 1064 m = self.changelog.nodemap
1065 1065 search = []
1066 1066 fetch = {}
1067 1067 seen = {}
1068 1068 seenbranch = {}
1069 1069 if base == None:
1070 1070 base = {}
1071 1071
1072 1072 if not heads:
1073 1073 heads = remote.heads()
1074 1074
1075 1075 if self.changelog.tip() == nullid:
1076 1076 base[nullid] = 1
1077 1077 if heads != [nullid]:
1078 1078 return [nullid]
1079 1079 return []
1080 1080
1081 1081 # assume we're closer to the tip than the root
1082 1082 # and start by examining the heads
1083 1083 self.ui.status(_("searching for changes\n"))
1084 1084
1085 1085 unknown = []
1086 1086 for h in heads:
1087 1087 if h not in m:
1088 1088 unknown.append(h)
1089 1089 else:
1090 1090 base[h] = 1
1091 1091
1092 1092 if not unknown:
1093 1093 return []
1094 1094
1095 1095 req = dict.fromkeys(unknown)
1096 1096 reqcnt = 0
1097 1097
1098 1098 # search through remote branches
1099 1099 # a 'branch' here is a linear segment of history, with four parts:
1100 1100 # head, root, first parent, second parent
1101 1101 # (a branch always has two parents (or none) by definition)
1102 1102 unknown = remote.branches(unknown)
1103 1103 while unknown:
1104 1104 r = []
1105 1105 while unknown:
1106 1106 n = unknown.pop(0)
1107 1107 if n[0] in seen:
1108 1108 continue
1109 1109
1110 1110 self.ui.debug(_("examining %s:%s\n")
1111 1111 % (short(n[0]), short(n[1])))
1112 1112 if n[0] == nullid: # found the end of the branch
1113 1113 pass
1114 1114 elif n in seenbranch:
1115 1115 self.ui.debug(_("branch already found\n"))
1116 1116 continue
1117 1117 elif n[1] and n[1] in m: # do we know the base?
1118 1118 self.ui.debug(_("found incomplete branch %s:%s\n")
1119 1119 % (short(n[0]), short(n[1])))
1120 1120 search.append(n) # schedule branch range for scanning
1121 1121 seenbranch[n] = 1
1122 1122 else:
1123 1123 if n[1] not in seen and n[1] not in fetch:
1124 1124 if n[2] in m and n[3] in m:
1125 1125 self.ui.debug(_("found new changeset %s\n") %
1126 1126 short(n[1]))
1127 1127 fetch[n[1]] = 1 # earliest unknown
1128 1128 for p in n[2:4]:
1129 1129 if p in m:
1130 1130 base[p] = 1 # latest known
1131 1131
1132 1132 for p in n[2:4]:
1133 1133 if p not in req and p not in m:
1134 1134 r.append(p)
1135 1135 req[p] = 1
1136 1136 seen[n[0]] = 1
1137 1137
1138 1138 if r:
1139 1139 reqcnt += 1
1140 1140 self.ui.debug(_("request %d: %s\n") %
1141 1141 (reqcnt, " ".join(map(short, r))))
1142 1142 for p in xrange(0, len(r), 10):
1143 1143 for b in remote.branches(r[p:p+10]):
1144 1144 self.ui.debug(_("received %s:%s\n") %
1145 1145 (short(b[0]), short(b[1])))
1146 1146 unknown.append(b)
1147 1147
1148 1148 # do binary search on the branches we found
1149 1149 while search:
1150 1150 n = search.pop(0)
1151 1151 reqcnt += 1
1152 1152 l = remote.between([(n[0], n[1])])[0]
1153 1153 l.append(n[1])
1154 1154 p = n[0]
1155 1155 f = 1
1156 1156 for i in l:
1157 1157 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1158 1158 if i in m:
1159 1159 if f <= 2:
1160 1160 self.ui.debug(_("found new branch changeset %s\n") %
1161 1161 short(p))
1162 1162 fetch[p] = 1
1163 1163 base[i] = 1
1164 1164 else:
1165 1165 self.ui.debug(_("narrowed branch search to %s:%s\n")
1166 1166 % (short(p), short(i)))
1167 1167 search.append((p, i))
1168 1168 break
1169 1169 p, f = i, f * 2
1170 1170
1171 1171 # sanity check our fetch list
1172 1172 for f in fetch.keys():
1173 1173 if f in m:
1174 1174 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1175 1175
1176 1176 if base.keys() == [nullid]:
1177 1177 if force:
1178 1178 self.ui.warn(_("warning: repository is unrelated\n"))
1179 1179 else:
1180 1180 raise util.Abort(_("repository is unrelated"))
1181 1181
1182 1182 self.ui.debug(_("found new changesets starting at ") +
1183 1183 " ".join([short(f) for f in fetch]) + "\n")
1184 1184
1185 1185 self.ui.debug(_("%d total queries\n") % reqcnt)
1186 1186
1187 1187 return fetch.keys()
1188 1188
1189 1189 def findoutgoing(self, remote, base=None, heads=None, force=False):
1190 1190 """Return list of nodes that are roots of subsets not in remote
1191 1191
1192 1192 If base dict is specified, assume that these nodes and their parents
1193 1193 exist on the remote side.
1194 1194 If a list of heads is specified, return only nodes which are heads
1195 1195 or ancestors of these heads, and return a second element which
1196 1196 contains all remote heads which get new children.
1197 1197 """
1198 1198 if base == None:
1199 1199 base = {}
1200 1200 self.findincoming(remote, base, heads, force=force)
1201 1201
1202 1202 self.ui.debug(_("common changesets up to ")
1203 1203 + " ".join(map(short, base.keys())) + "\n")
1204 1204
1205 1205 remain = dict.fromkeys(self.changelog.nodemap)
1206 1206
1207 1207 # prune everything remote has from the tree
1208 1208 del remain[nullid]
1209 1209 remove = base.keys()
1210 1210 while remove:
1211 1211 n = remove.pop(0)
1212 1212 if n in remain:
1213 1213 del remain[n]
1214 1214 for p in self.changelog.parents(n):
1215 1215 remove.append(p)
1216 1216
1217 1217 # find every node whose parents have been pruned
1218 1218 subset = []
1219 1219 # find every remote head that will get new children
1220 1220 updated_heads = {}
1221 1221 for n in remain:
1222 1222 p1, p2 = self.changelog.parents(n)
1223 1223 if p1 not in remain and p2 not in remain:
1224 1224 subset.append(n)
1225 1225 if heads:
1226 1226 if p1 in heads:
1227 1227 updated_heads[p1] = True
1228 1228 if p2 in heads:
1229 1229 updated_heads[p2] = True
1230 1230
1231 1231 # this is the set of all roots we have to push
1232 1232 if heads:
1233 1233 return subset, updated_heads.keys()
1234 1234 else:
1235 1235 return subset
1236 1236
1237 1237 def pull(self, remote, heads=None, force=False, lock=None):
1238 1238 mylock = False
1239 1239 if not lock:
1240 1240 lock = self.lock()
1241 1241 mylock = True
1242 1242
1243 1243 try:
1244 1244 fetch = self.findincoming(remote, force=force)
1245 1245 if fetch == [nullid]:
1246 1246 self.ui.status(_("requesting all changes\n"))
1247 1247
1248 1248 if not fetch:
1249 1249 self.ui.status(_("no changes found\n"))
1250 1250 return 0
1251 1251
1252 1252 if heads is None:
1253 1253 cg = remote.changegroup(fetch, 'pull')
1254 1254 else:
1255 1255 if 'changegroupsubset' not in remote.capabilities:
1256 1256 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1257 1257 cg = remote.changegroupsubset(fetch, heads, 'pull')
1258 1258 return self.addchangegroup(cg, 'pull', remote.url())
1259 1259 finally:
1260 1260 if mylock:
1261 1261 lock.release()
1262 1262
1263 1263 def push(self, remote, force=False, revs=None):
1264 1264 # there are two ways to push to remote repo:
1265 1265 #
1266 1266 # addchangegroup assumes local user can lock remote
1267 1267 # repo (local filesystem, old ssh servers).
1268 1268 #
1269 1269 # unbundle assumes local user cannot lock remote repo (new ssh
1270 1270 # servers, http servers).
1271 1271
1272 1272 if remote.capable('unbundle'):
1273 1273 return self.push_unbundle(remote, force, revs)
1274 1274 return self.push_addchangegroup(remote, force, revs)
1275 1275
    def prepush(self, remote, force, revs):
        # Compute the changegroup to push and sanity-check the result.
        # Returns (changegroup, remote_heads) when there is something to
        # push, or (None, 1) when there is nothing to push or when the
        # push would create new remote heads without force.
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote repo is empty: no heads can be "new"
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # remote head known locally: it stays a head
                        # only if no outgoing head descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        # unknown locally: remains a remote head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1331 1331
1332 1332 def push_addchangegroup(self, remote, force, revs):
1333 1333 lock = remote.lock()
1334 1334
1335 1335 ret = self.prepush(remote, force, revs)
1336 1336 if ret[0] is not None:
1337 1337 cg, remote_heads = ret
1338 1338 return remote.addchangegroup(cg, 'push', self.url())
1339 1339 return ret[1]
1340 1340
1341 1341 def push_unbundle(self, remote, force, revs):
1342 1342 # local repo finds heads on server, finds out what revs it
1343 1343 # must push. once revs transferred, if server finds it has
1344 1344 # different heads (someone else won commit/push race), server
1345 1345 # aborts.
1346 1346
1347 1347 ret = self.prepush(remote, force, revs)
1348 1348 if ret[0] is not None:
1349 1349 cg, remote_heads = ret
1350 1350 if force: remote_heads = ['force']
1351 1351 return remote.unbundle(cg, remote_heads, 'push')
1352 1352 return ret[1]
1353 1353
1354 1354 def changegroupinfo(self, nodes):
1355 1355 self.ui.note(_("%d changesets found\n") % len(nodes))
1356 1356 if self.ui.debugflag:
1357 1357 self.ui.debug(_("List of changesets:\n"))
1358 1358 for node in nodes:
1359 1359 self.ui.debug("%s\n" % hex(node))
1360 1360
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too. nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known. The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function. Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents. This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup. The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to. It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        # NOTE(review): gengroup is a generator, so the pruning and
        # collection work above runs lazily as the returned chunkbuffer
        # is read by the caller.
        return util.chunkbuffer(gengroup())
1632 1632
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # everything between basenodes and our heads goes out
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        # a changenode "owns" itself, so its lookup is identity
        def identity(x):
            return x

        def gennodelst(revlog):
            # yield revlog nodes whose linkrev is in the outgoing set
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # record every file touched by an outgoing changeset
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a revlog node back to the changenode that introduced it
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        # the stream is produced lazily as the chunkbuffer is read
        return util.chunkbuffer(gengroup())
1699 1699
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            # an incoming changeset's linkrev is the rev it will occupy
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = None
        try:
            cl = appendfile.appendchangelog(self.sopener,
                                            self.changelog.version)

            oldheads = len(cl.heads())

            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1 # rev of tip before adding the group
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, tr, 1) is None:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1 # rev of tip after adding the group
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, tr)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, tr) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            cl.writedata()
        finally:
            if cl:
                cl.cleanup()

        # make changelog see real files again
        self.changelog = changelog.changelog(self.sopener,
                                             self.changelog.version)
        self.changelog.checkinlinesize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # pretxnchangegroup may still veto the whole group by raising
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1806 1806
1807 1807
1808 1808 def stream_in(self, remote):
1809 1809 fp = remote.stream_out()
1810 1810 l = fp.readline()
1811 1811 try:
1812 1812 resp = int(l)
1813 1813 except ValueError:
1814 1814 raise util.UnexpectedOutput(
1815 1815 _('Unexpected response from remote server:'), l)
1816 1816 if resp == 1:
1817 1817 raise util.Abort(_('operation forbidden by server'))
1818 1818 elif resp == 2:
1819 1819 raise util.Abort(_('locking the remote repository failed'))
1820 1820 elif resp != 0:
1821 1821 raise util.Abort(_('the server sent an unknown error code'))
1822 1822 self.ui.status(_('streaming all changes\n'))
1823 1823 l = fp.readline()
1824 1824 try:
1825 1825 total_files, total_bytes = map(int, l.split(' ', 1))
1826 1826 except ValueError, TypeError:
1827 1827 raise util.UnexpectedOutput(
1828 1828 _('Unexpected response from remote server:'), l)
1829 1829 self.ui.status(_('%d files to transfer, %s of data\n') %
1830 1830 (total_files, util.bytecount(total_bytes)))
1831 1831 start = time.time()
1832 1832 for i in xrange(total_files):
1833 1833 # XXX doesn't support '\n' or '\r' in filenames
1834 1834 l = fp.readline()
1835 1835 try:
1836 1836 name, size = l.split('\0', 1)
1837 1837 size = int(size)
1838 1838 except ValueError, TypeError:
1839 1839 raise util.UnexpectedOutput(
1840 1840 _('Unexpected response from remote server:'), l)
1841 1841 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1842 1842 ofp = self.sopener(name, 'w')
1843 1843 for chunk in util.filechunkiter(fp, limit=size):
1844 1844 ofp.write(chunk)
1845 1845 ofp.close()
1846 1846 elapsed = time.time() - start
1847 1847 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1848 1848 (util.bytecount(total_bytes), elapsed,
1849 1849 util.bytecount(total_bytes / elapsed)))
1850 1850 self.reload()
1851 1851 return len(self.heads()) + 1
1852 1852
1853 1853 def clone(self, remote, heads=[], stream=False):
1854 1854 '''clone remote repository.
1855 1855
1856 1856 keyword arguments:
1857 1857 heads: list of revs to clone (forces use of pull)
1858 1858 stream: use streaming clone if possible'''
1859 1859
1860 1860 # now, all clients that can request uncompressed clones can
1861 1861 # read repo formats supported by all servers that can serve
1862 1862 # them.
1863 1863
1864 1864 # if revlog format changes, client will have to check version
1865 1865 # and format flags on "stream" capability, and use
1866 1866 # uncompressed only if compatible.
1867 1867
1868 1868 if stream and not heads and remote.capable('stream'):
1869 1869 return self.stream_in(remote)
1870 1870 return self.pull(remote, heads)
1871 1871
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callable that performs the queued (src, dest) renames.

    A plain closure holds only tuples of paths, so the returned
    callback keeps no reference back to the repository object.
    """
    pending = [tuple(pair) for pair in files]
    def flush():
        for source, target in pending:
            util.rename(source, target)
    return flush
1879 1879
def instance(ui, path, create):
    """Build a localrepository rooted at *path*.

    Any leading 'file:' scheme is stripped from *path* before it is
    handed to the repository constructor.
    """
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1882 1882
def islocal(path):
    """Report whether *path* is local — always true for this repo class."""
    return True
General Comments 0
You need to be logged in to leave comments. Login now