Use length of file instead of length of change for the dirstate entry.
Thomas Arendsen Hein
r863:a7e95e36 default
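
The diff below is a full listing of hg.py, Mercurial's repository classes (2005-era Python 2). The two gutter numbers on each line are the old and new line numbers; the functional change visible in this listing is at line 1587, inside localrepository.update(). When a file has just been merged by merge3() but the resulting dirstate keeps no second parent (p2 == nullid), the entry recorded for that file now uses len(file_.read(m1[f])), the length of the file's text in the first parent's manifest, instead of file_.length(...), which, as the commit message says, measures the length of the change rather than of the file. The value is passed as st_size and stored in the ">cllll" record written by dirstate.write(); dirstate.changes() later compares that stored size against os.stat(...).st_size and treats a mismatch as a modification. The sketch below restates that check; the helper name and signature are illustrative only, not Mercurial's API.

import os

# Minimal sketch (illustrative only, not Mercurial code): how the size stored
# in a dirstate entry is consumed.  dirstate.changes() tests the recorded size
# before the mtime, so a size taken from the "length of change" (the removed
# file_.length(...) call) would generally not match the working file and the
# file would be reported modified even when its contents are unchanged.
def status_from_entry(recorded_size, recorded_mtime, working_path):
    st = os.stat(working_path)
    if recorded_size != st.st_size:
        return "modified"   # size mismatch is reported outright
    if recorded_mtime != st.st_mtime:
        return "lookup"     # caller re-reads the file and compares contents
    return "clean"
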
@@ -1,2042 +1,2042 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff urlparse stat")
14 14 demandload(globals(), "bisect select")
15 15
16 16 class filelog(revlog):
17 17 def __init__(self, opener, path):
18 18 revlog.__init__(self, opener,
19 19 os.path.join("data", self.encodedir(path + ".i")),
20 20 os.path.join("data", self.encodedir(path + ".d")))
21 21
22 22 # This avoids a collision between a file named foo and a dir named
23 23 # foo.i or foo.d
24 24 def encodedir(self, path):
25 25 return (path
26 26 .replace(".hg/", ".hg.hg/")
27 27 .replace(".i/", ".i.hg/")
28 28 .replace(".d/", ".d.hg/"))
29 29
30 30 def decodedir(self, path):
31 31 return (path
32 32 .replace(".d.hg/", ".d/")
33 33 .replace(".i.hg/", ".i/")
34 34 .replace(".hg.hg/", ".hg/"))
35 35
36 36 def read(self, node):
37 37 t = self.revision(node)
38 38 if not t.startswith('\1\n'):
39 39 return t
40 40 s = t.find('\1\n', 2)
41 41 return t[s+2:]
42 42
43 43 def readmeta(self, node):
44 44 t = self.revision(node)
45 45 if not t.startswith('\1\n'):
46 46 return t
47 47 s = t.find('\1\n', 2)
48 48 mt = t[2:s]
49 49 for l in mt.splitlines():
50 50 k, v = l.split(": ", 1)
51 51 m[k] = v
52 52 return m
53 53
54 54 def add(self, text, meta, transaction, link, p1=None, p2=None):
55 55 if meta or text.startswith('\1\n'):
56 56 mt = ""
57 57 if meta:
58 58 mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
59 59 text = "\1\n" + "".join(mt) + "\1\n" + text
60 60 return self.addrevision(text, transaction, link, p1, p2)
61 61
62 62 def annotate(self, node):
63 63
64 64 def decorate(text, rev):
65 65 return ([rev] * len(text.splitlines()), text)
66 66
67 67 def pair(parent, child):
68 68 for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
69 69 child[0][b1:b2] = parent[0][a1:a2]
70 70 return child
71 71
72 72 # find all ancestors
73 73 needed = {node:1}
74 74 visit = [node]
75 75 while visit:
76 76 n = visit.pop(0)
77 77 for p in self.parents(n):
78 78 if p not in needed:
79 79 needed[p] = 1
80 80 visit.append(p)
81 81 else:
82 82 # count how many times we'll use this
83 83 needed[p] += 1
84 84
85 85 # sort by revision which is a topological order
86 86 visit = [ (self.rev(n), n) for n in needed.keys() ]
87 87 visit.sort()
88 88 hist = {}
89 89
90 90 for r,n in visit:
91 91 curr = decorate(self.read(n), self.linkrev(n))
92 92 for p in self.parents(n):
93 93 if p != nullid:
94 94 curr = pair(hist[p], curr)
95 95 # trim the history of unneeded revs
96 96 needed[p] -= 1
97 97 if not needed[p]:
98 98 del hist[p]
99 99 hist[n] = curr
100 100
101 101 return zip(hist[n][0], hist[n][1].splitlines(1))
102 102
103 103 class manifest(revlog):
104 104 def __init__(self, opener):
105 105 self.mapcache = None
106 106 self.listcache = None
107 107 self.addlist = None
108 108 revlog.__init__(self, opener, "00manifest.i", "00manifest.d")
109 109
110 110 def read(self, node):
111 111 if node == nullid: return {} # don't upset local cache
112 112 if self.mapcache and self.mapcache[0] == node:
113 113 return self.mapcache[1]
114 114 text = self.revision(node)
115 115 map = {}
116 116 flag = {}
117 117 self.listcache = (text, text.splitlines(1))
118 118 for l in self.listcache[1]:
119 119 (f, n) = l.split('\0')
120 120 map[f] = bin(n[:40])
121 121 flag[f] = (n[40:-1] == "x")
122 122 self.mapcache = (node, map, flag)
123 123 return map
124 124
125 125 def readflags(self, node):
126 126 if node == nullid: return {} # don't upset local cache
127 127 if not self.mapcache or self.mapcache[0] != node:
128 128 self.read(node)
129 129 return self.mapcache[2]
130 130
131 131 def diff(self, a, b):
132 132 # this is sneaky, as we're not actually using a and b
133 133 if self.listcache and self.addlist and self.listcache[0] == a:
134 134 d = mdiff.diff(self.listcache[1], self.addlist, 1)
135 135 if mdiff.patch(a, d) != b:
136 136 sys.stderr.write("*** sortdiff failed, falling back ***\n")
137 137 return mdiff.textdiff(a, b)
138 138 return d
139 139 else:
140 140 return mdiff.textdiff(a, b)
141 141
142 142 def add(self, map, flags, transaction, link, p1=None, p2=None,
143 143 changed=None):
144 144 # directly generate the mdiff delta from the data collected during
145 145 # the bisect loop below
146 146 def gendelta(delta):
147 147 i = 0
148 148 result = []
149 149 while i < len(delta):
150 150 start = delta[i][2]
151 151 end = delta[i][3]
152 152 l = delta[i][4]
153 153 if l == None:
154 154 l = ""
155 155 while i < len(delta) - 1 and start <= delta[i+1][2] \
156 156 and end >= delta[i+1][2]:
157 157 if delta[i+1][3] > end:
158 158 end = delta[i+1][3]
159 159 if delta[i+1][4]:
160 160 l += delta[i+1][4]
161 161 i += 1
162 162 result.append(struct.pack(">lll", start, end, len(l)) + l)
163 163 i += 1
164 164 return result
165 165
166 166 # apply the changes collected during the bisect loop to our addlist
167 167 def addlistdelta(addlist, delta):
168 168 # apply the deltas to the addlist. start from the bottom up
169 169 # so changes to the offsets don't mess things up.
170 170 i = len(delta)
171 171 while i > 0:
172 172 i -= 1
173 173 start = delta[i][0]
174 174 end = delta[i][1]
175 175 if delta[i][4]:
176 176 addlist[start:end] = [delta[i][4]]
177 177 else:
178 178 del addlist[start:end]
179 179 return addlist
180 180
181 181 # calculate the byte offset of the start of each line in the
182 182 # manifest
183 183 def calcoffsets(addlist):
184 184 offsets = [0] * (len(addlist) + 1)
185 185 offset = 0
186 186 i = 0
187 187 while i < len(addlist):
188 188 offsets[i] = offset
189 189 offset += len(addlist[i])
190 190 i += 1
191 191 offsets[i] = offset
192 192 return offsets
193 193
194 194 # if we're using the listcache, make sure it is valid and
195 195 # parented by the same node we're diffing against
196 196 if not changed or not self.listcache or not p1 or \
197 197 self.mapcache[0] != p1:
198 198 files = map.keys()
199 199 files.sort()
200 200
201 201 self.addlist = ["%s\000%s%s\n" %
202 202 (f, hex(map[f]), flags[f] and "x" or '')
203 203 for f in files]
204 204 cachedelta = None
205 205 else:
206 206 addlist = self.listcache[1]
207 207
208 208 # find the starting offset for each line in the add list
209 209 offsets = calcoffsets(addlist)
210 210
211 211 # combine the changed lists into one list for sorting
212 212 work = [[x, 0] for x in changed[0]]
213 213 work[len(work):] = [[x, 1] for x in changed[1]]
214 214 work.sort()
215 215
216 216 delta = []
217 217 bs = 0
218 218
219 219 for w in work:
220 220 f = w[0]
221 221 # bs will either be the index of the item or the insert point
222 222 bs = bisect.bisect(addlist, f, bs)
223 223 if bs < len(addlist):
224 224 fn = addlist[bs][:addlist[bs].index('\0')]
225 225 else:
226 226 fn = None
227 227 if w[1] == 0:
228 228 l = "%s\000%s%s\n" % (f, hex(map[f]),
229 229 flags[f] and "x" or '')
230 230 else:
231 231 l = None
232 232 start = bs
233 233 if fn != f:
234 234 # item not found, insert a new one
235 235 end = bs
236 236 if w[1] == 1:
237 237 sys.stderr.write("failed to remove %s from manifest\n"
238 238 % f)
239 239 sys.exit(1)
240 240 else:
241 241 # item is found, replace/delete the existing line
242 242 end = bs + 1
243 243 delta.append([start, end, offsets[start], offsets[end], l])
244 244
245 245 self.addlist = addlistdelta(addlist, delta)
246 246 if self.mapcache[0] == self.tip():
247 247 cachedelta = "".join(gendelta(delta))
248 248 else:
249 249 cachedelta = None
250 250
251 251 text = "".join(self.addlist)
252 252 if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
253 253 sys.stderr.write("manifest delta failure\n")
254 254 sys.exit(1)
255 255 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
256 256 self.mapcache = (n, map, flags)
257 257 self.listcache = (text, self.addlist)
258 258 self.addlist = None
259 259
260 260 return n
261 261
262 262 class changelog(revlog):
263 263 def __init__(self, opener):
264 264 revlog.__init__(self, opener, "00changelog.i", "00changelog.d")
265 265
266 266 def extract(self, text):
267 267 if not text:
268 268 return (nullid, "", "0", [], "")
269 269 last = text.index("\n\n")
270 270 desc = text[last + 2:]
271 271 l = text[:last].splitlines()
272 272 manifest = bin(l[0])
273 273 user = l[1]
274 274 date = l[2]
275 275 files = l[3:]
276 276 return (manifest, user, date, files, desc)
277 277
278 278 def read(self, node):
279 279 return self.extract(self.revision(node))
280 280
281 281 def add(self, manifest, list, desc, transaction, p1=None, p2=None,
282 282 user=None, date=None):
283 283 date = date or "%d %d" % (time.time(), time.timezone)
284 284 list.sort()
285 285 l = [hex(manifest), user, date] + list + ["", desc]
286 286 text = "\n".join(l)
287 287 return self.addrevision(text, transaction, self.count(), p1, p2)
288 288
289 289 class dirstate:
290 290 def __init__(self, opener, ui, root):
291 291 self.opener = opener
292 292 self.root = root
293 293 self.dirty = 0
294 294 self.ui = ui
295 295 self.map = None
296 296 self.pl = None
297 297 self.copies = {}
298 298 self.ignorefunc = None
299 299
300 300 def wjoin(self, f):
301 301 return os.path.join(self.root, f)
302 302
303 303 def ignore(self, f):
304 304 if not self.ignorefunc:
305 305 bigpat = []
306 306 try:
307 307 l = file(self.wjoin(".hgignore"))
308 308 for pat in l:
309 309 if pat != "\n":
310 310 p = util.pconvert(pat[:-1])
311 311 try:
312 312 r = re.compile(p)
313 313 except:
314 314 self.ui.warn("ignoring invalid ignore"
315 315 + " regular expression '%s'\n" % p)
316 316 else:
317 317 bigpat.append(util.pconvert(pat[:-1]))
318 318 except IOError: pass
319 319
320 320 if bigpat:
321 321 s = "(?:%s)" % (")|(?:".join(bigpat))
322 322 r = re.compile(s)
323 323 self.ignorefunc = r.search
324 324 else:
325 325 self.ignorefunc = util.never
326 326
327 327 return self.ignorefunc(f)
328 328
329 329 def __del__(self):
330 330 if self.dirty:
331 331 self.write()
332 332
333 333 def __getitem__(self, key):
334 334 try:
335 335 return self.map[key]
336 336 except TypeError:
337 337 self.read()
338 338 return self[key]
339 339
340 340 def __contains__(self, key):
341 341 if not self.map: self.read()
342 342 return key in self.map
343 343
344 344 def parents(self):
345 345 if not self.pl:
346 346 self.read()
347 347 return self.pl
348 348
349 349 def markdirty(self):
350 350 if not self.dirty:
351 351 self.dirty = 1
352 352
353 353 def setparents(self, p1, p2 = nullid):
354 354 self.markdirty()
355 355 self.pl = p1, p2
356 356
357 357 def state(self, key):
358 358 try:
359 359 return self[key][0]
360 360 except KeyError:
361 361 return "?"
362 362
363 363 def read(self):
364 364 if self.map is not None: return self.map
365 365
366 366 self.map = {}
367 367 self.pl = [nullid, nullid]
368 368 try:
369 369 st = self.opener("dirstate").read()
370 370 if not st: return
371 371 except: return
372 372
373 373 self.pl = [st[:20], st[20: 40]]
374 374
375 375 pos = 40
376 376 while pos < len(st):
377 377 e = struct.unpack(">cllll", st[pos:pos+17])
378 378 l = e[4]
379 379 pos += 17
380 380 f = st[pos:pos + l]
381 381 if '\0' in f:
382 382 f, c = f.split('\0')
383 383 self.copies[f] = c
384 384 self.map[f] = e[:4]
385 385 pos += l
386 386
387 387 def copy(self, source, dest):
388 388 self.read()
389 389 self.markdirty()
390 390 self.copies[dest] = source
391 391
392 392 def copied(self, file):
393 393 return self.copies.get(file, None)
394 394
395 395 def update(self, files, state, **kw):
396 396 ''' current states:
397 397 n normal
398 398 m needs merging
399 399 r marked for removal
400 400 a marked for addition'''
401 401
402 402 if not files: return
403 403 self.read()
404 404 self.markdirty()
405 405 for f in files:
406 406 if state == "r":
407 407 self.map[f] = ('r', 0, 0, 0)
408 408 else:
409 409 s = os.stat(os.path.join(self.root, f))
410 410 st_mode = kw.get('st_mode', s.st_mode)
411 411 st_size = kw.get('st_size', s.st_size)
412 412 st_mtime = kw.get('st_mtime', s.st_mtime)
413 413 self.map[f] = (state, st_mode, st_size, st_mtime)
414 414
415 415 def forget(self, files):
416 416 if not files: return
417 417 self.read()
418 418 self.markdirty()
419 419 for f in files:
420 420 try:
421 421 del self.map[f]
422 422 except KeyError:
423 423 self.ui.warn("not in dirstate: %s!\n" % f)
424 424 pass
425 425
426 426 def clear(self):
427 427 self.map = {}
428 428 self.markdirty()
429 429
430 430 def write(self):
431 431 st = self.opener("dirstate", "w")
432 432 st.write("".join(self.pl))
433 433 for f, e in self.map.items():
434 434 c = self.copied(f)
435 435 if c:
436 436 f = f + "\0" + c
437 437 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
438 438 st.write(e + f)
439 439 self.dirty = 0
440 440
441 441 def walk(self, files = None, match = util.always):
442 442 self.read()
443 443 dc = self.map.copy()
444 444 # walk all files by default
445 445 if not files: files = [self.root]
446 446 known = {'.hg': 1}
447 447 def seen(fn):
448 448 if fn in known: return True
449 449 known[fn] = 1
450 450 def traverse():
451 451 for f in util.unique(files):
452 452 f = os.path.join(self.root, f)
453 453 if os.path.isdir(f):
454 454 for dir, subdirs, fl in os.walk(f):
455 455 d = dir[len(self.root) + 1:]
456 456 nd = os.path.normpath(d)
457 457 if seen(nd):
458 458 subdirs[:] = []
459 459 continue
460 460 for sd in subdirs:
461 461 ds = os.path.join(nd, sd +'/')
462 462 if self.ignore(ds) or not match(ds):
463 463 subdirs.remove(sd)
464 464 subdirs.sort()
465 465 fl.sort()
466 466 for fn in fl:
467 467 fn = util.pconvert(os.path.join(d, fn))
468 468 yield 'f', fn
469 469 else:
470 470 yield 'f', f[len(self.root) + 1:]
471 471
472 472 ks = dc.keys()
473 473 ks.sort()
474 474 for k in ks:
475 475 yield 'm', k
476 476
477 477 # yield only files that match: all in dirstate, others only if
478 478 # not in .hgignore
479 479
480 480 for src, fn in util.unique(traverse()):
481 481 fn = os.path.normpath(fn)
482 482 if seen(fn): continue
483 483 if fn in dc:
484 484 del dc[fn]
485 485 elif self.ignore(fn):
486 486 continue
487 487 if match(fn):
488 488 yield src, fn
489 489
490 490 def changes(self, files=None, match=util.always):
491 491 self.read()
492 492 dc = self.map.copy()
493 493 lookup, modified, added, unknown = [], [], [], []
494 494 removed, deleted = [], []
495 495
496 496 for src, fn in self.walk(files, match):
497 497 try:
498 498 s = os.stat(os.path.join(self.root, fn))
499 499 except OSError:
500 500 continue
501 501 if not stat.S_ISREG(s.st_mode):
502 502 continue
503 503 c = dc.get(fn)
504 504 if c:
505 505 del dc[fn]
506 506 if c[0] == 'm':
507 507 modified.append(fn)
508 508 elif c[0] == 'a':
509 509 added.append(fn)
510 510 elif c[0] == 'r':
511 511 unknown.append(fn)
512 512 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
513 513 modified.append(fn)
514 514 elif c[3] != s.st_mtime:
515 515 lookup.append(fn)
516 516 else:
517 517 unknown.append(fn)
518 518
519 519 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
520 520 if c[0] == 'r':
521 521 removed.append(fn)
522 522 else:
523 523 deleted.append(fn)
524 524 return (lookup, modified, added, removed + deleted, unknown)
525 525
526 526 # used to avoid circular references so destructors work
527 527 def opener(base):
528 528 p = base
529 529 def o(path, mode="r"):
530 530 if p.startswith("http://"):
531 531 f = os.path.join(p, urllib.quote(path))
532 532 return httprangereader.httprangereader(f)
533 533
534 534 f = os.path.join(p, path)
535 535
536 536 mode += "b" # for that other OS
537 537
538 538 if mode[0] != "r":
539 539 try:
540 540 s = os.stat(f)
541 541 except OSError:
542 542 d = os.path.dirname(f)
543 543 if not os.path.isdir(d):
544 544 os.makedirs(d)
545 545 else:
546 546 if s.st_nlink > 1:
547 547 file(f + ".tmp", "wb").write(file(f, "rb").read())
548 548 util.rename(f+".tmp", f)
549 549
550 550 return file(f, mode)
551 551
552 552 return o
553 553
554 554 class RepoError(Exception): pass
555 555
556 556 class localrepository:
557 557 def __init__(self, ui, path=None, create=0):
558 558 self.remote = 0
559 559 if path and path.startswith("http://"):
560 560 self.remote = 1
561 561 self.path = path
562 562 else:
563 563 if not path:
564 564 p = os.getcwd()
565 565 while not os.path.isdir(os.path.join(p, ".hg")):
566 566 oldp = p
567 567 p = os.path.dirname(p)
568 568 if p == oldp: raise RepoError("no repo found")
569 569 path = p
570 570 self.path = os.path.join(path, ".hg")
571 571
572 572 if not create and not os.path.isdir(self.path):
573 573 raise RepoError("repository %s not found" % self.path)
574 574
575 575 self.root = path
576 576 self.ui = ui
577 577
578 578 if create:
579 579 os.mkdir(self.path)
580 580 os.mkdir(self.join("data"))
581 581
582 582 self.opener = opener(self.path)
583 583 self.wopener = opener(self.root)
584 584 self.manifest = manifest(self.opener)
585 585 self.changelog = changelog(self.opener)
586 586 self.tagscache = None
587 587 self.nodetagscache = None
588 588
589 589 if not self.remote:
590 590 self.dirstate = dirstate(self.opener, ui, self.root)
591 591 try:
592 592 self.ui.readconfig(self.opener("hgrc"))
593 593 except IOError: pass
594 594
595 595 def hook(self, name, **args):
596 596 s = self.ui.config("hooks", name)
597 597 if s:
598 598 self.ui.note("running hook %s: %s\n" % (name, s))
599 599 old = {}
600 600 for k, v in args.items():
601 601 k = k.upper()
602 602 old[k] = os.environ.get(k, None)
603 603 os.environ[k] = v
604 604
605 605 r = os.system(s)
606 606
607 607 for k, v in old.items():
608 608 if v != None:
609 609 os.environ[k] = v
610 610 else:
611 611 del os.environ[k]
612 612
613 613 if r:
614 614 self.ui.warn("abort: %s hook failed with status %d!\n" %
615 615 (name, r))
616 616 return False
617 617 return True
618 618
619 619 def tags(self):
620 620 '''return a mapping of tag to node'''
621 621 if not self.tagscache:
622 622 self.tagscache = {}
623 623 def addtag(self, k, n):
624 624 try:
625 625 bin_n = bin(n)
626 626 except TypeError:
627 627 bin_n = ''
628 628 self.tagscache[k.strip()] = bin_n
629 629
630 630 try:
631 631 # read each head of the tags file, ending with the tip
632 632 # and add each tag found to the map, with "newer" ones
633 633 # taking precedence
634 634 fl = self.file(".hgtags")
635 635 h = fl.heads()
636 636 h.reverse()
637 637 for r in h:
638 638 for l in fl.revision(r).splitlines():
639 639 if l:
640 640 n, k = l.split(" ", 1)
641 641 addtag(self, k, n)
642 642 except KeyError:
643 643 pass
644 644
645 645 try:
646 646 f = self.opener("localtags")
647 647 for l in f:
648 648 n, k = l.split(" ", 1)
649 649 addtag(self, k, n)
650 650 except IOError:
651 651 pass
652 652
653 653 self.tagscache['tip'] = self.changelog.tip()
654 654
655 655 return self.tagscache
656 656
657 657 def tagslist(self):
658 658 '''return a list of tags ordered by revision'''
659 659 l = []
660 660 for t, n in self.tags().items():
661 661 try:
662 662 r = self.changelog.rev(n)
663 663 except:
664 664 r = -2 # sort to the beginning of the list if unknown
665 665 l.append((r,t,n))
666 666 l.sort()
667 667 return [(t,n) for r,t,n in l]
668 668
669 669 def nodetags(self, node):
670 670 '''return the tags associated with a node'''
671 671 if not self.nodetagscache:
672 672 self.nodetagscache = {}
673 673 for t,n in self.tags().items():
674 674 self.nodetagscache.setdefault(n,[]).append(t)
675 675 return self.nodetagscache.get(node, [])
676 676
677 677 def lookup(self, key):
678 678 try:
679 679 return self.tags()[key]
680 680 except KeyError:
681 681 try:
682 682 return self.changelog.lookup(key)
683 683 except:
684 684 raise RepoError("unknown revision '%s'" % key)
685 685
686 686 def dev(self):
687 687 if self.remote: return -1
688 688 return os.stat(self.path).st_dev
689 689
690 690 def join(self, f):
691 691 return os.path.join(self.path, f)
692 692
693 693 def wjoin(self, f):
694 694 return os.path.join(self.root, f)
695 695
696 696 def file(self, f):
697 697 if f[0] == '/': f = f[1:]
698 698 return filelog(self.opener, f)
699 699
700 700 def getcwd(self):
701 701 cwd = os.getcwd()
702 702 if cwd == self.root: return ''
703 703 return cwd[len(self.root) + 1:]
704 704
705 705 def wfile(self, f, mode='r'):
706 706 return self.wopener(f, mode)
707 707
708 708 def transaction(self):
709 709 # save dirstate for undo
710 710 try:
711 711 ds = self.opener("dirstate").read()
712 712 except IOError:
713 713 ds = ""
714 714 self.opener("journal.dirstate", "w").write(ds)
715 715
716 716 def after():
717 717 util.rename(self.join("journal"), self.join("undo"))
718 718 util.rename(self.join("journal.dirstate"),
719 719 self.join("undo.dirstate"))
720 720
721 721 return transaction.transaction(self.ui.warn, self.opener,
722 722 self.join("journal"), after)
723 723
724 724 def recover(self):
725 725 lock = self.lock()
726 726 if os.path.exists(self.join("journal")):
727 727 self.ui.status("rolling back interrupted transaction\n")
728 728 return transaction.rollback(self.opener, self.join("journal"))
729 729 else:
730 730 self.ui.warn("no interrupted transaction available\n")
731 731
732 732 def undo(self):
733 733 lock = self.lock()
734 734 if os.path.exists(self.join("undo")):
735 735 self.ui.status("rolling back last transaction\n")
736 736 transaction.rollback(self.opener, self.join("undo"))
737 737 self.dirstate = None
738 738 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
739 739 self.dirstate = dirstate(self.opener, self.ui, self.root)
740 740 else:
741 741 self.ui.warn("no undo information available\n")
742 742
743 743 def lock(self, wait = 1):
744 744 try:
745 745 return lock.lock(self.join("lock"), 0)
746 746 except lock.LockHeld, inst:
747 747 if wait:
748 748 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
749 749 return lock.lock(self.join("lock"), wait)
750 750 raise inst
751 751
752 752 def rawcommit(self, files, text, user, date, p1=None, p2=None):
753 753 orig_parent = self.dirstate.parents()[0] or nullid
754 754 p1 = p1 or self.dirstate.parents()[0] or nullid
755 755 p2 = p2 or self.dirstate.parents()[1] or nullid
756 756 c1 = self.changelog.read(p1)
757 757 c2 = self.changelog.read(p2)
758 758 m1 = self.manifest.read(c1[0])
759 759 mf1 = self.manifest.readflags(c1[0])
760 760 m2 = self.manifest.read(c2[0])
761 761
762 762 if orig_parent == p1:
763 763 update_dirstate = 1
764 764 else:
765 765 update_dirstate = 0
766 766
767 767 tr = self.transaction()
768 768 mm = m1.copy()
769 769 mfm = mf1.copy()
770 770 linkrev = self.changelog.count()
771 771 for f in files:
772 772 try:
773 773 t = self.wfile(f).read()
774 774 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
775 775 r = self.file(f)
776 776 mfm[f] = tm
777 777 mm[f] = r.add(t, {}, tr, linkrev,
778 778 m1.get(f, nullid), m2.get(f, nullid))
779 779 if update_dirstate:
780 780 self.dirstate.update([f], "n")
781 781 except IOError:
782 782 try:
783 783 del mm[f]
784 784 del mfm[f]
785 785 if update_dirstate:
786 786 self.dirstate.forget([f])
787 787 except:
788 788 # deleted from p2?
789 789 pass
790 790
791 791 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
792 792 user = user or self.ui.username()
793 793 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
794 794 tr.close()
795 795 if update_dirstate:
796 796 self.dirstate.setparents(n, nullid)
797 797
798 798 def commit(self, files = None, text = "", user = None, date = None,
799 799 match = util.always):
800 800 commit = []
801 801 remove = []
802 802 if files:
803 803 for f in files:
804 804 s = self.dirstate.state(f)
805 805 if s in 'nmai':
806 806 commit.append(f)
807 807 elif s == 'r':
808 808 remove.append(f)
809 809 else:
810 810 self.ui.warn("%s not tracked!\n" % f)
811 811 else:
812 812 (c, a, d, u) = self.changes(match = match)
813 813 commit = c + a
814 814 remove = d
815 815
816 816 if not commit and not remove:
817 817 self.ui.status("nothing changed\n")
818 818 return
819 819
820 820 if not self.hook("precommit"):
821 821 return 1
822 822
823 823 p1, p2 = self.dirstate.parents()
824 824 c1 = self.changelog.read(p1)
825 825 c2 = self.changelog.read(p2)
826 826 m1 = self.manifest.read(c1[0])
827 827 mf1 = self.manifest.readflags(c1[0])
828 828 m2 = self.manifest.read(c2[0])
829 829 lock = self.lock()
830 830 tr = self.transaction()
831 831
832 832 # check in files
833 833 new = {}
834 834 linkrev = self.changelog.count()
835 835 commit.sort()
836 836 for f in commit:
837 837 self.ui.note(f + "\n")
838 838 try:
839 839 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
840 840 t = self.wfile(f).read()
841 841 except IOError:
842 842 self.ui.warn("trouble committing %s!\n" % f)
843 843 raise
844 844
845 845 meta = {}
846 846 cp = self.dirstate.copied(f)
847 847 if cp:
848 848 meta["copy"] = cp
849 849 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
850 850 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
851 851
852 852 r = self.file(f)
853 853 fp1 = m1.get(f, nullid)
854 854 fp2 = m2.get(f, nullid)
855 855 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
856 856
857 857 # update manifest
858 858 m1.update(new)
859 859 for f in remove:
860 860 if f in m1:
861 861 del m1[f]
862 862 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
863 863 (new, remove))
864 864
865 865 # add changeset
866 866 new = new.keys()
867 867 new.sort()
868 868
869 869 if not text:
870 870 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
871 871 edittext += "".join(["HG: changed %s\n" % f for f in new])
872 872 edittext += "".join(["HG: removed %s\n" % f for f in remove])
873 873 edittext = self.ui.edit(edittext)
874 874 if not edittext.rstrip():
875 875 return 1
876 876 text = edittext
877 877
878 878 user = user or self.ui.username()
879 879 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
880 880
881 881 tr.close()
882 882
883 883 self.dirstate.setparents(n)
884 884 self.dirstate.update(new, "n")
885 885 self.dirstate.forget(remove)
886 886
887 887 if not self.hook("commit", node=hex(n)):
888 888 return 1
889 889
890 890 def walk(self, node = None, files = [], match = util.always):
891 891 if node:
892 892 for fn in self.manifest.read(self.changelog.read(node)[0]):
893 893 if match(fn): yield 'm', fn
894 894 else:
895 895 for src, fn in self.dirstate.walk(files, match):
896 896 yield src, fn
897 897
898 898 def changes(self, node1 = None, node2 = None, files = [],
899 899 match = util.always):
900 900 mf2, u = None, []
901 901
902 902 def fcmp(fn, mf):
903 903 t1 = self.wfile(fn).read()
904 904 t2 = self.file(fn).revision(mf[fn])
905 905 return cmp(t1, t2)
906 906
907 907 def mfmatches(node):
908 908 mf = dict(self.manifest.read(node))
909 909 for fn in mf.keys():
910 910 if not match(fn):
911 911 del mf[fn]
912 912 return mf
913 913
914 914 # are we comparing the working directory?
915 915 if not node2:
916 916 l, c, a, d, u = self.dirstate.changes(files, match)
917 917
918 918 # are we comparing working dir against its parent?
919 919 if not node1:
920 920 if l:
921 921 # do a full compare of any files that might have changed
922 922 change = self.changelog.read(self.dirstate.parents()[0])
923 923 mf2 = mfmatches(change[0])
924 924 for f in l:
925 925 if fcmp(f, mf2):
926 926 c.append(f)
927 927
928 928 for l in c, a, d, u:
929 929 l.sort()
930 930
931 931 return (c, a, d, u)
932 932
933 933 # are we comparing working dir against non-tip?
934 934 # generate a pseudo-manifest for the working dir
935 935 if not node2:
936 936 if not mf2:
937 937 change = self.changelog.read(self.dirstate.parents()[0])
938 938 mf2 = mfmatches(change[0])
939 939 for f in a + c + l:
940 940 mf2[f] = ""
941 941 for f in d:
942 942 if f in mf2: del mf2[f]
943 943 else:
944 944 change = self.changelog.read(node2)
945 945 mf2 = mfmatches(change[0])
946 946
947 947 # flush lists from dirstate before comparing manifests
948 948 c, a = [], []
949 949
950 950 change = self.changelog.read(node1)
951 951 mf1 = mfmatches(change[0])
952 952
953 953 for fn in mf2:
954 954 if mf1.has_key(fn):
955 955 if mf1[fn] != mf2[fn]:
956 956 if mf2[fn] != "" or fcmp(fn, mf1):
957 957 c.append(fn)
958 958 del mf1[fn]
959 959 else:
960 960 a.append(fn)
961 961
962 962 d = mf1.keys()
963 963
964 964 for l in c, a, d, u:
965 965 l.sort()
966 966
967 967 return (c, a, d, u)
968 968
969 969 def add(self, list):
970 970 for f in list:
971 971 p = self.wjoin(f)
972 972 if not os.path.exists(p):
973 973 self.ui.warn("%s does not exist!\n" % f)
974 974 elif not os.path.isfile(p):
975 975 self.ui.warn("%s not added: only files supported currently\n" % f)
976 976 elif self.dirstate.state(f) in 'an':
977 977 self.ui.warn("%s already tracked!\n" % f)
978 978 else:
979 979 self.dirstate.update([f], "a")
980 980
981 981 def forget(self, list):
982 982 for f in list:
983 983 if self.dirstate.state(f) not in 'ai':
984 984 self.ui.warn("%s not added!\n" % f)
985 985 else:
986 986 self.dirstate.forget([f])
987 987
988 988 def remove(self, list):
989 989 for f in list:
990 990 p = self.wjoin(f)
991 991 if os.path.exists(p):
992 992 self.ui.warn("%s still exists!\n" % f)
993 993 elif self.dirstate.state(f) == 'a':
994 994 self.ui.warn("%s never committed!\n" % f)
995 995 self.dirstate.forget([f])
996 996 elif f not in self.dirstate:
997 997 self.ui.warn("%s not tracked!\n" % f)
998 998 else:
999 999 self.dirstate.update([f], "r")
1000 1000
1001 1001 def copy(self, source, dest):
1002 1002 p = self.wjoin(dest)
1003 1003 if not os.path.exists(p):
1004 1004 self.ui.warn("%s does not exist!\n" % dest)
1005 1005 elif not os.path.isfile(p):
1006 1006 self.ui.warn("copy failed: %s is not a file\n" % dest)
1007 1007 else:
1008 1008 if self.dirstate.state(dest) == '?':
1009 1009 self.dirstate.update([dest], "a")
1010 1010 self.dirstate.copy(source, dest)
1011 1011
1012 1012 def heads(self):
1013 1013 return self.changelog.heads()
1014 1014
1015 1015 def branches(self, nodes):
1016 1016 if not nodes: nodes = [self.changelog.tip()]
1017 1017 b = []
1018 1018 for n in nodes:
1019 1019 t = n
1020 1020 while n:
1021 1021 p = self.changelog.parents(n)
1022 1022 if p[1] != nullid or p[0] == nullid:
1023 1023 b.append((t, n, p[0], p[1]))
1024 1024 break
1025 1025 n = p[0]
1026 1026 return b
1027 1027
1028 1028 def between(self, pairs):
1029 1029 r = []
1030 1030
1031 1031 for top, bottom in pairs:
1032 1032 n, l, i = top, [], 0
1033 1033 f = 1
1034 1034
1035 1035 while n != bottom:
1036 1036 p = self.changelog.parents(n)[0]
1037 1037 if i == f:
1038 1038 l.append(n)
1039 1039 f = f * 2
1040 1040 n = p
1041 1041 i += 1
1042 1042
1043 1043 r.append(l)
1044 1044
1045 1045 return r
1046 1046
1047 1047 def newer(self, nodes):
1048 1048 m = {}
1049 1049 nl = []
1050 1050 pm = {}
1051 1051 cl = self.changelog
1052 1052 t = l = cl.count()
1053 1053
1054 1054 # find the lowest numbered node
1055 1055 for n in nodes:
1056 1056 l = min(l, cl.rev(n))
1057 1057 m[n] = 1
1058 1058
1059 1059 for i in xrange(l, t):
1060 1060 n = cl.node(i)
1061 1061 if n in m: # explicitly listed
1062 1062 pm[n] = 1
1063 1063 nl.append(n)
1064 1064 continue
1065 1065 for p in cl.parents(n):
1066 1066 if p in pm: # parent listed
1067 1067 pm[n] = 1
1068 1068 nl.append(n)
1069 1069 break
1070 1070
1071 1071 return nl
1072 1072
1073 1073 def findincoming(self, remote, base=None, heads=None):
1074 1074 m = self.changelog.nodemap
1075 1075 search = []
1076 1076 fetch = []
1077 1077 seen = {}
1078 1078 seenbranch = {}
1079 1079 if base == None:
1080 1080 base = {}
1081 1081
1082 1082 # assume we're closer to the tip than the root
1083 1083 # and start by examining the heads
1084 1084 self.ui.status("searching for changes\n")
1085 1085
1086 1086 if not heads:
1087 1087 heads = remote.heads()
1088 1088
1089 1089 unknown = []
1090 1090 for h in heads:
1091 1091 if h not in m:
1092 1092 unknown.append(h)
1093 1093 else:
1094 1094 base[h] = 1
1095 1095
1096 1096 if not unknown:
1097 1097 return None
1098 1098
1099 1099 rep = {}
1100 1100 reqcnt = 0
1101 1101
1102 1102 # search through remote branches
1103 1103 # a 'branch' here is a linear segment of history, with four parts:
1104 1104 # head, root, first parent, second parent
1105 1105 # (a branch always has two parents (or none) by definition)
1106 1106 unknown = remote.branches(unknown)
1107 1107 while unknown:
1108 1108 r = []
1109 1109 while unknown:
1110 1110 n = unknown.pop(0)
1111 1111 if n[0] in seen:
1112 1112 continue
1113 1113
1114 1114 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1115 1115 if n[0] == nullid:
1116 1116 break
1117 1117 if n in seenbranch:
1118 1118 self.ui.debug("branch already found\n")
1119 1119 continue
1120 1120 if n[1] and n[1] in m: # do we know the base?
1121 1121 self.ui.debug("found incomplete branch %s:%s\n"
1122 1122 % (short(n[0]), short(n[1])))
1123 1123 search.append(n) # schedule branch range for scanning
1124 1124 seenbranch[n] = 1
1125 1125 else:
1126 1126 if n[1] not in seen and n[1] not in fetch:
1127 1127 if n[2] in m and n[3] in m:
1128 1128 self.ui.debug("found new changeset %s\n" %
1129 1129 short(n[1]))
1130 1130 fetch.append(n[1]) # earliest unknown
1131 1131 base[n[2]] = 1 # latest known
1132 1132 continue
1133 1133
1134 1134 for a in n[2:4]:
1135 1135 if a not in rep:
1136 1136 r.append(a)
1137 1137 rep[a] = 1
1138 1138
1139 1139 seen[n[0]] = 1
1140 1140
1141 1141 if r:
1142 1142 reqcnt += 1
1143 1143 self.ui.debug("request %d: %s\n" %
1144 1144 (reqcnt, " ".join(map(short, r))))
1145 1145 for p in range(0, len(r), 10):
1146 1146 for b in remote.branches(r[p:p+10]):
1147 1147 self.ui.debug("received %s:%s\n" %
1148 1148 (short(b[0]), short(b[1])))
1149 1149 if b[0] not in m and b[0] not in seen:
1150 1150 unknown.append(b)
1151 1151
1152 1152 # do binary search on the branches we found
1153 1153 while search:
1154 1154 n = search.pop(0)
1155 1155 reqcnt += 1
1156 1156 l = remote.between([(n[0], n[1])])[0]
1157 1157 l.append(n[1])
1158 1158 p = n[0]
1159 1159 f = 1
1160 1160 for i in l:
1161 1161 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1162 1162 if i in m:
1163 1163 if f <= 2:
1164 1164 self.ui.debug("found new branch changeset %s\n" %
1165 1165 short(p))
1166 1166 fetch.append(p)
1167 1167 base[i] = 1
1168 1168 else:
1169 1169 self.ui.debug("narrowed branch search to %s:%s\n"
1170 1170 % (short(p), short(i)))
1171 1171 search.append((p, i))
1172 1172 break
1173 1173 p, f = i, f * 2
1174 1174
1175 1175 # sanity check our fetch list
1176 1176 for f in fetch:
1177 1177 if f in m:
1178 1178 raise RepoError("already have changeset " + short(f[:4]))
1179 1179
1180 1180 if base.keys() == [nullid]:
1181 1181 self.ui.warn("warning: pulling from an unrelated repository!\n")
1182 1182
1183 1183 self.ui.note("adding new changesets starting at " +
1184 1184 " ".join([short(f) for f in fetch]) + "\n")
1185 1185
1186 1186 self.ui.debug("%d total queries\n" % reqcnt)
1187 1187
1188 1188 return fetch
1189 1189
1190 1190 def findoutgoing(self, remote, base=None, heads=None):
1191 1191 if base == None:
1192 1192 base = {}
1193 1193 self.findincoming(remote, base, heads)
1194 1194
1195 1195 remain = dict.fromkeys(self.changelog.nodemap)
1196 1196
1197 1197 # prune everything remote has from the tree
1198 1198 del remain[nullid]
1199 1199 remove = base.keys()
1200 1200 while remove:
1201 1201 n = remove.pop(0)
1202 1202 if n in remain:
1203 1203 del remain[n]
1204 1204 for p in self.changelog.parents(n):
1205 1205 remove.append(p)
1206 1206
1207 1207 # find every node whose parents have been pruned
1208 1208 subset = []
1209 1209 for n in remain:
1210 1210 p1, p2 = self.changelog.parents(n)
1211 1211 if p1 not in remain and p2 not in remain:
1212 1212 subset.append(n)
1213 1213
1214 1214 # this is the set of all roots we have to push
1215 1215 return subset
1216 1216
1217 1217 def pull(self, remote):
1218 1218 lock = self.lock()
1219 1219
1220 1220 # if we have an empty repo, fetch everything
1221 1221 if self.changelog.tip() == nullid:
1222 1222 self.ui.status("requesting all changes\n")
1223 1223 fetch = [nullid]
1224 1224 else:
1225 1225 fetch = self.findincoming(remote)
1226 1226
1227 1227 if not fetch:
1228 1228 self.ui.status("no changes found\n")
1229 1229 return 1
1230 1230
1231 1231 cg = remote.changegroup(fetch)
1232 1232 return self.addchangegroup(cg)
1233 1233
1234 1234 def push(self, remote, force=False):
1235 1235 lock = remote.lock()
1236 1236
1237 1237 base = {}
1238 1238 heads = remote.heads()
1239 1239 inc = self.findincoming(remote, base, heads)
1240 1240 if not force and inc:
1241 1241 self.ui.warn("abort: unsynced remote changes!\n")
1242 1242 self.ui.status("(did you forget to sync? use push -f to force)\n")
1243 1243 return 1
1244 1244
1245 1245 update = self.findoutgoing(remote, base)
1246 1246 if not update:
1247 1247 self.ui.status("no changes found\n")
1248 1248 return 1
1249 1249 elif not force:
1250 1250 if len(heads) < len(self.changelog.heads()):
1251 1251 self.ui.warn("abort: push creates new remote branches!\n")
1252 1252 self.ui.status("(did you forget to merge?" +
1253 1253 " use push -f to force)\n")
1254 1254 return 1
1255 1255
1256 1256 cg = self.changegroup(update)
1257 1257 return remote.addchangegroup(cg)
1258 1258
1259 1259 def changegroup(self, basenodes):
1260 1260 class genread:
1261 1261 def __init__(self, generator):
1262 1262 self.g = generator
1263 1263 self.buf = ""
1264 1264 def read(self, l):
1265 1265 while l > len(self.buf):
1266 1266 try:
1267 1267 self.buf += self.g.next()
1268 1268 except StopIteration:
1269 1269 break
1270 1270 d, self.buf = self.buf[:l], self.buf[l:]
1271 1271 return d
1272 1272
1273 1273 def gengroup():
1274 1274 nodes = self.newer(basenodes)
1275 1275
1276 1276 # construct the link map
1277 1277 linkmap = {}
1278 1278 for n in nodes:
1279 1279 linkmap[self.changelog.rev(n)] = n
1280 1280
1281 1281 # construct a list of all changed files
1282 1282 changed = {}
1283 1283 for n in nodes:
1284 1284 c = self.changelog.read(n)
1285 1285 for f in c[3]:
1286 1286 changed[f] = 1
1287 1287 changed = changed.keys()
1288 1288 changed.sort()
1289 1289
1290 1290 # the changegroup is changesets + manifests + all file revs
1291 1291 revs = [ self.changelog.rev(n) for n in nodes ]
1292 1292
1293 1293 for y in self.changelog.group(linkmap): yield y
1294 1294 for y in self.manifest.group(linkmap): yield y
1295 1295 for f in changed:
1296 1296 yield struct.pack(">l", len(f) + 4) + f
1297 1297 g = self.file(f).group(linkmap)
1298 1298 for y in g:
1299 1299 yield y
1300 1300
1301 1301 yield struct.pack(">l", 0)
1302 1302
1303 1303 return genread(gengroup())
1304 1304
1305 1305 def addchangegroup(self, source):
1306 1306
1307 1307 def getchunk():
1308 1308 d = source.read(4)
1309 1309 if not d: return ""
1310 1310 l = struct.unpack(">l", d)[0]
1311 1311 if l <= 4: return ""
1312 1312 return source.read(l - 4)
1313 1313
1314 1314 def getgroup():
1315 1315 while 1:
1316 1316 c = getchunk()
1317 1317 if not c: break
1318 1318 yield c
1319 1319
1320 1320 def csmap(x):
1321 1321 self.ui.debug("add changeset %s\n" % short(x))
1322 1322 return self.changelog.count()
1323 1323
1324 1324 def revmap(x):
1325 1325 return self.changelog.rev(x)
1326 1326
1327 1327 if not source: return
1328 1328 changesets = files = revisions = 0
1329 1329
1330 1330 tr = self.transaction()
1331 1331
1332 1332 # pull off the changeset group
1333 1333 self.ui.status("adding changesets\n")
1334 1334 co = self.changelog.tip()
1335 1335 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1336 1336 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1337 1337
1338 1338 # pull off the manifest group
1339 1339 self.ui.status("adding manifests\n")
1340 1340 mm = self.manifest.tip()
1341 1341 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1342 1342
1343 1343 # process the files
1344 1344 self.ui.status("adding file changes\n")
1345 1345 while 1:
1346 1346 f = getchunk()
1347 1347 if not f: break
1348 1348 self.ui.debug("adding %s revisions\n" % f)
1349 1349 fl = self.file(f)
1350 1350 o = fl.count()
1351 1351 n = fl.addgroup(getgroup(), revmap, tr)
1352 1352 revisions += fl.count() - o
1353 1353 files += 1
1354 1354
1355 1355 self.ui.status(("added %d changesets" +
1356 1356 " with %d changes to %d files\n")
1357 1357 % (changesets, revisions, files))
1358 1358
1359 1359 tr.close()
1360 1360
1361 1361 if not self.hook("changegroup"):
1362 1362 return 1
1363 1363
1364 1364 return
1365 1365
1366 1366 def update(self, node, allow=False, force=False, choose=None,
1367 1367 moddirstate=True):
1368 1368 pl = self.dirstate.parents()
1369 1369 if not force and pl[1] != nullid:
1370 1370 self.ui.warn("aborting: outstanding uncommitted merges\n")
1371 1371 return 1
1372 1372
1373 1373 p1, p2 = pl[0], node
1374 1374 pa = self.changelog.ancestor(p1, p2)
1375 1375 m1n = self.changelog.read(p1)[0]
1376 1376 m2n = self.changelog.read(p2)[0]
1377 1377 man = self.manifest.ancestor(m1n, m2n)
1378 1378 m1 = self.manifest.read(m1n)
1379 1379 mf1 = self.manifest.readflags(m1n)
1380 1380 m2 = self.manifest.read(m2n)
1381 1381 mf2 = self.manifest.readflags(m2n)
1382 1382 ma = self.manifest.read(man)
1383 1383 mfa = self.manifest.readflags(man)
1384 1384
1385 1385 (c, a, d, u) = self.changes()
1386 1386
1387 1387 # is this a jump, or a merge? i.e. is there a linear path
1388 1388 # from p1 to p2?
1389 1389 linear_path = (pa == p1 or pa == p2)
1390 1390
1391 1391 # resolve the manifest to determine which files
1392 1392 # we care about merging
1393 1393 self.ui.note("resolving manifests\n")
1394 1394 self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
1395 1395 (force, allow, moddirstate, linear_path))
1396 1396 self.ui.debug(" ancestor %s local %s remote %s\n" %
1397 1397 (short(man), short(m1n), short(m2n)))
1398 1398
1399 1399 merge = {}
1400 1400 get = {}
1401 1401 remove = []
1402 1402 mark = {}
1403 1403
1404 1404 # construct a working dir manifest
1405 1405 mw = m1.copy()
1406 1406 mfw = mf1.copy()
1407 1407 umap = dict.fromkeys(u)
1408 1408
1409 1409 for f in a + c + u:
1410 1410 mw[f] = ""
1411 1411 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1412 1412
1413 1413 for f in d:
1414 1414 if f in mw: del mw[f]
1415 1415
1416 1416 # If we're jumping between revisions (as opposed to merging),
1417 1417 # and if neither the working directory nor the target rev has
1418 1418 # the file, then we need to remove it from the dirstate, to
1419 1419 # prevent the dirstate from listing the file when it is no
1420 1420 # longer in the manifest.
1421 1421 if moddirstate and linear_path and f not in m2:
1422 1422 self.dirstate.forget((f,))
1423 1423
1424 1424 # Compare manifests
1425 1425 for f, n in mw.iteritems():
1426 1426 if choose and not choose(f): continue
1427 1427 if f in m2:
1428 1428 s = 0
1429 1429
1430 1430 # is the wfile new since m1, and match m2?
1431 1431 if f not in m1:
1432 1432 t1 = self.wfile(f).read()
1433 1433 t2 = self.file(f).revision(m2[f])
1434 1434 if cmp(t1, t2) == 0:
1435 1435 mark[f] = 1
1436 1436 n = m2[f]
1437 1437 del t1, t2
1438 1438
1439 1439 # are files different?
1440 1440 if n != m2[f]:
1441 1441 a = ma.get(f, nullid)
1442 1442 # are both different from the ancestor?
1443 1443 if n != a and m2[f] != a:
1444 1444 self.ui.debug(" %s versions differ, resolve\n" % f)
1445 1445 # merge executable bits
1446 1446 # "if we changed or they changed, change in merge"
1447 1447 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1448 1448 mode = ((a^b) | (a^c)) ^ a
1449 1449 merge[f] = (m1.get(f, nullid), m2[f], mode)
1450 1450 s = 1
1451 1451 # are we clobbering?
1452 1452 # is remote's version newer?
1453 1453 # or are we going back in time?
1454 1454 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1455 1455 self.ui.debug(" remote %s is newer, get\n" % f)
1456 1456 get[f] = m2[f]
1457 1457 s = 1
1458 1458 else:
1459 1459 mark[f] = 1
1460 1460 elif f in umap:
1461 1461 # this unknown file is the same as the checkout
1462 1462 get[f] = m2[f]
1463 1463
1464 1464 if not s and mfw[f] != mf2[f]:
1465 1465 if force:
1466 1466 self.ui.debug(" updating permissions for %s\n" % f)
1467 1467 util.set_exec(self.wjoin(f), mf2[f])
1468 1468 else:
1469 1469 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1470 1470 mode = ((a^b) | (a^c)) ^ a
1471 1471 if mode != b:
1472 1472 self.ui.debug(" updating permissions for %s\n" % f)
1473 1473 util.set_exec(self.wjoin(f), mode)
1474 1474 mark[f] = 1
1475 1475 del m2[f]
1476 1476 elif f in ma:
1477 1477 if n != ma[f]:
1478 1478 r = "d"
1479 1479 if not force and (linear_path or allow):
1480 1480 r = self.ui.prompt(
1481 1481 (" local changed %s which remote deleted\n" % f) +
1482 1482 "(k)eep or (d)elete?", "[kd]", "k")
1483 1483 if r == "d":
1484 1484 remove.append(f)
1485 1485 else:
1486 1486 self.ui.debug("other deleted %s\n" % f)
1487 1487 remove.append(f) # other deleted it
1488 1488 else:
1489 1489 if n == m1.get(f, nullid): # same as parent
1490 1490 if p2 == pa: # going backwards?
1491 1491 self.ui.debug("remote deleted %s\n" % f)
1492 1492 remove.append(f)
1493 1493 else:
1494 1494 self.ui.debug("local created %s, keeping\n" % f)
1495 1495 else:
1496 1496 self.ui.debug("working dir created %s, keeping\n" % f)
1497 1497
1498 1498 for f, n in m2.iteritems():
1499 1499 if choose and not choose(f): continue
1500 1500 if f[0] == "/": continue
1501 1501 if f in ma and n != ma[f]:
1502 1502 r = "k"
1503 1503 if not force and (linear_path or allow):
1504 1504 r = self.ui.prompt(
1505 1505 ("remote changed %s which local deleted\n" % f) +
1506 1506 "(k)eep or (d)elete?", "[kd]", "k")
1507 1507 if r == "k": get[f] = n
1508 1508 elif f not in ma:
1509 1509 self.ui.debug("remote created %s\n" % f)
1510 1510 get[f] = n
1511 1511 else:
1512 1512 if force or p2 == pa: # going backwards?
1513 1513 self.ui.debug("local deleted %s, recreating\n" % f)
1514 1514 get[f] = n
1515 1515 else:
1516 1516 self.ui.debug("local deleted %s\n" % f)
1517 1517
1518 1518 del mw, m1, m2, ma
1519 1519
1520 1520 if force:
1521 1521 for f in merge:
1522 1522 get[f] = merge[f][1]
1523 1523 merge = {}
1524 1524
1525 1525 if linear_path or force:
1526 1526 # we don't need to do any magic, just jump to the new rev
1527 1527 mode = 'n'
1528 1528 p1, p2 = p2, nullid
1529 1529 else:
1530 1530 if not allow:
1531 1531 self.ui.status("this update spans a branch" +
1532 1532 " affecting the following files:\n")
1533 1533 fl = merge.keys() + get.keys()
1534 1534 fl.sort()
1535 1535 for f in fl:
1536 1536 cf = ""
1537 1537 if f in merge: cf = " (resolve)"
1538 1538 self.ui.status(" %s%s\n" % (f, cf))
1539 1539 self.ui.warn("aborting update spanning branches!\n")
1540 1540 self.ui.status("(use update -m to merge across branches" +
1541 1541 " or -C to lose changes)\n")
1542 1542 return 1
1543 1543 # we have to remember what files we needed to get/change
1544 1544 # because any file that's different from either one of its
1545 1545 # parents must be in the changeset
1546 1546 mode = 'm'
1547 1547 if moddirstate:
1548 1548 self.dirstate.update(mark.keys(), "m")
1549 1549
1550 1550 if moddirstate:
1551 1551 self.dirstate.setparents(p1, p2)
1552 1552
1553 1553 # get the files we don't need to change
1554 1554 files = get.keys()
1555 1555 files.sort()
1556 1556 for f in files:
1557 1557 if f[0] == "/": continue
1558 1558 self.ui.note("getting %s\n" % f)
1559 1559 t = self.file(f).read(get[f])
1560 1560 try:
1561 1561 self.wfile(f, "w").write(t)
1562 1562 except IOError:
1563 1563 os.makedirs(os.path.dirname(self.wjoin(f)))
1564 1564 self.wfile(f, "w").write(t)
1565 1565 util.set_exec(self.wjoin(f), mf2[f])
1566 1566 if moddirstate:
1567 1567 self.dirstate.update([f], mode)
1568 1568
1569 1569 # merge the tricky bits
1570 1570 files = merge.keys()
1571 1571 files.sort()
1572 1572 for f in files:
1573 1573 self.ui.status("merging %s\n" % f)
1574 1574 m, o, flag = merge[f]
1575 1575 self.merge3(f, m, o)
1576 1576 util.set_exec(self.wjoin(f), flag)
1577 1577 if moddirstate:
1578 1578 if mode == 'm':
1579 1579 # only update dirstate on branch merge, otherwise we
1580 1580 # could mark files with changes as unchanged
1581 1581 self.dirstate.update([f], mode)
1582 1582 elif p2 == nullid:
1583 1583 # update dirstate from parent1's manifest
1584 1584 m1n = self.changelog.read(p1)[0]
1585 1585 m1 = self.manifest.read(m1n)
1586 1586 file_ = self.file(f)
1587 f_len = file_.length(file_.rev(m1[f]))
1587 f_len = len(file_.read(m1[f]))
1588 1588 self.dirstate.update([f], mode, st_size=f_len, st_mtime=0)
1589 1589 else:
1590 1590 self.ui.warn("Second parent without branch merge!?\n"
1591 1591 "Dirstate for file %s may be wrong.\n" % f)
1592 1592
1593 1593 remove.sort()
1594 1594 for f in remove:
1595 1595 self.ui.note("removing %s\n" % f)
1596 1596 try:
1597 1597 os.unlink(f)
1598 1598 except OSError, inst:
1599 1599 self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
1600 1600 # try removing directories that might now be empty
1601 1601 try: os.removedirs(os.path.dirname(f))
1602 1602 except: pass
1603 1603 if moddirstate:
1604 1604 if mode == 'n':
1605 1605 self.dirstate.forget(remove)
1606 1606 else:
1607 1607 self.dirstate.update(remove, 'r')
1608 1608
1609 1609 def merge3(self, fn, my, other):
1610 1610 """perform a 3-way merge in the working directory"""
1611 1611
1612 1612 def temp(prefix, node):
1613 1613 pre = "%s~%s." % (os.path.basename(fn), prefix)
1614 1614 (fd, name) = tempfile.mkstemp("", pre)
1615 1615 f = os.fdopen(fd, "wb")
1616 1616 f.write(fl.revision(node))
1617 1617 f.close()
1618 1618 return name
1619 1619
1620 1620 fl = self.file(fn)
1621 1621 base = fl.ancestor(my, other)
1622 1622 a = self.wjoin(fn)
1623 1623 b = temp("base", base)
1624 1624 c = temp("other", other)
1625 1625
1626 1626 self.ui.note("resolving %s\n" % fn)
1627 1627 self.ui.debug("file %s: other %s ancestor %s\n" %
1628 1628 (fn, short(other), short(base)))
1629 1629
1630 1630 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1631 1631 or "hgmerge")
1632 1632 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1633 1633 if r:
1634 1634 self.ui.warn("merging %s failed!\n" % fn)
1635 1635
1636 1636 os.unlink(b)
1637 1637 os.unlink(c)
1638 1638
1639 1639 def verify(self):
1640 1640 filelinkrevs = {}
1641 1641 filenodes = {}
1642 1642 changesets = revisions = files = 0
1643 1643 errors = 0
1644 1644
1645 1645 seen = {}
1646 1646 self.ui.status("checking changesets\n")
1647 1647 for i in range(self.changelog.count()):
1648 1648 changesets += 1
1649 1649 n = self.changelog.node(i)
1650 1650 if n in seen:
1651 1651 self.ui.warn("duplicate changeset at revision %d\n" % i)
1652 1652 errors += 1
1653 1653 seen[n] = 1
1654 1654
1655 1655 for p in self.changelog.parents(n):
1656 1656 if p not in self.changelog.nodemap:
1657 1657 self.ui.warn("changeset %s has unknown parent %s\n" %
1658 1658 (short(n), short(p)))
1659 1659 errors += 1
1660 1660 try:
1661 1661 changes = self.changelog.read(n)
1662 1662 except Exception, inst:
1663 1663 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1664 1664 errors += 1
1665 1665
1666 1666 for f in changes[3]:
1667 1667 filelinkrevs.setdefault(f, []).append(i)
1668 1668
1669 1669 seen = {}
1670 1670 self.ui.status("checking manifests\n")
1671 1671 for i in range(self.manifest.count()):
1672 1672 n = self.manifest.node(i)
1673 1673 if n in seen:
1674 1674 self.ui.warn("duplicate manifest at revision %d\n" % i)
1675 1675 errors += 1
1676 1676 seen[n] = 1
1677 1677
1678 1678 for p in self.manifest.parents(n):
1679 1679 if p not in self.manifest.nodemap:
1680 1680 self.ui.warn("manifest %s has unknown parent %s\n" %
1681 1681 (short(n), short(p)))
1682 1682 errors += 1
1683 1683
1684 1684 try:
1685 1685 delta = mdiff.patchtext(self.manifest.delta(n))
1686 1686 except KeyboardInterrupt:
1687 1687 self.ui.warn("aborted")
1688 1688 sys.exit(0)
1689 1689 except Exception, inst:
1690 1690 self.ui.warn("unpacking manifest %s: %s\n"
1691 1691 % (short(n), inst))
1692 1692 errors += 1
1693 1693
1694 1694 ff = [ l.split('\0') for l in delta.splitlines() ]
1695 1695 for f, fn in ff:
1696 1696 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1697 1697
1698 1698 self.ui.status("crosschecking files in changesets and manifests\n")
1699 1699 for f in filenodes:
1700 1700 if f not in filelinkrevs:
1701 1701 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1702 1702 errors += 1
1703 1703
1704 1704 for f in filelinkrevs:
1705 1705 if f not in filenodes:
1706 1706 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1707 1707 errors += 1
1708 1708
1709 1709 self.ui.status("checking files\n")
1710 1710 ff = filenodes.keys()
1711 1711 ff.sort()
1712 1712 for f in ff:
1713 1713 if f == "/dev/null": continue
1714 1714 files += 1
1715 1715 fl = self.file(f)
1716 1716 nodes = { nullid: 1 }
1717 1717 seen = {}
1718 1718 for i in range(fl.count()):
1719 1719 revisions += 1
1720 1720 n = fl.node(i)
1721 1721
1722 1722 if n in seen:
1723 1723 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1724 1724 errors += 1
1725 1725
1726 1726 if n not in filenodes[f]:
1727 1727 self.ui.warn("%s: %d:%s not in manifests\n"
1728 1728 % (f, i, short(n)))
1729 1729 errors += 1
1730 1730 else:
1731 1731 del filenodes[f][n]
1732 1732
1733 1733 flr = fl.linkrev(n)
1734 1734 if flr not in filelinkrevs[f]:
1735 1735 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1736 1736 % (f, short(n), fl.linkrev(n)))
1737 1737 errors += 1
1738 1738 else:
1739 1739 filelinkrevs[f].remove(flr)
1740 1740
1741 1741 # verify contents
1742 1742 try:
1743 1743 t = fl.read(n)
1744 1744 except Exception, inst:
1745 1745 self.ui.warn("unpacking file %s %s: %s\n"
1746 1746 % (f, short(n), inst))
1747 1747 errors += 1
1748 1748
1749 1749 # verify parents
1750 1750 (p1, p2) = fl.parents(n)
1751 1751 if p1 not in nodes:
1752 1752 self.ui.warn("file %s:%s unknown parent 1 %s" %
1753 1753 (f, short(n), short(p1)))
1754 1754 errors += 1
1755 1755 if p2 not in nodes:
1756 1756 self.ui.warn("file %s:%s unknown parent 2 %s" %
1757 1757 (f, short(n), short(p1)))
1758 1758 errors += 1
1759 1759 nodes[n] = 1
1760 1760
1761 1761 # cross-check
1762 1762 for node in filenodes[f]:
1763 1763 self.ui.warn("node %s in manifests not in %s\n"
1764 1764 % (hex(node), f))
1765 1765 errors += 1
1766 1766
1767 1767 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1768 1768 (files, changesets, revisions))
1769 1769
1770 1770 if errors:
1771 1771 self.ui.warn("%d integrity errors encountered!\n" % errors)
1772 1772 return 1
1773 1773
1774 1774 class httprepository:
1775 1775 def __init__(self, ui, path):
1776 1776 # fix missing / after hostname
1777 1777 s = urlparse.urlsplit(path)
1778 1778 partial = s[2]
1779 1779 if not partial: partial = "/"
1780 1780 self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
1781 1781 self.ui = ui
1782 1782 no_list = [ "localhost", "127.0.0.1" ]
1783 1783 host = ui.config("http_proxy", "host")
1784 1784 if host is None:
1785 1785 host = os.environ.get("http_proxy")
1786 1786 if host and host.startswith('http://'):
1787 1787 host = host[7:]
1788 1788 user = ui.config("http_proxy", "user")
1789 1789 passwd = ui.config("http_proxy", "passwd")
1790 1790 no = ui.config("http_proxy", "no")
1791 1791 if no is None:
1792 1792 no = os.environ.get("no_proxy")
1793 1793 if no:
1794 1794 no_list = no_list + no.split(",")
1795 1795
1796 1796 no_proxy = 0
1797 1797 for h in no_list:
1798 1798 if (path.startswith("http://" + h + "/") or
1799 1799 path.startswith("http://" + h + ":") or
1800 1800 path == "http://" + h):
1801 1801 no_proxy = 1
1802 1802
1803 1803 # Note: urllib2 takes proxy values from the environment and those will
1804 1804 # take precedence
1805 1805 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
1806 1806 if os.environ.has_key(env):
1807 1807 del os.environ[env]
1808 1808
1809 1809 proxy_handler = urllib2.BaseHandler()
1810 1810 if host and not no_proxy:
1811 1811 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
1812 1812
1813 1813 authinfo = None
1814 1814 if user and passwd:
1815 1815 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
1816 1816 passmgr.add_password(None, host, user, passwd)
1817 1817 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
1818 1818
1819 1819 opener = urllib2.build_opener(proxy_handler, authinfo)
1820 1820 urllib2.install_opener(opener)
1821 1821
1822 1822 def dev(self):
1823 1823 return -1
1824 1824
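# do_cmd encodes the command and its keyword arguments as a query string
# (illustratively, "http://example.com/repo?cmd=branches&nodes=<hex>"),
# then sanity-checks the content-type of the reply before returning the
# raw response object to the caller.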
1825 1825 def do_cmd(self, cmd, **args):
1826 1826 self.ui.debug("sending %s command\n" % cmd)
1827 1827 q = {"cmd": cmd}
1828 1828 q.update(args)
1829 1829 qs = urllib.urlencode(q)
1830 1830 cu = "%s?%s" % (self.url, qs)
1831 1831 resp = urllib2.urlopen(cu)
1832 1832 proto = resp.headers['content-type']
1833 1833
1834 1834 # accept old "text/plain" and "application/hg-changegroup" for now
1835 1835 if not proto.startswith('application/mercurial') and \
1836 1836 not proto.startswith('text/plain') and \
1837 1837 not proto.startswith('application/hg-changegroup'):
1838 1838 raise RepoError("'%s' does not appear to be an hg repository"
1839 1839 % self.url)
1840 1840
1841 1841 if proto.startswith('application/mercurial'):
1842 1842 version = proto[22:]
1843 1843 if float(version) > 0.1:
1844 1844 raise RepoError("'%s' uses newer protocol %s" %
1845 1845 (self.url, version))
1846 1846
1847 1847 return resp
1848 1848
1849 1849 def heads(self):
1850 1850 d = self.do_cmd("heads").read()
1851 1851 try:
1852 1852 return map(bin, d[:-1].split(" "))
1853 1853 except:
1854 1854 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1855 1855 raise
1856 1856
1857 1857 def branches(self, nodes):
1858 1858 n = " ".join(map(hex, nodes))
1859 1859 d = self.do_cmd("branches", nodes=n).read()
1860 1860 try:
1861 1861 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1862 1862 return br
1863 1863 except:
1864 1864 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1865 1865 raise
1866 1866
1867 1867 def between(self, pairs):
1868 1868 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
1869 1869 d = self.do_cmd("between", pairs=n).read()
1870 1870 try:
1871 1871 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
1872 1872 return p
1873 1873 except:
1874 1874 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
1875 1875 raise
1876 1876
1877 1877 def changegroup(self, nodes):
1878 1878 n = " ".join(map(hex, nodes))
1879 1879 f = self.do_cmd("changegroup", roots=n)
1880 1880 bytes = 0
1881 1881
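# zread is a minimal file-like wrapper that decompresses the zlib-encoded
# changegroup stream on the fly: read(l) pulls 4096-byte chunks from the
# HTTP response into a zlib decompressobj until at least l decompressed
# bytes are buffered, then returns up to l of them.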
1882 1882 class zread:
1883 1883 def __init__(self, f):
1884 1884 self.zd = zlib.decompressobj()
1885 1885 self.f = f
1886 1886 self.buf = ""
1887 1887 def read(self, l):
1888 1888 while l > len(self.buf):
1889 1889 r = self.f.read(4096)
1890 1890 if r:
1891 1891 self.buf += self.zd.decompress(r)
1892 1892 else:
1893 1893 self.buf += self.zd.flush()
1894 1894 break
1895 1895 d, self.buf = self.buf[:l], self.buf[l:]
1896 1896 return d
1897 1897
1898 1898 return zread(f)
1899 1899
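# remotelock ties a lock held on the remote side to a local object: calling
# release() (or letting __del__ run) sends the matching "unlock" command,
# so a dropped reference cannot leave the remote repository locked.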
1900 1900 class remotelock:
1901 1901 def __init__(self, repo):
1902 1902 self.repo = repo
1903 1903 def release(self):
1904 1904 self.repo.unlock()
1905 1905 self.repo = None
1906 1906 def __del__(self):
1907 1907 if self.repo:
1908 1908 self.release()
1909 1909
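# sshrepository drives "hg -R <path> serve --stdio" on the remote host via
# os.popen3: commands are written to the child's stdin, replies are read
# from its stdout, and anything on stderr is relayed to the user through
# readerr().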
1910 1910 class sshrepository:
1911 1911 def __init__(self, ui, path):
1912 1912 self.url = path
1913 1913 self.ui = ui
1914 1914
1915 1915 m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))', path)
1916 1916 if not m:
1917 1917 raise RepoError("couldn't parse destination %s" % path)
1918 1918
1919 1919 self.user = m.group(2)
1920 1920 self.host = m.group(3)
1921 1921 self.port = m.group(5)
1922 1922 self.path = m.group(7)
1923 1923
1924 1924 args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
1925 1925 args = self.port and ("%s -p %s" % (args, self.port)) or args
1926 1926 path = self.path or ""
1927 1927
1928 1928 if not path:
1929 1929 raise RepoError("no remote repository path specified")
1930 1930
1931 1931 cmd = "ssh %s 'hg -R %s serve --stdio'"
1932 1932 cmd = cmd % (args, path)
1933 1933
1934 1934 self.pipeo, self.pipei, self.pipee = os.popen3(cmd)
1935 1935
1936 1936 def readerr(self):
1937 1937 while 1:
1938 1938 r,w,x = select.select([self.pipee], [], [], 0)
1939 1939 if not r: break
1940 1940 l = self.pipee.readline()
1941 1941 if not l: break
1942 1942 self.ui.status("remote: ", l)
1943 1943
1944 1944 def __del__(self):
1945 1945 try:
1946 1946 self.pipeo.close()
1947 1947 self.pipei.close()
1948 1948 for l in self.pipee:
1949 1949 self.ui.status("remote: ", l)
1950 1950 self.pipee.close()
1951 1951 except:
1952 1952 pass
1953 1953
1954 1954 def dev(self):
1955 1955 return -1
1956 1956
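# The stdio wire format is line-oriented: do_cmd sends the command name on
# one line, then "<key> <length>" followed by exactly <length> bytes for
# each argument; call() expects the reply to start with a decimal byte
# count on its own line, then that many bytes of payload. For a single
# head, for example, the "heads" reply might be the length line "41"
# followed by 40 hex digits and a newline.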
1957 1957 def do_cmd(self, cmd, **args):
1958 1958 self.ui.debug("sending %s command\n" % cmd)
1959 1959 self.pipeo.write("%s\n" % cmd)
1960 1960 for k, v in args.items():
1961 1961 self.pipeo.write("%s %d\n" % (k, len(v)))
1962 1962 self.pipeo.write(v)
1963 1963 self.pipeo.flush()
1964 1964
1965 1965 return self.pipei
1966 1966
1967 1967 def call(self, cmd, **args):
1968 1968 r = self.do_cmd(cmd, **args)
1969 1969 l = r.readline()
1970 1970 self.readerr()
1971 1971 try:
1972 1972 l = int(l)
1973 1973 except:
1974 1974 raise RepoError("unexpected response '%s'" % l)
1975 1975 return r.read(l)
1976 1976
1977 1977 def lock(self):
1978 1978 self.call("lock")
1979 1979 return remotelock(self)
1980 1980
1981 1981 def unlock(self):
1982 1982 self.call("unlock")
1983 1983
1984 1984 def heads(self):
1985 1985 d = self.call("heads")
1986 1986 try:
1987 1987 return map(bin, d[:-1].split(" "))
1988 1988 except:
1989 1989 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1990 1990
1991 1991 def branches(self, nodes):
1992 1992 n = " ".join(map(hex, nodes))
1993 1993 d = self.call("branches", nodes=n)
1994 1994 try:
1995 1995 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
1996 1996 return br
1997 1997 except:
1998 1998 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
1999 1999
2000 2000 def between(self, pairs):
2001 2001 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2002 2002 d = self.call("between", pairs=n)
2003 2003 try:
2004 2004 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2005 2005 return p
2006 2006 except:
2007 2007 raise RepoError("unexpected response '%s'" % (d[:400] + "..."))
2008 2008
2009 2009 def changegroup(self, nodes):
2010 2010 n = " ".join(map(hex, nodes))
2011 2011 f = self.do_cmd("changegroup", roots=n)
2012 2012 return f
2013 2013
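# Pushing over ssh happens in three steps: ask the server to start
# "addchangegroup" (an empty reply means go ahead), stream the changegroup
# to its stdin in 4096-byte chunks while relaying stderr, then read one
# final length-prefixed reply as the result.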
2014 2014 def addchangegroup(self, cg):
2015 2015 d = self.call("addchangegroup")
2016 2016 if d:
2017 2017 raise RepoError("push refused: %s" % d)
2018 2018
2019 2019 while 1:
2020 2020 d = cg.read(4096)
2021 2021 if not d: break
2022 2022 self.pipeo.write(d)
2023 2023 self.readerr()
2024 2024
2025 2025 self.pipeo.flush()
2026 2026
2027 2027 self.readerr()
2028 2028 l = int(self.pipei.readline())
2029 2029 return self.pipei.read(l) != ""
2030 2030
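# repository() is the factory that maps a URL scheme to a repository class:
# http:// and hg:// become httprepository, old-http:// is handled by
# localrepository on the rewritten http:// path, ssh:// becomes
# sshrepository, and anything else (or no path at all) is a plain
# localrepository. A hypothetical call:
#   repo = repository(ui, "ssh://user@example.com//absolute/path/to/repo")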
2031 2031 def repository(ui, path=None, create=0):
2032 2032 if path:
2033 2033 if path.startswith("http://"):
2034 2034 return httprepository(ui, path)
2035 2035 if path.startswith("hg://"):
2036 2036 return httprepository(ui, path.replace("hg://", "http://"))
2037 2037 if path.startswith("old-http://"):
2038 2038 return localrepository(ui, path.replace("old-http://", "http://"))
2039 2039 if path.startswith("ssh://"):
2040 2040 return sshrepository(ui, path)
2041 2041
2042 2042 return localrepository(ui, path, create)