##// END OF EJS Templates
Allow the changegroup generator to completely load the buffer....
mason@suse.com -
r903:71be6dd2 default
parent child Browse files
Show More
@@ -1,2210 +1,2213 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 14 demandload(globals(), "bisect errno select stat")
15 15
class filelog(revlog):
    """A revlog storing the history of a single tracked file.

    Data lives under .hg/data/<encoded path>.{i,d}.  A revision may carry
    a metadata header delimited by '\1\n' markers; read() strips it and
    readmeta() parses it into a dict.
    """

    def __init__(self, opener, path):
        revlog.__init__(self, opener,
                        os.path.join("data", self.encodedir(path + ".i")),
                        os.path.join("data", self.encodedir(path + ".d")))

    # This avoids a collision between a file named foo and a dir named
    # foo.i or foo.d
    def encodedir(self, path):
        return (path
                .replace(".hg/", ".hg.hg/")
                .replace(".i/", ".i.hg/")
                .replace(".d/", ".d.hg/"))

    # inverse of encodedir; replacements applied in reverse order
    def decodedir(self, path):
        return (path
                .replace(".d.hg/", ".d/")
                .replace(".i.hg/", ".i/")
                .replace(".hg.hg/", ".hg/"))

    def read(self, node):
        """Return the contents of *node*, minus any metadata header."""
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.find('\1\n', 2)
        return t[s+2:]

    def readmeta(self, node):
        """Return the metadata dict stored in the header of *node*.

        Returns an empty dict when the revision has no metadata header.
        """
        t = self.revision(node)
        # bugfix: m was previously never initialized, so any revision
        # with metadata raised NameError; the no-header path also
        # returned the raw text instead of a (empty) metadata dict
        m = {}
        if not t.startswith('\1\n'):
            return m
        s = t.find('\1\n', 2)
        mt = t[2:s]
        for l in mt.splitlines():
            k, v = l.split(": ", 1)
            m[k] = v
        return m

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Add a revision, prepending a metadata header when needed.

        A header is also added for plain text that happens to begin with
        the '\1\n' marker, so read() can round-trip it unambiguously.
        """
        if meta or text.startswith('\1\n'):
            mt = ""
            if meta:
                mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
            text = "\1\n" + "".join(mt) + "\1\n" + text
        return self.addrevision(text, transaction, link, p1, p2)

    def annotate(self, node):
        """Return [(linkrev, line)] attributing every line of *node*."""

        def decorate(text, rev):
            # one annotation entry per line, all pointing at rev
            return ([rev] * len(text.splitlines()), text)

        def pair(parent, child):
            # copy annotations for unchanged blocks from parent to child
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        # find all ancestors
        needed = {node:1}
        visit = [node]
        while visit:
            n = visit.pop(0)
            for p in self.parents(n):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision which is a topological order
        visit = [ (self.rev(n), n) for n in needed.keys() ]
        visit.sort()
        hist = {}

        for r,n in visit:
            curr = decorate(self.read(n), self.linkrev(n))
            for p in self.parents(n):
                if p != nullid:
                    curr = pair(hist[p], curr)
                    # trim the history of unneeded revs
                    needed[p] -= 1
                    if not needed[p]:
                        del hist[p]
            hist[n] = curr

        return zip(hist[n][0], hist[n][1].splitlines(1))
102 102
class manifest(revlog):
    """The revlog holding the manifest (file name -> file node map).

    Keeps three caches to speed up repeated reads and incremental adds:
      mapcache  -- (node, map, flag) for the last read() manifest
      listcache -- (text, list-of-lines) for the last read/added text
      addlist   -- working line list while an add() is in progress
    """

    def __init__(self, opener):
        self.mapcache = None
        self.listcache = None
        self.addlist = None
        revlog.__init__(self, opener, "00manifest.i", "00manifest.d")

    def read(self, node):
        """Return {filename: filenode} for manifest *node*.

        Each manifest line is "<name>\\0<40-hex-node>[x]\\n"; the trailing
        'x' marks the file executable (recorded in the flag dict).
        """
        if node == nullid: return {} # don't upset local cache
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        map = {}
        flag = {}
        self.listcache = (text, text.splitlines(1))
        for l in self.listcache[1]:
            (f, n) = l.split('\0')
            map[f] = bin(n[:40])
            flag[f] = (n[40:-1] == "x")
        self.mapcache = (node, map, flag)
        return map

    def readflags(self, node):
        """Return {filename: is-executable} for manifest *node*."""
        if node == nullid: return {} # don't upset local cache
        if not self.mapcache or self.mapcache[0] != node:
            # populate mapcache as a side effect
            self.read(node)
        return self.mapcache[2]

    def diff(self, a, b):
        """Return a delta from text *a* to text *b*.

        Uses the line lists collected by read()/add() when they describe
        exactly this pair, falling back to a plain textdiff otherwise.
        """
        # this is sneaky, as we're not actually using a and b
        if self.listcache and self.addlist and self.listcache[0] == a:
            d = mdiff.diff(self.listcache[1], self.addlist, 1)
            # sanity-check the shortcut against a real patch application
            if mdiff.patch(a, d) != b:
                sys.stderr.write("*** sortdiff failed, falling back ***\n")
                return mdiff.textdiff(a, b)
            return d
        else:
            return mdiff.textdiff(a, b)

    def add(self, map, flags, transaction, link, p1=None, p2=None,
            changed=None):
        """Store a new manifest revision and return its node.

        When *changed* -- a ([added/modified], [removed]) pair -- and a
        valid listcache are available, the new text and its delta are
        built incrementally from the cached line list instead of
        re-rendering and re-diffing the whole manifest.
        """
        # directly generate the mdiff delta from the data collected during
        # the bisect loop below
        def gendelta(delta):
            # merge adjacent/overlapping [start, end, text] edits into
            # ">lll"-packed binary patch hunks
            i = 0
            result = []
            while i < len(delta):
                start = delta[i][2]
                end = delta[i][3]
                l = delta[i][4]
                if l == None:
                    l = ""
                while i < len(delta) - 1 and start <= delta[i+1][2] \
                          and end >= delta[i+1][2]:
                    if delta[i+1][3] > end:
                        end = delta[i+1][3]
                    if delta[i+1][4]:
                        l += delta[i+1][4]
                    i += 1
                result.append(struct.pack(">lll", start, end, len(l)) + l)
                i += 1
            return result

        # apply the changes collected during the bisect loop to our addlist
        def addlistdelta(addlist, delta):
            # apply the deltas to the addlist. start from the bottom up
            # so changes to the offsets don't mess things up.
            i = len(delta)
            while i > 0:
                i -= 1
                start = delta[i][0]
                end = delta[i][1]
                if delta[i][4]:
                    addlist[start:end] = [delta[i][4]]
                else:
                    del addlist[start:end]
            return addlist

        # calculate the byte offset of the start of each line in the
        # manifest
        def calcoffsets(addlist):
            offsets = [0] * (len(addlist) + 1)
            offset = 0
            i = 0
            while i < len(addlist):
                offsets[i] = offset
                offset += len(addlist[i])
                i += 1
            offsets[i] = offset
            return offsets

        # if we're using the listcache, make sure it is valid and
        # parented by the same node we're diffing against
        if not changed or not self.listcache or not p1 or \
               self.mapcache[0] != p1:
            # slow path: render every manifest line from scratch
            files = map.keys()
            files.sort()

            self.addlist = ["%s\000%s%s\n" %
                            (f, hex(map[f]), flags[f] and "x" or '')
                            for f in files]
            cachedelta = None
        else:
            # fast path: patch the cached line list in place
            addlist = self.listcache[1]

            # find the starting offset for each line in the add list
            offsets = calcoffsets(addlist)

            # combine the changed lists into one list for sorting;
            # tag 0 = added/modified, tag 1 = removed
            work = [[x, 0] for x in changed[0]]
            work[len(work):] = [[x, 1] for x in changed[1]]
            work.sort()

            delta = []
            bs = 0

            for w in work:
                f = w[0]
                # bs will either be the index of the item or the insert point
                bs = bisect.bisect(addlist, f, bs)
                if bs < len(addlist):
                    fn = addlist[bs][:addlist[bs].index('\0')]
                else:
                    fn = None
                if w[1] == 0:
                    l = "%s\000%s%s\n" % (f, hex(map[f]),
                                          flags[f] and "x" or '')
                else:
                    l = None
                start = bs
                if fn != f:
                    # item not found, insert a new one
                    end = bs
                    if w[1] == 1:
                        sys.stderr.write("failed to remove %s from manifest\n"
                                         % f)
                        sys.exit(1)
                else:
                    # item is found, replace/delete the existing line
                    end = bs + 1
                delta.append([start, end, offsets[start], offsets[end], l])

            self.addlist = addlistdelta(addlist, delta)
            # the binary delta shortcut is only valid against the tip
            if self.mapcache[0] == self.tip():
                cachedelta = "".join(gendelta(delta))
            else:
                cachedelta = None

        text = "".join(self.addlist)
        # verify the incrementally-built delta really reproduces the text
        if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
            sys.stderr.write("manifest delta failure\n")
            sys.exit(1)
        n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        self.mapcache = (n, map, flags)
        self.listcache = (text, self.addlist)
        self.addlist = None

        return n
261 261
class changelog(revlog):
    """The revlog holding changeset metadata (00changelog.{i,d})."""

    def __init__(self, opener):
        revlog.__init__(self, opener, "00changelog.i", "00changelog.d")

    def extract(self, text):
        """Parse changeset *text* into (manifest, user, date, files, desc)."""
        if not text:
            return (nullid, "", "0", [], "")
        # the header and the description are separated by a blank line
        sep = text.index("\n\n")
        description = text[sep + 2:]
        header = text[:sep].splitlines()
        # header layout: manifest hash, committer, date, changed files
        return (bin(header[0]), header[1], header[2], header[3:], description)

    def read(self, node):
        """Return the parsed changeset stored at *node*."""
        return self.extract(self.revision(node))

    def add(self, manifest, list, desc, transaction, p1=None, p2=None,
            user=None, date=None):
        """Record a new changeset and return its node."""
        # default to the current time; note this sorts the caller's list
        # in place, as the original did
        date = date or "%d %d" % (time.time(), time.timezone)
        list.sort()
        lines = [hex(manifest), user, date] + list + ["", desc]
        return self.addrevision("\n".join(lines), transaction,
                                self.count(), p1, p2)
288 288
289 289 class dirstate:
290 290 def __init__(self, opener, ui, root):
291 291 self.opener = opener
292 292 self.root = root
293 293 self.dirty = 0
294 294 self.ui = ui
295 295 self.map = None
296 296 self.pl = None
297 297 self.copies = {}
298 298 self.ignorefunc = None
299 299
300 300 def wjoin(self, f):
301 301 return os.path.join(self.root, f)
302 302
303 303 def getcwd(self):
304 304 cwd = os.getcwd()
305 305 if cwd == self.root: return ''
306 306 return cwd[len(self.root) + 1:]
307 307
308 308 def ignore(self, f):
309 309 if not self.ignorefunc:
310 310 bigpat = []
311 311 try:
312 312 l = file(self.wjoin(".hgignore"))
313 313 for pat in l:
314 314 if pat != "\n":
315 315 p = pat[:-1]
316 316 try:
317 317 re.compile(p)
318 318 except:
319 319 self.ui.warn("ignoring invalid ignore"
320 320 + " regular expression '%s'\n" % p)
321 321 else:
322 322 bigpat.append(p)
323 323 except IOError: pass
324 324
325 325 if bigpat:
326 326 s = "(?:%s)" % (")|(?:".join(bigpat))
327 327 r = re.compile(s)
328 328 self.ignorefunc = r.search
329 329 else:
330 330 self.ignorefunc = util.never
331 331
332 332 return self.ignorefunc(f)
333 333
334 334 def __del__(self):
335 335 if self.dirty:
336 336 self.write()
337 337
338 338 def __getitem__(self, key):
339 339 try:
340 340 return self.map[key]
341 341 except TypeError:
342 342 self.read()
343 343 return self[key]
344 344
345 345 def __contains__(self, key):
346 346 if not self.map: self.read()
347 347 return key in self.map
348 348
349 349 def parents(self):
350 350 if not self.pl:
351 351 self.read()
352 352 return self.pl
353 353
354 354 def markdirty(self):
355 355 if not self.dirty:
356 356 self.dirty = 1
357 357
358 358 def setparents(self, p1, p2 = nullid):
359 359 self.markdirty()
360 360 self.pl = p1, p2
361 361
362 362 def state(self, key):
363 363 try:
364 364 return self[key][0]
365 365 except KeyError:
366 366 return "?"
367 367
368 368 def read(self):
369 369 if self.map is not None: return self.map
370 370
371 371 self.map = {}
372 372 self.pl = [nullid, nullid]
373 373 try:
374 374 st = self.opener("dirstate").read()
375 375 if not st: return
376 376 except: return
377 377
378 378 self.pl = [st[:20], st[20: 40]]
379 379
380 380 pos = 40
381 381 while pos < len(st):
382 382 e = struct.unpack(">cllll", st[pos:pos+17])
383 383 l = e[4]
384 384 pos += 17
385 385 f = st[pos:pos + l]
386 386 if '\0' in f:
387 387 f, c = f.split('\0')
388 388 self.copies[f] = c
389 389 self.map[f] = e[:4]
390 390 pos += l
391 391
392 392 def copy(self, source, dest):
393 393 self.read()
394 394 self.markdirty()
395 395 self.copies[dest] = source
396 396
397 397 def copied(self, file):
398 398 return self.copies.get(file, None)
399 399
400 400 def update(self, files, state, **kw):
401 401 ''' current states:
402 402 n normal
403 403 m needs merging
404 404 r marked for removal
405 405 a marked for addition'''
406 406
407 407 if not files: return
408 408 self.read()
409 409 self.markdirty()
410 410 for f in files:
411 411 if state == "r":
412 412 self.map[f] = ('r', 0, 0, 0)
413 413 else:
414 414 s = os.stat(os.path.join(self.root, f))
415 415 st_size = kw.get('st_size', s.st_size)
416 416 st_mtime = kw.get('st_mtime', s.st_mtime)
417 417 self.map[f] = (state, s.st_mode, st_size, st_mtime)
418 418
419 419 def forget(self, files):
420 420 if not files: return
421 421 self.read()
422 422 self.markdirty()
423 423 for f in files:
424 424 try:
425 425 del self.map[f]
426 426 except KeyError:
427 427 self.ui.warn("not in dirstate: %s!\n" % f)
428 428 pass
429 429
430 430 def clear(self):
431 431 self.map = {}
432 432 self.markdirty()
433 433
434 434 def write(self):
435 435 st = self.opener("dirstate", "w")
436 436 st.write("".join(self.pl))
437 437 for f, e in self.map.items():
438 438 c = self.copied(f)
439 439 if c:
440 440 f = f + "\0" + c
441 441 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
442 442 st.write(e + f)
443 443 self.dirty = 0
444 444
445 445 def filterfiles(self, files):
446 446 ret = {}
447 447 unknown = []
448 448
449 449 for x in files:
450 450 if x is '.':
451 451 return self.map.copy()
452 452 if x not in self.map:
453 453 unknown.append(x)
454 454 else:
455 455 ret[x] = self.map[x]
456 456
457 457 if not unknown:
458 458 return ret
459 459
460 460 b = self.map.keys()
461 461 b.sort()
462 462 blen = len(b)
463 463
464 464 for x in unknown:
465 465 bs = bisect.bisect(b, x)
466 466 if bs != 0 and b[bs-1] == x:
467 467 ret[x] = self.map[x]
468 468 continue
469 469 while bs < blen:
470 470 s = b[bs]
471 471 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
472 472 ret[s] = self.map[s]
473 473 else:
474 474 break
475 475 bs += 1
476 476 return ret
477 477
478 478 def walk(self, files = None, match = util.always, dc=None):
479 479 self.read()
480 480
481 481 # walk all files by default
482 482 if not files:
483 483 files = [self.root]
484 484 if not dc:
485 485 dc = self.map.copy()
486 486 elif not dc:
487 487 dc = self.filterfiles(files)
488 488
489 489 known = {'.hg': 1}
490 490 def seen(fn):
491 491 if fn in known: return True
492 492 known[fn] = 1
493 493 def traverse():
494 494 for ff in util.unique(files):
495 495 f = os.path.join(self.root, ff)
496 496 try:
497 497 st = os.stat(f)
498 498 except OSError, inst:
499 499 if ff not in dc: self.ui.warn('%s: %s\n' % (
500 500 util.pathto(self.getcwd(), ff),
501 501 inst.strerror))
502 502 continue
503 503 if stat.S_ISDIR(st.st_mode):
504 504 for dir, subdirs, fl in os.walk(f):
505 505 d = dir[len(self.root) + 1:]
506 506 nd = util.normpath(d)
507 507 if nd == '.': nd = ''
508 508 if seen(nd):
509 509 subdirs[:] = []
510 510 continue
511 511 for sd in subdirs:
512 512 ds = os.path.join(nd, sd +'/')
513 513 if self.ignore(ds) or not match(ds):
514 514 subdirs.remove(sd)
515 515 subdirs.sort()
516 516 fl.sort()
517 517 for fn in fl:
518 518 fn = util.pconvert(os.path.join(d, fn))
519 519 yield 'f', fn
520 520 elif stat.S_ISREG(st.st_mode):
521 521 yield 'f', ff
522 522 else:
523 523 kind = 'unknown'
524 524 if stat.S_ISCHR(st.st_mode): kind = 'character device'
525 525 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
526 526 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
527 527 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
528 528 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
529 529 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
530 530 util.pathto(self.getcwd(), ff),
531 531 kind))
532 532
533 533 ks = dc.keys()
534 534 ks.sort()
535 535 for k in ks:
536 536 yield 'm', k
537 537
538 538 # yield only files that match: all in dirstate, others only if
539 539 # not in .hgignore
540 540
541 541 for src, fn in util.unique(traverse()):
542 542 fn = util.normpath(fn)
543 543 if seen(fn): continue
544 544 if fn not in dc and self.ignore(fn):
545 545 continue
546 546 if match(fn):
547 547 yield src, fn
548 548
549 549 def changes(self, files=None, match=util.always):
550 550 self.read()
551 551 if not files:
552 552 dc = self.map.copy()
553 553 else:
554 554 dc = self.filterfiles(files)
555 555 lookup, modified, added, unknown = [], [], [], []
556 556 removed, deleted = [], []
557 557
558 558 for src, fn in self.walk(files, match, dc=dc):
559 559 try:
560 560 s = os.stat(os.path.join(self.root, fn))
561 561 except OSError:
562 562 continue
563 563 if not stat.S_ISREG(s.st_mode):
564 564 continue
565 565 c = dc.get(fn)
566 566 if c:
567 567 del dc[fn]
568 568 if c[0] == 'm':
569 569 modified.append(fn)
570 570 elif c[0] == 'a':
571 571 added.append(fn)
572 572 elif c[0] == 'r':
573 573 unknown.append(fn)
574 574 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
575 575 modified.append(fn)
576 576 elif c[3] != s.st_mtime:
577 577 lookup.append(fn)
578 578 else:
579 579 unknown.append(fn)
580 580
581 581 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
582 582 if c[0] == 'r':
583 583 removed.append(fn)
584 584 else:
585 585 deleted.append(fn)
586 586 return (lookup, modified, added, removed + deleted, unknown)
587 587
# used to avoid circular references so destructors work
def opener(base):
    """Return a file-opening callable rooted at *base*.

    For http:// bases the returned callable yields range readers;
    otherwise it opens local files in binary mode, creating missing
    directories and breaking hardlinks before any write.
    """
    root = base

    def o(path, mode="r"):
        # remote repositories are read over HTTP range requests
        if root.startswith("http://"):
            url = os.path.join(root, urllib.quote(path))
            return httprangereader.httprangereader(url)

        fn = os.path.join(root, path)

        mode += "b" # for that other OS

        if mode[0] != "r":
            try:
                st = os.stat(fn)
            except OSError:
                # writing a new file: make sure its directory exists
                parent = os.path.dirname(fn)
                if not os.path.isdir(parent):
                    os.makedirs(parent)
            else:
                # copy-on-write: break hardlinks before modifying
                if st.st_nlink > 1:
                    file(fn + ".tmp", "wb").write(file(fn, "rb").read())
                    util.rename(fn + ".tmp", fn)

        return file(fn, mode)

    return o
615 615
class RepoError(Exception):
    """Raised when a repository cannot be located or accessed."""
617 617
618 618 class localrepository:
619 619 def __init__(self, ui, path=None, create=0):
620 620 self.remote = 0
621 621 if path and path.startswith("http://"):
622 622 self.remote = 1
623 623 self.path = path
624 624 else:
625 625 if not path:
626 626 p = os.getcwd()
627 627 while not os.path.isdir(os.path.join(p, ".hg")):
628 628 oldp = p
629 629 p = os.path.dirname(p)
630 630 if p == oldp: raise RepoError("no repo found")
631 631 path = p
632 632 self.path = os.path.join(path, ".hg")
633 633
634 634 if not create and not os.path.isdir(self.path):
635 635 raise RepoError("repository %s not found" % self.path)
636 636
637 637 self.root = path
638 638 self.ui = ui
639 639
640 640 if create:
641 641 os.mkdir(self.path)
642 642 os.mkdir(self.join("data"))
643 643
644 644 self.opener = opener(self.path)
645 645 self.wopener = opener(self.root)
646 646 self.manifest = manifest(self.opener)
647 647 self.changelog = changelog(self.opener)
648 648 self.tagscache = None
649 649 self.nodetagscache = None
650 650
651 651 if not self.remote:
652 652 self.dirstate = dirstate(self.opener, ui, self.root)
653 653 try:
654 654 self.ui.readconfig(self.opener("hgrc"))
655 655 except IOError: pass
656 656
657 657 def hook(self, name, **args):
658 658 s = self.ui.config("hooks", name)
659 659 if s:
660 660 self.ui.note("running hook %s: %s\n" % (name, s))
661 661 old = {}
662 662 for k, v in args.items():
663 663 k = k.upper()
664 664 old[k] = os.environ.get(k, None)
665 665 os.environ[k] = v
666 666
667 667 r = os.system(s)
668 668
669 669 for k, v in old.items():
670 670 if v != None:
671 671 os.environ[k] = v
672 672 else:
673 673 del os.environ[k]
674 674
675 675 if r:
676 676 self.ui.warn("abort: %s hook failed with status %d!\n" %
677 677 (name, r))
678 678 return False
679 679 return True
680 680
681 681 def tags(self):
682 682 '''return a mapping of tag to node'''
683 683 if not self.tagscache:
684 684 self.tagscache = {}
685 685 def addtag(self, k, n):
686 686 try:
687 687 bin_n = bin(n)
688 688 except TypeError:
689 689 bin_n = ''
690 690 self.tagscache[k.strip()] = bin_n
691 691
692 692 try:
693 693 # read each head of the tags file, ending with the tip
694 694 # and add each tag found to the map, with "newer" ones
695 695 # taking precedence
696 696 fl = self.file(".hgtags")
697 697 h = fl.heads()
698 698 h.reverse()
699 699 for r in h:
700 700 for l in fl.revision(r).splitlines():
701 701 if l:
702 702 n, k = l.split(" ", 1)
703 703 addtag(self, k, n)
704 704 except KeyError:
705 705 pass
706 706
707 707 try:
708 708 f = self.opener("localtags")
709 709 for l in f:
710 710 n, k = l.split(" ", 1)
711 711 addtag(self, k, n)
712 712 except IOError:
713 713 pass
714 714
715 715 self.tagscache['tip'] = self.changelog.tip()
716 716
717 717 return self.tagscache
718 718
719 719 def tagslist(self):
720 720 '''return a list of tags ordered by revision'''
721 721 l = []
722 722 for t, n in self.tags().items():
723 723 try:
724 724 r = self.changelog.rev(n)
725 725 except:
726 726 r = -2 # sort to the beginning of the list if unknown
727 727 l.append((r,t,n))
728 728 l.sort()
729 729 return [(t,n) for r,t,n in l]
730 730
731 731 def nodetags(self, node):
732 732 '''return the tags associated with a node'''
733 733 if not self.nodetagscache:
734 734 self.nodetagscache = {}
735 735 for t,n in self.tags().items():
736 736 self.nodetagscache.setdefault(n,[]).append(t)
737 737 return self.nodetagscache.get(node, [])
738 738
739 739 def lookup(self, key):
740 740 try:
741 741 return self.tags()[key]
742 742 except KeyError:
743 743 try:
744 744 return self.changelog.lookup(key)
745 745 except:
746 746 raise RepoError("unknown revision '%s'" % key)
747 747
748 748 def dev(self):
749 749 if self.remote: return -1
750 750 return os.stat(self.path).st_dev
751 751
752 752 def join(self, f):
753 753 return os.path.join(self.path, f)
754 754
755 755 def wjoin(self, f):
756 756 return os.path.join(self.root, f)
757 757
758 758 def file(self, f):
759 759 if f[0] == '/': f = f[1:]
760 760 return filelog(self.opener, f)
761 761
762 762 def getcwd(self):
763 763 return self.dirstate.getcwd()
764 764
765 765 def wfile(self, f, mode='r'):
766 766 return self.wopener(f, mode)
767 767
768 768 def transaction(self):
769 769 # save dirstate for undo
770 770 try:
771 771 ds = self.opener("dirstate").read()
772 772 except IOError:
773 773 ds = ""
774 774 self.opener("journal.dirstate", "w").write(ds)
775 775
776 776 def after():
777 777 util.rename(self.join("journal"), self.join("undo"))
778 778 util.rename(self.join("journal.dirstate"),
779 779 self.join("undo.dirstate"))
780 780
781 781 return transaction.transaction(self.ui.warn, self.opener,
782 782 self.join("journal"), after)
783 783
784 784 def recover(self):
785 785 lock = self.lock()
786 786 if os.path.exists(self.join("journal")):
787 787 self.ui.status("rolling back interrupted transaction\n")
788 788 return transaction.rollback(self.opener, self.join("journal"))
789 789 else:
790 790 self.ui.warn("no interrupted transaction available\n")
791 791
792 792 def undo(self):
793 793 lock = self.lock()
794 794 if os.path.exists(self.join("undo")):
795 795 self.ui.status("rolling back last transaction\n")
796 796 transaction.rollback(self.opener, self.join("undo"))
797 797 self.dirstate = None
798 798 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
799 799 self.dirstate = dirstate(self.opener, self.ui, self.root)
800 800 else:
801 801 self.ui.warn("no undo information available\n")
802 802
803 803 def lock(self, wait = 1):
804 804 try:
805 805 return lock.lock(self.join("lock"), 0)
806 806 except lock.LockHeld, inst:
807 807 if wait:
808 808 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
809 809 return lock.lock(self.join("lock"), wait)
810 810 raise inst
811 811
812 812 def rawcommit(self, files, text, user, date, p1=None, p2=None):
813 813 orig_parent = self.dirstate.parents()[0] or nullid
814 814 p1 = p1 or self.dirstate.parents()[0] or nullid
815 815 p2 = p2 or self.dirstate.parents()[1] or nullid
816 816 c1 = self.changelog.read(p1)
817 817 c2 = self.changelog.read(p2)
818 818 m1 = self.manifest.read(c1[0])
819 819 mf1 = self.manifest.readflags(c1[0])
820 820 m2 = self.manifest.read(c2[0])
821 821
822 822 if orig_parent == p1:
823 823 update_dirstate = 1
824 824 else:
825 825 update_dirstate = 0
826 826
827 827 tr = self.transaction()
828 828 mm = m1.copy()
829 829 mfm = mf1.copy()
830 830 linkrev = self.changelog.count()
831 831 for f in files:
832 832 try:
833 833 t = self.wfile(f).read()
834 834 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
835 835 r = self.file(f)
836 836 mfm[f] = tm
837 837 mm[f] = r.add(t, {}, tr, linkrev,
838 838 m1.get(f, nullid), m2.get(f, nullid))
839 839 if update_dirstate:
840 840 self.dirstate.update([f], "n")
841 841 except IOError:
842 842 try:
843 843 del mm[f]
844 844 del mfm[f]
845 845 if update_dirstate:
846 846 self.dirstate.forget([f])
847 847 except:
848 848 # deleted from p2?
849 849 pass
850 850
851 851 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
852 852 user = user or self.ui.username()
853 853 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
854 854 tr.close()
855 855 if update_dirstate:
856 856 self.dirstate.setparents(n, nullid)
857 857
858 858 def commit(self, files = None, text = "", user = None, date = None,
859 859 match = util.always, force=False):
860 860 commit = []
861 861 remove = []
862 862 if files:
863 863 for f in files:
864 864 s = self.dirstate.state(f)
865 865 if s in 'nmai':
866 866 commit.append(f)
867 867 elif s == 'r':
868 868 remove.append(f)
869 869 else:
870 870 self.ui.warn("%s not tracked!\n" % f)
871 871 else:
872 872 (c, a, d, u) = self.changes(match = match)
873 873 commit = c + a
874 874 remove = d
875 875
876 876 if not commit and not remove and not force:
877 877 self.ui.status("nothing changed\n")
878 878 return None
879 879
880 880 if not self.hook("precommit"):
881 881 return None
882 882
883 883 p1, p2 = self.dirstate.parents()
884 884 c1 = self.changelog.read(p1)
885 885 c2 = self.changelog.read(p2)
886 886 m1 = self.manifest.read(c1[0])
887 887 mf1 = self.manifest.readflags(c1[0])
888 888 m2 = self.manifest.read(c2[0])
889 889 lock = self.lock()
890 890 tr = self.transaction()
891 891
892 892 # check in files
893 893 new = {}
894 894 linkrev = self.changelog.count()
895 895 commit.sort()
896 896 for f in commit:
897 897 self.ui.note(f + "\n")
898 898 try:
899 899 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
900 900 t = self.wfile(f).read()
901 901 except IOError:
902 902 self.ui.warn("trouble committing %s!\n" % f)
903 903 raise
904 904
905 905 meta = {}
906 906 cp = self.dirstate.copied(f)
907 907 if cp:
908 908 meta["copy"] = cp
909 909 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
910 910 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
911 911
912 912 r = self.file(f)
913 913 fp1 = m1.get(f, nullid)
914 914 fp2 = m2.get(f, nullid)
915 915 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
916 916
917 917 # update manifest
918 918 m1.update(new)
919 919 for f in remove:
920 920 if f in m1:
921 921 del m1[f]
922 922 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
923 923 (new, remove))
924 924
925 925 # add changeset
926 926 new = new.keys()
927 927 new.sort()
928 928
929 929 if not text:
930 930 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
931 931 edittext += "".join(["HG: changed %s\n" % f for f in new])
932 932 edittext += "".join(["HG: removed %s\n" % f for f in remove])
933 933 edittext = self.ui.edit(edittext)
934 934 if not edittext.rstrip():
935 935 return None
936 936 text = edittext
937 937
938 938 user = user or self.ui.username()
939 939 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
940 940 tr.close()
941 941
942 942 self.dirstate.setparents(n)
943 943 self.dirstate.update(new, "n")
944 944 self.dirstate.forget(remove)
945 945
946 946 if not self.hook("commit", node=hex(n)):
947 947 return None
948 948 return n
949 949
950 950 def walk(self, node = None, files = [], match = util.always):
951 951 if node:
952 952 for fn in self.manifest.read(self.changelog.read(node)[0]):
953 953 if match(fn): yield 'm', fn
954 954 else:
955 955 for src, fn in self.dirstate.walk(files, match):
956 956 yield src, fn
957 957
958 958 def changes(self, node1 = None, node2 = None, files = [],
959 959 match = util.always):
960 960 mf2, u = None, []
961 961
962 962 def fcmp(fn, mf):
963 963 t1 = self.wfile(fn).read()
964 964 t2 = self.file(fn).revision(mf[fn])
965 965 return cmp(t1, t2)
966 966
967 967 def mfmatches(node):
968 968 mf = dict(self.manifest.read(node))
969 969 for fn in mf.keys():
970 970 if not match(fn):
971 971 del mf[fn]
972 972 return mf
973 973
974 974 # are we comparing the working directory?
975 975 if not node2:
976 976 l, c, a, d, u = self.dirstate.changes(files, match)
977 977
978 978 # are we comparing working dir against its parent?
979 979 if not node1:
980 980 if l:
981 981 # do a full compare of any files that might have changed
982 982 change = self.changelog.read(self.dirstate.parents()[0])
983 983 mf2 = mfmatches(change[0])
984 984 for f in l:
985 985 if fcmp(f, mf2):
986 986 c.append(f)
987 987
988 988 for l in c, a, d, u:
989 989 l.sort()
990 990
991 991 return (c, a, d, u)
992 992
993 993 # are we comparing working dir against non-tip?
994 994 # generate a pseudo-manifest for the working dir
995 995 if not node2:
996 996 if not mf2:
997 997 change = self.changelog.read(self.dirstate.parents()[0])
998 998 mf2 = mfmatches(change[0])
999 999 for f in a + c + l:
1000 1000 mf2[f] = ""
1001 1001 for f in d:
1002 1002 if f in mf2: del mf2[f]
1003 1003 else:
1004 1004 change = self.changelog.read(node2)
1005 1005 mf2 = mfmatches(change[0])
1006 1006
1007 1007 # flush lists from dirstate before comparing manifests
1008 1008 c, a = [], []
1009 1009
1010 1010 change = self.changelog.read(node1)
1011 1011 mf1 = mfmatches(change[0])
1012 1012
1013 1013 for fn in mf2:
1014 1014 if mf1.has_key(fn):
1015 1015 if mf1[fn] != mf2[fn]:
1016 1016 if mf2[fn] != "" or fcmp(fn, mf1):
1017 1017 c.append(fn)
1018 1018 del mf1[fn]
1019 1019 else:
1020 1020 a.append(fn)
1021 1021
1022 1022 d = mf1.keys()
1023 1023
1024 1024 for l in c, a, d, u:
1025 1025 l.sort()
1026 1026
1027 1027 return (c, a, d, u)
1028 1028
1029 1029 def add(self, list):
1030 1030 for f in list:
1031 1031 p = self.wjoin(f)
1032 1032 if not os.path.exists(p):
1033 1033 self.ui.warn("%s does not exist!\n" % f)
1034 1034 elif not os.path.isfile(p):
1035 1035 self.ui.warn("%s not added: only files supported currently\n" % f)
1036 1036 elif self.dirstate.state(f) in 'an':
1037 1037 self.ui.warn("%s already tracked!\n" % f)
1038 1038 else:
1039 1039 self.dirstate.update([f], "a")
1040 1040
1041 1041 def forget(self, list):
1042 1042 for f in list:
1043 1043 if self.dirstate.state(f) not in 'ai':
1044 1044 self.ui.warn("%s not added!\n" % f)
1045 1045 else:
1046 1046 self.dirstate.forget([f])
1047 1047
1048 1048 def remove(self, list):
1049 1049 for f in list:
1050 1050 p = self.wjoin(f)
1051 1051 if os.path.exists(p):
1052 1052 self.ui.warn("%s still exists!\n" % f)
1053 1053 elif self.dirstate.state(f) == 'a':
1054 1054 self.ui.warn("%s never committed!\n" % f)
1055 1055 self.dirstate.forget([f])
1056 1056 elif f not in self.dirstate:
1057 1057 self.ui.warn("%s not tracked!\n" % f)
1058 1058 else:
1059 1059 self.dirstate.update([f], "r")
1060 1060
1061 1061 def copy(self, source, dest):
1062 1062 p = self.wjoin(dest)
1063 1063 if not os.path.exists(p):
1064 1064 self.ui.warn("%s does not exist!\n" % dest)
1065 1065 elif not os.path.isfile(p):
1066 1066 self.ui.warn("copy failed: %s is not a file\n" % dest)
1067 1067 else:
1068 1068 if self.dirstate.state(dest) == '?':
1069 1069 self.dirstate.update([dest], "a")
1070 1070 self.dirstate.copy(source, dest)
1071 1071
    def heads(self):
        """Return the changelog's head nodes (thin delegation)."""
        return self.changelog.heads()
1074 1074
1075 1075 # branchlookup returns a dict giving a list of branches for
1076 1076 # each head. A branch is defined as the tag of a node or
1077 1077 # the branch of the node's parents. If a node has multiple
1078 1078 # branch tags, tags are eliminated if they are visible from other
1079 1079 # branch tags.
1080 1080 #
1081 1081 # So, for this graph: a->b->c->d->e
1082 1082 # \ /
1083 1083 # aa -----/
1084 1084 # a has tag 2.6.12
1085 1085 # d has tag 2.6.13
1086 1086 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1087 1087 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1088 1088 # from the list.
1089 1089 #
1090 1090 # It is possible that more than one head will have the same branch tag.
    # Callers need to check the result for multiple heads under the same
    # branch tag if that is a problem for them (i.e. checkout of a
    # specific branch).
1094 1094 #
1095 1095 # passing in a specific branch will limit the depth of the search
1096 1096 # through the parents. It won't limit the branches returned in the
1097 1097 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head node to the list of branch tags visible from it.

        heads defaults to self.heads().  branch, when given, stops the
        parent walk early once a node carrying that tag is reached (the
        result may still mention other branches).  See the block
        comment above this method for the full semantics.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}
        merges = []      # second parents still to be walked, with their 'found' list
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    # the 'tip' tag is synthetic and never names a branch
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    if branch in tags:
                        continue
                seen[n] = 1
                # queue the second parent of a merge for a later pass
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}
        for h in heads:
            # memoized reachability over the branch-tag edges in 'branches'
            def visible(node):
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1180 1180
1181 1181 def branches(self, nodes):
1182 1182 if not nodes: nodes = [self.changelog.tip()]
1183 1183 b = []
1184 1184 for n in nodes:
1185 1185 t = n
1186 1186 while n:
1187 1187 p = self.changelog.parents(n)
1188 1188 if p[1] != nullid or p[0] == nullid:
1189 1189 b.append((t, n, p[0], p[1]))
1190 1190 break
1191 1191 n = p[0]
1192 1192 return b
1193 1193
1194 1194 def between(self, pairs):
1195 1195 r = []
1196 1196
1197 1197 for top, bottom in pairs:
1198 1198 n, l, i = top, [], 0
1199 1199 f = 1
1200 1200
1201 1201 while n != bottom:
1202 1202 p = self.changelog.parents(n)[0]
1203 1203 if i == f:
1204 1204 l.append(n)
1205 1205 f = f * 2
1206 1206 n = p
1207 1207 i += 1
1208 1208
1209 1209 r.append(l)
1210 1210
1211 1211 return r
1212 1212
1213 1213 def newer(self, nodes):
1214 1214 m = {}
1215 1215 nl = []
1216 1216 pm = {}
1217 1217 cl = self.changelog
1218 1218 t = l = cl.count()
1219 1219
1220 1220 # find the lowest numbered node
1221 1221 for n in nodes:
1222 1222 l = min(l, cl.rev(n))
1223 1223 m[n] = 1
1224 1224
1225 1225 for i in xrange(l, t):
1226 1226 n = cl.node(i)
1227 1227 if n in m: # explicitly listed
1228 1228 pm[n] = 1
1229 1229 nl.append(n)
1230 1230 continue
1231 1231 for p in cl.parents(n):
1232 1232 if p in pm: # parent listed
1233 1233 pm[n] = 1
1234 1234 nl.append(n)
1235 1235 break
1236 1236
1237 1237 return nl
1238 1238
1239 1239 def findincoming(self, remote, base=None, heads=None):
1240 1240 m = self.changelog.nodemap
1241 1241 search = []
1242 1242 fetch = []
1243 1243 seen = {}
1244 1244 seenbranch = {}
1245 1245 if base == None:
1246 1246 base = {}
1247 1247
1248 1248 # assume we're closer to the tip than the root
1249 1249 # and start by examining the heads
1250 1250 self.ui.status("searching for changes\n")
1251 1251
1252 1252 if not heads:
1253 1253 heads = remote.heads()
1254 1254
1255 1255 unknown = []
1256 1256 for h in heads:
1257 1257 if h not in m:
1258 1258 unknown.append(h)
1259 1259 else:
1260 1260 base[h] = 1
1261 1261
1262 1262 if not unknown:
1263 1263 return None
1264 1264
1265 1265 rep = {}
1266 1266 reqcnt = 0
1267 1267
1268 1268 # search through remote branches
1269 1269 # a 'branch' here is a linear segment of history, with four parts:
1270 1270 # head, root, first parent, second parent
1271 1271 # (a branch always has two parents (or none) by definition)
1272 1272 unknown = remote.branches(unknown)
1273 1273 while unknown:
1274 1274 r = []
1275 1275 while unknown:
1276 1276 n = unknown.pop(0)
1277 1277 if n[0] in seen:
1278 1278 continue
1279 1279
1280 1280 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1281 1281 if n[0] == nullid:
1282 1282 break
1283 1283 if n in seenbranch:
1284 1284 self.ui.debug("branch already found\n")
1285 1285 continue
1286 1286 if n[1] and n[1] in m: # do we know the base?
1287 1287 self.ui.debug("found incomplete branch %s:%s\n"
1288 1288 % (short(n[0]), short(n[1])))
1289 1289 search.append(n) # schedule branch range for scanning
1290 1290 seenbranch[n] = 1
1291 1291 else:
1292 1292 if n[1] not in seen and n[1] not in fetch:
1293 1293 if n[2] in m and n[3] in m:
1294 1294 self.ui.debug("found new changeset %s\n" %
1295 1295 short(n[1]))
1296 1296 fetch.append(n[1]) # earliest unknown
1297 1297 base[n[2]] = 1 # latest known
1298 1298 continue
1299 1299
1300 1300 for a in n[2:4]:
1301 1301 if a not in rep:
1302 1302 r.append(a)
1303 1303 rep[a] = 1
1304 1304
1305 1305 seen[n[0]] = 1
1306 1306
1307 1307 if r:
1308 1308 reqcnt += 1
1309 1309 self.ui.debug("request %d: %s\n" %
1310 1310 (reqcnt, " ".join(map(short, r))))
1311 1311 for p in range(0, len(r), 10):
1312 1312 for b in remote.branches(r[p:p+10]):
1313 1313 self.ui.debug("received %s:%s\n" %
1314 1314 (short(b[0]), short(b[1])))
1315 1315 if b[0] not in m and b[0] not in seen:
1316 1316 unknown.append(b)
1317 1317
1318 1318 # do binary search on the branches we found
1319 1319 while search:
1320 1320 n = search.pop(0)
1321 1321 reqcnt += 1
1322 1322 l = remote.between([(n[0], n[1])])[0]
1323 1323 l.append(n[1])
1324 1324 p = n[0]
1325 1325 f = 1
1326 1326 for i in l:
1327 1327 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1328 1328 if i in m:
1329 1329 if f <= 2:
1330 1330 self.ui.debug("found new branch changeset %s\n" %
1331 1331 short(p))
1332 1332 fetch.append(p)
1333 1333 base[i] = 1
1334 1334 else:
1335 1335 self.ui.debug("narrowed branch search to %s:%s\n"
1336 1336 % (short(p), short(i)))
1337 1337 search.append((p, i))
1338 1338 break
1339 1339 p, f = i, f * 2
1340 1340
1341 1341 # sanity check our fetch list
1342 1342 for f in fetch:
1343 1343 if f in m:
1344 1344 raise RepoError("already have changeset " + short(f[:4]))
1345 1345
1346 1346 if base.keys() == [nullid]:
1347 1347 self.ui.warn("warning: pulling from an unrelated repository!\n")
1348 1348
1349 1349 self.ui.note("adding new changesets starting at " +
1350 1350 " ".join([short(f) for f in fetch]) + "\n")
1351 1351
1352 1352 self.ui.debug("%d total queries\n" % reqcnt)
1353 1353
1354 1354 return fetch
1355 1355
1356 1356 def findoutgoing(self, remote, base=None, heads=None):
1357 1357 if base == None:
1358 1358 base = {}
1359 1359 self.findincoming(remote, base, heads)
1360 1360
1361 1361 remain = dict.fromkeys(self.changelog.nodemap)
1362 1362
1363 1363 # prune everything remote has from the tree
1364 1364 del remain[nullid]
1365 1365 remove = base.keys()
1366 1366 while remove:
1367 1367 n = remove.pop(0)
1368 1368 if n in remain:
1369 1369 del remain[n]
1370 1370 for p in self.changelog.parents(n):
1371 1371 remove.append(p)
1372 1372
1373 1373 # find every node whose parents have been pruned
1374 1374 subset = []
1375 1375 for n in remain:
1376 1376 p1, p2 = self.changelog.parents(n)
1377 1377 if p1 not in remain and p2 not in remain:
1378 1378 subset.append(n)
1379 1379
1380 1380 # this is the set of all roots we have to push
1381 1381 return subset
1382 1382
1383 1383 def pull(self, remote):
1384 1384 lock = self.lock()
1385 1385
1386 1386 # if we have an empty repo, fetch everything
1387 1387 if self.changelog.tip() == nullid:
1388 1388 self.ui.status("requesting all changes\n")
1389 1389 fetch = [nullid]
1390 1390 else:
1391 1391 fetch = self.findincoming(remote)
1392 1392
1393 1393 if not fetch:
1394 1394 self.ui.status("no changes found\n")
1395 1395 return 1
1396 1396
1397 1397 cg = remote.changegroup(fetch)
1398 1398 return self.addchangegroup(cg)
1399 1399
1400 1400 def push(self, remote, force=False):
1401 1401 lock = remote.lock()
1402 1402
1403 1403 base = {}
1404 1404 heads = remote.heads()
1405 1405 inc = self.findincoming(remote, base, heads)
1406 1406 if not force and inc:
1407 1407 self.ui.warn("abort: unsynced remote changes!\n")
1408 1408 self.ui.status("(did you forget to sync? use push -f to force)\n")
1409 1409 return 1
1410 1410
1411 1411 update = self.findoutgoing(remote, base)
1412 1412 if not update:
1413 1413 self.ui.status("no changes found\n")
1414 1414 return 1
1415 1415 elif not force:
1416 1416 if len(heads) < len(self.changelog.heads()):
1417 1417 self.ui.warn("abort: push creates new remote branches!\n")
1418 1418 self.ui.status("(did you forget to merge?" +
1419 1419 " use push -f to force)\n")
1420 1420 return 1
1421 1421
1422 1422 cg = self.changegroup(update)
1423 1423 return remote.addchangegroup(cg)
1424 1424
    def changegroup(self, basenodes):
        """Build a changegroup stream of everything newer than basenodes.

        Returns a file-like object (genread) whose read() pulls bytes
        lazily from the gengroup generator; fillbuf() lets a caller
        drain the whole generator into the buffer at once.
        """
        class genread:
            # adapt a string generator to a file-like read(l) interface
            def __init__(self, generator):
                self.g = generator
                self.buf = ""
            def fillbuf(self):
                # consume the entire generator into the buffer
                self.buf += "".join(self.g)

            def read(self, l):
                # pull chunks until we can satisfy the request (or the
                # generator is exhausted, in which case we return short)
                while l > len(self.buf):
                    try:
                        self.buf += self.g.next()
                    except StopIteration:
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        def gengroup():
            nodes = self.newer(basenodes)

            # construct the link map
            linkmap = {}
            for n in nodes:
                linkmap[self.changelog.rev(n)] = n

            # construct a list of all changed files
            changed = {}
            for n in nodes:
                c = self.changelog.read(n)
                for f in c[3]:
                    changed[f] = 1
            changed = changed.keys()
            changed.sort()

            # the changegroup is changesets + manifests + all file revs
            # NOTE(review): 'revs' is computed but never used
            revs = [ self.changelog.rev(n) for n in nodes ]

            for y in self.changelog.group(linkmap): yield y
            for y in self.manifest.group(linkmap): yield y
            for f in changed:
                # each filelog section starts with a length-prefixed name
                yield struct.pack(">l", len(f) + 4) + f
                g = self.file(f).group(linkmap)
                for y in g:
                    yield y

            # empty chunk terminates the stream
            yield struct.pack(">l", 0)

        return genread(gengroup())
1470 1473
    def addchangegroup(self, source):
        """Apply a changegroup stream read from 'source' to this repo.

        source must provide read(n) (see changegroup above).  Adds
        changesets, then manifests, then file revisions, all within a
        single transaction, then fires the "changegroup" hook.
        """

        def getchunk():
            # a chunk is a 4-byte big-endian length (which includes the
            # length field itself) followed by the payload; a length of
            # 4 or less marks the end of a chunk sequence
            d = source.read(4)
            if not d: return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4: return ""
            return source.read(l - 4)

        def getgroup():
            # yield chunks until the terminating empty chunk
            while 1:
                c = getchunk()
                if not c: break
                yield c

        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return self.changelog.count()

        def revmap(x):
            return self.changelog.rev(x)

        if not source: return
        changesets = files = revisions = 0

        tr = self.transaction()

        # pull off the changeset group
        self.ui.status("adding changesets\n")
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        changesets = self.changelog.rev(cn) - self.changelog.rev(co)

        # pull off the manifest group
        self.ui.status("adding manifests\n")
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files: each section is a filename chunk followed
        # by that file's revision group
        self.ui.status("adding file changes\n")
        while 1:
            f = getchunk()
            if not f: break
            self.ui.debug("adding %s revisions\n" % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        self.ui.status(("added %d changesets" +
                        " with %d changes to %d files\n")
                       % (changesets, revisions, files))

        tr.close()

        if not self.hook("changegroup"):
            return 1

        return
1531 1534
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """Update the working directory to changeset 'node'.

        allow       - permit an update that spans branches (a merge)
        force       - override local changes and conflicts
        choose      - optional predicate limiting which files are touched
        moddirstate - when False, leave the dirstate untouched

        Returns 1 on refusal/error, None on success.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn("aborting: outstanding uncommitted merges\n")
            return 1

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        # (changed, added, deleted, unknown) in the working directory
        (c, a, d, u) = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note("resolving manifests\n")
        self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(" ancestor %s local %s remote %s\n" %
                      (short(man), short(m1n), short(m2n)))

        merge = {}     # files needing a 3-way merge
        get = {}       # files to fetch from the target revision
        remove = []    # files to delete from the working directory
        mark = {}      # files to mark in the dirstate without touching

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)    # unknown (untracked) files

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wfile(f).read()
                    t2 = self.file(f).revision(m2[f])
                    if cmp(t1, t2) == 0:
                        mark[f] = 1
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(" %s versions differ, resolve\n" % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(" remote %s is newer, get\n" % f)
                        get[f] = m2[f]
                        s = 1
                    else:
                        mark[f] = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(" updating permissions for %s\n" % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        # merge the executable bits like file contents above
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(" updating permissions for %s\n" % f)
                            util.set_exec(self.wjoin(f), mode)
                            mark[f] = 1
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    # changed locally, deleted remotely: ask the user
                    r = "d"
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (" local changed %s which remote deleted\n" % f) +
                            "(k)eep or (d)elete?", "[kd]", "k")
                    if r == "d":
                        remove.append(f)
                else:
                    self.ui.debug("other deleted %s\n" % f)
                    remove.append(f) # other deleted it
            else:
                # file exists locally but not in target or ancestor
                if n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug("remote deleted %s\n" % f)
                        remove.append(f)
                    else:
                        self.ui.debug("local created %s, keeping\n" % f)
                else:
                    self.ui.debug("working dir created %s, keeping\n" % f)

        # everything left in m2 exists only in the target revision
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                # deleted locally, changed remotely: ask the user
                r = "k"
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        ("remote changed %s which local deleted\n" % f) +
                        "(k)eep or (d)elete?", "[kd]", "k")
                if r == "k": get[f] = n
            elif f not in ma:
                self.ui.debug("remote created %s\n" % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug("local deleted %s, recreating\n" % f)
                    get[f] = n
                else:
                    self.ui.debug("local deleted %s\n" % f)

        del mw, m1, m2, ma

        if force:
            # forced update: fetch the remote side of every merge
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            mode = 'n'
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status("this update spans a branch" +
                               " affecting the following files:\n")
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = " (resolve)"
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn("aborting update spanning branches!\n")
                self.ui.status("(use update -m to merge across branches" +
                               " or -C to lose changes)\n")
                return 1
            # we have to remember what files we needed to get/change
            # because any file that's different from either one of its
            # parents must be in the changeset
            mode = 'm'
            if moddirstate:
                self.dirstate.update(mark.keys(), "m")

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note("getting %s\n" % f)
            t = self.file(f).read(get[f])
            try:
                self.wfile(f, "w").write(t)
            except IOError:
                # parent directory does not exist yet; create it and retry
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wfile(f, "w").write(t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                self.dirstate.update([f], mode)

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status("merging %s\n" % f)
            m, o, flag = merge[f]
            self.merge3(f, m, o)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if mode == 'm':
                    # only update dirstate on branch merge, otherwise we
                    # could mark files with changes as unchanged
                    self.dirstate.update([f], mode)
                elif p2 == nullid:
                    # update dirstate from parent1's manifest
                    m1n = self.changelog.read(p1)[0]
                    m1 = self.manifest.read(m1n)
                    f_len = len(self.file(f).read(m1[f]))
                    self.dirstate.update([f], mode, st_size=f_len, st_mtime=0)
                else:
                    self.ui.warn("Second parent without branch merge!?\n"
                                 "Dirstate for file %s may be wrong.\n" % f)

        remove.sort()
        for f in remove:
            self.ui.note("removing %s\n" % f)
            try:
                os.unlink(f)
            except OSError, inst:
                self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
            # try removing directories that might now be empty
            try: os.removedirs(os.path.dirname(f))
            except: pass
        if moddirstate:
            if mode == 'n':
                self.dirstate.forget(remove)
            else:
                self.dirstate.update(remove, 'r')
1773 1776
1774 1777 def merge3(self, fn, my, other):
1775 1778 """perform a 3-way merge in the working directory"""
1776 1779
1777 1780 def temp(prefix, node):
1778 1781 pre = "%s~%s." % (os.path.basename(fn), prefix)
1779 1782 (fd, name) = tempfile.mkstemp("", pre)
1780 1783 f = os.fdopen(fd, "wb")
1781 1784 f.write(fl.revision(node))
1782 1785 f.close()
1783 1786 return name
1784 1787
1785 1788 fl = self.file(fn)
1786 1789 base = fl.ancestor(my, other)
1787 1790 a = self.wjoin(fn)
1788 1791 b = temp("base", base)
1789 1792 c = temp("other", other)
1790 1793
1791 1794 self.ui.note("resolving %s\n" % fn)
1792 1795 self.ui.debug("file %s: other %s ancestor %s\n" %
1793 1796 (fn, short(other), short(base)))
1794 1797
1795 1798 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1796 1799 or "hgmerge")
1797 1800 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1798 1801 if r:
1799 1802 self.ui.warn("merging %s failed!\n" % fn)
1800 1803
1801 1804 os.unlink(b)
1802 1805 os.unlink(c)
1803 1806
1804 1807 def verify(self):
1805 1808 filelinkrevs = {}
1806 1809 filenodes = {}
1807 1810 changesets = revisions = files = 0
1808 1811 errors = 0
1809 1812
1810 1813 seen = {}
1811 1814 self.ui.status("checking changesets\n")
1812 1815 for i in range(self.changelog.count()):
1813 1816 changesets += 1
1814 1817 n = self.changelog.node(i)
1815 1818 if n in seen:
1816 1819 self.ui.warn("duplicate changeset at revision %d\n" % i)
1817 1820 errors += 1
1818 1821 seen[n] = 1
1819 1822
1820 1823 for p in self.changelog.parents(n):
1821 1824 if p not in self.changelog.nodemap:
1822 1825 self.ui.warn("changeset %s has unknown parent %s\n" %
1823 1826 (short(n), short(p)))
1824 1827 errors += 1
1825 1828 try:
1826 1829 changes = self.changelog.read(n)
1827 1830 except Exception, inst:
1828 1831 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1829 1832 errors += 1
1830 1833
1831 1834 for f in changes[3]:
1832 1835 filelinkrevs.setdefault(f, []).append(i)
1833 1836
1834 1837 seen = {}
1835 1838 self.ui.status("checking manifests\n")
1836 1839 for i in range(self.manifest.count()):
1837 1840 n = self.manifest.node(i)
1838 1841 if n in seen:
1839 1842 self.ui.warn("duplicate manifest at revision %d\n" % i)
1840 1843 errors += 1
1841 1844 seen[n] = 1
1842 1845
1843 1846 for p in self.manifest.parents(n):
1844 1847 if p not in self.manifest.nodemap:
1845 1848 self.ui.warn("manifest %s has unknown parent %s\n" %
1846 1849 (short(n), short(p)))
1847 1850 errors += 1
1848 1851
1849 1852 try:
1850 1853 delta = mdiff.patchtext(self.manifest.delta(n))
1851 1854 except KeyboardInterrupt:
1852 1855 self.ui.warn("aborted")
1853 1856 sys.exit(0)
1854 1857 except Exception, inst:
1855 1858 self.ui.warn("unpacking manifest %s: %s\n"
1856 1859 % (short(n), inst))
1857 1860 errors += 1
1858 1861
1859 1862 ff = [ l.split('\0') for l in delta.splitlines() ]
1860 1863 for f, fn in ff:
1861 1864 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1862 1865
1863 1866 self.ui.status("crosschecking files in changesets and manifests\n")
1864 1867 for f in filenodes:
1865 1868 if f not in filelinkrevs:
1866 1869 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1867 1870 errors += 1
1868 1871
1869 1872 for f in filelinkrevs:
1870 1873 if f not in filenodes:
1871 1874 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1872 1875 errors += 1
1873 1876
1874 1877 self.ui.status("checking files\n")
1875 1878 ff = filenodes.keys()
1876 1879 ff.sort()
1877 1880 for f in ff:
1878 1881 if f == "/dev/null": continue
1879 1882 files += 1
1880 1883 fl = self.file(f)
1881 1884 nodes = { nullid: 1 }
1882 1885 seen = {}
1883 1886 for i in range(fl.count()):
1884 1887 revisions += 1
1885 1888 n = fl.node(i)
1886 1889
1887 1890 if n in seen:
1888 1891 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1889 1892 errors += 1
1890 1893
1891 1894 if n not in filenodes[f]:
1892 1895 self.ui.warn("%s: %d:%s not in manifests\n"
1893 1896 % (f, i, short(n)))
1894 1897 errors += 1
1895 1898 else:
1896 1899 del filenodes[f][n]
1897 1900
1898 1901 flr = fl.linkrev(n)
1899 1902 if flr not in filelinkrevs[f]:
1900 1903 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1901 1904 % (f, short(n), fl.linkrev(n)))
1902 1905 errors += 1
1903 1906 else:
1904 1907 filelinkrevs[f].remove(flr)
1905 1908
1906 1909 # verify contents
1907 1910 try:
1908 1911 t = fl.read(n)
1909 1912 except Exception, inst:
1910 1913 self.ui.warn("unpacking file %s %s: %s\n"
1911 1914 % (f, short(n), inst))
1912 1915 errors += 1
1913 1916
1914 1917 # verify parents
1915 1918 (p1, p2) = fl.parents(n)
1916 1919 if p1 not in nodes:
1917 1920 self.ui.warn("file %s:%s unknown parent 1 %s" %
1918 1921 (f, short(n), short(p1)))
1919 1922 errors += 1
1920 1923 if p2 not in nodes:
1921 1924 self.ui.warn("file %s:%s unknown parent 2 %s" %
1922 1925 (f, short(n), short(p1)))
1923 1926 errors += 1
1924 1927 nodes[n] = 1
1925 1928
1926 1929 # cross-check
1927 1930 for node in filenodes[f]:
1928 1931 self.ui.warn("node %s in manifests not in %s\n"
1929 1932 % (hex(node), f))
1930 1933 errors += 1
1931 1934
1932 1935 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1933 1936 (files, changesets, revisions))
1934 1937
1935 1938 if errors:
1936 1939 self.ui.warn("%d integrity errors encountered!\n" % errors)
1937 1940 return 1
1938 1941
class httprepository:
    """Remote repository accessed over HTTP.

    Commands are issued as query parameters against an hgweb-style
    server and results are read from the response body.
    """
    def __init__(self, ui, path):
        # fix missing / after hostname
        s = urlparse.urlsplit(path)
        partial = s[2]
        if not partial: partial = "/"
        self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
        self.ui = ui
        # hosts that are never reached through the proxy
        no_list = [ "localhost", "127.0.0.1" ]
        host = ui.config("http_proxy", "host")
        if host is None:
            host = os.environ.get("http_proxy")
        if host and host.startswith('http://'):
            host = host[7:]
        user = ui.config("http_proxy", "user")
        passwd = ui.config("http_proxy", "passwd")
        no = ui.config("http_proxy", "no")
        if no is None:
            no = os.environ.get("no_proxy")
        if no:
            no_list = no_list + no.split(",")

        no_proxy = 0
        for h in no_list:
            if (path.startswith("http://" + h + "/") or
                path.startswith("http://" + h + ":") or
                path == "http://" + h):
                no_proxy = 1

        # Note: urllib2 takes proxy values from the environment and those will
        # take precedence
        for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
            try:
                if os.environ.has_key(env):
                    del os.environ[env]
            except OSError:
                pass

        proxy_handler = urllib2.BaseHandler()
        if host and not no_proxy:
            proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})

        authinfo = None
        if user and passwd:
            # proxy authentication credentials from the config file
            passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
            passmgr.add_password(None, host, user, passwd)
            authinfo = urllib2.ProxyBasicAuthHandler(passmgr)

        opener = urllib2.build_opener(proxy_handler, authinfo)
        urllib2.install_opener(opener)

    def dev(self):
        # local repos report a device number; -1 marks us as remote
        return -1

    def do_cmd(self, cmd, **args):
        """Issue 'cmd' with the given keyword arguments as a query
        string and return the (file-like) response object.

        Raises RepoError when the response does not look like it came
        from an hg server or uses a newer protocol than we speak.
        """
        self.ui.debug("sending %s command\n" % cmd)
        q = {"cmd": cmd}
        q.update(args)
        qs = urllib.urlencode(q)
        cu = "%s?%s" % (self.url, qs)
        resp = urllib2.urlopen(cu)
        proto = resp.headers['content-type']

        # accept old "text/plain" and "application/hg-changegroup" for now
        if not proto.startswith('application/mercurial') and \
           not proto.startswith('text/plain') and \
           not proto.startswith('application/hg-changegroup'):
            raise RepoError("'%s' does not appear to be an hg repository"
                            % self.url)

        if proto.startswith('application/mercurial'):
            # skip "application/mercurial-" (22 chars) to get the version
            version = proto[22:]
            if float(version) > 0.1:
                raise RepoError("'%s' uses newer protocol %s" %
                                (self.url, version))

        return resp

    def heads(self):
        """Return the remote's head nodes (space-separated hex reply)."""
        d = self.do_cmd("heads").read()
        try:
            return map(bin, d[:-1].split(" "))
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def branches(self, nodes):
        """Return branch 4-tuples for the given nodes (one per line)."""
        n = " ".join(map(hex, nodes))
        d = self.do_cmd("branches", nodes=n).read()
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def between(self, pairs):
        """Return sampled nodes between each hex "top-bottom" pair."""
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.do_cmd("between", pairs=n).read()
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
            raise

    def changegroup(self, nodes):
        """Request a changegroup rooted at nodes; returns a file-like
        object that transparently decompresses the zlib stream."""
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)
        # NOTE(review): 'bytes' is never updated or used
        bytes = 0

        class zread:
            # file-like wrapper decompressing the response on the fly
            def __init__(self, f):
                self.zd = zlib.decompressobj()
                self.f = f
                self.buf = ""
            def read(self, l):
                # refill from the network in 4k chunks until satisfied
                while l > len(self.buf):
                    r = self.f.read(4096)
                    if r:
                        self.buf += self.zd.decompress(r)
                    else:
                        self.buf += self.zd.flush()
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        return zread(f)
2067 2070
class remotelock:
    """Handle on a lock held on a remote repository.

    Calling release() (or letting the object be garbage collected)
    unlocks the remote side exactly once.
    """
    def __init__(self, repo):
        self.repo = repo

    def release(self):
        """Unlock the remote repository and drop our reference to it."""
        self.repo.unlock()
        self.repo = None

    def __del__(self):
        # best-effort release if the caller never called release()
        if self.repo:
            self.release()
2077 2080
class sshrepository:
    """Remote repository reached by running 'hg serve --stdio' over ssh.

    Speaks the stdio wire protocol: a command name (and length-prefixed
    arguments) are written to the remote process's stdin, replies are
    read from its stdout, and its stderr is forwarded to the user.
    """
    def __init__(self, ui, path):
        self.url = path
        self.ui = ui

        # ssh://[user@]host[:port]/path
        m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))', path)
        if not m:
            raise RepoError("couldn't parse destination %s" % path)

        self.user = m.group(2)
        self.host = m.group(3)
        self.port = m.group(5)
        self.path = m.group(7)

        args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
        args = self.port and ("%s -p %s") % (args, self.port) or args
        path = self.path or ""

        if not path:
            raise RepoError("no remote repository path specified")

        # NOTE(review): args and path are interpolated unquoted into a
        # shell command line; hostile values could inject shell commands.
        cmd = "ssh %s 'hg -R %s serve --stdio'"
        cmd = cmd % (args, path)

        # pipeo: remote stdin, pipei: remote stdout, pipee: remote stderr
        self.pipeo, self.pipei, self.pipee = os.popen3(cmd)

    def readerr(self):
        """Forward any already-pending remote stderr to the user (non-blocking)."""
        while 1:
            r, w, x = select.select([self.pipee], [], [], 0)
            if not r: break
            l = self.pipee.readline()
            if not l: break
            self.ui.status("remote: ", l)

    def __del__(self):
        # best effort: close the pipes and drain remaining stderr;
        # a destructor must never raise
        try:
            self.pipeo.close()
            self.pipei.close()
            for l in self.pipee:
                self.ui.status("remote: ", l)
            self.pipee.close()
        except:
            pass

    def dev(self):
        """Remote repositories have no local device number."""
        return -1

    def do_cmd(self, cmd, **args):
        """Send cmd and its arguments to the remote; return the reply stream.

        Each argument is transmitted as "name length\\n" followed by the
        raw value bytes.
        """
        self.ui.debug("sending %s command\n" % cmd)
        self.pipeo.write("%s\n" % cmd)
        for k, v in args.items():
            self.pipeo.write("%s %d\n" % (k, len(v)))
            self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

    def call(self, cmd, **args):
        """Run cmd remotely and return its length-prefixed reply.

        Raises RepoError if the reply does not start with a decimal length.
        """
        r = self.do_cmd(cmd, **args)
        l = r.readline()
        self.readerr()
        try:
            l = int(l)
        except:
            raise RepoError("unexpected response '%s'" % l)
        return r.read(l)

    def lock(self):
        """Take the remote repository lock; return a releasable proxy."""
        self.call("lock")
        return remotelock(self)

    def unlock(self):
        self.call("unlock")

    def heads(self):
        """Return the remote repository's head nodes as binary node ids."""
        d = self.call("heads")
        try:
            return map(bin, d[:-1].split(" "))
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def branches(self, nodes):
        """Query branch information for nodes; one node tuple per reply line."""
        n = " ".join(map(hex, nodes))
        d = self.call("branches", nodes=n)
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def between(self, pairs):
        """For each (top, bottom) pair, return the sampled nodes in between."""
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def changegroup(self, nodes):
        """Request a changegroup rooted at nodes; caller reads it from pipei."""
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)
        return self.pipei

    def addchangegroup(self, cg):
        """Stream the changegroup cg to the remote repository.

        Raises RepoError (with the remote's reason) if the push is
        refused; otherwise returns whether the remote sent back a
        non-empty reply.
        """
        d = self.call("addchangegroup")
        if d:
            # bug fix: was RepoError("push refused: %s", d), which passed
            # the reason as a second exception argument instead of
            # interpolating it into the message
            raise RepoError("push refused: %s" % d)

        while 1:
            d = cg.read(4096)
            if not d: break
            self.pipeo.write(d)
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        l = int(self.pipei.readline())
        return self.pipei.read(l) != ""
2198 2201
def repository(ui, path=None, create=0):
    """Instantiate the repository class matching path's URL scheme.

    http:// and hg:// map to httprepository, ssh:// to sshrepository,
    old-http:// to a localrepository over static HTTP; anything else
    (including no path) is treated as a local repository.
    """
    if path:
        if path.startswith("http://"):
            return httprepository(ui, path)
        elif path.startswith("hg://"):
            return httprepository(ui, path.replace("hg://", "http://"))
        elif path.startswith("old-http://"):
            return localrepository(ui, path.replace("old-http://", "http://"))
        elif path.startswith("ssh://"):
            return sshrepository(ui, path)

    return localrepository(ui, path, create)
General Comments 0
You need to be logged in to leave comments. Login now