##// END OF EJS Templates
Remove call to non-existent date_parser
mpm@selenic.com -
r971:eac9c8ef 0.6c default
parent child Browse files
Show More
@@ -1,2232 +1,2230 b''
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import sys, struct, os
9 9 import util
10 10 from revlog import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock urllib urllib2 transaction time socket")
13 13 demandload(globals(), "tempfile httprangereader bdiff urlparse")
14 14 demandload(globals(), "bisect errno select stat")
15 15
class filelog(revlog):
    """Revlog holding the history of one tracked file.

    Data is stored under .hg/data; per-revision metadata (e.g. copy
    information) is carried in-band, wrapped in a '\\1\\n' ... '\\1\\n'
    envelope at the start of the revision text.
    """
    def __init__(self, opener, path):
        revlog.__init__(self, opener,
                        os.path.join("data", self.encodedir(path + ".i")),
                        os.path.join("data", self.encodedir(path + ".d")))

    # This avoids a collision between a file named foo and a dir named
    # foo.i or foo.d
    def encodedir(self, path):
        return (path
                .replace(".hg/", ".hg.hg/")
                .replace(".i/", ".i.hg/")
                .replace(".d/", ".d.hg/"))

    def decodedir(self, path):
        return (path
                .replace(".d.hg/", ".d/")
                .replace(".i.hg/", ".i/")
                .replace(".hg.hg/", ".hg/"))

    def read(self, node):
        """Return the file text for node, with any metadata envelope stripped."""
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.find('\1\n', 2)
        return t[s+2:]

    def readmeta(self, node):
        """Return the metadata dict stored with node.

        Returns the raw text unchanged when no metadata envelope is present
        (historical behavior, preserved here).
        """
        t = self.revision(node)
        if not t.startswith('\1\n'):
            return t
        s = t.find('\1\n', 2)
        mt = t[2:s]
        # bug fix: the result dict was never initialized, so any revision
        # carrying metadata crashed with an undefined name 'm'
        m = {}
        for l in mt.splitlines():
            k, v = l.split(": ", 1)
            m[k] = v
        return m

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Add a revision; metadata (if any) is prepended in the envelope."""
        if meta or text.startswith('\1\n'):
            mt = ""
            if meta:
                mt = [ "%s: %s\n" % (k, v) for k,v in meta.items() ]
            text = "\1\n" + "".join(mt) + "\1\n" + text
        return self.addrevision(text, transaction, link, p1, p2)

    def annotate(self, node):
        """Return [(linkrev, line)] attributing each line of node's text."""

        def decorate(text, rev):
            return ([rev] * len(text.splitlines()), text)

        def pair(parent, child):
            # lines unchanged between parent and child keep the parent's rev
            for a1, a2, b1, b2 in bdiff.blocks(parent[1], child[1]):
                child[0][b1:b2] = parent[0][a1:a2]
            return child

        # find all ancestors
        needed = {node:1}
        visit = [node]
        while visit:
            n = visit.pop(0)
            for p in self.parents(n):
                if p not in needed:
                    needed[p] = 1
                    visit.append(p)
                else:
                    # count how many times we'll use this
                    needed[p] += 1

        # sort by revision which is a topological order
        visit = [ (self.rev(n), n) for n in needed.keys() ]
        visit.sort()
        hist = {}

        for r,n in visit:
            curr = decorate(self.read(n), self.linkrev(n))
            for p in self.parents(n):
                if p != nullid:
                    curr = pair(hist[p], curr)
                    # trim the history of unneeded revs
                    needed[p] -= 1
                    if not needed[p]:
                        del hist[p]
            hist[n] = curr

        return zip(hist[n][0], hist[n][1].splitlines(1))
102 102
class manifest(revlog):
    """Revlog storing the manifest: the sorted list of tracked files,
    their file-revision hashes, and their exec flags.

    Keeps two caches: mapcache = (node, map, flag) for the last node read,
    and listcache = (text, lines) used to build deltas cheaply in add().
    """
    def __init__(self, opener):
        self.mapcache = None
        self.listcache = None
        self.addlist = None
        revlog.__init__(self, opener, "00manifest.i", "00manifest.d")

    def read(self, node):
        """Return {filename: filenode} for the given manifest node."""
        if node == nullid: return {} # don't upset local cache
        if self.mapcache and self.mapcache[0] == node:
            return self.mapcache[1]
        text = self.revision(node)
        map = {}
        flag = {}
        self.listcache = (text, text.splitlines(1))
        for l in self.listcache[1]:
            # each line is "<file>\0<40-hex-node>[x]\n"
            (f, n) = l.split('\0')
            map[f] = bin(n[:40])
            flag[f] = (n[40:-1] == "x")
        self.mapcache = (node, map, flag)
        return map

    def readflags(self, node):
        """Return {filename: is_executable} for the given manifest node."""
        if node == nullid: return {} # don't upset local cache
        if not self.mapcache or self.mapcache[0] != node:
            self.read(node)
        return self.mapcache[2]

    def diff(self, a, b):
        # this is sneaky, as we're not actually using a and b
        if self.listcache and self.addlist and self.listcache[0] == a:
            d = mdiff.diff(self.listcache[1], self.addlist, 1)
            # sanity-check the cached diff against a real patch
            if mdiff.patch(a, d) != b:
                sys.stderr.write("*** sortdiff failed, falling back ***\n")
                return mdiff.textdiff(a, b)
            return d
        else:
            return mdiff.textdiff(a, b)

    def add(self, map, flags, transaction, link, p1=None, p2=None,
            changed=None):
        """Add a manifest revision; when possible, synthesize the delta
        directly from `changed` = (added/modified files, removed files)
        instead of re-diffing the full text."""
        # directly generate the mdiff delta from the data collected during
        # the bisect loop below
        def gendelta(delta):
            i = 0
            result = []
            while i < len(delta):
                start = delta[i][2]
                end = delta[i][3]
                l = delta[i][4]
                if l is None:
                    l = ""
                # coalesce adjacent/overlapping hunks into one
                while i < len(delta) - 1 and start <= delta[i+1][2] \
                          and end >= delta[i+1][2]:
                    if delta[i+1][3] > end:
                        end = delta[i+1][3]
                    if delta[i+1][4]:
                        l += delta[i+1][4]
                    i += 1
                result.append(struct.pack(">lll", start, end, len(l)) + l)
                i += 1
            return result

        # apply the changes collected during the bisect loop to our addlist
        def addlistdelta(addlist, delta):
            # apply the deltas to the addlist.  start from the bottom up
            # so changes to the offsets don't mess things up.
            i = len(delta)
            while i > 0:
                i -= 1
                start = delta[i][0]
                end = delta[i][1]
                if delta[i][4]:
                    addlist[start:end] = [delta[i][4]]
                else:
                    del addlist[start:end]
            return addlist

        # calculate the byte offset of the start of each line in the
        # manifest
        def calcoffsets(addlist):
            offsets = [0] * (len(addlist) + 1)
            offset = 0
            i = 0
            while i < len(addlist):
                offsets[i] = offset
                offset += len(addlist[i])
                i += 1
            offsets[i] = offset
            return offsets

        # if we're using the listcache, make sure it is valid and
        # parented by the same node we're diffing against
        if not changed or not self.listcache or not p1 or \
               self.mapcache[0] != p1:
            files = map.keys()
            files.sort()

            self.addlist = ["%s\000%s%s\n" %
                            (f, hex(map[f]), flags[f] and "x" or '')
                            for f in files]
            cachedelta = None
        else:
            addlist = self.listcache[1]

            # find the starting offset for each line in the add list
            offsets = calcoffsets(addlist)

            # combine the changed lists into one list for sorting
            # (second element: 0 = add/modify, 1 = remove)
            work = [[x, 0] for x in changed[0]]
            work[len(work):] = [[x, 1] for x in changed[1]]
            work.sort()

            delta = []
            bs = 0

            for w in work:
                f = w[0]
                # bs will either be the index of the item or the insert point
                bs = bisect.bisect(addlist, f, bs)
                if bs < len(addlist):
                    fn = addlist[bs][:addlist[bs].index('\0')]
                else:
                    fn = None
                if w[1] == 0:
                    l = "%s\000%s%s\n" % (f, hex(map[f]),
                                          flags[f] and "x" or '')
                else:
                    l = None
                start = bs
                if fn != f:
                    # item not found, insert a new one
                    end = bs
                    if w[1] == 1:
                        sys.stderr.write("failed to remove %s from manifest\n"
                                         % f)
                        sys.exit(1)
                else:
                    # item is found, replace/delete the existing line
                    end = bs + 1
                delta.append([start, end, offsets[start], offsets[end], l])

            self.addlist = addlistdelta(addlist, delta)
            if self.mapcache[0] == self.tip():
                cachedelta = "".join(gendelta(delta))
            else:
                cachedelta = None

        text = "".join(self.addlist)
        if cachedelta and mdiff.patch(self.listcache[0], cachedelta) != text:
            sys.stderr.write("manifest delta failure\n")
            sys.exit(1)
        n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        self.mapcache = (n, map, flags)
        self.listcache = (text, self.addlist)
        self.addlist = None

        return n
261 261
class changelog(revlog):
    """Revlog storing changesets.

    A changeset's text is: manifest hex hash, user, date, changed files
    (one per line), a blank line, then the description.
    """
    def __init__(self, opener):
        revlog.__init__(self, opener, "00changelog.i", "00changelog.d")

    def extract(self, text):
        """Parse changeset text into (manifest, user, date, files, desc)."""
        if not text:
            return (nullid, "", "0", [], "")
        last = text.index("\n\n")
        desc = text[last + 2:]
        l = text[:last].splitlines()
        manifest = bin(l[0])
        user = l[1]
        date = l[2]
        files = l[3:]
        return (manifest, user, date, files, desc)

    def read(self, node):
        return self.extract(self.revision(node))

    def add(self, manifest, list, desc, transaction, p1=None, p2=None,
            user=None, date=None):
        """Add a changeset; date defaults to now as "<unixtime> <tzoffset>".

        Note: the caller must pass a pre-formatted date string; the old
        call to util.date_parser was removed (it never existed).
        """
        if not date:
            if time.daylight: offset = time.altzone
            else: offset = time.timezone
            date = "%d %d" % (time.time(), offset)
        list.sort()
        l = [hex(manifest), user, date] + list + ["", desc]
        text = "\n".join(l)
        return self.addrevision(text, transaction, self.count(), p1, p2)
293 291
294 292 class dirstate:
295 293 def __init__(self, opener, ui, root):
296 294 self.opener = opener
297 295 self.root = root
298 296 self.dirty = 0
299 297 self.ui = ui
300 298 self.map = None
301 299 self.pl = None
302 300 self.copies = {}
303 301 self.ignorefunc = None
304 302
305 303 def wjoin(self, f):
306 304 return os.path.join(self.root, f)
307 305
308 306 def getcwd(self):
309 307 cwd = os.getcwd()
310 308 if cwd == self.root: return ''
311 309 return cwd[len(self.root) + 1:]
312 310
313 311 def ignore(self, f):
314 312 if not self.ignorefunc:
315 313 bigpat = []
316 314 try:
317 315 l = file(self.wjoin(".hgignore"))
318 316 for pat in l:
319 317 p = pat.rstrip()
320 318 if p:
321 319 try:
322 320 re.compile(p)
323 321 except:
324 322 self.ui.warn("ignoring invalid ignore"
325 323 + " regular expression '%s'\n" % p)
326 324 else:
327 325 bigpat.append(p)
328 326 except IOError: pass
329 327
330 328 if bigpat:
331 329 s = "(?:%s)" % (")|(?:".join(bigpat))
332 330 r = re.compile(s)
333 331 self.ignorefunc = r.search
334 332 else:
335 333 self.ignorefunc = util.never
336 334
337 335 return self.ignorefunc(f)
338 336
339 337 def __del__(self):
340 338 if self.dirty:
341 339 self.write()
342 340
343 341 def __getitem__(self, key):
344 342 try:
345 343 return self.map[key]
346 344 except TypeError:
347 345 self.read()
348 346 return self[key]
349 347
350 348 def __contains__(self, key):
351 349 if not self.map: self.read()
352 350 return key in self.map
353 351
354 352 def parents(self):
355 353 if not self.pl:
356 354 self.read()
357 355 return self.pl
358 356
359 357 def markdirty(self):
360 358 if not self.dirty:
361 359 self.dirty = 1
362 360
363 361 def setparents(self, p1, p2 = nullid):
364 362 self.markdirty()
365 363 self.pl = p1, p2
366 364
367 365 def state(self, key):
368 366 try:
369 367 return self[key][0]
370 368 except KeyError:
371 369 return "?"
372 370
373 371 def read(self):
374 372 if self.map is not None: return self.map
375 373
376 374 self.map = {}
377 375 self.pl = [nullid, nullid]
378 376 try:
379 377 st = self.opener("dirstate").read()
380 378 if not st: return
381 379 except: return
382 380
383 381 self.pl = [st[:20], st[20: 40]]
384 382
385 383 pos = 40
386 384 while pos < len(st):
387 385 e = struct.unpack(">cllll", st[pos:pos+17])
388 386 l = e[4]
389 387 pos += 17
390 388 f = st[pos:pos + l]
391 389 if '\0' in f:
392 390 f, c = f.split('\0')
393 391 self.copies[f] = c
394 392 self.map[f] = e[:4]
395 393 pos += l
396 394
397 395 def copy(self, source, dest):
398 396 self.read()
399 397 self.markdirty()
400 398 self.copies[dest] = source
401 399
402 400 def copied(self, file):
403 401 return self.copies.get(file, None)
404 402
405 403 def update(self, files, state, **kw):
406 404 ''' current states:
407 405 n normal
408 406 m needs merging
409 407 r marked for removal
410 408 a marked for addition'''
411 409
412 410 if not files: return
413 411 self.read()
414 412 self.markdirty()
415 413 for f in files:
416 414 if state == "r":
417 415 self.map[f] = ('r', 0, 0, 0)
418 416 else:
419 417 s = os.stat(os.path.join(self.root, f))
420 418 st_size = kw.get('st_size', s.st_size)
421 419 st_mtime = kw.get('st_mtime', s.st_mtime)
422 420 self.map[f] = (state, s.st_mode, st_size, st_mtime)
423 421
424 422 def forget(self, files):
425 423 if not files: return
426 424 self.read()
427 425 self.markdirty()
428 426 for f in files:
429 427 try:
430 428 del self.map[f]
431 429 except KeyError:
432 430 self.ui.warn("not in dirstate: %s!\n" % f)
433 431 pass
434 432
435 433 def clear(self):
436 434 self.map = {}
437 435 self.markdirty()
438 436
439 437 def write(self):
440 438 st = self.opener("dirstate", "w")
441 439 st.write("".join(self.pl))
442 440 for f, e in self.map.items():
443 441 c = self.copied(f)
444 442 if c:
445 443 f = f + "\0" + c
446 444 e = struct.pack(">cllll", e[0], e[1], e[2], e[3], len(f))
447 445 st.write(e + f)
448 446 self.dirty = 0
449 447
450 448 def filterfiles(self, files):
451 449 ret = {}
452 450 unknown = []
453 451
454 452 for x in files:
455 453 if x is '.':
456 454 return self.map.copy()
457 455 if x not in self.map:
458 456 unknown.append(x)
459 457 else:
460 458 ret[x] = self.map[x]
461 459
462 460 if not unknown:
463 461 return ret
464 462
465 463 b = self.map.keys()
466 464 b.sort()
467 465 blen = len(b)
468 466
469 467 for x in unknown:
470 468 bs = bisect.bisect(b, x)
471 469 if bs != 0 and b[bs-1] == x:
472 470 ret[x] = self.map[x]
473 471 continue
474 472 while bs < blen:
475 473 s = b[bs]
476 474 if len(s) > len(x) and s.startswith(x) and s[len(x)] == '/':
477 475 ret[s] = self.map[s]
478 476 else:
479 477 break
480 478 bs += 1
481 479 return ret
482 480
483 481 def walk(self, files = None, match = util.always, dc=None):
484 482 self.read()
485 483
486 484 # walk all files by default
487 485 if not files:
488 486 files = [self.root]
489 487 if not dc:
490 488 dc = self.map.copy()
491 489 elif not dc:
492 490 dc = self.filterfiles(files)
493 491
494 492 known = {'.hg': 1}
495 493 def seen(fn):
496 494 if fn in known: return True
497 495 known[fn] = 1
498 496 def traverse():
499 497 for ff in util.unique(files):
500 498 f = os.path.join(self.root, ff)
501 499 try:
502 500 st = os.stat(f)
503 501 except OSError, inst:
504 502 if ff not in dc: self.ui.warn('%s: %s\n' % (
505 503 util.pathto(self.getcwd(), ff),
506 504 inst.strerror))
507 505 continue
508 506 if stat.S_ISDIR(st.st_mode):
509 507 for dir, subdirs, fl in os.walk(f):
510 508 d = dir[len(self.root) + 1:]
511 509 nd = util.normpath(d)
512 510 if nd == '.': nd = ''
513 511 if seen(nd):
514 512 subdirs[:] = []
515 513 continue
516 514 for sd in subdirs:
517 515 ds = os.path.join(nd, sd +'/')
518 516 if self.ignore(ds) or not match(ds):
519 517 subdirs.remove(sd)
520 518 subdirs.sort()
521 519 fl.sort()
522 520 for fn in fl:
523 521 fn = util.pconvert(os.path.join(d, fn))
524 522 yield 'f', fn
525 523 elif stat.S_ISREG(st.st_mode):
526 524 yield 'f', ff
527 525 else:
528 526 kind = 'unknown'
529 527 if stat.S_ISCHR(st.st_mode): kind = 'character device'
530 528 elif stat.S_ISBLK(st.st_mode): kind = 'block device'
531 529 elif stat.S_ISFIFO(st.st_mode): kind = 'fifo'
532 530 elif stat.S_ISLNK(st.st_mode): kind = 'symbolic link'
533 531 elif stat.S_ISSOCK(st.st_mode): kind = 'socket'
534 532 self.ui.warn('%s: unsupported file type (type is %s)\n' % (
535 533 util.pathto(self.getcwd(), ff),
536 534 kind))
537 535
538 536 ks = dc.keys()
539 537 ks.sort()
540 538 for k in ks:
541 539 yield 'm', k
542 540
543 541 # yield only files that match: all in dirstate, others only if
544 542 # not in .hgignore
545 543
546 544 for src, fn in util.unique(traverse()):
547 545 fn = util.normpath(fn)
548 546 if seen(fn): continue
549 547 if fn not in dc and self.ignore(fn):
550 548 continue
551 549 if match(fn):
552 550 yield src, fn
553 551
554 552 def changes(self, files=None, match=util.always):
555 553 self.read()
556 554 if not files:
557 555 dc = self.map.copy()
558 556 else:
559 557 dc = self.filterfiles(files)
560 558 lookup, modified, added, unknown = [], [], [], []
561 559 removed, deleted = [], []
562 560
563 561 for src, fn in self.walk(files, match, dc=dc):
564 562 try:
565 563 s = os.stat(os.path.join(self.root, fn))
566 564 except OSError:
567 565 continue
568 566 if not stat.S_ISREG(s.st_mode):
569 567 continue
570 568 c = dc.get(fn)
571 569 if c:
572 570 del dc[fn]
573 571 if c[0] == 'm':
574 572 modified.append(fn)
575 573 elif c[0] == 'a':
576 574 added.append(fn)
577 575 elif c[0] == 'r':
578 576 unknown.append(fn)
579 577 elif c[2] != s.st_size or (c[1] ^ s.st_mode) & 0100:
580 578 modified.append(fn)
581 579 elif c[3] != s.st_mtime:
582 580 lookup.append(fn)
583 581 else:
584 582 unknown.append(fn)
585 583
586 584 for fn, c in [(fn, c) for fn, c in dc.items() if match(fn)]:
587 585 if c[0] == 'r':
588 586 removed.append(fn)
589 587 else:
590 588 deleted.append(fn)
591 589 return (lookup, modified, added, removed + deleted, unknown)
592 590
593 591 # used to avoid circular references so destructors work
def opener(base):
    """Return an open-like callable rooted at *base*.

    For an http:// base, reads go through the range reader.  For local
    writes, hard links are broken first (copy-on-write) so shared revlog
    files are never modified in place.
    """
    root = base

    def o(path, mode="r"):
        if root.startswith("http://"):
            # remote repository: read via HTTP range requests
            target = os.path.join(root, urllib.quote(path))
            return httprangereader.httprangereader(target)

        target = os.path.join(root, path)
        mode += "b" # for that other OS

        if mode[0] != "r":
            try:
                st = os.stat(target)
            except OSError:
                # file doesn't exist yet; make sure its directory does
                parent = os.path.dirname(target)
                if not os.path.isdir(parent):
                    os.makedirs(parent)
            else:
                if st.st_nlink > 1:
                    # break the hard link before writing
                    file(target + ".tmp", "wb").write(file(target, "rb").read())
                    util.rename(target + ".tmp", target)

        return file(target, mode)

    return o
620 618
class RepoError(Exception):
    """Raised for repository-level failures (no repo found, unknown revision)."""
622 620
623 621 class localrepository:
624 622 def __init__(self, ui, path=None, create=0):
625 623 self.remote = 0
626 624 if path and path.startswith("http://"):
627 625 self.remote = 1
628 626 self.path = path
629 627 else:
630 628 if not path:
631 629 p = os.getcwd()
632 630 while not os.path.isdir(os.path.join(p, ".hg")):
633 631 oldp = p
634 632 p = os.path.dirname(p)
635 633 if p == oldp: raise RepoError("no repo found")
636 634 path = p
637 635 self.path = os.path.join(path, ".hg")
638 636
639 637 if not create and not os.path.isdir(self.path):
640 638 raise RepoError("repository %s not found" % self.path)
641 639
642 640 self.root = os.path.abspath(path)
643 641 self.ui = ui
644 642
645 643 if create:
646 644 os.mkdir(self.path)
647 645 os.mkdir(self.join("data"))
648 646
649 647 self.opener = opener(self.path)
650 648 self.wopener = opener(self.root)
651 649 self.manifest = manifest(self.opener)
652 650 self.changelog = changelog(self.opener)
653 651 self.tagscache = None
654 652 self.nodetagscache = None
655 653
656 654 if not self.remote:
657 655 self.dirstate = dirstate(self.opener, ui, self.root)
658 656 try:
659 657 self.ui.readconfig(self.opener("hgrc"))
660 658 except IOError: pass
661 659
662 660 def hook(self, name, **args):
663 661 s = self.ui.config("hooks", name)
664 662 if s:
665 663 self.ui.note("running hook %s: %s\n" % (name, s))
666 664 old = {}
667 665 for k, v in args.items():
668 666 k = k.upper()
669 667 old[k] = os.environ.get(k, None)
670 668 os.environ[k] = v
671 669
672 670 r = os.system(s)
673 671
674 672 for k, v in old.items():
675 673 if v != None:
676 674 os.environ[k] = v
677 675 else:
678 676 del os.environ[k]
679 677
680 678 if r:
681 679 self.ui.warn("abort: %s hook failed with status %d!\n" %
682 680 (name, r))
683 681 return False
684 682 return True
685 683
686 684 def tags(self):
687 685 '''return a mapping of tag to node'''
688 686 if not self.tagscache:
689 687 self.tagscache = {}
690 688 def addtag(self, k, n):
691 689 try:
692 690 bin_n = bin(n)
693 691 except TypeError:
694 692 bin_n = ''
695 693 self.tagscache[k.strip()] = bin_n
696 694
697 695 try:
698 696 # read each head of the tags file, ending with the tip
699 697 # and add each tag found to the map, with "newer" ones
700 698 # taking precedence
701 699 fl = self.file(".hgtags")
702 700 h = fl.heads()
703 701 h.reverse()
704 702 for r in h:
705 703 for l in fl.revision(r).splitlines():
706 704 if l:
707 705 n, k = l.split(" ", 1)
708 706 addtag(self, k, n)
709 707 except KeyError:
710 708 pass
711 709
712 710 try:
713 711 f = self.opener("localtags")
714 712 for l in f:
715 713 n, k = l.split(" ", 1)
716 714 addtag(self, k, n)
717 715 except IOError:
718 716 pass
719 717
720 718 self.tagscache['tip'] = self.changelog.tip()
721 719
722 720 return self.tagscache
723 721
724 722 def tagslist(self):
725 723 '''return a list of tags ordered by revision'''
726 724 l = []
727 725 for t, n in self.tags().items():
728 726 try:
729 727 r = self.changelog.rev(n)
730 728 except:
731 729 r = -2 # sort to the beginning of the list if unknown
732 730 l.append((r,t,n))
733 731 l.sort()
734 732 return [(t,n) for r,t,n in l]
735 733
736 734 def nodetags(self, node):
737 735 '''return the tags associated with a node'''
738 736 if not self.nodetagscache:
739 737 self.nodetagscache = {}
740 738 for t,n in self.tags().items():
741 739 self.nodetagscache.setdefault(n,[]).append(t)
742 740 return self.nodetagscache.get(node, [])
743 741
744 742 def lookup(self, key):
745 743 try:
746 744 return self.tags()[key]
747 745 except KeyError:
748 746 try:
749 747 return self.changelog.lookup(key)
750 748 except:
751 749 raise RepoError("unknown revision '%s'" % key)
752 750
753 751 def dev(self):
754 752 if self.remote: return -1
755 753 return os.stat(self.path).st_dev
756 754
757 755 def local(self):
758 756 return not self.remote
759 757
760 758 def join(self, f):
761 759 return os.path.join(self.path, f)
762 760
763 761 def wjoin(self, f):
764 762 return os.path.join(self.root, f)
765 763
766 764 def file(self, f):
767 765 if f[0] == '/': f = f[1:]
768 766 return filelog(self.opener, f)
769 767
770 768 def getcwd(self):
771 769 return self.dirstate.getcwd()
772 770
773 771 def wfile(self, f, mode='r'):
774 772 return self.wopener(f, mode)
775 773
776 774 def transaction(self):
777 775 # save dirstate for undo
778 776 try:
779 777 ds = self.opener("dirstate").read()
780 778 except IOError:
781 779 ds = ""
782 780 self.opener("journal.dirstate", "w").write(ds)
783 781
784 782 def after():
785 783 util.rename(self.join("journal"), self.join("undo"))
786 784 util.rename(self.join("journal.dirstate"),
787 785 self.join("undo.dirstate"))
788 786
789 787 return transaction.transaction(self.ui.warn, self.opener,
790 788 self.join("journal"), after)
791 789
792 790 def recover(self):
793 791 lock = self.lock()
794 792 if os.path.exists(self.join("journal")):
795 793 self.ui.status("rolling back interrupted transaction\n")
796 794 return transaction.rollback(self.opener, self.join("journal"))
797 795 else:
798 796 self.ui.warn("no interrupted transaction available\n")
799 797
800 798 def undo(self):
801 799 lock = self.lock()
802 800 if os.path.exists(self.join("undo")):
803 801 self.ui.status("rolling back last transaction\n")
804 802 transaction.rollback(self.opener, self.join("undo"))
805 803 self.dirstate = None
806 804 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
807 805 self.dirstate = dirstate(self.opener, self.ui, self.root)
808 806 else:
809 807 self.ui.warn("no undo information available\n")
810 808
811 809 def lock(self, wait = 1):
812 810 try:
813 811 return lock.lock(self.join("lock"), 0)
814 812 except lock.LockHeld, inst:
815 813 if wait:
816 814 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
817 815 return lock.lock(self.join("lock"), wait)
818 816 raise inst
819 817
820 818 def rawcommit(self, files, text, user, date, p1=None, p2=None):
821 819 orig_parent = self.dirstate.parents()[0] or nullid
822 820 p1 = p1 or self.dirstate.parents()[0] or nullid
823 821 p2 = p2 or self.dirstate.parents()[1] or nullid
824 822 c1 = self.changelog.read(p1)
825 823 c2 = self.changelog.read(p2)
826 824 m1 = self.manifest.read(c1[0])
827 825 mf1 = self.manifest.readflags(c1[0])
828 826 m2 = self.manifest.read(c2[0])
829 827
830 828 if orig_parent == p1:
831 829 update_dirstate = 1
832 830 else:
833 831 update_dirstate = 0
834 832
835 833 tr = self.transaction()
836 834 mm = m1.copy()
837 835 mfm = mf1.copy()
838 836 linkrev = self.changelog.count()
839 837 for f in files:
840 838 try:
841 839 t = self.wfile(f).read()
842 840 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
843 841 r = self.file(f)
844 842 mfm[f] = tm
845 843 mm[f] = r.add(t, {}, tr, linkrev,
846 844 m1.get(f, nullid), m2.get(f, nullid))
847 845 if update_dirstate:
848 846 self.dirstate.update([f], "n")
849 847 except IOError:
850 848 try:
851 849 del mm[f]
852 850 del mfm[f]
853 851 if update_dirstate:
854 852 self.dirstate.forget([f])
855 853 except:
856 854 # deleted from p2?
857 855 pass
858 856
859 857 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
860 858 user = user or self.ui.username()
861 859 n = self.changelog.add(mnode, files, text, tr, p1, p2, user, date)
862 860 tr.close()
863 861 if update_dirstate:
864 862 self.dirstate.setparents(n, nullid)
865 863
866 864 def commit(self, files = None, text = "", user = None, date = None,
867 865 match = util.always, force=False):
868 866 commit = []
869 867 remove = []
870 868 if files:
871 869 for f in files:
872 870 s = self.dirstate.state(f)
873 871 if s in 'nmai':
874 872 commit.append(f)
875 873 elif s == 'r':
876 874 remove.append(f)
877 875 else:
878 876 self.ui.warn("%s not tracked!\n" % f)
879 877 else:
880 878 (c, a, d, u) = self.changes(match = match)
881 879 commit = c + a
882 880 remove = d
883 881
884 882 if not commit and not remove and not force:
885 883 self.ui.status("nothing changed\n")
886 884 return None
887 885
888 886 if not self.hook("precommit"):
889 887 return None
890 888
891 889 p1, p2 = self.dirstate.parents()
892 890 c1 = self.changelog.read(p1)
893 891 c2 = self.changelog.read(p2)
894 892 m1 = self.manifest.read(c1[0])
895 893 mf1 = self.manifest.readflags(c1[0])
896 894 m2 = self.manifest.read(c2[0])
897 895 lock = self.lock()
898 896 tr = self.transaction()
899 897
900 898 # check in files
901 899 new = {}
902 900 linkrev = self.changelog.count()
903 901 commit.sort()
904 902 for f in commit:
905 903 self.ui.note(f + "\n")
906 904 try:
907 905 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
908 906 t = self.wfile(f).read()
909 907 except IOError:
910 908 self.ui.warn("trouble committing %s!\n" % f)
911 909 raise
912 910
913 911 meta = {}
914 912 cp = self.dirstate.copied(f)
915 913 if cp:
916 914 meta["copy"] = cp
917 915 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
918 916 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
919 917
920 918 r = self.file(f)
921 919 fp1 = m1.get(f, nullid)
922 920 fp2 = m2.get(f, nullid)
923 921 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
924 922
925 923 # update manifest
926 924 m1.update(new)
927 925 for f in remove:
928 926 if f in m1:
929 927 del m1[f]
930 928 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
931 929 (new, remove))
932 930
933 931 # add changeset
934 932 new = new.keys()
935 933 new.sort()
936 934
937 935 if not text:
938 936 edittext = "\n" + "HG: manifest hash %s\n" % hex(mn)
939 937 edittext += "".join(["HG: changed %s\n" % f for f in new])
940 938 edittext += "".join(["HG: removed %s\n" % f for f in remove])
941 939 edittext = self.ui.edit(edittext)
942 940 if not edittext.rstrip():
943 941 return None
944 942 text = edittext
945 943
946 944 user = user or self.ui.username()
947 945 n = self.changelog.add(mn, new, text, tr, p1, p2, user, date)
948 946 tr.close()
949 947
950 948 self.dirstate.setparents(n)
951 949 self.dirstate.update(new, "n")
952 950 self.dirstate.forget(remove)
953 951
954 952 if not self.hook("commit", node=hex(n)):
955 953 return None
956 954 return n
957 955
958 956 def walk(self, node = None, files = [], match = util.always):
959 957 if node:
960 958 for fn in self.manifest.read(self.changelog.read(node)[0]):
961 959 if match(fn): yield 'm', fn
962 960 else:
963 961 for src, fn in self.dirstate.walk(files, match):
964 962 yield src, fn
965 963
966 964 def changes(self, node1 = None, node2 = None, files = [],
967 965 match = util.always):
968 966 mf2, u = None, []
969 967
970 968 def fcmp(fn, mf):
971 969 t1 = self.wfile(fn).read()
972 970 t2 = self.file(fn).revision(mf[fn])
973 971 return cmp(t1, t2)
974 972
975 973 def mfmatches(node):
976 974 mf = dict(self.manifest.read(node))
977 975 for fn in mf.keys():
978 976 if not match(fn):
979 977 del mf[fn]
980 978 return mf
981 979
982 980 # are we comparing the working directory?
983 981 if not node2:
984 982 l, c, a, d, u = self.dirstate.changes(files, match)
985 983
986 984 # are we comparing working dir against its parent?
987 985 if not node1:
988 986 if l:
989 987 # do a full compare of any files that might have changed
990 988 change = self.changelog.read(self.dirstate.parents()[0])
991 989 mf2 = mfmatches(change[0])
992 990 for f in l:
993 991 if fcmp(f, mf2):
994 992 c.append(f)
995 993
996 994 for l in c, a, d, u:
997 995 l.sort()
998 996
999 997 return (c, a, d, u)
1000 998
1001 999 # are we comparing working dir against non-tip?
1002 1000 # generate a pseudo-manifest for the working dir
1003 1001 if not node2:
1004 1002 if not mf2:
1005 1003 change = self.changelog.read(self.dirstate.parents()[0])
1006 1004 mf2 = mfmatches(change[0])
1007 1005 for f in a + c + l:
1008 1006 mf2[f] = ""
1009 1007 for f in d:
1010 1008 if f in mf2: del mf2[f]
1011 1009 else:
1012 1010 change = self.changelog.read(node2)
1013 1011 mf2 = mfmatches(change[0])
1014 1012
1015 1013 # flush lists from dirstate before comparing manifests
1016 1014 c, a = [], []
1017 1015
1018 1016 change = self.changelog.read(node1)
1019 1017 mf1 = mfmatches(change[0])
1020 1018
1021 1019 for fn in mf2:
1022 1020 if mf1.has_key(fn):
1023 1021 if mf1[fn] != mf2[fn]:
1024 1022 if mf2[fn] != "" or fcmp(fn, mf1):
1025 1023 c.append(fn)
1026 1024 del mf1[fn]
1027 1025 else:
1028 1026 a.append(fn)
1029 1027
1030 1028 d = mf1.keys()
1031 1029
1032 1030 for l in c, a, d, u:
1033 1031 l.sort()
1034 1032
1035 1033 return (c, a, d, u)
1036 1034
1037 1035 def add(self, list):
1038 1036 for f in list:
1039 1037 p = self.wjoin(f)
1040 1038 if not os.path.exists(p):
1041 1039 self.ui.warn("%s does not exist!\n" % f)
1042 1040 elif not os.path.isfile(p):
1043 1041 self.ui.warn("%s not added: only files supported currently\n" % f)
1044 1042 elif self.dirstate.state(f) in 'an':
1045 1043 self.ui.warn("%s already tracked!\n" % f)
1046 1044 else:
1047 1045 self.dirstate.update([f], "a")
1048 1046
1049 1047 def forget(self, list):
1050 1048 for f in list:
1051 1049 if self.dirstate.state(f) not in 'ai':
1052 1050 self.ui.warn("%s not added!\n" % f)
1053 1051 else:
1054 1052 self.dirstate.forget([f])
1055 1053
1056 1054 def remove(self, list):
1057 1055 for f in list:
1058 1056 p = self.wjoin(f)
1059 1057 if os.path.exists(p):
1060 1058 self.ui.warn("%s still exists!\n" % f)
1061 1059 elif self.dirstate.state(f) == 'a':
1062 1060 self.ui.warn("%s never committed!\n" % f)
1063 1061 self.dirstate.forget([f])
1064 1062 elif f not in self.dirstate:
1065 1063 self.ui.warn("%s not tracked!\n" % f)
1066 1064 else:
1067 1065 self.dirstate.update([f], "r")
1068 1066
1069 1067 def copy(self, source, dest):
1070 1068 p = self.wjoin(dest)
1071 1069 if not os.path.exists(p):
1072 1070 self.ui.warn("%s does not exist!\n" % dest)
1073 1071 elif not os.path.isfile(p):
1074 1072 self.ui.warn("copy failed: %s is not a file\n" % dest)
1075 1073 else:
1076 1074 if self.dirstate.state(dest) == '?':
1077 1075 self.dirstate.update([dest], "a")
1078 1076 self.dirstate.copy(source, dest)
1079 1077
1080 1078 def heads(self):
1081 1079 return self.changelog.heads()
1082 1080
1083 1081 # branchlookup returns a dict giving a list of branches for
1084 1082 # each head. A branch is defined as the tag of a node or
1085 1083 # the branch of the node's parents. If a node has multiple
1086 1084 # branch tags, tags are eliminated if they are visible from other
1087 1085 # branch tags.
1088 1086 #
1089 1087 # So, for this graph: a->b->c->d->e
1090 1088 # \ /
1091 1089 # aa -----/
1092 1090 # a has tag 2.6.12
1093 1091 # d has tag 2.6.13
1094 1092 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
1095 1093 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
1096 1094 # from the list.
1097 1095 #
1098 1096 # It is possible that more than one head will have the same branch tag.
1099 1097 # callers need to check the result for multiple heads under the same
1100 1098 # branch tag if that is a problem for them (ie checkout of a specific
1101 1099 # branch).
1102 1100 #
1103 1101 # passing in a specific branch will limit the depth of the search
1104 1102 # through the parents. It won't limit the branches returned in the
1105 1103 # result though.
    def branchlookup(self, heads=None, branch=None):
        """Map each head node to the branch tags visible from it.

        heads: the head nodes to examine (defaults to all heads).
        branch: if given, stop descending past nodes carrying this tag,
        limiting search depth (other branches may still appear in the
        result).  Returns a dict: head node -> list of tag names.
        See the comment block above for the elimination rules.
        """
        if not heads:
            heads = self.heads()
        headt = [ h for h in heads ]
        chlog = self.changelog
        branches = {}       # node -> {tagged node visible from it: 1}
        merges = []         # pending (second-parent, found-so-far) work items
        seenmerge = {}

        # traverse the tree once for each head, recording in the branches
        # dict which tags are visible from this head. The branches
        # dict also records which tags are visible from each tag
        # while we traverse.
        while headt or merges:
            if merges:
                n, found = merges.pop()
                visit = [n]
            else:
                h = headt.pop()
                visit = [h]
                found = [h]
                seen = {}
            while visit:
                n = visit.pop()
                if n in seen:
                    continue
                pp = chlog.parents(n)
                tags = self.nodetags(n)
                if tags:
                    for x in tags:
                        if x == 'tip':
                            continue
                        for f in found:
                            branches.setdefault(f, {})[n] = 1
                        branches.setdefault(n, {})[n] = 1
                        break
                    if n not in found:
                        found.append(n)
                    # stop descending once the requested branch is reached
                    if branch in tags:
                        continue
                seen[n] = 1
                # queue merges' second parents for a later pass
                if pp[1] != nullid and n not in seenmerge:
                    merges.append((pp[1], [x for x in found]))
                    seenmerge[n] = 1
                if pp[0] != nullid:
                    visit.append(pp[0])
        # traverse the branches dict, eliminating branch tags from each
        # head that are visible from another branch tag for that head.
        out = {}
        viscache = {}       # memoized reachability sets, shared across heads
        for h in heads:
            def visible(node):
                # set of tagged nodes reachable from `node` via branches
                if node in viscache:
                    return viscache[node]
                ret = {}
                visit = [node]
                while visit:
                    x = visit.pop()
                    if x in viscache:
                        ret.update(viscache[x])
                    elif x not in ret:
                        ret[x] = 1
                        if x in branches:
                            visit[len(visit):] = branches[x].keys()
                viscache[node] = ret
                return ret
            if h not in branches:
                continue
            # O(n^2), but somewhat limited. This only searches the
            # tags visible from a specific head, not all the tags in the
            # whole repo.
            for b in branches[h]:
                vis = False
                for bb in branches[h].keys():
                    if b != bb:
                        if b in visible(bb):
                            vis = True
                            break
                if not vis:
                    l = out.setdefault(h, [])
                    l[len(l):] = self.nodetags(b)
        return out
1188 1186
1189 1187 def branches(self, nodes):
1190 1188 if not nodes: nodes = [self.changelog.tip()]
1191 1189 b = []
1192 1190 for n in nodes:
1193 1191 t = n
1194 1192 while n:
1195 1193 p = self.changelog.parents(n)
1196 1194 if p[1] != nullid or p[0] == nullid:
1197 1195 b.append((t, n, p[0], p[1]))
1198 1196 break
1199 1197 n = p[0]
1200 1198 return b
1201 1199
1202 1200 def between(self, pairs):
1203 1201 r = []
1204 1202
1205 1203 for top, bottom in pairs:
1206 1204 n, l, i = top, [], 0
1207 1205 f = 1
1208 1206
1209 1207 while n != bottom:
1210 1208 p = self.changelog.parents(n)[0]
1211 1209 if i == f:
1212 1210 l.append(n)
1213 1211 f = f * 2
1214 1212 n = p
1215 1213 i += 1
1216 1214
1217 1215 r.append(l)
1218 1216
1219 1217 return r
1220 1218
1221 1219 def newer(self, nodes):
1222 1220 m = {}
1223 1221 nl = []
1224 1222 pm = {}
1225 1223 cl = self.changelog
1226 1224 t = l = cl.count()
1227 1225
1228 1226 # find the lowest numbered node
1229 1227 for n in nodes:
1230 1228 l = min(l, cl.rev(n))
1231 1229 m[n] = 1
1232 1230
1233 1231 for i in xrange(l, t):
1234 1232 n = cl.node(i)
1235 1233 if n in m: # explicitly listed
1236 1234 pm[n] = 1
1237 1235 nl.append(n)
1238 1236 continue
1239 1237 for p in cl.parents(n):
1240 1238 if p in pm: # parent listed
1241 1239 pm[n] = 1
1242 1240 nl.append(n)
1243 1241 break
1244 1242
1245 1243 return nl
1246 1244
1247 1245 def findincoming(self, remote, base=None, heads=None):
1248 1246 m = self.changelog.nodemap
1249 1247 search = []
1250 1248 fetch = []
1251 1249 seen = {}
1252 1250 seenbranch = {}
1253 1251 if base == None:
1254 1252 base = {}
1255 1253
1256 1254 # assume we're closer to the tip than the root
1257 1255 # and start by examining the heads
1258 1256 self.ui.status("searching for changes\n")
1259 1257
1260 1258 if not heads:
1261 1259 heads = remote.heads()
1262 1260
1263 1261 unknown = []
1264 1262 for h in heads:
1265 1263 if h not in m:
1266 1264 unknown.append(h)
1267 1265 else:
1268 1266 base[h] = 1
1269 1267
1270 1268 if not unknown:
1271 1269 return None
1272 1270
1273 1271 rep = {}
1274 1272 reqcnt = 0
1275 1273
1276 1274 # search through remote branches
1277 1275 # a 'branch' here is a linear segment of history, with four parts:
1278 1276 # head, root, first parent, second parent
1279 1277 # (a branch always has two parents (or none) by definition)
1280 1278 unknown = remote.branches(unknown)
1281 1279 while unknown:
1282 1280 r = []
1283 1281 while unknown:
1284 1282 n = unknown.pop(0)
1285 1283 if n[0] in seen:
1286 1284 continue
1287 1285
1288 1286 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
1289 1287 if n[0] == nullid:
1290 1288 break
1291 1289 if n in seenbranch:
1292 1290 self.ui.debug("branch already found\n")
1293 1291 continue
1294 1292 if n[1] and n[1] in m: # do we know the base?
1295 1293 self.ui.debug("found incomplete branch %s:%s\n"
1296 1294 % (short(n[0]), short(n[1])))
1297 1295 search.append(n) # schedule branch range for scanning
1298 1296 seenbranch[n] = 1
1299 1297 else:
1300 1298 if n[1] not in seen and n[1] not in fetch:
1301 1299 if n[2] in m and n[3] in m:
1302 1300 self.ui.debug("found new changeset %s\n" %
1303 1301 short(n[1]))
1304 1302 fetch.append(n[1]) # earliest unknown
1305 1303 base[n[2]] = 1 # latest known
1306 1304 continue
1307 1305
1308 1306 for a in n[2:4]:
1309 1307 if a not in rep:
1310 1308 r.append(a)
1311 1309 rep[a] = 1
1312 1310
1313 1311 seen[n[0]] = 1
1314 1312
1315 1313 if r:
1316 1314 reqcnt += 1
1317 1315 self.ui.debug("request %d: %s\n" %
1318 1316 (reqcnt, " ".join(map(short, r))))
1319 1317 for p in range(0, len(r), 10):
1320 1318 for b in remote.branches(r[p:p+10]):
1321 1319 self.ui.debug("received %s:%s\n" %
1322 1320 (short(b[0]), short(b[1])))
1323 1321 if b[0] not in m and b[0] not in seen:
1324 1322 unknown.append(b)
1325 1323
1326 1324 # do binary search on the branches we found
1327 1325 while search:
1328 1326 n = search.pop(0)
1329 1327 reqcnt += 1
1330 1328 l = remote.between([(n[0], n[1])])[0]
1331 1329 l.append(n[1])
1332 1330 p = n[0]
1333 1331 f = 1
1334 1332 for i in l:
1335 1333 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
1336 1334 if i in m:
1337 1335 if f <= 2:
1338 1336 self.ui.debug("found new branch changeset %s\n" %
1339 1337 short(p))
1340 1338 fetch.append(p)
1341 1339 base[i] = 1
1342 1340 else:
1343 1341 self.ui.debug("narrowed branch search to %s:%s\n"
1344 1342 % (short(p), short(i)))
1345 1343 search.append((p, i))
1346 1344 break
1347 1345 p, f = i, f * 2
1348 1346
1349 1347 # sanity check our fetch list
1350 1348 for f in fetch:
1351 1349 if f in m:
1352 1350 raise RepoError("already have changeset " + short(f[:4]))
1353 1351
1354 1352 if base.keys() == [nullid]:
1355 1353 self.ui.warn("warning: pulling from an unrelated repository!\n")
1356 1354
1357 1355 self.ui.note("adding new changesets starting at " +
1358 1356 " ".join([short(f) for f in fetch]) + "\n")
1359 1357
1360 1358 self.ui.debug("%d total queries\n" % reqcnt)
1361 1359
1362 1360 return fetch
1363 1361
1364 1362 def findoutgoing(self, remote, base=None, heads=None):
1365 1363 if base == None:
1366 1364 base = {}
1367 1365 self.findincoming(remote, base, heads)
1368 1366
1369 1367 remain = dict.fromkeys(self.changelog.nodemap)
1370 1368
1371 1369 # prune everything remote has from the tree
1372 1370 del remain[nullid]
1373 1371 remove = base.keys()
1374 1372 while remove:
1375 1373 n = remove.pop(0)
1376 1374 if n in remain:
1377 1375 del remain[n]
1378 1376 for p in self.changelog.parents(n):
1379 1377 remove.append(p)
1380 1378
1381 1379 # find every node whose parents have been pruned
1382 1380 subset = []
1383 1381 for n in remain:
1384 1382 p1, p2 = self.changelog.parents(n)
1385 1383 if p1 not in remain and p2 not in remain:
1386 1384 subset.append(n)
1387 1385
1388 1386 # this is the set of all roots we have to push
1389 1387 return subset
1390 1388
1391 1389 def pull(self, remote):
1392 1390 lock = self.lock()
1393 1391
1394 1392 # if we have an empty repo, fetch everything
1395 1393 if self.changelog.tip() == nullid:
1396 1394 self.ui.status("requesting all changes\n")
1397 1395 fetch = [nullid]
1398 1396 else:
1399 1397 fetch = self.findincoming(remote)
1400 1398
1401 1399 if not fetch:
1402 1400 self.ui.status("no changes found\n")
1403 1401 return 1
1404 1402
1405 1403 cg = remote.changegroup(fetch)
1406 1404 return self.addchangegroup(cg)
1407 1405
1408 1406 def push(self, remote, force=False):
1409 1407 lock = remote.lock()
1410 1408
1411 1409 base = {}
1412 1410 heads = remote.heads()
1413 1411 inc = self.findincoming(remote, base, heads)
1414 1412 if not force and inc:
1415 1413 self.ui.warn("abort: unsynced remote changes!\n")
1416 1414 self.ui.status("(did you forget to sync? use push -f to force)\n")
1417 1415 return 1
1418 1416
1419 1417 update = self.findoutgoing(remote, base)
1420 1418 if not update:
1421 1419 self.ui.status("no changes found\n")
1422 1420 return 1
1423 1421 elif not force:
1424 1422 if len(heads) < len(self.changelog.heads()):
1425 1423 self.ui.warn("abort: push creates new remote branches!\n")
1426 1424 self.ui.status("(did you forget to merge?" +
1427 1425 " use push -f to force)\n")
1428 1426 return 1
1429 1427
1430 1428 cg = self.changegroup(update)
1431 1429 return remote.addchangegroup(cg)
1432 1430
1433 1431 def changegroup(self, basenodes):
1434 1432 class genread:
1435 1433 def __init__(self, generator):
1436 1434 self.g = generator
1437 1435 self.buf = ""
1438 1436 def fillbuf(self):
1439 1437 self.buf += "".join(self.g)
1440 1438
1441 1439 def read(self, l):
1442 1440 while l > len(self.buf):
1443 1441 try:
1444 1442 self.buf += self.g.next()
1445 1443 except StopIteration:
1446 1444 break
1447 1445 d, self.buf = self.buf[:l], self.buf[l:]
1448 1446 return d
1449 1447
1450 1448 def gengroup():
1451 1449 nodes = self.newer(basenodes)
1452 1450
1453 1451 # construct the link map
1454 1452 linkmap = {}
1455 1453 for n in nodes:
1456 1454 linkmap[self.changelog.rev(n)] = n
1457 1455
1458 1456 # construct a list of all changed files
1459 1457 changed = {}
1460 1458 for n in nodes:
1461 1459 c = self.changelog.read(n)
1462 1460 for f in c[3]:
1463 1461 changed[f] = 1
1464 1462 changed = changed.keys()
1465 1463 changed.sort()
1466 1464
1467 1465 # the changegroup is changesets + manifests + all file revs
1468 1466 revs = [ self.changelog.rev(n) for n in nodes ]
1469 1467
1470 1468 for y in self.changelog.group(linkmap): yield y
1471 1469 for y in self.manifest.group(linkmap): yield y
1472 1470 for f in changed:
1473 1471 yield struct.pack(">l", len(f) + 4) + f
1474 1472 g = self.file(f).group(linkmap)
1475 1473 for y in g:
1476 1474 yield y
1477 1475
1478 1476 yield struct.pack(">l", 0)
1479 1477
1480 1478 return genread(gengroup())
1481 1479
1482 1480 def addchangegroup(self, source):
1483 1481
1484 1482 def getchunk():
1485 1483 d = source.read(4)
1486 1484 if not d: return ""
1487 1485 l = struct.unpack(">l", d)[0]
1488 1486 if l <= 4: return ""
1489 1487 return source.read(l - 4)
1490 1488
1491 1489 def getgroup():
1492 1490 while 1:
1493 1491 c = getchunk()
1494 1492 if not c: break
1495 1493 yield c
1496 1494
1497 1495 def csmap(x):
1498 1496 self.ui.debug("add changeset %s\n" % short(x))
1499 1497 return self.changelog.count()
1500 1498
1501 1499 def revmap(x):
1502 1500 return self.changelog.rev(x)
1503 1501
1504 1502 if not source: return
1505 1503 changesets = files = revisions = 0
1506 1504
1507 1505 tr = self.transaction()
1508 1506
1509 1507 # pull off the changeset group
1510 1508 self.ui.status("adding changesets\n")
1511 1509 co = self.changelog.tip()
1512 1510 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
1513 1511 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
1514 1512
1515 1513 # pull off the manifest group
1516 1514 self.ui.status("adding manifests\n")
1517 1515 mm = self.manifest.tip()
1518 1516 mo = self.manifest.addgroup(getgroup(), revmap, tr)
1519 1517
1520 1518 # process the files
1521 1519 self.ui.status("adding file changes\n")
1522 1520 while 1:
1523 1521 f = getchunk()
1524 1522 if not f: break
1525 1523 self.ui.debug("adding %s revisions\n" % f)
1526 1524 fl = self.file(f)
1527 1525 o = fl.count()
1528 1526 n = fl.addgroup(getgroup(), revmap, tr)
1529 1527 revisions += fl.count() - o
1530 1528 files += 1
1531 1529
1532 1530 self.ui.status(("added %d changesets" +
1533 1531 " with %d changes to %d files\n")
1534 1532 % (changesets, revisions, files))
1535 1533
1536 1534 tr.close()
1537 1535
1538 1536 if not self.hook("changegroup"):
1539 1537 return 1
1540 1538
1541 1539 return
1542 1540
    def update(self, node, allow=False, force=False, choose=None,
               moddirstate=True):
        """Update the working directory to changeset `node`.

        allow: permit a branch merge (non-linear update).
        force: override uncommitted-merge/conflict checks and clobber.
        choose: optional predicate limiting which files are touched.
        moddirstate: when False, leave the dirstate untouched.
        Returns 1 on abort.
        """
        pl = self.dirstate.parents()
        if not force and pl[1] != nullid:
            self.ui.warn("aborting: outstanding uncommitted merges\n")
            return 1

        p1, p2 = pl[0], node
        pa = self.changelog.ancestor(p1, p2)
        m1n = self.changelog.read(p1)[0]
        m2n = self.changelog.read(p2)[0]
        man = self.manifest.ancestor(m1n, m2n)
        m1 = self.manifest.read(m1n)
        mf1 = self.manifest.readflags(m1n)
        m2 = self.manifest.read(m2n)
        mf2 = self.manifest.readflags(m2n)
        ma = self.manifest.read(man)
        mfa = self.manifest.readflags(man)

        # changed, added, deleted, unknown relative to the dirstate
        (c, a, d, u) = self.changes()

        # is this a jump, or a merge? i.e. is there a linear path
        # from p1 to p2?
        linear_path = (pa == p1 or pa == p2)

        # resolve the manifest to determine which files
        # we care about merging
        self.ui.note("resolving manifests\n")
        self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
                      (force, allow, moddirstate, linear_path))
        self.ui.debug(" ancestor %s local %s remote %s\n" %
                      (short(man), short(m1n), short(m2n)))

        merge = {}      # files needing a 3-way merge
        get = {}        # files to fetch from the target revision
        remove = []     # files to delete from the working dir
        mark = {}       # files to mark 'm'erged in the dirstate

        # construct a working dir manifest
        mw = m1.copy()
        mfw = mf1.copy()
        umap = dict.fromkeys(u)

        for f in a + c + u:
            mw[f] = ""
            mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))

        for f in d:
            if f in mw: del mw[f]

            # If we're jumping between revisions (as opposed to merging),
            # and if neither the working directory nor the target rev has
            # the file, then we need to remove it from the dirstate, to
            # prevent the dirstate from listing the file when it is no
            # longer in the manifest.
            if moddirstate and linear_path and f not in m2:
                self.dirstate.forget((f,))

        # Compare manifests
        for f, n in mw.iteritems():
            if choose and not choose(f): continue
            if f in m2:
                s = 0

                # is the wfile new since m1, and match m2?
                if f not in m1:
                    t1 = self.wfile(f).read()
                    t2 = self.file(f).revision(m2[f])
                    if cmp(t1, t2) == 0:
                        mark[f] = 1
                        n = m2[f]
                    del t1, t2

                # are files different?
                if n != m2[f]:
                    a = ma.get(f, nullid)
                    # are both different from the ancestor?
                    if n != a and m2[f] != a:
                        self.ui.debug(" %s versions differ, resolve\n" % f)
                        # merge executable bits
                        # "if we changed or they changed, change in merge"
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        merge[f] = (m1.get(f, nullid), m2[f], mode)
                        s = 1
                    # are we clobbering?
                    # is remote's version newer?
                    # or are we going back in time?
                    elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
                        self.ui.debug(" remote %s is newer, get\n" % f)
                        get[f] = m2[f]
                        s = 1
                    else:
                        mark[f] = 1
                elif f in umap:
                    # this unknown file is the same as the checkout
                    get[f] = m2[f]

                if not s and mfw[f] != mf2[f]:
                    if force:
                        self.ui.debug(" updating permissions for %s\n" % f)
                        util.set_exec(self.wjoin(f), mf2[f])
                    else:
                        a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
                        mode = ((a^b) | (a^c)) ^ a
                        if mode != b:
                            self.ui.debug(" updating permissions for %s\n" % f)
                            util.set_exec(self.wjoin(f), mode)
                            mark[f] = 1
                del m2[f]
            elif f in ma:
                if n != ma[f]:
                    r = "d"
                    if not force and (linear_path or allow):
                        r = self.ui.prompt(
                            (" local changed %s which remote deleted\n" % f) +
                            "(k)eep or (d)elete?", "[kd]", "k")
                    if r == "d":
                        remove.append(f)
                else:
                    self.ui.debug("other deleted %s\n" % f)
                    remove.append(f) # other deleted it
            else:
                # file is not in the target manifest or the ancestor
                if n == m1.get(f, nullid): # same as parent
                    if p2 == pa: # going backwards?
                        self.ui.debug("remote deleted %s\n" % f)
                        remove.append(f)
                    else:
                        self.ui.debug("local created %s, keeping\n" % f)
                else:
                    self.ui.debug("working dir created %s, keeping\n" % f)

        # files only present in the target manifest (m2 entries common
        # with the working dir were deleted in the loop above)
        for f, n in m2.iteritems():
            if choose and not choose(f): continue
            if f[0] == "/": continue
            if f in ma and n != ma[f]:
                r = "k"
                if not force and (linear_path or allow):
                    r = self.ui.prompt(
                        ("remote changed %s which local deleted\n" % f) +
                        "(k)eep or (d)elete?", "[kd]", "k")
                if r == "k": get[f] = n
            elif f not in ma:
                self.ui.debug("remote created %s\n" % f)
                get[f] = n
            else:
                if force or p2 == pa: # going backwards?
                    self.ui.debug("local deleted %s, recreating\n" % f)
                    get[f] = n
                else:
                    self.ui.debug("local deleted %s\n" % f)

        del mw, m1, m2, ma

        if force:
            for f in merge:
                get[f] = merge[f][1]
            merge = {}

        if linear_path or force:
            # we don't need to do any magic, just jump to the new rev
            mode = 'n'
            p1, p2 = p2, nullid
        else:
            if not allow:
                self.ui.status("this update spans a branch" +
                               " affecting the following files:\n")
                fl = merge.keys() + get.keys()
                fl.sort()
                for f in fl:
                    cf = ""
                    if f in merge: cf = " (resolve)"
                    self.ui.status(" %s%s\n" % (f, cf))
                self.ui.warn("aborting update spanning branches!\n")
                self.ui.status("(use update -m to merge across branches" +
                               " or -C to lose changes)\n")
                return 1
            # we have to remember what files we needed to get/change
            # because any file that's different from either one of its
            # parents must be in the changeset
            mode = 'm'
            if moddirstate:
                self.dirstate.update(mark.keys(), "m")

        if moddirstate:
            self.dirstate.setparents(p1, p2)

        # get the files we don't need to change
        files = get.keys()
        files.sort()
        for f in files:
            if f[0] == "/": continue
            self.ui.note("getting %s\n" % f)
            t = self.file(f).read(get[f])
            try:
                self.wfile(f, "w").write(t)
            except IOError:
                # missing parent directory: create it and retry once
                os.makedirs(os.path.dirname(self.wjoin(f)))
                self.wfile(f, "w").write(t)
            util.set_exec(self.wjoin(f), mf2[f])
            if moddirstate:
                self.dirstate.update([f], mode)

        # merge the tricky bits
        files = merge.keys()
        files.sort()
        for f in files:
            self.ui.status("merging %s\n" % f)
            m, o, flag = merge[f]
            self.merge3(f, m, o)
            util.set_exec(self.wjoin(f), flag)
            if moddirstate:
                if mode == 'm':
                    # only update dirstate on branch merge, otherwise we
                    # could mark files with changes as unchanged
                    self.dirstate.update([f], mode)
                elif p2 == nullid:
                    # update dirstate from parent1's manifest
                    m1n = self.changelog.read(p1)[0]
                    m1 = self.manifest.read(m1n)
                    f_len = len(self.file(f).read(m1[f]))
                    self.dirstate.update([f], mode, st_size=f_len, st_mtime=0)
                else:
                    self.ui.warn("Second parent without branch merge!?\n"
                                 "Dirstate for file %s may be wrong.\n" % f)

        remove.sort()
        for f in remove:
            self.ui.note("removing %s\n" % f)
            try:
                os.unlink(self.wjoin(f))
            except OSError, inst:
                self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
            # try removing directories that might now be empty
            try: os.removedirs(os.path.dirname(self.wjoin(f)))
            except: pass
        if moddirstate:
            if mode == 'n':
                self.dirstate.forget(remove)
            else:
                self.dirstate.update(remove, 'r')
1784 1782
    def merge3(self, fn, my, other):
        """perform a 3-way merge in the working directory

        fn: repository-relative file name; my/other: the two file
        revision nodes to merge (ancestor is computed).  Delegates the
        actual merge to the external HGMERGE/ui.merge/hgmerge command,
        which edits the working copy of fn in place.
        """

        def temp(prefix, node):
            # materialize revision `node` of fn in a temp file for the
            # external merge tool; removed after the merge
            pre = "%s~%s." % (os.path.basename(fn), prefix)
            (fd, name) = tempfile.mkstemp("", pre)
            f = os.fdopen(fd, "wb")
            f.write(fl.revision(node))
            f.close()
            return name

        fl = self.file(fn)
        base = fl.ancestor(my, other)
        a = self.wjoin(fn)
        b = temp("base", base)
        c = temp("other", other)

        self.ui.note("resolving %s\n" % fn)
        self.ui.debug("file %s: other %s ancestor %s\n" %
                      (fn, short(other), short(base)))

        # NOTE(review): the command line is built by plain interpolation
        # and run through the shell; file names containing spaces or
        # shell metacharacters will break or be interpreted by the shell.
        # Consider quoting the arguments.
        cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
               or "hgmerge")
        r = os.system("%s %s %s %s" % (cmd, a, b, c))
        if r:
            self.ui.warn("merging %s failed!\n" % fn)

        os.unlink(b)
        os.unlink(c)
1814 1812
1815 1813 def verify(self):
1816 1814 filelinkrevs = {}
1817 1815 filenodes = {}
1818 1816 changesets = revisions = files = 0
1819 1817 errors = 0
1820 1818
1821 1819 seen = {}
1822 1820 self.ui.status("checking changesets\n")
1823 1821 for i in range(self.changelog.count()):
1824 1822 changesets += 1
1825 1823 n = self.changelog.node(i)
1826 1824 if n in seen:
1827 1825 self.ui.warn("duplicate changeset at revision %d\n" % i)
1828 1826 errors += 1
1829 1827 seen[n] = 1
1830 1828
1831 1829 for p in self.changelog.parents(n):
1832 1830 if p not in self.changelog.nodemap:
1833 1831 self.ui.warn("changeset %s has unknown parent %s\n" %
1834 1832 (short(n), short(p)))
1835 1833 errors += 1
1836 1834 try:
1837 1835 changes = self.changelog.read(n)
1838 1836 except Exception, inst:
1839 1837 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1840 1838 errors += 1
1841 1839
1842 1840 for f in changes[3]:
1843 1841 filelinkrevs.setdefault(f, []).append(i)
1844 1842
1845 1843 seen = {}
1846 1844 self.ui.status("checking manifests\n")
1847 1845 for i in range(self.manifest.count()):
1848 1846 n = self.manifest.node(i)
1849 1847 if n in seen:
1850 1848 self.ui.warn("duplicate manifest at revision %d\n" % i)
1851 1849 errors += 1
1852 1850 seen[n] = 1
1853 1851
1854 1852 for p in self.manifest.parents(n):
1855 1853 if p not in self.manifest.nodemap:
1856 1854 self.ui.warn("manifest %s has unknown parent %s\n" %
1857 1855 (short(n), short(p)))
1858 1856 errors += 1
1859 1857
1860 1858 try:
1861 1859 delta = mdiff.patchtext(self.manifest.delta(n))
1862 1860 except KeyboardInterrupt:
1863 1861 self.ui.warn("aborted")
1864 1862 sys.exit(0)
1865 1863 except Exception, inst:
1866 1864 self.ui.warn("unpacking manifest %s: %s\n"
1867 1865 % (short(n), inst))
1868 1866 errors += 1
1869 1867
1870 1868 ff = [ l.split('\0') for l in delta.splitlines() ]
1871 1869 for f, fn in ff:
1872 1870 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1873 1871
1874 1872 self.ui.status("crosschecking files in changesets and manifests\n")
1875 1873 for f in filenodes:
1876 1874 if f not in filelinkrevs:
1877 1875 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1878 1876 errors += 1
1879 1877
1880 1878 for f in filelinkrevs:
1881 1879 if f not in filenodes:
1882 1880 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1883 1881 errors += 1
1884 1882
1885 1883 self.ui.status("checking files\n")
1886 1884 ff = filenodes.keys()
1887 1885 ff.sort()
1888 1886 for f in ff:
1889 1887 if f == "/dev/null": continue
1890 1888 files += 1
1891 1889 fl = self.file(f)
1892 1890 nodes = { nullid: 1 }
1893 1891 seen = {}
1894 1892 for i in range(fl.count()):
1895 1893 revisions += 1
1896 1894 n = fl.node(i)
1897 1895
1898 1896 if n in seen:
1899 1897 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1900 1898 errors += 1
1901 1899
1902 1900 if n not in filenodes[f]:
1903 1901 self.ui.warn("%s: %d:%s not in manifests\n"
1904 1902 % (f, i, short(n)))
1905 1903 errors += 1
1906 1904 else:
1907 1905 del filenodes[f][n]
1908 1906
1909 1907 flr = fl.linkrev(n)
1910 1908 if flr not in filelinkrevs[f]:
1911 1909 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1912 1910 % (f, short(n), fl.linkrev(n)))
1913 1911 errors += 1
1914 1912 else:
1915 1913 filelinkrevs[f].remove(flr)
1916 1914
1917 1915 # verify contents
1918 1916 try:
1919 1917 t = fl.read(n)
1920 1918 except Exception, inst:
1921 1919 self.ui.warn("unpacking file %s %s: %s\n"
1922 1920 % (f, short(n), inst))
1923 1921 errors += 1
1924 1922
1925 1923 # verify parents
1926 1924 (p1, p2) = fl.parents(n)
1927 1925 if p1 not in nodes:
1928 1926 self.ui.warn("file %s:%s unknown parent 1 %s" %
1929 1927 (f, short(n), short(p1)))
1930 1928 errors += 1
1931 1929 if p2 not in nodes:
1932 1930 self.ui.warn("file %s:%s unknown parent 2 %s" %
1933 1931 (f, short(n), short(p1)))
1934 1932 errors += 1
1935 1933 nodes[n] = 1
1936 1934
1937 1935 # cross-check
1938 1936 for node in filenodes[f]:
1939 1937 self.ui.warn("node %s in manifests not in %s\n"
1940 1938 % (hex(node), f))
1941 1939 errors += 1
1942 1940
1943 1941 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1944 1942 (files, changesets, revisions))
1945 1943
1946 1944 if errors:
1947 1945 self.ui.warn("%d integrity errors encountered!\n" % errors)
1948 1946 return 1
1949 1947
class remoterepository:
    """Common base for repositories reached over the network."""
    def local(self):
        """A remote repository never lives on the local filesystem."""
        return False
1953 1951
1954 1952 class httprepository(remoterepository):
1955 1953 def __init__(self, ui, path):
1956 1954 # fix missing / after hostname
1957 1955 s = urlparse.urlsplit(path)
1958 1956 partial = s[2]
1959 1957 if not partial: partial = "/"
1960 1958 self.url = urlparse.urlunsplit((s[0], s[1], partial, '', ''))
1961 1959 self.ui = ui
1962 1960 no_list = [ "localhost", "127.0.0.1" ]
1963 1961 host = ui.config("http_proxy", "host")
1964 1962 if host is None:
1965 1963 host = os.environ.get("http_proxy")
1966 1964 if host and host.startswith('http://'):
1967 1965 host = host[7:]
1968 1966 user = ui.config("http_proxy", "user")
1969 1967 passwd = ui.config("http_proxy", "passwd")
1970 1968 no = ui.config("http_proxy", "no")
1971 1969 if no is None:
1972 1970 no = os.environ.get("no_proxy")
1973 1971 if no:
1974 1972 no_list = no_list + no.split(",")
1975 1973
1976 1974 no_proxy = 0
1977 1975 for h in no_list:
1978 1976 if (path.startswith("http://" + h + "/") or
1979 1977 path.startswith("http://" + h + ":") or
1980 1978 path == "http://" + h):
1981 1979 no_proxy = 1
1982 1980
1983 1981 # Note: urllib2 takes proxy values from the environment and those will
1984 1982 # take precedence
1985 1983 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
1986 1984 try:
1987 1985 if os.environ.has_key(env):
1988 1986 del os.environ[env]
1989 1987 except OSError:
1990 1988 pass
1991 1989
1992 1990 proxy_handler = urllib2.BaseHandler()
1993 1991 if host and not no_proxy:
1994 1992 proxy_handler = urllib2.ProxyHandler({"http" : "http://" + host})
1995 1993
1996 1994 authinfo = None
1997 1995 if user and passwd:
1998 1996 passmgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
1999 1997 passmgr.add_password(None, host, user, passwd)
2000 1998 authinfo = urllib2.ProxyBasicAuthHandler(passmgr)
2001 1999
2002 2000 opener = urllib2.build_opener(proxy_handler, authinfo)
2003 2001 urllib2.install_opener(opener)
2004 2002
2005 2003 def dev(self):
2006 2004 return -1
2007 2005
2008 2006 def do_cmd(self, cmd, **args):
2009 2007 self.ui.debug("sending %s command\n" % cmd)
2010 2008 q = {"cmd": cmd}
2011 2009 q.update(args)
2012 2010 qs = urllib.urlencode(q)
2013 2011 cu = "%s?%s" % (self.url, qs)
2014 2012 resp = urllib2.urlopen(cu)
2015 2013 proto = resp.headers['content-type']
2016 2014
2017 2015 # accept old "text/plain" and "application/hg-changegroup" for now
2018 2016 if not proto.startswith('application/mercurial') and \
2019 2017 not proto.startswith('text/plain') and \
2020 2018 not proto.startswith('application/hg-changegroup'):
2021 2019 raise RepoError("'%s' does not appear to be an hg repository"
2022 2020 % self.url)
2023 2021
2024 2022 if proto.startswith('application/mercurial'):
2025 2023 version = proto[22:]
2026 2024 if float(version) > 0.1:
2027 2025 raise RepoError("'%s' uses newer protocol %s" %
2028 2026 (self.url, version))
2029 2027
2030 2028 return resp
2031 2029
2032 2030 def heads(self):
2033 2031 d = self.do_cmd("heads").read()
2034 2032 try:
2035 2033 return map(bin, d[:-1].split(" "))
2036 2034 except:
2037 2035 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2038 2036 raise
2039 2037
2040 2038 def branches(self, nodes):
2041 2039 n = " ".join(map(hex, nodes))
2042 2040 d = self.do_cmd("branches", nodes=n).read()
2043 2041 try:
2044 2042 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
2045 2043 return br
2046 2044 except:
2047 2045 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2048 2046 raise
2049 2047
2050 2048 def between(self, pairs):
2051 2049 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
2052 2050 d = self.do_cmd("between", pairs=n).read()
2053 2051 try:
2054 2052 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
2055 2053 return p
2056 2054 except:
2057 2055 self.ui.warn("unexpected response:\n" + d[:400] + "\n...\n")
2058 2056 raise
2059 2057
    def changegroup(self, nodes):
        """Request a changegroup rooted at *nodes* from the server.

        Returns a file-like object whose read() yields the changegroup
        data, transparently zlib-decompressed as it streams in.
        """
        n = " ".join(map(hex, nodes))
        f = self.do_cmd("changegroup", roots=n)
        bytes = 0  # NOTE(review): never updated below; looks like dead bookkeeping

        class zread:
            # minimal file-like wrapper that decompresses the HTTP
            # response on the fly
            def __init__(self, f):
                self.zd = zlib.decompressobj()
                self.f = f
                self.buf = ""
            def read(self, l):
                # accumulate decompressed data until the request can be
                # satisfied or the underlying stream is exhausted
                while l > len(self.buf):
                    r = self.f.read(4096)
                    if r:
                        self.buf += self.zd.decompress(r)
                    else:
                        # EOF: flush whatever the decompressor holds;
                        # the final read may be shorter than requested
                        self.buf += self.zd.flush()
                        break
                d, self.buf = self.buf[:l], self.buf[l:]
                return d

        return zread(f)
2082 2080
class remotelock:
    """Handle representing a lock held on a remote repository.

    The lock is dropped by calling release(); as a safety net it is
    also dropped when the handle is garbage collected.
    """
    def __init__(self, repo):
        # remote repository whose unlock() drops the real lock
        self.repo = repo

    def __del__(self):
        # auto-release if the caller never did
        if self.repo:
            self.release()

    def release(self):
        """Unlock the remote repository and detach from it."""
        self.repo.unlock()
        self.repo = None
2092 2090
class sshrepository(remoterepository):
    """Repository reached over ssh by running "hg serve --stdio" on the
    remote host.

    Commands are written to the remote process's stdin and the results
    are read back from its stdout; anything the remote side prints to
    stderr is forwarded to the local ui as "remote: ..." lines.
    """
    def __init__(self, ui, path):
        self.url = path
        self.ui = ui

        # accepted form: ssh://[user@]host[:port]/path
        m = re.match(r'ssh://(([^@]+)@)?([^:/]+)(:(\d+))?(/(.*))', path)
        if not m:
            raise RepoError("couldn't parse destination %s" % path)

        self.user = m.group(2)
        self.host = m.group(3)
        self.port = m.group(5)
        self.path = m.group(7)

        args = self.user and ("%s@%s" % (self.user, self.host)) or self.host
        args = self.port and ("%s -p %s") % (args, self.port) or args
        path = self.path or ""

        if not path:
            raise RepoError("no remote repository path specified")

        # both the ssh binary and the remote hg command are
        # configurable through the [ui] section
        sshcmd = self.ui.config("ui", "ssh", "ssh")
        remotecmd = self.ui.config("ui", "remotecmd", "hg")
        cmd = "%s %s '%s -R %s serve --stdio'"
        cmd = cmd % (sshcmd, args, remotecmd, path)

        self.pipeo, self.pipei, self.pipee = os.popen3(cmd)

    def readerr(self):
        """Drain any pending remote stderr output into the ui."""
        while 1:
            # zero timeout: poll, never block when stderr is quiet
            r, w, x = select.select([self.pipee], [], [], 0)
            if not r: break
            l = self.pipee.readline()
            if not l: break
            self.ui.status("remote: ", l)

    def __del__(self):
        # close the pipes and flush any remaining stderr output;
        # errors during interpreter shutdown are deliberately ignored
        try:
            self.pipeo.close()
            self.pipei.close()
            for l in self.pipee:
                self.ui.status("remote: ", l)
            self.pipee.close()
        except:
            pass

    def dev(self):
        """Remote repositories have no local device; always -1."""
        return -1

    def do_cmd(self, cmd, **args):
        """Send *cmd* and its arguments to the remote side and return
        the pipe the response can be read from."""
        self.ui.debug("sending %s command\n" % cmd)
        self.pipeo.write("%s\n" % cmd)
        # each argument is sent as "<name> <byte count>\n<value>"
        for k, v in args.items():
            self.pipeo.write("%s %d\n" % (k, len(v)))
            self.pipeo.write(v)
        self.pipeo.flush()

        return self.pipei

    def call(self, cmd, **args):
        """Run *cmd* remotely and return its complete response.

        The remote replies with a decimal byte count on one line,
        followed by that many bytes of payload.  Raises RepoError if the
        count line is not an integer.
        """
        r = self.do_cmd(cmd, **args)
        l = r.readline()
        self.readerr()
        try:
            l = int(l)
        except:
            raise RepoError("unexpected response '%s'" % l)
        return r.read(l)

    def lock(self):
        """Take the remote repository lock; returns a remotelock handle
        that releases it."""
        self.call("lock")
        return remotelock(self)

    def unlock(self):
        """Release the remote repository lock."""
        self.call("unlock")

    def heads(self):
        """Return the remote repository's head nodes in binary form."""
        d = self.call("heads")
        try:
            # space-separated hex nodes with a trailing newline
            return map(bin, d[:-1].split(" "))
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def branches(self, nodes):
        """Return branch information for *nodes* as tuples of binary
        nodes, one tuple per response line."""
        n = " ".join(map(hex, nodes))
        d = self.call("branches", nodes=n)
        try:
            br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
            return br
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def between(self, pairs):
        """For each (top, bottom) pair, return the nodes between them
        (an empty list where the remote sent an empty line)."""
        n = "\n".join(["-".join(map(hex, p)) for p in pairs])
        d = self.call("between", pairs=n)
        try:
            p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
            return p
        except:
            raise RepoError("unexpected response '%s'" % (d[:400] + "..."))

    def changegroup(self, nodes):
        """Ask the remote for a changegroup rooted at *nodes*; the data
        is then read directly from the input pipe."""
        n = " ".join(map(hex, nodes))
        self.do_cmd("changegroup", roots=n)
        return self.pipei

    def addchangegroup(self, cg):
        """Push the changegroup read from *cg* to the remote repository.

        Raises RepoError if the remote refuses the push; otherwise
        streams the data across and returns whether the remote produced
        any final output.
        """
        d = self.call("addchangegroup")
        if d:
            # fix: the message used to be passed as two arguments
            # ("push refused: %s", d), so it was never formatted
            raise RepoError("push refused: %s" % d)

        while 1:
            d = cg.read(4096)
            if not d: break
            self.pipeo.write(d)
            self.readerr()

        self.pipeo.flush()

        self.readerr()
        l = int(self.pipei.readline())
        return self.pipei.read(l) != ""
2215 2213
class httpsrepository(httprepository):
    # identical behavior to httprepository -- no TLS-specific setup is
    # done here, so this presumably relies on urllib2's built-in https
    # handling (NOTE(review): verify that suffices for proxies/auth)
    pass
2218 2216
def repository(ui, path=None, create=0):
    """Open (or create) the repository addressed by *path*.

    The URL scheme selects the repository class: http(s):// and hg://
    are served over HTTP, ssh:// over an ssh pipe, old-http:// is a
    statically-served local-style repository, and anything else
    (including no path at all) is a local repository.
    """
    if not path:
        return localrepository(ui, path, create)
    if path.startswith("http://"):
        return httprepository(ui, path)
    if path.startswith("https://"):
        return httpsrepository(ui, path)
    if path.startswith("hg://"):
        # legacy scheme name for plain HTTP
        return httprepository(ui, path.replace("hg://", "http://"))
    if path.startswith("old-http://"):
        return localrepository(ui, path.replace("old-http://", "http://"))
    if path.startswith("ssh://"):
        return sshrepository(ui, path)
    return localrepository(ui, path, create)
General Comments 0
You need to be logged in to leave comments. Login now