Separate out old-http support...
mpm@selenic.com
r1101:2cf5c8a4 (branch: default)
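This changeset moves the read-only "old-http" (static HTTP) support out of localrepository and util.opener and into a dedicated statichttprepository class: hg.repository() now dispatches old-http:// URLs to the new class, and localrepository drops its remote flag. A minimal usage sketch of the intended behaviour follows; the URL is a placeholder and the package/module layout is assumed from this era of the code, not shown in the diff.

    from mercurial import ui, hg        # package layout assumed

    u = ui.ui()
    repo = hg.repository(u, "old-http://example.com/project")

    # repository() now hands back a statichttprepo.statichttprepository,
    # a read-only repository whose revlogs are fetched over plain HTTP
    print repo.local()            # False
    print repo.changelog.tip()    # tip node, read via httprangereader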
statichttprepo.py (new file)
@@ -0,0 +1,35 @@
1 # statichttprepo.py - simple http repository class for mercurial
2 #
3 # This provides read-only repo access to repositories exported via static http
4 #
5 # Copyright 2005 Matt Mackall <mpm@selenic.com>
6 #
7 # This software may be used and distributed according to the terms
8 # of the GNU General Public License, incorporated herein by reference.
9
10 import os, urllib
11 import localrepo, httprangereader, filelog, manifest, changelog
12
13 def opener(base):
14 """return a function that opens files over http"""
15 p = base
16 def o(path, mode="r"):
17 f = os.path.join(p, urllib.quote(path))
18 return httprangereader.httprangereader(f)
19 return o
20
21 class statichttprepository(localrepo.localrepository):
22 def __init__(self, ui, path):
23 self.path = (path + "/.hg")
24 self.ui = ui
25 self.opener = opener(self.path)
26 self.manifest = manifest.manifest(self.opener)
27 self.changelog = changelog.changelog(self.opener)
28 self.tagscache = None
29 self.nodetagscache = None
30
31 def dev(self):
32 return -1
33
34 def local(self):
35 return False
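The opener() helper above does the real work: instead of opening files on disk, it joins the requested path onto the repository's base URL (percent-quoting it) and hands back an httprangereader, so the existing revlog code can read the changelog, manifest and filelogs with HTTP byte-range requests. A standalone sketch of just the path handling, with a made-up base URL (the httprangereader class itself is part of Mercurial and not reproduced here):

    import os, urllib

    def staticurl(base, path):
        # mirrors the join done in opener(): quote the relative path and
        # append it to the base URL of the exported .hg directory
        return os.path.join(base, urllib.quote(path))

    print staticurl("http://example.com/project/.hg", "data/some file.i")
    # -> http://example.com/project/.hg/data/some%20file.i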
hg.py
@@ -1,29 +1,29 @@
1 1 # hg.py - repository classes for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import util
9 9 from node import *
10 10 from repo import *
11 11 from demandload import *
12 demandload(globals(), "localrepo httprepo sshrepo")
12 demandload(globals(), "localrepo httprepo sshrepo statichttprepo")
13 13
14 14 def repository(ui, path=None, create=0):
15 15 if path:
16 16 if path.startswith("http://"):
17 17 return httprepo.httprepository(ui, path)
18 18 if path.startswith("https://"):
19 19 return httprepo.httpsrepository(ui, path)
20 20 if path.startswith("hg://"):
21 21 return httprepo.httprepository(
22 22 ui, path.replace("hg://", "http://"))
23 23 if path.startswith("old-http://"):
24 return localrepo.localrepository(
25 ui, util.opener, path.replace("old-http://", "http://"))
24 return statichttprepo.statichttprepository(
25 ui, path.replace("old-http://", "http://"))
26 26 if path.startswith("ssh://"):
27 27 return sshrepo.sshrepository(ui, path)
28 28
29 29 return localrepo.localrepository(ui, util.opener, path, create)
localrepo.py
@@ -1,1407 +1,1400 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import struct, os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from demandload import *
12 12 demandload(globals(), "re lock transaction tempfile stat mdiff")
13 13
14 14 class localrepository:
15 15 def __init__(self, ui, opener, path=None, create=0):
16 self.remote = 0
17 if path and path.startswith("http://"):
18 self.remote = 1
19 self.path = path
20 else:
21 if not path:
22 p = os.getcwd()
23 while not os.path.isdir(os.path.join(p, ".hg")):
24 oldp = p
25 p = os.path.dirname(p)
26 if p == oldp: raise repo.RepoError("no repo found")
27 path = p
28 self.path = os.path.join(path, ".hg")
16 if not path:
17 p = os.getcwd()
18 while not os.path.isdir(os.path.join(p, ".hg")):
19 oldp = p
20 p = os.path.dirname(p)
21 if p == oldp: raise repo.RepoError("no repo found")
22 path = p
23 self.path = os.path.join(path, ".hg")
29 24
30 if not create and not os.path.isdir(self.path):
31 raise repo.RepoError("repository %s not found" % self.path)
25 if not create and not os.path.isdir(self.path):
26 raise repo.RepoError("repository %s not found" % self.path)
32 27
33 28 self.root = os.path.abspath(path)
34 29 self.ui = ui
35 30
36 31 if create:
37 32 os.mkdir(self.path)
38 33 os.mkdir(self.join("data"))
39 34
40 35 self.opener = opener(self.path)
41 36 self.wopener = opener(self.root)
42 37 self.manifest = manifest.manifest(self.opener)
43 38 self.changelog = changelog.changelog(self.opener)
44 39 self.tagscache = None
45 40 self.nodetagscache = None
46 41
47 if not self.remote:
48 self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
49 try:
50 self.ui.readconfig(self.opener("hgrc"))
51 except IOError: pass
42 self.dirstate = dirstate.dirstate(self.opener, ui, self.root)
43 try:
44 self.ui.readconfig(self.opener("hgrc"))
45 except IOError: pass
52 46
53 47 def hook(self, name, **args):
54 48 s = self.ui.config("hooks", name)
55 49 if s:
56 50 self.ui.note("running hook %s: %s\n" % (name, s))
57 51 old = {}
58 52 for k, v in args.items():
59 53 k = k.upper()
60 54 old[k] = os.environ.get(k, None)
61 55 os.environ[k] = v
62 56
63 57 r = os.system(s)
64 58
65 59 for k, v in old.items():
66 60 if v != None:
67 61 os.environ[k] = v
68 62 else:
69 63 del os.environ[k]
70 64
71 65 if r:
72 66 self.ui.warn("abort: %s hook failed with status %d!\n" %
73 67 (name, r))
74 68 return False
75 69 return True
76 70
77 71 def tags(self):
78 72 '''return a mapping of tag to node'''
79 73 if not self.tagscache:
80 74 self.tagscache = {}
81 75 def addtag(self, k, n):
82 76 try:
83 77 bin_n = bin(n)
84 78 except TypeError:
85 79 bin_n = ''
86 80 self.tagscache[k.strip()] = bin_n
87 81
88 82 try:
89 83 # read each head of the tags file, ending with the tip
90 84 # and add each tag found to the map, with "newer" ones
91 85 # taking precedence
92 86 fl = self.file(".hgtags")
93 87 h = fl.heads()
94 88 h.reverse()
95 89 for r in h:
96 90 for l in fl.read(r).splitlines():
97 91 if l:
98 92 n, k = l.split(" ", 1)
99 93 addtag(self, k, n)
100 94 except KeyError:
101 95 pass
102 96
103 97 try:
104 98 f = self.opener("localtags")
105 99 for l in f:
106 100 n, k = l.split(" ", 1)
107 101 addtag(self, k, n)
108 102 except IOError:
109 103 pass
110 104
111 105 self.tagscache['tip'] = self.changelog.tip()
112 106
113 107 return self.tagscache
114 108
115 109 def tagslist(self):
116 110 '''return a list of tags ordered by revision'''
117 111 l = []
118 112 for t, n in self.tags().items():
119 113 try:
120 114 r = self.changelog.rev(n)
121 115 except:
122 116 r = -2 # sort to the beginning of the list if unknown
123 117 l.append((r,t,n))
124 118 l.sort()
125 119 return [(t,n) for r,t,n in l]
126 120
127 121 def nodetags(self, node):
128 122 '''return the tags associated with a node'''
129 123 if not self.nodetagscache:
130 124 self.nodetagscache = {}
131 125 for t,n in self.tags().items():
132 126 self.nodetagscache.setdefault(n,[]).append(t)
133 127 return self.nodetagscache.get(node, [])
134 128
135 129 def lookup(self, key):
136 130 try:
137 131 return self.tags()[key]
138 132 except KeyError:
139 133 try:
140 134 return self.changelog.lookup(key)
141 135 except:
142 136 raise repo.RepoError("unknown revision '%s'" % key)
143 137
144 138 def dev(self):
145 if self.remote: return -1
146 139 return os.stat(self.path).st_dev
147 140
148 141 def local(self):
149 return not self.remote
142 return True
150 143
151 144 def join(self, f):
152 145 return os.path.join(self.path, f)
153 146
154 147 def wjoin(self, f):
155 148 return os.path.join(self.root, f)
156 149
157 150 def file(self, f):
158 151 if f[0] == '/': f = f[1:]
159 152 return filelog.filelog(self.opener, f)
160 153
161 154 def getcwd(self):
162 155 return self.dirstate.getcwd()
163 156
164 157 def wfile(self, f, mode='r'):
165 158 return self.wopener(f, mode)
166 159
167 160 def wread(self, filename):
168 161 return self.wopener(filename, 'r').read()
169 162
170 163 def wwrite(self, filename, data, fd=None):
171 164 if fd:
172 165 return fd.write(data)
173 166 return self.wopener(filename, 'w').write(data)
174 167
175 168 def transaction(self):
176 169 # save dirstate for undo
177 170 try:
178 171 ds = self.opener("dirstate").read()
179 172 except IOError:
180 173 ds = ""
181 174 self.opener("journal.dirstate", "w").write(ds)
182 175
183 176 def after():
184 177 util.rename(self.join("journal"), self.join("undo"))
185 178 util.rename(self.join("journal.dirstate"),
186 179 self.join("undo.dirstate"))
187 180
188 181 return transaction.transaction(self.ui.warn, self.opener,
189 182 self.join("journal"), after)
190 183
191 184 def recover(self):
192 185 lock = self.lock()
193 186 if os.path.exists(self.join("journal")):
194 187 self.ui.status("rolling back interrupted transaction\n")
195 188 return transaction.rollback(self.opener, self.join("journal"))
196 189 else:
197 190 self.ui.warn("no interrupted transaction available\n")
198 191
199 192 def undo(self):
200 193 lock = self.lock()
201 194 if os.path.exists(self.join("undo")):
202 195 self.ui.status("rolling back last transaction\n")
203 196 transaction.rollback(self.opener, self.join("undo"))
204 197 self.dirstate = None
205 198 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
206 199 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
207 200 else:
208 201 self.ui.warn("no undo information available\n")
209 202
210 203 def lock(self, wait=1):
211 204 try:
212 205 return lock.lock(self.join("lock"), 0)
213 206 except lock.LockHeld, inst:
214 207 if wait:
215 208 self.ui.warn("waiting for lock held by %s\n" % inst.args[0])
216 209 return lock.lock(self.join("lock"), wait)
217 210 raise inst
218 211
219 212 def rawcommit(self, files, text, user, date, p1=None, p2=None):
220 213 orig_parent = self.dirstate.parents()[0] or nullid
221 214 p1 = p1 or self.dirstate.parents()[0] or nullid
222 215 p2 = p2 or self.dirstate.parents()[1] or nullid
223 216 c1 = self.changelog.read(p1)
224 217 c2 = self.changelog.read(p2)
225 218 m1 = self.manifest.read(c1[0])
226 219 mf1 = self.manifest.readflags(c1[0])
227 220 m2 = self.manifest.read(c2[0])
228 221 changed = []
229 222
230 223 if orig_parent == p1:
231 224 update_dirstate = 1
232 225 else:
233 226 update_dirstate = 0
234 227
235 228 tr = self.transaction()
236 229 mm = m1.copy()
237 230 mfm = mf1.copy()
238 231 linkrev = self.changelog.count()
239 232 for f in files:
240 233 try:
241 234 t = self.wread(f)
242 235 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
243 236 r = self.file(f)
244 237 mfm[f] = tm
245 238
246 239 fp1 = m1.get(f, nullid)
247 240 fp2 = m2.get(f, nullid)
248 241
249 242 # is the same revision on two branches of a merge?
250 243 if fp2 == fp1:
251 244 fp2 = nullid
252 245
253 246 if fp2 != nullid:
254 247 # is one parent an ancestor of the other?
255 248 fpa = r.ancestor(fp1, fp2)
256 249 if fpa == fp1:
257 250 fp1, fp2 = fp2, nullid
258 251 elif fpa == fp2:
259 252 fp2 = nullid
260 253
261 254 # is the file unmodified from the parent?
262 255 if t == r.read(fp1):
263 256 # record the proper existing parent in manifest
264 257 # no need to add a revision
265 258 mm[f] = fp1
266 259 continue
267 260
268 261 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
269 262 changed.append(f)
270 263 if update_dirstate:
271 264 self.dirstate.update([f], "n")
272 265 except IOError:
273 266 try:
274 267 del mm[f]
275 268 del mfm[f]
276 269 if update_dirstate:
277 270 self.dirstate.forget([f])
278 271 except:
279 272 # deleted from p2?
280 273 pass
281 274
282 275 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
283 276 user = user or self.ui.username()
284 277 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
285 278 tr.close()
286 279 if update_dirstate:
287 280 self.dirstate.setparents(n, nullid)
288 281
289 282 def commit(self, files = None, text = "", user = None, date = None,
290 283 match = util.always, force=False):
291 284 commit = []
292 285 remove = []
293 286 changed = []
294 287
295 288 if files:
296 289 for f in files:
297 290 s = self.dirstate.state(f)
298 291 if s in 'nmai':
299 292 commit.append(f)
300 293 elif s == 'r':
301 294 remove.append(f)
302 295 else:
303 296 self.ui.warn("%s not tracked!\n" % f)
304 297 else:
305 298 (c, a, d, u) = self.changes(match=match)
306 299 commit = c + a
307 300 remove = d
308 301
309 302 p1, p2 = self.dirstate.parents()
310 303 c1 = self.changelog.read(p1)
311 304 c2 = self.changelog.read(p2)
312 305 m1 = self.manifest.read(c1[0])
313 306 mf1 = self.manifest.readflags(c1[0])
314 307 m2 = self.manifest.read(c2[0])
315 308
316 309 if not commit and not remove and not force and p2 == nullid:
317 310 self.ui.status("nothing changed\n")
318 311 return None
319 312
320 313 if not self.hook("precommit"):
321 314 return None
322 315
323 316 lock = self.lock()
324 317 tr = self.transaction()
325 318
326 319 # check in files
327 320 new = {}
328 321 linkrev = self.changelog.count()
329 322 commit.sort()
330 323 for f in commit:
331 324 self.ui.note(f + "\n")
332 325 try:
333 326 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
334 327 t = self.wread(f)
335 328 except IOError:
336 329 self.ui.warn("trouble committing %s!\n" % f)
337 330 raise
338 331
339 332 meta = {}
340 333 cp = self.dirstate.copied(f)
341 334 if cp:
342 335 meta["copy"] = cp
343 336 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
344 337 self.ui.debug(" %s: copy %s:%s\n" % (f, cp, meta["copyrev"]))
345 338
346 339 r = self.file(f)
347 340 fp1 = m1.get(f, nullid)
348 341 fp2 = m2.get(f, nullid)
349 342
350 343 # is the same revision on two branches of a merge?
351 344 if fp2 == fp1:
352 345 fp2 = nullid
353 346
354 347 if fp2 != nullid:
355 348 # is one parent an ancestor of the other?
356 349 fpa = r.ancestor(fp1, fp2)
357 350 if fpa == fp1:
358 351 fp1, fp2 = fp2, nullid
359 352 elif fpa == fp2:
360 353 fp2 = nullid
361 354
362 355 # is the file unmodified from the parent?
363 356 if not meta and t == r.read(fp1):
364 357 # record the proper existing parent in manifest
365 358 # no need to add a revision
366 359 new[f] = fp1
367 360 continue
368 361
369 362 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
370 363 # remember what we've added so that we can later calculate
371 364 # the files to pull from a set of changesets
372 365 changed.append(f)
373 366
374 367 # update manifest
375 368 m1.update(new)
376 369 for f in remove:
377 370 if f in m1:
378 371 del m1[f]
379 372 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
380 373 (new, remove))
381 374
382 375 # add changeset
383 376 new = new.keys()
384 377 new.sort()
385 378
386 379 if not text:
387 380 edittext = ""
388 381 if p2 != nullid:
389 382 edittext += "HG: branch merge\n"
390 383 edittext += "\n" + "HG: manifest hash %s\n" % hex(mn)
391 384 edittext += "".join(["HG: changed %s\n" % f for f in changed])
392 385 edittext += "".join(["HG: removed %s\n" % f for f in remove])
393 386 if not changed and not remove:
394 387 edittext += "HG: no files changed\n"
395 388 edittext = self.ui.edit(edittext)
396 389 if not edittext.rstrip():
397 390 return None
398 391 text = edittext
399 392
400 393 user = user or self.ui.username()
401 394 n = self.changelog.add(mn, changed, text, tr, p1, p2, user, date)
402 395 tr.close()
403 396
404 397 self.dirstate.setparents(n)
405 398 self.dirstate.update(new, "n")
406 399 self.dirstate.forget(remove)
407 400
408 401 if not self.hook("commit", node=hex(n)):
409 402 return None
410 403 return n
411 404
412 405 def walk(self, node=None, files=[], match=util.always):
413 406 if node:
414 407 for fn in self.manifest.read(self.changelog.read(node)[0]):
415 408 if match(fn): yield 'm', fn
416 409 else:
417 410 for src, fn in self.dirstate.walk(files, match):
418 411 yield src, fn
419 412
420 413 def changes(self, node1 = None, node2 = None, files = [],
421 414 match = util.always):
422 415 mf2, u = None, []
423 416
424 417 def fcmp(fn, mf):
425 418 t1 = self.wread(fn)
426 419 t2 = self.file(fn).read(mf.get(fn, nullid))
427 420 return cmp(t1, t2)
428 421
429 422 def mfmatches(node):
430 423 mf = dict(self.manifest.read(node))
431 424 for fn in mf.keys():
432 425 if not match(fn):
433 426 del mf[fn]
434 427 return mf
435 428
436 429 # are we comparing the working directory?
437 430 if not node2:
438 431 l, c, a, d, u = self.dirstate.changes(files, match)
439 432
440 433 # are we comparing working dir against its parent?
441 434 if not node1:
442 435 if l:
443 436 # do a full compare of any files that might have changed
444 437 change = self.changelog.read(self.dirstate.parents()[0])
445 438 mf2 = mfmatches(change[0])
446 439 for f in l:
447 440 if fcmp(f, mf2):
448 441 c.append(f)
449 442
450 443 for l in c, a, d, u:
451 444 l.sort()
452 445
453 446 return (c, a, d, u)
454 447
455 448 # are we comparing working dir against non-tip?
456 449 # generate a pseudo-manifest for the working dir
457 450 if not node2:
458 451 if not mf2:
459 452 change = self.changelog.read(self.dirstate.parents()[0])
460 453 mf2 = mfmatches(change[0])
461 454 for f in a + c + l:
462 455 mf2[f] = ""
463 456 for f in d:
464 457 if f in mf2: del mf2[f]
465 458 else:
466 459 change = self.changelog.read(node2)
467 460 mf2 = mfmatches(change[0])
468 461
469 462 # flush lists from dirstate before comparing manifests
470 463 c, a = [], []
471 464
472 465 change = self.changelog.read(node1)
473 466 mf1 = mfmatches(change[0])
474 467
475 468 for fn in mf2:
476 469 if mf1.has_key(fn):
477 470 if mf1[fn] != mf2[fn]:
478 471 if mf2[fn] != "" or fcmp(fn, mf1):
479 472 c.append(fn)
480 473 del mf1[fn]
481 474 else:
482 475 a.append(fn)
483 476
484 477 d = mf1.keys()
485 478
486 479 for l in c, a, d, u:
487 480 l.sort()
488 481
489 482 return (c, a, d, u)
490 483
491 484 def add(self, list):
492 485 for f in list:
493 486 p = self.wjoin(f)
494 487 if not os.path.exists(p):
495 488 self.ui.warn("%s does not exist!\n" % f)
496 489 elif not os.path.isfile(p):
497 490 self.ui.warn("%s not added: only files supported currently\n" % f)
498 491 elif self.dirstate.state(f) in 'an':
499 492 self.ui.warn("%s already tracked!\n" % f)
500 493 else:
501 494 self.dirstate.update([f], "a")
502 495
503 496 def forget(self, list):
504 497 for f in list:
505 498 if self.dirstate.state(f) not in 'ai':
506 499 self.ui.warn("%s not added!\n" % f)
507 500 else:
508 501 self.dirstate.forget([f])
509 502
510 503 def remove(self, list):
511 504 for f in list:
512 505 p = self.wjoin(f)
513 506 if os.path.exists(p):
514 507 self.ui.warn("%s still exists!\n" % f)
515 508 elif self.dirstate.state(f) == 'a':
516 509 self.ui.warn("%s never committed!\n" % f)
517 510 self.dirstate.forget([f])
518 511 elif f not in self.dirstate:
519 512 self.ui.warn("%s not tracked!\n" % f)
520 513 else:
521 514 self.dirstate.update([f], "r")
522 515
523 516 def copy(self, source, dest):
524 517 p = self.wjoin(dest)
525 518 if not os.path.exists(p):
526 519 self.ui.warn("%s does not exist!\n" % dest)
527 520 elif not os.path.isfile(p):
528 521 self.ui.warn("copy failed: %s is not a file\n" % dest)
529 522 else:
530 523 if self.dirstate.state(dest) == '?':
531 524 self.dirstate.update([dest], "a")
532 525 self.dirstate.copy(source, dest)
533 526
534 527 def heads(self):
535 528 return self.changelog.heads()
536 529
537 530 # branchlookup returns a dict giving a list of branches for
538 531 # each head. A branch is defined as the tag of a node or
539 532 # the branch of the node's parents. If a node has multiple
540 533 # branch tags, tags are eliminated if they are visible from other
541 534 # branch tags.
542 535 #
543 536 # So, for this graph: a->b->c->d->e
544 537 # \ /
545 538 # aa -----/
546 539 # a has tag 2.6.12
547 540 # d has tag 2.6.13
548 541 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
549 542 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
550 543 # from the list.
551 544 #
552 545 # It is possible that more than one head will have the same branch tag.
553 546 # callers need to check the result for multiple heads under the same
554 547 # branch tag if that is a problem for them (ie checkout of a specific
555 548 # branch).
556 549 #
557 550 # passing in a specific branch will limit the depth of the search
558 551 # through the parents. It won't limit the branches returned in the
559 552 # result though.
560 553 def branchlookup(self, heads=None, branch=None):
561 554 if not heads:
562 555 heads = self.heads()
563 556 headt = [ h for h in heads ]
564 557 chlog = self.changelog
565 558 branches = {}
566 559 merges = []
567 560 seenmerge = {}
568 561
569 562 # traverse the tree once for each head, recording in the branches
570 563 # dict which tags are visible from this head. The branches
571 564 # dict also records which tags are visible from each tag
572 565 # while we traverse.
573 566 while headt or merges:
574 567 if merges:
575 568 n, found = merges.pop()
576 569 visit = [n]
577 570 else:
578 571 h = headt.pop()
579 572 visit = [h]
580 573 found = [h]
581 574 seen = {}
582 575 while visit:
583 576 n = visit.pop()
584 577 if n in seen:
585 578 continue
586 579 pp = chlog.parents(n)
587 580 tags = self.nodetags(n)
588 581 if tags:
589 582 for x in tags:
590 583 if x == 'tip':
591 584 continue
592 585 for f in found:
593 586 branches.setdefault(f, {})[n] = 1
594 587 branches.setdefault(n, {})[n] = 1
595 588 break
596 589 if n not in found:
597 590 found.append(n)
598 591 if branch in tags:
599 592 continue
600 593 seen[n] = 1
601 594 if pp[1] != nullid and n not in seenmerge:
602 595 merges.append((pp[1], [x for x in found]))
603 596 seenmerge[n] = 1
604 597 if pp[0] != nullid:
605 598 visit.append(pp[0])
606 599 # traverse the branches dict, eliminating branch tags from each
607 600 # head that are visible from another branch tag for that head.
608 601 out = {}
609 602 viscache = {}
610 603 for h in heads:
611 604 def visible(node):
612 605 if node in viscache:
613 606 return viscache[node]
614 607 ret = {}
615 608 visit = [node]
616 609 while visit:
617 610 x = visit.pop()
618 611 if x in viscache:
619 612 ret.update(viscache[x])
620 613 elif x not in ret:
621 614 ret[x] = 1
622 615 if x in branches:
623 616 visit[len(visit):] = branches[x].keys()
624 617 viscache[node] = ret
625 618 return ret
626 619 if h not in branches:
627 620 continue
628 621 # O(n^2), but somewhat limited. This only searches the
629 622 # tags visible from a specific head, not all the tags in the
630 623 # whole repo.
631 624 for b in branches[h]:
632 625 vis = False
633 626 for bb in branches[h].keys():
634 627 if b != bb:
635 628 if b in visible(bb):
636 629 vis = True
637 630 break
638 631 if not vis:
639 632 l = out.setdefault(h, [])
640 633 l[len(l):] = self.nodetags(b)
641 634 return out
642 635
643 636 def branches(self, nodes):
644 637 if not nodes: nodes = [self.changelog.tip()]
645 638 b = []
646 639 for n in nodes:
647 640 t = n
648 641 while n:
649 642 p = self.changelog.parents(n)
650 643 if p[1] != nullid or p[0] == nullid:
651 644 b.append((t, n, p[0], p[1]))
652 645 break
653 646 n = p[0]
654 647 return b
655 648
656 649 def between(self, pairs):
657 650 r = []
658 651
659 652 for top, bottom in pairs:
660 653 n, l, i = top, [], 0
661 654 f = 1
662 655
663 656 while n != bottom:
664 657 p = self.changelog.parents(n)[0]
665 658 if i == f:
666 659 l.append(n)
667 660 f = f * 2
668 661 n = p
669 662 i += 1
670 663
671 664 r.append(l)
672 665
673 666 return r
674 667
675 668 def newer(self, nodes):
676 669 m = {}
677 670 nl = []
678 671 pm = {}
679 672 cl = self.changelog
680 673 t = l = cl.count()
681 674
682 675 # find the lowest numbered node
683 676 for n in nodes:
684 677 l = min(l, cl.rev(n))
685 678 m[n] = 1
686 679
687 680 for i in xrange(l, t):
688 681 n = cl.node(i)
689 682 if n in m: # explicitly listed
690 683 pm[n] = 1
691 684 nl.append(n)
692 685 continue
693 686 for p in cl.parents(n):
694 687 if p in pm: # parent listed
695 688 pm[n] = 1
696 689 nl.append(n)
697 690 break
698 691
699 692 return nl
700 693
701 694 def findincoming(self, remote, base=None, heads=None):
702 695 m = self.changelog.nodemap
703 696 search = []
704 697 fetch = {}
705 698 seen = {}
706 699 seenbranch = {}
707 700 if base == None:
708 701 base = {}
709 702
710 703 # assume we're closer to the tip than the root
711 704 # and start by examining the heads
712 705 self.ui.status("searching for changes\n")
713 706
714 707 if not heads:
715 708 heads = remote.heads()
716 709
717 710 unknown = []
718 711 for h in heads:
719 712 if h not in m:
720 713 unknown.append(h)
721 714 else:
722 715 base[h] = 1
723 716
724 717 if not unknown:
725 718 return None
726 719
727 720 rep = {}
728 721 reqcnt = 0
729 722
730 723 # search through remote branches
731 724 # a 'branch' here is a linear segment of history, with four parts:
732 725 # head, root, first parent, second parent
733 726 # (a branch always has two parents (or none) by definition)
734 727 unknown = remote.branches(unknown)
735 728 while unknown:
736 729 r = []
737 730 while unknown:
738 731 n = unknown.pop(0)
739 732 if n[0] in seen:
740 733 continue
741 734
742 735 self.ui.debug("examining %s:%s\n" % (short(n[0]), short(n[1])))
743 736 if n[0] == nullid:
744 737 break
745 738 if n in seenbranch:
746 739 self.ui.debug("branch already found\n")
747 740 continue
748 741 if n[1] and n[1] in m: # do we know the base?
749 742 self.ui.debug("found incomplete branch %s:%s\n"
750 743 % (short(n[0]), short(n[1])))
751 744 search.append(n) # schedule branch range for scanning
752 745 seenbranch[n] = 1
753 746 else:
754 747 if n[1] not in seen and n[1] not in fetch:
755 748 if n[2] in m and n[3] in m:
756 749 self.ui.debug("found new changeset %s\n" %
757 750 short(n[1]))
758 751 fetch[n[1]] = 1 # earliest unknown
759 752 base[n[2]] = 1 # latest known
760 753 continue
761 754
762 755 for a in n[2:4]:
763 756 if a not in rep:
764 757 r.append(a)
765 758 rep[a] = 1
766 759
767 760 seen[n[0]] = 1
768 761
769 762 if r:
770 763 reqcnt += 1
771 764 self.ui.debug("request %d: %s\n" %
772 765 (reqcnt, " ".join(map(short, r))))
773 766 for p in range(0, len(r), 10):
774 767 for b in remote.branches(r[p:p+10]):
775 768 self.ui.debug("received %s:%s\n" %
776 769 (short(b[0]), short(b[1])))
777 770 if b[0] in m:
778 771 self.ui.debug("found base node %s\n" % short(b[0]))
779 772 base[b[0]] = 1
780 773 elif b[0] not in seen:
781 774 unknown.append(b)
782 775
783 776 # do binary search on the branches we found
784 777 while search:
785 778 n = search.pop(0)
786 779 reqcnt += 1
787 780 l = remote.between([(n[0], n[1])])[0]
788 781 l.append(n[1])
789 782 p = n[0]
790 783 f = 1
791 784 for i in l:
792 785 self.ui.debug("narrowing %d:%d %s\n" % (f, len(l), short(i)))
793 786 if i in m:
794 787 if f <= 2:
795 788 self.ui.debug("found new branch changeset %s\n" %
796 789 short(p))
797 790 fetch[p] = 1
798 791 base[i] = 1
799 792 else:
800 793 self.ui.debug("narrowed branch search to %s:%s\n"
801 794 % (short(p), short(i)))
802 795 search.append((p, i))
803 796 break
804 797 p, f = i, f * 2
805 798
806 799 # sanity check our fetch list
807 800 for f in fetch.keys():
808 801 if f in m:
809 802 raise repo.RepoError("already have changeset " + short(f[:4]))
810 803
811 804 if base.keys() == [nullid]:
812 805 self.ui.warn("warning: pulling from an unrelated repository!\n")
813 806
814 807 self.ui.note("found new changesets starting at " +
815 808 " ".join([short(f) for f in fetch]) + "\n")
816 809
817 810 self.ui.debug("%d total queries\n" % reqcnt)
818 811
819 812 return fetch.keys()
820 813
821 814 def findoutgoing(self, remote, base=None, heads=None):
822 815 if base == None:
823 816 base = {}
824 817 self.findincoming(remote, base, heads)
825 818
826 819 self.ui.debug("common changesets up to "
827 820 + " ".join(map(short, base.keys())) + "\n")
828 821
829 822 remain = dict.fromkeys(self.changelog.nodemap)
830 823
831 824 # prune everything remote has from the tree
832 825 del remain[nullid]
833 826 remove = base.keys()
834 827 while remove:
835 828 n = remove.pop(0)
836 829 if n in remain:
837 830 del remain[n]
838 831 for p in self.changelog.parents(n):
839 832 remove.append(p)
840 833
841 834 # find every node whose parents have been pruned
842 835 subset = []
843 836 for n in remain:
844 837 p1, p2 = self.changelog.parents(n)
845 838 if p1 not in remain and p2 not in remain:
846 839 subset.append(n)
847 840
848 841 # this is the set of all roots we have to push
849 842 return subset
850 843
851 844 def pull(self, remote):
852 845 lock = self.lock()
853 846
854 847 # if we have an empty repo, fetch everything
855 848 if self.changelog.tip() == nullid:
856 849 self.ui.status("requesting all changes\n")
857 850 fetch = [nullid]
858 851 else:
859 852 fetch = self.findincoming(remote)
860 853
861 854 if not fetch:
862 855 self.ui.status("no changes found\n")
863 856 return 1
864 857
865 858 cg = remote.changegroup(fetch)
866 859 return self.addchangegroup(cg)
867 860
868 861 def push(self, remote, force=False):
869 862 lock = remote.lock()
870 863
871 864 base = {}
872 865 heads = remote.heads()
873 866 inc = self.findincoming(remote, base, heads)
874 867 if not force and inc:
875 868 self.ui.warn("abort: unsynced remote changes!\n")
876 869 self.ui.status("(did you forget to sync? use push -f to force)\n")
877 870 return 1
878 871
879 872 update = self.findoutgoing(remote, base)
880 873 if not update:
881 874 self.ui.status("no changes found\n")
882 875 return 1
883 876 elif not force:
884 877 if len(heads) < len(self.changelog.heads()):
885 878 self.ui.warn("abort: push creates new remote branches!\n")
886 879 self.ui.status("(did you forget to merge?" +
887 880 " use push -f to force)\n")
888 881 return 1
889 882
890 883 cg = self.changegroup(update)
891 884 return remote.addchangegroup(cg)
892 885
893 886 def changegroup(self, basenodes):
894 887 class genread:
895 888 def __init__(self, generator):
896 889 self.g = generator
897 890 self.buf = ""
898 891 def fillbuf(self):
899 892 self.buf += "".join(self.g)
900 893
901 894 def read(self, l):
902 895 while l > len(self.buf):
903 896 try:
904 897 self.buf += self.g.next()
905 898 except StopIteration:
906 899 break
907 900 d, self.buf = self.buf[:l], self.buf[l:]
908 901 return d
909 902
910 903 def gengroup():
911 904 nodes = self.newer(basenodes)
912 905
913 906 # construct the link map
914 907 linkmap = {}
915 908 for n in nodes:
916 909 linkmap[self.changelog.rev(n)] = n
917 910
918 911 # construct a list of all changed files
919 912 changed = {}
920 913 for n in nodes:
921 914 c = self.changelog.read(n)
922 915 for f in c[3]:
923 916 changed[f] = 1
924 917 changed = changed.keys()
925 918 changed.sort()
926 919
927 920 # the changegroup is changesets + manifests + all file revs
928 921 revs = [ self.changelog.rev(n) for n in nodes ]
929 922
930 923 for y in self.changelog.group(linkmap): yield y
931 924 for y in self.manifest.group(linkmap): yield y
932 925 for f in changed:
933 926 yield struct.pack(">l", len(f) + 4) + f
934 927 g = self.file(f).group(linkmap)
935 928 for y in g:
936 929 yield y
937 930
938 931 yield struct.pack(">l", 0)
939 932
940 933 return genread(gengroup())
941 934
942 935 def addchangegroup(self, source):
943 936
944 937 def getchunk():
945 938 d = source.read(4)
946 939 if not d: return ""
947 940 l = struct.unpack(">l", d)[0]
948 941 if l <= 4: return ""
949 942 return source.read(l - 4)
950 943
951 944 def getgroup():
952 945 while 1:
953 946 c = getchunk()
954 947 if not c: break
955 948 yield c
956 949
957 950 def csmap(x):
958 951 self.ui.debug("add changeset %s\n" % short(x))
959 952 return self.changelog.count()
960 953
961 954 def revmap(x):
962 955 return self.changelog.rev(x)
963 956
964 957 if not source: return
965 958 changesets = files = revisions = 0
966 959
967 960 tr = self.transaction()
968 961
969 962 oldheads = len(self.changelog.heads())
970 963
971 964 # pull off the changeset group
972 965 self.ui.status("adding changesets\n")
973 966 co = self.changelog.tip()
974 967 cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
975 968 changesets = self.changelog.rev(cn) - self.changelog.rev(co)
976 969
977 970 # pull off the manifest group
978 971 self.ui.status("adding manifests\n")
979 972 mm = self.manifest.tip()
980 973 mo = self.manifest.addgroup(getgroup(), revmap, tr)
981 974
982 975 # process the files
983 976 self.ui.status("adding file changes\n")
984 977 while 1:
985 978 f = getchunk()
986 979 if not f: break
987 980 self.ui.debug("adding %s revisions\n" % f)
988 981 fl = self.file(f)
989 982 o = fl.count()
990 983 n = fl.addgroup(getgroup(), revmap, tr)
991 984 revisions += fl.count() - o
992 985 files += 1
993 986
994 987 newheads = len(self.changelog.heads())
995 988 heads = ""
996 989 if oldheads and newheads > oldheads:
997 990 heads = " (+%d heads)" % (newheads - oldheads)
998 991
999 992 self.ui.status(("added %d changesets" +
1000 993 " with %d changes to %d files%s\n")
1001 994 % (changesets, revisions, files, heads))
1002 995
1003 996 tr.close()
1004 997
1005 998 if not self.hook("changegroup"):
1006 999 return 1
1007 1000
1008 1001 return
1009 1002
1010 1003 def update(self, node, allow=False, force=False, choose=None,
1011 1004 moddirstate=True):
1012 1005 pl = self.dirstate.parents()
1013 1006 if not force and pl[1] != nullid:
1014 1007 self.ui.warn("aborting: outstanding uncommitted merges\n")
1015 1008 return 1
1016 1009
1017 1010 p1, p2 = pl[0], node
1018 1011 pa = self.changelog.ancestor(p1, p2)
1019 1012 m1n = self.changelog.read(p1)[0]
1020 1013 m2n = self.changelog.read(p2)[0]
1021 1014 man = self.manifest.ancestor(m1n, m2n)
1022 1015 m1 = self.manifest.read(m1n)
1023 1016 mf1 = self.manifest.readflags(m1n)
1024 1017 m2 = self.manifest.read(m2n)
1025 1018 mf2 = self.manifest.readflags(m2n)
1026 1019 ma = self.manifest.read(man)
1027 1020 mfa = self.manifest.readflags(man)
1028 1021
1029 1022 (c, a, d, u) = self.changes()
1030 1023
1031 1024 # is this a jump, or a merge? i.e. is there a linear path
1032 1025 # from p1 to p2?
1033 1026 linear_path = (pa == p1 or pa == p2)
1034 1027
1035 1028 # resolve the manifest to determine which files
1036 1029 # we care about merging
1037 1030 self.ui.note("resolving manifests\n")
1038 1031 self.ui.debug(" force %s allow %s moddirstate %s linear %s\n" %
1039 1032 (force, allow, moddirstate, linear_path))
1040 1033 self.ui.debug(" ancestor %s local %s remote %s\n" %
1041 1034 (short(man), short(m1n), short(m2n)))
1042 1035
1043 1036 merge = {}
1044 1037 get = {}
1045 1038 remove = []
1046 1039
1047 1040 # construct a working dir manifest
1048 1041 mw = m1.copy()
1049 1042 mfw = mf1.copy()
1050 1043 umap = dict.fromkeys(u)
1051 1044
1052 1045 for f in a + c + u:
1053 1046 mw[f] = ""
1054 1047 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1055 1048
1056 1049 for f in d:
1057 1050 if f in mw: del mw[f]
1058 1051
1059 1052 # If we're jumping between revisions (as opposed to merging),
1060 1053 # and if neither the working directory nor the target rev has
1061 1054 # the file, then we need to remove it from the dirstate, to
1062 1055 # prevent the dirstate from listing the file when it is no
1063 1056 # longer in the manifest.
1064 1057 if moddirstate and linear_path and f not in m2:
1065 1058 self.dirstate.forget((f,))
1066 1059
1067 1060 # Compare manifests
1068 1061 for f, n in mw.iteritems():
1069 1062 if choose and not choose(f): continue
1070 1063 if f in m2:
1071 1064 s = 0
1072 1065
1073 1066 # is the wfile new since m1, and match m2?
1074 1067 if f not in m1:
1075 1068 t1 = self.wread(f)
1076 1069 t2 = self.file(f).read(m2[f])
1077 1070 if cmp(t1, t2) == 0:
1078 1071 n = m2[f]
1079 1072 del t1, t2
1080 1073
1081 1074 # are files different?
1082 1075 if n != m2[f]:
1083 1076 a = ma.get(f, nullid)
1084 1077 # are both different from the ancestor?
1085 1078 if n != a and m2[f] != a:
1086 1079 self.ui.debug(" %s versions differ, resolve\n" % f)
1087 1080 # merge executable bits
1088 1081 # "if we changed or they changed, change in merge"
1089 1082 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1090 1083 mode = ((a^b) | (a^c)) ^ a
1091 1084 merge[f] = (m1.get(f, nullid), m2[f], mode)
1092 1085 s = 1
1093 1086 # are we clobbering?
1094 1087 # is remote's version newer?
1095 1088 # or are we going back in time?
1096 1089 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1097 1090 self.ui.debug(" remote %s is newer, get\n" % f)
1098 1091 get[f] = m2[f]
1099 1092 s = 1
1100 1093 elif f in umap:
1101 1094 # this unknown file is the same as the checkout
1102 1095 get[f] = m2[f]
1103 1096
1104 1097 if not s and mfw[f] != mf2[f]:
1105 1098 if force:
1106 1099 self.ui.debug(" updating permissions for %s\n" % f)
1107 1100 util.set_exec(self.wjoin(f), mf2[f])
1108 1101 else:
1109 1102 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1110 1103 mode = ((a^b) | (a^c)) ^ a
1111 1104 if mode != b:
1112 1105 self.ui.debug(" updating permissions for %s\n" % f)
1113 1106 util.set_exec(self.wjoin(f), mode)
1114 1107 del m2[f]
1115 1108 elif f in ma:
1116 1109 if n != ma[f]:
1117 1110 r = "d"
1118 1111 if not force and (linear_path or allow):
1119 1112 r = self.ui.prompt(
1120 1113 (" local changed %s which remote deleted\n" % f) +
1121 1114 "(k)eep or (d)elete?", "[kd]", "k")
1122 1115 if r == "d":
1123 1116 remove.append(f)
1124 1117 else:
1125 1118 self.ui.debug("other deleted %s\n" % f)
1126 1119 remove.append(f) # other deleted it
1127 1120 else:
1128 1121 if n == m1.get(f, nullid): # same as parent
1129 1122 if p2 == pa: # going backwards?
1130 1123 self.ui.debug("remote deleted %s\n" % f)
1131 1124 remove.append(f)
1132 1125 else:
1133 1126 self.ui.debug("local created %s, keeping\n" % f)
1134 1127 else:
1135 1128 self.ui.debug("working dir created %s, keeping\n" % f)
1136 1129
1137 1130 for f, n in m2.iteritems():
1138 1131 if choose and not choose(f): continue
1139 1132 if f[0] == "/": continue
1140 1133 if f in ma and n != ma[f]:
1141 1134 r = "k"
1142 1135 if not force and (linear_path or allow):
1143 1136 r = self.ui.prompt(
1144 1137 ("remote changed %s which local deleted\n" % f) +
1145 1138 "(k)eep or (d)elete?", "[kd]", "k")
1146 1139 if r == "k": get[f] = n
1147 1140 elif f not in ma:
1148 1141 self.ui.debug("remote created %s\n" % f)
1149 1142 get[f] = n
1150 1143 else:
1151 1144 if force or p2 == pa: # going backwards?
1152 1145 self.ui.debug("local deleted %s, recreating\n" % f)
1153 1146 get[f] = n
1154 1147 else:
1155 1148 self.ui.debug("local deleted %s\n" % f)
1156 1149
1157 1150 del mw, m1, m2, ma
1158 1151
1159 1152 if force:
1160 1153 for f in merge:
1161 1154 get[f] = merge[f][1]
1162 1155 merge = {}
1163 1156
1164 1157 if linear_path or force:
1165 1158 # we don't need to do any magic, just jump to the new rev
1166 1159 branch_merge = False
1167 1160 p1, p2 = p2, nullid
1168 1161 else:
1169 1162 if not allow:
1170 1163 self.ui.status("this update spans a branch" +
1171 1164 " affecting the following files:\n")
1172 1165 fl = merge.keys() + get.keys()
1173 1166 fl.sort()
1174 1167 for f in fl:
1175 1168 cf = ""
1176 1169 if f in merge: cf = " (resolve)"
1177 1170 self.ui.status(" %s%s\n" % (f, cf))
1178 1171 self.ui.warn("aborting update spanning branches!\n")
1179 1172 self.ui.status("(use update -m to merge across branches" +
1180 1173 " or -C to lose changes)\n")
1181 1174 return 1
1182 1175 branch_merge = True
1183 1176
1184 1177 if moddirstate:
1185 1178 self.dirstate.setparents(p1, p2)
1186 1179
1187 1180 # get the files we don't need to change
1188 1181 files = get.keys()
1189 1182 files.sort()
1190 1183 for f in files:
1191 1184 if f[0] == "/": continue
1192 1185 self.ui.note("getting %s\n" % f)
1193 1186 t = self.file(f).read(get[f])
1194 1187 try:
1195 1188 self.wwrite(f, t)
1196 1189 except IOError:
1197 1190 os.makedirs(os.path.dirname(self.wjoin(f)))
1198 1191 self.wwrite(f, t)
1199 1192 util.set_exec(self.wjoin(f), mf2[f])
1200 1193 if moddirstate:
1201 1194 if branch_merge:
1202 1195 self.dirstate.update([f], 'n', st_mtime=-1)
1203 1196 else:
1204 1197 self.dirstate.update([f], 'n')
1205 1198
1206 1199 # merge the tricky bits
1207 1200 files = merge.keys()
1208 1201 files.sort()
1209 1202 for f in files:
1210 1203 self.ui.status("merging %s\n" % f)
1211 1204 my, other, flag = merge[f]
1212 1205 self.merge3(f, my, other)
1213 1206 util.set_exec(self.wjoin(f), flag)
1214 1207 if moddirstate:
1215 1208 if branch_merge:
1216 1209 # We've done a branch merge, mark this file as merged
1217 1210 # so that we properly record the merger later
1218 1211 self.dirstate.update([f], 'm')
1219 1212 else:
1220 1213 # We've update-merged a locally modified file, so
1221 1214 # we set the dirstate to emulate a normal checkout
1222 1215 # of that file some time in the past. Thus our
1223 1216 # merge will appear as a normal local file
1224 1217 # modification.
1225 1218 f_len = len(self.file(f).read(other))
1226 1219 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1227 1220
1228 1221 remove.sort()
1229 1222 for f in remove:
1230 1223 self.ui.note("removing %s\n" % f)
1231 1224 try:
1232 1225 os.unlink(self.wjoin(f))
1233 1226 except OSError, inst:
1234 1227 self.ui.warn("update failed to remove %s: %s!\n" % (f, inst))
1235 1228 # try removing directories that might now be empty
1236 1229 try: os.removedirs(os.path.dirname(self.wjoin(f)))
1237 1230 except: pass
1238 1231 if moddirstate:
1239 1232 if branch_merge:
1240 1233 self.dirstate.update(remove, 'r')
1241 1234 else:
1242 1235 self.dirstate.forget(remove)
1243 1236
1244 1237 def merge3(self, fn, my, other):
1245 1238 """perform a 3-way merge in the working directory"""
1246 1239
1247 1240 def temp(prefix, node):
1248 1241 pre = "%s~%s." % (os.path.basename(fn), prefix)
1249 1242 (fd, name) = tempfile.mkstemp("", pre)
1250 1243 f = os.fdopen(fd, "wb")
1251 1244 self.wwrite(fn, fl.read(node), f)
1252 1245 f.close()
1253 1246 return name
1254 1247
1255 1248 fl = self.file(fn)
1256 1249 base = fl.ancestor(my, other)
1257 1250 a = self.wjoin(fn)
1258 1251 b = temp("base", base)
1259 1252 c = temp("other", other)
1260 1253
1261 1254 self.ui.note("resolving %s\n" % fn)
1262 1255 self.ui.debug("file %s: other %s ancestor %s\n" %
1263 1256 (fn, short(other), short(base)))
1264 1257
1265 1258 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1266 1259 or "hgmerge")
1267 1260 r = os.system("%s %s %s %s" % (cmd, a, b, c))
1268 1261 if r:
1269 1262 self.ui.warn("merging %s failed!\n" % fn)
1270 1263
1271 1264 os.unlink(b)
1272 1265 os.unlink(c)
1273 1266
1274 1267 def verify(self):
1275 1268 filelinkrevs = {}
1276 1269 filenodes = {}
1277 1270 changesets = revisions = files = 0
1278 1271 errors = 0
1279 1272
1280 1273 seen = {}
1281 1274 self.ui.status("checking changesets\n")
1282 1275 for i in range(self.changelog.count()):
1283 1276 changesets += 1
1284 1277 n = self.changelog.node(i)
1285 1278 if n in seen:
1286 1279 self.ui.warn("duplicate changeset at revision %d\n" % i)
1287 1280 errors += 1
1288 1281 seen[n] = 1
1289 1282
1290 1283 for p in self.changelog.parents(n):
1291 1284 if p not in self.changelog.nodemap:
1292 1285 self.ui.warn("changeset %s has unknown parent %s\n" %
1293 1286 (short(n), short(p)))
1294 1287 errors += 1
1295 1288 try:
1296 1289 changes = self.changelog.read(n)
1297 1290 except Exception, inst:
1298 1291 self.ui.warn("unpacking changeset %s: %s\n" % (short(n), inst))
1299 1292 errors += 1
1300 1293
1301 1294 for f in changes[3]:
1302 1295 filelinkrevs.setdefault(f, []).append(i)
1303 1296
1304 1297 seen = {}
1305 1298 self.ui.status("checking manifests\n")
1306 1299 for i in range(self.manifest.count()):
1307 1300 n = self.manifest.node(i)
1308 1301 if n in seen:
1309 1302 self.ui.warn("duplicate manifest at revision %d\n" % i)
1310 1303 errors += 1
1311 1304 seen[n] = 1
1312 1305
1313 1306 for p in self.manifest.parents(n):
1314 1307 if p not in self.manifest.nodemap:
1315 1308 self.ui.warn("manifest %s has unknown parent %s\n" %
1316 1309 (short(n), short(p)))
1317 1310 errors += 1
1318 1311
1319 1312 try:
1320 1313 delta = mdiff.patchtext(self.manifest.delta(n))
1321 1314 except KeyboardInterrupt:
1322 1315 self.ui.warn("interrupted")
1323 1316 raise
1324 1317 except Exception, inst:
1325 1318 self.ui.warn("unpacking manifest %s: %s\n"
1326 1319 % (short(n), inst))
1327 1320 errors += 1
1328 1321
1329 1322 ff = [ l.split('\0') for l in delta.splitlines() ]
1330 1323 for f, fn in ff:
1331 1324 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1332 1325
1333 1326 self.ui.status("crosschecking files in changesets and manifests\n")
1334 1327 for f in filenodes:
1335 1328 if f not in filelinkrevs:
1336 1329 self.ui.warn("file %s in manifest but not in changesets\n" % f)
1337 1330 errors += 1
1338 1331
1339 1332 for f in filelinkrevs:
1340 1333 if f not in filenodes:
1341 1334 self.ui.warn("file %s in changeset but not in manifest\n" % f)
1342 1335 errors += 1
1343 1336
1344 1337 self.ui.status("checking files\n")
1345 1338 ff = filenodes.keys()
1346 1339 ff.sort()
1347 1340 for f in ff:
1348 1341 if f == "/dev/null": continue
1349 1342 files += 1
1350 1343 fl = self.file(f)
1351 1344 nodes = { nullid: 1 }
1352 1345 seen = {}
1353 1346 for i in range(fl.count()):
1354 1347 revisions += 1
1355 1348 n = fl.node(i)
1356 1349
1357 1350 if n in seen:
1358 1351 self.ui.warn("%s: duplicate revision %d\n" % (f, i))
1359 1352 errors += 1
1360 1353
1361 1354 if n not in filenodes[f]:
1362 1355 self.ui.warn("%s: %d:%s not in manifests\n"
1363 1356 % (f, i, short(n)))
1364 1357 errors += 1
1365 1358 else:
1366 1359 del filenodes[f][n]
1367 1360
1368 1361 flr = fl.linkrev(n)
1369 1362 if flr not in filelinkrevs[f]:
1370 1363 self.ui.warn("%s:%s points to unexpected changeset %d\n"
1371 1364 % (f, short(n), fl.linkrev(n)))
1372 1365 errors += 1
1373 1366 else:
1374 1367 filelinkrevs[f].remove(flr)
1375 1368
1376 1369 # verify contents
1377 1370 try:
1378 1371 t = fl.read(n)
1379 1372 except Exception, inst:
1380 1373 self.ui.warn("unpacking file %s %s: %s\n"
1381 1374 % (f, short(n), inst))
1382 1375 errors += 1
1383 1376
1384 1377 # verify parents
1385 1378 (p1, p2) = fl.parents(n)
1386 1379 if p1 not in nodes:
1387 1380 self.ui.warn("file %s:%s unknown parent 1 %s" %
1388 1381 (f, short(n), short(p1)))
1389 1382 errors += 1
1390 1383 if p2 not in nodes:
1391 1384 self.ui.warn("file %s:%s unknown parent 2 %s" %
1392 1385                                  (f, short(n), short(p2)))
1393 1386 errors += 1
1394 1387 nodes[n] = 1
1395 1388
1396 1389 # cross-check
1397 1390 for node in filenodes[f]:
1398 1391 self.ui.warn("node %s in manifests not in %s\n"
1399 1392 % (hex(node), f))
1400 1393 errors += 1
1401 1394
1402 1395 self.ui.status("%d files, %d changesets, %d total revisions\n" %
1403 1396 (files, changesets, revisions))
1404 1397
1405 1398 if errors:
1406 1399 self.ui.warn("%d integrity errors encountered!\n" % errors)
1407 1400 return 1
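With the remote flag gone, localrepository always reports a real device from dev() and answers local() with True, while the static-http subclass overrides both (dev() of -1, local() of False). Callers can rely on these two methods instead of inspecting a flag, for instance when deciding whether an operation that needs the local filesystem makes sense. A hedged sketch of such a check; the helper name is made up for illustration and is not part of this change:

    def can_hardlink(src, dst):
        # hypothetical helper: hardlinking between two repositories only
        # makes sense when both are local and live on the same device;
        # dev() is -1 for repositories without a real filesystem device
        return (src.local() and dst.local() and
                src.dev() != -1 and src.dev() == dst.dev())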
util.py
@@ -1,362 +1,358 @@
1 1 """
2 2 util.py - Mercurial utility functions and platform specific implementations
3 3
4 4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8
9 9 This contains helper routines that are independent of the SCM core and hide
10 10 platform-specific details from the core.
11 11 """
12 12
13 13 import os, errno
14 14 from demandload import *
15 15 demandload(globals(), "re")
16 16
17 17 def binary(s):
18 18 """return true if a string is binary data using diff's heuristic"""
19 19 if s and '\0' in s[:4096]:
20 20 return True
21 21 return False
22 22
23 23 def unique(g):
24 24 """return the uniq elements of iterable g"""
25 25 seen = {}
26 26 for f in g:
27 27 if f not in seen:
28 28 seen[f] = 1
29 29 yield f
30 30
31 31 class Abort(Exception):
32 32 """Raised if a command needs to print an error and exit."""
33 33
34 34 def always(fn): return True
35 35 def never(fn): return False
36 36
37 37 def globre(pat, head='^', tail='$'):
38 38 "convert a glob pattern into a regexp"
39 39 i, n = 0, len(pat)
40 40 res = ''
41 41 group = False
42 42 def peek(): return i < n and pat[i]
43 43 while i < n:
44 44 c = pat[i]
45 45 i = i+1
46 46 if c == '*':
47 47 if peek() == '*':
48 48 i += 1
49 49 res += '.*'
50 50 else:
51 51 res += '[^/]*'
52 52 elif c == '?':
53 53 res += '.'
54 54 elif c == '[':
55 55 j = i
56 56 if j < n and pat[j] in '!]':
57 57 j += 1
58 58 while j < n and pat[j] != ']':
59 59 j += 1
60 60 if j >= n:
61 61 res += '\\['
62 62 else:
63 63 stuff = pat[i:j].replace('\\','\\\\')
64 64 i = j + 1
65 65 if stuff[0] == '!':
66 66 stuff = '^' + stuff[1:]
67 67 elif stuff[0] == '^':
68 68 stuff = '\\' + stuff
69 69 res = '%s[%s]' % (res, stuff)
70 70 elif c == '{':
71 71 group = True
72 72 res += '(?:'
73 73 elif c == '}' and group:
74 74 res += ')'
75 75 group = False
76 76 elif c == ',' and group:
77 77 res += '|'
78 78 else:
79 79 res += re.escape(c)
80 80 return head + res + tail
81 81
82 82 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
83 83
84 84 def pathto(n1, n2):
85 85 '''return the relative path from one place to another.
86 86 this returns a path in the form used by the local filesystem, not hg.'''
87 87 if not n1: return localpath(n2)
88 88 a, b = n1.split('/'), n2.split('/')
89 89 a.reverse(), b.reverse()
90 90 while a and b and a[-1] == b[-1]:
91 91 a.pop(), b.pop()
92 92 b.reverse()
93 93 return os.sep.join((['..'] * len(a)) + b)
94 94
95 95 def canonpath(root, cwd, myname):
96 96 """return the canonical path of myname, given cwd and root"""
97 97 rootsep = root + os.sep
98 98 name = myname
99 99 if not name.startswith(os.sep):
100 100 name = os.path.join(root, cwd, name)
101 101 name = os.path.normpath(name)
102 102 if name.startswith(rootsep):
103 103 return pconvert(name[len(rootsep):])
104 104 elif name == root:
105 105 return ''
106 106 else:
107 107 raise Abort('%s not under root' % myname)
108 108
109 109 def matcher(canonroot, cwd, names, inc, exc, head=''):
110 110 """build a function to match a set of file patterns
111 111
112 112 arguments:
113 113 canonroot - the canonical root of the tree you're matching against
114 114 cwd - the current working directory, if relevant
115 115 names - patterns to find
116 116 inc - patterns to include
117 117 exc - patterns to exclude
118 118 head - a regex to prepend to patterns to control whether a match is rooted
119 119
120 120 a pattern is one of:
121 121 're:<regex>'
122 122 'glob:<shellglob>'
123 123 'path:<explicit path>'
124 124 'relpath:<relative path>'
125 125 '<relative path>'
126 126
127 127 returns:
128 128 a 3-tuple containing
129 129 - list of explicit non-pattern names passed in
130 130 - a bool match(filename) function
131 131 - a bool indicating if any patterns were passed in
132 132
133 133 todo:
134 134 make head regex a rooted bool
135 135 """
136 136
137 137 def patkind(name):
138 138 for prefix in 're:', 'glob:', 'path:', 'relpath:':
139 139 if name.startswith(prefix): return name.split(':', 1)
140 140 for c in name:
141 141 if c in _globchars: return 'glob', name
142 142 return 'relpath', name
143 143
144 144 def regex(kind, name, tail):
145 145 '''convert a pattern into a regular expression'''
146 146 if kind == 're':
147 147 return name
148 148 elif kind == 'path':
149 149 return '^' + re.escape(name) + '(?:/|$)'
150 150 elif kind == 'relpath':
151 151 return head + re.escape(name) + tail
152 152 return head + globre(name, '', tail)
153 153
154 154 def matchfn(pats, tail):
155 155 """build a matching function from a set of patterns"""
156 156 if pats:
157 157 pat = '(?:%s)' % '|'.join([regex(k, p, tail) for (k, p) in pats])
158 158 return re.compile(pat).match
159 159
160 160 def globprefix(pat):
161 161 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
162 162 root = []
163 163 for p in pat.split(os.sep):
164 164 if patkind(p)[0] == 'glob': break
165 165 root.append(p)
166 166 return '/'.join(root)
167 167
168 168 pats = []
169 169 files = []
170 170 roots = []
171 171 for kind, name in map(patkind, names):
172 172 if kind in ('glob', 'relpath'):
173 173 name = canonpath(canonroot, cwd, name)
174 174 if name == '':
175 175 kind, name = 'glob', '**'
176 176 if kind in ('glob', 'path', 're'):
177 177 pats.append((kind, name))
178 178 if kind == 'glob':
179 179 root = globprefix(name)
180 180 if root: roots.append(root)
181 181 elif kind == 'relpath':
182 182 files.append((kind, name))
183 183 roots.append(name)
184 184
185 185 patmatch = matchfn(pats, '$') or always
186 186 filematch = matchfn(files, '(?:/|$)') or always
187 187 incmatch = always
188 188 if inc:
189 189 incmatch = matchfn(map(patkind, inc), '(?:/|$)')
190 190 excmatch = lambda fn: False
191 191 if exc:
192 192 excmatch = matchfn(map(patkind, exc), '(?:/|$)')
193 193
194 194 return (roots,
195 195 lambda fn: (incmatch(fn) and not excmatch(fn) and
196 196 (fn.endswith('/') or
197 197 (not pats and not files) or
198 198 (pats and patmatch(fn)) or
199 199 (files and filematch(fn)))),
200 200 (inc or exc or (pats and pats != [('glob', '**')])) and True)
201 201
202 202 def system(cmd, errprefix=None):
203 203 """execute a shell command that must succeed"""
204 204 rc = os.system(cmd)
205 205 if rc:
206 206 errmsg = "%s %s" % (os.path.basename(cmd.split(None, 1)[0]),
207 207 explain_exit(rc)[0])
208 208 if errprefix:
209 209 errmsg = "%s: %s" % (errprefix, errmsg)
210 210 raise Abort(errmsg)
211 211
212 212 def rename(src, dst):
213 213 """forcibly rename a file"""
214 214 try:
215 215 os.rename(src, dst)
216 216 except:
217 217 os.unlink(dst)
218 218 os.rename(src, dst)
219 219
220 220 def copytree(src, dst, copyfile):
221 221 """Copy a directory tree, files are copied using 'copyfile'."""
222 222 names = os.listdir(src)
223 223 os.mkdir(dst)
224 224
225 225 for name in names:
226 226 srcname = os.path.join(src, name)
227 227 dstname = os.path.join(dst, name)
228 228 if os.path.isdir(srcname):
229 229 copytree(srcname, dstname, copyfile)
230 230 elif os.path.isfile(srcname):
231 231 copyfile(srcname, dstname)
232 232 else:
233 233 pass
234 234
235 235 def opener(base):
236 236 """
237 237 return a function that opens files relative to base
238 238
239 239 this function is used to hide the details of COW semantics and
240 240 remote file access from higher level code.
241 241
242 242 todo: separate remote file access into a separate function
243 243 """
244 244 p = base
245 245 def o(path, mode="r"):
246 if p.startswith("http://"):
247 f = os.path.join(p, urllib.quote(path))
248 return httprangereader.httprangereader(f)
249
250 246 f = os.path.join(p, path)
251 247
252 248 mode += "b" # for that other OS
253 249
254 250 if mode[0] != "r":
255 251 try:
256 252 s = os.stat(f)
257 253 except OSError:
258 254 d = os.path.dirname(f)
259 255 if not os.path.isdir(d):
260 256 os.makedirs(d)
261 257 else:
262 258 if s.st_nlink > 1:
263 259 file(f + ".tmp", "wb").write(file(f, "rb").read())
264 260 rename(f+".tmp", f)
265 261
266 262 return file(f, mode)
267 263
268 264 return o
269 265
270 266 def _makelock_file(info, pathname):
271 267 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
272 268 os.write(ld, info)
273 269 os.close(ld)
274 270
275 271 def _readlock_file(pathname):
276 272 return file(pathname).read()
277 273
278 274 # Platform specific variants
279 275 if os.name == 'nt':
280 276 nulldev = 'NUL:'
281 277
282 278 def is_exec(f, last):
283 279 return last
284 280
285 281 def set_exec(f, mode):
286 282 pass
287 283
288 284 def pconvert(path):
289 285 return path.replace("\\", "/")
290 286
291 287 def localpath(path):
292 288 return path.replace('/', '\\')
293 289
294 290 def normpath(path):
295 291 return pconvert(os.path.normpath(path))
296 292
297 293 makelock = _makelock_file
298 294 readlock = _readlock_file
299 295
300 296 def explain_exit(code):
301 297 return "exited with status %d" % code, code
302 298
303 299 else:
304 300 nulldev = '/dev/null'
305 301
306 302 def is_exec(f, last):
307 303 """check whether a file is executable"""
308 304 return (os.stat(f).st_mode & 0100 != 0)
309 305
310 306 def set_exec(f, mode):
311 307 s = os.stat(f).st_mode
312 308 if (s & 0100 != 0) == mode:
313 309 return
314 310 if mode:
315 311 # Turn on +x for every +r bit when making a file executable
316 312 # and obey umask.
317 313 umask = os.umask(0)
318 314 os.umask(umask)
319 315 os.chmod(f, s | (s & 0444) >> 2 & ~umask)
320 316 else:
321 317 os.chmod(f, s & 0666)
322 318
323 319 def pconvert(path):
324 320 return path
325 321
326 322 def localpath(path):
327 323 return path
328 324
329 325 normpath = os.path.normpath
330 326
331 327 def makelock(info, pathname):
332 328 try:
333 329 os.symlink(info, pathname)
334 330 except OSError, why:
335 331 if why.errno == errno.EEXIST:
336 332 raise
337 333 else:
338 334 _makelock_file(info, pathname)
339 335
340 336 def readlock(pathname):
341 337 try:
342 338 return os.readlink(pathname)
343 339 except OSError, why:
344 340 if why.errno == errno.EINVAL:
345 341 return _readlock_file(pathname)
346 342 else:
347 343 raise
348 344
349 345 def explain_exit(code):
350 346 """return a 2-tuple (desc, code) describing a process's status"""
351 347 if os.name == 'nt': # os.WIFxx is not supported on windows
352 348 return "aborted with error." , -1
353 349 if os.WIFEXITED(code):
354 350 val = os.WEXITSTATUS(code)
355 351 return "exited with status %d" % val, val
356 352 elif os.WIFSIGNALED(code):
357 353 val = os.WTERMSIG(code)
358 354 return "killed by signal %d" % val, val
359 355 elif os.WIFSTOPPED(code):
360 356 val = os.WSTOPSIG(code)
361 357 return "stopped by signal %d" % val, val
362 358 raise ValueError("invalid exit code")
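After this change util.opener is purely local: the http branch has moved into statichttprepo, and what remains opens files in binary mode, creates missing parent directories on write, and breaks hardlinks (copy, then rename) before modifying a file with more than one link. A small usage sketch with a throwaway directory as the base; it assumes the function is importable as mercurial.util.opener in this layout.

    import tempfile
    from mercurial import util          # package layout assumed

    base = tempfile.mkdtemp()
    o = util.opener(base)

    f = o("data/example.i", "w")        # missing parent directory is created
    f.write("hello\n")
    f.close()

    print o("data/example.i").read()    # -> hello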