Merge with crew-stable
Alexis S. L. Carvalho, r4134:9dc64c84, merge into default
# queue.py - patch queues for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

'''patch management and development

This extension lets you work with a stack of patches in a Mercurial
repository. It manages two stacks of patches - all known patches, and
applied patches (a subset of the known patches).

Known patches are represented as patch files in the .hg/patches
directory. Applied patches are both patch files and changesets.

Common tasks (use "hg help command" for more details):

prepare repository to work with patches   qinit
create new patch                          qnew
import existing patch                     qimport

print patch series                        qseries
print applied patches                     qapplied
print name of top applied patch           qtop

add known patch to applied stack          qpush
remove patch from applied stack           qpop
refresh contents of top applied patch     qrefresh
'''
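# A minimal example session (patch and file names are illustrative):
#
#   $ hg qinit               # prepare the repository (creates .hg/patches)
#   $ hg qnew fix-typo.diff  # start a new patch on top of the stack
#   $ echo fixed >> file.txt
#   $ hg qrefresh            # capture working-dir changes into the patch
#   $ hg qpop                # unapply it; "hg qpush" reapplies it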

from mercurial.i18n import _
from mercurial import commands, cmdutil, hg, patch, revlog, util, changegroup
import os, sys, re, errno

commands.norepo += " qclone qversion"

# Patch names look like unix file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath

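# The status file stores one "rev:name" line per applied patch; a sketch of
# how statusentry splits such a line (hash shortened for illustration):
#
#   entry = statusentry('3816f01f933a:stable-fix.diff')
#   assert entry.rev == '3816f01f933a'
#   assert entry.name == 'stable-fix.diff'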
class statusentry:
    def __init__(self, rev, name=None):
        if not name:
            fields = rev.split(':', 1)
            if len(fields) == 2:
                self.rev, self.name = fields
            else:
                self.rev, self.name = None, None
        else:
            self.rev, self.name = rev, name

    def __str__(self):
        return self.rev + ':' + self.name

class queue:
    def __init__(self, ui, path, patchdir=None):
        self.basepath = path
        self.path = patchdir or os.path.join(path, "patches")
        self.opener = util.opener(self.path)
        self.ui = ui
        self.applied = []
        self.full_series = []
        self.applied_dirty = 0
        self.series_dirty = 0
        self.series_path = "series"
        self.status_path = "status"
        self.guards_path = "guards"
        self.active_guards = None
        self.guards_dirty = False
        self._diffopts = None

        if os.path.exists(self.join(self.series_path)):
            self.full_series = self.opener(self.series_path).read().splitlines()
        self.parse_series()

        if os.path.exists(self.join(self.status_path)):
            lines = self.opener(self.status_path).read().splitlines()
            self.applied = [statusentry(l) for l in lines]

    def diffopts(self):
        if self._diffopts is None:
            self._diffopts = patch.diffopts(self.ui)
        return self._diffopts

    def join(self, *p):
        return os.path.join(self.path, *p)

    def find_series(self, patch):
        pre = re.compile(r"(\s*)([^#]+)")
        index = 0
        for l in self.full_series:
            m = pre.match(l)
            if m:
                s = m.group(2)
                s = s.rstrip()
                if s == patch:
                    return index
            index += 1
        return None

    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')

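    # A sketch of the series file format parse_series handles (patch names
    # hypothetical): text after '#' carries guards, and a line starting
    # with '#' is a comment:
    #
    #   bugfix.diff
    #   experimental.diff #+devel
    #   never-on-stable.diff #-stable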
    def parse_series(self):
        self.series = []
        self.series_guards = []
        for l in self.full_series:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.series_path)))
                self.series.append(patch)
                self.series_guards.append(self.guard_re.findall(comment))

    def check_guard(self, guard):
        bad_chars = '# \t\r\n\f'
        first = guard[0]
        for c in '-+':
            if first == c:
                return (_('guard %r starts with invalid character: %r') %
                        (guard, c))
        for c in bad_chars:
            if c in guard:
                return _('invalid character in guard %r: %r') % (guard, c)

    def set_active(self, guards):
        for guard in guards:
            bad = self.check_guard(guard)
            if bad:
                raise util.Abort(bad)
        guards = dict.fromkeys(guards).keys()
        guards.sort()
        self.ui.debug('active guards: %s\n' % ' '.join(guards))
        self.active_guards = guards
        self.guards_dirty = True

    def active(self):
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                if err.errno != errno.ENOENT: raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards

    def set_guards(self, idx, guards):
        for g in guards:
            if len(g) < 2:
                raise util.Abort(_('guard %r too short') % g)
            if g[0] not in '-+':
                raise util.Abort(_('guard %r starts with invalid char') % g)
            bad = self.check_guard(g[1:])
            if bad:
                raise util.Abort(bad)
        drop = self.guard_re.sub('', self.full_series[idx])
        self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
        self.parse_series()
        self.series_dirty = True

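    # Guard selection semantics, assuming the series sketched above and
    # "hg qselect devel" having activated only the 'devel' guard:
    #
    #   pushable('bugfix.diff')          -> (True, None)  no guards at all
    #   pushable('experimental.diff')    -> (True, '+devel')
    #   pushable('never-on-stable.diff') -> (True, '')  negative guard inactive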
    def pushable(self, idx):
        if isinstance(idx, str):
            idx = self.series.index(idx)
        patchguards = self.series_guards[idx]
        if not patchguards:
            return True, None
        default = False
        guards = self.active()
        exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
        if exactneg:
            return False, exactneg[0]
        pos = [g for g in patchguards if g[0] == '+']
        exactpos = [g for g in pos if g[1:] in guards]
        if pos:
            if exactpos:
                return True, exactpos[0]
            return False, pos
        return True, ''

    def explain_pushable(self, idx, all_patches=False):
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])

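    # save_dirty persists queue state into three plain-text files under
    # .hg/patches (contents sketched, not exhaustive):
    #
    #   status - one "rev:name" line per applied patch
    #   series - patch names in push order, with optional #guards
    #   guards - whitespace-separated active guard names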
    def save_dirty(self):
        def write_list(items, path):
            fp = self.opener(path, 'w')
            for i in items:
                print >> fp, i
            fp.close()
        if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
        if self.series_dirty: write_list(self.full_series, self.series_path)
        if self.guards_dirty: write_list(self.active_guards, self.guards_path)

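    # A sketch of the header layout readheaders parses for a patch written
    # by "hg export" (all values illustrative):
    #
    #   # HG changeset patch
    #   # User Jane Doe <jane@example.com>
    #   # Date 1170000000 0
    #   fix off-by-one in foo
    #
    #   diff -r aaaaaaaaaaaa -r bbbbbbbbbbbb foo.py
    #   ...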
    def readheaders(self, patch):
        def eatdiff(lines):
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            while lines:
                l = lines[-1]
                if re.match(r'\s*$', l):
                    del lines[-1]
                else:
                    break

        pf = self.join(patch)
        message = []
        comments = []
        user = None
        date = None
        format = None
        subject = None
        diffstart = 0

        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                diffstart = 2
                break
            if diffstart:
                if line.startswith('+++ '):
                    diffstart = 2
                    break
                if line.startswith("--- "):
                    diffstart = 1
                    continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)
        return (message, comments, user, date, diffstart > 1)

    def printdiff(self, repo, node1, node2=None, files=None,
                  fp=None, changes=None, opts={}):
        fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)

        patch.diff(repo, node1, node2, fns, match=matchfn,
                   fp=fp, changes=changes, opts=self.diffopts())

    def mergeone(self, repo, mergeq, head, patch, rev, wlock):
        # first try just applying the patch
        (err, n) = self.apply(repo, [ patch ], update_status=False,
                              strict=True, merge=rev, wlock=wlock)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn("patch didn't work out, merging %s\n" % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head, wlock=wlock)
        self.strip(repo, n, update=False, backup='strip', wlock=wlock)

        ctx = repo.changectx(rev)
        ret = hg.merge(repo, rev, wlock=wlock)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        n = repo.commit(None, ctx.description(), ctx.user(),
                        force=1, wlock=wlock)
        if n is None:
            raise util.Abort(_("repo commit failed"))
        try:
            message, comments, user, date, patchfound = mergeq.readheaders(patch)
        except:
            raise util.Abort(_("unable to read %s") % patch)

        patchf = self.opener(patch, "w")
        if comments:
            comments = "\n".join(comments) + '\n\n'
            patchf.write(comments)
        self.printdiff(repo, head, n, fp=patchf)
        patchf.close()
        return (0, n)

    def qparents(self, repo, rev=None):
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == revlog.nullid:
                return p1
            if len(self.applied) == 0:
                return None
            return revlog.bin(self.applied[-1].rev)
        pp = repo.changelog.parents(rev)
        if pp[1] != revlog.nullid:
            arevs = [ x.rev for x in self.applied ]
            p0 = revlog.hex(pp[0])
            p1 = revlog.hex(pp[1])
            if p0 in arevs:
                return pp[0]
            if p1 in arevs:
                return pp[1]
        return pp[0]

    def mergepatch(self, repo, mergeq, series, wlock):
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents.  This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent.  This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
                            wlock=wlock)
            self.applied.append(statusentry(revlog.hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn("patch %s does not exist\n" % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn("patch %s is not applied\n" % patch)
                return (1, None)
            rev = revlog.bin(info[1])
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
            if head:
                self.applied.append(statusentry(revlog.hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        return (0, head)

    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: file name of patch'''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn("patch failed, unable to continue (try -v)\n")
            return (False, files, False)

        return (True, files, fuzz)

    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, wlock=None):
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        if not wlock:
            wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction()
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explain_pushable(patchname, all_patches=True)
                continue
            self.ui.warn("applying %s\n" % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                message, comments, user, date, patchfound = self.readheaders(patchname)
            except:
                self.ui.warn("Unable to read %s\n" % patchname)
                err = 1
                break

            if not message:
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            (patcherr, files, fuzz) = self.patch(repo, pf)
            patcherr = not patcherr

            if merge and files:
                # Mark as merged and update dirstate parent info
                repo.dirstate.update(repo.dirstate.filterfiles(files.keys()), 'm')
                p1, p2 = repo.dirstate.parents()
                repo.dirstate.setparents(p1, merge)
            files = patch.updatedir(self.ui, repo, files, wlock=wlock)
            n = repo.commit(files, message, user, date, force=1, lock=lock,
                            wlock=wlock)

            if n is None:
                raise util.Abort(_("repo commit failed"))

            if update_status:
                self.applied.append(statusentry(revlog.hex(n), patchname))

            if patcherr:
                if not patchfound:
                    self.ui.warn("patch %s is empty\n" % patchname)
                    err = 0
                else:
                    self.ui.warn("patch failed, rejects left in working dir\n")
                    err = 1
                break

            if fuzz and strict:
                self.ui.warn("fuzz found when applying patch, stopping\n")
                err = 1
                break
        tr.close()
        return (err, n)

    def delete(self, repo, patches, opts):
        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            realpatches.append(patch)

        appliedbase = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts['rev'])
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            for rev in revs:
                if appliedbase >= len(self.applied):
                    raise util.Abort(_("revision %d is not managed") % rev)

                base = revlog.bin(self.applied[appliedbase].rev)
                node = repo.changelog.node(rev)
                if node != base:
                    raise util.Abort(_("cannot delete revision %d above "
                                       "applied patches") % rev)
                realpatches.append(self.applied[appliedbase].name)
                appliedbase += 1

        if not opts.get('keep'):
            r = self.qrepo()
            if r:
                r.remove(realpatches, True)
            else:
                for p in realpatches:
                    os.unlink(self.join(p))

        if appliedbase:
            del self.applied[:appliedbase]
            self.applied_dirty = 1
        indices = [self.find_series(p) for p in realpatches]
        indices.sort()
        for i in indices[-1::-1]:
            del self.full_series[i]
        self.parse_series()
        self.series_dirty = 1

    def check_toppatch(self, repo):
        if len(self.applied) > 0:
            top = revlog.bin(self.applied[-1].rev)
            pp = repo.dirstate.parents()
            if top not in pp:
                raise util.Abort(_("queue top not at same revision as working directory"))
            return top
        return None

    def check_localchanges(self, repo, force=False, refresh=True):
        m, a, r, d = repo.status()[:4]
        if m or a or r or d:
            if not force:
                if refresh:
                    raise util.Abort(_("local changes found, refresh first"))
                else:
                    raise util.Abort(_("local changes found"))
        return m, a, r, d

    def new(self, repo, patch, msg=None, force=None):
        if os.path.exists(self.join(patch)):
            raise util.Abort(_('patch "%s" already exists') % patch)
        m, a, r, d = self.check_localchanges(repo, force)
        commitfiles = m + a + r
        self.check_toppatch(repo)
        wlock = repo.wlock()
        insert = self.full_series_end()
        if msg:
            n = repo.commit(commitfiles, "[mq]: %s" % msg, force=True,
                            wlock=wlock)
        else:
            n = repo.commit(commitfiles,
                            "New patch: %s" % patch, force=True, wlock=wlock)
        if n is None:
            raise util.Abort(_("repo commit failed"))
        self.full_series[insert:insert] = [patch]
        self.applied.append(statusentry(revlog.hex(n), patch))
        self.parse_series()
        self.series_dirty = 1
        self.applied_dirty = 1
        p = self.opener(patch, "w")
        if msg:
            msg = msg + "\n"
            p.write(msg)
        p.close()
        wlock = None
        r = self.qrepo()
        if r: r.add([patch])
        if commitfiles:
            self.refresh(repo, short=True)

    def strip(self, repo, rev, update=True, backup="all", wlock=None):
        def limitheads(chlog, stop):
            """return the list of all nodes that have no children"""
            p = {}
            h = []
            stoprev = 0
            if stop in chlog.nodemap:
                stoprev = chlog.rev(stop)

            for r in xrange(chlog.count() - 1, -1, -1):
                n = chlog.node(r)
                if n not in p:
                    h.append(n)
                if n == stop:
                    break
                if r < stoprev:
                    break
                for pn in chlog.parents(n):
                    p[pn] = 1
            return h

        def bundle(cg):
            backupdir = repo.join("strip-backup")
            if not os.path.isdir(backupdir):
                os.mkdir(backupdir)
            name = os.path.join(backupdir, "%s" % revlog.short(rev))
            name = savename(name)
            self.ui.warn("saving bundle to %s\n" % name)
            return changegroup.writebundle(cg, name, "HG10BZ")

        def stripall(revnum):
            mm = repo.changectx(rev).manifest()
            seen = {}

            for x in xrange(revnum, repo.changelog.count()):
                for f in repo.changectx(x).files():
                    if f in seen:
                        continue
                    seen[f] = 1
                    if f in mm:
                        filerev = mm[f]
                    else:
                        filerev = 0
                    seen[f] = filerev
            # we go in two steps here so the strip loop happens in a
            # sensible order.  When stripping many files, this helps keep
            # our disk access patterns under control.
            seen_list = seen.keys()
            seen_list.sort()
            for f in seen_list:
                ff = repo.file(f)
                filerev = seen[f]
                if filerev != 0:
                    if filerev in ff.nodemap:
                        filerev = ff.rev(filerev)
                    else:
                        filerev = 0
                ff.strip(filerev, revnum)

        if not wlock:
            wlock = repo.wlock()
        lock = repo.lock()
        chlog = repo.changelog
        # TODO delete the undo files, and handle undo of merge sets
        pp = chlog.parents(rev)
        revnum = chlog.rev(rev)

        if update:
            self.check_localchanges(repo, refresh=False)
            urev = self.qparents(repo, rev)
            hg.clean(repo, urev, wlock=wlock)
            repo.dirstate.write()

        # save is a list of all the branches we are truncating away
        # that we actually want to keep.  changegroup will be used
        # to preserve them and add them back after the truncate
        saveheads = []
        savebases = {}

        heads = limitheads(chlog, rev)
        seen = {}

        # search through all the heads, finding those where the revision
        # we want to strip away is an ancestor.  Also look for merges
        # that might be turned into new heads by the strip.
        while heads:
            h = heads.pop()
            n = h
            while True:
                seen[n] = 1
                pp = chlog.parents(n)
                if pp[1] != revlog.nullid:
                    for p in pp:
                        if chlog.rev(p) > revnum and p not in seen:
                            heads.append(p)
                if pp[0] == revlog.nullid:
                    break
                if chlog.rev(pp[0]) < revnum:
                    break
                n = pp[0]
                if n == rev:
                    break
            r = chlog.reachable(h, rev)
            if rev not in r:
                saveheads.append(h)
                for x in r:
                    if chlog.rev(x) > revnum:
                        savebases[x] = 1

        # create a changegroup for all the branches we need to keep
        if backup == "all":
            backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip')
            bundle(backupch)
        if saveheads:
            backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip')
            chgrpfile = bundle(backupch)

        stripall(revnum)

        change = chlog.read(rev)
        chlog.strip(revnum, revnum)
        repo.manifest.strip(repo.manifest.rev(change[0]), revnum)
        if saveheads:
            self.ui.status("adding branch\n")
            commands.unbundle(self.ui, repo, "file:%s" % chgrpfile,
                              update=False)
            if backup != "strip":
                os.unlink(chgrpfile)

    def isapplied(self, patch):
        """returns (index, rev, patch)"""
        for i in xrange(len(self.applied)):
            a = self.applied[i]
            if a.name == patch:
                return (i, a.rev, a.name)
        return None

    # if the exact patch name does not exist, we try a few
    # variations.  If strict is passed, we try only #1
    #
    # 1) a number to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
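    #
    # illustrative examples, assuming a series of a.diff, b.diff, c.diff
    # with b.diff applied on top:
    #
    #   lookup('1')      -> 'b.diff'  (offset in the series file)
    #   lookup('c.di')   -> 'c.diff'  (unique substring)
    #   lookup('qtip-1') -> 'a.diff'  (offset relative to a patch name)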
    def lookup(self, patch, strict=False):
        patch = patch and str(patch)

        def partial_name(s):
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn('  %s\n' % m)
                return None
            if matches:
                return matches[0]
            if len(self.series) > 0 and len(self.applied) > 0:
                if s == 'qtip':
                    return self.series[self.series_end(True)-1]
                if s == 'qbase':
                    return self.series[0]
            return None
        if patch is None:
            return None

        # we don't want to return a partial match until we make
        # sure the file name passed in does not exist (checked below)
        res = partial_name(patch)
        if res and res == patch:
            return res

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except (ValueError, OverflowError):
                pass
            else:
                if sno < len(self.series):
                    return self.series[sno]
            if not strict:
                # return any partial match made above
                if res:
                    return res
                minus = patch.rfind('-')
                if minus >= 0:
                    res = partial_name(patch[:minus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[minus+1:] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i - off >= 0:
                                return self.series[i - off]
                plus = patch.rfind('+')
                if plus >= 0:
                    res = partial_name(patch[:plus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[plus+1:] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i + off < len(self.series):
                                return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)

    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None, wlock=None):
        if not wlock:
            wlock = repo.wlock()
        patch = self.lookup(patch)
        # Suppose our series file is: A B C and the current 'top' patch is B.
        # qpush C should be performed (moving forward)
        # qpush B is a NOP (no change)
        # qpush A is an error (can't go backwards with qpush)
        if patch:
            info = self.isapplied(patch)
            if info:
                if info[0] < len(self.applied) - 1:
                    raise util.Abort(_("cannot push to a previous patch: %s") %
                                     patch)
                if info[0] < len(self.series) - 1:
                    self.ui.warn(_('qpush: %s is already at the top\n') % patch)
                else:
                    self.ui.warn(_('all patches are currently applied\n'))
                return

        # Following the above example, starting at 'top' of B:
        # qpush should be performed (pushes C), but a subsequent qpush without
        # an argument is an error (nothing to apply). This allows a loop
        # of "...while hg qpush..." to work as it detects an error when done
        if self.series_end() == len(self.series):
            self.ui.warn(_('patch series already fully applied\n'))
            return 1
        if not force:
            self.check_localchanges(repo)

        self.applied_dirty = 1
        start = self.series_end()
        if start > 0:
            self.check_toppatch(repo)
        if not patch:
            patch = self.series[start]
            end = start + 1
        else:
            end = self.series.index(patch, start) + 1
        s = self.series[start:end]
        if mergeq:
            ret = self.mergepatch(repo, mergeq, s, wlock)
        else:
            ret = self.apply(repo, s, list, wlock=wlock)
        top = self.applied[-1].name
        if ret[0]:
            self.ui.write("Errors during apply, please fix and refresh %s\n" %
                          top)
        else:
            self.ui.write("Now at: %s\n" % top)
        return ret[0]

    def pop(self, repo, patch=None, force=False, update=True, all=False,
            wlock=None):
        def getfile(f, rev):
            t = repo.file(f).read(rev)
            repo.wfile(f, "w").write(t)

        if not wlock:
            wlock = repo.wlock()
        if patch:
            # index, rev, patch
            info = self.isapplied(patch)
            if not info:
                patch = self.lookup(patch)
            info = self.isapplied(patch)
            if not info:
                raise util.Abort(_("patch %s is not applied") % patch)

        if len(self.applied) == 0:
            # Allow qpop -a to work repeatedly,
            # but not qpop without an argument
            self.ui.warn(_("no patches applied\n"))
            return not all

        if not update:
            parents = repo.dirstate.parents()
            rr = [ revlog.bin(x.rev) for x in self.applied ]
            for p in parents:
                if p in rr:
                    self.ui.warn("qpop: forcing dirstate update\n")
                    update = True

        if not force and update:
            self.check_localchanges(repo)

        self.applied_dirty = 1
        end = len(self.applied)
        if not patch:
            if all:
                popi = 0
            else:
                popi = len(self.applied) - 1
        else:
            popi = info[0] + 1
            if popi >= end:
                self.ui.warn("qpop: %s is already at the top\n" % patch)
                return
        info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]

        start = info[0]
        rev = revlog.bin(info[1])

        # we know there are no local changes, so we can make a simplified
        # form of hg.update.
        if update:
            top = self.check_toppatch(repo)
            qp = self.qparents(repo, rev)
            changes = repo.changelog.read(qp)
            mmap = repo.manifest.read(changes[0])
            m, a, r, d, u = repo.status(qp, top)[:5]
            if d:
                raise util.Abort("deletions found between repo revs")
            for f in m:
                getfile(f, mmap[f])
            for f in r:
                getfile(f, mmap[f])
                util.set_exec(repo.wjoin(f), mmap.execf(f))
            repo.dirstate.update(m + r, 'n')
            for f in a:
                try:
                    os.unlink(repo.wjoin(f))
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise
                try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                except: pass
            if a:
                repo.dirstate.forget(a)
            repo.dirstate.setparents(qp, revlog.nullid)
        self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
        del self.applied[start:end]
        if len(self.applied):
            self.ui.write("Now at: %s\n" % self.applied[-1].name)
        else:
            self.ui.write("Patch queue now empty\n")

    def diff(self, repo, pats, opts):
        top = self.check_toppatch(repo)
        if not top:
            self.ui.write("No patches applied\n")
            return
        qp = self.qparents(repo, top)
        if opts.get('git'):
            self.diffopts().git = True
        self.printdiff(repo, qp, files=pats, opts=opts)

    def refresh(self, repo, pats=None, **opts):
        if len(self.applied) == 0:
            self.ui.write("No patches applied\n")
            return 1
        wlock = repo.wlock()
        self.check_toppatch(repo)
        (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
        top = revlog.bin(top)
        cparents = repo.changelog.parents(top)
        patchparent = self.qparents(repo, top)
        message, comments, user, date, patchfound = self.readheaders(patchfn)

        patchf = self.opener(patchfn, "w")
        msg = opts.get('msg', '').rstrip()
        if msg:
            if comments:
                # Remove existing message.
                ci = 0
                for mi in xrange(len(message)):
                    while message[mi] != comments[ci]:
                        ci += 1
                    del comments[ci]
            comments.append(msg)
        if comments:
            comments = "\n".join(comments) + '\n\n'
            patchf.write(comments)

        if opts.get('git'):
            self.diffopts().git = True
        fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
        tip = repo.changelog.tip()
        if top == tip:
            # if the top of our patch queue is also the tip, there is an
            # optimization here.  We update the dirstate in place and strip
            # off the tip commit.  Then just commit the current directory
            # tree.  We can also send repo.commit the list of files
            # changed to speed up the diff
            #
            # in short mode, we only diff the files included in the
            # patch already
            #
            # this should really read:
            #   mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
            # but we do it backwards to take advantage of manifest/chlog
            # caching against the next repo.status call
            #
            mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
            changes = repo.changelog.read(tip)
            man = repo.manifest.read(changes[0])
            aaa = aa[:]
            if opts.get('short'):
                filelist = mm + aa + dd
            else:
                filelist = None
            m, a, r, d, u = repo.status(files=filelist)[:5]

            # we might end up with files that were added between tip and
            # the dirstate parent, but then changed in the local dirstate.
            # in this case, we want them to only show up in the added section
            for x in m:
                if x not in aa:
                    mm.append(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch.  In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    del dd[dd.index(x)]
                    mm.append(x)
                else:
                    aa.append(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    del aa[aa.index(x)]
                    forget.append(x)
                    continue
                elif x in mm:
                    del mm[mm.index(x)]
                dd.append(x)

            m = util.unique(mm)
            r = util.unique(dd)
            a = util.unique(aa)
            filelist = filter(matchfn, util.unique(m + r + a))
            patch.diff(repo, patchparent, files=filelist, match=matchfn,
                       fp=patchf, changes=(m, a, r, [], u),
                       opts=self.diffopts())
            patchf.close()

            repo.dirstate.setparents(*cparents)
            copies = {}
            for dst in a:
                src = repo.dirstate.copied(dst)
                if src is None:
                    continue
                copies.setdefault(src, []).append(dst)
            repo.dirstate.update(a, 'a')
            # remember the copies between patchparent and tip
            # this may be slow, so don't do it if we're not tracking copies
            if self.diffopts().git:
                for dst in aaa:
                    f = repo.file(dst)
                    src = f.renamed(man[dst])
                    if src:
                        copies[src[0]] = copies.get(dst, [])
                        if dst in a:
                            copies[src[0]].append(dst)
                    # we can't copy a file created by the patch itself
                    if dst in copies:
                        del copies[dst]
            for src, dsts in copies.iteritems():
                for dst in dsts:
                    repo.dirstate.copy(src, dst)
            repo.dirstate.update(r, 'r')
            # if the patch excludes a modified file, mark that file with
            # mtime=0 so status can see it.
            mm = []
            for i in xrange(len(m)-1, -1, -1):
                if not matchfn(m[i]):
                    mm.append(m[i])
                    del m[i]
            repo.dirstate.update(m, 'n')
            repo.dirstate.update(mm, 'n', st_mtime=0)
            repo.dirstate.forget(forget)

            if not msg:
                if not message:
                    message = "patch queue: %s\n" % patchfn
                else:
                    message = "\n".join(message)
            else:
                message = msg

            self.strip(repo, top, update=False, backup='strip', wlock=wlock)
            n = repo.commit(filelist, message, changes[1], force=1, wlock=wlock)
            self.applied[-1] = statusentry(revlog.hex(n), patchfn)
            self.applied_dirty = 1
        else:
            self.printdiff(repo, patchparent, fp=patchf)
            patchf.close()
            added = repo.status()[1]
            for a in added:
                f = repo.wjoin(a)
                try:
                    os.unlink(f)
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise
                try: os.removedirs(os.path.dirname(f))
                except: pass
            # forget the file copies in the dirstate
            # push should re-add the files later on
            repo.dirstate.forget(added)
            self.pop(repo, force=True, wlock=wlock)
            self.push(repo, force=True, wlock=wlock)

    def init(self, repo, create=False):
        if not create and os.path.isdir(self.path):
            raise util.Abort(_("patch queue directory already exists"))
        try:
            os.mkdir(self.path)
        except OSError, inst:
            if inst.errno != errno.EEXIST or not create:
                raise
        if create:
            return self.qrepo(create=True)

    def unapplied(self, repo, patch=None):
        if patch and patch not in self.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        if not patch:
            start = self.series_end()
        else:
            start = self.series.index(patch) + 1
        unapplied = []
        for i in xrange(start, len(self.series)):
            pushable, reason = self.pushable(i)
            if pushable:
                unapplied.append((i, self.series[i]))
            self.explain_pushable(i)
        return unapplied

    def qseries(self, repo, missing=None, start=0, length=0, status=None,
                summary=False):
        def displayname(patchname):
            if summary:
                msg = self.readheaders(patchname)[0]
                msg = msg and ': ' + msg[0] or ': '
            else:
                msg = ''
            return '%s%s' % (patchname, msg)

        def pname(i):
            if status == 'A':
                return self.applied[i].name
            else:
                return self.series[i]

        applied = dict.fromkeys([p.name for p in self.applied])
        if not length:
            length = len(self.series) - start
        if not missing:
            for i in xrange(start, start+length):
                pfx = ''
                patch = pname(i)
                if self.ui.verbose:
                    if patch in applied:
                        stat = 'A'
                    elif self.pushable(i)[0]:
                        stat = 'U'
                    else:
                        stat = 'G'
                    pfx = '%d %s ' % (i, stat)
                self.ui.write('%s%s\n' % (pfx, displayname(patch)))
        else:
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.status_path, self.series_path)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            msng_list.sort()
            for x in msng_list:
                pfx = self.ui.verbose and ('D ') or ''
                self.ui.write("%s%s\n" % (pfx, displayname(x)))

    def issaveline(self, l):
        if l.name == '.hg.patches.save.line':
            return True

    def qrepo(self, create=False):
        if create or os.path.isdir(self.join(".hg")):
            return hg.repository(self.ui, path=self.path, create=create)

    def restore(self, repo, rev, delete=None, qupdate=None):
        c = repo.changelog.read(rev)
        desc = c[4].strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i in xrange(0, len(lines)):
            if lines[i] == 'Patch Data:':
                datastart = i + 1
            elif lines[i].startswith('Dirstate:'):
                l = lines[i].rstrip()
                l = l[10:].split(' ')
                qpp = [ hg.bin(x) for x in l ]
            elif datastart is not None:
                l = lines[i].rstrip()
                se = statusentry(l)
                file_ = se.name
                if se.rev:
                    applied.append(se)
                else:
                    series.append(file_)
        if datastart is None:
            self.ui.warn("No saved patch data found\n")
            return 1
        self.ui.warn("restoring status: %s\n" % lines[0])
        self.full_series = series
        self.applied = applied
        self.parse_series()
        self.series_dirty = 1
        self.applied_dirty = 1
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                self.ui.warn("save entry has children, leaving it alone\n")
            else:
                self.ui.warn("removing save entry %s\n" % hg.short(rev))
                pp = repo.dirstate.parents()
                if rev in pp:
                    update = True
                else:
                    update = False
                self.strip(repo, rev, update=update, backup='strip')
        if qpp:
            self.ui.warn("saved queue repository parents: %s %s\n" %
                         (hg.short(qpp[0]), hg.short(qpp[1])))
            if qupdate:
                self.ui.status("queue directory updating\n")
                r = self.qrepo()
                if not r:
                    self.ui.warn("Unable to load queue repository\n")
                    return 1
                hg.clean(r, qpp[0])

    def save(self, repo, msg=None):
        if len(self.applied) == 0:
            self.ui.warn("save: no patches applied, exiting\n")
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn("status is already saved\n")
            return 1

        ar = [ ':' + x for x in self.full_series ]
        if not msg:
            msg = "hg patches saved state"
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
                   "\n".join(ar) + '\n' or "")
        n = repo.commit(None, text, user=None, force=1)
        if not n:
            self.ui.warn("repo commit failed\n")
            return 1
        self.applied.append(statusentry(revlog.hex(n), '.hg.patches.save.line'))
        self.applied_dirty = 1

    def full_series_end(self):
        if len(self.applied) > 0:
            p = self.applied[-1].name
            end = self.find_series(p)
            if end is None:
                return len(self.full_series)
            return end + 1
        return 0

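    # A sketch of series_end semantics (illustrative values): with
    # series = ['a.diff', 'b.diff', 'c.diff'] and only a.diff applied,
    # series_end() returns 1, the index of the next patch that could be
    # pushed, skipping guarded entries; series_end(True) returns the same
    # index without the guard check. full_series_end() above is the
    # analogue computed over the raw series file lines.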
    def series_end(self, all_patches=False):
        end = 0
        def next(start):
            if all_patches:
                return start
            i = start
            while i < len(self.series):
                p, reason = self.pushable(i)
                if p:
                    break
                self.explain_pushable(i)
                i += 1
            return i
        if len(self.applied) > 0:
            p = self.applied[-1].name
            try:
                end = self.series.index(p)
            except ValueError:
                return 0
            return next(end + 1)
        return next(end)

    def appliedname(self, index):
        pname = self.applied[index].name
        if not self.ui.verbose:
            p = pname
        else:
            p = str(self.series.index(pname)) + " " + pname
        return p

    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        def checkseries(patchname):
            if patchname in self.series:
                raise util.Abort(_('patch %s is already in the series file')
                                 % patchname)
        def checkfile(patchname):
            if not force and os.path.exists(self.join(patchname)):
                raise util.Abort(_('patch "%s" already exists')
                                 % patchname)

        if rev:
            if files:
                raise util.Abort(_('option "-r" not valid when importing '
                                   'files'))
            rev = cmdutil.revrange(repo, rev)
            rev.sort(lambda x, y: cmp(y, x))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise util.Abort(_('option "-n" not valid when importing multiple '
                               'patches'))
        i = 0
        added = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
            if len(heads) > 1:
                raise util.Abort(_('revision %d is the root of more than one '
                                   'branch') % rev[-1])
            if self.applied:
                base = revlog.hex(repo.changelog.node(rev[0]))
                if base in [n.rev for n in self.applied]:
                    raise util.Abort(_('revision %d is already managed')
                                     % rev[0])
                if heads != [revlog.bin(self.applied[-1].rev)]:
                    raise util.Abort(_('revision %d is not the parent of '
                                       'the queue') % rev[0])
                base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev[0])]:
                    raise util.Abort(_('revision %d has unmanaged children')
                                     % rev[0])
                lastparent = None

            if git:
                self.diffopts().git = True

            for r in rev:
                p1, p2 = repo.changelog.parentrevs(r)
                n = repo.changelog.node(r)
                if p2 != revlog.nullrev:
                    raise util.Abort(_('cannot import merge revision %d') % r)
                if lastparent and lastparent != r:
                    raise util.Abort(_('revision %d is not the parent of %d')
                                     % (r, lastparent))
                lastparent = p1

                if not patchname:
                    patchname = normname('%d.diff' % r)
                checkseries(patchname)
                checkfile(patchname)
                self.full_series.insert(0, patchname)

                patchf = self.opener(patchname, "w")
                patch.export(repo, [n], fp=patchf, opts=self.diffopts())
                patchf.close()

                se = statusentry(revlog.hex(n), patchname)
                self.applied.insert(0, se)

                added.append(patchname)
                patchname = None
            self.parse_series()
            self.applied_dirty = 1

        for filename in files:
            if existing:
                if filename == '-':
                    raise util.Abort(_('-e is incompatible with import from -'))
                if not patchname:
                    patchname = normname(filename)
                if not os.path.isfile(self.join(patchname)):
                    raise util.Abort(_("patch %s does not exist") % patchname)
            else:
                try:
                    if filename == '-':
                        if not patchname:
                            raise util.Abort(_('need --name to import a patch from -'))
                        text = sys.stdin.read()
                    else:
                        text = file(filename).read()
                except IOError:
                    raise util.Abort(_("unable to read %s") % patchname)
                if not patchname:
                    patchname = normname(os.path.basename(filename))
                checkfile(patchname)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
            checkseries(patchname)
            index = self.full_series_end() + i
            self.full_series[index:index] = [patchname]
            self.parse_series()
            self.ui.warn("adding %s to series file\n" % patchname)
            i += 1
            added.append(patchname)
            patchname = None
        self.series_dirty = 1
        qrepo = self.qrepo()
        if qrepo:
            qrepo.add(added)

def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    With --rev, mq will stop managing the named revisions. The
    patches must be applied and at the base of the stack. This option
    is useful when the patches have been applied upstream.

    Otherwise, the patches must not be applied.

    With --keep, the patch files are preserved in the patch directory."""
    q = repo.mq
    q.delete(repo, patches, opts)
    q.save_dirty()
    return 0

def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = len(q.applied)
    if not end:
        return

    return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))

def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.series_end()
    q.qseries(repo, start=start, summary=opts.get('summary'))

def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch will have the same name as its source file unless you
    give it a new one with --name.

    You can register an existing patch inside the patch directory
    with the --existing flag.

    With --force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with --rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With --git, patches imported with --rev will use the git diff
    format.
    """
    q = repo.mq
    q.qimport(repo, filename, patchname=opts['name'],
              existing=opts['existing'], force=opts['force'], rev=opts['rev'],
              git=opts['git'])
    q.save_dirty()
    return 0

def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c is
    specified, qinit will create a separate nested repository
    for patches. Use qcommit to commit changes to this queue
    repository."""
    q = repo.mq
    r = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    if r:
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wopener('.hgignore', 'w')
            fp.write('syntax: glob\n')
            fp.write('status\n')
            fp.write('guards\n')
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wopener('series', 'w').close()
        r.add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0

def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If the source is local, the destination will have no patches applied.
    If the source is remote, this command cannot check whether patches are
    applied there, so it cannot guarantee that patches are not applied in
    the destination. If you clone a remote repository, make sure it has no
    patches applied before you do.

    The patch repository is looked for in <src>/.hg/patches by default.
    Use -p <url> to change it.
    '''
    commands.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            qbase = revlog.bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                destrev = sr.parents(qbase)[0]
    ui.note(_('cloning main repo\n'))
    sr, dr = hg.clone(ui, sr, dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repo\n'))
    spr, dpr = hg.clone(ui, opts['patches'] or (sr.url() + '/.hg/patches'),
                        dr.url() + '/.hg/patches',
                        pull=opts['pull'],
                        update=not opts['noupdate'],
                        stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            ui.note(_('stripping applied patches from destination repo\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repo\n'))
            hg.update(dr, dr.changelog.tip())
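
# Example qclone invocations (URL and destination hypothetical); -p points
# at a non-default patch repository:
#
#   $ hg qclone http://example.com/repo local-copy
#   $ hg qclone -p http://example.com/patch-repo http://example.com/repo local-copy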

def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    q = repo.mq
    r = q.qrepo()
    if not r: raise util.Abort('no queue repository')
    commands.commit(r.ui, r, *pats, **opts)

def series(ui, repo, **opts):
    """print the entire series file"""
    repo.mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0

def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    t = len(q.applied)
    if t:
        return q.qseries(repo, start=t-1, length=1, status='A',
                         summary=opts.get('summary'))
    else:
        ui.write("No patches applied\n")
        return 1

def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    end = q.series_end()
    if end == len(q.series):
        ui.write("All patches applied\n")
        return 1
    return q.qseries(repo, start=end, length=1, summary=opts.get('summary'))

def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    l = len(q.applied)
    if l == 1:
        ui.write("Only one patch applied\n")
        return 1
    if not l:
        ui.write("No patches applied\n")
        return 1
    return q.qseries(repo, start=l-2, length=1, status='A',
                     summary=opts.get('summary'))

def new(ui, repo, patch, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch
    (if any). It will refuse to run if there are any outstanding
    changes unless -f is specified, in which case the patch will
    be initialised with them.

    -e, -m or -l set the patch header as well as the commit message.
    If none is specified, the patch header is empty and the
    commit message is 'New patch: PATCH'"""
    q = repo.mq
    message = commands.logmessage(opts)
    if opts['edit']:
        message = ui.edit(message, ui.username())
    q.new(repo, patch, msg=message, force=opts['force'])
    q.save_dirty()
    return 0

def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (--git or [diff] git=1) to track copies and renames.
    """
    q = repo.mq
    message = commands.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        patch = q.applied[-1].name
        (message, comment, user, date, hasdiff) = q.readheaders(patch)
        message = ui.edit('\n'.join(message), user or ui.username())
    ret = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return ret

def diff(ui, repo, *pats, **opts):
    """diff of the current patch"""
    repo.mq.diff(repo, pats, opts)
    return 0

1642 1642 def fold(ui, repo, *files, **opts):
1643 1643 """fold the named patches into the current patch
1644 1644
1645 1645 Patches must not yet be applied. Each patch will be successively
1646 1646 applied to the current patch in the order given. If all the
1647 1647 patches apply successfully, the current patch will be refreshed
1648 1648 with the new cumulative patch, and the folded patches will
1649 1649 be deleted. With -k/--keep, the folded patch files will not
1650 1650 be removed afterwards.
1651 1651
1652 1652 The header for each folded patch will be concatenated with
1653 1653 the current patch header, separated by a line of '* * *'."""
1654 1654
1655 1655 q = repo.mq
1656 1656
1657 1657 if not files:
1658 1658 raise util.Abort(_('qfold requires at least one patch name'))
1659 1659 if not q.check_toppatch(repo):
1660 1660 raise util.Abort(_('No patches applied'))
1661 1661
1662 1662 message = commands.logmessage(opts)
1663 1663 if opts['edit']:
1664 1664 if message:
1665 1665 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
1666 1666
1667 1667 parent = q.lookup('qtip')
1668 1668 patches = []
1669 1669 messages = []
1670 1670 for f in files:
1671 1671 p = q.lookup(f)
1672 1672 if p in patches or p == parent:
1673 1673 ui.warn(_('Skipping already folded patch %s\n') % p)
1674 1674 if q.isapplied(p):
1675 1675 raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
1676 1676 patches.append(p)
1677 1677
1678 1678 for p in patches:
1679 1679 if not message:
1680 1680 messages.append(q.readheaders(p)[0])
1681 1681 pf = q.join(p)
1682 1682 (patchsuccess, files, fuzz) = q.patch(repo, pf)
1683 1683 if not patchsuccess:
1684 1684 raise util.Abort(_('Error folding patch %s') % p)
1685 1685 patch.updatedir(ui, repo, files)
1686 1686
1687 1687 if not message:
1688 1688 message, comments, user = q.readheaders(parent)[0:3]
1689 1689 for msg in messages:
1690 1690 message.append('* * *')
1691 1691 message.extend(msg)
1692 1692 message = '\n'.join(message)
1693 1693
1694 1694 if opts['edit']:
1695 1695 message = ui.edit(message, user or ui.username())
1696 1696
1697 1697 q.refresh(repo, msg=message)
1698 1698 q.delete(repo, patches, opts)
1699 1699 q.save_dirty()
1700 1700
1701 1701 def guard(ui, repo, *args, **opts):
1702 1702 '''set or print guards for a patch
1703 1703
1704 1704 Guards control whether a patch can be pushed. A patch with no
1705 1705 guards is always pushed. A patch with a positive guard ("+foo") is
1706 1706 pushed only if the qselect command has activated it. A patch with
1707 1707 a negative guard ("-foo") is never pushed if the qselect command
1708 1708 has activated it.
1709 1709
1710 1710 With no arguments, print the currently active guards.
1711 1711 With arguments, set guards for the named patch.
1712 1712
1713 1713 To set a negative guard "-foo" on topmost patch ("--" is needed so
1714 1714 hg will not interpret "-foo" as an option):
1715 1715 hg qguard -- -foo
1716 1716
1717 1717 To set guards on another patch:
1718 1718 hg qguard other.patch +2.6.17 -stable
1719 1719 '''
1720 1720 def status(idx):
1721 1721 guards = q.series_guards[idx] or ['unguarded']
1722 1722 ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
1723 1723 q = repo.mq
1724 1724 patch = None
1725 1725 args = list(args)
1726 1726 if opts['list']:
1727 1727 if args or opts['none']:
1728 1728 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
1729 1729 for i in xrange(len(q.series)):
1730 1730 status(i)
1731 1731 return
1732 1732 if not args or args[0][0:1] in '-+':
1733 1733 if not q.applied:
1734 1734 raise util.Abort(_('no patches applied'))
1735 1735 patch = q.applied[-1].name
1736 1736 if patch is None and args[0][0:1] not in '-+':
1737 1737 patch = args.pop(0)
1738 1738 if patch is None:
1739 1739 raise util.Abort(_('no patch to work with'))
1740 1740 if args or opts['none']:
1741 q.set_guards(q.find_series(patch), args)
1741 idx = q.find_series(patch)
1742 if idx is None:
1743 raise util.Abort(_('no patch named %s') % patch)
1744 q.set_guards(idx, args)
1742 1745 q.save_dirty()
1743 1746 else:
1744 1747 status(q.series.index(q.lookup(patch)))
1745 1748
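A minimal sketch of the validation this merge adds to qguard: find_series returns None when the patch is not in the series file, and the new code aborts instead of handing None to set_guards. The names and values below are made up for illustration.

    series = ['foo.patch', 'bar.patch']            # hypothetical series contents

    def find_series(name):
        # index of the patch in the series file, or None if absent
        return series.index(name) if name in series else None

    def set_guards(name, guards):
        idx = find_series(name)
        if idx is None:                            # the check added above
            raise ValueError('no patch named %s' % name)
        print('%s: %s' % (series[idx], ' '.join(guards)))

    set_guards('bar.patch', ['+stable'])           # bar.patch: +stable
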
1746 1749 def header(ui, repo, patch=None):
1747 1750 """Print the header of the topmost or specified patch"""
1748 1751 q = repo.mq
1749 1752
1750 1753 if patch:
1751 1754 patch = q.lookup(patch)
1752 1755 else:
1753 1756 if not q.applied:
1754 1757 ui.write('No patches applied\n')
1755 1758 return 1
1756 1759 patch = q.lookup('qtip')
1757 1760 message = repo.mq.readheaders(patch)[0]
1758 1761
1759 1762 ui.write('\n'.join(message) + '\n')
1760 1763
1761 1764 def lastsavename(path):
1762 1765 (directory, base) = os.path.split(path)
1763 1766 names = os.listdir(directory)
1764 1767 namere = re.compile("%s.([0-9]+)" % base)
1765 1768 maxindex = None
1766 1769 maxname = None
1767 1770 for f in names:
1768 1771 m = namere.match(f)
1769 1772 if m:
1770 1773 index = int(m.group(1))
1771 1774 if maxindex is None or index > maxindex:
1772 1775 maxindex = index
1773 1776 maxname = f
1774 1777 if maxname:
1775 1778 return (os.path.join(directory, maxname), maxindex)
1776 1779 return (None, None)
1777 1780
1778 1781 def savename(path):
1779 1782 (last, index) = lastsavename(path)
1780 1783 if last is None:
1781 1784 index = 0
1782 1785 newpath = path + ".%d" % (index + 1)
1783 1786 return newpath
1784 1787
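A quick illustration of the save-name numbering implemented by lastsavename/savename above: existing saves "patches.1" and "patches.3" make the next save "patches.4". The directory and file names are fabricated for the example.

    import os, re, tempfile

    d = tempfile.mkdtemp()
    for n in (1, 3):                               # pre-existing saves
        open(os.path.join(d, 'patches.%d' % n), 'w').close()

    namere = re.compile(r'patches\.([0-9]+)$')
    indexes = [int(m.group(1))
               for m in map(namere.match, os.listdir(d)) if m]
    print('patches.%d' % (max(indexes) + 1))       # patches.4
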
1785 1788 def push(ui, repo, patch=None, **opts):
1786 1789 """push the next patch onto the stack"""
1787 1790 q = repo.mq
1788 1791 mergeq = None
1789 1792
1790 1793 if opts['all']:
1791 1794 if not q.series:
1792 1795 ui.warn(_('no patches in series\n'))
1793 1796 return 0
1794 1797 patch = q.series[-1]
1795 1798 if opts['merge']:
1796 1799 if opts['name']:
1797 1800 newpath = opts['name']
1798 1801 else:
1799 1802 newpath, i = lastsavename(q.path)
1800 1803 if not newpath:
1801 1804 ui.warn("no saved queues found, please use -n\n")
1802 1805 return 1
1803 1806 mergeq = queue(ui, repo.join(""), newpath)
1804 1807 ui.warn("merging with queue at: %s\n" % mergeq.path)
1805 1808 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
1806 1809 mergeq=mergeq)
1807 1810 q.save_dirty()
1808 1811 return ret
1809 1812
1810 1813 def pop(ui, repo, patch=None, **opts):
1811 1814 """pop the current patch off the stack"""
1812 1815 localupdate = True
1813 1816 if opts['name']:
1814 1817 q = queue(ui, repo.join(""), repo.join(opts['name']))
1815 1818 ui.warn('using patch queue: %s\n' % q.path)
1816 1819 localupdate = False
1817 1820 else:
1818 1821 q = repo.mq
1819 1822 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
1820 1823 all=opts['all'])
1821 1824 q.save_dirty()
1822 1825 return ret
1823 1826
1824 1827 def rename(ui, repo, patch, name=None, **opts):
1825 1828 """rename a patch
1826 1829
1827 1830 With one argument, renames the current patch to PATCH1.
1828 1831 With two arguments, renames PATCH1 to PATCH2."""
1829 1832
1830 1833 q = repo.mq
1831 1834
1832 1835 if not name:
1833 1836 name = patch
1834 1837 patch = None
1835 1838
1836 1839 if patch:
1837 1840 patch = q.lookup(patch)
1838 1841 else:
1839 1842 if not q.applied:
1840 1843 ui.write(_('No patches applied\n'))
1841 1844 return
1842 1845 patch = q.lookup('qtip')
1843 1846 absdest = q.join(name)
1844 1847 if os.path.isdir(absdest):
1845 1848 name = normname(os.path.join(name, os.path.basename(patch)))
1846 1849 absdest = q.join(name)
1847 1850 if os.path.exists(absdest):
1848 1851 raise util.Abort(_('%s already exists') % absdest)
1849 1852
1850 1853 if name in q.series:
1851 1854 raise util.Abort(_('A patch named %s already exists in the series file') % name)
1852 1855
1853 1856 if ui.verbose:
1854 1857 ui.write('Renaming %s to %s\n' % (patch, name))
1855 1858 i = q.find_series(patch)
1856 1859 guards = q.guard_re.findall(q.full_series[i])
1857 1860 q.full_series[i] = name + ''.join([' #' + g for g in guards])
1858 1861 q.parse_series()
1859 1862 q.series_dirty = 1
1860 1863
1861 1864 info = q.isapplied(patch)
1862 1865 if info:
1863 1866 q.applied[info[0]] = statusentry(info[1], name)
1864 1867 q.applied_dirty = 1
1865 1868
1866 1869 util.rename(q.join(patch), absdest)
1867 1870 r = q.qrepo()
1868 1871 if r:
1869 1872 wlock = r.wlock()
1870 1873 if r.dirstate.state(name) == 'r':
1871 1874 r.undelete([name], wlock)
1872 1875 r.copy(patch, name, wlock)
1873 1876 r.remove([patch], False, wlock)
1874 1877
1875 1878 q.save_dirty()
1876 1879
1877 1880 def restore(ui, repo, rev, **opts):
1878 1881 """restore the queue state saved by a rev"""
1879 1882 rev = repo.lookup(rev)
1880 1883 q = repo.mq
1881 1884 q.restore(repo, rev, delete=opts['delete'],
1882 1885 qupdate=opts['update'])
1883 1886 q.save_dirty()
1884 1887 return 0
1885 1888
1886 1889 def save(ui, repo, **opts):
1887 1890 """save current queue state"""
1888 1891 q = repo.mq
1889 1892 message = commands.logmessage(opts)
1890 1893 ret = q.save(repo, msg=message)
1891 1894 if ret:
1892 1895 return ret
1893 1896 q.save_dirty()
1894 1897 if opts['copy']:
1895 1898 path = q.path
1896 1899 if opts['name']:
1897 1900 newpath = os.path.join(q.basepath, opts['name'])
1898 1901 if os.path.exists(newpath):
1899 1902 if not os.path.isdir(newpath):
1900 1903 raise util.Abort(_('destination %s exists and is not '
1901 1904 'a directory') % newpath)
1902 1905 if not opts['force']:
1903 1906 raise util.Abort(_('destination %s exists, '
1904 1907 'use -f to force') % newpath)
1905 1908 else:
1906 1909 newpath = savename(path)
1907 1910 ui.warn("copy %s to %s\n" % (path, newpath))
1908 1911 util.copyfiles(path, newpath)
1909 1912 if opts['empty']:
1910 1913 try:
1911 1914 os.unlink(q.join(q.status_path))
1912 1915 except:
1913 1916 pass
1914 1917 return 0
1915 1918
1916 1919 def strip(ui, repo, rev, **opts):
1917 1920 """strip a revision and all later revs on the same branch"""
1918 1921 rev = repo.lookup(rev)
1919 1922 backup = 'all'
1920 1923 if opts['backup']:
1921 1924 backup = 'strip'
1922 1925 elif opts['nobackup']:
1923 1926 backup = 'none'
1924 1927 update = repo.dirstate.parents()[0] != revlog.nullid
1925 1928 repo.mq.strip(repo, rev, backup=backup, update=update)
1926 1929 return 0
1927 1930
1928 1931 def select(ui, repo, *args, **opts):
1929 1932 '''set or print guarded patches to push
1930 1933
1931 1934 Use the qguard command to set or print guards on patch, then use
1932 1935 qselect to tell mq which guards to use. A patch will be pushed if it
1933 1936 has no guards or any positive guards match the currently selected guard,
1934 1937 but will not be pushed if any negative guards match the current guard.
1935 1938 For example:
1936 1939
1937 1940 qguard foo.patch -stable (negative guard)
1938 1941 qguard bar.patch +stable (positive guard)
1939 1942 qselect stable
1940 1943
1941 1944 This activates the "stable" guard. mq will skip foo.patch (because
1942 1945 it has a negative match) but push bar.patch (because it
1943 1946 has a positive match).
1944 1947
1945 1948 With no arguments, prints the currently active guards.
1946 1949 With one argument, sets the active guard.
1947 1950
1948 1951 Use -n/--none to deactivate guards (no other arguments needed).
1949 1952 When no guards are active, patches with positive guards are skipped
1950 1953 and patches with negative guards are pushed.
1951 1954
1952 1955 qselect can change the guards on applied patches. It does not pop
1953 1956 guarded patches by default. Use --pop to pop back to the last applied
1954 1957 patch that is not guarded. Use --reapply (which implies --pop) to push
1955 1958 back to the current patch afterwards, but skip guarded patches.
1956 1959
1957 1960 Use -s/--series to print a list of all guards in the series file (no
1958 1961 other arguments needed). Use -v for more information.'''
1959 1962
1960 1963 q = repo.mq
1961 1964 guards = q.active()
1962 1965 if args or opts['none']:
1963 1966 old_unapplied = q.unapplied(repo)
1964 1967 old_guarded = [i for i in xrange(len(q.applied)) if
1965 1968 not q.pushable(i)[0]]
1966 1969 q.set_active(args)
1967 1970 q.save_dirty()
1968 1971 if not args:
1969 1972 ui.status(_('guards deactivated\n'))
1970 1973 if not opts['pop'] and not opts['reapply']:
1971 1974 unapplied = q.unapplied(repo)
1972 1975 guarded = [i for i in xrange(len(q.applied))
1973 1976 if not q.pushable(i)[0]]
1974 1977 if len(unapplied) != len(old_unapplied):
1975 1978 ui.status(_('number of unguarded, unapplied patches has '
1976 1979 'changed from %d to %d\n') %
1977 1980 (len(old_unapplied), len(unapplied)))
1978 1981 if len(guarded) != len(old_guarded):
1979 1982 ui.status(_('number of guarded, applied patches has changed '
1980 1983 'from %d to %d\n') %
1981 1984 (len(old_guarded), len(guarded)))
1982 1985 elif opts['series']:
1983 1986 guards = {}
1984 1987 noguards = 0
1985 1988 for gs in q.series_guards:
1986 1989 if not gs:
1987 1990 noguards += 1
1988 1991 for g in gs:
1989 1992 guards.setdefault(g, 0)
1990 1993 guards[g] += 1
1991 1994 if ui.verbose:
1992 1995 guards['NONE'] = noguards
1993 1996 guards = guards.items()
1994 1997 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
1995 1998 if guards:
1996 1999 ui.note(_('guards in series file:\n'))
1997 2000 for guard, count in guards:
1998 2001 ui.note('%2d ' % count)
1999 2002 ui.write(guard, '\n')
2000 2003 else:
2001 2004 ui.note(_('no guards in series file\n'))
2002 2005 else:
2003 2006 if guards:
2004 2007 ui.note(_('active guards:\n'))
2005 2008 for g in guards:
2006 2009 ui.write(g, '\n')
2007 2010 else:
2008 2011 ui.write(_('no active guards\n'))
2009 2012 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2010 2013 popped = False
2011 2014 if opts['pop'] or opts['reapply']:
2012 2015 for i in xrange(len(q.applied)):
2013 2016 pushable, reason = q.pushable(i)
2014 2017 if not pushable:
2015 2018 ui.status(_('popping guarded patches\n'))
2016 2019 popped = True
2017 2020 if i == 0:
2018 2021 q.pop(repo, all=True)
2019 2022 else:
2020 2023 q.pop(repo, i-1)
2021 2024 break
2022 2025 if popped:
2023 2026 try:
2024 2027 if reapply:
2025 2028 ui.status(_('reapplying unguarded patches\n'))
2026 2029 q.push(repo, reapply)
2027 2030 finally:
2028 2031 q.save_dirty()
2029 2032
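The guard-matching rule the qselect docstring describes, reduced to a standalone predicate. This is an illustration of the semantics only, not the mq internals.

    def pushable(patch_guards, active):
        if not patch_guards:                       # unguarded: always pushable
            return True
        # any matching negative guard blocks the push
        if any(g[1:] in active for g in patch_guards if g.startswith('-')):
            return False
        # otherwise pushable unless positive guards exist and none match
        pos = [g for g in patch_guards if g.startswith('+')]
        return not pos or any(g[1:] in active for g in pos)

    print(pushable(['-stable'], set(['stable'])))  # False: negative match
    print(pushable(['+stable'], set(['stable'])))  # True: positive match
    print(pushable(['+stable'], set()))            # False: skipped when inactive
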
2030 2033 def reposetup(ui, repo):
2031 2034 class mqrepo(repo.__class__):
2032 2035 def abort_if_wdir_patched(self, errmsg, force=False):
2033 2036 if self.mq.applied and not force:
2034 2037 parent = revlog.hex(self.dirstate.parents()[0])
2035 2038 if parent in [s.rev for s in self.mq.applied]:
2036 2039 raise util.Abort(errmsg)
2037 2040
2038 2041 def commit(self, *args, **opts):
2039 2042 if len(args) >= 6:
2040 2043 force = args[5]
2041 2044 else:
2042 2045 force = opts.get('force')
2043 2046 self.abort_if_wdir_patched(
2044 2047 _('cannot commit over an applied mq patch'),
2045 2048 force)
2046 2049
2047 2050 return super(mqrepo, self).commit(*args, **opts)
2048 2051
2049 2052 def push(self, remote, force=False, revs=None):
2050 2053 if self.mq.applied and not force and not revs:
2051 2054 raise util.Abort(_('source has mq patches applied'))
2052 2055 return super(mqrepo, self).push(remote, force, revs)
2053 2056
2054 2057 def tags(self):
2055 2058 if self.tagscache:
2056 2059 return self.tagscache
2057 2060
2058 2061 tagscache = super(mqrepo, self).tags()
2059 2062
2060 2063 q = self.mq
2061 2064 if not q.applied:
2062 2065 return tagscache
2063 2066
2064 2067 mqtags = [(patch.rev, patch.name) for patch in q.applied]
2065 2068 mqtags.append((mqtags[-1][0], 'qtip'))
2066 2069 mqtags.append((mqtags[0][0], 'qbase'))
2067 2070 for patch in mqtags:
2068 2071 if patch[1] in tagscache:
2069 2072 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2070 2073 else:
2071 2074 tagscache[patch[1]] = revlog.bin(patch[0])
2072 2075
2073 2076 return tagscache
2074 2077
2075 2078 def _branchtags(self):
2076 2079 q = self.mq
2077 2080 if not q.applied:
2078 2081 return super(mqrepo, self)._branchtags()
2079 2082
2080 2083 self.branchcache = {} # avoid recursion in changectx
2081 2084 cl = self.changelog
2082 2085 partial, last, lrev = self._readbranchcache()
2083 2086
2084 2087 qbase = cl.rev(revlog.bin(q.applied[0].rev))
2085 2088 start = lrev + 1
2086 2089 if start < qbase:
2087 2090 # update the cache (excluding the patches) and save it
2088 2091 self._updatebranchcache(partial, lrev+1, qbase)
2089 2092 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2090 2093 start = qbase
2091 2094 # if start == qbase, the cache is as updated as it should be.
2092 2095 # if start > qbase, the cache includes (part of) the patches.
2093 2096 # we might as well use it, but we won't save it.
2094 2097
2095 2098 # update the cache up to the tip
2096 2099 self._updatebranchcache(partial, start, cl.count())
2097 2100
2098 2101 return partial
2099 2102
2100 2103 if repo.local():
2101 2104 repo.__class__ = mqrepo
2102 2105 repo.mq = queue(ui, repo.join(""))
2103 2106
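How the mqrepo.tags() override above folds applied patches into the tag table, sketched with made-up revision hashes:

    applied = [('c4d5e6...', 'first.patch'),       # (rev, name), fabricated
               ('a1b2c3...', 'second.patch')]
    tags = {'tip': '...'}                          # pre-existing repo tags
    mqtags = applied + [(applied[-1][0], 'qtip'), (applied[0][0], 'qbase')]
    for rev, name in mqtags:
        if name in tags:
            print('Tag %s overrides mq patch of the same name' % name)
        else:
            tags[name] = rev
    print(sorted(tags))  # ['first.patch', 'qbase', 'qtip', 'second.patch', 'tip']
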
2104 2107 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2105 2108
2106 2109 cmdtable = {
2107 2110 "qapplied": (applied, [] + seriesopts, 'hg qapplied [-s] [PATCH]'),
2108 2111 "qclone": (clone,
2109 2112 [('', 'pull', None, _('use pull protocol to copy metadata')),
2110 2113 ('U', 'noupdate', None, _('do not update the new working directories')),
2111 2114 ('', 'uncompressed', None,
2112 2115 _('use uncompressed transfer (fast over LAN)')),
2113 2116 ('e', 'ssh', '', _('specify ssh command to use')),
2114 2117 ('p', 'patches', '', _('location of source patch repo')),
2115 2118 ('', 'remotecmd', '',
2116 2119 _('specify hg command to run on the remote side'))],
2117 2120 'hg qclone [OPTION]... SOURCE [DEST]'),
2118 2121 "qcommit|qci":
2119 2122 (commit,
2120 2123 commands.table["^commit|ci"][1],
2121 2124 'hg qcommit [OPTION]... [FILE]...'),
2122 2125 "^qdiff": (diff,
2123 2126 [('g', 'git', None, _('use git extended diff format')),
2124 2127 ('I', 'include', [], _('include names matching the given patterns')),
2125 2128 ('X', 'exclude', [], _('exclude names matching the given patterns'))],
2126 2129 'hg qdiff [-I] [-X] [FILE]...'),
2127 2130 "qdelete|qremove|qrm":
2128 2131 (delete,
2129 2132 [('k', 'keep', None, _('keep patch file')),
2130 2133 ('r', 'rev', [], _('stop managing a revision'))],
2131 2134 'hg qdelete [-k] [-r REV]... PATCH...'),
2132 2135 'qfold':
2133 2136 (fold,
2134 2137 [('e', 'edit', None, _('edit patch header')),
2135 2138 ('k', 'keep', None, _('keep folded patch files'))
2136 2139 ] + commands.commitopts,
2137 2140 'hg qfold [-e] [-m <text>] [-l <file>] PATCH...'),
2138 2141 'qguard': (guard, [('l', 'list', None, _('list all patches and guards')),
2139 2142 ('n', 'none', None, _('drop all guards'))],
2140 2143 'hg qguard [PATCH] [+GUARD...] [-GUARD...]'),
2141 2144 'qheader': (header, [],
2142 2145 _('hg qheader [PATCH]')),
2143 2146 "^qimport":
2144 2147 (qimport,
2145 2148 [('e', 'existing', None, 'import file in patch dir'),
2146 2149 ('n', 'name', '', 'patch file name'),
2147 2150 ('f', 'force', None, 'overwrite existing files'),
2148 2151 ('r', 'rev', [], 'place existing revisions under mq control'),
2149 2152 ('g', 'git', None, _('use git extended diff format'))],
2150 2153 'hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...'),
2151 2154 "^qinit":
2152 2155 (init,
2153 2156 [('c', 'create-repo', None, 'create queue repository')],
2154 2157 'hg qinit [-c]'),
2155 2158 "qnew":
2156 2159 (new,
2157 2160 [('e', 'edit', None, _('edit commit message')),
2158 2161 ('f', 'force', None, _('import uncommitted changes into patch'))
2159 2162 ] + commands.commitopts,
2160 2163 'hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH'),
2161 2164 "qnext": (next, [] + seriesopts, 'hg qnext [-s]'),
2162 2165 "qprev": (prev, [] + seriesopts, 'hg qprev [-s]'),
2163 2166 "^qpop":
2164 2167 (pop,
2165 2168 [('a', 'all', None, 'pop all patches'),
2166 2169 ('n', 'name', '', 'queue name to pop'),
2167 2170 ('f', 'force', None, 'forget any local changes')],
2168 2171 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
2169 2172 "^qpush":
2170 2173 (push,
2171 2174 [('f', 'force', None, 'apply if the patch has rejects'),
2172 2175 ('l', 'list', None, 'list patch name in commit text'),
2173 2176 ('a', 'all', None, 'apply all patches'),
2174 2177 ('m', 'merge', None, 'merge from another queue'),
2175 2178 ('n', 'name', '', 'merge queue name')],
2176 2179 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
2177 2180 "^qrefresh":
2178 2181 (refresh,
2179 2182 [('e', 'edit', None, _('edit commit message')),
2180 2183 ('g', 'git', None, _('use git extended diff format')),
2181 2184 ('s', 'short', None, 'refresh only files already in the patch'),
2182 2185 ('I', 'include', [], _('include names matching the given patterns')),
2183 2186 ('X', 'exclude', [], _('exclude names matching the given patterns'))
2184 2187 ] + commands.commitopts,
2185 2188 'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] FILES...'),
2186 2189 'qrename|qmv':
2187 2190 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
2188 2191 "qrestore":
2189 2192 (restore,
2190 2193 [('d', 'delete', None, 'delete save entry'),
2191 2194 ('u', 'update', None, 'update queue working dir')],
2192 2195 'hg qrestore [-d] [-u] REV'),
2193 2196 "qsave":
2194 2197 (save,
2195 2198 [('c', 'copy', None, 'copy patch directory'),
2196 2199 ('n', 'name', '', 'copy directory name'),
2197 2200 ('e', 'empty', None, 'clear queue status file'),
2198 2201 ('f', 'force', None, 'force copy')] + commands.commitopts,
2199 2202 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
2200 2203 "qselect": (select,
2201 2204 [('n', 'none', None, _('disable all guards')),
2202 2205 ('s', 'series', None, _('list all guards in series file')),
2203 2206 ('', 'pop', None,
2204 2207 _('pop to before first guarded applied patch')),
2205 2208 ('', 'reapply', None, _('pop, then reapply patches'))],
2206 2209 'hg qselect [OPTION...] [GUARD...]'),
2207 2210 "qseries":
2208 2211 (series,
2209 2212 [('m', 'missing', None, 'print patches not in series')] + seriesopts,
2210 2213 'hg qseries [-ms]'),
2211 2214 "^strip":
2212 2215 (strip,
2213 2216 [('f', 'force', None, 'force multi-head removal'),
2214 2217 ('b', 'backup', None, 'bundle unrelated changesets'),
2215 2218 ('n', 'nobackup', None, 'no backups')],
2216 2219 'hg strip [-f] [-b] [-n] REV'),
2217 2220 "qtop": (top, [] + seriesopts, 'hg qtop [-s]'),
2218 2221 "qunapplied": (unapplied, [] + seriesopts, 'hg qunapplied [-s] [PATCH]'),
2219 2222 }
@@ -1,373 +1,373 b''
1 1 /*
2 2 bdiff.c - efficient binary diff extension for Mercurial
3 3
4 4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 5
6 6 This software may be used and distributed according to the terms of
7 7 the GNU General Public License, incorporated herein by reference.
8 8
9 9 Based roughly on Python difflib
10 10 */
11 11
12 12 #include <Python.h>
13 13 #include <stdlib.h>
14 14 #include <string.h>
15 15
16 16 #if defined __hpux || defined __SUNPRO_C || defined _AIX
17 17 # define inline
18 18 #endif
19 19
20 20 #ifdef _WIN32
21 21 #ifdef _MSC_VER
22 22 #define inline __inline
23 23 typedef unsigned long uint32_t;
24 24 #else
25 25 #include <stdint.h>
26 26 #endif
27 27 static uint32_t htonl(uint32_t x)
28 28 {
29 29 return ((x & 0x000000ffUL) << 24) |
30 30 ((x & 0x0000ff00UL) << 8) |
31 31 ((x & 0x00ff0000UL) >> 8) |
32 32 ((x & 0xff000000UL) >> 24);
33 33 }
34 34 #else
35 35 #include <sys/types.h>
36 36 #ifdef __BEOS__
37 37 #include <ByteOrder.h>
38 38 #else
39 39 #include <arpa/inet.h>
40 40 #endif
41 41 #include <inttypes.h>
42 42 #endif
43 43
44 44 struct line {
45 45 int h, len, n, e;
46 46 const char *l;
47 47 };
48 48
49 49 struct pos {
50 50 int pos, len;
51 51 };
52 52
53 53 struct hunk {
54 54 int a1, a2, b1, b2;
55 55 };
56 56
57 57 struct hunklist {
58 58 struct hunk *base, *head;
59 59 };
60 60
61 61 static inline uint32_t rol32(uint32_t word, unsigned int shift)
62 62 {
63 63 return (word << shift) | (word >> (32 - shift));
64 64 }
65 65
66 66 int splitlines(const char *a, int len, struct line **lr)
67 67 {
68 68 int g, h, i;
69 69 const char *p, *b = a;
70 70 struct line *l;
71 71
72 72 /* count the lines */
73 73 i = 1; /* extra line for sentinel */
74 74 for (p = a; p < a + len; p++)
75 75 if (*p == '\n' || p == a + len - 1)
76 76 i++;
77 77
78 78 *lr = l = (struct line *)malloc(sizeof(struct line) * i);
79 79 if (!l)
80 80 return -1;
81 81
82 82 /* build the line array and calculate hashes */
83 83 h = 0;
84 84 for (p = a; p < a + len; p++) {
85 85 /*
86 86 * a simple hash from GNU diff, with better collision
87 87 * resistance from hashpjw. this slows down common
88 88 * case by 10%, but speeds up worst case by 100x.
89 89 */
90 90 h = *p + rol32(h, 7);
91 91 if ((g = h & 0xf0000000)) {
92 92 h ^= g >> 24;
93 93 h ^= g;
94 94 }
95 95 if (*p == '\n' || p == a + len - 1) {
96 96 l->len = p - b + 1;
97 97 l->h = h * l->len;
98 98 l->l = b;
99 99 l->n = -1;
100 100 l++;
101 101 b = p + 1;
102 102 h = 0;
103 103 }
104 104 }
105 105
106 106 /* set up a sentinel */
107 107 l->h = l->len = 0;
108 108 l->l = a + len;
109 109 return i - 1;
110 110 }
111 111
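The per-line hash computed above, restated in Python with explicit 32-bit wraparound (illustration only; like the C code, it folds in the hashpjw high-bit trick and finally multiplies by the line length):

    def line_hash(line):
        h = 0
        for ch in line:
            h = (ord(ch) + ((h << 7) | (h >> 25))) & 0xffffffff  # h = *p + rol32(h, 7)
            g = h & 0xf0000000
            if g:                                  # hashpjw-style fold
                h ^= g >> 24
                h ^= g
        return (h * len(line)) & 0xffffffff

    print(hex(line_hash('hello\n')))
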
112 112 int inline cmp(struct line *a, struct line *b)
113 113 {
114 114 return a->h != b->h || a->len != b->len || memcmp(a->l, b->l, a->len);
115 115 }
116 116
117 117 static int equatelines(struct line *a, int an, struct line *b, int bn)
118 118 {
119 119 int i, j, buckets = 1, t;
120 120 struct pos *h;
121 121
122 122 /* build a hash table of the next highest power of 2 */
123 123 while (buckets < bn + 1)
124 124 buckets *= 2;
125 125
126 126 h = (struct pos *)malloc(buckets * sizeof(struct pos));
127 127 buckets = buckets - 1;
128 128 if (!h)
129 129 return 0;
130 130
131 131 /* clear the hash table */
132 132 for (i = 0; i <= buckets; i++) {
133 133 h[i].pos = -1;
134 134 h[i].len = 0;
135 135 }
136 136
137 137 /* add lines to the hash table chains */
138 138 for (i = bn - 1; i >= 0; i--) {
139 139 /* find the equivalence class */
140 140 for (j = b[i].h & buckets; h[j].pos != -1;
141 141 j = (j + 1) & buckets)
142 142 if (!cmp(b + i, b + h[j].pos))
143 143 break;
144 144
145 145 /* add to the head of the equivalence class */
146 146 b[i].n = h[j].pos;
147 147 b[i].e = j;
148 148 h[j].pos = i;
149 149 h[j].len++; /* keep track of popularity */
150 150 }
151 151
152 152 /* compute popularity threshold */
153 153 t = (bn >= 200) ? bn / 100 : bn + 1;
154 154
155 155 /* match items in a to their equivalence class in b */
156 156 for (i = 0; i < an; i++) {
157 157 /* find the equivalence class */
158 158 for (j = a[i].h & buckets; h[j].pos != -1;
159 159 j = (j + 1) & buckets)
160 160 if (!cmp(a + i, b + h[j].pos))
161 161 break;
162 162
163 163 a[i].e = j; /* use equivalence class for quick compare */
164 164 if (h[j].len <= t)
165 165 a[i].n = h[j].pos; /* point to head of match list */
166 166 else
167 167 a[i].n = -1; /* too popular */
168 168 }
169 169
170 170 /* discard hash tables */
171 171 free(h);
172 172 return 1;
173 173 }
174 174
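The popularity threshold computed above (t = bn/100 for bn >= 200, else bn + 1), in isolation: lines whose equivalence class in b is larger than t are never used as match anchors, which keeps pathological inputs fast.

    def threshold(bn):
        return bn // 100 if bn >= 200 else bn + 1

    print(threshold(150))    # 151: no line is "too popular"
    print(threshold(1000))   # 10: classes larger than 10 lines are skipped
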
175 175 static int longest_match(struct line *a, struct line *b, struct pos *pos,
176 176 int a1, int a2, int b1, int b2, int *omi, int *omj)
177 177 {
178 178 int mi = a1, mj = b1, mk = 0, mb = 0, i, j, k;
179 179
180 180 for (i = a1; i < a2; i++) {
181 181 /* skip things before the current block */
182 182 for (j = a[i].n; j != -1 && j < b1; j = b[j].n)
183 183 ;
184 184
185 185 /* loop through all lines match a[i] in b */
186 186 for (; j != -1 && j < b2; j = b[j].n) {
187 187 /* does this extend an earlier match? */
188 188 if (i > a1 && j > b1 && pos[j - 1].pos == i - 1)
189 189 k = pos[j - 1].len + 1;
190 190 else
191 191 k = 1;
192 192 pos[j].pos = i;
193 193 pos[j].len = k;
194 194
195 195 /* best match so far? */
196 196 if (k > mk) {
197 197 mi = i;
198 198 mj = j;
199 199 mk = k;
200 200 }
201 201 }
202 202 }
203 203
204 204 if (mk) {
205 205 mi = mi - mk + 1;
206 206 mj = mj - mk + 1;
207 207 }
208 208
209 209 /* expand match to include neighboring popular lines */
210 210 while (mi - mb > a1 && mj - mb > b1 &&
211 211 a[mi - mb - 1].e == b[mj - mb - 1].e)
212 212 mb++;
213 213 while (mi + mk < a2 && mj + mk < b2 &&
214 214 a[mi + mk].e == b[mj + mk].e)
215 215 mk++;
216 216
217 217 *omi = mi - mb;
218 218 *omj = mj - mb;
219 219 return mk + mb;
220 220 }
221 221
222 222 static void recurse(struct line *a, struct line *b, struct pos *pos,
223 223 int a1, int a2, int b1, int b2, struct hunklist *l)
224 224 {
225 225 int i, j, k;
226 226
227 227 /* find the longest match in this chunk */
228 228 k = longest_match(a, b, pos, a1, a2, b1, b2, &i, &j);
229 229 if (!k)
230 230 return;
231 231
232 232 /* and recurse on the remaining chunks on either side */
233 233 recurse(a, b, pos, a1, i, b1, j, l);
234 234 l->head->a1 = i;
235 235 l->head->a2 = i + k;
236 236 l->head->b1 = j;
237 237 l->head->b2 = j + k;
238 238 l->head++;
239 239 recurse(a, b, pos, i + k, a2, j + k, b2, l);
240 240 }
241 241
242 242 static struct hunklist diff(struct line *a, int an, struct line *b, int bn)
243 243 {
244 244 struct hunklist l;
245 245 struct pos *pos;
246 246 int t;
247 247
248 248 /* allocate and fill arrays */
249 249 t = equatelines(a, an, b, bn);
250 250 pos = (struct pos *)calloc(bn, sizeof(struct pos));
251 251 /* we can't have more matches than lines in the shorter file */
252 252 l.head = l.base = (struct hunk *)malloc(sizeof(struct hunk) *
253 253 ((an<bn ? an:bn) + 1));
254 254
255 255 if (pos && l.base && t) {
256 256 /* generate the matching block list */
257 257 recurse(a, b, pos, 0, an, 0, bn, &l);
258 l.head->a1 = an;
259 l.head->b1 = bn;
258 l.head->a1 = l.head->a2 = an;
259 l.head->b1 = l.head->b2 = bn;
260 260 l.head++;
261 261 }
262 262
263 263 free(pos);
264 264 return l;
265 265 }
266 266
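The two changed lines in diff() above fix the sentinel hunk so that its end coordinates are initialized too: the hunk list now terminates with an empty match at (an, bn). Python's difflib uses the same convention, which makes a handy cross-check (difflib stands in for bdiff here):

    import difflib

    a = ['one\n', 'two\n', 'three\n']
    b = ['one\n', 'two!\n', 'three\n']
    sm = difflib.SequenceMatcher(None, a, b)
    # the result ends with the zero-length sentinel (3, 3, 0),
    # analogous to the (an, bn) hunk set above
    print(sm.get_matching_blocks())
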
267 267 static PyObject *blocks(PyObject *self, PyObject *args)
268 268 {
269 269 PyObject *sa, *sb, *rl = NULL, *m;
270 270 struct line *a, *b;
271 271 struct hunklist l = {NULL, NULL};
272 272 struct hunk *h;
273 273 int an, bn, pos = 0;
274 274
275 275 if (!PyArg_ParseTuple(args, "SS:bdiff", &sa, &sb))
276 276 return NULL;
277 277
278 278 an = splitlines(PyString_AsString(sa), PyString_Size(sa), &a);
279 279 bn = splitlines(PyString_AsString(sb), PyString_Size(sb), &b);
280 280 if (!a || !b)
281 281 goto nomem;
282 282
283 283 l = diff(a, an, b, bn);
284 284 rl = PyList_New(l.head - l.base);
285 285 if (!l.head || !rl)
286 286 goto nomem;
287 287
288 288 for (h = l.base; h != l.head; h++) {
289 289 m = Py_BuildValue("iiii", h->a1, h->a2, h->b1, h->b2);
290 290 PyList_SetItem(rl, pos, m);
291 291 pos++;
292 292 }
293 293
294 294 nomem:
295 295 free(a);
296 296 free(b);
297 297 free(l.base);
298 298 return rl ? rl : PyErr_NoMemory();
299 299 }
300 300
301 301 static PyObject *bdiff(PyObject *self, PyObject *args)
302 302 {
303 303 char *sa, *sb;
304 304 PyObject *result = NULL;
305 305 struct line *al, *bl;
306 306 struct hunklist l = {NULL, NULL};
307 307 struct hunk *h;
308 308 char encode[12], *rb;
309 309 int an, bn, len = 0, la, lb;
310 310
311 311 if (!PyArg_ParseTuple(args, "s#s#:bdiff", &sa, &la, &sb, &lb))
312 312 return NULL;
313 313
314 314 an = splitlines(sa, la, &al);
315 315 bn = splitlines(sb, lb, &bl);
316 316 if (!al || !bl)
317 317 goto nomem;
318 318
319 319 l = diff(al, an, bl, bn);
320 320 if (!l.head)
321 321 goto nomem;
322 322
323 323 /* calculate length of output */
324 324 la = lb = 0;
325 325 for (h = l.base; h != l.head; h++) {
326 326 if (h->a1 != la || h->b1 != lb)
327 327 len += 12 + bl[h->b1].l - bl[lb].l;
328 328 la = h->a2;
329 329 lb = h->b2;
330 330 }
331 331
332 332 result = PyString_FromStringAndSize(NULL, len);
333 333 if (!result)
334 334 goto nomem;
335 335
336 336 /* build binary patch */
337 337 rb = PyString_AsString(result);
338 338 la = lb = 0;
339 339
340 340 for (h = l.base; h != l.head; h++) {
341 341 if (h->a1 != la || h->b1 != lb) {
342 342 len = bl[h->b1].l - bl[lb].l;
343 343 *(uint32_t *)(encode) = htonl(al[la].l - al->l);
344 344 *(uint32_t *)(encode + 4) = htonl(al[h->a1].l - al->l);
345 345 *(uint32_t *)(encode + 8) = htonl(len);
346 346 memcpy(rb, encode, 12);
347 347 memcpy(rb + 12, bl[lb].l, len);
348 348 rb += 12 + len;
349 349 }
350 350 la = h->a2;
351 351 lb = h->b2;
352 352 }
353 353
354 354 nomem:
355 355 free(al);
356 356 free(bl);
357 357 free(l.base);
358 358 return result ? result : PyErr_NoMemory();
359 359 }
360 360
361 361 static char mdiff_doc[] = "Efficient binary diff.";
362 362
363 363 static PyMethodDef methods[] = {
364 364 {"bdiff", bdiff, METH_VARARGS, "calculate a binary diff\n"},
365 365 {"blocks", blocks, METH_VARARGS, "find a list of matching lines\n"},
366 366 {NULL, NULL}
367 367 };
368 368
369 369 PyMODINIT_FUNC initbdiff(void)
370 370 {
371 371 Py_InitModule3("bdiff", methods, mdiff_doc);
372 372 }
373 373
@@ -1,246 +1,251 b''
1 1 # hgweb/server.py - The standalone hg web server.
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os, sys, errno, urllib, BaseHTTPServer, socket, SocketServer, traceback
10 10 from mercurial import ui, hg, util, templater
11 11 from hgweb_mod import hgweb
12 12 from hgwebdir_mod import hgwebdir
13 13 from request import wsgiapplication
14 14 from mercurial.i18n import gettext as _
15 15
16 16 def _splitURI(uri):
17 17 """ Return path and query splited from uri
18 18
19 19 Just like CGI environment, the path is unquoted, the query is
20 20 not.
21 21 """
22 22 if '?' in uri:
23 23 path, query = uri.split('?', 1)
24 24 else:
25 25 path, query = uri, ''
26 26 return urllib.unquote(path), query
27 27
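Expected behaviour of _splitURI above, shown with the Python 2 urllib interface the module already imports:

    import urllib

    uri = '/repo/some%20file?cmd=changegroup'
    if '?' in uri:
        path, query = uri.split('?', 1)
    else:
        path, query = uri, ''
    # the path is unquoted, the query is left as-is
    print('%s | %s' % (urllib.unquote(path), query))  # /repo/some file | cmd=changegroup
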
28 28 class _error_logger(object):
29 29 def __init__(self, handler):
30 30 self.handler = handler
31 31 def flush(self):
32 32 pass
33 33 def write(self, str):
34 34 self.writelines(str.split('\n'))
35 35 def writelines(self, seq):
36 36 for msg in seq:
37 37 self.handler.log_error("HG error: %s", msg)
38 38
39 39 class _hgwebhandler(object, BaseHTTPServer.BaseHTTPRequestHandler):
40 40 def __init__(self, *args, **kargs):
41 41 self.protocol_version = 'HTTP/1.1'
42 42 BaseHTTPServer.BaseHTTPRequestHandler.__init__(self, *args, **kargs)
43 43
44 44 def log_error(self, format, *args):
45 45 errorlog = self.server.errorlog
46 46 errorlog.write("%s - - [%s] %s\n" % (self.address_string(),
47 47 self.log_date_time_string(),
48 48 format % args))
49 49
50 50 def log_message(self, format, *args):
51 51 accesslog = self.server.accesslog
52 52 accesslog.write("%s - - [%s] %s\n" % (self.address_string(),
53 53 self.log_date_time_string(),
54 54 format % args))
55 55
56 56 def do_POST(self):
57 57 try:
58 58 try:
59 59 self.do_hgweb()
60 60 except socket.error, inst:
61 61 if inst[0] != errno.EPIPE:
62 62 raise
63 63 except StandardError, inst:
64 64 self._start_response("500 Internal Server Error", [])
65 65 self._write("Internal Server Error")
66 66 tb = "".join(traceback.format_exception(*sys.exc_info()))
67 67 self.log_error("Exception happened during processing request '%s':\n%s",
68 68 self.path, tb)
69 69
70 70 def do_GET(self):
71 71 self.do_POST()
72 72
73 73 def do_hgweb(self):
74 74 path_info, query = _splitURI(self.path)
75 75
76 76 env = {}
77 77 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
78 78 env['REQUEST_METHOD'] = self.command
79 79 env['SERVER_NAME'] = self.server.server_name
80 80 env['SERVER_PORT'] = str(self.server.server_port)
81 81 env['REQUEST_URI'] = self.path
82 82 env['PATH_INFO'] = path_info
83 83 if query:
84 84 env['QUERY_STRING'] = query
85 85 host = self.address_string()
86 86 if host != self.client_address[0]:
87 87 env['REMOTE_HOST'] = host
88 88 env['REMOTE_ADDR'] = self.client_address[0]
89 89
90 90 if self.headers.typeheader is None:
91 91 env['CONTENT_TYPE'] = self.headers.type
92 92 else:
93 93 env['CONTENT_TYPE'] = self.headers.typeheader
94 94 length = self.headers.getheader('content-length')
95 95 if length:
96 96 env['CONTENT_LENGTH'] = length
97 97 for header in [h for h in self.headers.keys() \
98 98 if h not in ('content-type', 'content-length')]:
99 99 hkey = 'HTTP_' + header.replace('-', '_').upper()
100 100 hval = self.headers.getheader(header)
101 101 hval = hval.replace('\n', '').strip()
102 102 if hval:
103 103 env[hkey] = hval
104 104 env['SERVER_PROTOCOL'] = self.request_version
105 105 env['wsgi.version'] = (1, 0)
106 106 env['wsgi.url_scheme'] = 'http'
107 107 env['wsgi.input'] = self.rfile
108 108 env['wsgi.errors'] = _error_logger(self)
109 109 env['wsgi.multithread'] = isinstance(self.server,
110 110 SocketServer.ThreadingMixIn)
111 111 env['wsgi.multiprocess'] = isinstance(self.server,
112 112 SocketServer.ForkingMixIn)
113 113 env['wsgi.run_once'] = 0
114 114
115 115 self.close_connection = True
116 116 self.saved_status = None
117 117 self.saved_headers = []
118 118 self.sent_headers = False
119 119 self.length = None
120 120 req = self.server.reqmaker(env, self._start_response)
121 121 for data in req:
122 122 if data:
123 123 self._write(data)
124 124
125 125 def send_headers(self):
126 126 if not self.saved_status:
127 127 raise AssertionError("Sending headers before start_response() called")
128 128 saved_status = self.saved_status.split(None, 1)
129 129 saved_status[0] = int(saved_status[0])
130 130 self.send_response(*saved_status)
131 131 should_close = True
132 132 for h in self.saved_headers:
133 133 self.send_header(*h)
134 134 if h[0].lower() == 'content-length':
135 135 should_close = False
136 136 self.length = int(h[1])
137 137 # The value of the Connection header is a list of case-insensitive
138 138 # tokens separated by commas and optional whitespace.
139 139 if 'close' in [token.strip().lower() for token in
140 140 self.headers.get('connection', '').split(',')]:
141 141 should_close = True
142 142 if should_close:
143 143 self.send_header('Connection', 'close')
144 144 self.close_connection = should_close
145 145 self.end_headers()
146 146 self.sent_headers = True
147 147
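The Connection-header tokenisation used in send_headers above, in isolation (the header value is fabricated):

    hdr = 'Keep-Alive, Close'
    tokens = [t.strip().lower() for t in hdr.split(',')]
    print('close' in tokens)   # True: the handler will close the connection
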
148 148 def _start_response(self, http_status, headers, exc_info=None):
149 149 code, msg = http_status.split(None, 1)
150 150 code = int(code)
151 151 self.saved_status = http_status
152 152 bad_headers = ('connection', 'transfer-encoding')
153 153 self.saved_headers = [ h for h in headers \
154 154 if h[0].lower() not in bad_headers ]
155 155 return self._write
156 156
157 157 def _write(self, data):
158 158 if not self.saved_status:
159 159 raise AssertionError("data written before start_response() called")
160 160 elif not self.sent_headers:
161 161 self.send_headers()
162 162 if self.length is not None:
163 163 if len(data) > self.length:
164 164 raise AssertionError("Content-length header sent, but more bytes than specified are being written.")
165 165 self.length = self.length - len(data)
166 166 self.wfile.write(data)
167 167 self.wfile.flush()
168 168
169 169 def create_server(ui, repo):
170 170 use_threads = True
171 171
172 172 def openlog(opt, default):
173 173 if opt and opt != '-':
174 174 return open(opt, 'w')
175 175 return default
176 176
177 177 address = ui.config("web", "address", "")
178 178 port = int(ui.config("web", "port", 8000))
179 179 use_ipv6 = ui.configbool("web", "ipv6")
180 180 webdir_conf = ui.config("web", "webdir_conf")
181 181 accesslog = openlog(ui.config("web", "accesslog", "-"), sys.stdout)
182 182 errorlog = openlog(ui.config("web", "errorlog", "-"), sys.stderr)
183 183
184 184 if use_threads:
185 185 try:
186 186 from threading import activeCount
187 187 except ImportError:
188 188 use_threads = False
189 189
190 190 if use_threads:
191 191 _mixin = SocketServer.ThreadingMixIn
192 192 else:
193 193 if hasattr(os, "fork"):
194 194 _mixin = SocketServer.ForkingMixIn
195 195 else:
196 196 class _mixin:
197 197 pass
198 198
199 199 class MercurialHTTPServer(object, _mixin, BaseHTTPServer.HTTPServer):
200
201 # SO_REUSEADDR has broken semantics on Windows
202 if os.name == 'nt':
203 allow_reuse_address = 0
204
200 205 def __init__(self, *args, **kargs):
201 206 BaseHTTPServer.HTTPServer.__init__(self, *args, **kargs)
202 207 self.accesslog = accesslog
203 208 self.errorlog = errorlog
204 209 self.repo = repo
205 210 self.webdir_conf = webdir_conf
206 211 self.webdirmaker = hgwebdir
207 212 self.repoviewmaker = hgweb
208 213 self.reqmaker = wsgiapplication(self.make_handler)
209 214 self.daemon_threads = True
210 215
211 216 addr, port = self.socket.getsockname()[:2]
212 217 if addr in ('0.0.0.0', '::'):
213 218 addr = socket.gethostname()
214 219 else:
215 220 try:
216 221 addr = socket.gethostbyaddr(addr)[0]
217 222 except socket.error:
218 223 pass
219 224 self.addr, self.port = addr, port
220 225
221 226 def make_handler(self):
222 227 if self.webdir_conf:
223 228 hgwebobj = self.webdirmaker(self.webdir_conf, ui)
224 229 elif self.repo is not None:
225 230 hgwebobj = self.repoviewmaker(hg.repository(repo.ui,
226 231 repo.root))
227 232 else:
228 233 raise hg.RepoError(_("There is no Mercurial repository here"
229 234 " (.hg not found)"))
230 235 return hgwebobj
231 236
232 237 class IPv6HTTPServer(MercurialHTTPServer):
233 238 address_family = getattr(socket, 'AF_INET6', None)
234 239
235 240 def __init__(self, *args, **kwargs):
236 241 if self.address_family is None:
237 242 raise hg.RepoError(_('IPv6 not available on this system'))
238 243 super(IPv6HTTPServer, self).__init__(*args, **kwargs)
239 244
240 245 try:
241 246 if use_ipv6:
242 247 return IPv6HTTPServer((address, port), _hgwebhandler)
243 248 else:
244 249 return MercurialHTTPServer((address, port), _hgwebhandler)
245 250 except socket.error, inst:
246 251 raise util.Abort(_('cannot start server: %s') % inst.args[1])
@@ -1,386 +1,393 b''
1 1 # httprepo.py - HTTP repository proxy classes for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 from node import *
10 10 from remoterepo import *
11 11 from i18n import _
12 12 import hg, os, urllib, urllib2, urlparse, zlib, util, httplib
13 13 import errno, keepalive, tempfile, socket, changegroup
14 14
15 15 class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm):
16 16 def __init__(self, ui):
17 17 urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self)
18 18 self.ui = ui
19 19
20 20 def find_user_password(self, realm, authuri):
21 21 authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password(
22 22 self, realm, authuri)
23 23 user, passwd = authinfo
24 24 if user and passwd:
25 25 return (user, passwd)
26 26
27 27 if not self.ui.interactive:
28 28 raise util.Abort(_('http authorization required'))
29 29
30 30 self.ui.write(_("http authorization required\n"))
31 31 self.ui.status(_("realm: %s\n") % realm)
32 32 if user:
33 33 self.ui.status(_("user: %s\n") % user)
34 34 else:
35 35 user = self.ui.prompt(_("user:"), default=None)
36 36
37 37 if not passwd:
38 38 passwd = self.ui.getpass()
39 39
40 40 self.add_password(realm, authuri, user, passwd)
41 41 return (user, passwd)
42 42
43 43 def netlocsplit(netloc):
44 44 '''split [user[:passwd]@]host[:port] into 4-tuple.'''
45 45
46 46 a = netloc.find('@')
47 47 if a == -1:
48 48 user, passwd = None, None
49 49 else:
50 50 userpass, netloc = netloc[:a], netloc[a+1:]
51 51 c = userpass.find(':')
52 52 if c == -1:
53 53 user, passwd = urllib.unquote(userpass), None
54 54 else:
55 55 user = urllib.unquote(userpass[:c])
56 56 passwd = urllib.unquote(userpass[c+1:])
57 57 c = netloc.find(':')
58 58 if c == -1:
59 59 host, port = netloc, None
60 60 else:
61 61 host, port = netloc[:c], netloc[c+1:]
62 62 return host, port, user, passwd
63 63
64 64 def netlocunsplit(host, port, user=None, passwd=None):
65 65 '''turn host, port, user, passwd into [user[:passwd]@]host[:port].'''
66 66 if port:
67 67 hostport = host + ':' + port
68 68 else:
69 69 hostport = host
70 70 if user:
71 71 if passwd:
72 72 userpass = urllib.quote(user) + ':' + urllib.quote(passwd)
73 73 else:
74 74 userpass = urllib.quote(user)
75 75 return userpass + '@' + hostport
76 76 return hostport
77 77
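Expected round-trip behaviour of the two helpers above (the credentials are made up):

    netlocsplit('alice:s3cret@example.com:8080')
        -> ('example.com', '8080', 'alice', 's3cret')
    netlocunsplit('example.com', '8080', 'alice', 's3cret')
        -> 'alice:s3cret@example.com:8080'
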
78 78 class httpsendfile(file):
79 79 def __len__(self):
80 80 return os.fstat(self.fileno()).st_size
81 81
82 82 def _gen_sendfile(connection):
83 83 def _sendfile(self, data):
84 84 # send a file
85 85 if isinstance(data, httpsendfile):
86 86 # if auth required, some data sent twice, so rewind here
87 87 data.seek(0)
88 88 for chunk in util.filechunkiter(data):
89 89 connection.send(self, chunk)
90 90 else:
91 91 connection.send(self, data)
92 92 return _sendfile
93 93
94 94 class httpconnection(keepalive.HTTPConnection):
95 95 # must be able to send big bundle as stream.
96 96 send = _gen_sendfile(keepalive.HTTPConnection)
97 97
98 98 class basehttphandler(keepalive.HTTPHandler):
99 99 def http_open(self, req):
100 100 return self.do_open(httpconnection, req)
101 101
102 102 has_https = hasattr(urllib2, 'HTTPSHandler')
103 103 if has_https:
104 104 class httpsconnection(httplib.HTTPSConnection):
105 105 response_class = keepalive.HTTPResponse
106 106 # must be able to send big bundle as stream.
107 107 send = _gen_sendfile(httplib.HTTPSConnection)
108 108
109 109 class httphandler(basehttphandler, urllib2.HTTPSHandler):
110 110 def https_open(self, req):
111 111 return self.do_open(httpsconnection, req)
112 112 else:
113 113 class httphandler(basehttphandler):
114 114 pass
115 115
116 116 def zgenerator(f):
117 117 zd = zlib.decompressobj()
118 118 try:
119 119 for chunk in util.filechunkiter(f):
120 120 yield zd.decompress(chunk)
121 121 except httplib.HTTPException, inst:
122 122 raise IOError(None, _('connection ended unexpectedly'))
123 123 yield zd.flush()
124 124
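zgenerator in miniature: decompress a stream chunk by chunk and remember the final flush() for trailing data. A self-contained Python 2 sketch, where cStringIO stands in for the HTTP response and fixed-size reads for util.filechunkiter:

    import zlib
    from cStringIO import StringIO

    payload = zlib.compress('hello mercurial\n' * 100)
    f = StringIO(payload)
    zd = zlib.decompressobj()
    out = []
    while True:
        chunk = f.read(64)
        if not chunk:
            break
        out.append(zd.decompress(chunk))
    out.append(zd.flush())               # trailing bytes live here
    print(len(''.join(out)))             # 1600
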
125 125 class httprepository(remoterepository):
126 126 def __init__(self, ui, path):
127 127 self.path = path
128 128 self.caps = None
129 self.handler = None
129 130 scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path)
130 131 if query or frag:
131 132 raise util.Abort(_('unsupported URL component: "%s"') %
132 133 (query or frag))
133 134 if not urlpath: urlpath = '/'
134 135 host, port, user, passwd = netlocsplit(netloc)
135 136
136 137 # urllib cannot handle URLs with embedded user or passwd
137 138 self._url = urlparse.urlunsplit((scheme, netlocunsplit(host, port),
138 139 urlpath, '', ''))
139 140 self.ui = ui
140 141
141 142 proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy')
142 143 # XXX proxyauthinfo = None
143 handlers = [httphandler()]
144 self.handler = httphandler()
145 handlers = [self.handler]
144 146
145 147 if proxyurl:
146 148 # proxy can be proper url or host[:port]
147 149 if not (proxyurl.startswith('http:') or
148 150 proxyurl.startswith('https:')):
149 151 proxyurl = 'http://' + proxyurl + '/'
150 152 snpqf = urlparse.urlsplit(proxyurl)
151 153 proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf
152 154 hpup = netlocsplit(proxynetloc)
153 155
154 156 proxyhost, proxyport, proxyuser, proxypasswd = hpup
155 157 if not proxyuser:
156 158 proxyuser = ui.config("http_proxy", "user")
157 159 proxypasswd = ui.config("http_proxy", "passwd")
158 160
159 161 # see if we should use a proxy for this url
160 162 no_list = [ "localhost", "127.0.0.1" ]
161 163 no_list.extend([p.lower() for
162 164 p in ui.configlist("http_proxy", "no")])
163 165 no_list.extend([p.strip().lower() for
164 166 p in os.getenv("no_proxy", '').split(',')
165 167 if p.strip()])
166 168 # "http_proxy.always" config is for running tests on localhost
167 169 if (not ui.configbool("http_proxy", "always") and
168 170 host.lower() in no_list):
169 171 ui.debug(_('disabling proxy for %s\n') % host)
170 172 else:
171 173 proxyurl = urlparse.urlunsplit((
172 174 proxyscheme, netlocunsplit(proxyhost, proxyport,
173 175 proxyuser, proxypasswd or ''),
174 176 proxypath, proxyquery, proxyfrag))
175 177 handlers.append(urllib2.ProxyHandler({scheme: proxyurl}))
176 178 ui.debug(_('proxying through http://%s:%s\n') %
177 179 (proxyhost, proxyport))
178 180
179 181 # urllib2 takes proxy values from the environment and those
180 182 # will take precedence if found, so drop them
181 183 for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]:
182 184 try:
183 185 if os.environ.has_key(env):
184 186 del os.environ[env]
185 187 except OSError:
186 188 pass
187 189
188 190 passmgr = passwordmgr(ui)
189 191 if user:
190 192 ui.debug(_('http auth: user %s, password %s\n') %
191 193 (user, passwd and '*' * len(passwd) or 'not set'))
192 194 passmgr.add_password(None, host, user, passwd or '')
193 195
194 196 handlers.extend((urllib2.HTTPBasicAuthHandler(passmgr),
195 197 urllib2.HTTPDigestAuthHandler(passmgr)))
196 198 opener = urllib2.build_opener(*handlers)
197 199
198 200 # 1.0 here is the _protocol_ version
199 201 opener.addheaders = [('User-agent', 'mercurial/proto-1.0')]
200 202 urllib2.install_opener(opener)
201 203
204 def __del__(self):
205 if self.handler:
206 self.handler.close_all()
207 self.handler = None
208
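The cleanup pattern this merge introduces, reduced to a sketch: close pooled keepalive connections at most once when the repository object goes away. The classes below are stand-ins, not the real ones.

    class _pool(object):
        def close_all(self):
            print('connections closed')

    class _repo(object):
        def __init__(self):
            self.handler = _pool()
        def __del__(self):
            if self.handler:             # also guards a partially-run __init__
                self.handler.close_all()
                self.handler = None

    r = _repo()
    del r                                # prints 'connections closed'
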
202 209 def url(self):
203 210 return self.path
204 211
205 212 # look up capabilities only when needed
206 213
207 214 def get_caps(self):
208 215 if self.caps is None:
209 216 try:
210 217 self.caps = self.do_read('capabilities').split()
211 218 except hg.RepoError:
212 219 self.caps = ()
213 220 self.ui.debug(_('capabilities: %s\n') %
214 221 (' '.join(self.caps or ['none'])))
215 222 return self.caps
216 223
217 224 capabilities = property(get_caps)
218 225
219 226 def lock(self):
220 227 raise util.Abort(_('operation not supported over http'))
221 228
222 229 def do_cmd(self, cmd, **args):
223 230 data = args.pop('data', None)
224 231 headers = args.pop('headers', {})
225 232 self.ui.debug(_("sending %s command\n") % cmd)
226 233 q = {"cmd": cmd}
227 234 q.update(args)
228 235 qs = '?%s' % urllib.urlencode(q)
229 236 cu = "%s%s" % (self._url, qs)
230 237 try:
231 238 if data:
232 239 self.ui.debug(_("sending %s bytes\n") %
233 240 headers.get('content-length', 'X'))
234 241 resp = urllib2.urlopen(urllib2.Request(cu, data, headers))
235 242 except urllib2.HTTPError, inst:
236 243 if inst.code == 401:
237 244 raise util.Abort(_('authorization failed'))
238 245 raise
239 246 except httplib.HTTPException, inst:
240 247 self.ui.debug(_('http error while sending %s command\n') % cmd)
241 248 self.ui.print_exc()
242 249 raise IOError(None, inst)
243 250 except IndexError:
244 251 # this only happens with Python 2.3, later versions raise URLError
245 252 raise util.Abort(_('http error, possibly caused by proxy setting'))
246 253 # record the url we got redirected to
247 254 resp_url = resp.geturl()
248 255 if resp_url.endswith(qs):
249 256 resp_url = resp_url[:-len(qs)]
250 257 if self._url != resp_url:
251 258 self.ui.status(_('real URL is %s\n') % resp_url)
252 259 self._url = resp_url
253 260 try:
254 261 proto = resp.getheader('content-type')
255 262 except AttributeError:
256 263 proto = resp.headers['content-type']
257 264
258 265 # accept old "text/plain" and "application/hg-changegroup" for now
259 266 if not proto.startswith('application/mercurial-') and \
260 267 not proto.startswith('text/plain') and \
261 268 not proto.startswith('application/hg-changegroup'):
262 269 raise hg.RepoError(_("'%s' does not appear to be an hg repository") %
263 270 self._url)
264 271
265 272 if proto.startswith('application/mercurial-'):
266 273 try:
267 274 version = float(proto[22:])
268 275 except ValueError:
269 276 raise hg.RepoError(_("'%s' sent a broken Content-type "
270 277 "header (%s)") % (self._url, proto))
271 278 if version > 0.1:
272 279 raise hg.RepoError(_("'%s' uses newer protocol %s") %
273 280 (self._url, version))
274 281
275 282 return resp
276 283
277 284 def do_read(self, cmd, **args):
278 285 fp = self.do_cmd(cmd, **args)
279 286 try:
280 287 return fp.read()
281 288 finally:
282 289 # if using keepalive, allow connection to be reused
283 290 fp.close()
284 291
285 292 def lookup(self, key):
286 293 d = self.do_cmd("lookup", key = key).read()
287 294 success, data = d[:-1].split(' ', 1)
288 295 if int(success):
289 296 return bin(data)
290 297 raise hg.RepoError(data)
291 298
292 299 def heads(self):
293 300 d = self.do_read("heads")
294 301 try:
295 302 return map(bin, d[:-1].split(" "))
296 303 except:
297 304 raise util.UnexpectedOutput(_("unexpected response:"), d)
298 305
299 306 def branches(self, nodes):
300 307 n = " ".join(map(hex, nodes))
301 308 d = self.do_read("branches", nodes=n)
302 309 try:
303 310 br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ]
304 311 return br
305 312 except:
306 313 raise util.UnexpectedOutput(_("unexpected response:"), d)
307 314
308 315 def between(self, pairs):
309 316 n = "\n".join(["-".join(map(hex, p)) for p in pairs])
310 317 d = self.do_read("between", pairs=n)
311 318 try:
312 319 p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ]
313 320 return p
314 321 except:
315 322 raise util.UnexpectedOutput(_("unexpected response:"), d)
316 323
317 324 def changegroup(self, nodes, kind):
318 325 n = " ".join(map(hex, nodes))
319 326 f = self.do_cmd("changegroup", roots=n)
320 327 return util.chunkbuffer(zgenerator(f))
321 328
322 329 def changegroupsubset(self, bases, heads, source):
323 330 baselst = " ".join([hex(n) for n in bases])
324 331 headlst = " ".join([hex(n) for n in heads])
325 332 f = self.do_cmd("changegroupsubset", bases=baselst, heads=headlst)
326 333 return util.chunkbuffer(zgenerator(f))
327 334
328 335 def unbundle(self, cg, heads, source):
329 336 # have to stream bundle to a temp file because we do not have
330 337 # http 1.1 chunked transfer.
331 338
332 339 type = ""
333 340 types = self.capable('unbundle')
334 341 # servers older than d1b16a746db6 will send 'unbundle' as a
335 342 # boolean capability
336 343 try:
337 344 types = types.split(',')
338 345 except AttributeError:
339 346 types = [""]
340 347 if types:
341 348 for x in types:
342 349 if x in changegroup.bundletypes:
343 350 type = x
344 351 break
345 352
346 353 tempname = changegroup.writebundle(cg, None, type)
347 354 fp = httpsendfile(tempname, "rb")
348 355 try:
349 356 try:
350 357 rfp = self.do_cmd(
351 358 'unbundle', data=fp,
352 359 headers={'content-type': 'application/octet-stream'},
353 360 heads=' '.join(map(hex, heads)))
354 361 try:
355 362 ret = int(rfp.readline())
356 363 self.ui.write(rfp.read())
357 364 return ret
358 365 finally:
359 366 rfp.close()
360 367 except socket.error, err:
361 368 if err[0] in (errno.ECONNRESET, errno.EPIPE):
362 369 raise util.Abort(_('push failed: %s') % err[1])
363 370 raise util.Abort(err[1])
364 371 finally:
365 372 fp.close()
366 373 os.unlink(tempname)
367 374
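The capability negotiation at the top of unbundle(), sketched on its own: old servers advertise 'unbundle' as a bare boolean, newer ones as a comma-separated list of bundle types. The helper name is made up; the HG10* names are the bundle types of this era.

    def pick_bundletype(capval, known=('HG10GZ', 'HG10BZ', 'HG10UN')):
        try:
            types = capval.split(',')
        except AttributeError:           # boolean capability from an old server
            types = ['']
        for t in types:
            if t in known:
                return t
        return ''

    print(pick_bundletype('HG10BZ,HG10UN'))  # HG10BZ
    print(pick_bundletype(True))             # '': legacy fallback
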
368 375 def stream_out(self):
369 376 return self.do_cmd('stream_out')
370 377
371 378 class httpsrepository(httprepository):
372 379 def __init__(self, ui, path):
373 380 if not has_https:
374 381 raise util.Abort(_('Python support for SSL and HTTPS '
375 382 'is not installed'))
376 383 httprepository.__init__(self, ui, path)
377 384
378 385 def instance(ui, path, create):
379 386 if create:
380 387 raise util.Abort(_('cannot create new http repository'))
381 388 if path.startswith('hg:'):
382 389 ui.warn(_("hg:// syntax is deprecated, please use http:// instead\n"))
383 390 path = 'http:' + path[3:]
384 391 if path.startswith('https:'):
385 392 return httpsrepository(ui, path)
386 393 return httprepository(ui, path)
@@ -1,1920 +1,1922 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, appendfile, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __del__(self):
20 20 self.transhandle = None
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 if not path:
24 24 p = os.getcwd()
25 25 while not os.path.isdir(os.path.join(p, ".hg")):
26 26 oldp = p
27 27 p = os.path.dirname(p)
28 28 if p == oldp:
29 29 raise repo.RepoError(_("There is no Mercurial repository"
30 30 " here (.hg not found)"))
31 31 path = p
32 32
33 33 self.path = os.path.join(path, ".hg")
34 34 self.root = os.path.realpath(path)
35 35 self.origroot = path
36 36 self.opener = util.opener(self.path)
37 37 self.wopener = util.opener(self.root)
38 38
39 39 if not os.path.isdir(self.path):
40 40 if create:
41 41 if not os.path.exists(path):
42 42 os.mkdir(path)
43 43 os.mkdir(self.path)
44 44 os.mkdir(os.path.join(self.path, "store"))
45 45 requirements = ("revlogv1", "store")
46 46 reqfile = self.opener("requires", "w")
47 47 for r in requirements:
48 48 reqfile.write("%s\n" % r)
49 49 reqfile.close()
50 50 # create an invalid changelog
51 51 self.opener("00changelog.i", "a").write(
52 52 '\0\0\0\2' # represents revlogv2
53 53 ' dummy changelog to prevent using the old repo layout'
54 54 )
55 55 else:
56 56 raise repo.RepoError(_("repository %s not found") % path)
57 57 elif create:
58 58 raise repo.RepoError(_("repository %s already exists") % path)
59 59 else:
60 60 # find requirements
61 61 try:
62 62 requirements = self.opener("requires").read().splitlines()
63 63 except IOError, inst:
64 64 if inst.errno != errno.ENOENT:
65 65 raise
66 66 requirements = []
67 67 # check them
68 68 for r in requirements:
69 69 if r not in self.supported:
70 70 raise repo.RepoError(_("requirement '%s' not supported") % r)
71 71
72 72 # setup store
73 73 if "store" in requirements:
74 74 self.encodefn = util.encodefilename
75 75 self.decodefn = util.decodefilename
76 76 self.spath = os.path.join(self.path, "store")
77 77 else:
78 78 self.encodefn = lambda x: x
79 79 self.decodefn = lambda x: x
80 80 self.spath = self.path
81 81 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
82 82
83 83 self.ui = ui.ui(parentui=parentui)
84 84 try:
85 85 self.ui.readconfig(self.join("hgrc"), self.root)
86 86 except IOError:
87 87 pass
88 88
89 89 v = self.ui.configrevlog()
90 90 self.revlogversion = int(v.get('format', revlog.REVLOG_DEFAULT_FORMAT))
91 91 self.revlogv1 = self.revlogversion != revlog.REVLOGV0
92 92 fl = v.get('flags', None)
93 93 flags = 0
94 94 if fl != None:
95 95 for x in fl.split():
96 96 flags |= revlog.flagstr(x)
97 97 elif self.revlogv1:
98 98 flags = revlog.REVLOG_DEFAULT_FLAGS
99 99
100 100 v = self.revlogversion | flags
101 101 self.manifest = manifest.manifest(self.sopener, v)
102 102 self.changelog = changelog.changelog(self.sopener, v)
103 103
104 104 fallback = self.ui.config('ui', 'fallbackencoding')
105 105 if fallback:
106 106 util._fallbackencoding = fallback
107 107
108 108 # the changelog might not have the inline index flag
109 109 # on. If the format of the changelog is the same as found in
110 110 # .hgrc, apply any flags found in the .hgrc as well.
 111 111 # Otherwise, just use the version from the changelog.
112 112 v = self.changelog.version
113 113 if v == self.revlogversion:
114 114 v |= flags
115 115 self.revlogversion = v
116 116
117 117 self.tagscache = None
118 118 self.branchcache = None
119 119 self.nodetagscache = None
120 120 self.filterpats = {}
121 121 self.transhandle = None
122 122
123 123 self._link = lambda x: False
124 124 if util.checklink(self.root):
125 125 r = self.root # avoid circular reference in lambda
126 126 self._link = lambda x: util.is_link(os.path.join(r, x))
127 127
128 128 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
129 129
130 130 def url(self):
131 131 return 'file:' + self.root
132 132
133 133 def hook(self, name, throw=False, **args):
134 134 def callhook(hname, funcname):
135 135 '''call python hook. hook is callable object, looked up as
136 136 name in python module. if callable returns "true", hook
137 137 fails, else passes. if hook raises exception, treated as
138 138 hook failure. exception propagates if throw is "true".
139 139
140 140 reason for "true" meaning "hook failed" is so that
141 141 unmodified commands (e.g. mercurial.commands.update) can
142 142 be run as hooks without wrappers to convert return values.'''
143 143
144 144 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
145 145 obj = funcname
146 146 if not callable(obj):
147 147 d = funcname.rfind('.')
148 148 if d == -1:
149 149 raise util.Abort(_('%s hook is invalid ("%s" not in '
150 150 'a module)') % (hname, funcname))
151 151 modname = funcname[:d]
152 152 try:
153 153 obj = __import__(modname)
154 154 except ImportError:
155 155 try:
156 156 # extensions are loaded with hgext_ prefix
157 157 obj = __import__("hgext_%s" % modname)
158 158 except ImportError:
159 159 raise util.Abort(_('%s hook is invalid '
160 160 '(import of "%s" failed)') %
161 161 (hname, modname))
162 162 try:
163 163 for p in funcname.split('.')[1:]:
164 164 obj = getattr(obj, p)
165 165 except AttributeError, err:
166 166 raise util.Abort(_('%s hook is invalid '
167 167 '("%s" is not defined)') %
168 168 (hname, funcname))
169 169 if not callable(obj):
170 170 raise util.Abort(_('%s hook is invalid '
171 171 '("%s" is not callable)') %
172 172 (hname, funcname))
173 173 try:
174 174 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
175 175 except (KeyboardInterrupt, util.SignalInterrupt):
176 176 raise
177 177 except Exception, exc:
178 178 if isinstance(exc, util.Abort):
179 179 self.ui.warn(_('error: %s hook failed: %s\n') %
180 180 (hname, exc.args[0]))
181 181 else:
182 182 self.ui.warn(_('error: %s hook raised an exception: '
183 183 '%s\n') % (hname, exc))
184 184 if throw:
185 185 raise
186 186 self.ui.print_exc()
187 187 return True
188 188 if r:
189 189 if throw:
190 190 raise util.Abort(_('%s hook failed') % hname)
191 191 self.ui.warn(_('warning: %s hook failed\n') % hname)
192 192 return r
193 193
194 194 def runhook(name, cmd):
195 195 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
196 196 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
197 197 r = util.system(cmd, environ=env, cwd=self.root)
198 198 if r:
199 199 desc, r = util.explain_exit(r)
200 200 if throw:
201 201 raise util.Abort(_('%s hook %s') % (name, desc))
202 202 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
203 203 return r
204 204
205 205 r = False
206 206 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
207 207 if hname.split(".", 1)[0] == name and cmd]
208 208 hooks.sort()
209 209 for hname, cmd in hooks:
210 210 if callable(cmd):
211 211 r = callhook(hname, cmd) or r
212 212 elif cmd.startswith('python:'):
213 213 r = callhook(hname, cmd[7:].strip()) or r
214 214 else:
215 215 r = runhook(hname, cmd) or r
216 216 return r
217 217
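For orientation, a minimal python hook under the convention callhook documents above (a true return value means the hook failed). The module and hook names here are hypothetical:

# in a hypothetical module checkhooks.py, enabled with:
#   [hooks]
#   pretxncommit.check = python:checkhooks.check
def check(ui, repo, hooktype, node=None, **kwargs):
    ui.note("%s hook running on %s\n" % (hooktype, node))
    if node is None:
        ui.warn("no changeset supplied\n")
        return True     # true => hook failed; the pending commit is aborted
    return False        # falsy => hook passed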
218 218 tag_disallowed = ':\r\n'
219 219
220 220 def _tag(self, name, node, message, local, user, date, parent=None):
221 221 use_dirstate = parent is None
222 222
223 223 for c in self.tag_disallowed:
224 224 if c in name:
225 225 raise util.Abort(_('%r cannot be used in a tag name') % c)
226 226
227 227 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
228 228
229 229 if local:
230 230 # local tags are stored in the current charset
231 231 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
232 232 self.hook('tag', node=hex(node), tag=name, local=local)
233 233 return
234 234
235 235 # committed tags are stored in UTF-8
236 236 line = '%s %s\n' % (hex(node), util.fromlocal(name))
237 237 if use_dirstate:
238 238 self.wfile('.hgtags', 'ab').write(line)
239 239 else:
240 240 ntags = self.filectx('.hgtags', parent).data()
241 241 self.wfile('.hgtags', 'ab').write(ntags + line)
242 242 if use_dirstate and self.dirstate.state('.hgtags') == '?':
243 243 self.add(['.hgtags'])
244 244
245 245 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
246 246
247 247 self.hook('tag', node=hex(node), tag=name, local=local)
248 248
249 249 return tagnode
250 250
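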
251 251 def tag(self, name, node, message, local, user, date):
252 252 '''tag a revision with a symbolic name.
253 253
254 254 if local is True, the tag is stored in a per-repository file.
255 255 otherwise, it is stored in the .hgtags file, and a new
256 256 changeset is committed with the change.
257 257
258 258 keyword arguments:
259 259
260 260 local: whether to store tag in non-version-controlled file
261 261 (default False)
262 262
263 263 message: commit message to use if committing
264 264
265 265 user: name of user to use if committing
266 266
267 267 date: date tuple to use if committing'''
268 268
269 269 for x in self.status()[:5]:
270 270 if '.hgtags' in x:
271 271 raise util.Abort(_('working copy of .hgtags is changed '
272 272 '(please commit .hgtags manually)'))
273 273
274 274
275 275 self._tag(name, node, message, local, user, date)
276 276
277 277 def tags(self):
278 278 '''return a mapping of tag to node'''
279 279 if not self.tagscache:
280 280 self.tagscache = {}
281 281
282 282 def parsetag(line, context):
283 283 if not line:
284 284 return
 285 285                 s = line.split(" ", 1)
286 286 if len(s) != 2:
287 287 self.ui.warn(_("%s: cannot parse entry\n") % context)
288 288 return
289 289 node, key = s
290 290 key = util.tolocal(key.strip()) # stored in UTF-8
291 291 try:
292 292 bin_n = bin(node)
293 293 except TypeError:
294 294 self.ui.warn(_("%s: node '%s' is not well formed\n") %
295 295 (context, node))
296 296 return
297 297 if bin_n not in self.changelog.nodemap:
298 298 self.ui.warn(_("%s: tag '%s' refers to unknown node\n") %
299 299 (context, key))
300 300 return
301 301 self.tagscache[key] = bin_n
302 302
303 303 # read the tags file from each head, ending with the tip,
304 304 # and add each tag found to the map, with "newer" ones
305 305 # taking precedence
306 306 f = None
307 307 for rev, node, fnode in self._hgtagsnodes():
308 308 f = (f and f.filectx(fnode) or
309 309 self.filectx('.hgtags', fileid=fnode))
310 310 count = 0
311 311 for l in f.data().splitlines():
312 312 count += 1
313 313 parsetag(l, _("%s, line %d") % (str(f), count))
314 314
315 315 try:
316 316 f = self.opener("localtags")
317 317 count = 0
318 318 for l in f:
319 319 # localtags are stored in the local character set
320 320 # while the internal tag table is stored in UTF-8
321 321 l = util.fromlocal(l)
322 322 count += 1
323 323 parsetag(l, _("localtags, line %d") % count)
324 324 except IOError:
325 325 pass
326 326
327 327 self.tagscache['tip'] = self.changelog.tip()
328 328
329 329 return self.tagscache
330 330
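The tag file format parsetag handles above is one entry per line: forty hex digits, one space, then the tag name. A standalone sketch (the helper name is ours):

def parse_tag_line(line):
    s = line.rstrip('\n').split(" ", 1)
    if len(s) != 2:
        raise ValueError("cannot parse entry: %r" % line)
    node, key = s
    return key.strip(), node

# parse_tag_line("1e4e1b8f71e05681d422154f5421e385fec3454f v1.0")
#   -> ('v1.0', '1e4e1b8f71e05681d422154f5421e385fec3454f')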
331 331 def _hgtagsnodes(self):
332 332 heads = self.heads()
333 333 heads.reverse()
334 334 last = {}
335 335 ret = []
336 336 for node in heads:
337 337 c = self.changectx(node)
338 338 rev = c.rev()
339 339 try:
340 340 fnode = c.filenode('.hgtags')
341 341 except revlog.LookupError:
342 342 continue
343 343 ret.append((rev, node, fnode))
344 344 if fnode in last:
345 345 ret[last[fnode]] = None
346 346 last[fnode] = len(ret) - 1
347 347 return [item for item in ret if item]
348 348
349 349 def tagslist(self):
350 350 '''return a list of tags ordered by revision'''
351 351 l = []
352 352 for t, n in self.tags().items():
353 353 try:
354 354 r = self.changelog.rev(n)
355 355 except:
356 356 r = -2 # sort to the beginning of the list if unknown
357 357 l.append((r, t, n))
358 358 l.sort()
359 359 return [(t, n) for r, t, n in l]
360 360
361 361 def nodetags(self, node):
362 362 '''return the tags associated with a node'''
363 363 if not self.nodetagscache:
364 364 self.nodetagscache = {}
365 365 for t, n in self.tags().items():
366 366 self.nodetagscache.setdefault(n, []).append(t)
367 367 return self.nodetagscache.get(node, [])
368 368
369 369 def _branchtags(self):
370 370 partial, last, lrev = self._readbranchcache()
371 371
372 372 tiprev = self.changelog.count() - 1
373 373 if lrev != tiprev:
374 374 self._updatebranchcache(partial, lrev+1, tiprev+1)
375 375 self._writebranchcache(partial, self.changelog.tip(), tiprev)
376 376
377 377 return partial
378 378
379 379 def branchtags(self):
380 380 if self.branchcache is not None:
381 381 return self.branchcache
382 382
383 383 self.branchcache = {} # avoid recursion in changectx
384 384 partial = self._branchtags()
385 385
386 386 # the branch cache is stored on disk as UTF-8, but in the local
387 387 # charset internally
388 388 for k, v in partial.items():
389 389 self.branchcache[util.tolocal(k)] = v
390 390 return self.branchcache
391 391
392 392 def _readbranchcache(self):
393 393 partial = {}
394 394 try:
395 395 f = self.opener("branches.cache")
396 396 lines = f.read().split('\n')
397 397 f.close()
398 398 last, lrev = lines.pop(0).rstrip().split(" ", 1)
399 399 last, lrev = bin(last), int(lrev)
400 400 if not (lrev < self.changelog.count() and
401 401 self.changelog.node(lrev) == last): # sanity check
402 402 # invalidate the cache
403 403 raise ValueError('Invalid branch cache: unknown tip')
404 404 for l in lines:
405 405 if not l: continue
406 406 node, label = l.rstrip().split(" ", 1)
407 407 partial[label] = bin(node)
408 408 except (KeyboardInterrupt, util.SignalInterrupt):
409 409 raise
410 410 except Exception, inst:
411 411 if self.ui.debugflag:
412 412 self.ui.warn(str(inst), '\n')
413 413 partial, last, lrev = {}, nullid, nullrev
414 414 return partial, last, lrev
415 415
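_readbranchcache and _writebranchcache agree on a simple text layout for branches.cache: a "<tip-hex> <tip-rev>" header line used for the sanity check, then one "<node-hex> <label>" line per branch. A round-trip sketch (helper names are ours; nodes kept as hex strings for simplicity):

def write_branch_cache(tip_hex, tip_rev, branches):
    lines = ["%s %s" % (tip_hex, tip_rev)]
    for label, node_hex in branches.items():
        lines.append("%s %s" % (node_hex, label))
    return "\n".join(lines) + "\n"

def read_branch_cache(text):
    lines = text.split('\n')
    last, lrev = lines.pop(0).rstrip().split(" ", 1)
    partial = {}
    for l in lines:
        if not l:
            continue
        node, label = l.rstrip().split(" ", 1)
        partial[label] = node
    return partial, last, int(lrev)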
416 416 def _writebranchcache(self, branches, tip, tiprev):
417 417 try:
418 418 f = self.opener("branches.cache", "w")
419 419 f.write("%s %s\n" % (hex(tip), tiprev))
420 420 for label, node in branches.iteritems():
421 421 f.write("%s %s\n" % (hex(node), label))
422 422 except IOError:
423 423 pass
424 424
425 425 def _updatebranchcache(self, partial, start, end):
426 426 for r in xrange(start, end):
427 427 c = self.changectx(r)
428 428 b = c.branch()
429 429 if b:
430 430 partial[b] = c.node()
431 431
432 432 def lookup(self, key):
433 433 if key == '.':
434 434 key = self.dirstate.parents()[0]
435 435 if key == nullid:
436 436 raise repo.RepoError(_("no revision checked out"))
437 437 elif key == 'null':
438 438 return nullid
439 439 n = self.changelog._match(key)
440 440 if n:
441 441 return n
442 442 if key in self.tags():
443 443 return self.tags()[key]
444 444 if key in self.branchtags():
445 445 return self.branchtags()[key]
446 446 n = self.changelog._partialmatch(key)
447 447 if n:
448 448 return n
449 449 raise repo.RepoError(_("unknown revision '%s'") % key)
450 450
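lookup() is effectively a fixed chain of resolvers tried in order: the '.' and 'null' special names, an exact changelog match (rev number or full hex), tag names, branch names, and finally an unambiguous hex prefix. The dispatch pattern, reduced to a generic sketch:

def resolve(key, resolvers):
    # resolvers: ordered callables returning a node or None;
    # the first hit wins, mirroring the cascade in lookup()
    for r in resolvers:
        n = r(key)
        if n:
            return n
    raise KeyError("unknown revision '%s'" % key)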
451 451 def dev(self):
452 452 return os.lstat(self.path).st_dev
453 453
454 454 def local(self):
455 455 return True
456 456
457 457 def join(self, f):
458 458 return os.path.join(self.path, f)
459 459
460 460 def sjoin(self, f):
461 461 f = self.encodefn(f)
462 462 return os.path.join(self.spath, f)
463 463
464 464 def wjoin(self, f):
465 465 return os.path.join(self.root, f)
466 466
467 467 def file(self, f):
468 468 if f[0] == '/':
469 469 f = f[1:]
470 470 return filelog.filelog(self.sopener, f, self.revlogversion)
471 471
472 472 def changectx(self, changeid=None):
473 473 return context.changectx(self, changeid)
474 474
475 475 def workingctx(self):
476 476 return context.workingctx(self)
477 477
478 478 def parents(self, changeid=None):
479 479 '''
480 480 get list of changectxs for parents of changeid or working directory
481 481 '''
482 482 if changeid is None:
483 483 pl = self.dirstate.parents()
484 484 else:
485 485 n = self.changelog.lookup(changeid)
486 486 pl = self.changelog.parents(n)
487 487 if pl[1] == nullid:
488 488 return [self.changectx(pl[0])]
489 489 return [self.changectx(pl[0]), self.changectx(pl[1])]
490 490
491 491 def filectx(self, path, changeid=None, fileid=None):
492 492 """changeid can be a changeset revision, node, or tag.
493 493 fileid can be a file revision or node."""
494 494 return context.filectx(self, path, changeid, fileid)
495 495
496 496 def getcwd(self):
497 497 return self.dirstate.getcwd()
498 498
499 499 def wfile(self, f, mode='r'):
500 500 return self.wopener(f, mode)
501 501
502 502 def _filter(self, filter, filename, data):
503 503 if filter not in self.filterpats:
504 504 l = []
505 505 for pat, cmd in self.ui.configitems(filter):
506 506 mf = util.matcher(self.root, "", [pat], [], [])[1]
507 507 l.append((mf, cmd))
508 508 self.filterpats[filter] = l
509 509
510 510 for mf, cmd in self.filterpats[filter]:
511 511 if mf(filename):
512 512 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
513 513 data = util.filter(data, cmd)
514 514 break
515 515
516 516 return data
517 517
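The [encode]/[decode] sections _filter consumes map glob patterns to shell commands, e.g. `*.txt = dos2unix` (illustrative); data for a matching file is piped through the command. A self-contained sketch of that piping, using subprocess as a stand-in for util.filter:

import subprocess

def pipe_through(cmd, data):
    # stand-in for util.filter(data, cmd): feed data to a shell
    # command and return its output
    p = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
                         stdout=subprocess.PIPE)
    return p.communicate(data)[0]

def apply_filters(filename, data, patterns):
    # patterns: (match_fn, cmd) pairs, like self.filterpats[filter];
    # only the first matching pattern is applied
    for match_fn, cmd in patterns:
        if match_fn(filename):
            return pipe_through(cmd, data)
    return data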
518 518 def wread(self, filename):
519 519 if self._link(filename):
520 520 data = os.readlink(self.wjoin(filename))
521 521 else:
522 522 data = self.wopener(filename, 'r').read()
523 523 return self._filter("encode", filename, data)
524 524
525 525 def wwrite(self, filename, data, flags):
526 526 data = self._filter("decode", filename, data)
527 527 if "l" in flags:
528 528 try:
529 529 os.unlink(self.wjoin(filename))
530 530 except OSError:
531 531 pass
532 532 os.symlink(data, self.wjoin(filename))
533 533 else:
534 534 try:
535 535 if self._link(filename):
536 536 os.unlink(self.wjoin(filename))
537 537 except OSError:
538 538 pass
539 539 self.wopener(filename, 'w').write(data)
540 540 util.set_exec(self.wjoin(filename), "x" in flags)
541 541
542 542 def wwritedata(self, filename, data):
543 543 return self._filter("decode", filename, data)
544 544
545 545 def transaction(self):
546 546 tr = self.transhandle
547 547 if tr != None and tr.running():
548 548 return tr.nest()
549 549
550 550 # save dirstate for rollback
551 551 try:
552 552 ds = self.opener("dirstate").read()
553 553 except IOError:
554 554 ds = ""
555 555 self.opener("journal.dirstate", "w").write(ds)
556 556
557 557 renames = [(self.sjoin("journal"), self.sjoin("undo")),
558 558 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
559 559 tr = transaction.transaction(self.ui.warn, self.sopener,
560 560 self.sjoin("journal"),
561 561 aftertrans(renames))
562 562 self.transhandle = tr
563 563 return tr
564 564
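aftertrans (defined further down in this file, not shown in this excerpt) turns the queued rename pairs into a callback the transaction runs on a successful close; that is what makes journal and journal.dirstate become undo and undo.dirstate for rollback(). Its shape is roughly:

import os

def aftertrans(renames):
    # returns a callback performing the queued renames once the
    # transaction closes
    def a():
        for src, dest in renames:
            os.rename(src, dest)   # the real code uses util.rename
    return a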
565 565 def recover(self):
566 566 l = self.lock()
567 567 if os.path.exists(self.sjoin("journal")):
568 568 self.ui.status(_("rolling back interrupted transaction\n"))
569 569 transaction.rollback(self.sopener, self.sjoin("journal"))
570 570 self.reload()
571 571 return True
572 572 else:
573 573 self.ui.warn(_("no interrupted transaction available\n"))
574 574 return False
575 575
576 576 def rollback(self, wlock=None):
577 577 if not wlock:
578 578 wlock = self.wlock()
579 579 l = self.lock()
580 580 if os.path.exists(self.sjoin("undo")):
581 581 self.ui.status(_("rolling back last transaction\n"))
582 582 transaction.rollback(self.sopener, self.sjoin("undo"))
583 583 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
584 584 self.reload()
585 585 self.wreload()
586 586 else:
587 587 self.ui.warn(_("no rollback information available\n"))
588 588
589 589 def wreload(self):
590 590 self.dirstate.read()
591 591
592 592 def reload(self):
593 593 self.changelog.load()
594 594 self.manifest.load()
595 595 self.tagscache = None
596 596 self.nodetagscache = None
597 597
598 598 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
599 599 desc=None):
600 600 try:
601 601 l = lock.lock(lockname, 0, releasefn, desc=desc)
602 602 except lock.LockHeld, inst:
603 603 if not wait:
604 604 raise
605 605 self.ui.warn(_("waiting for lock on %s held by %r\n") %
606 606 (desc, inst.locker))
607 607 # default to 600 seconds timeout
608 608 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
609 609 releasefn, desc=desc)
610 610 if acquirefn:
611 611 acquirefn()
612 612 return l
613 613
614 614 def lock(self, wait=1):
615 615 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
616 616 desc=_('repository %s') % self.origroot)
617 617
618 618 def wlock(self, wait=1):
619 619 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
620 620 self.wreload,
621 621 desc=_('working directory of %s') % self.origroot)
622 622
623 623 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
624 624 """
625 625 commit an individual file as part of a larger transaction
626 626 """
627 627
628 628 t = self.wread(fn)
629 629 fl = self.file(fn)
630 630 fp1 = manifest1.get(fn, nullid)
631 631 fp2 = manifest2.get(fn, nullid)
632 632
633 633 meta = {}
634 634 cp = self.dirstate.copied(fn)
635 635 if cp:
636 636 # Mark the new revision of this file as a copy of another
637 637 # file. This copy data will effectively act as a parent
638 638 # of this new revision. If this is a merge, the first
639 639 # parent will be the nullid (meaning "look up the copy data")
640 640 # and the second one will be the other parent. For example:
641 641 #
642 642 # 0 --- 1 --- 3 rev1 changes file foo
643 643 # \ / rev2 renames foo to bar and changes it
644 644 # \- 2 -/ rev3 should have bar with all changes and
645 645 # should record that bar descends from
646 646 # bar in rev2 and foo in rev1
647 647 #
648 648 # this allows this merge to succeed:
649 649 #
650 650 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
651 651 # \ / merging rev3 and rev4 should use bar@rev2
652 652 # \- 2 --- 4 as the merge base
653 653 #
654 654 meta["copy"] = cp
655 655 if not manifest2: # not a branch merge
656 656 meta["copyrev"] = hex(manifest1.get(cp, nullid))
657 657 fp2 = nullid
658 658 elif fp2 != nullid: # copied on remote side
659 659 meta["copyrev"] = hex(manifest1.get(cp, nullid))
660 660 elif fp1 != nullid: # copied on local side, reversed
661 661 meta["copyrev"] = hex(manifest2.get(cp))
662 662 fp2 = fp1
663 663 else: # directory rename
664 664 meta["copyrev"] = hex(manifest1.get(cp, nullid))
665 665 self.ui.debug(_(" %s: copy %s:%s\n") %
666 666 (fn, cp, meta["copyrev"]))
667 667 fp1 = nullid
668 668 elif fp2 != nullid:
669 669 # is one parent an ancestor of the other?
670 670 fpa = fl.ancestor(fp1, fp2)
671 671 if fpa == fp1:
672 672 fp1, fp2 = fp2, nullid
673 673 elif fpa == fp2:
674 674 fp2 = nullid
675 675
676 676 # is the file unmodified from the parent? report existing entry
677 677 if fp2 == nullid and not fl.cmp(fp1, t):
678 678 return fp1
679 679
680 680 changelist.append(fn)
681 681 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
682 682
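The copy bookkeeping above boils down to two metadata entries stored with the new filelog revision. For a rename of foo to bar committed on one branch, the recorded dict looks like this (hex value illustrative):

meta = {
    "copy": "foo",          # path the content was copied/renamed from
    "copyrev": "ab" * 20,   # hex filenode of the copied foo (illustrative)
}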
683 683 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
684 684 if p1 is None:
685 685 p1, p2 = self.dirstate.parents()
686 686 return self.commit(files=files, text=text, user=user, date=date,
687 687 p1=p1, p2=p2, wlock=wlock, extra=extra)
688 688
689 689 def commit(self, files=None, text="", user=None, date=None,
690 690 match=util.always, force=False, lock=None, wlock=None,
691 691 force_editor=False, p1=None, p2=None, extra={}):
692 692
693 693 commit = []
694 694 remove = []
695 695 changed = []
696 696 use_dirstate = (p1 is None) # not rawcommit
697 697 extra = extra.copy()
698 698
699 699 if use_dirstate:
700 700 if files:
701 701 for f in files:
702 702 s = self.dirstate.state(f)
703 703 if s in 'nmai':
704 704 commit.append(f)
705 705 elif s == 'r':
706 706 remove.append(f)
707 707 else:
708 708 self.ui.warn(_("%s not tracked!\n") % f)
709 709 else:
710 710 changes = self.status(match=match)[:5]
711 711 modified, added, removed, deleted, unknown = changes
712 712 commit = modified + added
713 713 remove = removed
714 714 else:
715 715 commit = files
716 716
717 717 if use_dirstate:
718 718 p1, p2 = self.dirstate.parents()
719 719 update_dirstate = True
720 720 else:
721 721 p1, p2 = p1, p2 or nullid
722 722 update_dirstate = (self.dirstate.parents()[0] == p1)
723 723
724 724 c1 = self.changelog.read(p1)
725 725 c2 = self.changelog.read(p2)
726 726 m1 = self.manifest.read(c1[0]).copy()
727 727 m2 = self.manifest.read(c2[0])
728 728
729 729 if use_dirstate:
730 730 branchname = self.workingctx().branch()
731 731 try:
 732 732                 branchname = branchname.decode('UTF-8').encode('UTF-8') # round-trip to validate the encoding
733 733 except UnicodeDecodeError:
734 734 raise util.Abort(_('branch name not in UTF-8!'))
735 735 else:
736 736 branchname = ""
737 737
738 738 if use_dirstate:
739 739 oldname = c1[5].get("branch", "") # stored in UTF-8
740 740 if not commit and not remove and not force and p2 == nullid and \
741 741 branchname == oldname:
742 742 self.ui.status(_("nothing changed\n"))
743 743 return None
744 744
745 745 xp1 = hex(p1)
746 746 if p2 == nullid: xp2 = ''
747 747 else: xp2 = hex(p2)
748 748
749 749 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
750 750
751 751 if not wlock:
752 752 wlock = self.wlock()
753 753 if not lock:
754 754 lock = self.lock()
755 755 tr = self.transaction()
756 756
757 757 # check in files
758 758 new = {}
759 759 linkrev = self.changelog.count()
760 760 commit.sort()
761 761 is_exec = util.execfunc(self.root, m1.execf)
762 762 is_link = util.linkfunc(self.root, m1.linkf)
763 763 for f in commit:
764 764 self.ui.note(f + "\n")
765 765 try:
766 766 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
767 767 m1.set(f, is_exec(f), is_link(f))
768 768 except (OSError, IOError):
769 769 if use_dirstate:
770 770 self.ui.warn(_("trouble committing %s!\n") % f)
771 771 raise
772 772 else:
773 773 remove.append(f)
774 774
775 775 # update manifest
776 776 m1.update(new)
777 777 remove.sort()
778 778 removed = []
779 779
780 780 for f in remove:
781 781 if f in m1:
782 782 del m1[f]
783 783 removed.append(f)
784 784 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
785 785
786 786 # add changeset
787 787 new = new.keys()
788 788 new.sort()
789 789
790 790 user = user or self.ui.username()
791 791 if not text or force_editor:
792 792 edittext = []
793 793 if text:
794 794 edittext.append(text)
795 795 edittext.append("")
796 796 edittext.append("HG: user: %s" % user)
797 797 if p2 != nullid:
798 798 edittext.append("HG: branch merge")
799 799 if branchname:
800 800 edittext.append("HG: branch %s" % util.tolocal(branchname))
801 801 edittext.extend(["HG: changed %s" % f for f in changed])
802 802 edittext.extend(["HG: removed %s" % f for f in removed])
803 803 if not changed and not remove:
804 804 edittext.append("HG: no files changed")
805 805 edittext.append("")
806 806 # run editor in the repository root
807 807 olddir = os.getcwd()
808 808 os.chdir(self.root)
809 809 text = self.ui.edit("\n".join(edittext), user)
810 810 os.chdir(olddir)
811 811
812 812 lines = [line.rstrip() for line in text.rstrip().splitlines()]
813 813 while lines and not lines[0]:
814 814 del lines[0]
815 815 if not lines:
816 816 return None
817 817 text = '\n'.join(lines)
818 818 if branchname:
819 819 extra["branch"] = branchname
820 820 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
821 821 user, date, extra)
822 822 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
823 823 parent2=xp2)
824 824 tr.close()
825 825
826 826 if self.branchcache and "branch" in extra:
827 827 self.branchcache[util.tolocal(extra["branch"])] = n
828 828
829 829 if use_dirstate or update_dirstate:
830 830 self.dirstate.setparents(n)
831 831 if use_dirstate:
832 832 self.dirstate.update(new, "n")
833 833 self.dirstate.forget(removed)
834 834
835 835 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
836 836 return n
837 837
838 838 def walk(self, node=None, files=[], match=util.always, badmatch=None):
839 839 '''
840 840 walk recursively through the directory tree or a given
841 841 changeset, finding all files matched by the match
842 842 function
843 843
844 844 results are yielded in a tuple (src, filename), where src
845 845 is one of:
846 846 'f' the file was found in the directory tree
847 847 'm' the file was only in the dirstate and not in the tree
848 848 'b' file was not found and matched badmatch
849 849 '''
850 850
851 851 if node:
852 852 fdict = dict.fromkeys(files)
853 853 for fn in self.manifest.read(self.changelog.read(node)[0]):
854 854 for ffn in fdict:
855 855 # match if the file is the exact name or a directory
856 856 if ffn == fn or fn.startswith("%s/" % ffn):
857 857 del fdict[ffn]
858 858 break
859 859 if match(fn):
860 860 yield 'm', fn
861 861 for fn in fdict:
862 862 if badmatch and badmatch(fn):
863 863 if match(fn):
864 864 yield 'b', fn
865 865 else:
866 866 self.ui.warn(_('%s: No such file in rev %s\n') % (
867 867 util.pathto(self.getcwd(), fn), short(node)))
868 868 else:
869 869 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
870 870 yield src, fn
871 871
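A hypothetical caller of walk(), acting on files according to the classification the docstring above describes (repo, handle and note_missing are ours):

for src, fn in repo.walk(files=['src'], match=lambda f: f.endswith('.py')):
    if src == 'f':          # found in the directory tree
        handle(fn)          # hypothetical handler
    elif src == 'm':        # dirstate-only, not present in the tree
        note_missing(fn)    # hypothetical handler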
872 872 def status(self, node1=None, node2=None, files=[], match=util.always,
873 873 wlock=None, list_ignored=False, list_clean=False):
874 874 """return status of files between two nodes or node and working directory
875 875
876 876 If node1 is None, use the first dirstate parent instead.
877 877 If node2 is None, compare node1 with working directory.
878 878 """
879 879
880 880 def fcmp(fn, mf):
881 881 t1 = self.wread(fn)
882 882 return self.file(fn).cmp(mf.get(fn, nullid), t1)
883 883
884 884 def mfmatches(node):
885 885 change = self.changelog.read(node)
886 886 mf = self.manifest.read(change[0]).copy()
887 887 for fn in mf.keys():
888 888 if not match(fn):
889 889 del mf[fn]
890 890 return mf
891 891
892 892 modified, added, removed, deleted, unknown = [], [], [], [], []
893 893 ignored, clean = [], []
894 894
895 895 compareworking = False
896 896 if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
897 897 compareworking = True
898 898
899 899 if not compareworking:
900 900 # read the manifest from node1 before the manifest from node2,
901 901 # so that we'll hit the manifest cache if we're going through
902 902 # all the revisions in parent->child order.
903 903 mf1 = mfmatches(node1)
904 904
905 905 # are we comparing the working directory?
906 906 if not node2:
907 907 if not wlock:
908 908 try:
909 909 wlock = self.wlock(wait=0)
910 910 except lock.LockException:
911 911 wlock = None
912 912 (lookup, modified, added, removed, deleted, unknown,
913 913 ignored, clean) = self.dirstate.status(files, match,
914 914 list_ignored, list_clean)
915 915
916 916 # are we comparing working dir against its parent?
917 917 if compareworking:
918 918 if lookup:
919 919 # do a full compare of any files that might have changed
920 920 mf2 = mfmatches(self.dirstate.parents()[0])
921 921 for f in lookup:
922 922 if fcmp(f, mf2):
923 923 modified.append(f)
924 924 else:
925 925 clean.append(f)
926 926 if wlock is not None:
927 927 self.dirstate.update([f], "n")
928 928 else:
929 929 # we are comparing working dir against non-parent
930 930 # generate a pseudo-manifest for the working dir
931 931 # XXX: create it in dirstate.py ?
932 932 mf2 = mfmatches(self.dirstate.parents()[0])
933 933 is_exec = util.execfunc(self.root, mf2.execf)
934 934 is_link = util.linkfunc(self.root, mf2.linkf)
935 935 for f in lookup + modified + added:
936 936 mf2[f] = ""
937 937 mf2.set(f, is_exec(f), is_link(f))
938 938 for f in removed:
939 939 if f in mf2:
940 940 del mf2[f]
941 941 else:
942 942 # we are comparing two revisions
943 943 mf2 = mfmatches(node2)
944 944
945 945 if not compareworking:
946 946 # flush lists from dirstate before comparing manifests
947 947 modified, added, clean = [], [], []
948 948
949 949 # make sure to sort the files so we talk to the disk in a
950 950 # reasonable order
951 951 mf2keys = mf2.keys()
952 952 mf2keys.sort()
953 953 for fn in mf2keys:
954 954 if mf1.has_key(fn):
955 955 if mf1.flags(fn) != mf2.flags(fn) or \
956 956 (mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1))):
957 957 modified.append(fn)
958 958 elif list_clean:
959 959 clean.append(fn)
960 960 del mf1[fn]
961 961 else:
962 962 added.append(fn)
963 963
964 964 removed = mf1.keys()
965 965
966 966 # sort and return results:
967 967 for l in modified, added, removed, deleted, unknown, ignored, clean:
968 968 l.sort()
969 969 return (modified, added, removed, deleted, unknown, ignored, clean)
970 970
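The seven result lists come back in a fixed order; a caller that wants the clean list must ask for it, since list_clean defaults to False:

(modified, added, removed, deleted,
 unknown, ignored, clean) = repo.status(list_clean=True)
# ignored stays [] here because list_ignored was left False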
971 971 def add(self, list, wlock=None):
972 972 if not wlock:
973 973 wlock = self.wlock()
974 974 for f in list:
975 975 p = self.wjoin(f)
976 976 islink = os.path.islink(p)
977 977 if not islink and not os.path.exists(p):
978 978 self.ui.warn(_("%s does not exist!\n") % f)
979 979 elif not islink and not os.path.isfile(p):
980 980 self.ui.warn(_("%s not added: only files and symlinks "
981 981 "supported currently\n") % f)
982 982 elif self.dirstate.state(f) in 'an':
983 983 self.ui.warn(_("%s already tracked!\n") % f)
984 984 else:
985 985 self.dirstate.update([f], "a")
986 986
987 987 def forget(self, list, wlock=None):
988 988 if not wlock:
989 989 wlock = self.wlock()
990 990 for f in list:
991 991 if self.dirstate.state(f) not in 'ai':
992 992 self.ui.warn(_("%s not added!\n") % f)
993 993 else:
994 994 self.dirstate.forget([f])
995 995
996 996 def remove(self, list, unlink=False, wlock=None):
997 997 if unlink:
998 998 for f in list:
999 999 try:
1000 1000 util.unlink(self.wjoin(f))
1001 1001 except OSError, inst:
1002 1002 if inst.errno != errno.ENOENT:
1003 1003 raise
1004 1004 if not wlock:
1005 1005 wlock = self.wlock()
1006 1006 for f in list:
1007 1007 p = self.wjoin(f)
1008 1008 if os.path.exists(p):
1009 1009 self.ui.warn(_("%s still exists!\n") % f)
1010 1010 elif self.dirstate.state(f) == 'a':
1011 1011 self.dirstate.forget([f])
1012 1012 elif f not in self.dirstate:
1013 1013 self.ui.warn(_("%s not tracked!\n") % f)
1014 1014 else:
1015 1015 self.dirstate.update([f], "r")
1016 1016
1017 1017 def undelete(self, list, wlock=None):
1018 1018 p = self.dirstate.parents()[0]
1019 1019 mn = self.changelog.read(p)[0]
1020 1020 m = self.manifest.read(mn)
1021 1021 if not wlock:
1022 1022 wlock = self.wlock()
1023 1023 for f in list:
1024 1024 if self.dirstate.state(f) not in "r":
 1025 1025                 self.ui.warn(_("%s not removed!\n") % f)
1026 1026 else:
1027 1027 t = self.file(f).read(m[f])
1028 1028 self.wwrite(f, t, m.flags(f))
1029 1029 self.dirstate.update([f], "n")
1030 1030
1031 1031 def copy(self, source, dest, wlock=None):
1032 1032 p = self.wjoin(dest)
1033 1033 if not os.path.exists(p):
1034 1034 self.ui.warn(_("%s does not exist!\n") % dest)
1035 1035 elif not os.path.isfile(p):
1036 1036 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
1037 1037 else:
1038 1038 if not wlock:
1039 1039 wlock = self.wlock()
1040 1040 if self.dirstate.state(dest) == '?':
1041 1041 self.dirstate.update([dest], "a")
1042 1042 self.dirstate.copy(source, dest)
1043 1043
1044 1044 def heads(self, start=None):
1045 1045 heads = self.changelog.heads(start)
1046 1046 # sort the output in rev descending order
1047 1047 heads = [(-self.changelog.rev(h), h) for h in heads]
1048 1048 heads.sort()
1049 1049 return [n for (r, n) in heads]
1050 1050
1051 1051 def branches(self, nodes):
1052 1052 if not nodes:
1053 1053 nodes = [self.changelog.tip()]
1054 1054 b = []
1055 1055 for n in nodes:
1056 1056 t = n
1057 1057 while 1:
1058 1058 p = self.changelog.parents(n)
1059 1059 if p[1] != nullid or p[0] == nullid:
1060 1060 b.append((t, n, p[0], p[1]))
1061 1061 break
1062 1062 n = p[0]
1063 1063 return b
1064 1064
1065 1065 def between(self, pairs):
1066 1066 r = []
1067 1067
1068 1068 for top, bottom in pairs:
1069 1069 n, l, i = top, [], 0
1070 1070 f = 1
1071 1071
1072 1072 while n != bottom:
1073 1073 p = self.changelog.parents(n)[0]
1074 1074 if i == f:
1075 1075 l.append(n)
1076 1076 f = f * 2
1077 1077 n = p
1078 1078 i += 1
1079 1079
1080 1080 r.append(l)
1081 1081
1082 1082 return r
1083 1083
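The i == f bookkeeping in between() keeps exactly the nodes whose distance from top is a power of two, which is what lets findincoming bisect a branch segment in a logarithmic number of round trips. The same loop over a toy integer chain, where +1 stands in for "first parent of":

def powers_of_two_sample(top, bottom):
    n, l, i, f = top, [], 0, 1
    while n != bottom:
        if i == f:
            l.append(n)
            f *= 2
        n += 1      # stand-in for n = first-parent(n)
        i += 1
    return l

# powers_of_two_sample(0, 10) == [1, 2, 4, 8]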
1084 1084 def findincoming(self, remote, base=None, heads=None, force=False):
1085 1085 """Return list of roots of the subsets of missing nodes from remote
1086 1086
1087 1087 If base dict is specified, assume that these nodes and their parents
1088 1088 exist on the remote side and that no child of a node of base exists
1089 1089 in both remote and self.
 1090 1090         Furthermore, base will be updated to include the nodes that exist
 1091 1091         in both self and remote but none of whose children exists in both.
1092 1092 If a list of heads is specified, return only nodes which are heads
1093 1093 or ancestors of these heads.
1094 1094
1095 1095 All the ancestors of base are in self and in remote.
1096 1096 All the descendants of the list returned are missing in self.
1097 1097 (and so we know that the rest of the nodes are missing in remote, see
1098 1098 outgoing)
1099 1099 """
1100 1100 m = self.changelog.nodemap
1101 1101 search = []
1102 1102 fetch = {}
1103 1103 seen = {}
1104 1104 seenbranch = {}
1105 1105 if base == None:
1106 1106 base = {}
1107 1107
1108 1108 if not heads:
1109 1109 heads = remote.heads()
1110 1110
1111 1111 if self.changelog.tip() == nullid:
1112 1112 base[nullid] = 1
1113 1113 if heads != [nullid]:
1114 1114 return [nullid]
1115 1115 return []
1116 1116
1117 1117 # assume we're closer to the tip than the root
1118 1118 # and start by examining the heads
1119 1119 self.ui.status(_("searching for changes\n"))
1120 1120
1121 1121 unknown = []
1122 1122 for h in heads:
1123 1123 if h not in m:
1124 1124 unknown.append(h)
1125 1125 else:
1126 1126 base[h] = 1
1127 1127
1128 1128 if not unknown:
1129 1129 return []
1130 1130
1131 1131 req = dict.fromkeys(unknown)
1132 1132 reqcnt = 0
1133 1133
1134 1134 # search through remote branches
1135 1135 # a 'branch' here is a linear segment of history, with four parts:
1136 1136 # head, root, first parent, second parent
1137 1137 # (a branch always has two parents (or none) by definition)
1138 1138 unknown = remote.branches(unknown)
1139 1139 while unknown:
1140 1140 r = []
1141 1141 while unknown:
1142 1142 n = unknown.pop(0)
1143 1143 if n[0] in seen:
1144 1144 continue
1145 1145
1146 1146 self.ui.debug(_("examining %s:%s\n")
1147 1147 % (short(n[0]), short(n[1])))
1148 1148 if n[0] == nullid: # found the end of the branch
1149 1149 pass
1150 1150 elif n in seenbranch:
1151 1151 self.ui.debug(_("branch already found\n"))
1152 1152 continue
1153 1153 elif n[1] and n[1] in m: # do we know the base?
1154 1154 self.ui.debug(_("found incomplete branch %s:%s\n")
1155 1155 % (short(n[0]), short(n[1])))
1156 1156 search.append(n) # schedule branch range for scanning
1157 1157 seenbranch[n] = 1
1158 1158 else:
1159 1159 if n[1] not in seen and n[1] not in fetch:
1160 1160 if n[2] in m and n[3] in m:
1161 1161 self.ui.debug(_("found new changeset %s\n") %
1162 1162 short(n[1]))
1163 1163 fetch[n[1]] = 1 # earliest unknown
1164 1164 for p in n[2:4]:
1165 1165 if p in m:
1166 1166 base[p] = 1 # latest known
1167 1167
1168 1168 for p in n[2:4]:
1169 1169 if p not in req and p not in m:
1170 1170 r.append(p)
1171 1171 req[p] = 1
1172 1172 seen[n[0]] = 1
1173 1173
1174 1174 if r:
1175 1175 reqcnt += 1
1176 1176 self.ui.debug(_("request %d: %s\n") %
1177 1177 (reqcnt, " ".join(map(short, r))))
1178 1178 for p in xrange(0, len(r), 10):
1179 1179 for b in remote.branches(r[p:p+10]):
1180 1180 self.ui.debug(_("received %s:%s\n") %
1181 1181 (short(b[0]), short(b[1])))
1182 1182 unknown.append(b)
1183 1183
1184 1184 # do binary search on the branches we found
1185 1185 while search:
1186 1186 n = search.pop(0)
1187 1187 reqcnt += 1
1188 1188 l = remote.between([(n[0], n[1])])[0]
1189 1189 l.append(n[1])
1190 1190 p = n[0]
1191 1191 f = 1
1192 1192 for i in l:
1193 1193 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1194 1194 if i in m:
1195 1195 if f <= 2:
1196 1196 self.ui.debug(_("found new branch changeset %s\n") %
1197 1197 short(p))
1198 1198 fetch[p] = 1
1199 1199 base[i] = 1
1200 1200 else:
1201 1201 self.ui.debug(_("narrowed branch search to %s:%s\n")
1202 1202 % (short(p), short(i)))
1203 1203 search.append((p, i))
1204 1204 break
1205 1205 p, f = i, f * 2
1206 1206
1207 1207 # sanity check our fetch list
1208 1208 for f in fetch.keys():
1209 1209 if f in m:
 1210 1210                 raise repo.RepoError(_("already have changeset ") + short(f))
1211 1211
1212 1212 if base.keys() == [nullid]:
1213 1213 if force:
1214 1214 self.ui.warn(_("warning: repository is unrelated\n"))
1215 1215 else:
1216 1216 raise util.Abort(_("repository is unrelated"))
1217 1217
1218 1218 self.ui.debug(_("found new changesets starting at ") +
1219 1219 " ".join([short(f) for f in fetch]) + "\n")
1220 1220
1221 1221 self.ui.debug(_("%d total queries\n") % reqcnt)
1222 1222
1223 1223 return fetch.keys()
1224 1224
1225 1225 def findoutgoing(self, remote, base=None, heads=None, force=False):
1226 1226 """Return list of nodes that are roots of subsets not in remote
1227 1227
1228 1228 If base dict is specified, assume that these nodes and their parents
1229 1229 exist on the remote side.
1230 1230 If a list of heads is specified, return only nodes which are heads
1231 1231 or ancestors of these heads, and return a second element which
1232 1232 contains all remote heads which get new children.
1233 1233 """
1234 1234 if base == None:
1235 1235 base = {}
1236 1236 self.findincoming(remote, base, heads, force=force)
1237 1237
1238 1238 self.ui.debug(_("common changesets up to ")
1239 1239 + " ".join(map(short, base.keys())) + "\n")
1240 1240
1241 1241 remain = dict.fromkeys(self.changelog.nodemap)
1242 1242
1243 1243 # prune everything remote has from the tree
1244 1244 del remain[nullid]
1245 1245 remove = base.keys()
1246 1246 while remove:
1247 1247 n = remove.pop(0)
1248 1248 if n in remain:
1249 1249 del remain[n]
1250 1250 for p in self.changelog.parents(n):
1251 1251 remove.append(p)
1252 1252
1253 1253 # find every node whose parents have been pruned
1254 1254 subset = []
1255 1255 # find every remote head that will get new children
1256 1256 updated_heads = {}
1257 1257 for n in remain:
1258 1258 p1, p2 = self.changelog.parents(n)
1259 1259 if p1 not in remain and p2 not in remain:
1260 1260 subset.append(n)
1261 1261 if heads:
1262 1262 if p1 in heads:
1263 1263 updated_heads[p1] = True
1264 1264 if p2 in heads:
1265 1265 updated_heads[p2] = True
1266 1266
1267 1267 # this is the set of all roots we have to push
1268 1268 if heads:
1269 1269 return subset, updated_heads.keys()
1270 1270 else:
1271 1271 return subset
1272 1272
1273 1273 def pull(self, remote, heads=None, force=False, lock=None):
1274 1274 mylock = False
1275 1275 if not lock:
1276 1276 lock = self.lock()
1277 1277 mylock = True
1278 1278
1279 1279 try:
1280 1280 fetch = self.findincoming(remote, force=force)
1281 1281 if fetch == [nullid]:
1282 1282 self.ui.status(_("requesting all changes\n"))
1283 1283
1284 1284 if not fetch:
1285 1285 self.ui.status(_("no changes found\n"))
1286 1286 return 0
1287 1287
1288 1288 if heads is None:
1289 1289 cg = remote.changegroup(fetch, 'pull')
1290 1290 else:
1291 1291 if 'changegroupsubset' not in remote.capabilities:
 1292 1292                     raise util.Abort(_("Partial pull cannot be done because the other repository doesn't support changegroupsubset."))
1293 1293 cg = remote.changegroupsubset(fetch, heads, 'pull')
1294 1294 return self.addchangegroup(cg, 'pull', remote.url())
1295 1295 finally:
1296 1296 if mylock:
1297 1297 lock.release()
1298 1298
1299 1299 def push(self, remote, force=False, revs=None):
1300 1300 # there are two ways to push to remote repo:
1301 1301 #
1302 1302 # addchangegroup assumes local user can lock remote
1303 1303 # repo (local filesystem, old ssh servers).
1304 1304 #
1305 1305 # unbundle assumes local user cannot lock remote repo (new ssh
1306 1306 # servers, http servers).
1307 1307
1308 1308 if remote.capable('unbundle'):
1309 1309 return self.push_unbundle(remote, force, revs)
1310 1310 return self.push_addchangegroup(remote, force, revs)
1311 1311
1312 1312 def prepush(self, remote, force, revs):
1313 1313 base = {}
1314 1314 remote_heads = remote.heads()
1315 1315 inc = self.findincoming(remote, base, remote_heads, force=force)
1316 1316
1317 1317 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1318 1318 if revs is not None:
1319 1319 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1320 1320 else:
1321 1321 bases, heads = update, self.changelog.heads()
1322 1322
1323 1323 if not bases:
1324 1324 self.ui.status(_("no changes found\n"))
1325 1325 return None, 1
1326 1326 elif not force:
1327 1327 # check if we're creating new remote heads
1328 1328 # to be a remote head after push, node must be either
1329 1329 # - unknown locally
1330 1330 # - a local outgoing head descended from update
1331 1331 # - a remote head that's known locally and not
1332 1332 # ancestral to an outgoing head
1333 1333
1334 1334 warn = 0
1335 1335
1336 1336 if remote_heads == [nullid]:
1337 1337 warn = 0
1338 1338 elif not revs and len(heads) > len(remote_heads):
1339 1339 warn = 1
1340 1340 else:
1341 1341 newheads = list(heads)
1342 1342 for r in remote_heads:
1343 1343 if r in self.changelog.nodemap:
1344 1344 desc = self.changelog.heads(r, heads)
1345 1345 l = [h for h in heads if h in desc]
1346 1346 if not l:
1347 1347 newheads.append(r)
1348 1348 else:
1349 1349 newheads.append(r)
1350 1350 if len(newheads) > len(remote_heads):
1351 1351 warn = 1
1352 1352
1353 1353 if warn:
1354 1354 self.ui.warn(_("abort: push creates new remote branches!\n"))
1355 1355 self.ui.status(_("(did you forget to merge?"
1356 1356 " use push -f to force)\n"))
1357 1357 return None, 1
1358 1358 elif inc:
1359 1359 self.ui.warn(_("note: unsynced remote changes!\n"))
1360 1360
1361 1361
1362 1362 if revs is None:
1363 1363 cg = self.changegroup(update, 'push')
1364 1364 else:
1365 1365 cg = self.changegroupsubset(update, revs, 'push')
1366 1366 return cg, remote_heads
1367 1367
1368 1368 def push_addchangegroup(self, remote, force, revs):
1369 1369 lock = remote.lock()
1370 1370
1371 1371 ret = self.prepush(remote, force, revs)
1372 1372 if ret[0] is not None:
1373 1373 cg, remote_heads = ret
1374 1374 return remote.addchangegroup(cg, 'push', self.url())
1375 1375 return ret[1]
1376 1376
1377 1377 def push_unbundle(self, remote, force, revs):
1378 1378 # local repo finds heads on server, finds out what revs it
1379 1379 # must push. once revs transferred, if server finds it has
1380 1380 # different heads (someone else won commit/push race), server
1381 1381 # aborts.
1382 1382
1383 1383 ret = self.prepush(remote, force, revs)
1384 1384 if ret[0] is not None:
1385 1385 cg, remote_heads = ret
1386 1386 if force: remote_heads = ['force']
1387 1387 return remote.unbundle(cg, remote_heads, 'push')
1388 1388 return ret[1]
1389 1389
1390 1390 def changegroupinfo(self, nodes):
1391 1391 self.ui.note(_("%d changesets found\n") % len(nodes))
1392 1392 if self.ui.debugflag:
1393 1393 self.ui.debug(_("List of changesets:\n"))
1394 1394 for node in nodes:
1395 1395 self.ui.debug("%s\n" % hex(node))
1396 1396
1397 1397 def changegroupsubset(self, bases, heads, source):
1398 1398 """This function generates a changegroup consisting of all the nodes
1399 1399 that are descendents of any of the bases, and ancestors of any of
1400 1400 the heads.
1401 1401
1402 1402 It is fairly complex as determining which filenodes and which
1403 1403 manifest nodes need to be included for the changeset to be complete
1404 1404 is non-trivial.
1405 1405
1406 1406 Another wrinkle is doing the reverse, figuring out which changeset in
1407 1407 the changegroup a particular filenode or manifestnode belongs to."""
1408 1408
1409 1409 self.hook('preoutgoing', throw=True, source=source)
1410 1410
1411 1411 # Set up some initial variables
1412 1412 # Make it easy to refer to self.changelog
1413 1413 cl = self.changelog
1414 1414 # msng is short for missing - compute the list of changesets in this
1415 1415 # changegroup.
1416 1416 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1417 1417 self.changegroupinfo(msng_cl_lst)
1418 1418 # Some bases may turn out to be superfluous, and some heads may be
1419 1419 # too. nodesbetween will return the minimal set of bases and heads
1420 1420 # necessary to re-create the changegroup.
1421 1421
1422 1422 # Known heads are the list of heads that it is assumed the recipient
1423 1423 # of this changegroup will know about.
1424 1424 knownheads = {}
1425 1425 # We assume that all parents of bases are known heads.
1426 1426 for n in bases:
1427 1427 for p in cl.parents(n):
1428 1428 if p != nullid:
1429 1429 knownheads[p] = 1
1430 1430 knownheads = knownheads.keys()
1431 1431 if knownheads:
1432 1432 # Now that we know what heads are known, we can compute which
1433 1433 # changesets are known. The recipient must know about all
1434 1434 # changesets required to reach the known heads from the null
1435 1435 # changeset.
1436 1436 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1437 1437 junk = None
1438 1438 # Transform the list into an ersatz set.
1439 1439 has_cl_set = dict.fromkeys(has_cl_set)
1440 1440 else:
1441 1441 # If there were no known heads, the recipient cannot be assumed to
1442 1442 # know about any changesets.
1443 1443 has_cl_set = {}
1444 1444
1445 1445 # Make it easy to refer to self.manifest
1446 1446 mnfst = self.manifest
1447 1447 # We don't know which manifests are missing yet
1448 1448 msng_mnfst_set = {}
1449 1449 # Nor do we know which filenodes are missing.
1450 1450 msng_filenode_set = {}
1451 1451
1452 1452 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1453 1453 junk = None
1454 1454
1455 1455 # A changeset always belongs to itself, so the changenode lookup
1456 1456 # function for a changenode is identity.
1457 1457 def identity(x):
1458 1458 return x
1459 1459
1460 1460 # A function generating function. Sets up an environment for the
1461 1461 # inner function.
1462 1462 def cmp_by_rev_func(revlog):
1463 1463 # Compare two nodes by their revision number in the environment's
1464 1464 # revision history. Since the revision number both represents the
1465 1465 # most efficient order to read the nodes in, and represents a
1466 1466 # topological sorting of the nodes, this function is often useful.
1467 1467 def cmp_by_rev(a, b):
1468 1468 return cmp(revlog.rev(a), revlog.rev(b))
1469 1469 return cmp_by_rev
1470 1470
1471 1471 # If we determine that a particular file or manifest node must be a
1472 1472 # node that the recipient of the changegroup will already have, we can
1473 1473 # also assume the recipient will have all the parents. This function
1474 1474 # prunes them from the set of missing nodes.
1475 1475 def prune_parents(revlog, hasset, msngset):
1476 1476 haslst = hasset.keys()
1477 1477 haslst.sort(cmp_by_rev_func(revlog))
1478 1478 for node in haslst:
1479 1479 parentlst = [p for p in revlog.parents(node) if p != nullid]
1480 1480 while parentlst:
1481 1481 n = parentlst.pop()
1482 1482 if n not in hasset:
1483 1483 hasset[n] = 1
1484 1484 p = [p for p in revlog.parents(n) if p != nullid]
1485 1485 parentlst.extend(p)
1486 1486 for n in hasset:
1487 1487 msngset.pop(n, None)
1488 1488
1489 1489 # This is a function generating function used to set up an environment
1490 1490 # for the inner function to execute in.
1491 1491 def manifest_and_file_collector(changedfileset):
1492 1492 # This is an information gathering function that gathers
1493 1493 # information from each changeset node that goes out as part of
1494 1494 # the changegroup. The information gathered is a list of which
1495 1495 # manifest nodes are potentially required (the recipient may
1496 1496 # already have them) and total list of all files which were
1497 1497 # changed in any changeset in the changegroup.
1498 1498 #
 1499 1499         # We also remember the first changenode that referenced each
 1500 1500         # manifest, so we can later determine which changenode 'owns'
 1501 1501         # the manifest.
1502 1502 def collect_manifests_and_files(clnode):
1503 1503 c = cl.read(clnode)
1504 1504 for f in c[3]:
1505 1505 # This is to make sure we only have one instance of each
1506 1506 # filename string for each filename.
1507 1507 changedfileset.setdefault(f, f)
1508 1508 msng_mnfst_set.setdefault(c[0], clnode)
1509 1509 return collect_manifests_and_files
1510 1510
1511 1511 # Figure out which manifest nodes (of the ones we think might be part
1512 1512 # of the changegroup) the recipient must know about and remove them
1513 1513 # from the changegroup.
1514 1514 def prune_manifests():
1515 1515 has_mnfst_set = {}
1516 1516 for n in msng_mnfst_set:
1517 1517 # If a 'missing' manifest thinks it belongs to a changenode
1518 1518 # the recipient is assumed to have, obviously the recipient
1519 1519 # must have that manifest.
1520 1520 linknode = cl.node(mnfst.linkrev(n))
1521 1521 if linknode in has_cl_set:
1522 1522 has_mnfst_set[n] = 1
1523 1523 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1524 1524
1525 1525 # Use the information collected in collect_manifests_and_files to say
1526 1526 # which changenode any manifestnode belongs to.
1527 1527 def lookup_manifest_link(mnfstnode):
1528 1528 return msng_mnfst_set[mnfstnode]
1529 1529
1530 1530 # A function generating function that sets up the initial environment
 1531 1531         # for the inner function.
1532 1532 def filenode_collector(changedfiles):
1533 1533 next_rev = [0]
1534 1534 # This gathers information from each manifestnode included in the
1535 1535 # changegroup about which filenodes the manifest node references
1536 1536 # so we can include those in the changegroup too.
1537 1537 #
1538 1538 # It also remembers which changenode each filenode belongs to. It
 1539 1539             # does this by assuming that a filenode belongs to the changenode
 1540 1540             # that the first manifest referencing it belongs to.
1541 1541 def collect_msng_filenodes(mnfstnode):
1542 1542 r = mnfst.rev(mnfstnode)
1543 1543 if r == next_rev[0]:
1544 1544 # If the last rev we looked at was the one just previous,
1545 1545 # we only need to see a diff.
1546 1546 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1547 1547 # For each line in the delta
1548 1548 for dline in delta.splitlines():
1549 1549 # get the filename and filenode for that line
1550 1550 f, fnode = dline.split('\0')
1551 1551 fnode = bin(fnode[:40])
1552 1552 f = changedfiles.get(f, None)
1553 1553 # And if the file is in the list of files we care
1554 1554 # about.
1555 1555 if f is not None:
1556 1556 # Get the changenode this manifest belongs to
1557 1557 clnode = msng_mnfst_set[mnfstnode]
1558 1558 # Create the set of filenodes for the file if
1559 1559 # there isn't one already.
1560 1560 ndset = msng_filenode_set.setdefault(f, {})
1561 1561 # And set the filenode's changelog node to the
1562 1562 # manifest's if it hasn't been set already.
1563 1563 ndset.setdefault(fnode, clnode)
1564 1564 else:
1565 1565 # Otherwise we need a full manifest.
1566 1566 m = mnfst.read(mnfstnode)
 1567 1567                     # For every file we care about.
1568 1568 for f in changedfiles:
1569 1569 fnode = m.get(f, None)
1570 1570 # If it's in the manifest
1571 1571 if fnode is not None:
1572 1572 # See comments above.
1573 1573 clnode = msng_mnfst_set[mnfstnode]
1574 1574 ndset = msng_filenode_set.setdefault(f, {})
1575 1575 ndset.setdefault(fnode, clnode)
1576 1576 # Remember the revision we hope to see next.
1577 1577 next_rev[0] = r + 1
1578 1578 return collect_msng_filenodes
1579 1579
 1580 1580         # We have a list of filenodes we think we need for a file; let's remove
 1581 1581         # all those we know the recipient must have.
1582 1582 def prune_filenodes(f, filerevlog):
1583 1583 msngset = msng_filenode_set[f]
1584 1584 hasset = {}
1585 1585 # If a 'missing' filenode thinks it belongs to a changenode we
1586 1586 # assume the recipient must have, then the recipient must have
1587 1587 # that filenode.
1588 1588 for n in msngset:
1589 1589 clnode = cl.node(filerevlog.linkrev(n))
1590 1590 if clnode in has_cl_set:
1591 1591 hasset[n] = 1
1592 1592 prune_parents(filerevlog, hasset, msngset)
1593 1593
 1594 1594         # A function-generating function that sets up a context for the
1595 1595 # inner function.
1596 1596 def lookup_filenode_link_func(fname):
1597 1597 msngset = msng_filenode_set[fname]
1598 1598 # Lookup the changenode the filenode belongs to.
1599 1599 def lookup_filenode_link(fnode):
1600 1600 return msngset[fnode]
1601 1601 return lookup_filenode_link
1602 1602
 1603 1603         # Now that we have all these utility functions to help out and
1604 1604 # logically divide up the task, generate the group.
1605 1605 def gengroup():
1606 1606 # The set of changed files starts empty.
1607 1607 changedfiles = {}
1608 1608 # Create a changenode group generator that will call our functions
1609 1609 # back to lookup the owning changenode and collect information.
1610 1610 group = cl.group(msng_cl_lst, identity,
1611 1611 manifest_and_file_collector(changedfiles))
1612 1612 for chnk in group:
1613 1613 yield chnk
1614 1614
1615 1615 # The list of manifests has been collected by the generator
1616 1616 # calling our functions back.
1617 1617 prune_manifests()
1618 1618 msng_mnfst_lst = msng_mnfst_set.keys()
1619 1619 # Sort the manifestnodes by revision number.
1620 1620 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1621 1621 # Create a generator for the manifestnodes that calls our lookup
1622 1622 # and data collection functions back.
1623 1623 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1624 1624 filenode_collector(changedfiles))
1625 1625 for chnk in group:
1626 1626 yield chnk
1627 1627
1628 1628 # These are no longer needed, dereference and toss the memory for
1629 1629 # them.
1630 1630 msng_mnfst_lst = None
1631 1631 msng_mnfst_set.clear()
1632 1632
1633 1633 changedfiles = changedfiles.keys()
1634 1634 changedfiles.sort()
1635 1635 # Go through all our files in order sorted by name.
1636 1636 for fname in changedfiles:
1637 1637 filerevlog = self.file(fname)
1638 1638 # Toss out the filenodes that the recipient isn't really
1639 1639 # missing.
1640 1640 if msng_filenode_set.has_key(fname):
1641 1641 prune_filenodes(fname, filerevlog)
1642 1642 msng_filenode_lst = msng_filenode_set[fname].keys()
1643 1643 else:
1644 1644 msng_filenode_lst = []
1645 1645 # If any filenodes are left, generate the group for them,
1646 1646 # otherwise don't bother.
1647 1647 if len(msng_filenode_lst) > 0:
1648 1648 yield changegroup.genchunk(fname)
1649 1649 # Sort the filenodes by their revision number.
1650 1650 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1651 1651 # Create a group generator and only pass in a changenode
1652 1652 # lookup function as we need to collect no information
1653 1653 # from filenodes.
1654 1654 group = filerevlog.group(msng_filenode_lst,
1655 1655 lookup_filenode_link_func(fname))
1656 1656 for chnk in group:
1657 1657 yield chnk
1658 1658 if msng_filenode_set.has_key(fname):
1659 1659 # Don't need this anymore, toss it to free memory.
1660 1660 del msng_filenode_set[fname]
1661 1661 # Signal that no more groups are left.
1662 1662 yield changegroup.closechunk()
1663 1663
1664 1664 if msng_cl_lst:
1665 1665 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1666 1666
1667 1667 return util.chunkbuffer(gengroup())
1668 1668
1669 1669 def changegroup(self, basenodes, source):
1670 1670 """Generate a changegroup of all nodes that we have that a recipient
1671 1671 doesn't.
1672 1672
1673 1673 This is much easier than the previous function as we can assume that
1674 1674 the recipient has any changenode we aren't sending them."""
1675 1675
1676 1676 self.hook('preoutgoing', throw=True, source=source)
1677 1677
1678 1678 cl = self.changelog
1679 1679 nodes = cl.nodesbetween(basenodes, None)[0]
1680 1680 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1681 1681 self.changegroupinfo(nodes)
1682 1682
1683 1683 def identity(x):
1684 1684 return x
1685 1685
1686 1686 def gennodelst(revlog):
1687 1687 for r in xrange(0, revlog.count()):
1688 1688 n = revlog.node(r)
1689 1689 if revlog.linkrev(n) in revset:
1690 1690 yield n
1691 1691
1692 1692 def changed_file_collector(changedfileset):
1693 1693 def collect_changed_files(clnode):
1694 1694 c = cl.read(clnode)
1695 1695 for fname in c[3]:
1696 1696 changedfileset[fname] = 1
1697 1697 return collect_changed_files
1698 1698
1699 1699 def lookuprevlink_func(revlog):
1700 1700 def lookuprevlink(n):
1701 1701 return cl.node(revlog.linkrev(n))
1702 1702 return lookuprevlink
1703 1703
1704 1704 def gengroup():
1705 1705 # construct a list of all changed files
1706 1706 changedfiles = {}
1707 1707
1708 1708 for chnk in cl.group(nodes, identity,
1709 1709 changed_file_collector(changedfiles)):
1710 1710 yield chnk
1711 1711 changedfiles = changedfiles.keys()
1712 1712 changedfiles.sort()
1713 1713
1714 1714 mnfst = self.manifest
1715 1715 nodeiter = gennodelst(mnfst)
1716 1716 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1717 1717 yield chnk
1718 1718
1719 1719 for fname in changedfiles:
1720 1720 filerevlog = self.file(fname)
1721 1721 nodeiter = gennodelst(filerevlog)
1722 1722 nodeiter = list(nodeiter)
1723 1723 if nodeiter:
1724 1724 yield changegroup.genchunk(fname)
1725 1725 lookup = lookuprevlink_func(filerevlog)
1726 1726 for chnk in filerevlog.group(nodeiter, lookup):
1727 1727 yield chnk
1728 1728
1729 1729 yield changegroup.closechunk()
1730 1730
1731 1731 if nodes:
1732 1732 self.hook('outgoing', node=hex(nodes[0]), source=source)
1733 1733
1734 1734 return util.chunkbuffer(gengroup())
1735 1735
1736 1736 def addchangegroup(self, source, srctype, url):
1737 1737 """add changegroup to repo.
1738 1738
1739 1739 return values:
1740 1740 - nothing changed or no source: 0
1741 1741 - more heads than before: 1+added heads (2..n)
1742 1742 - fewer heads than before: -1-removed heads (-2..-n)
1743 1743 - number of heads stays the same: 1
1744 1744 """
1745 1745 def csmap(x):
1746 1746 self.ui.debug(_("add changeset %s\n") % short(x))
1747 1747 return cl.count()
1748 1748
1749 1749 def revmap(x):
1750 1750 return cl.rev(x)
1751 1751
1752 1752 if not source:
1753 1753 return 0
1754 1754
1755 1755 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1756 1756
1757 1757 changesets = files = revisions = 0
1758 1758
1759 1759 tr = self.transaction()
1760 1760
1761 1761 # write changelog data to temp files so concurrent readers will not see
1762 1762 # an inconsistent view
1763 1763 cl = None
1764 1764 try:
1765 1765 cl = appendfile.appendchangelog(self.sopener,
1766 1766 self.changelog.version)
1767 1767
1768 1768 oldheads = len(cl.heads())
1769 1769
1770 1770 # pull off the changeset group
1771 1771 self.ui.status(_("adding changesets\n"))
1772 1772 cor = cl.count() - 1
1773 1773 chunkiter = changegroup.chunkiter(source)
1774 1774 if cl.addgroup(chunkiter, csmap, tr, 1) is None:
1775 1775 raise util.Abort(_("received changelog group is empty"))
1776 1776 cnr = cl.count() - 1
1777 1777 changesets = cnr - cor
1778 1778
1779 1779 # pull off the manifest group
1780 1780 self.ui.status(_("adding manifests\n"))
1781 1781 chunkiter = changegroup.chunkiter(source)
1782 1782 # no need to check for empty manifest group here:
1783 1783 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1784 1784 # no new manifest will be created and the manifest group will
1785 1785 # be empty during the pull
1786 1786 self.manifest.addgroup(chunkiter, revmap, tr)
1787 1787
1788 1788 # process the files
1789 1789 self.ui.status(_("adding file changes\n"))
1790 1790 while 1:
1791 1791 f = changegroup.getchunk(source)
1792 1792 if not f:
1793 1793 break
1794 1794 self.ui.debug(_("adding %s revisions\n") % f)
1795 1795 fl = self.file(f)
1796 1796 o = fl.count()
1797 1797 chunkiter = changegroup.chunkiter(source)
1798 1798 if fl.addgroup(chunkiter, revmap, tr) is None:
1799 1799 raise util.Abort(_("received file revlog group is empty"))
1800 1800 revisions += fl.count() - o
1801 1801 files += 1
1802 1802
1803 1803 cl.writedata()
1804 1804 finally:
1805 1805 if cl:
1806 1806 cl.cleanup()
1807 1807
1808 1808 # make changelog see real files again
1809 1809 self.changelog = changelog.changelog(self.sopener,
1810 1810 self.changelog.version)
1811 1811 self.changelog.checkinlinesize(tr)
1812 1812
1813 1813 newheads = len(self.changelog.heads())
1814 1814 heads = ""
1815 1815 if oldheads and newheads != oldheads:
1816 1816 heads = _(" (%+d heads)") % (newheads - oldheads)
1817 1817
1818 1818 self.ui.status(_("added %d changesets"
1819 1819 " with %d changes to %d files%s\n")
1820 1820 % (changesets, revisions, files, heads))
1821 1821
1822 1822 if changesets > 0:
1823 1823 self.hook('pretxnchangegroup', throw=True,
1824 1824 node=hex(self.changelog.node(cor+1)), source=srctype,
1825 1825 url=url)
1826 1826
1827 1827 tr.close()
1828 1828
1829 1829 if changesets > 0:
1830 1830 self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
1831 1831 source=srctype, url=url)
1832 1832
1833 1833 for i in xrange(cor + 1, cnr + 1):
1834 1834 self.hook("incoming", node=hex(self.changelog.node(i)),
1835 1835 source=srctype, url=url)
1836 1836
1837 1837 # never return 0 here:
1838 1838 if newheads < oldheads:
1839 1839 return newheads - oldheads - 1
1840 1840 else:
1841 1841 return newheads - oldheads + 1
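
# A minimal sketch, not part of the module, of how a caller might decode
# the return value documented above; the helper name is hypothetical.
def _describe_addchangegroup_result(ret):
    if ret == 0:
        # nothing changed, or no source was given
        return 'no changes'
    if ret > 0:
        # 1 means the head count is unchanged; 1 + n means n heads added
        return '%d head(s) added' % (ret - 1)
    # -1 - n means n heads were removed
    return '%d head(s) removed' % (-ret - 1)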
1842 1842
1843 1843
1844 1844 def stream_in(self, remote):
1845 1845 fp = remote.stream_out()
1846 1846 l = fp.readline()
1847 1847 try:
1848 1848 resp = int(l)
1849 1849 except ValueError:
1850 1850 raise util.UnexpectedOutput(
1851 1851 _('Unexpected response from remote server:'), l)
1852 1852 if resp == 1:
1853 1853 raise util.Abort(_('operation forbidden by server'))
1854 1854 elif resp == 2:
1855 1855 raise util.Abort(_('locking the remote repository failed'))
1856 1856 elif resp != 0:
1857 1857 raise util.Abort(_('the server sent an unknown error code'))
1858 1858 self.ui.status(_('streaming all changes\n'))
1859 1859 l = fp.readline()
1860 1860 try:
1861 1861 total_files, total_bytes = map(int, l.split(' ', 1))
1862 1862 except (ValueError, TypeError):
1863 1863 raise util.UnexpectedOutput(
1864 1864 _('Unexpected response from remote server:'), l)
1865 1865 self.ui.status(_('%d files to transfer, %s of data\n') %
1866 1866 (total_files, util.bytecount(total_bytes)))
1867 1867 start = time.time()
1868 1868 for i in xrange(total_files):
1869 1869 # XXX doesn't support '\n' or '\r' in filenames
1870 1870 l = fp.readline()
1871 1871 try:
1872 1872 name, size = l.split('\0', 1)
1873 1873 size = int(size)
1874 1874 except (ValueError, TypeError):
1875 1875 raise util.UnexpectedOutput(
1876 1876 _('Unexpected response from remote server:'), l)
1877 1877 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1878 1878 ofp = self.sopener(name, 'w')
1879 1879 for chunk in util.filechunkiter(fp, limit=size):
1880 1880 ofp.write(chunk)
1881 1881 ofp.close()
1882 1882 elapsed = time.time() - start
1883 if elapsed <= 0:
1884 elapsed = 0.001
1883 1885 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1884 1886 (util.bytecount(total_bytes), elapsed,
1885 1887 util.bytecount(total_bytes / elapsed)))
1886 1888 self.reload()
1887 1889 return len(self.heads()) + 1
1888 1890
1889 1891 def clone(self, remote, heads=[], stream=False):
1890 1892 '''clone remote repository.
1891 1893
1892 1894 keyword arguments:
1893 1895 heads: list of revs to clone (forces use of pull)
1894 1896 stream: use streaming clone if possible'''
1895 1897
1896 1898 # now, all clients that can request uncompressed clones can
1897 1899 # read repo formats supported by all servers that can serve
1898 1900 # them.
1899 1901
1900 1902 # if revlog format changes, client will have to check version
1901 1903 # and format flags on "stream" capability, and use
1902 1904 # uncompressed only if compatible.
1903 1905
1904 1906 if stream and not heads and remote.capable('stream'):
1905 1907 return self.stream_in(remote)
1906 1908 return self.pull(remote, heads)
1907 1909
1908 1910 # used to avoid circular references so destructors work
1909 1911 def aftertrans(files):
1910 1912 renamefiles = [tuple(t) for t in files]
1911 1913 def a():
1912 1914 for src, dest in renamefiles:
1913 1915 util.rename(src, dest)
1914 1916 return a
1915 1917
1916 1918 def instance(ui, path, create):
1917 1919 return localrepository(ui, util.drop_scheme('file', path), create)
1918 1920
1919 1921 def islocal(path):
1920 1922 return True
@@ -1,95 +1,95 b''
1 1 # streamclone.py - streaming clone server support for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from i18n import _
9 9 import os, stat, util, lock
10 10
11 11 # if server supports streaming clone, it advertises "stream"
12 12 # capability with value that is version+flags of repo it is serving.
13 13 # client only streams if it can read that repo format.
14 14
15 15 def walkrepo(root):
16 16 '''iterate over metadata files in repository.
17 17 walk in natural (sorted) order.
18 18 yields 2-tuples: name of .d or .i file, size of file.'''
19 19
20 20 strip_count = len(root) + len(os.sep)
21 21 def walk(path, recurse):
22 22 ents = os.listdir(path)
23 23 ents.sort()
24 24 for e in ents:
25 25 pe = os.path.join(path, e)
26 26 st = os.lstat(pe)
27 27 if stat.S_ISDIR(st.st_mode):
28 28 if recurse:
29 29 for x in walk(pe, True):
30 30 yield x
31 31 else:
32 32 if not stat.S_ISREG(st.st_mode) or len(e) < 2:
33 33 continue
34 34 sfx = e[-2:]
35 35 if sfx in ('.d', '.i'):
36 36 yield pe[strip_count:], st.st_size
37 37 # write file data first
38 38 for x in walk(os.path.join(root, 'data'), True):
39 39 yield x
40 40 # write manifest before changelog
41 41 meta = list(walk(root, False))
42 42 meta.sort()
43 43 meta.reverse()
44 44 for x in meta:
45 45 yield x
46 46
47 47 # stream file format is simple.
48 48 #
49 49 # server writes out line that says how many files, how many total
50 50 # bytes. separator is ascii space, byte counts are strings.
51 51 #
52 52 # then for each file:
53 53 #
54 54 # server writes out line that says file name, how many bytes in
55 55 # file. separator is ascii nul, byte count is string.
56 56 #
57 57 # server writes out raw file data.
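
# A minimal client-side sketch, not part of the module, of how the format
# above could be consumed. fp is any file-like object positioned just
# after the initial status line; a real client would read the data in
# bounded chunks (see util.filechunkiter) instead of one read() per file.
def read_stream(fp):
    entries = []
    # total_bytes is advisory, e.g. for progress reporting
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for i in xrange(total_files):
        # per-file header: "<name>\0<size>\n"
        name, size = fp.readline().split('\0', 1)
        size = int(size)
        # raw file data follows immediately
        entries.append((name, fp.read(size)))
    return entries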
58 58
59 59 def stream_out(repo, fileobj):
60 60 '''stream out all metadata files in repository.
61 61 writes to file-like object, must support write() and optional flush().'''
62 62
63 63 if not repo.ui.configbool('server', 'uncompressed'):
64 64 fileobj.write('1\n')
65 65 return
66 66
67 67 # get a consistent snapshot of the repo. lock during the scan so the
68 68 # lock is not needed while we stream, and commits can happen.
69 69 try:
70 70 repolock = repo.lock()
71 71 except (lock.LockHeld, lock.LockUnavailable), inst:
72 72 repo.ui.warn('locking the repository failed: %s\n' % (inst,))
73 73 fileobj.write('2\n')
74 74 return
75 75
76 76 fileobj.write('0\n')
77 77 repo.ui.debug('scanning\n')
78 78 entries = []
79 79 total_bytes = 0
80 80 for name, size in walkrepo(repo.spath):
81 name = util.pconvert(repo.decodefn(name))
81 name = repo.decodefn(util.pconvert(name))
82 82 entries.append((name, size))
83 83 total_bytes += size
84 84 repolock.release()
85 85
86 86 repo.ui.debug('%d files, %d bytes to transfer\n' %
87 87 (len(entries), total_bytes))
88 88 fileobj.write('%d %d\n' % (len(entries), total_bytes))
89 89 for name, size in entries:
90 90 repo.ui.debug('sending %s (%d bytes)\n' % (name, size))
91 91 fileobj.write('%s\0%d\n' % (name, size))
92 92 for chunk in util.filechunkiter(repo.sopener(name), limit=size):
93 93 fileobj.write(chunk)
94 94 flush = getattr(fileobj, 'flush', None)
95 95 if flush: flush()
@@ -1,1440 +1,1448 b''
1 1 """
2 2 util.py - Mercurial utility functions and platform specific implementations
3 3
4 4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7 7
8 8 This software may be used and distributed according to the terms
9 9 of the GNU General Public License, incorporated herein by reference.
10 10
11 11 This contains helper routines that are independent of the SCM core and hide
12 12 platform-specific details from the core.
13 13 """
14 14
15 15 from i18n import _
16 16 import cStringIO, errno, getpass, popen2, re, shutil, sys, tempfile
17 17 import os, threading, time, calendar, ConfigParser, locale, glob
18 18
19 19 try:
20 20 _encoding = os.environ.get("HGENCODING") or locale.getpreferredencoding() \
21 21 or "ascii"
22 22 except locale.Error:
23 23 _encoding = 'ascii'
24 24 _encodingmode = os.environ.get("HGENCODINGMODE", "strict")
25 25 _fallbackencoding = 'ISO-8859-1'
26 26
27 27 def tolocal(s):
28 28 """
29 29 Convert a string from internal UTF-8 to local encoding
30 30
31 31 All internal strings should be UTF-8 but some repos before the
32 32 implementation of locale support may contain latin1 or possibly
33 33 other character sets. We attempt to decode everything strictly
34 34 using UTF-8, then Latin-1, and failing that, we use UTF-8 and
35 35 replace unknown characters.
36 36 """
37 37 for e in ('UTF-8', _fallbackencoding):
38 38 try:
39 39 u = s.decode(e) # attempt strict decoding
40 40 return u.encode(_encoding, "replace")
41 41 except LookupError, k:
42 42 raise Abort(_("%s, please check your locale settings") % k)
43 43 except UnicodeDecodeError:
44 44 pass
45 45 u = s.decode("utf-8", "replace") # last ditch
46 46 return u.encode(_encoding, "replace")
47 47
48 48 def fromlocal(s):
49 49 """
50 50 Convert a string from the local character encoding to UTF-8
51 51
52 52 We attempt to decode strings using the encoding mode set by
53 53 HGENCODINGMODE, which defaults to 'strict'. In this mode, unknown
54 54 characters will cause an error message. Other modes include
55 55 'replace', which replaces unknown characters with a special
56 56 Unicode character, and 'ignore', which drops the character.
57 57 """
58 58 try:
59 59 return s.decode(_encoding, _encodingmode).encode("utf-8")
60 60 except UnicodeDecodeError, inst:
61 61 sub = s[max(0, inst.start-10):inst.start+10]
62 62 raise Abort("decoding near '%s': %s!" % (sub, inst))
63 63 except LookupError, k:
64 64 raise Abort(_("%s, please check your locale settings") % k)
65 65
66 66 def locallen(s):
67 67 """Find the length in characters of a local string"""
68 68 return len(s.decode(_encoding, "replace"))
69 69
70 70 def localsub(s, a, b=None):
71 71 try:
72 72 u = s.decode(_encoding, _encodingmode)
73 73 if b is not None:
74 74 u = u[a:b]
75 75 else:
76 76 u = u[:a]
77 77 return u.encode(_encoding, _encodingmode)
78 78 except UnicodeDecodeError, inst:
79 79 sub = s[max(0, inst.start-10):inst.start+10]
80 80 raise Abort(_("decoding near '%s': %s!\n") % (sub, inst))
81 81
82 82 # used by parsedate
83 83 defaultdateformats = (
84 84 '%Y-%m-%d %H:%M:%S',
85 85 '%Y-%m-%d %I:%M:%S%p',
86 86 '%Y-%m-%d %H:%M',
87 87 '%Y-%m-%d %I:%M%p',
88 88 '%Y-%m-%d',
89 89 '%m-%d',
90 90 '%m/%d',
91 91 '%m/%d/%y',
92 92 '%m/%d/%Y',
93 93 '%a %b %d %H:%M:%S %Y',
94 94 '%a %b %d %I:%M:%S%p %Y',
95 95 '%b %d %H:%M:%S %Y',
96 96 '%b %d %I:%M:%S%p %Y',
97 97 '%b %d %H:%M:%S',
98 98 '%b %d %I:%M:%S%p',
99 99 '%b %d %H:%M',
100 100 '%b %d %I:%M%p',
101 101 '%b %d %Y',
102 102 '%b %d',
103 103 '%H:%M:%S',
104 104 '%I:%M:%S%p',
105 105 '%H:%M',
106 106 '%I:%M%p',
107 107 )
108 108
109 109 extendeddateformats = defaultdateformats + (
110 110 "%Y",
111 111 "%Y-%m",
112 112 "%b",
113 113 "%b %Y",
114 114 )
115 115
116 116 class SignalInterrupt(Exception):
117 117 """Exception raised on SIGTERM and SIGHUP."""
118 118
119 119 # differences from SafeConfigParser:
120 120 # - case-sensitive keys
121 121 # - allows values that are not strings (this means that you may not
122 122 # be able to save the configuration to a file)
123 123 class configparser(ConfigParser.SafeConfigParser):
124 124 def optionxform(self, optionstr):
125 125 return optionstr
126 126
127 127 def set(self, section, option, value):
128 128 return ConfigParser.ConfigParser.set(self, section, option, value)
129 129
130 130 def _interpolate(self, section, option, rawval, vars):
131 131 if not isinstance(rawval, basestring):
132 132 return rawval
133 133 return ConfigParser.SafeConfigParser._interpolate(self, section,
134 134 option, rawval, vars)
135 135
136 136 def cachefunc(func):
137 137 '''cache the result of function calls'''
138 138 # XXX doesn't handle keyword args
139 139 cache = {}
140 140 if func.func_code.co_argcount == 1:
141 141 # we gain a small amount of time because
142 142 # we don't need to pack/unpack the list
143 143 def f(arg):
144 144 if arg not in cache:
145 145 cache[arg] = func(arg)
146 146 return cache[arg]
147 147 else:
148 148 def f(*args):
149 149 if args not in cache:
150 150 cache[args] = func(*args)
151 151 return cache[args]
152 152
153 153 return f
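
# Hypothetical use of cachefunc: memoized recursion. The returned wrapper
# is what recurses, so repeated subproblems are answered from the cache.
#   def _fib(n):
#       if n < 2:
#           return n
#       return fib(n - 1) + fib(n - 2)
#   fib = cachefunc(_fib)
#   fib(30) -> 832040, computing each n only once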
154 154
155 155 def pipefilter(s, cmd):
156 156 '''filter string S through command CMD, returning its output'''
157 157 (pout, pin) = popen2.popen2(cmd, -1, 'b')
158 158 def writer():
159 159 try:
160 160 pin.write(s)
161 161 pin.close()
162 162 except IOError, inst:
163 163 if inst.errno != errno.EPIPE:
164 164 raise
165 165
166 166 # we should use select instead on UNIX, but this will work on most
167 167 # systems, including Windows
168 168 w = threading.Thread(target=writer)
169 169 w.start()
170 170 f = pout.read()
171 171 pout.close()
172 172 w.join()
173 173 return f
174 174
175 175 def tempfilter(s, cmd):
176 176 '''filter string S through a pair of temporary files with CMD.
177 177 CMD is used as a template to create the real command to be run,
178 178 with the strings INFILE and OUTFILE replaced by the real names of
179 179 the temporary files generated.'''
180 180 inname, outname = None, None
181 181 try:
182 182 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
183 183 fp = os.fdopen(infd, 'wb')
184 184 fp.write(s)
185 185 fp.close()
186 186 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
187 187 os.close(outfd)
188 188 cmd = cmd.replace('INFILE', inname)
189 189 cmd = cmd.replace('OUTFILE', outname)
190 190 code = os.system(cmd)
191 191 if code: raise Abort(_("command '%s' failed: %s") %
192 192 (cmd, explain_exit(code)))
193 193 return open(outname, 'rb').read()
194 194 finally:
195 195 try:
196 196 if inname: os.unlink(inname)
197 197 except: pass
198 198 try:
199 199 if outname: os.unlink(outname)
200 200 except: pass
201 201
202 202 filtertable = {
203 203 'tempfile:': tempfilter,
204 204 'pipe:': pipefilter,
205 205 }
206 206
207 207 def filter(s, cmd):
208 208 "filter a string through a command that transforms its input to its output"
209 209 for name, fn in filtertable.iteritems():
210 210 if cmd.startswith(name):
211 211 return fn(s, cmd[len(name):].lstrip())
212 212 return pipefilter(s, cmd)
213 213
214 214 def find_in_path(name, path, default=None):
215 215 '''find name in search path. path can be string (will be split
216 216 with os.pathsep), or iterable thing that returns strings. if name
217 217 found, return path to name. else return default.'''
218 218 if isinstance(path, str):
219 219 path = path.split(os.pathsep)
220 220 for p in path:
221 221 p_name = os.path.join(p, name)
222 222 if os.path.exists(p_name):
223 223 return p_name
224 224 return default
225 225
226 226 def binary(s):
227 227 """return true if a string is binary data using diff's heuristic"""
228 228 if s and '\0' in s[:4096]:
229 229 return True
230 230 return False
231 231
232 232 def unique(g):
233 233 """return the unique elements of iterable g"""
234 234 seen = {}
235 235 l = []
236 236 for f in g:
237 237 if f not in seen:
238 238 seen[f] = 1
239 239 l.append(f)
240 240 return l
241 241
242 242 class Abort(Exception):
243 243 """Raised if a command needs to print an error and exit."""
244 244
245 245 class UnexpectedOutput(Abort):
246 246 """Raised to print an error with part of output and exit."""
247 247
248 248 def always(fn): return True
249 249 def never(fn): return False
250 250
251 251 def expand_glob(pats):
252 252 '''On Windows, expand the implicit globs in a list of patterns'''
253 253 if os.name != 'nt':
254 254 return list(pats)
255 255 ret = []
256 256 for p in pats:
257 257 kind, name = patkind(p, None)
258 258 if kind is None:
259 259 globbed = glob.glob(name)
260 260 if globbed:
261 261 ret.extend(globbed)
262 262 continue
263 263 # if we couldn't expand the glob, just keep it around
264 264 ret.append(p)
265 265 return ret
266 266
267 267 def patkind(name, dflt_pat='glob'):
268 268 """Split a string into an optional pattern kind prefix and the
269 269 actual pattern."""
270 270 for prefix in 're', 'glob', 'path', 'relglob', 'relpath', 'relre':
271 271 if name.startswith(prefix + ':'): return name.split(':', 1)
272 272 return dflt_pat, name
273 273
274 274 def globre(pat, head='^', tail='$'):
275 275 "convert a glob pattern into a regexp"
276 276 i, n = 0, len(pat)
277 277 res = ''
278 278 group = False
279 279 def peek(): return i < n and pat[i]
280 280 while i < n:
281 281 c = pat[i]
282 282 i = i+1
283 283 if c == '*':
284 284 if peek() == '*':
285 285 i += 1
286 286 res += '.*'
287 287 else:
288 288 res += '[^/]*'
289 289 elif c == '?':
290 290 res += '.'
291 291 elif c == '[':
292 292 j = i
293 293 if j < n and pat[j] in '!]':
294 294 j += 1
295 295 while j < n and pat[j] != ']':
296 296 j += 1
297 297 if j >= n:
298 298 res += '\\['
299 299 else:
300 300 stuff = pat[i:j].replace('\\','\\\\')
301 301 i = j + 1
302 302 if stuff[0] == '!':
303 303 stuff = '^' + stuff[1:]
304 304 elif stuff[0] == '^':
305 305 stuff = '\\' + stuff
306 306 res = '%s[%s]' % (res, stuff)
307 307 elif c == '{':
308 308 group = True
309 309 res += '(?:'
310 310 elif c == '}' and group:
311 311 res += ')'
312 312 group = False
313 313 elif c == ',' and group:
314 314 res += '|'
315 315 elif c == '\\':
316 316 p = peek()
317 317 if p:
318 318 i += 1
319 319 res += re.escape(p)
320 320 else:
321 321 res += re.escape(c)
322 322 else:
323 323 res += re.escape(c)
324 324 return head + res + tail
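
# Hypothetical expansions of globre() with the default head/tail:
#   globre('*.py')    -> '^[^/]*\.py$'    '*' does not cross '/'
#   globre('**.py')   -> '^.*\.py$'       '**' crosses directories
#   globre('{a,b}.c') -> '^(?:a|b)\.c$'   brace alternation
#   globre('?')       -> '^.$'            '?' matches a single character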
325 325
326 326 _globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
327 327
328 328 def pathto(n1, n2):
329 329 '''return the relative path from one place to another.
330 330 n1 should use os.sep to separate directories
331 331 n2 should use "/" to separate directories
332 332 returns an os.sep-separated path.
333 333 '''
334 334 if not n1: return localpath(n2)
335 335 a, b = n1.split(os.sep), n2.split('/')
336 336 a.reverse()
337 337 b.reverse()
338 338 while a and b and a[-1] == b[-1]:
339 339 a.pop()
340 340 b.pop()
341 341 b.reverse()
342 342 return os.sep.join((['..'] * len(a)) + b)
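
# Hypothetical example, assuming os.sep == '/':
#   pathto('a/b', 'a/c/d') -> '../c/d'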
343 343
344 344 def canonpath(root, cwd, myname):
345 345 """return the canonical path of myname, given cwd and root"""
346 346 if root == os.sep:
347 347 rootsep = os.sep
348 348 elif root.endswith(os.sep):
349 349 rootsep = root
350 350 else:
351 351 rootsep = root + os.sep
352 352 name = myname
353 353 if not os.path.isabs(name):
354 354 name = os.path.join(root, cwd, name)
355 355 name = os.path.normpath(name)
356 356 if name != rootsep and name.startswith(rootsep):
357 357 name = name[len(rootsep):]
358 358 audit_path(name)
359 359 return pconvert(name)
360 360 elif name == root:
361 361 return ''
362 362 else:
363 363 # Determine whether `name' is in the hierarchy at or beneath `root',
364 364 # by iterating name=dirname(name) until that causes no change (can't
365 365 # check name == '/', because that doesn't work on windows). For each
366 366 # `name', compare dev/inode numbers. If they match, the list `rel'
367 367 # holds the reversed list of components making up the relative file
368 368 # name we want.
369 369 root_st = os.stat(root)
370 370 rel = []
371 371 while True:
372 372 try:
373 373 name_st = os.stat(name)
374 374 except OSError:
375 375 break
376 376 if samestat(name_st, root_st):
377 377 if not rel:
378 378 # name was actually the same as root (maybe a symlink)
379 379 return ''
380 380 rel.reverse()
381 381 name = os.path.join(*rel)
382 382 audit_path(name)
383 383 return pconvert(name)
384 384 dirname, basename = os.path.split(name)
385 385 rel.append(basename)
386 386 if dirname == name:
387 387 break
388 388 name = dirname
389 389
390 390 raise Abort('%s not under root' % myname)
391 391
392 392 def matcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='', src=None):
393 393 return _matcher(canonroot, cwd, names, inc, exc, head, 'glob', src)
394 394
395 395 def cmdmatcher(canonroot, cwd='', names=['.'], inc=[], exc=[], head='',
396 396 src=None, globbed=False):
397 397 if not globbed:
398 398 names = expand_glob(names)
399 399 return _matcher(canonroot, cwd, names, inc, exc, head, 'relpath', src)
400 400
401 401 def _matcher(canonroot, cwd, names, inc, exc, head, dflt_pat, src):
402 402 """build a function to match a set of file patterns
403 403
404 404 arguments:
405 405 canonroot - the canonical root of the tree you're matching against
406 406 cwd - the current working directory, if relevant
407 407 names - patterns to find
408 408 inc - patterns to include
409 409 exc - patterns to exclude
410 410 head - a regex to prepend to patterns to control whether a match is rooted
411 411
412 412 a pattern is one of:
413 413 'glob:<rooted glob>'
414 414 're:<rooted regexp>'
415 415 'path:<rooted path>'
416 416 'relglob:<relative glob>'
417 417 'relpath:<relative path>'
418 418 'relre:<relative regexp>'
419 419 '<rooted path or regexp>'
420 420
421 421 returns:
422 422 a 3-tuple containing
423 423 - list of explicit non-pattern names passed in
424 424 - a bool match(filename) function
425 425 - a bool indicating if any patterns were passed in
426 426
427 427 todo:
428 428 make head regex a rooted bool
429 429 """
430 430
431 431 def contains_glob(name):
432 432 for c in name:
433 433 if c in _globchars: return True
434 434 return False
435 435
436 436 def regex(kind, name, tail):
437 437 '''convert a pattern into a regular expression'''
438 438 if kind == 're':
439 439 return name
440 440 elif kind == 'path':
441 441 return '^' + re.escape(name) + '(?:/|$)'
442 442 elif kind == 'relglob':
443 443 return head + globre(name, '(?:|.*/)', tail)
444 444 elif kind == 'relpath':
445 445 return head + re.escape(name) + tail
446 446 elif kind == 'relre':
447 447 if name.startswith('^'):
448 448 return name
449 449 return '.*' + name
450 450 return head + globre(name, '', tail)
451 451
452 452 def matchfn(pats, tail):
453 453 """build a matching function from a set of patterns"""
454 454 if not pats:
455 455 return
456 456 matches = []
457 457 for k, p in pats:
458 458 try:
459 459 pat = '(?:%s)' % regex(k, p, tail)
460 460 matches.append(re.compile(pat).match)
461 461 except re.error:
462 462 if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
463 463 else: raise Abort("invalid pattern (%s): %s" % (k, p))
464 464
465 465 def buildfn(text):
466 466 for m in matches:
467 467 r = m(text)
468 468 if r:
469 469 return r
470 470
471 471 return buildfn
472 472
473 473 def globprefix(pat):
474 474 '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
475 475 root = []
476 476 for p in pat.split(os.sep):
477 477 if contains_glob(p): break
478 478 root.append(p)
479 479 return '/'.join(root)
480 480
481 481 pats = []
482 482 files = []
483 483 roots = []
484 484 for kind, name in [patkind(p, dflt_pat) for p in names]:
485 485 if kind in ('glob', 'relpath'):
486 486 name = canonpath(canonroot, cwd, name)
487 487 if name == '':
488 488 kind, name = 'glob', '**'
489 489 if kind in ('glob', 'path', 're'):
490 490 pats.append((kind, name))
491 491 if kind == 'glob':
492 492 root = globprefix(name)
493 493 if root: roots.append(root)
494 494 elif kind == 'relpath':
495 495 files.append((kind, name))
496 496 roots.append(name)
497 497
498 498 patmatch = matchfn(pats, '$') or always
499 499 filematch = matchfn(files, '(?:/|$)') or always
500 500 incmatch = always
501 501 if inc:
502 502 inckinds = [patkind(canonpath(canonroot, cwd, i)) for i in inc]
503 503 incmatch = matchfn(inckinds, '(?:/|$)')
504 504 excmatch = lambda fn: False
505 505 if exc:
506 506 exckinds = [patkind(canonpath(canonroot, cwd, x)) for x in exc]
507 507 excmatch = matchfn(exckinds, '(?:/|$)')
508 508
509 509 return (roots,
510 510 lambda fn: (incmatch(fn) and not excmatch(fn) and
511 511 (fn.endswith('/') or
512 512 (not pats and not files) or
513 513 (pats and patmatch(fn)) or
514 514 (files and filematch(fn)))),
515 515 (inc or exc or (pats and pats != [('glob', '**')])) and True)
516 516
517 517 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
518 518 '''enhanced shell command execution.
519 519 run with environment maybe modified, maybe in different dir.
520 520
521 521 if command fails and onerr is None, return status. if ui object,
522 522 print error message and return status, else raise onerr object as
523 523 exception.'''
524 524 def py2shell(val):
525 525 'convert python object into string that is useful to shell'
526 526 if val in (None, False):
527 527 return '0'
528 528 if val == True:
529 529 return '1'
530 530 return str(val)
531 531 oldenv = {}
532 532 for k in environ:
533 533 oldenv[k] = os.environ.get(k)
534 534 if cwd is not None:
535 535 oldcwd = os.getcwd()
536 536 origcmd = cmd
537 537 if os.name == 'nt':
538 538 cmd = '"%s"' % cmd
539 539 try:
540 540 for k, v in environ.iteritems():
541 541 os.environ[k] = py2shell(v)
542 542 if cwd is not None and oldcwd != cwd:
543 543 os.chdir(cwd)
544 544 rc = os.system(cmd)
545 545 if rc and onerr:
546 546 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
547 547 explain_exit(rc)[0])
548 548 if errprefix:
549 549 errmsg = '%s: %s' % (errprefix, errmsg)
550 550 try:
551 551 onerr.warn(errmsg + '\n')
552 552 except AttributeError:
553 553 raise onerr(errmsg)
554 554 return rc
555 555 finally:
556 556 for k, v in oldenv.iteritems():
557 557 if v is None:
558 558 del os.environ[k]
559 559 else:
560 560 os.environ[k] = v
561 561 if cwd is not None and oldcwd != cwd:
562 562 os.chdir(oldcwd)
563 563
564 564 def rename(src, dst):
565 565 """forcibly rename a file"""
566 566 try:
567 567 os.rename(src, dst)
568 568 except OSError, err:
569 569 # on windows, rename to existing file is not allowed, so we
570 570 # must delete destination first. but if file is open, unlink
571 571 # schedules it for delete but does not delete it. rename
572 572 # happens immediately even for open files, so we create
573 573 # temporary file, delete it, rename destination to that name,
574 574 # then delete that. then rename is safe to do.
575 575 fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
576 576 os.close(fd)
577 577 os.unlink(temp)
578 578 os.rename(dst, temp)
579 579 os.unlink(temp)
580 580 os.rename(src, dst)
581 581
582 582 def unlink(f):
583 583 """unlink and remove the directory if it is empty"""
584 584 os.unlink(f)
585 585 # try removing directories that might now be empty
586 586 try:
587 587 os.removedirs(os.path.dirname(f))
588 588 except OSError:
589 589 pass
590 590
591 591 def copyfile(src, dest):
592 592 "copy a file, preserving mode"
593 593 try:
594 594 shutil.copyfile(src, dest)
595 595 shutil.copymode(src, dest)
596 596 except shutil.Error, inst:
597 597 raise Abort(str(inst))
598 598
599 599 def copyfiles(src, dst, hardlink=None):
600 600 """Copy a directory tree using hardlinks if possible"""
601 601
602 602 if hardlink is None:
603 603 hardlink = (os.stat(src).st_dev ==
604 604 os.stat(os.path.dirname(dst)).st_dev)
605 605
606 606 if os.path.isdir(src):
607 607 os.mkdir(dst)
608 608 for name in os.listdir(src):
609 609 srcname = os.path.join(src, name)
610 610 dstname = os.path.join(dst, name)
611 611 copyfiles(srcname, dstname, hardlink)
612 612 else:
613 613 if hardlink:
614 614 try:
615 615 os_link(src, dst)
616 616 except (IOError, OSError):
617 617 hardlink = False
618 618 shutil.copy(src, dst)
619 619 else:
620 620 shutil.copy(src, dst)
621 621
622 622 def audit_path(path):
623 623 """Abort if path contains dangerous components"""
624 624 parts = os.path.normcase(path).split(os.sep)
625 625 if (os.path.splitdrive(path)[0] or parts[0] in ('.hg', '')
626 626 or os.pardir in parts):
627 627 raise Abort(_("path contains illegal component: %s\n") % path)
628 628
629 629 def _makelock_file(info, pathname):
630 630 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
631 631 os.write(ld, info)
632 632 os.close(ld)
633 633
634 634 def _readlock_file(pathname):
635 635 return posixfile(pathname).read()
636 636
637 637 def nlinks(pathname):
638 638 """Return number of hardlinks for the given file."""
639 639 return os.lstat(pathname).st_nlink
640 640
641 641 if hasattr(os, 'link'):
642 642 os_link = os.link
643 643 else:
644 644 def os_link(src, dst):
645 645 raise OSError(0, _("Hardlinks not supported"))
646 646
647 647 def fstat(fp):
648 648 '''stat file object that may not have fileno method.'''
649 649 try:
650 650 return os.fstat(fp.fileno())
651 651 except AttributeError:
652 652 return os.stat(fp.name)
653 653
654 654 posixfile = file
655 655
656 656 def is_win_9x():
657 657 '''return true if run on windows 95, 98 or me.'''
658 658 try:
659 659 return sys.getwindowsversion()[3] == 1
660 660 except AttributeError:
661 661 return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
662 662
663 663 getuser_fallback = None
664 664
665 665 def getuser():
666 666 '''return name of current user'''
667 667 try:
668 668 return getpass.getuser()
669 669 except ImportError:
670 670 # import of pwd will fail on windows - try fallback
671 671 if getuser_fallback:
672 672 return getuser_fallback()
673 673 # raised if win32api not available
674 674 raise Abort(_('user name not available - set USERNAME '
675 675 'environment variable'))
676 676
677 677 def username(uid=None):
678 678 """Return the name of the user with the given uid.
679 679
680 680 If uid is None, return the name of the current user."""
681 681 try:
682 682 import pwd
683 683 if uid is None:
684 684 uid = os.getuid()
685 685 try:
686 686 return pwd.getpwuid(uid)[0]
687 687 except KeyError:
688 688 return str(uid)
689 689 except ImportError:
690 690 return None
691 691
692 692 def groupname(gid=None):
693 693 """Return the name of the group with the given gid.
694 694
695 695 If gid is None, return the name of the current group."""
696 696 try:
697 697 import grp
698 698 if gid is None:
699 699 gid = os.getgid()
700 700 try:
701 701 return grp.getgrgid(gid)[0]
702 702 except KeyError:
703 703 return str(gid)
704 704 except ImportError:
705 705 return None
706 706
707 707 # File system features
708 708
709 709 def checkfolding(path):
710 710 """
711 711 Check whether the given path is on a case-sensitive filesystem
712 712
713 713 Requires a path (like /foo/.hg) ending with a foldable final
714 714 directory component.
715 715 """
716 716 s1 = os.stat(path)
717 717 d, b = os.path.split(path)
718 718 p2 = os.path.join(d, b.upper())
719 719 if path == p2:
720 720 p2 = os.path.join(d, b.lower())
721 721 try:
722 722 s2 = os.stat(p2)
723 723 if s2 == s1:
724 724 return False
725 725 return True
726 726 except:
727 727 return True
728 728
729 729 def checkexec(path):
730 730 """
731 731 Check whether the given path is on a filesystem with UNIX-like exec flags
732 732
733 733 Requires a directory (like /foo/.hg)
734 734 """
735 735 fh, fn = tempfile.mkstemp("", "", path)
736 736 os.close(fh)
737 737 m = os.stat(fn).st_mode
738 738 os.chmod(fn, m ^ 0111)
739 739 r = (os.stat(fn).st_mode != m)
740 740 os.unlink(fn)
741 741 return r
742 742
743 743 def execfunc(path, fallback):
744 744 '''return an is_exec() function with default to fallback'''
745 745 if checkexec(path):
746 746 return lambda x: is_exec(os.path.join(path, x))
747 747 return fallback
748 748
749 749 def checklink(path):
750 750 """check whether the given path is on a symlink-capable filesystem"""
751 751 # mktemp is not racy because symlink creation will fail if the
752 752 # file already exists
753 753 name = tempfile.mktemp(dir=path)
754 754 try:
755 755 os.symlink(".", name)
756 756 os.unlink(name)
757 757 return True
758 758 except (OSError, AttributeError):
759 759 return False
760 760
761 761 def linkfunc(path, fallback):
762 762 '''return an is_link() function with default to fallback'''
763 763 if checklink(path):
764 764 return lambda x: is_link(os.path.join(path, x))
765 765 return fallback
766 766
767 767 # Platform specific variants
768 768 if os.name == 'nt':
769 769 import msvcrt
770 770 nulldev = 'NUL:'
771 771
772 772 class winstdout:
773 773 '''stdout on windows misbehaves if sent through a pipe'''
774 774
775 775 def __init__(self, fp):
776 776 self.fp = fp
777 777
778 778 def __getattr__(self, key):
779 779 return getattr(self.fp, key)
780 780
781 781 def close(self):
782 782 try:
783 783 self.fp.close()
784 784 except: pass
785 785
786 786 def write(self, s):
787 787 try:
788 788 return self.fp.write(s)
789 789 except IOError, inst:
790 790 if inst.errno != 0: raise
791 791 self.close()
792 792 raise IOError(errno.EPIPE, 'Broken pipe')
793 793
794 def flush(self):
795 try:
796 return self.fp.flush()
797 except IOError, inst:
798 if inst.errno != errno.EINVAL: raise
799 self.close()
800 raise IOError(errno.EPIPE, 'Broken pipe')
801
794 802 sys.stdout = winstdout(sys.stdout)
795 803
796 804 def system_rcpath():
797 805 try:
798 806 return system_rcpath_win32()
799 807 except:
800 808 return [r'c:\mercurial\mercurial.ini']
801 809
802 810 def user_rcpath():
803 811 '''return os-specific hgrc search path to the user dir'''
804 812 try:
805 813 userrc = user_rcpath_win32()
806 814 except:
807 815 userrc = os.path.join(os.path.expanduser('~'), 'mercurial.ini')
808 816 path = [userrc]
809 817 userprofile = os.environ.get('USERPROFILE')
810 818 if userprofile:
811 819 path.append(os.path.join(userprofile, 'mercurial.ini'))
812 820 return path
813 821
814 822 def parse_patch_output(output_line):
815 823 """parses the output produced by patch and returns the file name"""
816 824 pf = output_line[14:]
817 825 if pf[0] == '`':
818 826 pf = pf[1:-1] # Remove the quotes
819 827 return pf
820 828
821 829 def testpid(pid):
822 830 '''return False if pid dead, True if running or not known'''
823 831 return True
824 832
825 833 def set_exec(f, mode):
826 834 pass
827 835
828 836 def set_link(f, mode):
829 837 pass
830 838
831 839 def set_binary(fd):
832 840 msvcrt.setmode(fd.fileno(), os.O_BINARY)
833 841
834 842 def pconvert(path):
835 843 return path.replace("\\", "/")
836 844
837 845 def localpath(path):
838 846 return path.replace('/', '\\')
839 847
840 848 def normpath(path):
841 849 return pconvert(os.path.normpath(path))
842 850
843 851 makelock = _makelock_file
844 852 readlock = _readlock_file
845 853
846 854 def samestat(s1, s2):
847 855 return False
848 856
849 857 # A sequence of backslashes is special iff it precedes a double quote:
850 858 # - if there's an even number of backslashes, the double quote is not
851 859 # quoted (i.e. it ends the quoted region)
852 860 # - if there's an odd number of backslashes, the double quote is quoted
853 861 # - in both cases, every pair of backslashes is unquoted into a single
854 862 # backslash
855 863 # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
856 864 # So, to quote a string, we must surround it in double quotes, double
857 865 # the number of backslashes that precede double quotes and add another
858 866 # backslash before every double quote (being careful with the double
859 867 # quote we've appended to the end)
860 868 _quotere = None
861 869 def shellquote(s):
862 870 global _quotere
863 871 if _quotere is None:
864 872 _quotere = re.compile(r'(\\*)("|\\$)')
865 873 return '"%s"' % _quotere.sub(r'\1\1\\\2', s)
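
# Hypothetical expansions of the rule above, shown as raw character
# sequences (input -> output):
#   foo"bar  ->  "foo\"bar"     a backslash is added before the quote
#   foo\"bar ->  "foo\\\"bar"   one backslash doubled, quote escaped
#   foo\     ->  "foo\\"        trailing backslash doubled so it cannot
#                               escape the closing quote we append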
866 874
867 875 def explain_exit(code):
868 876 return _("exited with status %d") % code, code
869 877
870 878 # if you change this stub into a real check, please try to implement the
871 879 # username and groupname functions above, too.
872 880 def isowner(fp, st=None):
873 881 return True
874 882
875 883 try:
876 884 # override functions with win32 versions if possible
877 885 from util_win32 import *
878 886 if not is_win_9x():
879 887 posixfile = posixfile_nt
880 888 except ImportError:
881 889 pass
882 890
883 891 else:
884 892 nulldev = '/dev/null'
885 893 _umask = os.umask(0)
886 894 os.umask(_umask)
887 895
888 896 def rcfiles(path):
889 897 rcs = [os.path.join(path, 'hgrc')]
890 898 rcdir = os.path.join(path, 'hgrc.d')
891 899 try:
892 900 rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
893 901 if f.endswith(".rc")])
894 902 except OSError:
895 903 pass
896 904 return rcs
897 905
898 906 def system_rcpath():
899 907 path = []
900 908 # old mod_python does not set sys.argv
901 909 if len(getattr(sys, 'argv', [])) > 0:
902 910 path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
903 911 '/../etc/mercurial'))
904 912 path.extend(rcfiles('/etc/mercurial'))
905 913 return path
906 914
907 915 def user_rcpath():
908 916 return [os.path.expanduser('~/.hgrc')]
909 917
910 918 def parse_patch_output(output_line):
911 919 """parses the output produced by patch and returns the file name"""
912 920 pf = output_line[14:]
913 921 if pf.startswith("'") and pf.endswith("'") and " " in pf:
914 922 pf = pf[1:-1] # Remove the quotes
915 923 return pf
916 924
917 925 def is_exec(f):
918 926 """check whether a file is executable"""
919 927 return (os.lstat(f).st_mode & 0100 != 0)
920 928
921 929 def set_exec(f, mode):
922 930 s = os.lstat(f).st_mode
923 931 if (s & 0100 != 0) == mode:
924 932 return
925 933 if mode:
926 934 # Turn on +x for every +r bit when making a file executable
927 935 # and obey umask.
928 936 os.chmod(f, s | (s & 0444) >> 2 & ~_umask)
929 937 else:
930 938 os.chmod(f, s & 0666)
931 939
932 940 def is_link(f):
933 941 """check whether a file is a symlink"""
934 942 return (os.lstat(f).st_mode & 0120000 == 0120000)
935 943
936 944 def set_link(f, mode):
937 945 """make a file a symbolic link/regular file
938 946
939 947 if a file is changed to a link, its contents become the link data
940 948 if a link is changed to a file, its link data become its contents
941 949 """
942 950
943 951 m = is_link(f)
944 952 if m == bool(mode):
945 953 return
946 954
947 955 if mode: # switch file to link
948 956 data = file(f).read()
949 957 os.unlink(f)
950 958 os.symlink(data, f)
951 959 else:
952 960 data = os.readlink(f)
953 961 os.unlink(f)
954 962 file(f, "w").write(data)
955 963
956 964 def set_binary(fd):
957 965 pass
958 966
959 967 def pconvert(path):
960 968 return path
961 969
962 970 def localpath(path):
963 971 return path
964 972
965 973 normpath = os.path.normpath
966 974 samestat = os.path.samestat
967 975
968 976 def makelock(info, pathname):
969 977 try:
970 978 os.symlink(info, pathname)
971 979 except OSError, why:
972 980 if why.errno == errno.EEXIST:
973 981 raise
974 982 else:
975 983 _makelock_file(info, pathname)
976 984
977 985 def readlock(pathname):
978 986 try:
979 987 return os.readlink(pathname)
980 988 except OSError, why:
981 989 if why.errno == errno.EINVAL:
982 990 return _readlock_file(pathname)
983 991 else:
984 992 raise
985 993
986 994 def shellquote(s):
987 995 return "'%s'" % s.replace("'", "'\\''")
988 996
989 997 def testpid(pid):
990 998 '''return False if pid dead, True if running or not sure'''
991 999 try:
992 1000 os.kill(pid, 0)
993 1001 return True
994 1002 except OSError, inst:
995 1003 return inst.errno != errno.ESRCH
996 1004
997 1005 def explain_exit(code):
998 1006 """return a 2-tuple (desc, code) describing a process's status"""
999 1007 if os.WIFEXITED(code):
1000 1008 val = os.WEXITSTATUS(code)
1001 1009 return _("exited with status %d") % val, val
1002 1010 elif os.WIFSIGNALED(code):
1003 1011 val = os.WTERMSIG(code)
1004 1012 return _("killed by signal %d") % val, val
1005 1013 elif os.WIFSTOPPED(code):
1006 1014 val = os.WSTOPSIG(code)
1007 1015 return _("stopped by signal %d") % val, val
1008 1016 raise ValueError(_("invalid exit code"))
1009 1017
1010 1018 def isowner(fp, st=None):
1011 1019 """Return True if the file object f belongs to the current user.
1012 1020
1013 1021 The return value of a util.fstat(f) may be passed as the st argument.
1014 1022 """
1015 1023 if st is None:
1016 1024 st = fstat(fp)
1017 1025 return st.st_uid == os.getuid()
1018 1026
1019 1027 def _buildencodefun():
1020 1028 e = '_'
1021 1029 win_reserved = [ord(x) for x in '\\:*?"<>|']
1022 1030 cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
1023 1031 for x in (range(32) + range(126, 256) + win_reserved):
1024 1032 cmap[chr(x)] = "~%02x" % x
1025 1033 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
1026 1034 cmap[chr(x)] = e + chr(x).lower()
1027 1035 dmap = {}
1028 1036 for k, v in cmap.iteritems():
1029 1037 dmap[v] = k
1030 1038 def decode(s):
1031 1039 i = 0
1032 1040 while i < len(s):
1033 1041 for l in xrange(1, 4):
1034 1042 try:
1035 1043 yield dmap[s[i:i+l]]
1036 1044 i += l
1037 1045 break
1038 1046 except KeyError:
1039 1047 pass
1040 1048 else:
1041 1049 raise KeyError
1042 1050 return (lambda s: "".join([cmap[c] for c in s]),
1043 1051 lambda s: "".join(list(decode(s))))
1044 1052
1045 1053 encodefilename, decodefilename = _buildencodefun()
1046 1054
1047 1055 def encodedopener(openerfn, fn):
1048 1056 def o(path, *args, **kw):
1049 1057 return openerfn(fn(path), *args, **kw)
1050 1058 return o
1051 1059
1052 1060 def opener(base, audit=True):
1053 1061 """
1054 1062 return a function that opens files relative to base
1055 1063
1056 1064 this function is used to hide the details of COW semantics and
1057 1065 remote file access from higher level code.
1058 1066 """
1059 1067 p = base
1060 1068 audit_p = audit
1061 1069
1062 1070 def mktempcopy(name):
1063 1071 d, fn = os.path.split(name)
1064 1072 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
1065 1073 os.close(fd)
1066 1074 ofp = posixfile(temp, "wb")
1067 1075 try:
1068 1076 try:
1069 1077 ifp = posixfile(name, "rb")
1070 1078 except IOError, inst:
1071 1079 if not getattr(inst, 'filename', None):
1072 1080 inst.filename = name
1073 1081 raise
1074 1082 for chunk in filechunkiter(ifp):
1075 1083 ofp.write(chunk)
1076 1084 ifp.close()
1077 1085 ofp.close()
1078 1086 except:
1079 1087 try: os.unlink(temp)
1080 1088 except: pass
1081 1089 raise
1082 1090 st = os.lstat(name)
1083 1091 os.chmod(temp, st.st_mode)
1084 1092 return temp
1085 1093
1086 1094 class atomictempfile(posixfile):
1087 1095 """the file will only be copied when rename is called"""
1088 1096 def __init__(self, name, mode):
1089 1097 self.__name = name
1090 1098 self.temp = mktempcopy(name)
1091 1099 posixfile.__init__(self, self.temp, mode)
1092 1100 def rename(self):
1093 1101 if not self.closed:
1094 1102 posixfile.close(self)
1095 1103 rename(self.temp, localpath(self.__name))
1096 1104 def __del__(self):
1097 1105 if not self.closed:
1098 1106 try:
1099 1107 os.unlink(self.temp)
1100 1108 except: pass
1101 1109 posixfile.close(self)
1102 1110
1103 1111 class atomicfile(atomictempfile):
1104 1112 """the file will only be copied on close"""
1105 1113 def __init__(self, name, mode):
1106 1114 atomictempfile.__init__(self, name, mode)
1107 1115 def close(self):
1108 1116 self.rename()
1109 1117 def __del__(self):
1110 1118 self.rename()
1111 1119
1112 1120 def o(path, mode="r", text=False, atomic=False, atomictemp=False):
1113 1121 if audit_p:
1114 1122 audit_path(path)
1115 1123 f = os.path.join(p, path)
1116 1124
1117 1125 if not text:
1118 1126 mode += "b" # for that other OS
1119 1127
1120 1128 if mode[0] != "r":
1121 1129 try:
1122 1130 nlink = nlinks(f)
1123 1131 except OSError:
1124 1132 d = os.path.dirname(f)
1125 1133 if not os.path.isdir(d):
1126 1134 os.makedirs(d)
1127 1135 else:
1128 1136 if atomic:
1129 1137 return atomicfile(f, mode)
1130 1138 elif atomictemp:
1131 1139 return atomictempfile(f, mode)
1132 1140 if nlink > 1:
1133 1141 rename(mktempcopy(f), f)
1134 1142 return posixfile(f, mode)
1135 1143
1136 1144 return o
1137 1145
1138 1146 class chunkbuffer(object):
1139 1147 """Allow arbitrarily sized chunks of data to be efficiently read from an
1140 1148 iterator over chunks of arbitrary size."""
1141 1149
1142 1150 def __init__(self, in_iter, targetsize = 2**16):
1143 1151 """in_iter is the iterator that's iterating over the input chunks.
1144 1152 targetsize is how big a buffer to try to maintain."""
1145 1153 self.in_iter = iter(in_iter)
1146 1154 self.buf = ''
1147 1155 self.targetsize = int(targetsize)
1148 1156 if self.targetsize <= 0:
1149 1157 raise ValueError(_("targetsize must be greater than 0, was %d") %
1150 1158 targetsize)
1151 1159 self.iterempty = False
1152 1160
1153 1161 def fillbuf(self):
1154 1162 """Ignore target size; read every chunk from iterator until empty."""
1155 1163 if not self.iterempty:
1156 1164 collector = cStringIO.StringIO()
1157 1165 collector.write(self.buf)
1158 1166 for ch in self.in_iter:
1159 1167 collector.write(ch)
1160 1168 self.buf = collector.getvalue()
1161 1169 self.iterempty = True
1162 1170
1163 1171 def read(self, l):
1164 1172 """Read L bytes of data from the iterator of chunks of data.
1165 1173 Returns less than L bytes if the iterator runs dry."""
1166 1174 if l > len(self.buf) and not self.iterempty:
1167 1175 # Clamp to a multiple of self.targetsize
1168 1176 targetsize = self.targetsize * ((l // self.targetsize) + 1)
1169 1177 collector = cStringIO.StringIO()
1170 1178 collector.write(self.buf)
1171 1179 collected = len(self.buf)
1172 1180 for chunk in self.in_iter:
1173 1181 collector.write(chunk)
1174 1182 collected += len(chunk)
1175 1183 if collected >= targetsize:
1176 1184 break
1177 1185 if collected < targetsize:
1178 1186 self.iterempty = True
1179 1187 self.buf = collector.getvalue()
1180 1188 s, self.buf = self.buf[:l], buffer(self.buf, l)
1181 1189 return s
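
# Hypothetical use of chunkbuffer: read fixed-size records from an
# iterator of arbitrarily sized chunks.
#   buf = chunkbuffer(iter(['abcd', 'ef', 'ghijkl']))
#   buf.read(3) -> 'abc'; buf.read(3) -> 'def'; buf.read(100) -> 'ghijkl'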
1182 1190
1183 1191 def filechunkiter(f, size=65536, limit=None):
1184 1192 """Create a generator that produces the data in the file, size
1185 1193 (default 65536) bytes at a time, up to optional limit (default is
1186 1194 to read all data). Chunks may be less than size bytes if the
1187 1195 chunk is the last chunk in the file, or the file is a socket or
1188 1196 some other type of file that sometimes reads less data than is
1189 1197 requested."""
1190 1198 assert size >= 0
1191 1199 assert limit is None or limit >= 0
1192 1200 while True:
1193 1201 if limit is None: nbytes = size
1194 1202 else: nbytes = min(limit, size)
1195 1203 s = nbytes and f.read(nbytes)
1196 1204 if not s: break
1197 1205 if limit: limit -= len(s)
1198 1206 yield s
1199 1207
1200 1208 def makedate():
1201 1209 lt = time.localtime()
1202 1210 if lt[8] == 1 and time.daylight:
1203 1211 tz = time.altzone
1204 1212 else:
1205 1213 tz = time.timezone
1206 1214 return time.mktime(lt), tz
1207 1215
1208 1216 def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
1209 1217 """represent a (unixtime, offset) tuple as a localized time.
1210 1218 unixtime is seconds since the epoch, and offset is the time zone's
1211 1219 number of seconds away from UTC. if timezone is false, do not
1212 1220 append time zone to string."""
1213 1221 t, tz = date or makedate()
1214 1222 s = time.strftime(format, time.gmtime(float(t) - tz))
1215 1223 if timezone:
1216 1224 s += " %+03d%02d" % (-tz / 3600, ((-tz % 3600) / 60))
1217 1225 return s
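
# Hypothetical example (the epoch in UTC; %a and %b assume the C locale):
#   datestr((0, 0)) -> 'Thu Jan 01 00:00:00 1970 +0000'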
1218 1226
1219 1227 def strdate(string, format, defaults):
1220 1228 """parse a localized time string and return a (unixtime, offset) tuple.
1221 1229 if the string cannot be parsed, ValueError is raised."""
1222 1230 def timezone(string):
1223 1231 tz = string.split()[-1]
1224 1232 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
1225 1233 tz = int(tz)
1226 1234 offset = - 3600 * (tz / 100) - 60 * (tz % 100)
1227 1235 return offset
1228 1236 if tz == "GMT" or tz == "UTC":
1229 1237 return 0
1230 1238 return None
1231 1239
1232 1240 # NOTE: unixtime = localunixtime + offset
1233 1241 offset, date = timezone(string), string
1234 1242 if offset != None:
1235 1243 date = " ".join(string.split()[:-1])
1236 1244
1237 1245 # add missing elements from defaults
1238 1246 for part in defaults:
1239 1247 found = [True for p in part if ("%"+p) in format]
1240 1248 if not found:
1241 1249 date += "@" + defaults[part]
1242 1250 format += "@%" + part[0]
1243 1251
1244 1252 timetuple = time.strptime(date, format)
1245 1253 localunixtime = int(calendar.timegm(timetuple))
1246 1254 if offset is None:
1247 1255 # local timezone
1248 1256 unixtime = int(time.mktime(timetuple))
1249 1257 offset = unixtime - localunixtime
1250 1258 else:
1251 1259 unixtime = localunixtime + offset
1252 1260 return unixtime, offset
1253 1261
1254 1262 def parsedate(string, formats=None, defaults=None):
1255 1263 """parse a localized time string and return a (unixtime, offset) tuple.
1256 1264 The date may be a "unixtime offset" string or in one of the specified
1257 1265 formats."""
1258 1266 if not string:
1259 1267 return 0, 0
1260 1268 if not formats:
1261 1269 formats = defaultdateformats
1262 1270 string = string.strip()
1263 1271 try:
1264 1272 when, offset = map(int, string.split(' '))
1265 1273 except ValueError:
1266 1274 # fill out defaults
1267 1275 if not defaults:
1268 1276 defaults = {}
1269 1277 now = makedate()
1270 1278 for part in "d mb yY HI M S".split():
1271 1279 if part not in defaults:
1272 1280 if part[0] in "HMS":
1273 1281 defaults[part] = "00"
1274 1282 elif part[0] in "dm":
1275 1283 defaults[part] = "1"
1276 1284 else:
1277 1285 defaults[part] = datestr(now, "%" + part[0], False)
1278 1286
1279 1287 for format in formats:
1280 1288 try:
1281 1289 when, offset = strdate(string, format, defaults)
1282 1290 except ValueError:
1283 1291 pass
1284 1292 else:
1285 1293 break
1286 1294 else:
1287 1295 raise Abort(_('invalid date: %r') % string)
1288 1296 # validate explicit (probably user-specified) date and
1289 1297 # time zone offset. values must fit in signed 32 bits for
1290 1298 # current 32-bit linux runtimes. timezones go from UTC-12
1291 1299 # to UTC+14
1292 1300 if abs(when) > 0x7fffffff:
1293 1301 raise Abort(_('date exceeds 32 bits: %d') % when)
1294 1302 if offset < -50400 or offset > 43200:
1295 1303 raise Abort(_('impossible time zone offset: %d') % offset)
1296 1304 return when, offset
1297 1305
1298 1306 def matchdate(date):
1299 1307 """Return a function that matches a given date match specifier
1300 1308
1301 1309 Formats include:
1302 1310
1303 1311 '{date}' match a given date to the accuracy provided
1304 1312
1305 1313 '<{date}' on or before a given date
1306 1314
1307 1315 '>{date}' on or after a given date
1308 1316 '-{days}' within the last {days} days; '{date} to {date}' a date range
1309 1317 """
1310 1318
1311 1319 def lower(date):
1312 1320 return parsedate(date, extendeddateformats)[0]
1313 1321
1314 1322 def upper(date):
1315 1323 d = dict(mb="12", HI="23", M="59", S="59")
1316 1324 for days in "31 30 29".split():
1317 1325 try:
1318 1326 d["d"] = days
1319 1327 return parsedate(date, extendeddateformats, d)[0]
1320 1328 except Exception:
1321 1329 pass
1322 1330 d["d"] = "28"
1323 1331 return parsedate(date, extendeddateformats, d)[0]
1324 1332
1325 1333 if date[0] == "<":
1326 1334 when = upper(date[1:])
1327 1335 return lambda x: x <= when
1328 1336 elif date[0] == ">":
1329 1337 when = lower(date[1:])
1330 1338 return lambda x: x >= when
1331 1339 elif date[0] == "-":
1332 1340 try:
1333 1341 days = int(date[1:])
1334 1342 except ValueError:
1335 1343 raise Abort(_("invalid day spec: %s") % date[1:])
1336 1344 when = makedate()[0] - days * 3600 * 24
1337 1345 return lambda x: x >= when
1338 1346 elif " to " in date:
1339 1347 a, b = date.split(" to ")
1340 1348 start, stop = lower(a), upper(b)
1341 1349 return lambda x: x >= start and x <= stop
1342 1350 else:
1343 1351 start, stop = lower(date), upper(date)
1344 1352 return lambda x: x >= start and x <= stop
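# Editor's sketch (not in the original source): matchdate() turns a
# specifier into a predicate over unixtimes; this assumes the standard
# format tables accept YYYY-MM-DD:
#
#   >>> m = matchdate('>2006-04-01')
#   >>> m(parsedate('2006-05-01')[0])
#   True
#   >>> matchdate('-7')(makedate()[0])   # within the last 7 days
#   True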
1345 1353
1346 1354 def shortuser(user):
1347 1355 """Return a short representation of a user name or email address."""
1348 1356 f = user.find('@')
1349 1357 if f >= 0:
1350 1358 user = user[:f]
1351 1359 f = user.find('<')
1352 1360 if f >= 0:
1353 1361 user = user[f+1:]
1354 1362 f = user.find(' ')
1355 1363 if f >= 0:
1356 1364 user = user[:f]
1357 1365 f = user.find('.')
1358 1366 if f >= 0:
1359 1367 user = user[:f]
1360 1368 return user
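# Editor's sketch (not in the original source): the successive cuts
# reduce a full address to its shortest leading token:
#
#   >>> shortuser('Foo Bar <foo.bar@example.com>')
#   'foo'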
1361 1369
1362 1370 def ellipsis(text, maxlength=400):
1363 1371 """Trim string to at most maxlength (default: 400) characters."""
1364 1372 if len(text) <= maxlength:
1365 1373 return text
1366 1374 else:
1367 1375 return "%s..." % (text[:maxlength-3])
1368 1376
1369 1377 def walkrepos(path):
1370 1378 '''yield every hg repository under path, recursively.'''
1371 1379 def errhandler(err):
1372 1380 if err.filename == path:
1373 1381 raise err
1374 1382
1375 1383 for root, dirs, files in os.walk(path, onerror=errhandler):
1376 1384 for d in dirs:
1377 1385 if d == '.hg':
1378 1386 yield root
1379 1387 dirs[:] = []
1380 1388 break
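# Editor's sketch (hypothetical path, not in the original source):
# because the directory list is cleared once '.hg' is seen, a repository
# is yielded but never descended into, so nested repositories inside a
# working copy are not reported:
#
#   for repo in walkrepos('/srv/hg'):
#       print repo    # e.g. /srv/hg/project-a, /srv/hg/project-b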
1381 1389
1382 1390 _rcpath = None
1383 1391
1384 1392 def os_rcpath():
1385 1393 '''return default os-specific hgrc search path'''
1386 1394 path = system_rcpath()
1387 1395 path.extend(user_rcpath())
1388 1396 path = [os.path.normpath(f) for f in path]
1389 1397 return path
1390 1398
1391 1399 def rcpath():
1392 1400 '''return hgrc search path. if env var HGRCPATH is set, use it.
1393 1401 for each item in path, if directory, use files ending in .rc,
1394 1402 else use item.
1395 1403 make HGRCPATH empty to only look in .hg/hgrc of current repo.
1396 1404 if no HGRCPATH, use default os-specific path.'''
1397 1405 global _rcpath
1398 1406 if _rcpath is None:
1399 1407 if 'HGRCPATH' in os.environ:
1400 1408 _rcpath = []
1401 1409 for p in os.environ['HGRCPATH'].split(os.pathsep):
1402 1410 if not p: continue
1403 1411 if os.path.isdir(p):
1404 1412 for f in os.listdir(p):
1405 1413 if f.endswith('.rc'):
1406 1414 _rcpath.append(os.path.join(p, f))
1407 1415 else:
1408 1416 _rcpath.append(p)
1409 1417 else:
1410 1418 _rcpath = os_rcpath()
1411 1419 return _rcpath
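# Editor's sketch (hypothetical paths, not in the original source): on a
# POSIX system with HGRCPATH='/etc/hgext.d:/home/me/my.rc', rcpath()
# returns every '*.rc' file under /etc/hgext.d followed by
# /home/me/my.rc; unset, it falls back to os_rcpath(); set but empty, it
# returns nothing, leaving only the repository's own .hg/hgrc.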
1412 1420
1413 1421 def bytecount(nbytes):
1414 1422 '''return byte count formatted as readable string, with units'''
1415 1423
1416 1424 units = (
1417 1425 (100, 1<<30, _('%.0f GB')),
1418 1426 (10, 1<<30, _('%.1f GB')),
1419 1427 (1, 1<<30, _('%.2f GB')),
1420 1428 (100, 1<<20, _('%.0f MB')),
1421 1429 (10, 1<<20, _('%.1f MB')),
1422 1430 (1, 1<<20, _('%.2f MB')),
1423 1431 (100, 1<<10, _('%.0f KB')),
1424 1432 (10, 1<<10, _('%.1f KB')),
1425 1433 (1, 1<<10, _('%.2f KB')),
1426 1434 (1, 1, _('%.0f bytes')),
1427 1435 )
1428 1436
1429 1437 for multiplier, divisor, format in units:
1430 1438 if nbytes >= divisor * multiplier:
1431 1439 return format % (nbytes / float(divisor))
1432 1440 return units[-1][2] % nbytes
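# Editor's sketch (not in the original source): the (multiplier,
# divisor) ladder keeps roughly three significant digits from KB up:
#
#   >>> bytecount(1024)
#   '1.00 KB'
#   >>> bytecount(100 * (1 << 20))
#   '100 MB'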
1433 1441
1434 1442 def drop_scheme(scheme, path):
1435 1443 sc = scheme + ':'
1436 1444 if path.startswith(sc):
1437 1445 path = path[len(sc):]
1438 1446 if path.startswith('//'):
1439 1447 path = path[2:]
1440 1448 return path
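# Editor's sketch (not in the original source): the optional '//' strip
# makes URL-style file paths usable as plain paths:
#
#   >>> drop_scheme('file', 'file:///tmp/repo')
#   '/tmp/repo'
#   >>> drop_scheme('file', '/already/plain')
#   '/already/plain'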
@@ -1,34 +1,36 b''
1 1 #!/bin/sh
2 2
3 3 hg init test
4 4 cd test
5 5 echo foo>foo
6 6 hg commit -A -d '0 0' -m 1
7 7 hg --config server.uncompressed=True serve -p 20059 -d --pid-file=../hg1.pid
8 8 hg serve -p 20060 -d --pid-file=../hg2.pid
9 # Test server address cannot be reused
10 hg serve -p 20060 2>&1 | sed -e 's/abort: cannot start server:.*/abort: cannot start server:/'
9 11 cd ..
10 12 cat hg1.pid hg2.pid >> $DAEMON_PIDS
11 13
12 14 echo % clone via stream
13 15 http_proxy= hg clone --uncompressed http://localhost:20059/ copy 2>&1 | \
14 16 sed -e 's/[0-9][0-9.]*/XXX/g' -e 's/[KM]\(B\/sec\)/X\1/'
15 17 hg verify -R copy
16 18
17 19 echo % try to clone via stream, should use pull instead
18 20 http_proxy= hg clone --uncompressed http://localhost:20060/ copy2
19 21
20 22 echo % clone via pull
21 23 http_proxy= hg clone http://localhost:20059/ copy-pull
22 24 hg verify -R copy-pull
23 25
24 26 cd test
25 27 echo bar > bar
26 28 hg commit -A -d '1 0' -m 2
27 29 cd ..
28 30
29 31 echo % pull
30 32 cd copy-pull
31 33 echo '[hooks]' >> .hg/hgrc
32 34 echo 'changegroup = echo changegroup: u=$HG_URL' >> .hg/hgrc
33 35 hg pull
34 36 cd ..
@@ -1,40 +1,41 b''
1 1 adding foo
2 abort: cannot start server:
2 3 % clone via stream
3 4 streaming all changes
4 5 XXX files to transfer, XXX bytes of data
5 6 transferred XXX bytes in XXX seconds (XXX XB/sec)
6 7 XXX files updated, XXX files merged, XXX files removed, XXX files unresolved
7 8 checking changesets
8 9 checking manifests
9 10 crosschecking files in changesets and manifests
10 11 checking files
11 12 1 files, 1 changesets, 1 total revisions
12 13 % try to clone via stream, should use pull instead
13 14 requesting all changes
14 15 adding changesets
15 16 adding manifests
16 17 adding file changes
17 18 added 1 changesets with 1 changes to 1 files
18 19 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
19 20 % clone via pull
20 21 requesting all changes
21 22 adding changesets
22 23 adding manifests
23 24 adding file changes
24 25 added 1 changesets with 1 changes to 1 files
25 26 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
26 27 checking changesets
27 28 checking manifests
28 29 crosschecking files in changesets and manifests
29 30 checking files
30 31 1 files, 1 changesets, 1 total revisions
31 32 adding bar
32 33 % pull
33 34 changegroup: u=http://localhost:20059/
34 35 pulling from http://localhost:20059/
35 36 searching for changes
36 37 adding changesets
37 38 adding manifests
38 39 adding file changes
39 40 added 1 changesets with 1 changes to 1 files
40 41 (run 'hg update' to get a working copy)
@@ -1,117 +1,120 b''
1 1 #!/bin/sh
2 2
3 3 echo "[extensions]" >> $HGRCPATH
4 4 echo "mq=" >> $HGRCPATH
5 5
6 6 hg init
7 7 hg qinit
8 8
9 9 echo x > x
10 10 hg ci -Ama
11 11
12 12 hg qnew a.patch
13 13 echo a > a
14 14 hg add a
15 15 hg qrefresh
16 16
17 17 hg qnew b.patch
18 18 echo b > b
19 19 hg add b
20 20 hg qrefresh
21 21
22 22 hg qnew c.patch
23 23 echo c > c
24 24 hg add c
25 25 hg qrefresh
26 26
27 27 hg qpop -a
28 28
29 29 echo % should fail
30 hg qguard does-not-exist.patch +bleh
31
32 echo % should fail
30 33 hg qguard +fail
31 34
32 35 hg qpush
33 36 echo % should guard a.patch
34 37 hg qguard +a
35 38 echo % should print +a
36 39 hg qguard
37 40 hg qpop
38 41
39 42 hg qguard a.patch
40 43 echo % should push b.patch
41 44 hg qpush
42 45
43 46 hg qpop
44 47 hg qselect a
45 48 echo % should push a.patch
46 49 hg qpush
47 50
48 51 hg qguard c.patch -a
49 52 echo % should print -a
50 53 hg qguard c.patch
51 54
52 55 echo % should skip c.patch
53 56 hg qpush -a
54 57
55 58 hg qguard -n c.patch
56 59 echo % should push c.patch
57 60 hg qpush -a
58 61
59 62 hg qpop -a
60 63 hg qselect -n
61 64 echo % should push all
62 65 hg qpush -a
63 66
64 67 hg qpop -a
65 68 hg qguard a.patch +1
66 69 hg qguard b.patch +2
67 70 hg qselect 1
68 71 echo % should push a.patch, not b.patch
69 72 hg qpush
70 73 hg qpush
71 74 hg qpop -a
72 75
73 76 hg qselect 2
74 77 echo % should push b.patch
75 78 hg qpush
76 79 hg qpop -a
77 80
78 81 hg qselect 1 2
79 82 echo % should push a.patch, b.patch
80 83 hg qpush
81 84 hg qpush
82 85 hg qpop -a
83 86
84 87 hg qguard a.patch +1 +2 -3
85 88 hg qselect 1 2 3
86 89 echo % list patches and guards
87 90 hg qguard -l
88 91 echo % list series
89 92 hg qseries -v
90 93 echo % list guards
91 94 hg qselect
92 95 echo % should push b.patch
93 96 hg qpush
94 97
95 98 hg qpush -a
96 99 hg qselect -n --reapply
97 100 echo % guards in series file: +1 +2 -3
98 101 hg qselect -s
99 102 echo % should show c.patch
100 103 hg qapplied
101 104
102 105 hg qrename a.patch new.patch
103 106 echo % should show :
104 107 echo % new.patch: +1 +2 -3
105 108 echo % b.patch: +2
106 109 echo % c.patch: unguarded
107 110 hg qguard -l
108 111
109 112 hg qnew d.patch
110 113 hg qpop
111 114 echo % should show new.patch and b.patch as Guarded, c.patch as Applied
112 115 echo % and d.patch as Unapplied
113 116 hg qseries -v
114 117
115 118 hg qguard d.patch +2
116 119 echo % new.patch, b.patch: Guarded. c.patch: Applied. d.patch: Guarded.
117 120 hg qseries -v
@@ -1,103 +1,105 b''
1 1 adding x
2 2 Patch queue now empty
3 3 % should fail
4 abort: no patch named does-not-exist.patch
5 % should fail
4 6 abort: no patches applied
5 7 applying a.patch
6 8 Now at: a.patch
7 9 % should guard a.patch
8 10 % should print +a
9 11 a.patch: +a
10 12 Patch queue now empty
11 13 a.patch: +a
12 14 % should push b.patch
13 15 applying b.patch
14 16 Now at: b.patch
15 17 Patch queue now empty
16 18 number of unguarded, unapplied patches has changed from 2 to 3
17 19 % should push a.patch
18 20 applying a.patch
19 21 Now at: a.patch
20 22 % should print -a
21 23 c.patch: -a
22 24 % should skip c.patch
23 25 applying b.patch
24 26 skipping c.patch - guarded by '-a'
25 27 Now at: b.patch
26 28 % should push c.patch
27 29 applying c.patch
28 30 Now at: c.patch
29 31 Patch queue now empty
30 32 guards deactivated
31 33 number of unguarded, unapplied patches has changed from 3 to 2
32 34 % should push all
33 35 applying b.patch
34 36 applying c.patch
35 37 Now at: c.patch
36 38 Patch queue now empty
37 39 number of unguarded, unapplied patches has changed from 1 to 2
38 40 % should push a.patch, not b.patch
39 41 applying a.patch
40 42 Now at: a.patch
41 43 applying c.patch
42 44 Now at: c.patch
43 45 Patch queue now empty
44 46 % should push b.patch
45 47 applying b.patch
46 48 Now at: b.patch
47 49 Patch queue now empty
48 50 number of unguarded, unapplied patches has changed from 2 to 3
49 51 % should push a.patch, b.patch
50 52 applying a.patch
51 53 Now at: a.patch
52 54 applying b.patch
53 55 Now at: b.patch
54 56 Patch queue now empty
55 57 number of unguarded, unapplied patches has changed from 3 to 2
56 58 % list patches and guards
57 59 a.patch: +1 +2 -3
58 60 b.patch: +2
59 61 c.patch: unguarded
60 62 % list series
61 63 0 G a.patch
62 64 1 U b.patch
63 65 2 U c.patch
64 66 % list guards
65 67 1
66 68 2
67 69 3
68 70 % should push b.patch
69 71 applying b.patch
70 72 Now at: b.patch
71 73 applying c.patch
72 74 Now at: c.patch
73 75 guards deactivated
74 76 popping guarded patches
75 77 Patch queue now empty
76 78 reapplying unguarded patches
77 79 applying c.patch
78 80 Now at: c.patch
79 81 % guards in series file: +1 +2 -3
80 82 +1
81 83 +2
82 84 -3
83 85 % should show c.patch
84 86 c.patch
85 87 % should show :
86 88 % new.patch: +1 +2 -3
87 89 % b.patch: +2
88 90 % c.patch: unguarded
89 91 new.patch: +1 +2 -3
90 92 b.patch: +2
91 93 c.patch: unguarded
92 94 Now at: c.patch
93 95 % should show new.patch and b.patch as Guarded, c.patch as Applied
94 96 % and d.patch as Unapplied
95 97 0 G new.patch
96 98 1 G b.patch
97 99 2 A c.patch
98 100 3 U d.patch
99 101 % new.patch, b.patch: Guarded. c.patch: Applied. d.patch: Guarded.
100 102 0 G new.patch
101 103 1 G b.patch
102 104 2 A c.patch
103 105 3 G d.patch