##// END OF EJS Templates
localrepo and dirstate: rename reload to invalidate...
Matt Mackall -
r4613:3a645af7 default
parent child Browse files
Show More
@@ -1,2302 +1,2302 b''
1 1 # queue.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 '''patch management and development
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use "hg help command" for more details):
18 18
19 19 prepare repository to work with patches qinit
20 20 create new patch qnew
21 21 import existing patch qimport
22 22
23 23 print patch series qseries
24 24 print applied patches qapplied
25 25 print name of top applied patch qtop
26 26
27 27 add known patch to applied stack qpush
28 28 remove patch from applied stack qpop
29 29 refresh contents of top applied patch qrefresh
30 30 '''
31 31
32 32 from mercurial.i18n import _
33 33 from mercurial import commands, cmdutil, hg, patch, revlog, util, changegroup
34 34 import os, sys, re, errno
35 35
# qclone and qversion do not need a repository to operate
commands.norepo += " qclone qversion"

# Patch names looks like unix-file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath
41 41
class statusentry:
    """One entry of the status file: an applied patch.

    Holds the changeset hash (rev) and the patch name; parses a
    "rev:name" line when name is not given.  Malformed lines yield
    rev = name = None.
    """
    def __init__(self, rev, name=None):
        if name:
            self.rev, self.name = rev, name
        else:
            parts = rev.split(':', 1)
            if len(parts) == 2:
                self.rev, self.name = parts
            else:
                # unparseable status line
                self.rev = self.name = None

    def __str__(self):
        return self.rev + ':' + self.name
55 55
56 56 class queue:
    def __init__(self, ui, path, patchdir=None):
        """Initialize a patch queue rooted at *path*.

        Patches live in patchdir (default: <path>/patches); the series
        and status files are read eagerly if they exist.
        """
        self.basepath = path
        self.path = patchdir or os.path.join(path, "patches")
        self.opener = util.opener(self.path)
        self.ui = ui
        self.applied = []        # statusentry list of applied patches
        self.full_series = []    # raw series lines, incl. comments/guards
        self.applied_dirty = 0
        self.series_dirty = 0
        self.series_path = "series"
        self.status_path = "status"
        self.guards_path = "guards"
        self.active_guards = None    # lazily loaded by active()
        self.guards_dirty = False
        self._diffopts = None        # lazily built by diffopts()

        if os.path.exists(self.join(self.series_path)):
            self.full_series = self.opener(self.series_path).read().splitlines()
        self.parse_series()

        if os.path.exists(self.join(self.status_path)):
            lines = self.opener(self.status_path).read().splitlines()
            self.applied = [statusentry(l) for l in lines]
80 80
81 81 def diffopts(self):
82 82 if self._diffopts is None:
83 83 self._diffopts = patch.diffopts(self.ui)
84 84 return self._diffopts
85 85
    def join(self, *p):
        """Join path components below the patch directory."""
        return os.path.join(self.path, *p)
88 88
89 89 def find_series(self, patch):
90 90 pre = re.compile("(\s*)([^#]+)")
91 91 index = 0
92 92 for l in self.full_series:
93 93 m = pre.match(l)
94 94 if m:
95 95 s = m.group(2)
96 96 s = s.rstrip()
97 97 if s == patch:
98 98 return index
99 99 index += 1
100 100 return None
101 101
102 102 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
103 103
    def parse_series(self):
        """Rebuild self.series (patch names) and self.series_guards
        (per-patch guard lists) from the raw full_series lines."""
        self.series = []
        self.series_guards = []
        for l in self.full_series:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                # whole line is a comment
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.series_path)))
                self.series.append(patch)
                self.series_guards.append(self.guard_re.findall(comment))
124 124
125 125 def check_guard(self, guard):
126 126 bad_chars = '# \t\r\n\f'
127 127 first = guard[0]
128 128 for c in '-+':
129 129 if first == c:
130 130 return (_('guard %r starts with invalid character: %r') %
131 131 (guard, c))
132 132 for c in bad_chars:
133 133 if c in guard:
134 134 return _('invalid character in guard %r: %r') % (guard, c)
135 135
    def set_active(self, guards):
        """Make *guards* the active guard set.

        Validates each guard, removes duplicates, sorts, and marks the
        guards file dirty.
        """
        for guard in guards:
            bad = self.check_guard(guard)
            if bad:
                raise util.Abort(bad)
        # de-duplicate via dict keys, then sort (py2 idiom)
        guards = dict.fromkeys(guards).keys()
        guards.sort()
        self.ui.debug('active guards: %s\n' % ' '.join(guards))
        self.active_guards = guards
        self.guards_dirty = True
146 146
    def active(self):
        """Return the active guards, loading the guards file on first use.

        Invalid entries in the file are reported with their line number
        and skipped; a missing file means no guards.
        """
        if self.active_guards is None:
            self.active_guards = []
            try:
                guards = self.opener(self.guards_path).read().split()
            except IOError, err:
                if err.errno != errno.ENOENT: raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.check_guard(guard)
                if bad:
                    # warn but keep the remaining valid guards
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guards_path), i + 1, bad))
                else:
                    self.active_guards.append(guard)
        return self.active_guards
163 163
    def set_guards(self, idx, guards):
        """Replace the guards on series entry *idx* with *guards*.

        Each guard must be '+name' or '-name' with a valid name.
        """
        for g in guards:
            if len(g) < 2:
                raise util.Abort(_('guard %r too short') % g)
            if g[0] not in '-+':
                raise util.Abort(_('guard %r starts with invalid char') % g)
            bad = self.check_guard(g[1:])
            if bad:
                raise util.Abort(bad)
        # strip any existing guard annotations, then append the new set
        drop = self.guard_re.sub('', self.full_series[idx])
        self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
        self.parse_series()
        self.series_dirty = True
177 177
178 178 def pushable(self, idx):
179 179 if isinstance(idx, str):
180 180 idx = self.series.index(idx)
181 181 patchguards = self.series_guards[idx]
182 182 if not patchguards:
183 183 return True, None
184 184 default = False
185 185 guards = self.active()
186 186 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
187 187 if exactneg:
188 188 return False, exactneg[0]
189 189 pos = [g for g in patchguards if g[0] == '+']
190 190 exactpos = [g for g in pos if g[1:] in guards]
191 191 if pos:
192 192 if exactpos:
193 193 return True, exactpos[0]
194 194 return False, pos
195 195 return True, ''
196 196
    def explain_pushable(self, idx, all_patches=False):
        """Tell the user why series entry *idx* is or is not pushable.

        Writes to ui.write when listing all patches, ui.warn otherwise;
        silent unless all_patches or verbose mode.
        """
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %r\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %r\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])
221 221
    def save_dirty(self):
        """Write the status, series and guards files that were modified."""
        def write_list(items, path):
            # one item per line
            fp = self.opener(path, 'w')
            for i in items:
                print >> fp, i
            fp.close()
        if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
        if self.series_dirty: write_list(self.full_series, self.series_path)
        if self.guards_dirty: write_list(self.active_guards, self.guards_path)
231 231
    def readheaders(self, patch):
        """Parse the header of patch file *patch*.

        Returns (message, comments, user, date, haspatch): message is
        the commit message lines, comments everything above the diff,
        user/date come from "# User"/"# Date" or From:/Subject: style
        tags, and haspatch tells whether an actual diff body was seen
        (diffstart reached 2).
        """
        def eatdiff(lines):
            # drop trailing diff-header noise
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            # drop trailing blank lines
            while lines:
                l = lines[-1]
                if re.match('\s*$', l):
                    del lines[-1]
                else:
                    break

        pf = self.join(patch)
        message = []
        comments = []
        user = None
        date = None
        format = None       # parser state: None, "hgpatch", "tag", "tagdone"
        subject = None
        diffstart = 0       # 0 = no diff yet, 1 = saw "--- ", 2 = real diff

        for line in file(pf):
            line = line.rstrip()
            if line.startswith('diff --git'):
                diffstart = 2
                break
            if diffstart:
                # "--- " must be followed by "+++ " to count as a diff
                if line.startswith('+++ '):
                    diffstart = 2
                break
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)
        return (message, comments, user, date, diffstart > 1)
308 308
    def removeundo(self, repo):
        """Delete the repo's undo file so 'hg rollback' cannot undo
        mq's own operations (which would corrupt queue state)."""
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn('error removing undo: %s\n' % str(inst))
317 317
318 318 def printdiff(self, repo, node1, node2=None, files=None,
319 319 fp=None, changes=None, opts={}):
320 320 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
321 321
322 322 patch.diff(repo, node1, node2, fns, match=matchfn,
323 323 fp=fp, changes=changes, opts=self.diffopts())
324 324
    def mergeone(self, repo, mergeq, head, patch, rev, wlock):
        """Bring one patch over from *mergeq* on top of *head*.

        First tries a plain apply; on failure, strips the partial
        result, merges with *rev*, commits, and regenerates the patch
        file from the merge.  Returns (err, node).
        """
        # first try just applying the patch
        (err, n) = self.apply(repo, [ patch ], update_status=False,
                              strict=True, merge=rev, wlock=wlock)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn("patch didn't work out, merging %s\n" % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head, wlock=wlock)
        self.strip(repo, n, update=False, backup='strip', wlock=wlock)

        ctx = repo.changectx(rev)
        ret = hg.merge(repo, rev, wlock=wlock)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        n = repo.commit(None, ctx.description(), ctx.user(),
                        force=1, wlock=wlock)
        if n == None:
            raise util.Abort(_("repo commit failed"))
        try:
            message, comments, user, date, patchfound = mergeq.readheaders(patch)
        except:
            raise util.Abort(_("unable to read %s") % patch)

        # rewrite the patch file to match the merged result
        patchf = self.opener(patch, "w")
        if comments:
            comments = "\n".join(comments) + '\n\n'
            patchf.write(comments)
        self.printdiff(repo, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)
363 363
    def qparents(self, repo, rev=None):
        """Return the node the patch queue is parented on.

        Without *rev*: the first dirstate parent, or the top applied
        patch when the working dir is at a merge.  With *rev*: the
        parent of that rev that is in the applied stack (falling back
        to the first parent).
        """
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == revlog.nullid:
                return p1
            if len(self.applied) == 0:
                return None
            return revlog.bin(self.applied[-1].rev)
        pp = repo.changelog.parents(rev)
        if pp[1] != revlog.nullid:
            # rev is a merge: prefer whichever parent is an applied patch
            arevs = [ x.rev for x in self.applied ]
            p0 = revlog.hex(pp[0])
            p1 = revlog.hex(pp[1])
            if p0 in arevs:
                return pp[0]
            if p1 in arevs:
                return pp[1]
        return pp[0]
382 382
    def mergepatch(self, repo, mergeq, series, wlock):
        """Merge each patch in *series* over from *mergeq*.

        Returns (err, head); stops at the first patch that cannot be
        resolved or applied.
        """
        if len(self.applied) == 0:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
                            wlock=wlock)
            self.removeundo(repo)
            self.applied.append(statusentry(revlog.hex(n), pname))
            self.applied_dirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn("patch %s does not exist\n" % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explain_pushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn("patch %s is not applied\n" % patch)
                return (1, None)
            rev = revlog.bin(info[1])
            (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
            if head:
                self.applied.append(statusentry(revlog.hex(head), patch))
                self.applied_dirty = 1
            if err:
                return (err, head)
        self.save_dirty()
        return (0, head)
422 422
    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: file name of patch

        Returns (success, files, fuzz): files maps the touched file
        names, fuzz is truthy when hunks applied with fuzz.'''
        files = {}
        try:
            fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
                               files=files)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn("patch failed, unable to continue (try -v)\n")
            return (False, files, False)

        return (True, files, fuzz)
437 437
438 438 def apply(self, repo, series, list=False, update_status=True,
439 439 strict=False, patchdir=None, merge=None, wlock=None,
440 440 all_files={}):
441 441 if not wlock:
442 442 wlock = repo.wlock()
443 443 lock = repo.lock()
444 444 tr = repo.transaction()
445 445 try:
446 446 ret = self._apply(tr, repo, series, list, update_status,
447 447 strict, patchdir, merge, wlock,
448 448 lock=lock, all_files=all_files)
449 449 tr.close()
450 450 self.save_dirty()
451 451 return ret
452 452 except:
453 453 try:
454 454 tr.abort()
455 455 finally:
456 repo.reload()
457 repo.wreload()
456 repo.invalidate()
457 repo.dirstate.invalidate()
458 458 raise
459 459
460 460 def _apply(self, tr, repo, series, list=False, update_status=True,
461 461 strict=False, patchdir=None, merge=None, wlock=None,
462 462 lock=None, all_files={}):
463 463 # TODO unify with commands.py
464 464 if not patchdir:
465 465 patchdir = self.path
466 466 err = 0
467 467 n = None
468 468 for patchname in series:
469 469 pushable, reason = self.pushable(patchname)
470 470 if not pushable:
471 471 self.explain_pushable(patchname, all_patches=True)
472 472 continue
473 473 self.ui.warn("applying %s\n" % patchname)
474 474 pf = os.path.join(patchdir, patchname)
475 475
476 476 try:
477 477 message, comments, user, date, patchfound = self.readheaders(patchname)
478 478 except:
479 479 self.ui.warn("Unable to read %s\n" % patchname)
480 480 err = 1
481 481 break
482 482
483 483 if not message:
484 484 message = "imported patch %s\n" % patchname
485 485 else:
486 486 if list:
487 487 message.append("\nimported patch %s" % patchname)
488 488 message = '\n'.join(message)
489 489
490 490 (patcherr, files, fuzz) = self.patch(repo, pf)
491 491 all_files.update(files)
492 492 patcherr = not patcherr
493 493
494 494 if merge and files:
495 495 # Mark as removed/merged and update dirstate parent info
496 496 removed = []
497 497 merged = []
498 498 for f in files:
499 499 if os.path.exists(repo.dirstate.wjoin(f)):
500 500 merged.append(f)
501 501 else:
502 502 removed.append(f)
503 503 repo.dirstate.update(repo.dirstate.filterfiles(removed), 'r')
504 504 repo.dirstate.update(repo.dirstate.filterfiles(merged), 'm')
505 505 p1, p2 = repo.dirstate.parents()
506 506 repo.dirstate.setparents(p1, merge)
507 507 files = patch.updatedir(self.ui, repo, files, wlock=wlock)
508 508 n = repo.commit(files, message, user, date, force=1, lock=lock,
509 509 wlock=wlock)
510 510
511 511 if n == None:
512 512 raise util.Abort(_("repo commit failed"))
513 513
514 514 if update_status:
515 515 self.applied.append(statusentry(revlog.hex(n), patchname))
516 516
517 517 if patcherr:
518 518 if not patchfound:
519 519 self.ui.warn("patch %s is empty\n" % patchname)
520 520 err = 0
521 521 else:
522 522 self.ui.warn("patch failed, rejects left in working dir\n")
523 523 err = 1
524 524 break
525 525
526 526 if fuzz and strict:
527 527 self.ui.warn("fuzz found when applying patch, stopping\n")
528 528 err = 1
529 529 break
530 530 self.removeundo(repo)
531 531 return (err, n)
532 532
    def delete(self, repo, patches, opts):
        """Remove *patches* from the series.

        With opts['rev'], also removes the applied patches matching
        those revisions (oldest first).  Patch files are deleted unless
        opts['keep'] is set.
        """
        realpatches = []
        for patch in patches:
            patch = self.lookup(patch, strict=True)
            info = self.isapplied(patch)
            if info:
                raise util.Abort(_("cannot delete applied patch %s") % patch)
            if patch not in self.series:
                raise util.Abort(_("patch %s not in series file") % patch)
            realpatches.append(patch)

        appliedbase = 0
        if opts.get('rev'):
            if not self.applied:
                raise util.Abort(_('no patches applied'))
            revs = cmdutil.revrange(repo, opts['rev'])
            if len(revs) > 1 and revs[0] > revs[1]:
                revs.reverse()
            for rev in revs:
                if appliedbase >= len(self.applied):
                    raise util.Abort(_("revision %d is not managed") % rev)

                # revisions must match the applied stack bottom-up
                base = revlog.bin(self.applied[appliedbase].rev)
                node = repo.changelog.node(rev)
                if node != base:
                    raise util.Abort(_("cannot delete revision %d above "
                                       "applied patches") % rev)
                realpatches.append(self.applied[appliedbase].name)
                appliedbase += 1

        if not opts.get('keep'):
            r = self.qrepo()
            if r:
                r.remove(realpatches, True)
            else:
                for p in realpatches:
                    os.unlink(self.join(p))

        if appliedbase:
            del self.applied[:appliedbase]
            self.applied_dirty = 1
        indices = [self.find_series(p) for p in realpatches]
        indices.sort()
        # delete from the end so earlier indices remain valid
        for i in indices[-1::-1]:
            del self.full_series[i]
        self.parse_series()
        self.series_dirty = 1
580 580
581 581 def check_toppatch(self, repo):
582 582 if len(self.applied) > 0:
583 583 top = revlog.bin(self.applied[-1].rev)
584 584 pp = repo.dirstate.parents()
585 585 if top not in pp:
586 586 raise util.Abort(_("queue top not at same revision as working directory"))
587 587 return top
588 588 return None
589 589 def check_localchanges(self, repo, force=False, refresh=True):
590 590 m, a, r, d = repo.status()[:4]
591 591 if m or a or r or d:
592 592 if not force:
593 593 if refresh:
594 594 raise util.Abort(_("local changes found, refresh first"))
595 595 else:
596 596 raise util.Abort(_("local changes found"))
597 597 return m, a, r, d
    def new(self, repo, patch, msg=None, force=None):
        """Create a new patch *patch* on top of the applied stack.

        Any outstanding working-dir changes (requires *force*) are
        committed into the new patch, which is appended to the series
        and marked applied.
        """
        if os.path.exists(self.join(patch)):
            raise util.Abort(_('patch "%s" already exists') % patch)
        m, a, r, d = self.check_localchanges(repo, force)
        commitfiles = m + a + r
        self.check_toppatch(repo)
        wlock = repo.wlock()
        insert = self.full_series_end()
        if msg:
            n = repo.commit(commitfiles, "[mq]: %s" % msg, force=True,
                            wlock=wlock)
        else:
            n = repo.commit(commitfiles,
                            "New patch: %s" % patch, force=True, wlock=wlock)
        if n == None:
            raise util.Abort(_("repo commit failed"))
        self.full_series[insert:insert] = [patch]
        self.applied.append(statusentry(revlog.hex(n), patch))
        self.parse_series()
        self.series_dirty = 1
        self.applied_dirty = 1
        # seed the patch file with the message, if any
        p = self.opener(patch, "w")
        if msg:
            msg = msg + "\n"
            p.write(msg)
        p.close()
        wlock = None
        r = self.qrepo()
        if r: r.add([patch])
        if commitfiles:
            self.refresh(repo, short=True)
        self.removeundo(repo)
630 630
    def strip(self, repo, rev, update=True, backup="all", wlock=None):
        """Remove changeset *rev* and its descendants from the repo.

        Branches above rev that are not descended from it are saved to
        a bundle and re-added afterwards.  *backup* selects which
        bundles are kept: "all" also bundles the stripped revs
        themselves; "strip" keeps the re-add bundle on disk.
        """
        def limitheads(chlog, stop):
            """return the list of all nodes that have no children"""
            p = {}
            h = []
            stoprev = 0
            if stop in chlog.nodemap:
                stoprev = chlog.rev(stop)

            # walk newest-to-oldest; a node never seen as a parent is a head
            for r in xrange(chlog.count() - 1, -1, -1):
                n = chlog.node(r)
                if n not in p:
                    h.append(n)
                if n == stop:
                    break
                if r < stoprev:
                    break
                for pn in chlog.parents(n):
                    p[pn] = 1
            return h

        def bundle(cg):
            # write changegroup cg as a bundle under .hg/strip-backup
            backupdir = repo.join("strip-backup")
            if not os.path.isdir(backupdir):
                os.mkdir(backupdir)
            name = os.path.join(backupdir, "%s" % revlog.short(rev))
            name = savename(name)
            self.ui.warn("saving bundle to %s\n" % name)
            return changegroup.writebundle(cg, name, "HG10BZ")

        def stripall(revnum):
            # truncate every filelog touched at or after revnum
            mm = repo.changectx(rev).manifest()
            seen = {}

            for x in xrange(revnum, repo.changelog.count()):
                for f in repo.changectx(x).files():
                    if f in seen:
                        continue
                    seen[f] = 1
                    if f in mm:
                        filerev = mm[f]
                    else:
                        filerev = 0
                    seen[f] = filerev
            # we go in two steps here so the strip loop happens in a
            # sensible order. When stripping many files, this helps keep
            # our disk access patterns under control.
            seen_list = seen.keys()
            seen_list.sort()
            for f in seen_list:
                ff = repo.file(f)
                filerev = seen[f]
                if filerev != 0:
                    if filerev in ff.nodemap:
                        filerev = ff.rev(filerev)
                    else:
                        filerev = 0
                ff.strip(filerev, revnum)

        if not wlock:
            wlock = repo.wlock()
        lock = repo.lock()
        chlog = repo.changelog
        # TODO delete the undo files, and handle undo of merge sets
        pp = chlog.parents(rev)
        revnum = chlog.rev(rev)

        if update:
            self.check_localchanges(repo, refresh=False)
            urev = self.qparents(repo, rev)
            hg.clean(repo, urev, wlock=wlock)
            repo.dirstate.write()

        # save is a list of all the branches we are truncating away
        # that we actually want to keep. changegroup will be used
        # to preserve them and add them back after the truncate
        saveheads = []
        savebases = {}

        heads = limitheads(chlog, rev)
        seen = {}

        # search through all the heads, finding those where the revision
        # we want to strip away is an ancestor. Also look for merges
        # that might be turned into new heads by the strip.
        while heads:
            h = heads.pop()
            n = h
            while True:
                seen[n] = 1
                pp = chlog.parents(n)
                if pp[1] != revlog.nullid:
                    for p in pp:
                        if chlog.rev(p) > revnum and p not in seen:
                            heads.append(p)
                if pp[0] == revlog.nullid:
                    break
                if chlog.rev(pp[0]) < revnum:
                    break
                n = pp[0]
                if n == rev:
                    break
            r = chlog.reachable(h, rev)
            if rev not in r:
                # h is not a descendant of rev: keep this branch
                saveheads.append(h)
                for x in r:
                    if chlog.rev(x) > revnum:
                        savebases[x] = 1

        # create a changegroup for all the branches we need to keep
        if backup == "all":
            backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip')
            bundle(backupch)
        if saveheads:
            backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip')
            chgrpfile = bundle(backupch)

        stripall(revnum)

        change = chlog.read(rev)
        chlog.strip(revnum, revnum)
        repo.manifest.strip(repo.manifest.rev(change[0]), revnum)
        self.removeundo(repo)
        if saveheads:
            # re-add the saved branches from the bundle
            self.ui.status("adding branch\n")
            commands.unbundle(self.ui, repo, "file:%s" % chgrpfile,
                              update=False)
            if backup != "strip":
                os.unlink(chgrpfile)
760 760
761 761 def isapplied(self, patch):
762 762 """returns (index, rev, patch)"""
763 763 for i in xrange(len(self.applied)):
764 764 a = self.applied[i]
765 765 if a.name == patch:
766 766 return (i, a.rev, a.name)
767 767 return None
768 768
769 769 # if the exact patch name does not exist, we try a few
770 770 # variations. If strict is passed, we try only #1
771 771 #
772 772 # 1) a number to indicate an offset in the series file
773 773 # 2) a unique substring of the patch name was given
774 774 # 3) patchname[-+]num to indicate an offset in the series file
    def lookup(self, patch, strict=False):
        """Resolve a user-supplied patch identifier to a series name.

        Accepts an exact name, a series index, a unique substring, the
        qtip/qbase aliases, or (unless *strict*) name-N / name+N
        offsets into the series.  Raises util.Abort when nothing
        matches.
        """
        patch = patch and str(patch)

        def partial_name(s):
            # exact name, unique substring, or qtip/qbase alias
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn(' %s\n' % m)
                return None
            if matches:
                return matches[0]
            if len(self.series) > 0 and len(self.applied) > 0:
                if s == 'qtip':
                    return self.series[self.series_end(True)-1]
                if s == 'qbase':
                    return self.series[0]
            return None
        if patch == None:
            return None

        # we don't want to return a partial match until we make
        # sure the file name passed in does not exist (checked below)
        res = partial_name(patch)
        if res and res == patch:
            return res

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except(ValueError, OverflowError):
                pass
            else:
                if sno < len(self.series):
                    return self.series[sno]
            if not strict:
                # return any partial match made above
                if res:
                    return res
                # name-N: N patches before the named one in the series
                minus = patch.rfind('-')
                if minus >= 0:
                    res = partial_name(patch[:minus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[minus+1:] or 1)
                        except(ValueError, OverflowError):
                            pass
                        else:
                            if i - off >= 0:
                                return self.series[i - off]
                # name+N: N patches after the named one
                plus = patch.rfind('+')
                if plus >= 0:
                    res = partial_name(patch[:plus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[plus+1:] or 1)
                        except(ValueError, OverflowError):
                            pass
                        else:
                            if i + off < len(self.series):
                                return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)
841 841
    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None, wlock=None):
        """Apply patches up to and including *patch* (or just the next
        one).  Returns the error status from the apply; cleans up the
        working directory if applying raises.
        """
        if not wlock:
            wlock = repo.wlock()
        patch = self.lookup(patch)
        # Suppose our series file is: A B C and the current 'top' patch is B.
        # qpush C should be performed (moving forward)
        # qpush B is a NOP (no change)
        # qpush A is an error (can't go backwards with qpush)
        if patch:
            info = self.isapplied(patch)
            if info:
                if info[0] < len(self.applied) - 1:
                    raise util.Abort(_("cannot push to a previous patch: %s") %
                                     patch)
                if info[0] < len(self.series) - 1:
                    self.ui.warn(_('qpush: %s is already at the top\n') % patch)
                else:
                    self.ui.warn(_('all patches are currently applied\n'))
                return

        # Following the above example, starting at 'top' of B:
        # qpush should be performed (pushes C), but a subsequent qpush without
        # an argument is an error (nothing to apply). This allows a loop
        # of "...while hg qpush..." to work as it detects an error when done
        if self.series_end() == len(self.series):
            self.ui.warn(_('patch series already fully applied\n'))
            return 1
        if not force:
            self.check_localchanges(repo)

        self.applied_dirty = 1;
        start = self.series_end()
        if start > 0:
            self.check_toppatch(repo)
        if not patch:
            patch = self.series[start]
            end = start + 1
        else:
            end = self.series.index(patch, start) + 1
        s = self.series[start:end]
        all_files = {}
        try:
            if mergeq:
                ret = self.mergepatch(repo, mergeq, s, wlock)
            else:
                ret = self.apply(repo, s, list, wlock=wlock,
                                 all_files=all_files)
        except:
            self.ui.warn(_('cleaning up working directory...'))
            node = repo.dirstate.parents()[0]
            hg.revert(repo, node, None, wlock)
            unknown = repo.status(wlock=wlock)[4]
            # only remove unknown files that we know we touched or
            # created while patching
            for f in unknown:
                if f in all_files:
                    util.unlink(repo.wjoin(f))
            self.ui.warn(_('done\n'))
            raise
        top = self.applied[-1].name
        if ret[0]:
            self.ui.write("Errors during apply, please fix and refresh %s\n" %
                          top)
        else:
            self.ui.write("Now at: %s\n" % top)
        return ret[0]
909 909
    def pop(self, repo, patch=None, force=False, update=True, all=False,
            wlock=None):
        """Unapply patches until *patch* is on top (or pop one patch;
        with *all*, pop everything).  Updates the working directory by
        a simplified update unless *update* is off.
        """
        def getfile(f, rev):
            # restore f's working copy from filelog revision rev
            t = repo.file(f).read(rev)
            repo.wfile(f, "w").write(t)

        if not wlock:
            wlock = repo.wlock()
        if patch:
            # index, rev, patch
            info = self.isapplied(patch)
            if not info:
                patch = self.lookup(patch)
                info = self.isapplied(patch)
            if not info:
                raise util.Abort(_("patch %s is not applied") % patch)

        if len(self.applied) == 0:
            # Allow qpop -a to work repeatedly,
            # but not qpop without an argument
            self.ui.warn(_("no patches applied\n"))
            return not all

        if not update:
            # a dirstate parent inside the applied stack would be
            # stripped away; force an update in that case
            parents = repo.dirstate.parents()
            rr = [ revlog.bin(x.rev) for x in self.applied ]
            for p in parents:
                if p in rr:
                    self.ui.warn("qpop: forcing dirstate update\n")
                    update = True

        if not force and update:
            self.check_localchanges(repo)

        self.applied_dirty = 1;
        end = len(self.applied)
        if not patch:
            if all:
                popi = 0
            else:
                popi = len(self.applied) - 1
        else:
            popi = info[0] + 1
            if popi >= end:
                self.ui.warn("qpop: %s is already at the top\n" % patch)
                return
        info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]

        start = info[0]
        rev = revlog.bin(info[1])

        # we know there are no local changes, so we can make a simplified
        # form of hg.update.
        if update:
            top = self.check_toppatch(repo)
            qp = self.qparents(repo, rev)
            changes = repo.changelog.read(qp)
            mmap = repo.manifest.read(changes[0])
            m, a, r, d, u = repo.status(qp, top)[:5]
            if d:
                raise util.Abort("deletions found between repo revs")
            for f in m:
                getfile(f, mmap[f])
            for f in r:
                getfile(f, mmap[f])
                util.set_exec(repo.wjoin(f), mmap.execf(f))
            repo.dirstate.update(m + r, 'n')
            for f in a:
                # files added by the popped patches disappear
                try:
                    os.unlink(repo.wjoin(f))
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise
                try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                except: pass
            if a:
                repo.dirstate.forget(a)
            repo.dirstate.setparents(qp, revlog.nullid)
        self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
        del self.applied[start:end]
        if len(self.applied):
            self.ui.write("Now at: %s\n" % self.applied[-1].name)
        else:
            self.ui.write("Patch queue now empty\n")
994 994
995 995 def diff(self, repo, pats, opts):
996 996 top = self.check_toppatch(repo)
997 997 if not top:
998 998 self.ui.write("No patches applied\n")
999 999 return
1000 1000 qp = self.qparents(repo, top)
1001 1001 if opts.get('git'):
1002 1002 self.diffopts().git = True
1003 1003 self.printdiff(repo, qp, files=pats, opts=opts)
1004 1004
    def refresh(self, repo, pats=None, **opts):
        """Regenerate the topmost applied patch from the working directory.

        Rewrites the patch file (header plus diff) and recommits it so
        the top changeset matches.  With file patterns, only matching
        changes are folded into the patch; non-matching modifications
        stay in the working directory.  Returns 1 if nothing is applied.
        """
        if len(self.applied) == 0:
            self.ui.write("No patches applied\n")
            return 1
        wlock = repo.wlock()
        self.check_toppatch(repo)
        (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
        top = revlog.bin(top)
        cparents = repo.changelog.parents(top)
        patchparent = self.qparents(repo, top)
        message, comments, user, date, patchfound = self.readheaders(patchfn)

        patchf = self.opener(patchfn, "w")
        msg = opts.get('msg', '').rstrip()
        if msg:
            if comments:
                # Remove existing message.
                ci = 0
                subj = None
                for mi in xrange(len(message)):
                    if comments[ci].lower().startswith('subject: '):
                        subj = comments[ci][9:]
                    while message[mi] != comments[ci] and message[mi] != subj:
                        ci += 1
                    del comments[ci]
            comments.append(msg)
        if comments:
            comments = "\n".join(comments) + '\n\n'
            patchf.write(comments)

        if opts.get('git'):
            self.diffopts().git = True
        fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
        tip = repo.changelog.tip()
        if top == tip:
            # if the top of our patch queue is also the tip, there is an
            # optimization here. We update the dirstate in place and strip
            # off the tip commit. Then just commit the current directory
            # tree. We can also send repo.commit the list of files
            # changed to speed up the diff
            #
            # in short mode, we only diff the files included in the
            # patch already
            #
            # this should really read:
            #   mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
            # but we do it backwards to take advantage of manifest/chlog
            # caching against the next repo.status call
            #
            mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
            changes = repo.changelog.read(tip)
            man = repo.manifest.read(changes[0])
            aaa = aa[:]
            if opts.get('short'):
                filelist = mm + aa + dd
                match = dict.fromkeys(filelist).__contains__
            else:
                filelist = None
                match = util.always
            m, a, r, d, u = repo.status(files=filelist, match=match)[:5]

            # we might end up with files that were added between tip and
            # the dirstate parent, but then changed in the local dirstate.
            # in this case, we want them to only show up in the added section
            for x in m:
                if x not in aa:
                    mm.append(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    del dd[dd.index(x)]
                    mm.append(x)
                else:
                    aa.append(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    del aa[aa.index(x)]
                    forget.append(x)
                    continue
                elif x in mm:
                    del mm[mm.index(x)]
                    dd.append(x)

            m = util.unique(mm)
            r = util.unique(dd)
            a = util.unique(aa)
            c = [filter(matchfn, l) for l in (m, a, r, [], u)]
            filelist = util.unique(c[0] + c[1] + c[2])
            patch.diff(repo, patchparent, files=filelist, match=matchfn,
                       fp=patchf, changes=c, opts=self.diffopts())
            patchf.close()

            # make the working directory's parent the patch parent before
            # recommitting the refreshed patch
            repo.dirstate.setparents(*cparents)
            copies = {}
            for dst in a:
                src = repo.dirstate.copied(dst)
                if src is None:
                    continue
                copies.setdefault(src, []).append(dst)
            repo.dirstate.update(a, 'a')
            # remember the copies between patchparent and tip
            # this may be slow, so don't do it if we're not tracking copies
            if self.diffopts().git:
                for dst in aaa:
                    f = repo.file(dst)
                    src = f.renamed(man[dst])
                    if src:
                        copies[src[0]] = copies.get(dst, [])
                        if dst in a:
                            copies[src[0]].append(dst)
                    # we can't copy a file created by the patch itself
                    if dst in copies:
                        del copies[dst]
            for src, dsts in copies.iteritems():
                for dst in dsts:
                    repo.dirstate.copy(src, dst)
            repo.dirstate.update(r, 'r')
            # if the patch excludes a modified file, mark that file with mtime=0
            # so status can see it.
            mm = []
            for i in xrange(len(m)-1, -1, -1):
                if not matchfn(m[i]):
                    mm.append(m[i])
                    del m[i]
            repo.dirstate.update(m, 'n')
            repo.dirstate.update(mm, 'n', st_mtime=-1, st_size=-1)
            repo.dirstate.forget(forget)

            if not msg:
                if not message:
                    message = "patch queue: %s\n" % patchfn
                else:
                    message = "\n".join(message)
            else:
                message = msg

            self.strip(repo, top, update=False, backup='strip', wlock=wlock)
            n = repo.commit(filelist, message, changes[1], match=matchfn,
                            force=1, wlock=wlock)
            self.applied[-1] = statusentry(revlog.hex(n), patchfn)
            self.applied_dirty = 1
            self.removeundo(repo)
        else:
            # slow path: top is not tip.  Write the diff, then pop and
            # re-push the refreshed patch.
            self.printdiff(repo, patchparent, fp=patchf)
            patchf.close()
            added = repo.status()[1]
            for a in added:
                f = repo.wjoin(a)
                try:
                    os.unlink(f)
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise
                try: os.removedirs(os.path.dirname(f))
                except: pass
            # forget the file copies in the dirstate
            # push should readd the files later on
            repo.dirstate.forget(added)
            self.pop(repo, force=True, wlock=wlock)
            self.push(repo, force=True, wlock=wlock)
1170 1170
1171 1171 def init(self, repo, create=False):
1172 1172 if not create and os.path.isdir(self.path):
1173 1173 raise util.Abort(_("patch queue directory already exists"))
1174 1174 try:
1175 1175 os.mkdir(self.path)
1176 1176 except OSError, inst:
1177 1177 if inst.errno != errno.EEXIST or not create:
1178 1178 raise
1179 1179 if create:
1180 1180 return self.qrepo(create=True)
1181 1181
1182 1182 def unapplied(self, repo, patch=None):
1183 1183 if patch and patch not in self.series:
1184 1184 raise util.Abort(_("patch %s is not in series file") % patch)
1185 1185 if not patch:
1186 1186 start = self.series_end()
1187 1187 else:
1188 1188 start = self.series.index(patch) + 1
1189 1189 unapplied = []
1190 1190 for i in xrange(start, len(self.series)):
1191 1191 pushable, reason = self.pushable(i)
1192 1192 if pushable:
1193 1193 unapplied.append((i, self.series[i]))
1194 1194 self.explain_pushable(i)
1195 1195 return unapplied
1196 1196
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print (part of) the series.

        Patches from `start` for `length` entries are printed; verbose
        mode prefixes index and state (A=applied, U=unapplied,
        G=guarded), and `status` restricts output to one state.  With
        `missing`, instead list files in the patch directory that are
        not in the series.  `summary` appends each patch message's
        first line.
        """
        def displayname(patchname):
            # patch name, optionally followed by ': <first message line>'
            if summary:
                msg = self.readheaders(patchname)[0]
                msg = msg and ': ' + msg[0] or ': '
            else:
                msg = ''
            return '%s%s' % (patchname, msg)

        applied = dict.fromkeys([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            for i in xrange(start, start+length):
                patch = self.series[i]
                if patch in applied:
                    stat = 'A'
                elif self.pushable(i)[0]:
                    stat = 'U'
                else:
                    stat = 'G'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%d %s ' % (i, stat)
                elif status and status != stat:
                    continue
                self.ui.write('%s%s\n' % (pfx, displayname(patch)))
        else:
            # walk the patch directory for files unknown to the series,
            # skipping mq's own bookkeeping files and dotfiles
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.status_path, self.series_path,
                                   self.guards_path)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            msng_list.sort()
            for x in msng_list:
                pfx = self.ui.verbose and ('D ') or ''
                self.ui.write("%s%s\n" % (pfx, displayname(x)))
1240 1240
1241 1241 def issaveline(self, l):
1242 1242 if l.name == '.hg.patches.save.line':
1243 1243 return True
1244 1244
1245 1245 def qrepo(self, create=False):
1246 1246 if create or os.path.isdir(self.join(".hg")):
1247 1247 return hg.repository(self.ui, path=self.path, create=create)
1248 1248
    def restore(self, repo, rev, delete=None, qupdate=None):
        """Rebuild queue state from a save changeset created by save().

        Parses the series, applied list and optional queue-repo
        dirstate parents back out of rev's description.  With `delete`,
        the save changeset is stripped afterwards (if it has no
        children); with `qupdate`, the queue repository is updated to
        the recorded parent.  Returns 1 on error.
        """
        c = repo.changelog.read(rev)
        desc = c[4].strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i in xrange(0, len(lines)):
            if lines[i] == 'Patch Data:':
                datastart = i + 1
            elif lines[i].startswith('Dirstate:'):
                # "Dirstate: <hex p1> <hex p2>" -> queue repo parents
                l = lines[i].rstrip()
                l = l[10:].split(' ')
                qpp = [ hg.bin(x) for x in l ]
            elif datastart != None:
                # data lines: "rev:name" for applied patches, ":name"
                # for unapplied series entries
                l = lines[i].rstrip()
                se = statusentry(l)
                file_ = se.name
                if se.rev:
                    applied.append(se)
                else:
                    series.append(file_)
        if datastart == None:
            self.ui.warn("No saved patch data found\n")
            return 1
        self.ui.warn("restoring status: %s\n" % lines[0])
        self.full_series = series
        self.applied = applied
        self.parse_series()
        self.series_dirty = 1
        self.applied_dirty = 1
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                self.ui.warn("save entry has children, leaving it alone\n")
            else:
                self.ui.warn("removing save entry %s\n" % hg.short(rev))
                pp = repo.dirstate.parents()
                # update the working dir only if it sits on the save rev
                if rev in pp:
                    update = True
                else:
                    update = False
                self.strip(repo, rev, update=update, backup='strip')
        if qpp:
            self.ui.warn("saved queue repository parents: %s %s\n" %
                         (hg.short(qpp[0]), hg.short(qpp[1])))
            if qupdate:
                print "queue directory updating"
                r = self.qrepo()
                if not r:
                    self.ui.warn("Unable to load queue repository\n")
                    return 1
                hg.clean(r, qpp[0])
1304 1304
    def save(self, repo, msg=None):
        """Commit the current queue state as a save changeset.

        The series, applied list and (if the queue is versioned) the
        queue repo's dirstate parents are encoded into the commit
        message so restore() can rebuild them.  Returns 1 on error.
        """
        if len(self.applied) == 0:
            self.ui.warn("save: no patches applied, exiting\n")
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn("status is already saved\n")
            return 1

        # unapplied series entries are recorded with an empty rev field
        ar = [ ':' + x for x in self.full_series ]
        if not msg:
            msg = "hg patches saved state"
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
                   "\n".join(ar) + '\n' or "")
        n = repo.commit(None, text, user=None, force=1)
        if not n:
            self.ui.warn("repo commit failed\n")
            return 1
        # mark the save point itself on the applied stack
        self.applied.append(statusentry(revlog.hex(n),'.hg.patches.save.line'))
        self.applied_dirty = 1
        self.removeundo(repo)
1332 1332
1333 1333 def full_series_end(self):
1334 1334 if len(self.applied) > 0:
1335 1335 p = self.applied[-1].name
1336 1336 end = self.find_series(p)
1337 1337 if end == None:
1338 1338 return len(self.full_series)
1339 1339 return end + 1
1340 1340 return 0
1341 1341
1342 1342 def series_end(self, all_patches=False):
1343 1343 """If all_patches is False, return the index of the next pushable patch
1344 1344 in the series, or the series length. If all_patches is True, return the
1345 1345 index of the first patch past the last applied one.
1346 1346 """
1347 1347 end = 0
1348 1348 def next(start):
1349 1349 if all_patches:
1350 1350 return start
1351 1351 i = start
1352 1352 while i < len(self.series):
1353 1353 p, reason = self.pushable(i)
1354 1354 if p:
1355 1355 break
1356 1356 self.explain_pushable(i)
1357 1357 i += 1
1358 1358 return i
1359 1359 if len(self.applied) > 0:
1360 1360 p = self.applied[-1].name
1361 1361 try:
1362 1362 end = self.series.index(p)
1363 1363 except ValueError:
1364 1364 return 0
1365 1365 return next(end + 1)
1366 1366 return next(end)
1367 1367
1368 1368 def appliedname(self, index):
1369 1369 pname = self.applied[index].name
1370 1370 if not self.ui.verbose:
1371 1371 p = pname
1372 1372 else:
1373 1373 p = str(self.series.index(pname)) + " " + pname
1374 1374 return p
1375 1375
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        """Import patches into the queue.

        `files` are patch files to copy into the queue directory ('-'
        reads stdin; `existing` registers files already present).
        With `rev`, existing changesets are converted into applied
        patches instead; they must form a linear path to qbase (when
        patches are applied) or to a head.
        """
        def checkseries(patchname):
            if patchname in self.series:
                raise util.Abort(_('patch %s is already in the series file')
                                 % patchname)
        def checkfile(patchname):
            if not force and os.path.exists(self.join(patchname)):
                raise util.Abort(_('patch "%s" already exists')
                                 % patchname)

        if rev:
            if files:
                raise util.Abort(_('option "-r" not valid when importing '
                                   'files'))
            rev = cmdutil.revrange(repo, rev)
            # process revisions from newest to oldest
            rev.sort(lambda x, y: cmp(y, x))
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise util.Abort(_('option "-n" not valid when importing multiple '
                               'patches'))
        i = 0
        added = []
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
            if len(heads) > 1:
                raise util.Abort(_('revision %d is the root of more than one '
                                   'branch') % rev[-1])
            if self.applied:
                base = revlog.hex(repo.changelog.node(rev[0]))
                if base in [n.rev for n in self.applied]:
                    raise util.Abort(_('revision %d is already managed')
                                     % rev[0])
                if heads != [revlog.bin(self.applied[-1].rev)]:
                    raise util.Abort(_('revision %d is not the parent of '
                                       'the queue') % rev[0])
                base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev[0])]:
                    raise util.Abort(_('revision %d has unmanaged children')
                                     % rev[0])
                lastparent = None

            if git:
                self.diffopts().git = True

            for r in rev:
                p1, p2 = repo.changelog.parentrevs(r)
                n = repo.changelog.node(r)
                if p2 != revlog.nullrev:
                    raise util.Abort(_('cannot import merge revision %d') % r)
                if lastparent and lastparent != r:
                    raise util.Abort(_('revision %d is not the parent of %d')
                                     % (r, lastparent))
                lastparent = p1

                if not patchname:
                    patchname = normname('%d.diff' % r)
                checkseries(patchname)
                checkfile(patchname)
                # imported revisions become the bottom of the stack
                self.full_series.insert(0, patchname)

                patchf = self.opener(patchname, "w")
                patch.export(repo, [n], fp=patchf, opts=self.diffopts())
                patchf.close()

                se = statusentry(revlog.hex(n), patchname)
                self.applied.insert(0, se)

                added.append(patchname)
                patchname = None
            self.parse_series()
            self.applied_dirty = 1

        for filename in files:
            if existing:
                if filename == '-':
                    raise util.Abort(_('-e is incompatible with import from -'))
                if not patchname:
                    patchname = normname(filename)
                if not os.path.isfile(self.join(patchname)):
                    raise util.Abort(_("patch %s does not exist") % patchname)
            else:
                try:
                    if filename == '-':
                        if not patchname:
                            raise util.Abort(_('need --name to import a patch from -'))
                        text = sys.stdin.read()
                    else:
                        text = file(filename).read()
                except IOError:
                    raise util.Abort(_("unable to read %s") % patchname)
                if not patchname:
                    patchname = normname(os.path.basename(filename))
                checkfile(patchname)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
            checkseries(patchname)
            # file imports are appended just past the applied stack
            index = self.full_series_end() + i
            self.full_series[index:index] = [patchname]
            self.parse_series()
            self.ui.warn("adding %s to series file\n" % patchname)
            i += 1
            added.append(patchname)
            patchname = None
        self.series_dirty = 1
        qrepo = self.qrepo()
        if qrepo:
            qrepo.add(added)
1488 1488
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    With --rev, mq will stop managing the named revisions. The
    patches must be applied and at the base of the stack. This option
    is useful when the patches have been applied upstream.

    Otherwise, the patches must not be applied.

    With --keep, the patch files are preserved in the patch directory."""
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1503 1503
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    mq = repo.mq
    if not patch:
        length = mq.series_end(True)
    else:
        if patch not in mq.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        length = mq.series.index(patch) + 1
    return mq.qseries(repo, length=length, status='A',
                      summary=opts.get('summary'))
1514 1514
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    mq = repo.mq
    if not patch:
        start = mq.series_end(True)
    else:
        if patch not in mq.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = mq.series.index(patch) + 1
    mq.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1525 1525
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch will have the same name as its source file unless you
    give it a new one with --name.

    You can register an existing patch inside the patch directory
    with the --existing flag.

    With --force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with --rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With --git, patches imported with --rev will use the git diff
    format.
    """
    mq = repo.mq
    mq.qimport(repo, filename, patchname=opts['name'],
               existing=opts['existing'], force=opts['force'],
               rev=opts['rev'], git=opts['git'])
    mq.save_dirty()
    return 0
1548 1548
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c is
    specified, qinit will create a separate nested repository
    for patches. Use qcommit to commit changes to this queue
    repository."""
    mq = repo.mq
    r = mq.init(repo, create=opts['create_repo'])
    mq.save_dirty()
    if not r:
        return 0
    # seed the new queue repository with an ignore file and an
    # empty series file, and schedule both for addition
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wopener('.hgignore', 'w')
        fp.write('syntax: glob\n')
        fp.write('status\n')
        fp.write('guards\n')
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wopener('series', 'w').close()
    r.add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
1571 1571
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.
    '''
    cmdutil.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # qbase is the first applied patch's changeset
            qbase = revlog.bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # for a non-local destination, clone only up to qbase's
                # parent: keep all heads except those above qbase
                heads = dict.fromkeys(sr.heads())
                for h in sr.heads(qbase):
                    del heads[h]
                destrev = heads.keys()
                destrev.append(sr.changelog.parents(qbase)[0])
    ui.note(_('cloning main repo\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repo\n'))
    spr, dpr = hg.clone(ui, opts['patches'] or (sr.url() + '/.hg/patches'),
                        dr.url() + '/.hg/patches',
                        pull=opts['pull'],
                        update=not opts['noupdate'],
                        stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # a local destination got everything: strip the applied
            # patches so it starts with an empty stack
            ui.note(_('stripping applied patches from destination repo\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repo\n'))
            hg.update(dr, dr.changelog.tip())
1617 1617
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    r = repo.mq.qrepo()
    if not r:
        raise util.Abort('no queue repository')
    commands.commit(r.ui, r, *pats, **opts)
1624 1624
def series(ui, repo, **opts):
    """print the entire series file"""
    mq = repo.mq
    mq.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1629 1629
def top(ui, repo, **opts):
    """print the name of the current patch"""
    mq = repo.mq
    t = mq.applied and mq.series_end(True) or 0
    if not t:
        ui.write("No patches applied\n")
        return 1
    return mq.qseries(repo, start=t-1, length=1, status='A',
                      summary=opts.get('summary'))
1640 1640
def next(ui, repo, **opts):
    """print the name of the next patch"""
    mq = repo.mq
    pos = mq.series_end()
    if pos == len(mq.series):
        ui.write("All patches applied\n")
        return 1
    return mq.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1649 1649
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    mq = repo.mq
    count = len(mq.applied)
    if count == 1:
        ui.write("Only one patch applied\n")
        return 1
    if not count:
        ui.write("No patches applied\n")
        return 1
    return mq.qseries(repo, start=count-2, length=1, status='A',
                      summary=opts.get('summary'))
1662 1662
def new(ui, repo, patch, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch
    (if any). It will refuse to run if there are any outstanding
    changes unless -f is specified, in which case the patch will
    be initialised with them.

    -e, -m or -l set the patch header as well as the commit message.
    If none is specified, the patch header is empty and the
    commit message is 'New patch: PATCH'"""
    mq = repo.mq
    # -m/-l supply the message; -e opens an editor on top of it
    msg = cmdutil.logmessage(opts)
    if opts['edit']:
        msg = ui.edit(msg, ui.username())
    mq.new(repo, patch, msg=msg, force=opts['force'])
    mq.save_dirty()
    return 0
1681 1681
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (--git or [diff] git=1) to track copies and renames.
    """
    mq = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # edit the top patch's existing header
        top = mq.applied[-1].name
        message, comment, user, date, hasdiff = mq.readheaders(top)
        message = ui.edit('\n'.join(message), user or ui.username())
    ret = mq.refresh(repo, pats, msg=message, **opts)
    mq.save_dirty()
    return ret
1703 1703
def diff(ui, repo, *pats, **opts):
    """diff of the current patch"""
    mq = repo.mq
    mq.diff(repo, pats, opts)
    return 0
1708 1708
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will
    be deleted. With -k/--keep, the folded patch files will not
    be removed afterwards.

    The header for each folded patch will be concatenated with
    the current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # fix: ui.warn does not append a newline itself; without the
            # '\n' this message ran into the next line of output
            ui.warn(_('Skipping already folded patch %s\n') % p)
            # NOTE(review): despite the message, the duplicate is still
            # appended and folded below — confirm whether a 'continue'
            # was intended here
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # collect each folded patch's message for concatenation
            messages.append(q.readheaders(p)[0])
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # concatenate the current header with each folded header,
        # separated by '* * *'
        message, comments, user = q.readheaders(parent)[0:3]
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1767 1767
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    mq = repo.mq
    patch = mq.lookup(patch)
    # pop down if the target is already applied, otherwise push up
    if mq.isapplied(patch):
        ret = mq.pop(repo, patch, force=opts['force'])
    else:
        ret = mq.push(repo, patch, force=opts['force'])
    mq.save_dirty()
    return ret
1778 1778
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    To set a negative guard "-foo" on topmost patch ("--" is needed so
    hg will not interpret "-foo" as an option):
        hg qguard -- -foo

    To set guards on another patch:
        hg qguard other.patch +2.6.17 -stable
    '''
    def status(idx):
        # print 'name: guard guard ...' for series entry idx
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # no explicit patch name (first arg looks like a guard, '+x'/'-x'):
    # operate on the topmost applied patch
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # setting guards (or clearing them all with --none)
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        status(q.series.index(q.lookup(patch)))
1826 1826
1827 1827 def header(ui, repo, patch=None):
1828 1828 """Print the header of the topmost or specified patch"""
1829 1829 q = repo.mq
1830 1830
1831 1831 if patch:
1832 1832 patch = q.lookup(patch)
1833 1833 else:
1834 1834 if not q.applied:
1835 1835 ui.write('No patches applied\n')
1836 1836 return 1
1837 1837 patch = q.lookup('qtip')
1838 1838 message = repo.mq.readheaders(patch)[0]
1839 1839
1840 1840 ui.write('\n'.join(message) + '\n')
1841 1841
1842 1842 def lastsavename(path):
1843 1843 (directory, base) = os.path.split(path)
1844 1844 names = os.listdir(directory)
1845 1845 namere = re.compile("%s.([0-9]+)" % base)
1846 1846 maxindex = None
1847 1847 maxname = None
1848 1848 for f in names:
1849 1849 m = namere.match(f)
1850 1850 if m:
1851 1851 index = int(m.group(1))
1852 1852 if maxindex == None or index > maxindex:
1853 1853 maxindex = index
1854 1854 maxname = f
1855 1855 if maxname:
1856 1856 return (os.path.join(directory, maxname), maxindex)
1857 1857 return (None, None)
1858 1858
1859 1859 def savename(path):
1860 1860 (last, index) = lastsavename(path)
1861 1861 if last is None:
1862 1862 index = 0
1863 1863 newpath = path + ".%d" % (index + 1)
1864 1864 return newpath
1865 1865
1866 1866 def push(ui, repo, patch=None, **opts):
1867 1867 """push the next patch onto the stack"""
1868 1868 q = repo.mq
1869 1869 mergeq = None
1870 1870
1871 1871 if opts['all']:
1872 1872 if not q.series:
1873 1873 ui.warn(_('no patches in series\n'))
1874 1874 return 0
1875 1875 patch = q.series[-1]
1876 1876 if opts['merge']:
1877 1877 if opts['name']:
1878 1878 newpath = opts['name']
1879 1879 else:
1880 1880 newpath, i = lastsavename(q.path)
1881 1881 if not newpath:
1882 1882 ui.warn("no saved queues found, please use -n\n")
1883 1883 return 1
1884 1884 mergeq = queue(ui, repo.join(""), newpath)
1885 1885 ui.warn("merging with queue at: %s\n" % mergeq.path)
1886 1886 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
1887 1887 mergeq=mergeq)
1888 1888 return ret
1889 1889
1890 1890 def pop(ui, repo, patch=None, **opts):
1891 1891 """pop the current patch off the stack"""
1892 1892 localupdate = True
1893 1893 if opts['name']:
1894 1894 q = queue(ui, repo.join(""), repo.join(opts['name']))
1895 1895 ui.warn('using patch queue: %s\n' % q.path)
1896 1896 localupdate = False
1897 1897 else:
1898 1898 q = repo.mq
1899 1899 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
1900 1900 all=opts['all'])
1901 1901 q.save_dirty()
1902 1902 return ret
1903 1903
1904 1904 def rename(ui, repo, patch, name=None, **opts):
1905 1905 """rename a patch
1906 1906
1907 1907 With one argument, renames the current patch to PATCH1.
1908 1908 With two arguments, renames PATCH1 to PATCH2."""
1909 1909
1910 1910 q = repo.mq
1911 1911
1912 1912 if not name:
1913 1913 name = patch
1914 1914 patch = None
1915 1915
1916 1916 if patch:
1917 1917 patch = q.lookup(patch)
1918 1918 else:
1919 1919 if not q.applied:
1920 1920 ui.write(_('No patches applied\n'))
1921 1921 return
1922 1922 patch = q.lookup('qtip')
1923 1923 absdest = q.join(name)
1924 1924 if os.path.isdir(absdest):
1925 1925 name = normname(os.path.join(name, os.path.basename(patch)))
1926 1926 absdest = q.join(name)
1927 1927 if os.path.exists(absdest):
1928 1928 raise util.Abort(_('%s already exists') % absdest)
1929 1929
1930 1930 if name in q.series:
1931 1931 raise util.Abort(_('A patch named %s already exists in the series file') % name)
1932 1932
1933 1933 if ui.verbose:
1934 1934 ui.write('Renaming %s to %s\n' % (patch, name))
1935 1935 i = q.find_series(patch)
1936 1936 guards = q.guard_re.findall(q.full_series[i])
1937 1937 q.full_series[i] = name + ''.join([' #' + g for g in guards])
1938 1938 q.parse_series()
1939 1939 q.series_dirty = 1
1940 1940
1941 1941 info = q.isapplied(patch)
1942 1942 if info:
1943 1943 q.applied[info[0]] = statusentry(info[1], name)
1944 1944 q.applied_dirty = 1
1945 1945
1946 1946 util.rename(q.join(patch), absdest)
1947 1947 r = q.qrepo()
1948 1948 if r:
1949 1949 wlock = r.wlock()
1950 1950 if r.dirstate.state(name) == 'r':
1951 1951 r.undelete([name], wlock)
1952 1952 r.copy(patch, name, wlock)
1953 1953 r.remove([patch], False, wlock)
1954 1954
1955 1955 q.save_dirty()
1956 1956
1957 1957 def restore(ui, repo, rev, **opts):
1958 1958 """restore the queue state saved by a rev"""
1959 1959 rev = repo.lookup(rev)
1960 1960 q = repo.mq
1961 1961 q.restore(repo, rev, delete=opts['delete'],
1962 1962 qupdate=opts['update'])
1963 1963 q.save_dirty()
1964 1964 return 0
1965 1965
1966 1966 def save(ui, repo, **opts):
1967 1967 """save current queue state"""
1968 1968 q = repo.mq
1969 1969 message = cmdutil.logmessage(opts)
1970 1970 ret = q.save(repo, msg=message)
1971 1971 if ret:
1972 1972 return ret
1973 1973 q.save_dirty()
1974 1974 if opts['copy']:
1975 1975 path = q.path
1976 1976 if opts['name']:
1977 1977 newpath = os.path.join(q.basepath, opts['name'])
1978 1978 if os.path.exists(newpath):
1979 1979 if not os.path.isdir(newpath):
1980 1980 raise util.Abort(_('destination %s exists and is not '
1981 1981 'a directory') % newpath)
1982 1982 if not opts['force']:
1983 1983 raise util.Abort(_('destination %s exists, '
1984 1984 'use -f to force') % newpath)
1985 1985 else:
1986 1986 newpath = savename(path)
1987 1987 ui.warn("copy %s to %s\n" % (path, newpath))
1988 1988 util.copyfiles(path, newpath)
1989 1989 if opts['empty']:
1990 1990 try:
1991 1991 os.unlink(q.join(q.status_path))
1992 1992 except:
1993 1993 pass
1994 1994 return 0
1995 1995
1996 1996 def strip(ui, repo, rev, **opts):
1997 1997 """strip a revision and all later revs on the same branch"""
1998 1998 rev = repo.lookup(rev)
1999 1999 backup = 'all'
2000 2000 if opts['backup']:
2001 2001 backup = 'strip'
2002 2002 elif opts['nobackup']:
2003 2003 backup = 'none'
2004 2004 update = repo.dirstate.parents()[0] != revlog.nullid
2005 2005 repo.mq.strip(repo, rev, backup=backup, update=update)
2006 2006 return 0
2007 2007
2008 2008 def select(ui, repo, *args, **opts):
2009 2009 '''set or print guarded patches to push
2010 2010
2011 2011 Use the qguard command to set or print guards on patch, then use
2012 2012 qselect to tell mq which guards to use. A patch will be pushed if it
2013 2013 has no guards or any positive guards match the currently selected guard,
2014 2014 but will not be pushed if any negative guards match the current guard.
2015 2015 For example:
2016 2016
2017 2017 qguard foo.patch -stable (negative guard)
2018 2018 qguard bar.patch +stable (positive guard)
2019 2019 qselect stable
2020 2020
2021 2021 This activates the "stable" guard. mq will skip foo.patch (because
2022 2022 it has a negative match) but push bar.patch (because it
2023 2023 has a positive match).
2024 2024
2025 2025 With no arguments, prints the currently active guards.
2026 2026 With one argument, sets the active guard.
2027 2027
2028 2028 Use -n/--none to deactivate guards (no other arguments needed).
2029 2029 When no guards are active, patches with positive guards are skipped
2030 2030 and patches with negative guards are pushed.
2031 2031
2032 2032 qselect can change the guards on applied patches. It does not pop
2033 2033 guarded patches by default. Use --pop to pop back to the last applied
2034 2034 patch that is not guarded. Use --reapply (which implies --pop) to push
2035 2035 back to the current patch afterwards, but skip guarded patches.
2036 2036
2037 2037 Use -s/--series to print a list of all guards in the series file (no
2038 2038 other arguments needed). Use -v for more information.'''
2039 2039
2040 2040 q = repo.mq
2041 2041 guards = q.active()
2042 2042 if args or opts['none']:
2043 2043 old_unapplied = q.unapplied(repo)
2044 2044 old_guarded = [i for i in xrange(len(q.applied)) if
2045 2045 not q.pushable(i)[0]]
2046 2046 q.set_active(args)
2047 2047 q.save_dirty()
2048 2048 if not args:
2049 2049 ui.status(_('guards deactivated\n'))
2050 2050 if not opts['pop'] and not opts['reapply']:
2051 2051 unapplied = q.unapplied(repo)
2052 2052 guarded = [i for i in xrange(len(q.applied))
2053 2053 if not q.pushable(i)[0]]
2054 2054 if len(unapplied) != len(old_unapplied):
2055 2055 ui.status(_('number of unguarded, unapplied patches has '
2056 2056 'changed from %d to %d\n') %
2057 2057 (len(old_unapplied), len(unapplied)))
2058 2058 if len(guarded) != len(old_guarded):
2059 2059 ui.status(_('number of guarded, applied patches has changed '
2060 2060 'from %d to %d\n') %
2061 2061 (len(old_guarded), len(guarded)))
2062 2062 elif opts['series']:
2063 2063 guards = {}
2064 2064 noguards = 0
2065 2065 for gs in q.series_guards:
2066 2066 if not gs:
2067 2067 noguards += 1
2068 2068 for g in gs:
2069 2069 guards.setdefault(g, 0)
2070 2070 guards[g] += 1
2071 2071 if ui.verbose:
2072 2072 guards['NONE'] = noguards
2073 2073 guards = guards.items()
2074 2074 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2075 2075 if guards:
2076 2076 ui.note(_('guards in series file:\n'))
2077 2077 for guard, count in guards:
2078 2078 ui.note('%2d ' % count)
2079 2079 ui.write(guard, '\n')
2080 2080 else:
2081 2081 ui.note(_('no guards in series file\n'))
2082 2082 else:
2083 2083 if guards:
2084 2084 ui.note(_('active guards:\n'))
2085 2085 for g in guards:
2086 2086 ui.write(g, '\n')
2087 2087 else:
2088 2088 ui.write(_('no active guards\n'))
2089 2089 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2090 2090 popped = False
2091 2091 if opts['pop'] or opts['reapply']:
2092 2092 for i in xrange(len(q.applied)):
2093 2093 pushable, reason = q.pushable(i)
2094 2094 if not pushable:
2095 2095 ui.status(_('popping guarded patches\n'))
2096 2096 popped = True
2097 2097 if i == 0:
2098 2098 q.pop(repo, all=True)
2099 2099 else:
2100 2100 q.pop(repo, i-1)
2101 2101 break
2102 2102 if popped:
2103 2103 try:
2104 2104 if reapply:
2105 2105 ui.status(_('reapplying unguarded patches\n'))
2106 2106 q.push(repo, reapply)
2107 2107 finally:
2108 2108 q.save_dirty()
2109 2109
2110 2110 def reposetup(ui, repo):
2111 2111 class mqrepo(repo.__class__):
2112 2112 def abort_if_wdir_patched(self, errmsg, force=False):
2113 2113 if self.mq.applied and not force:
2114 2114 parent = revlog.hex(self.dirstate.parents()[0])
2115 2115 if parent in [s.rev for s in self.mq.applied]:
2116 2116 raise util.Abort(errmsg)
2117 2117
2118 2118 def commit(self, *args, **opts):
2119 2119 if len(args) >= 6:
2120 2120 force = args[5]
2121 2121 else:
2122 2122 force = opts.get('force')
2123 2123 self.abort_if_wdir_patched(
2124 2124 _('cannot commit over an applied mq patch'),
2125 2125 force)
2126 2126
2127 2127 return super(mqrepo, self).commit(*args, **opts)
2128 2128
2129 2129 def push(self, remote, force=False, revs=None):
2130 2130 if self.mq.applied and not force and not revs:
2131 2131 raise util.Abort(_('source has mq patches applied'))
2132 2132 return super(mqrepo, self).push(remote, force, revs)
2133 2133
2134 2134 def tags(self):
2135 2135 if self.tagscache:
2136 2136 return self.tagscache
2137 2137
2138 2138 tagscache = super(mqrepo, self).tags()
2139 2139
2140 2140 q = self.mq
2141 2141 if not q.applied:
2142 2142 return tagscache
2143 2143
2144 2144 mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]
2145 2145 mqtags.append((mqtags[-1][0], 'qtip'))
2146 2146 mqtags.append((mqtags[0][0], 'qbase'))
2147 2147 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2148 2148 for patch in mqtags:
2149 2149 if patch[1] in tagscache:
2150 2150 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2151 2151 else:
2152 2152 tagscache[patch[1]] = patch[0]
2153 2153
2154 2154 return tagscache
2155 2155
2156 2156 def _branchtags(self):
2157 2157 q = self.mq
2158 2158 if not q.applied:
2159 2159 return super(mqrepo, self)._branchtags()
2160 2160
2161 2161 self.branchcache = {} # avoid recursion in changectx
2162 2162 cl = self.changelog
2163 2163 partial, last, lrev = self._readbranchcache()
2164 2164
2165 2165 qbase = cl.rev(revlog.bin(q.applied[0].rev))
2166 2166 start = lrev + 1
2167 2167 if start < qbase:
2168 2168 # update the cache (excluding the patches) and save it
2169 2169 self._updatebranchcache(partial, lrev+1, qbase)
2170 2170 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2171 2171 start = qbase
2172 2172 # if start = qbase, the cache is as updated as it should be.
2173 2173 # if start > qbase, the cache includes (part of) the patches.
2174 2174 # we might as well use it, but we won't save it.
2175 2175
2176 2176 # update the cache up to the tip
2177 2177 self._updatebranchcache(partial, start, cl.count())
2178 2178
2179 2179 return partial
2180 2180
2181 2181 if repo.local():
2182 2182 repo.__class__ = mqrepo
2183 2183 repo.mq = queue(ui, repo.join(""))
2184 2184
2185 2185 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2186 2186
2187 2187 cmdtable = {
2188 2188 "qapplied": (applied, [] + seriesopts, 'hg qapplied [-s] [PATCH]'),
2189 2189 "qclone": (clone,
2190 2190 [('', 'pull', None, _('use pull protocol to copy metadata')),
2191 2191 ('U', 'noupdate', None, _('do not update the new working directories')),
2192 2192 ('', 'uncompressed', None,
2193 2193 _('use uncompressed transfer (fast over LAN)')),
2194 2194 ('e', 'ssh', '', _('specify ssh command to use')),
2195 2195 ('p', 'patches', '', _('location of source patch repo')),
2196 2196 ('', 'remotecmd', '',
2197 2197 _('specify hg command to run on the remote side'))],
2198 2198 'hg qclone [OPTION]... SOURCE [DEST]'),
2199 2199 "qcommit|qci":
2200 2200 (commit,
2201 2201 commands.table["^commit|ci"][1],
2202 2202 'hg qcommit [OPTION]... [FILE]...'),
2203 2203 "^qdiff": (diff,
2204 2204 [('g', 'git', None, _('use git extended diff format')),
2205 2205 ('I', 'include', [], _('include names matching the given patterns')),
2206 2206 ('X', 'exclude', [], _('exclude names matching the given patterns'))],
2207 2207 'hg qdiff [-I] [-X] [FILE]...'),
2208 2208 "qdelete|qremove|qrm":
2209 2209 (delete,
2210 2210 [('k', 'keep', None, _('keep patch file')),
2211 2211 ('r', 'rev', [], _('stop managing a revision'))],
2212 2212 'hg qdelete [-k] [-r REV]... PATCH...'),
2213 2213 'qfold':
2214 2214 (fold,
2215 2215 [('e', 'edit', None, _('edit patch header')),
2216 2216 ('k', 'keep', None, _('keep folded patch files'))
2217 2217 ] + commands.commitopts,
2218 2218 'hg qfold [-e] [-m <text>] [-l <file] PATCH...'),
2219 2219 'qgoto': (goto, [('f', 'force', None, _('overwrite any local changes'))],
2220 2220 'hg qgoto [OPT]... PATCH'),
2221 2221 'qguard': (guard, [('l', 'list', None, _('list all patches and guards')),
2222 2222 ('n', 'none', None, _('drop all guards'))],
2223 2223 'hg qguard [PATCH] [+GUARD]... [-GUARD]...'),
2224 2224 'qheader': (header, [],
2225 2225 _('hg qheader [PATCH]')),
2226 2226 "^qimport":
2227 2227 (qimport,
2228 2228 [('e', 'existing', None, 'import file in patch dir'),
2229 2229 ('n', 'name', '', 'patch file name'),
2230 2230 ('f', 'force', None, 'overwrite existing files'),
2231 2231 ('r', 'rev', [], 'place existing revisions under mq control'),
2232 2232 ('g', 'git', None, _('use git extended diff format'))],
2233 2233 'hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...'),
2234 2234 "^qinit":
2235 2235 (init,
2236 2236 [('c', 'create-repo', None, 'create queue repository')],
2237 2237 'hg qinit [-c]'),
2238 2238 "qnew":
2239 2239 (new,
2240 2240 [('e', 'edit', None, _('edit commit message')),
2241 2241 ('f', 'force', None, _('import uncommitted changes into patch'))
2242 2242 ] + commands.commitopts,
2243 2243 'hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH'),
2244 2244 "qnext": (next, [] + seriesopts, 'hg qnext [-s]'),
2245 2245 "qprev": (prev, [] + seriesopts, 'hg qprev [-s]'),
2246 2246 "^qpop":
2247 2247 (pop,
2248 2248 [('a', 'all', None, 'pop all patches'),
2249 2249 ('n', 'name', '', 'queue name to pop'),
2250 2250 ('f', 'force', None, 'forget any local changes')],
2251 2251 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
2252 2252 "^qpush":
2253 2253 (push,
2254 2254 [('f', 'force', None, 'apply if the patch has rejects'),
2255 2255 ('l', 'list', None, 'list patch name in commit text'),
2256 2256 ('a', 'all', None, 'apply all patches'),
2257 2257 ('m', 'merge', None, 'merge from another queue'),
2258 2258 ('n', 'name', '', 'merge queue name')],
2259 2259 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
2260 2260 "^qrefresh":
2261 2261 (refresh,
2262 2262 [('e', 'edit', None, _('edit commit message')),
2263 2263 ('g', 'git', None, _('use git extended diff format')),
2264 2264 ('s', 'short', None, 'refresh only files already in the patch'),
2265 2265 ('I', 'include', [], _('include names matching the given patterns')),
2266 2266 ('X', 'exclude', [], _('exclude names matching the given patterns'))
2267 2267 ] + commands.commitopts,
2268 2268 'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
2269 2269 'qrename|qmv':
2270 2270 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
2271 2271 "qrestore":
2272 2272 (restore,
2273 2273 [('d', 'delete', None, 'delete save entry'),
2274 2274 ('u', 'update', None, 'update queue working dir')],
2275 2275 'hg qrestore [-d] [-u] REV'),
2276 2276 "qsave":
2277 2277 (save,
2278 2278 [('c', 'copy', None, 'copy patch directory'),
2279 2279 ('n', 'name', '', 'copy directory name'),
2280 2280 ('e', 'empty', None, 'clear queue status file'),
2281 2281 ('f', 'force', None, 'force copy')] + commands.commitopts,
2282 2282 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
2283 2283 "qselect": (select,
2284 2284 [('n', 'none', None, _('disable all guards')),
2285 2285 ('s', 'series', None, _('list all guards in series file')),
2286 2286 ('', 'pop', None,
2287 2287 _('pop to before first guarded applied patch')),
2288 2288 ('', 'reapply', None, _('pop, then reapply patches'))],
2289 2289 'hg qselect [OPTION]... [GUARD]...'),
2290 2290 "qseries":
2291 2291 (series,
2292 2292 [('m', 'missing', None, 'print patches not in series')] + seriesopts,
2293 2293 'hg qseries [-ms]'),
2294 2294 "^strip":
2295 2295 (strip,
2296 2296 [('f', 'force', None, 'force multi-head removal'),
2297 2297 ('b', 'backup', None, 'bundle unrelated changesets'),
2298 2298 ('n', 'nobackup', None, 'no backups')],
2299 2299 'hg strip [-f] [-b] [-n] REV'),
2300 2300 "qtop": (top, [] + seriesopts, 'hg qtop [-s]'),
2301 2301 "qunapplied": (unapplied, [] + seriesopts, 'hg qunapplied [-s] [PATCH]'),
2302 2302 }
@@ -1,481 +1,481 b''
1 1 """
2 2 dirstate.py - working directory tracking for mercurial
3 3
4 4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8 """
9 9
10 10 from node import *
11 11 from i18n import _
12 12 import struct, os, time, bisect, stat, strutil, util, re, errno, ignore
13 13 import cStringIO
14 14
15 15 _unknown = ('?', 0, 0, 0)
16 16 _format = ">cllll"
17 17
18 18 class dirstate(object):
19 19
20 20 def __init__(self, opener, ui, root):
21 21 self.opener = opener
22 22 self.root = root
23 23 self._dirty = 0
24 24 self.ui = ui
25 25
26 26 def __getattr__(self, name):
27 27 if name == 'map':
28 28 self.read()
29 29 return self.map
30 30 elif name == 'copymap':
31 31 self.read()
32 32 return self.copymap
33 33 elif name == '_branch':
34 34 try:
35 35 self._branch = self.opener("branch").read().strip()\
36 36 or "default"
37 37 except IOError:
38 38 self._branch = "default"
39 39 return self._branch
40 40 elif name == 'pl':
41 41 self.pl = [nullid, nullid]
42 42 try:
43 43 st = self.opener("dirstate").read(40)
44 44 if len(st) == 40:
45 45 self.pl = st[:20], st[20:40]
46 46 except IOError, err:
47 47 if err.errno != errno.ENOENT: raise
48 48 return self.pl
49 49 elif name == 'dirs':
50 50 self.dirs = {}
51 51 for f in self.map:
52 52 self.updatedirs(f, 1)
53 53 return self.dirs
54 54 elif name == '_ignore':
55 55 files = [self.wjoin('.hgignore')] + self.ui.hgignorefiles()
56 56 self._ignore = ignore.ignore(self.root, files, self.ui.warn)
57 57 return self._ignore
58 58 elif name == '_slash':
59 59 self._slash = self.ui.configbool('ui', 'slash') and os.sep != '/'
60 60 return self._slash
61 61 else:
62 62 raise AttributeError, name
63 63
64 64 def wjoin(self, f):
65 65 return os.path.join(self.root, f)
66 66
67 67 def getcwd(self):
68 68 cwd = os.getcwd()
69 69 if cwd == self.root: return ''
70 70 # self.root ends with a path separator if self.root is '/' or 'C:\'
71 71 rootsep = self.root
72 72 if not rootsep.endswith(os.sep):
73 73 rootsep += os.sep
74 74 if cwd.startswith(rootsep):
75 75 return cwd[len(rootsep):]
76 76 else:
77 77 # we're outside the repo. return an absolute path.
78 78 return cwd
79 79
80 80 def pathto(self, f, cwd=None):
81 81 if cwd is None:
82 82 cwd = self.getcwd()
83 83 path = util.pathto(self.root, cwd, f)
84 84 if self._slash:
85 85 return path.replace(os.sep, '/')
86 86 return path
87 87
88 88 def __del__(self):
89 89 self.write()
90 90
91 91 def __getitem__(self, key):
92 92 return self.map[key]
93 93
94 94 def __contains__(self, key):
95 95 return key in self.map
96 96
97 97 def parents(self):
98 98 return self.pl
99 99
100 100 def branch(self):
101 101 return self._branch
102 102
103 103 def markdirty(self):
104 104 self._dirty = 1
105 105
106 106 def setparents(self, p1, p2=nullid):
107 107 self.markdirty()
108 108 self.pl = p1, p2
109 109
110 110 def setbranch(self, branch):
111 111 self._branch = branch
112 112 self.opener("branch", "w").write(branch + '\n')
113 113
114 114 def state(self, key):
115 115 return self.map.get(key, ("?",))[0]
116 116
117 117 def read(self):
118 118 self.map = {}
119 119 self.copymap = {}
120 120 self.pl = [nullid, nullid]
121 121 try:
122 122 st = self.opener("dirstate").read()
123 123 except IOError, err:
124 124 if err.errno != errno.ENOENT: raise
125 125 return
126 126 if not st:
127 127 return
128 128
129 129 self.pl = [st[:20], st[20: 40]]
130 130
131 131 # deref fields so they will be local in loop
132 132 dmap = self.map
133 133 copymap = self.copymap
134 134 unpack = struct.unpack
135 135
136 136 pos = 40
137 137 e_size = struct.calcsize(_format)
138 138
139 139 while pos < len(st):
140 140 newpos = pos + e_size
141 141 e = unpack(_format, st[pos:newpos])
142 142 l = e[4]
143 143 pos = newpos
144 144 newpos = pos + l
145 145 f = st[pos:newpos]
146 146 if '\0' in f:
147 147 f, c = f.split('\0')
148 148 copymap[f] = c
149 149 dmap[f] = e[:4]
150 150 pos = newpos
151 151
152 def reload(self):
152 def invalidate(self):
153 153 for a in "map copymap _branch pl dirs _ignore".split():
154 154 if hasattr(self, a):
155 155 self.__delattr__(a)
156 156
157 157 def copy(self, source, dest):
158 158 self.markdirty()
159 159 self.copymap[dest] = source
160 160
161 161 def copied(self, file):
162 162 return self.copymap.get(file, None)
163 163
164 164 def copies(self):
165 165 return self.copymap
166 166
167 167 def updatedirs(self, path, delta):
168 168 for c in strutil.findall(path, '/'):
169 169 pc = path[:c]
170 170 self.dirs.setdefault(pc, 0)
171 171 self.dirs[pc] += delta
172 172
173 173 def checkinterfering(self, files):
174 174 def prefixes(f):
175 175 for c in strutil.rfindall(f, '/'):
176 176 yield f[:c]
177 177 seendirs = {}
178 178 for f in files:
179 179 # shadows
180 180 if self.dirs.get(f):
181 181 raise util.Abort(_('directory named %r already in dirstate') %
182 182 f)
183 183 for d in prefixes(f):
184 184 if d in seendirs:
185 185 break
186 186 if d in self.map:
187 187 raise util.Abort(_('file named %r already in dirstate') %
188 188 d)
189 189 seendirs[d] = True
190 190 # disallowed
191 191 if '\r' in f or '\n' in f:
192 192 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames"))
193 193
194 194 def update(self, files, state, **kw):
195 195 ''' current states:
196 196 n normal
197 197 m needs merging
198 198 r marked for removal
199 199 a marked for addition'''
200 200
201 201 if not files: return
202 202 self.markdirty()
203 203 if state == "a":
204 204 self.checkinterfering(files)
205 205 for f in files:
206 206 if state == "r":
207 207 self.map[f] = ('r', 0, 0, 0)
208 208 self.updatedirs(f, -1)
209 209 else:
210 210 if state == "a":
211 211 self.updatedirs(f, 1)
212 212 s = os.lstat(self.wjoin(f))
213 213 st_size = kw.get('st_size', s.st_size)
214 214 st_mtime = kw.get('st_mtime', s.st_mtime)
215 215 self.map[f] = (state, s.st_mode, st_size, st_mtime)
216 216 if self.copymap.has_key(f):
217 217 del self.copymap[f]
218 218
219 219 def forget(self, files):
220 220 if not files: return
221 221 self.markdirty()
222 222 for f in files:
223 223 try:
224 224 del self.map[f]
225 225 self.updatedirs(f, -1)
226 226 except KeyError:
227 227 self.ui.warn(_("not in dirstate: %s!\n") % f)
228 228 pass
229 229
230 230 def rebuild(self, parent, files):
231 self.reload()
231 self.invalidate()
232 232 for f in files:
233 233 if files.execf(f):
234 234 self.map[f] = ('n', 0777, -1, 0)
235 235 else:
236 236 self.map[f] = ('n', 0666, -1, 0)
237 237 self.pl = (parent, nullid)
238 238 self.markdirty()
239 239
240 240 def write(self):
241 241 if not self._dirty:
242 242 return
243 243 cs = cStringIO.StringIO()
244 244 cs.write("".join(self.pl))
245 245 for f, e in self.map.iteritems():
246 246 c = self.copied(f)
247 247 if c:
248 248 f = f + "\0" + c
249 249 e = struct.pack(_format, e[0], e[1], e[2], e[3], len(f))
250 250 cs.write(e)
251 251 cs.write(f)
252 252 st = self.opener("dirstate", "w", atomictemp=True)
253 253 st.write(cs.getvalue())
254 254 st.rename()
255 255 self._dirty = 0
256 256
257 257 def filterfiles(self, files):
258 258 ret = {}
259 259 unknown = []
260 260
261 261 for x in files:
262 262 if x == '.':
263 263 return self.map.copy()
264 264 if x not in self.map:
265 265 unknown.append(x)
266 266 else:
267 267 ret[x] = self.map[x]
268 268
269 269 if not unknown:
270 270 return ret
271 271
272 272 b = self.map.keys()
273 273 b.sort()
274 274 blen = len(b)
275 275
276 276 for x in unknown:
277 277 bs = bisect.bisect(b, "%s%s" % (x, '/'))
278 278 while bs < blen:
279 279 s = b[bs]
280 280 if len(s) > len(x) and s.startswith(x):
281 281 ret[s] = self.map[s]
282 282 else:
283 283 break
284 284 bs += 1
285 285 return ret
286 286
287 287 def supported_type(self, f, st, verbose=False):
288 288 if stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode):
289 289 return True
290 290 if verbose:
291 291 kind = 'unknown'
292 292 if stat.S_ISCHR(st.st_mode): kind = _('character device')
293 293 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
294 294 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
295 295 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
296 296 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
297 297 self.ui.warn(_('%s: unsupported file type (type is %s)\n')
298 298 % (self.pathto(f), kind))
299 299 return False
300 300
301 301 def walk(self, files=None, match=util.always, badmatch=None):
302 302 # filter out the stat
303 303 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
304 304 yield src, f
305 305
306 306 def statwalk(self, files=None, match=util.always, ignored=False,
307 307 badmatch=None, directories=False):
308 308 '''
309 309 walk recursively through the directory tree, finding all files
310 310 matched by the match function
311 311
312 312 results are yielded in a tuple (src, filename, st), where src
313 313 is one of:
314 314 'f' the file was found in the directory tree
315 315 'd' the file is a directory of the tree
316 316 'm' the file was only in the dirstate and not in the tree
317 317 'b' file was not found and matched badmatch
318 318
319 319 and st is the stat result if the file was found in the directory.
320 320 '''
321 321
322 322 # walk all files by default
323 323 if not files:
324 324 files = ['.']
325 325 dc = self.map.copy()
326 326 else:
327 327 files = util.unique(files)
328 328 dc = self.filterfiles(files)
329 329
330 330 def imatch(file_):
331 331 if file_ not in dc and self._ignore(file_):
332 332 return False
333 333 return match(file_)
334 334
335 335 ignore = self._ignore
336 336 if ignored:
337 337 imatch = match
338 338 ignore = util.never
339 339
340 340 # self.root may end with a path separator when self.root == '/'
341 341 common_prefix_len = len(self.root)
342 342 if not self.root.endswith(os.sep):
343 343 common_prefix_len += 1
344 344 # recursion free walker, faster than os.walk.
345 345 def findfiles(s):
346 346 work = [s]
347 347 if directories:
348 348 yield 'd', util.normpath(s[common_prefix_len:]), os.lstat(s)
349 349 while work:
350 350 top = work.pop()
351 351 names = os.listdir(top)
352 352 names.sort()
353 353 # nd is the top of the repository dir tree
354 354 nd = util.normpath(top[common_prefix_len:])
355 355 if nd == '.':
356 356 nd = ''
357 357 else:
358 358 # do not recurse into a repo contained in this
359 359 # one. use bisect to find .hg directory so speed
360 360 # is good on big directory.
361 361 hg = bisect.bisect_left(names, '.hg')
362 362 if hg < len(names) and names[hg] == '.hg':
363 363 if os.path.isdir(os.path.join(top, '.hg')):
364 364 continue
365 365 for f in names:
366 366 np = util.pconvert(os.path.join(nd, f))
367 367 if seen(np):
368 368 continue
369 369 p = os.path.join(top, f)
370 370 # don't trip over symlinks
371 371 st = os.lstat(p)
372 372 if stat.S_ISDIR(st.st_mode):
373 373 if not ignore(np):
374 374 work.append(p)
375 375 if directories:
376 376 yield 'd', np, st
377 377 if imatch(np) and np in dc:
378 378 yield 'm', np, st
379 379 elif imatch(np):
380 380 if self.supported_type(np, st):
381 381 yield 'f', np, st
382 382 elif np in dc:
383 383 yield 'm', np, st
384 384
385 385 known = {'.hg': 1}
386 386 def seen(fn):
387 387 if fn in known: return True
388 388 known[fn] = 1
389 389
390 390 # step one, find all files that match our criteria
391 391 files.sort()
392 392 for ff in files:
393 393 nf = util.normpath(ff)
394 394 f = self.wjoin(ff)
395 395 try:
396 396 st = os.lstat(f)
397 397 except OSError, inst:
398 398 found = False
399 399 for fn in dc:
400 400 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
401 401 found = True
402 402 break
403 403 if not found:
404 404 if inst.errno != errno.ENOENT or not badmatch:
405 405 self.ui.warn('%s: %s\n' % (self.pathto(ff),
406 406 inst.strerror))
407 407 elif badmatch and badmatch(ff) and imatch(nf):
408 408 yield 'b', ff, None
409 409 continue
410 410 if stat.S_ISDIR(st.st_mode):
411 411 cmp1 = (lambda x, y: cmp(x[1], y[1]))
412 412 sorted_ = [ x for x in findfiles(f) ]
413 413 sorted_.sort(cmp1)
414 414 for e in sorted_:
415 415 yield e
416 416 else:
417 417 if not seen(nf) and match(nf):
418 418 if self.supported_type(ff, st, verbose=True):
419 419 yield 'f', nf, st
420 420 elif ff in dc:
421 421 yield 'm', nf, st
422 422
423 423 # step two run through anything left in the dc hash and yield
424 424 # if we haven't already seen it
425 425 ks = dc.keys()
426 426 ks.sort()
427 427 for k in ks:
428 428 if not seen(k) and imatch(k):
429 429 yield 'm', k, None
430 430
431 431 def status(self, files=None, match=util.always, list_ignored=False,
432 432 list_clean=False):
433 433 lookup, modified, added, unknown, ignored = [], [], [], [], []
434 434 removed, deleted, clean = [], [], []
435 435
436 436 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
437 437 try:
438 438 type_, mode, size, time = self[fn]
439 439 except KeyError:
440 440 if list_ignored and self._ignore(fn):
441 441 ignored.append(fn)
442 442 else:
443 443 unknown.append(fn)
444 444 continue
445 445 if src == 'm':
446 446 nonexistent = True
447 447 if not st:
448 448 try:
449 449 st = os.lstat(self.wjoin(fn))
450 450 except OSError, inst:
451 451 if inst.errno != errno.ENOENT:
452 452 raise
453 453 st = None
454 454 # We need to re-check that it is a valid file
455 455 if st and self.supported_type(fn, st):
456 456 nonexistent = False
457 457 # XXX: what to do with file no longer present in the fs
458 458 # who are not removed in the dirstate ?
459 459 if nonexistent and type_ in "nm":
460 460 deleted.append(fn)
461 461 continue
462 462 # check the common case first
463 463 if type_ == 'n':
464 464 if not st:
465 465 st = os.lstat(self.wjoin(fn))
466 466 if size >= 0 and (size != st.st_size
467 467 or (mode ^ st.st_mode) & 0100):
468 468 modified.append(fn)
469 469 elif time != int(st.st_mtime):
470 470 lookup.append(fn)
471 471 elif list_clean:
472 472 clean.append(fn)
473 473 elif type_ == 'm':
474 474 modified.append(fn)
475 475 elif type_ == 'a':
476 476 added.append(fn)
477 477 elif type_ == 'r':
478 478 removed.append(fn)
479 479
480 480 return (lookup, modified, added, removed, deleted, unknown, ignored,
481 481 clean)
@@ -1,1969 +1,1968 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
19 19 def __del__(self):
20 20 self.transhandle = None
21 21 def __init__(self, parentui, path=None, create=0):
22 22 repo.repository.__init__(self)
23 23 self.path = path
24 24 self.root = os.path.realpath(path)
25 25 self.path = os.path.join(self.root, ".hg")
26 26 self.origroot = path
27 27 self.opener = util.opener(self.path)
28 28 self.wopener = util.opener(self.root)
29 29
30 30 if not os.path.isdir(self.path):
31 31 if create:
32 32 if not os.path.exists(path):
33 33 os.mkdir(path)
34 34 os.mkdir(self.path)
35 35 requirements = ["revlogv1"]
36 36 if parentui.configbool('format', 'usestore', True):
37 37 os.mkdir(os.path.join(self.path, "store"))
38 38 requirements.append("store")
39 39 # create an invalid changelog
40 40 self.opener("00changelog.i", "a").write(
41 41 '\0\0\0\2' # represents revlogv2
42 42 ' dummy changelog to prevent using the old repo layout'
43 43 )
44 44 reqfile = self.opener("requires", "w")
45 45 for r in requirements:
46 46 reqfile.write("%s\n" % r)
47 47 reqfile.close()
48 48 else:
49 49 raise repo.RepoError(_("repository %s not found") % path)
50 50 elif create:
51 51 raise repo.RepoError(_("repository %s already exists") % path)
52 52 else:
53 53 # find requirements
54 54 try:
55 55 requirements = self.opener("requires").read().splitlines()
56 56 except IOError, inst:
57 57 if inst.errno != errno.ENOENT:
58 58 raise
59 59 requirements = []
60 60 # check them
61 61 for r in requirements:
62 62 if r not in self.supported:
63 63 raise repo.RepoError(_("requirement '%s' not supported") % r)
64 64
65 65 # setup store
66 66 if "store" in requirements:
67 67 self.encodefn = util.encodefilename
68 68 self.decodefn = util.decodefilename
69 69 self.spath = os.path.join(self.path, "store")
70 70 else:
71 71 self.encodefn = lambda x: x
72 72 self.decodefn = lambda x: x
73 73 self.spath = self.path
74 74 self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)
75 75
76 76 self.ui = ui.ui(parentui=parentui)
77 77 try:
78 78 self.ui.readconfig(self.join("hgrc"), self.root)
79 79 except IOError:
80 80 pass
81 81
82 82 fallback = self.ui.config('ui', 'fallbackencoding')
83 83 if fallback:
84 84 util._fallbackencoding = fallback
85 85
86 86 self.tagscache = None
87 87 self.branchcache = None
88 88 self.nodetagscache = None
89 89 self.filterpats = {}
90 90 self.transhandle = None
91 91
92 92 def __getattr__(self, name):
93 93 if name == 'changelog':
94 94 self.changelog = changelog.changelog(self.sopener)
95 95 self.sopener.defversion = self.changelog.version
96 96 return self.changelog
97 97 if name == 'manifest':
98 98 self.changelog
99 99 self.manifest = manifest.manifest(self.sopener)
100 100 return self.manifest
101 101 if name == 'dirstate':
102 102 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
103 103 return self.dirstate
104 104 else:
105 105 raise AttributeError, name
106 106
107 107 def url(self):
108 108 return 'file:' + self.root
109 109
110 110 def hook(self, name, throw=False, **args):
111 111 def callhook(hname, funcname):
112 112 '''call python hook. hook is callable object, looked up as
113 113 name in python module. if callable returns "true", hook
114 114 fails, else passes. if hook raises exception, treated as
115 115 hook failure. exception propagates if throw is "true".
116 116
117 117 reason for "true" meaning "hook failed" is so that
118 118 unmodified commands (e.g. mercurial.commands.update) can
119 119 be run as hooks without wrappers to convert return values.'''
120 120
121 121 self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
122 122 obj = funcname
123 123 if not callable(obj):
124 124 d = funcname.rfind('.')
125 125 if d == -1:
126 126 raise util.Abort(_('%s hook is invalid ("%s" not in '
127 127 'a module)') % (hname, funcname))
128 128 modname = funcname[:d]
129 129 try:
130 130 obj = __import__(modname)
131 131 except ImportError:
132 132 try:
133 133 # extensions are loaded with hgext_ prefix
134 134 obj = __import__("hgext_%s" % modname)
135 135 except ImportError:
136 136 raise util.Abort(_('%s hook is invalid '
137 137 '(import of "%s" failed)') %
138 138 (hname, modname))
139 139 try:
140 140 for p in funcname.split('.')[1:]:
141 141 obj = getattr(obj, p)
142 142 except AttributeError, err:
143 143 raise util.Abort(_('%s hook is invalid '
144 144 '("%s" is not defined)') %
145 145 (hname, funcname))
146 146 if not callable(obj):
147 147 raise util.Abort(_('%s hook is invalid '
148 148 '("%s" is not callable)') %
149 149 (hname, funcname))
150 150 try:
151 151 r = obj(ui=self.ui, repo=self, hooktype=name, **args)
152 152 except (KeyboardInterrupt, util.SignalInterrupt):
153 153 raise
154 154 except Exception, exc:
155 155 if isinstance(exc, util.Abort):
156 156 self.ui.warn(_('error: %s hook failed: %s\n') %
157 157 (hname, exc.args[0]))
158 158 else:
159 159 self.ui.warn(_('error: %s hook raised an exception: '
160 160 '%s\n') % (hname, exc))
161 161 if throw:
162 162 raise
163 163 self.ui.print_exc()
164 164 return True
165 165 if r:
166 166 if throw:
167 167 raise util.Abort(_('%s hook failed') % hname)
168 168 self.ui.warn(_('warning: %s hook failed\n') % hname)
169 169 return r
170 170
171 171 def runhook(name, cmd):
172 172 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
173 173 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
174 174 r = util.system(cmd, environ=env, cwd=self.root)
175 175 if r:
176 176 desc, r = util.explain_exit(r)
177 177 if throw:
178 178 raise util.Abort(_('%s hook %s') % (name, desc))
179 179 self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
180 180 return r
181 181
182 182 r = False
183 183 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
184 184 if hname.split(".", 1)[0] == name and cmd]
185 185 hooks.sort()
186 186 for hname, cmd in hooks:
187 187 if callable(cmd):
188 188 r = callhook(hname, cmd) or r
189 189 elif cmd.startswith('python:'):
190 190 r = callhook(hname, cmd[7:].strip()) or r
191 191 else:
192 192 r = runhook(hname, cmd) or r
193 193 return r
194 194
195 195 tag_disallowed = ':\r\n'
196 196
197 197 def _tag(self, name, node, message, local, user, date, parent=None):
198 198 use_dirstate = parent is None
199 199
200 200 for c in self.tag_disallowed:
201 201 if c in name:
202 202 raise util.Abort(_('%r cannot be used in a tag name') % c)
203 203
204 204 self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)
205 205
206 206 if local:
207 207 # local tags are stored in the current charset
208 208 self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
209 209 self.hook('tag', node=hex(node), tag=name, local=local)
210 210 return
211 211
212 212 # committed tags are stored in UTF-8
213 213 line = '%s %s\n' % (hex(node), util.fromlocal(name))
214 214 if use_dirstate:
215 215 self.wfile('.hgtags', 'ab').write(line)
216 216 else:
217 217 ntags = self.filectx('.hgtags', parent).data()
218 218 self.wfile('.hgtags', 'ab').write(ntags + line)
219 219 if use_dirstate and self.dirstate.state('.hgtags') == '?':
220 220 self.add(['.hgtags'])
221 221
222 222 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)
223 223
224 224 self.hook('tag', node=hex(node), tag=name, local=local)
225 225
226 226 return tagnode
227 227
228 228 def tag(self, name, node, message, local, user, date):
229 229 '''tag a revision with a symbolic name.
230 230
231 231 if local is True, the tag is stored in a per-repository file.
232 232 otherwise, it is stored in the .hgtags file, and a new
233 233 changeset is committed with the change.
234 234
235 235 keyword arguments:
236 236
237 237 local: whether to store tag in non-version-controlled file
238 238 (default False)
239 239
240 240 message: commit message to use if committing
241 241
242 242 user: name of user to use if committing
243 243
244 244 date: date tuple to use if committing'''
245 245
246 246 for x in self.status()[:5]:
247 247 if '.hgtags' in x:
248 248 raise util.Abort(_('working copy of .hgtags is changed '
249 249 '(please commit .hgtags manually)'))
250 250
251 251
252 252 self._tag(name, node, message, local, user, date)
253 253
254 254 def tags(self):
255 255 '''return a mapping of tag to node'''
256 256 if self.tagscache:
257 257 return self.tagscache
258 258
259 259 globaltags = {}
260 260
261 261 def readtags(lines, fn):
262 262 filetags = {}
263 263 count = 0
264 264
265 265 def warn(msg):
266 266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
267 267
268 268 for l in lines:
269 269 count += 1
270 270 if not l:
271 271 continue
272 272 s = l.split(" ", 1)
273 273 if len(s) != 2:
274 274 warn(_("cannot parse entry"))
275 275 continue
276 276 node, key = s
277 277 key = util.tolocal(key.strip()) # stored in UTF-8
278 278 try:
279 279 bin_n = bin(node)
280 280 except TypeError:
281 281 warn(_("node '%s' is not well formed") % node)
282 282 continue
283 283 if bin_n not in self.changelog.nodemap:
284 284 warn(_("tag '%s' refers to unknown node") % key)
285 285 continue
286 286
287 287 h = []
288 288 if key in filetags:
289 289 n, h = filetags[key]
290 290 h.append(n)
291 291 filetags[key] = (bin_n, h)
292 292
293 293 for k,nh in filetags.items():
294 294 if k not in globaltags:
295 295 globaltags[k] = nh
296 296 continue
297 297 # we prefer the global tag if:
298 298 # it supercedes us OR
299 299 # mutual supercedes and it has a higher rank
300 300 # otherwise we win because we're tip-most
301 301 an, ah = nh
302 302 bn, bh = globaltags[k]
303 303 if bn != an and an in bh and \
304 304 (bn not in ah or len(bh) > len(ah)):
305 305 an = bn
306 306 ah.extend([n for n in bh if n not in ah])
307 307 globaltags[k] = an, ah
308 308
309 309 # read the tags file from each head, ending with the tip
310 310 f = None
311 311 for rev, node, fnode in self._hgtagsnodes():
312 312 f = (f and f.filectx(fnode) or
313 313 self.filectx('.hgtags', fileid=fnode))
314 314 readtags(f.data().splitlines(), f)
315 315
316 316 try:
317 317 data = util.fromlocal(self.opener("localtags").read())
318 318 # localtags are stored in the local character set
319 319 # while the internal tag table is stored in UTF-8
320 320 readtags(data.splitlines(), "localtags")
321 321 except IOError:
322 322 pass
323 323
324 324 self.tagscache = {}
325 325 for k,nh in globaltags.items():
326 326 n = nh[0]
327 327 if n != nullid:
328 328 self.tagscache[k] = n
329 329 self.tagscache['tip'] = self.changelog.tip()
330 330
331 331 return self.tagscache
332 332
333 333 def _hgtagsnodes(self):
334 334 heads = self.heads()
335 335 heads.reverse()
336 336 last = {}
337 337 ret = []
338 338 for node in heads:
339 339 c = self.changectx(node)
340 340 rev = c.rev()
341 341 try:
342 342 fnode = c.filenode('.hgtags')
343 343 except revlog.LookupError:
344 344 continue
345 345 ret.append((rev, node, fnode))
346 346 if fnode in last:
347 347 ret[last[fnode]] = None
348 348 last[fnode] = len(ret) - 1
349 349 return [item for item in ret if item]
350 350
351 351 def tagslist(self):
352 352 '''return a list of tags ordered by revision'''
353 353 l = []
354 354 for t, n in self.tags().items():
355 355 try:
356 356 r = self.changelog.rev(n)
357 357 except:
358 358 r = -2 # sort to the beginning of the list if unknown
359 359 l.append((r, t, n))
360 360 l.sort()
361 361 return [(t, n) for r, t, n in l]
362 362
363 363 def nodetags(self, node):
364 364 '''return the tags associated with a node'''
365 365 if not self.nodetagscache:
366 366 self.nodetagscache = {}
367 367 for t, n in self.tags().items():
368 368 self.nodetagscache.setdefault(n, []).append(t)
369 369 return self.nodetagscache.get(node, [])
370 370
371 371 def _branchtags(self):
372 372 partial, last, lrev = self._readbranchcache()
373 373
374 374 tiprev = self.changelog.count() - 1
375 375 if lrev != tiprev:
376 376 self._updatebranchcache(partial, lrev+1, tiprev+1)
377 377 self._writebranchcache(partial, self.changelog.tip(), tiprev)
378 378
379 379 return partial
380 380
381 381 def branchtags(self):
382 382 if self.branchcache is not None:
383 383 return self.branchcache
384 384
385 385 self.branchcache = {} # avoid recursion in changectx
386 386 partial = self._branchtags()
387 387
388 388 # the branch cache is stored on disk as UTF-8, but in the local
389 389 # charset internally
390 390 for k, v in partial.items():
391 391 self.branchcache[util.tolocal(k)] = v
392 392 return self.branchcache
393 393
394 394 def _readbranchcache(self):
395 395 partial = {}
396 396 try:
397 397 f = self.opener("branch.cache")
398 398 lines = f.read().split('\n')
399 399 f.close()
400 400 except (IOError, OSError):
401 401 return {}, nullid, nullrev
402 402
403 403 try:
404 404 last, lrev = lines.pop(0).split(" ", 1)
405 405 last, lrev = bin(last), int(lrev)
406 406 if not (lrev < self.changelog.count() and
407 407 self.changelog.node(lrev) == last): # sanity check
408 408 # invalidate the cache
409 409 raise ValueError('Invalid branch cache: unknown tip')
410 410 for l in lines:
411 411 if not l: continue
412 412 node, label = l.split(" ", 1)
413 413 partial[label.strip()] = bin(node)
414 414 except (KeyboardInterrupt, util.SignalInterrupt):
415 415 raise
416 416 except Exception, inst:
417 417 if self.ui.debugflag:
418 418 self.ui.warn(str(inst), '\n')
419 419 partial, last, lrev = {}, nullid, nullrev
420 420 return partial, last, lrev
421 421
422 422 def _writebranchcache(self, branches, tip, tiprev):
423 423 try:
424 424 f = self.opener("branch.cache", "w", atomictemp=True)
425 425 f.write("%s %s\n" % (hex(tip), tiprev))
426 426 for label, node in branches.iteritems():
427 427 f.write("%s %s\n" % (hex(node), label))
428 428 f.rename()
429 429 except (IOError, OSError):
430 430 pass
431 431
432 432 def _updatebranchcache(self, partial, start, end):
433 433 for r in xrange(start, end):
434 434 c = self.changectx(r)
435 435 b = c.branch()
436 436 partial[b] = c.node()
437 437
438 438 def lookup(self, key):
439 439 if key == '.':
440 440 key, second = self.dirstate.parents()
441 441 if key == nullid:
442 442 raise repo.RepoError(_("no revision checked out"))
443 443 if second != nullid:
444 444 self.ui.warn(_("warning: working directory has two parents, "
445 445 "tag '.' uses the first\n"))
446 446 elif key == 'null':
447 447 return nullid
448 448 n = self.changelog._match(key)
449 449 if n:
450 450 return n
451 451 if key in self.tags():
452 452 return self.tags()[key]
453 453 if key in self.branchtags():
454 454 return self.branchtags()[key]
455 455 n = self.changelog._partialmatch(key)
456 456 if n:
457 457 return n
458 458 raise repo.RepoError(_("unknown revision '%s'") % key)
459 459
460 460 def dev(self):
461 461 return os.lstat(self.path).st_dev
462 462
463 463 def local(self):
464 464 return True
465 465
466 466 def join(self, f):
467 467 return os.path.join(self.path, f)
468 468
469 469 def sjoin(self, f):
470 470 f = self.encodefn(f)
471 471 return os.path.join(self.spath, f)
472 472
473 473 def wjoin(self, f):
474 474 return os.path.join(self.root, f)
475 475
476 476 def file(self, f):
477 477 if f[0] == '/':
478 478 f = f[1:]
479 479 return filelog.filelog(self.sopener, f)
480 480
481 481 def changectx(self, changeid=None):
482 482 return context.changectx(self, changeid)
483 483
484 484 def workingctx(self):
485 485 return context.workingctx(self)
486 486
487 487 def parents(self, changeid=None):
488 488 '''
489 489 get list of changectxs for parents of changeid or working directory
490 490 '''
491 491 if changeid is None:
492 492 pl = self.dirstate.parents()
493 493 else:
494 494 n = self.changelog.lookup(changeid)
495 495 pl = self.changelog.parents(n)
496 496 if pl[1] == nullid:
497 497 return [self.changectx(pl[0])]
498 498 return [self.changectx(pl[0]), self.changectx(pl[1])]
499 499
500 500 def filectx(self, path, changeid=None, fileid=None):
501 501 """changeid can be a changeset revision, node, or tag.
502 502 fileid can be a file revision or node."""
503 503 return context.filectx(self, path, changeid, fileid)
504 504
505 505 def getcwd(self):
506 506 return self.dirstate.getcwd()
507 507
508 508 def pathto(self, f, cwd=None):
509 509 return self.dirstate.pathto(f, cwd)
510 510
511 511 def wfile(self, f, mode='r'):
512 512 return self.wopener(f, mode)
513 513
514 514 def _link(self, f):
515 515 return os.path.islink(self.wjoin(f))
516 516
517 517 def _filter(self, filter, filename, data):
518 518 if filter not in self.filterpats:
519 519 l = []
520 520 for pat, cmd in self.ui.configitems(filter):
521 521 mf = util.matcher(self.root, "", [pat], [], [])[1]
522 522 l.append((mf, cmd))
523 523 self.filterpats[filter] = l
524 524
525 525 for mf, cmd in self.filterpats[filter]:
526 526 if mf(filename):
527 527 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
528 528 data = util.filter(data, cmd)
529 529 break
530 530
531 531 return data
532 532
533 533 def wread(self, filename):
534 534 if self._link(filename):
535 535 data = os.readlink(self.wjoin(filename))
536 536 else:
537 537 data = self.wopener(filename, 'r').read()
538 538 return self._filter("encode", filename, data)
539 539
540 540 def wwrite(self, filename, data, flags):
541 541 data = self._filter("decode", filename, data)
542 542 if "l" in flags:
543 543 f = self.wjoin(filename)
544 544 try:
545 545 os.unlink(f)
546 546 except OSError:
547 547 pass
548 548 d = os.path.dirname(f)
549 549 if not os.path.exists(d):
550 550 os.makedirs(d)
551 551 os.symlink(data, f)
552 552 else:
553 553 try:
554 554 if self._link(filename):
555 555 os.unlink(self.wjoin(filename))
556 556 except OSError:
557 557 pass
558 558 self.wopener(filename, 'w').write(data)
559 559 util.set_exec(self.wjoin(filename), "x" in flags)
560 560
561 561 def wwritedata(self, filename, data):
562 562 return self._filter("decode", filename, data)
563 563
564 564 def transaction(self):
565 565 tr = self.transhandle
566 566 if tr != None and tr.running():
567 567 return tr.nest()
568 568
569 569 # save dirstate for rollback
570 570 try:
571 571 ds = self.opener("dirstate").read()
572 572 except IOError:
573 573 ds = ""
574 574 self.opener("journal.dirstate", "w").write(ds)
575 575
576 576 renames = [(self.sjoin("journal"), self.sjoin("undo")),
577 577 (self.join("journal.dirstate"), self.join("undo.dirstate"))]
578 578 tr = transaction.transaction(self.ui.warn, self.sopener,
579 579 self.sjoin("journal"),
580 580 aftertrans(renames))
581 581 self.transhandle = tr
582 582 return tr
583 583
584 584 def recover(self):
585 585 l = self.lock()
586 586 if os.path.exists(self.sjoin("journal")):
587 587 self.ui.status(_("rolling back interrupted transaction\n"))
588 588 transaction.rollback(self.sopener, self.sjoin("journal"))
589 self.reload()
589 self.invalidate()
590 590 return True
591 591 else:
592 592 self.ui.warn(_("no interrupted transaction available\n"))
593 593 return False
594 594
595 595 def rollback(self, wlock=None, lock=None):
596 596 if not wlock:
597 597 wlock = self.wlock()
598 598 if not lock:
599 599 lock = self.lock()
600 600 if os.path.exists(self.sjoin("undo")):
601 601 self.ui.status(_("rolling back last transaction\n"))
602 602 transaction.rollback(self.sopener, self.sjoin("undo"))
603 603 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
604 self.reload()
605 self.wreload()
604 self.invalidate()
605 self.dirstate.invalidate()
606 606 else:
607 607 self.ui.warn(_("no rollback information available\n"))
608 608
609 def wreload(self):
610 self.dirstate.reload()
611
612 def reload(self):
613 self.changelog.load()
614 self.manifest.load()
609 def invalidate(self):
610 for a in "changelog manifest".split():
611 if hasattr(self, a):
612 self.__delattr__(a)
615 613 self.tagscache = None
616 614 self.nodetagscache = None
617 615
618 616 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
619 617 desc=None):
620 618 try:
621 619 l = lock.lock(lockname, 0, releasefn, desc=desc)
622 620 except lock.LockHeld, inst:
623 621 if not wait:
624 622 raise
625 623 self.ui.warn(_("waiting for lock on %s held by %r\n") %
626 624 (desc, inst.locker))
627 625 # default to 600 seconds timeout
628 626 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
629 627 releasefn, desc=desc)
630 628 if acquirefn:
631 629 acquirefn()
632 630 return l
633 631
634 632 def lock(self, wait=1):
635 return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
633 return self.do_lock(self.sjoin("lock"), wait,
634 acquirefn=self.invalidate,
636 635 desc=_('repository %s') % self.origroot)
637 636
638 637 def wlock(self, wait=1):
639 638 return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
640 self.wreload,
639 self.dirstate.invalidate,
641 640 desc=_('working directory of %s') % self.origroot)
642 641
643 642 def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
644 643 """
645 644 commit an individual file as part of a larger transaction
646 645 """
647 646
648 647 t = self.wread(fn)
649 648 fl = self.file(fn)
650 649 fp1 = manifest1.get(fn, nullid)
651 650 fp2 = manifest2.get(fn, nullid)
652 651
653 652 meta = {}
654 653 cp = self.dirstate.copied(fn)
655 654 if cp:
656 655 # Mark the new revision of this file as a copy of another
657 656 # file. This copy data will effectively act as a parent
658 657 # of this new revision. If this is a merge, the first
659 658 # parent will be the nullid (meaning "look up the copy data")
660 659 # and the second one will be the other parent. For example:
661 660 #
662 661 # 0 --- 1 --- 3 rev1 changes file foo
663 662 # \ / rev2 renames foo to bar and changes it
664 663 # \- 2 -/ rev3 should have bar with all changes and
665 664 # should record that bar descends from
666 665 # bar in rev2 and foo in rev1
667 666 #
668 667 # this allows this merge to succeed:
669 668 #
670 669 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
671 670 # \ / merging rev3 and rev4 should use bar@rev2
672 671 # \- 2 --- 4 as the merge base
673 672 #
674 673 meta["copy"] = cp
675 674 if not manifest2: # not a branch merge
676 675 meta["copyrev"] = hex(manifest1.get(cp, nullid))
677 676 fp2 = nullid
678 677 elif fp2 != nullid: # copied on remote side
679 678 meta["copyrev"] = hex(manifest1.get(cp, nullid))
680 679 elif fp1 != nullid: # copied on local side, reversed
681 680 meta["copyrev"] = hex(manifest2.get(cp))
682 681 fp2 = fp1
683 682 else: # directory rename
684 683 meta["copyrev"] = hex(manifest1.get(cp, nullid))
685 684 self.ui.debug(_(" %s: copy %s:%s\n") %
686 685 (fn, cp, meta["copyrev"]))
687 686 fp1 = nullid
688 687 elif fp2 != nullid:
689 688 # is one parent an ancestor of the other?
690 689 fpa = fl.ancestor(fp1, fp2)
691 690 if fpa == fp1:
692 691 fp1, fp2 = fp2, nullid
693 692 elif fpa == fp2:
694 693 fp2 = nullid
695 694
696 695 # is the file unmodified from the parent? report existing entry
697 696 if fp2 == nullid and not fl.cmp(fp1, t):
698 697 return fp1
699 698
700 699 changelist.append(fn)
701 700 return fl.add(t, meta, transaction, linkrev, fp1, fp2)
702 701
703 702 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
704 703 if p1 is None:
705 704 p1, p2 = self.dirstate.parents()
706 705 return self.commit(files=files, text=text, user=user, date=date,
707 706 p1=p1, p2=p2, wlock=wlock, extra=extra)
708 707
709 708 def commit(self, files=None, text="", user=None, date=None,
710 709 match=util.always, force=False, lock=None, wlock=None,
711 710 force_editor=False, p1=None, p2=None, extra={}):
712 711
713 712 commit = []
714 713 remove = []
715 714 changed = []
716 715 use_dirstate = (p1 is None) # not rawcommit
717 716 extra = extra.copy()
718 717
719 718 if use_dirstate:
720 719 if files:
721 720 for f in files:
722 721 s = self.dirstate.state(f)
723 722 if s in 'nmai':
724 723 commit.append(f)
725 724 elif s == 'r':
726 725 remove.append(f)
727 726 else:
728 727 self.ui.warn(_("%s not tracked!\n") % f)
729 728 else:
730 729 changes = self.status(match=match)[:5]
731 730 modified, added, removed, deleted, unknown = changes
732 731 commit = modified + added
733 732 remove = removed
734 733 else:
735 734 commit = files
736 735
737 736 if use_dirstate:
738 737 p1, p2 = self.dirstate.parents()
739 738 update_dirstate = True
740 739 else:
741 740 p1, p2 = p1, p2 or nullid
742 741 update_dirstate = (self.dirstate.parents()[0] == p1)
743 742
744 743 c1 = self.changelog.read(p1)
745 744 c2 = self.changelog.read(p2)
746 745 m1 = self.manifest.read(c1[0]).copy()
747 746 m2 = self.manifest.read(c2[0])
748 747
749 748 if use_dirstate:
750 749 branchname = self.workingctx().branch()
751 750 try:
752 751 branchname = branchname.decode('UTF-8').encode('UTF-8')
753 752 except UnicodeDecodeError:
754 753 raise util.Abort(_('branch name not in UTF-8!'))
755 754 else:
756 755 branchname = ""
757 756
758 757 if use_dirstate:
759 758 oldname = c1[5].get("branch") # stored in UTF-8
760 759 if not commit and not remove and not force and p2 == nullid and \
761 760 branchname == oldname:
762 761 self.ui.status(_("nothing changed\n"))
763 762 return None
764 763
765 764 xp1 = hex(p1)
766 765 if p2 == nullid: xp2 = ''
767 766 else: xp2 = hex(p2)
768 767
769 768 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
770 769
771 770 if not wlock:
772 771 wlock = self.wlock()
773 772 if not lock:
774 773 lock = self.lock()
775 774 tr = self.transaction()
776 775
777 776 # check in files
778 777 new = {}
779 778 linkrev = self.changelog.count()
780 779 commit.sort()
781 780 is_exec = util.execfunc(self.root, m1.execf)
782 781 is_link = util.linkfunc(self.root, m1.linkf)
783 782 for f in commit:
784 783 self.ui.note(f + "\n")
785 784 try:
786 785 new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
787 786 new_exec = is_exec(f)
788 787 new_link = is_link(f)
789 788 if not changed or changed[-1] != f:
790 789 # mention the file in the changelog if some flag changed,
791 790 # even if there was no content change.
792 791 old_exec = m1.execf(f)
793 792 old_link = m1.linkf(f)
794 793 if old_exec != new_exec or old_link != new_link:
795 794 changed.append(f)
796 795 m1.set(f, new_exec, new_link)
797 796 except (OSError, IOError):
798 797 if use_dirstate:
799 798 self.ui.warn(_("trouble committing %s!\n") % f)
800 799 raise
801 800 else:
802 801 remove.append(f)
803 802
804 803 # update manifest
805 804 m1.update(new)
806 805 remove.sort()
807 806 removed = []
808 807
809 808 for f in remove:
810 809 if f in m1:
811 810 del m1[f]
812 811 removed.append(f)
813 812 elif f in m2:
814 813 removed.append(f)
815 814 mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))
816 815
817 816 # add changeset
818 817 new = new.keys()
819 818 new.sort()
820 819
821 820 user = user or self.ui.username()
822 821 if not text or force_editor:
823 822 edittext = []
824 823 if text:
825 824 edittext.append(text)
826 825 edittext.append("")
827 826 edittext.append("HG: user: %s" % user)
828 827 if p2 != nullid:
829 828 edittext.append("HG: branch merge")
830 829 if branchname:
831 830 edittext.append("HG: branch %s" % util.tolocal(branchname))
832 831 edittext.extend(["HG: changed %s" % f for f in changed])
833 832 edittext.extend(["HG: removed %s" % f for f in removed])
834 833 if not changed and not remove:
835 834 edittext.append("HG: no files changed")
836 835 edittext.append("")
837 836 # run editor in the repository root
838 837 olddir = os.getcwd()
839 838 os.chdir(self.root)
840 839 text = self.ui.edit("\n".join(edittext), user)
841 840 os.chdir(olddir)
842 841
843 842 lines = [line.rstrip() for line in text.rstrip().splitlines()]
844 843 while lines and not lines[0]:
845 844 del lines[0]
846 845 if not lines:
847 846 return None
848 847 text = '\n'.join(lines)
849 848 if branchname:
850 849 extra["branch"] = branchname
851 850 n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
852 851 user, date, extra)
853 852 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
854 853 parent2=xp2)
855 854 tr.close()
856 855
857 856 if self.branchcache and "branch" in extra:
858 857 self.branchcache[util.tolocal(extra["branch"])] = n
859 858
860 859 if use_dirstate or update_dirstate:
861 860 self.dirstate.setparents(n)
862 861 if use_dirstate:
863 862 self.dirstate.update(new, "n")
864 863 self.dirstate.forget(removed)
865 864
866 865 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
867 866 return n
868 867
869 868 def walk(self, node=None, files=[], match=util.always, badmatch=None):
870 869 '''
871 870 walk recursively through the directory tree or a given
872 871 changeset, finding all files matched by the match
873 872 function
874 873
875 874 results are yielded in a tuple (src, filename), where src
876 875 is one of:
877 876 'f' the file was found in the directory tree
878 877 'm' the file was only in the dirstate and not in the tree
879 878 'b' file was not found and matched badmatch
880 879 '''
881 880
882 881 if node:
883 882 fdict = dict.fromkeys(files)
884 883 # for dirstate.walk, files=['.'] means "walk the whole tree".
885 884 # follow that here, too
886 885 fdict.pop('.', None)
887 886 mdict = self.manifest.read(self.changelog.read(node)[0])
888 887 mfiles = mdict.keys()
889 888 mfiles.sort()
890 889 for fn in mfiles:
891 890 for ffn in fdict:
892 891 # match if the file is the exact name or a directory
893 892 if ffn == fn or fn.startswith("%s/" % ffn):
894 893 del fdict[ffn]
895 894 break
896 895 if match(fn):
897 896 yield 'm', fn
898 897 ffiles = fdict.keys()
899 898 ffiles.sort()
900 899 for fn in ffiles:
901 900 if badmatch and badmatch(fn):
902 901 if match(fn):
903 902 yield 'b', fn
904 903 else:
905 904 self.ui.warn(_('%s: No such file in rev %s\n')
906 905 % (self.pathto(fn), short(node)))
907 906 else:
908 907 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
909 908 yield src, fn
910 909
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns a tuple of seven sorted lists of file names:
        (modified, added, removed, deleted, unknown, ignored, clean).
        ignored and clean are only populated when list_ignored /
        list_clean are true.
        """

        def fcmp(fn, getnode):
            # full-content comparison of the working copy of fn against
            # the revision getnode(fn); true if they differ
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of changeset `node`, restricted to files accepted
            # by the match function
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        # comparing the working directory against its own first parent is
        # the common fast path: the dirstate already knows the answer
        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        mywlock = False

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                    getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                          nullid)
                    for f in lookup:
                        if fcmp(f, getnode):
                            modified.append(f)
                        else:
                            if list_clean:
                                clean.append(f)
                            # content unchanged: opportunistically refresh
                            # the dirstate stat cache so the next status is
                            # faster; needs the wlock, acquired non-blocking
                            if not wlock and not mywlock:
                                mywlock = True
                                try:
                                    wlock = self.wlock(wait=0)
                                except lock.LockException:
                                    # someone else holds the lock: skip the
                                    # cache refresh, results are unaffected
                                    pass
                            if wlock:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

            # only release the wlock if we acquired it ourselves above
            if mywlock and wlock:
                wlock.release()
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
                                                fcmp(fn, getnode))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    # drop entries present in both manifests; whatever is
                    # left in mf1 afterwards was removed in node2
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
1020 1019
1021 1020 def add(self, list, wlock=None):
1022 1021 if not wlock:
1023 1022 wlock = self.wlock()
1024 1023 for f in list:
1025 1024 p = self.wjoin(f)
1026 1025 try:
1027 1026 st = os.lstat(p)
1028 1027 except:
1029 1028 self.ui.warn(_("%s does not exist!\n") % f)
1030 1029 continue
1031 1030 if st.st_size > 10000000:
1032 1031 self.ui.warn(_("%s: files over 10MB may cause memory and"
1033 1032 " performance problems\n"
1034 1033 "(use 'hg revert %s' to unadd the file)\n")
1035 1034 % (f, f))
1036 1035 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1037 1036 self.ui.warn(_("%s not added: only files and symlinks "
1038 1037 "supported currently\n") % f)
1039 1038 elif self.dirstate.state(f) in 'an':
1040 1039 self.ui.warn(_("%s already tracked!\n") % f)
1041 1040 else:
1042 1041 self.dirstate.update([f], "a")
1043 1042
1044 1043 def forget(self, list, wlock=None):
1045 1044 if not wlock:
1046 1045 wlock = self.wlock()
1047 1046 for f in list:
1048 1047 if self.dirstate.state(f) not in 'ai':
1049 1048 self.ui.warn(_("%s not added!\n") % f)
1050 1049 else:
1051 1050 self.dirstate.forget([f])
1052 1051
    def remove(self, list, unlink=False, wlock=None):
        """stop tracking the files in `list`

        If unlink is true, the working copies are deleted first.  Files
        only scheduled for add are simply forgotten; committed files are
        marked removed ('r') in the dirstate.
        """
        if unlink:
            for f in list:
                try:
                    util.unlink(self.wjoin(f))
                except OSError, inst:
                    # an already-missing file is fine; re-raise anything else
                    if inst.errno != errno.ENOENT:
                        raise
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if unlink and os.path.exists(self.wjoin(f)):
                # the unlink above did not take effect (e.g. a race
                # recreated the file): refuse to mark it removed
                self.ui.warn(_("%s still exists!\n") % f)
            elif self.dirstate.state(f) == 'a':
                # never committed: just drop the pending add
                self.dirstate.forget([f])
            elif f not in self.dirstate:
                self.ui.warn(_("%s not tracked!\n") % f)
            else:
                self.dirstate.update([f], "r")
1072 1071
    def undelete(self, list, wlock=None):
        """restore files scheduled for removal ('hg remove')

        Each file in `list` must be in dirstate state 'r'; its content
        and flags are restored from the first parent's manifest and its
        dirstate entry is reset to normal ('n').
        """
        p = self.dirstate.parents()[0]
        mn = self.changelog.read(p)[0]
        m = self.manifest.read(mn)
        if not wlock:
            wlock = self.wlock()
        for f in list:
            if self.dirstate.state(f) not in "r":
                self.ui.warn("%s not removed!\n" % f)
            else:
                t = self.file(f).read(m[f])
                self.wwrite(f, t, m.flags(f))
                self.dirstate.update([f], "n")
1086 1085
1087 1086 def copy(self, source, dest, wlock=None):
1088 1087 p = self.wjoin(dest)
1089 1088 if not (os.path.exists(p) or os.path.islink(p)):
1090 1089 self.ui.warn(_("%s does not exist!\n") % dest)
1091 1090 elif not (os.path.isfile(p) or os.path.islink(p)):
1092 1091 self.ui.warn(_("copy failed: %s is not a file or a "
1093 1092 "symbolic link\n") % dest)
1094 1093 else:
1095 1094 if not wlock:
1096 1095 wlock = self.wlock()
1097 1096 if self.dirstate.state(dest) == '?':
1098 1097 self.dirstate.update([dest], "a")
1099 1098 self.dirstate.copy(source, dest)
1100 1099
1101 1100 def heads(self, start=None):
1102 1101 heads = self.changelog.heads(start)
1103 1102 # sort the output in rev descending order
1104 1103 heads = [(-self.changelog.rev(h), h) for h in heads]
1105 1104 heads.sort()
1106 1105 return [n for (r, n) in heads]
1107 1106
1108 1107 def branches(self, nodes):
1109 1108 if not nodes:
1110 1109 nodes = [self.changelog.tip()]
1111 1110 b = []
1112 1111 for n in nodes:
1113 1112 t = n
1114 1113 while 1:
1115 1114 p = self.changelog.parents(n)
1116 1115 if p[1] != nullid or p[0] == nullid:
1117 1116 b.append((t, n, p[0], p[1]))
1118 1117 break
1119 1118 n = p[0]
1120 1119 return b
1121 1120
1122 1121 def between(self, pairs):
1123 1122 r = []
1124 1123
1125 1124 for top, bottom in pairs:
1126 1125 n, l, i = top, [], 0
1127 1126 f = 1
1128 1127
1129 1128 while n != bottom:
1130 1129 p = self.changelog.parents(n)[0]
1131 1130 if i == f:
1132 1131 l.append(n)
1133 1132 f = f * 2
1134 1133 n = p
1135 1134 i += 1
1136 1135
1137 1136 r.append(l)
1138 1137
1139 1138 return r
1140 1139
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore base will be updated to include the nodes that exists
        in self and remote but no children exists in self and remote.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base == None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            # local repo is empty: everything the remote has is missing
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    # queue unknown parents for the next round of
                    # remote.branches queries
                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                # batch parent queries ten at a time to bound request size
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        # adjacent samples bracket the boundary: p is the
                        # earliest node missing locally on this branch
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()
1281 1280
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base == None:
            # no common-node information supplied: run discovery ourselves;
            # findincoming fills `base` with the shared boundary nodes
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset
1329 1328
    def pull(self, remote, heads=None, force=False, lock=None):
        """pull changes from the remote repository into this one

        If heads is given, only those heads and their ancestors are
        requested (requires the remote 'changegroupsubset' capability).
        Returns the addchangegroup result code; 0 means no changes.
        """
        # only take (and later release) the repo lock if the caller did
        # not hand us one
        mylock = False
        if not lock:
            lock = self.lock()
            mylock = True

        try:
            fetch = self.findincoming(remote, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            if mylock:
                lock.release()
1355 1354
1356 1355 def push(self, remote, force=False, revs=None):
1357 1356 # there are two ways to push to remote repo:
1358 1357 #
1359 1358 # addchangegroup assumes local user can lock remote
1360 1359 # repo (local filesystem, old ssh servers).
1361 1360 #
1362 1361 # unbundle assumes local user cannot lock remote repo (new ssh
1363 1362 # servers, http servers).
1364 1363
1365 1364 if remote.capable('unbundle'):
1366 1365 return self.push_unbundle(remote, force, revs)
1367 1366 return self.push_addchangegroup(remote, force, revs)
1368 1367
    def prepush(self, remote, force, revs):
        """compute the outgoing changegroup for a push

        Returns (changegroup, remote_heads) when the push may proceed,
        or (None, errcode) when there is nothing to push or the push
        would create new remote heads and force is not set.
        """
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # pushing into an empty repo cannot add heads
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        # known locally: remains a head only if no
                        # outgoing head descends from it
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1424 1423
1425 1424 def push_addchangegroup(self, remote, force, revs):
1426 1425 lock = remote.lock()
1427 1426
1428 1427 ret = self.prepush(remote, force, revs)
1429 1428 if ret[0] is not None:
1430 1429 cg, remote_heads = ret
1431 1430 return remote.addchangegroup(cg, 'push', self.url())
1432 1431 return ret[1]
1433 1432
1434 1433 def push_unbundle(self, remote, force, revs):
1435 1434 # local repo finds heads on server, finds out what revs it
1436 1435 # must push. once revs transferred, if server finds it has
1437 1436 # different heads (someone else won commit/push race), server
1438 1437 # aborts.
1439 1438
1440 1439 ret = self.prepush(remote, force, revs)
1441 1440 if ret[0] is not None:
1442 1441 cg, remote_heads = ret
1443 1442 if force: remote_heads = ['force']
1444 1443 return remote.unbundle(cg, remote_heads, 'push')
1445 1444 return ret[1]
1446 1445
1447 1446 def changegroupinfo(self, nodes):
1448 1447 self.ui.note(_("%d changesets found\n") % len(nodes))
1449 1448 if self.ui.debugflag:
1450 1449 self.ui.debug(_("List of changesets:\n"))
1451 1450 for node in nodes:
1452 1451 self.ui.debug("%s\n" % hex(node))
1453 1452
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Returns a util.chunkbuffer yielding the raw changegroup stream.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all these utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

        if msng_cl_lst:
            self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1725 1724
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a util.chunkbuffer yielding the raw changegroup stream.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of outgoing changelog revision numbers, used to
        # select the manifest and file revisions linked to them
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        def identity(x):
            # a changeset belongs to itself
            return x

        def gennodelst(revlog):
            # yield the nodes of revlog whose linked changeset is outgoing
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            # callback invoked per outgoing changeset: accumulate the
            # names of all files it touches
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            # map a revlog node back to its owning changelog node
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1792 1791
def addchangegroup(self, source, srctype, url):
    """add changegroup to repo.

    source: stream of changegroup chunks (read via the changegroup module)
    srctype: source tag forwarded to the hooks
    url: source URL, forwarded to the hooks

    return values:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - less heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    # Both helpers close over `cl`, which is only bound further down;
    # they are not called before that assignment happens.
    def csmap(x):
        # Log each incoming changeset and return the current changelog
        # length.  NOTE(review): this value appears to be used as the
        # linkrev for the added revision — confirm against cl.addgroup.
        self.ui.debug(_("add changeset %s\n") % short(x))
        return cl.count()

    def revmap(x):
        # Map a changelog node to its revision number; used as the
        # lookup callback for the manifest and filelog groups below.
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0

    tr = self.transaction()

    # write changelog data to temp files so concurrent readers will not see
    # inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    # pull off the changeset group
    self.ui.status(_("adding changesets\n"))
    cor = cl.count() - 1  # last changelog rev before this group
    chunkiter = changegroup.chunkiter(source)
    if cl.addgroup(chunkiter, csmap, tr, 1) is None:
        raise util.Abort(_("received changelog group is empty"))
    cnr = cl.count() - 1  # last changelog rev after this group
    changesets = cnr - cor

    # pull off the manifest group
    self.ui.status(_("adding manifests\n"))
    chunkiter = changegroup.chunkiter(source)
    # no need to check for empty manifest group here:
    # if the result of the merge of 1 and 2 is the same in 3 and 4,
    # no new manifest will be created and the manifest group will
    # be empty during the pull
    self.manifest.addgroup(chunkiter, revmap, tr)

    # process the files
    self.ui.status(_("adding file changes\n"))
    # One group per changed file; an empty chunk marks the end of the
    # file-group sequence.
    while 1:
        f = changegroup.getchunk(source)
        if not f:
            break
        self.ui.debug(_("adding %s revisions\n") % f)
        fl = self.file(f)
        o = fl.count()  # revision count before, to tally new revisions
        chunkiter = changegroup.chunkiter(source)
        if fl.addgroup(chunkiter, revmap, tr) is None:
            raise util.Abort(_("received file revlog group is empty"))
        revisions += fl.count() - o
        files += 1

    # make changelog see real files again
    cl.finalize(tr)

    newheads = len(self.changelog.heads())
    heads = ""
    if oldheads and newheads != oldheads:
        heads = _(" (%+d heads)") % (newheads - oldheads)

    self.ui.status(_("added %d changesets"
                     " with %d changes to %d files%s\n")
                   % (changesets, revisions, files, heads))

    if changesets > 0:
        # pretxnchangegroup may raise (throw=True) and thereby veto the
        # transaction before tr.close() commits it.
        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)), source=srctype,
                  url=url)

    tr.close()

    if changesets > 0:
        # post-commit notification hooks: one 'changegroup' for the whole
        # group, then one 'incoming' per added changeset.
        self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                  source=srctype, url=url)

        for i in xrange(cor + 1, cnr + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
1889 1888
1890 1889
def stream_in(self, remote):
    """Clone by copying raw store files streamed from the remote.

    Reads the remote's 'stream_out' response: a status code line, a
    "total_files total_bytes" line, then for each file a
    "name\\0size" header followed by `size` bytes of file data, which
    are written directly into the local store.

    Returns len(self.heads()) + 1 (never 0, mirroring the pull return
    convention).  Raises util.Abort on a server-side refusal and
    util.UnexpectedOutput when the stream is malformed.
    """
    fp = remote.stream_out()
    l = fp.readline()
    try:
        resp = int(l)
    except ValueError:
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    if resp == 1:
        raise util.Abort(_('operation forbidden by server'))
    elif resp == 2:
        raise util.Abort(_('locking the remote repository failed'))
    elif resp != 0:
        raise util.Abort(_('the server sent an unknown error code'))
    self.ui.status(_('streaming all changes\n'))
    l = fp.readline()
    try:
        total_files, total_bytes = map(int, l.split(' ', 1))
    # BUG FIX: "except ValueError, TypeError:" is the Python 2 binding
    # form -- it caught only ValueError and bound the instance to the
    # name TypeError.  A tuple is needed to catch both exceptions.
    except (ValueError, TypeError):
        raise util.UnexpectedOutput(
            _('Unexpected response from remote server:'), l)
    self.ui.status(_('%d files to transfer, %s of data\n') %
                   (total_files, util.bytecount(total_bytes)))
    start = time.time()
    for i in xrange(total_files):
        # XXX doesn't support '\n' or '\r' in filenames
        l = fp.readline()
        try:
            name, size = l.split('\0', 1)
            size = int(size)
        except (ValueError, TypeError):  # same fix as above
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
        # write straight into the store; invalidate() below makes the
        # in-memory repo pick up the new files
        ofp = self.sopener(name, 'w')
        for chunk in util.filechunkiter(fp, limit=size):
            ofp.write(chunk)
        ofp.close()
    elapsed = time.time() - start
    if elapsed <= 0:
        elapsed = 0.001  # avoid division by zero on very fast transfers
    self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                   (util.bytecount(total_bytes), elapsed,
                    util.bytecount(total_bytes / elapsed)))
    self.invalidate()
    return len(self.heads()) + 1
1937 1936
def clone(self, remote, heads=[], stream=False):
    '''Clone the remote repository into this one.

    heads  -- list of revs to clone; a non-empty list forces a pull
    stream -- use a streaming clone if the server allows it
    '''
    # Streaming is only usable for a full clone: requesting explicit
    # heads means a partial clone, which must go through pull.
    #
    # Any client that can request an uncompressed clone is assumed able
    # to read every repo format a stream-capable server may serve; if
    # the revlog format ever changes, clients will have to check the
    # version and format flags on the "stream" capability and use
    # uncompressed transfer only when compatible.
    wants_stream = stream and not heads and remote.capable('stream')
    if wants_stream:
        return self.stream_in(remote)
    return self.pull(remote, heads)
1956 1955
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The pairs are copied into fresh tuples so the returned closure
    keeps no reference back to the caller's structures.
    """
    pending = [tuple(pair) for pair in files]

    def run_renames():
        for src, dest in pending:
            util.rename(src, dest)
    return run_renames
1964 1963
def instance(ui, path, create):
    """Repository factory: open (or create) the local repository at
    *path*, stripping any leading 'file' scheme first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1967 1966
def islocal(path):
    """Report whether *path* refers to a local repository.

    For this repository type the answer is always True.
    """
    return True
General Comments 0
You need to be logged in to leave comments. Login now