atomictempfile: make close() consistent with other file-like objects....
Greg Ward
r15057:774da712 default
@@ -1,3292 +1,3292 @@
1 1 # mq.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''manage a stack of patches
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use :hg:`help command` for more details)::
18 18
19 19 create new patch qnew
20 20 import existing patch qimport
21 21
22 22 print patch series qseries
23 23 print applied patches qapplied
24 24
25 25 add known patch to applied stack qpush
26 26 remove patch from applied stack qpop
27 27 refresh contents of top applied patch qrefresh
28 28
29 29 By default, mq will automatically use git patches when required to
30 30 avoid losing file mode changes, copy records, binary files or empty
31 31 file creations or deletions. This behaviour can be configured with::
32 32
33 33 [mq]
34 34 git = auto/keep/yes/no
35 35
36 36 If set to 'keep', mq will obey the [diff] section configuration while
37 37 preserving existing git patches upon qrefresh. If set to 'yes' or
38 38 'no', mq will override the [diff] section and always generate git or
39 39 regular patches, possibly losing data in the second case.
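
For example, to force git patches regardless of the [diff] section,
one illustrative configuration using the option above is::

    [mq]
    git = yes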
40 40
41 41 You will by default be managing a patch queue named "patches". You can
42 42 create other, independent patch queues with the :hg:`qqueue` command.
43 43 '''
44 44
45 45 from mercurial.i18n import _
46 46 from mercurial.node import bin, hex, short, nullid, nullrev
47 47 from mercurial.lock import release
48 48 from mercurial import commands, cmdutil, hg, scmutil, util, revset
49 49 from mercurial import repair, extensions, url, error
50 50 from mercurial import patch as patchmod
51 51 import os, re, errno, shutil
52 52
53 53 commands.norepo += " qclone"
54 54
55 55 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
56 56
57 57 cmdtable = {}
58 58 command = cmdutil.command(cmdtable)
59 59
60 60 # Patch names look like unix file names.
61 61 # They must be joinable with queue directory and result in the patch path.
62 62 normname = util.normpath
63 63
64 64 class statusentry(object):
65 65 def __init__(self, node, name):
66 66 self.node, self.name = node, name
67 67 def __repr__(self):
68 68 return hex(self.node) + ':' + self.name
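
# An illustrative round trip (a sketch, not part of mq; the patch name is
# hypothetical): each status-file line is "<40-hex-node>:<patch name>",
# and statusentry mirrors that format:
#
#   line = 'a' * 40 + ':fix-build.patch'
#   node, name = line.split(':', 1)
#   assert repr(statusentry(bin(node), name)) == line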
69 69
70 70 class patchheader(object):
71 71 def __init__(self, pf, plainmode=False):
72 72 def eatdiff(lines):
73 73 while lines:
74 74 l = lines[-1]
75 75 if (l.startswith("diff -") or
76 76 l.startswith("Index:") or
77 77 l.startswith("===========")):
78 78 del lines[-1]
79 79 else:
80 80 break
81 81 def eatempty(lines):
82 82 while lines:
83 83 if not lines[-1].strip():
84 84 del lines[-1]
85 85 else:
86 86 break
87 87
88 88 message = []
89 89 comments = []
90 90 user = None
91 91 date = None
92 92 parent = None
93 93 format = None
94 94 subject = None
95 95 branch = None
96 96 nodeid = None
97 97 diffstart = 0
98 98
99 99 for line in file(pf):
100 100 line = line.rstrip()
101 101 if (line.startswith('diff --git')
102 102 or (diffstart and line.startswith('+++ '))):
103 103 diffstart = 2
104 104 break
105 105 diffstart = 0 # reset
106 106 if line.startswith("--- "):
107 107 diffstart = 1
108 108 continue
109 109 elif format == "hgpatch":
110 110 # parse values when importing the result of an hg export
111 111 if line.startswith("# User "):
112 112 user = line[7:]
113 113 elif line.startswith("# Date "):
114 114 date = line[7:]
115 115 elif line.startswith("# Parent "):
116 116 parent = line[9:].lstrip()
117 117 elif line.startswith("# Branch "):
118 118 branch = line[9:]
119 119 elif line.startswith("# Node ID "):
120 120 nodeid = line[10:]
121 121 elif not line.startswith("# ") and line:
122 122 message.append(line)
123 123 format = None
124 124 elif line == '# HG changeset patch':
125 125 message = []
126 126 format = "hgpatch"
127 127 elif (format != "tagdone" and (line.startswith("Subject: ") or
128 128 line.startswith("subject: "))):
129 129 subject = line[9:]
130 130 format = "tag"
131 131 elif (format != "tagdone" and (line.startswith("From: ") or
132 132 line.startswith("from: "))):
133 133 user = line[6:]
134 134 format = "tag"
135 135 elif (format != "tagdone" and (line.startswith("Date: ") or
136 136 line.startswith("date: "))):
137 137 date = line[6:]
138 138 format = "tag"
139 139 elif format == "tag" and line == "":
140 140 # when looking for tags (subject: from: etc) they
141 141 # end once you find a blank line in the source
142 142 format = "tagdone"
143 143 elif message or line:
144 144 message.append(line)
145 145 comments.append(line)
146 146
147 147 eatdiff(message)
148 148 eatdiff(comments)
149 149 # Remember the exact starting line of the patch diffs before consuming
150 150 # empty lines, for external use by TortoiseHg and others
151 151 self.diffstartline = len(comments)
152 152 eatempty(message)
153 153 eatempty(comments)
154 154
155 155 # make sure message isn't empty
156 156 if format and format.startswith("tag") and subject:
157 157 message.insert(0, "")
158 158 message.insert(0, subject)
159 159
160 160 self.message = message
161 161 self.comments = comments
162 162 self.user = user
163 163 self.date = date
164 164 self.parent = parent
165 165 # nodeid and branch are for external use by TortoiseHg and others
166 166 self.nodeid = nodeid
167 167 self.branch = branch
168 168 self.haspatch = diffstart > 1
169 169 self.plainmode = plainmode
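
# Illustrative parse (a sketch; the short header below is assumed input):
#
#   # HG changeset patch
#   # User alice
#   # Date 0 0
#   fix the build
#
#   diff --git a/f b/f
#
# yields user='alice', date='0 0', message=['fix the build'], and
# haspatch=True (diffstart reaches 2 on the 'diff --git' line).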
170 170
171 171 def setuser(self, user):
172 172 if not self.updateheader(['From: ', '# User '], user):
173 173 try:
174 174 patchheaderat = self.comments.index('# HG changeset patch')
175 175 self.comments.insert(patchheaderat + 1, '# User ' + user)
176 176 except ValueError:
177 177 if self.plainmode or self._hasheader(['Date: ']):
178 178 self.comments = ['From: ' + user] + self.comments
179 179 else:
180 180 tmp = ['# HG changeset patch', '# User ' + user, '']
181 181 self.comments = tmp + self.comments
182 182 self.user = user
183 183
184 184 def setdate(self, date):
185 185 if not self.updateheader(['Date: ', '# Date '], date):
186 186 try:
187 187 patchheaderat = self.comments.index('# HG changeset patch')
188 188 self.comments.insert(patchheaderat + 1, '# Date ' + date)
189 189 except ValueError:
190 190 if self.plainmode or self._hasheader(['From: ']):
191 191 self.comments = ['Date: ' + date] + self.comments
192 192 else:
193 193 tmp = ['# HG changeset patch', '# Date ' + date, '']
194 194 self.comments = tmp + self.comments
195 195 self.date = date
196 196
197 197 def setparent(self, parent):
198 198 if not self.updateheader(['# Parent '], parent):
199 199 try:
200 200 patchheaderat = self.comments.index('# HG changeset patch')
201 201 self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
202 202 except ValueError:
203 203 pass
204 204 self.parent = parent
205 205
206 206 def setmessage(self, message):
207 207 if self.comments:
208 208 self._delmsg()
209 209 self.message = [message]
210 210 self.comments += self.message
211 211
212 212 def updateheader(self, prefixes, new):
213 213 '''Update all references to a field in the patch header.
214 214 Return whether the field is present.'''
215 215 res = False
216 216 for prefix in prefixes:
217 217 for i in xrange(len(self.comments)):
218 218 if self.comments[i].startswith(prefix):
219 219 self.comments[i] = prefix + new
220 220 res = True
221 221 break
222 222 return res
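
# For instance (an assumed comments list): with
#   ph.comments = ['# HG changeset patch', '# User bob']
# calling ph.updateheader(['From: ', '# User '], 'alice') returns True
# and rewrites comments[1] to '# User alice'.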
223 223
224 224 def _hasheader(self, prefixes):
225 225 '''Check if a header starts with any of the given prefixes.'''
226 226 for prefix in prefixes:
227 227 for comment in self.comments:
228 228 if comment.startswith(prefix):
229 229 return True
230 230 return False
231 231
232 232 def __str__(self):
233 233 if not self.comments:
234 234 return ''
235 235 return '\n'.join(self.comments) + '\n\n'
236 236
237 237 def _delmsg(self):
238 238 '''Remove existing message, keeping the rest of the comments fields.
239 239 If comments contains 'subject: ', the message is assumed to
240 240 begin with that field followed by a blank line.'''
241 241 if self.message:
242 242 subj = 'subject: ' + self.message[0].lower()
243 243 for i in xrange(len(self.comments)):
244 244 if subj == self.comments[i].lower():
245 245 del self.comments[i]
246 246 self.message = self.message[2:]
247 247 break
248 248 ci = 0
249 249 for mi in self.message:
250 250 while mi != self.comments[ci]:
251 251 ci += 1
252 252 del self.comments[ci]
253 253
254 254 class queue(object):
255 255 def __init__(self, ui, path, patchdir=None):
256 256 self.basepath = path
257 257 try:
258 258 fh = open(os.path.join(path, 'patches.queue'))
259 259 cur = fh.read().rstrip()
260 260 fh.close()
261 261 if not cur:
262 262 curpath = os.path.join(path, 'patches')
263 263 else:
264 264 curpath = os.path.join(path, 'patches-' + cur)
265 265 except IOError:
266 266 curpath = os.path.join(path, 'patches')
267 267 self.path = patchdir or curpath
268 268 self.opener = scmutil.opener(self.path)
269 269 self.ui = ui
270 270 self.applieddirty = 0
271 271 self.seriesdirty = 0
272 272 self.added = []
273 273 self.seriespath = "series"
274 274 self.statuspath = "status"
275 275 self.guardspath = "guards"
276 276 self.activeguards = None
277 277 self.guardsdirty = False
278 278 # Handle mq.git as a bool with extended values
279 279 try:
280 280 gitmode = ui.configbool('mq', 'git', None)
281 281 if gitmode is None:
282 282 raise error.ConfigError()
283 283 self.gitmode = gitmode and 'yes' or 'no'
284 284 except error.ConfigError:
285 285 self.gitmode = ui.config('mq', 'git', 'auto').lower()
286 286 self.plainmode = ui.configbool('mq', 'plain', False)
287 287
288 288 @util.propertycache
289 289 def applied(self):
290 290 if os.path.exists(self.join(self.statuspath)):
291 291 def parselines(lines):
292 292 for l in lines:
293 293 entry = l.split(':', 1)
294 294 if len(entry) > 1:
295 295 n, name = entry
296 296 yield statusentry(bin(n), name)
297 297 elif l.strip():
298 298 self.ui.warn(_('malformed mq status line: %s\n') % entry)
299 299 # else we ignore empty lines
300 300 lines = self.opener.read(self.statuspath).splitlines()
301 301 return list(parselines(lines))
302 302 return []
303 303
304 304 @util.propertycache
305 305 def fullseries(self):
306 306 if os.path.exists(self.join(self.seriespath)):
307 307 return self.opener.read(self.seriespath).splitlines()
308 308 return []
309 309
310 310 @util.propertycache
311 311 def series(self):
312 312 self.parseseries()
313 313 return self.series
314 314
315 315 @util.propertycache
316 316 def seriesguards(self):
317 317 self.parseseries()
318 318 return self.seriesguards
319 319
320 320 def invalidate(self):
321 321 for a in 'applied fullseries series seriesguards'.split():
322 322 if a in self.__dict__:
323 323 delattr(self, a)
324 324 self.applieddirty = 0
325 325 self.seriesdirty = 0
326 326 self.guardsdirty = False
327 327 self.activeguards = None
328 328
329 329 def diffopts(self, opts={}, patchfn=None):
330 330 diffopts = patchmod.diffopts(self.ui, opts)
331 331 if self.gitmode == 'auto':
332 332 diffopts.upgrade = True
333 333 elif self.gitmode == 'keep':
334 334 pass
335 335 elif self.gitmode in ('yes', 'no'):
336 336 diffopts.git = self.gitmode == 'yes'
337 337 else:
338 338 raise util.Abort(_('mq.git option can be auto/keep/yes/no'
339 339 ', got %s') % self.gitmode)
340 340 if patchfn:
341 341 diffopts = self.patchopts(diffopts, patchfn)
342 342 return diffopts
343 343
344 344 def patchopts(self, diffopts, *patches):
345 345 """Return a copy of input diff options with git set to true if
346 346 referenced patch is a git patch and should be preserved as such.
347 347 """
348 348 diffopts = diffopts.copy()
349 349 if not diffopts.git and self.gitmode == 'keep':
350 350 for patchfn in patches:
351 351 patchf = self.opener(patchfn, 'r')
352 352 # if the patch was a git patch, refresh it as a git patch
353 353 for line in patchf:
354 354 if line.startswith('diff --git'):
355 355 diffopts.git = True
356 356 break
357 357 patchf.close()
358 358 return diffopts
359 359
360 360 def join(self, *p):
361 361 return os.path.join(self.path, *p)
362 362
363 363 def findseries(self, patch):
364 364 def matchpatch(l):
365 365 l = l.split('#', 1)[0]
366 366 return l.strip() == patch
367 367 for index, l in enumerate(self.fullseries):
368 368 if matchpatch(l):
369 369 return index
370 370 return None
371 371
372 372 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
373 373
374 374 def parseseries(self):
375 375 self.series = []
376 376 self.seriesguards = []
377 377 for l in self.fullseries:
378 378 h = l.find('#')
379 379 if h == -1:
380 380 patch = l
381 381 comment = ''
382 382 elif h == 0:
383 383 continue
384 384 else:
385 385 patch = l[:h]
386 386 comment = l[h:]
387 387 patch = patch.strip()
388 388 if patch:
389 389 if patch in self.series:
390 390 raise util.Abort(_('%s appears more than once in %s') %
391 391 (patch, self.join(self.seriespath)))
392 392 self.series.append(patch)
393 393 self.seriesguards.append(self.guard_re.findall(comment))
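
# An illustrative series line (assumed content) and how it parses:
#   "fix-build.patch #+stable #-experimental"
# becomes series entry 'fix-build.patch' with guards
# ['+stable', '-experimental'] captured by guard_re above.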
394 394
395 395 def checkguard(self, guard):
396 396 if not guard:
397 397 return _('guard cannot be an empty string')
398 398 bad_chars = '# \t\r\n\f'
399 399 first = guard[0]
400 400 if first in '-+':
401 401 return (_('guard %r starts with invalid character: %r') %
402 402 (guard, first))
403 403 for c in bad_chars:
404 404 if c in guard:
405 405 return _('invalid character in guard %r: %r') % (guard, c)
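
# Illustrative checks (a sketch): checkguard('stable') returns None
# (valid), while checkguard('+stable') returns the "starts with invalid
# character" message; setguards strips the leading '+'/'-' before calling
# checkguard, and setactive expects bare guard names.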
406 406
407 407 def setactive(self, guards):
408 408 for guard in guards:
409 409 bad = self.checkguard(guard)
410 410 if bad:
411 411 raise util.Abort(bad)
412 412 guards = sorted(set(guards))
413 413 self.ui.debug('active guards: %s\n' % ' '.join(guards))
414 414 self.activeguards = guards
415 415 self.guardsdirty = True
416 416
417 417 def active(self):
418 418 if self.activeguards is None:
419 419 self.activeguards = []
420 420 try:
421 421 guards = self.opener.read(self.guardspath).split()
422 422 except IOError, err:
423 423 if err.errno != errno.ENOENT:
424 424 raise
425 425 guards = []
426 426 for i, guard in enumerate(guards):
427 427 bad = self.checkguard(guard)
428 428 if bad:
429 429 self.ui.warn('%s:%d: %s\n' %
430 430 (self.join(self.guardspath), i + 1, bad))
431 431 else:
432 432 self.activeguards.append(guard)
433 433 return self.activeguards
434 434
435 435 def setguards(self, idx, guards):
436 436 for g in guards:
437 437 if len(g) < 2:
438 438 raise util.Abort(_('guard %r too short') % g)
439 439 if g[0] not in '-+':
440 440 raise util.Abort(_('guard %r starts with invalid char') % g)
441 441 bad = self.checkguard(g[1:])
442 442 if bad:
443 443 raise util.Abort(bad)
444 444 drop = self.guard_re.sub('', self.fullseries[idx])
445 445 self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
446 446 self.parseseries()
447 447 self.seriesdirty = True
448 448
449 449 def pushable(self, idx):
450 450 if isinstance(idx, str):
451 451 idx = self.series.index(idx)
452 452 patchguards = self.seriesguards[idx]
453 453 if not patchguards:
454 454 return True, None
455 455 guards = self.active()
456 456 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
457 457 if exactneg:
458 458 return False, repr(exactneg[0])
459 459 pos = [g for g in patchguards if g[0] == '+']
460 460 exactpos = [g for g in pos if g[1:] in guards]
461 461 if pos:
462 462 if exactpos:
463 463 return True, repr(exactpos[0])
464 464 return False, ' '.join(map(repr, pos))
465 465 return True, ''
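
# Worked example (assumed guard state): with active guards ['stable'],
# a patch guarded '+stable' is pushable (exact positive match), one
# guarded '-stable' is blocked by the negative guard, and one guarded
# only '+other' is skipped because no positive guard matches.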
466 466
467 467 def explainpushable(self, idx, all_patches=False):
468 468 write = all_patches and self.ui.write or self.ui.warn
469 469 if all_patches or self.ui.verbose:
470 470 if isinstance(idx, str):
471 471 idx = self.series.index(idx)
472 472 pushable, why = self.pushable(idx)
473 473 if all_patches and pushable:
474 474 if why is None:
475 475 write(_('allowing %s - no guards in effect\n') %
476 476 self.series[idx])
477 477 else:
478 478 if not why:
479 479 write(_('allowing %s - no matching negative guards\n') %
480 480 self.series[idx])
481 481 else:
482 482 write(_('allowing %s - guarded by %s\n') %
483 483 (self.series[idx], why))
484 484 if not pushable:
485 485 if why:
486 486 write(_('skipping %s - guarded by %s\n') %
487 487 (self.series[idx], why))
488 488 else:
489 489 write(_('skipping %s - no matching guards\n') %
490 490 self.series[idx])
491 491
492 492 def savedirty(self):
493 493 def writelist(items, path):
494 494 fp = self.opener(path, 'w')
495 495 for i in items:
496 496 fp.write("%s\n" % i)
497 497 fp.close()
498 498 if self.applieddirty:
499 499 writelist(map(str, self.applied), self.statuspath)
500 500 if self.seriesdirty:
501 501 writelist(self.fullseries, self.seriespath)
502 502 if self.guardsdirty:
503 503 writelist(self.activeguards, self.guardspath)
504 504 if self.added:
505 505 qrepo = self.qrepo()
506 506 if qrepo:
507 507 qrepo[None].add(f for f in self.added if f not in qrepo[None])
508 508 self.added = []
509 509
510 510 def removeundo(self, repo):
511 511 undo = repo.sjoin('undo')
512 512 if not os.path.exists(undo):
513 513 return
514 514 try:
515 515 os.unlink(undo)
516 516 except OSError, inst:
517 517 self.ui.warn(_('error removing undo: %s\n') % str(inst))
518 518
519 519 def printdiff(self, repo, diffopts, node1, node2=None, files=None,
520 520 fp=None, changes=None, opts={}):
521 521 stat = opts.get('stat')
522 522 m = scmutil.match(repo[node1], files, opts)
523 523 cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
524 524 changes, stat, fp)
525 525
526 526 def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
527 527 # first try just applying the patch
528 528 (err, n) = self.apply(repo, [patch], update_status=False,
529 529 strict=True, merge=rev)
530 530
531 531 if err == 0:
532 532 return (err, n)
533 533
534 534 if n is None:
535 535 raise util.Abort(_("apply failed for patch %s") % patch)
536 536
537 537 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
538 538
539 539 # apply failed, strip away that rev and merge.
540 540 hg.clean(repo, head)
541 541 self.strip(repo, [n], update=False, backup='strip')
542 542
543 543 ctx = repo[rev]
544 544 ret = hg.merge(repo, rev)
545 545 if ret:
546 546 raise util.Abort(_("update returned %d") % ret)
547 547 n = repo.commit(ctx.description(), ctx.user(), force=True)
548 548 if n is None:
549 549 raise util.Abort(_("repo commit failed"))
550 550 try:
551 551 ph = patchheader(mergeq.join(patch), self.plainmode)
552 552 except:
553 553 raise util.Abort(_("unable to read %s") % patch)
554 554
555 555 diffopts = self.patchopts(diffopts, patch)
556 556 patchf = self.opener(patch, "w")
557 557 comments = str(ph)
558 558 if comments:
559 559 patchf.write(comments)
560 560 self.printdiff(repo, diffopts, head, n, fp=patchf)
561 561 patchf.close()
562 562 self.removeundo(repo)
563 563 return (0, n)
564 564
565 565 def qparents(self, repo, rev=None):
566 566 if rev is None:
567 567 (p1, p2) = repo.dirstate.parents()
568 568 if p2 == nullid:
569 569 return p1
570 570 if not self.applied:
571 571 return None
572 572 return self.applied[-1].node
573 573 p1, p2 = repo.changelog.parents(rev)
574 574 if p2 != nullid and p2 in [x.node for x in self.applied]:
575 575 return p2
576 576 return p1
577 577
578 578 def mergepatch(self, repo, mergeq, series, diffopts):
579 579 if not self.applied:
580 580 # each of the patches merged in will have two parents. This
581 581 # can confuse the qrefresh, qdiff, and strip code because it
582 582 # needs to know which parent is actually in the patch queue.
583 583 # so, we insert a merge marker with only one parent. This way
584 584 # the first patch in the queue is never a merge patch
585 585 #
586 586 pname = ".hg.patches.merge.marker"
587 587 n = repo.commit('[mq]: merge marker', force=True)
588 588 self.removeundo(repo)
589 589 self.applied.append(statusentry(n, pname))
590 590 self.applieddirty = 1
591 591
592 592 head = self.qparents(repo)
593 593
594 594 for patch in series:
595 595 patch = mergeq.lookup(patch, strict=True)
596 596 if not patch:
597 597 self.ui.warn(_("patch %s does not exist\n") % patch)
598 598 return (1, None)
599 599 pushable, reason = self.pushable(patch)
600 600 if not pushable:
601 601 self.explainpushable(patch, all_patches=True)
602 602 continue
603 603 info = mergeq.isapplied(patch)
604 604 if not info:
605 605 self.ui.warn(_("patch %s is not applied\n") % patch)
606 606 return (1, None)
607 607 rev = info[1]
608 608 err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
609 609 if head:
610 610 self.applied.append(statusentry(head, patch))
611 611 self.applieddirty = 1
612 612 if err:
613 613 return (err, head)
614 614 self.savedirty()
615 615 return (0, head)
616 616
617 617 def patch(self, repo, patchfile):
618 618 '''Apply patchfile to the working directory.
619 619 patchfile: name of patch file'''
620 620 files = set()
621 621 try:
622 622 fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
623 623 files=files, eolmode=None)
624 624 return (True, list(files), fuzz)
625 625 except Exception, inst:
626 626 self.ui.note(str(inst) + '\n')
627 627 if not self.ui.verbose:
628 628 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
629 629 return (False, list(files), False)
630 630
631 631 def apply(self, repo, series, list=False, update_status=True,
632 632 strict=False, patchdir=None, merge=None, all_files=None):
633 633 wlock = lock = tr = None
634 634 try:
635 635 wlock = repo.wlock()
636 636 lock = repo.lock()
637 637 tr = repo.transaction("qpush")
638 638 try:
639 639 ret = self._apply(repo, series, list, update_status,
640 640 strict, patchdir, merge, all_files=all_files)
641 641 tr.close()
642 642 self.savedirty()
643 643 return ret
644 644 except:
645 645 try:
646 646 tr.abort()
647 647 finally:
648 648 repo.invalidate()
649 649 repo.dirstate.invalidate()
650 650 raise
651 651 finally:
652 652 release(tr, lock, wlock)
653 653 self.removeundo(repo)
654 654
655 655 def _apply(self, repo, series, list=False, update_status=True,
656 656 strict=False, patchdir=None, merge=None, all_files=None):
657 657 '''returns (error, hash)
658 658 error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
659 659 # TODO unify with commands.py
660 660 if not patchdir:
661 661 patchdir = self.path
662 662 err = 0
663 663 n = None
664 664 for patchname in series:
665 665 pushable, reason = self.pushable(patchname)
666 666 if not pushable:
667 667 self.explainpushable(patchname, all_patches=True)
668 668 continue
669 669 self.ui.status(_("applying %s\n") % patchname)
670 670 pf = os.path.join(patchdir, patchname)
671 671
672 672 try:
673 673 ph = patchheader(self.join(patchname), self.plainmode)
674 674 except IOError:
675 675 self.ui.warn(_("unable to read %s\n") % patchname)
676 676 err = 1
677 677 break
678 678
679 679 message = ph.message
680 680 if not message:
681 681 # The commit message should not be translated
682 682 message = "imported patch %s\n" % patchname
683 683 else:
684 684 if list:
685 685 # The commit message should not be translated
686 686 message.append("\nimported patch %s" % patchname)
687 687 message = '\n'.join(message)
688 688
689 689 if ph.haspatch:
690 690 (patcherr, files, fuzz) = self.patch(repo, pf)
691 691 if all_files is not None:
692 692 all_files.update(files)
693 693 patcherr = not patcherr
694 694 else:
695 695 self.ui.warn(_("patch %s is empty\n") % patchname)
696 696 patcherr, files, fuzz = 0, [], 0
697 697
698 698 if merge and files:
699 699 # Mark as removed/merged and update dirstate parent info
700 700 removed = []
701 701 merged = []
702 702 for f in files:
703 703 if os.path.lexists(repo.wjoin(f)):
704 704 merged.append(f)
705 705 else:
706 706 removed.append(f)
707 707 for f in removed:
708 708 repo.dirstate.remove(f)
709 709 for f in merged:
710 710 repo.dirstate.merge(f)
711 711 p1, p2 = repo.dirstate.parents()
712 712 repo.dirstate.setparents(p1, merge)
713 713
714 714 match = scmutil.matchfiles(repo, files or [])
715 715 n = repo.commit(message, ph.user, ph.date, match=match, force=True)
716 716
717 717 if n is None:
718 718 raise util.Abort(_("repository commit failed"))
719 719
720 720 if update_status:
721 721 self.applied.append(statusentry(n, patchname))
722 722
723 723 if patcherr:
724 724 self.ui.warn(_("patch failed, rejects left in working dir\n"))
725 725 err = 2
726 726 break
727 727
728 728 if fuzz and strict:
729 729 self.ui.warn(_("fuzz found when applying patch, stopping\n"))
730 730 err = 3
731 731 break
732 732 return (err, n)
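
# Illustrative caller handling (a sketch mirroring push() further down;
# 'q' stands for a queue instance):
#
#   err, node = q.apply(repo, ['fix-build.patch'])
#   # err == 0: applied; 1: unreadable patch; 2: rejects left; 3: fuzz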
733 733
734 734 def _cleanup(self, patches, numrevs, keep=False):
735 735 if not keep:
736 736 r = self.qrepo()
737 737 if r:
738 738 r[None].forget(patches)
739 739 for p in patches:
740 740 os.unlink(self.join(p))
741 741
742 742 if numrevs:
743 743 qfinished = self.applied[:numrevs]
744 744 del self.applied[:numrevs]
745 745 self.applieddirty = 1
746 746
747 747 unknown = []
748 748
749 749 for (i, p) in sorted([(self.findseries(p), p) for p in patches],
750 750 reverse=True):
751 751 if i is not None:
752 752 del self.fullseries[i]
753 753 else:
754 754 unknown.append(p)
755 755
756 756 if unknown:
757 757 if numrevs:
758 758 rev = dict((entry.name, entry.node) for entry in qfinished)
759 759 for p in unknown:
760 760 msg = _('revision %s refers to unknown patches: %s\n')
761 761 self.ui.warn(msg % (short(rev[p]), p))
762 762 else:
763 763 msg = _('unknown patches: %s\n')
764 764 raise util.Abort(''.join(msg % p for p in unknown))
765 765
766 766 self.parseseries()
767 767 self.seriesdirty = 1
768 768
769 769 def _revpatches(self, repo, revs):
770 770 firstrev = repo[self.applied[0].node].rev()
771 771 patches = []
772 772 for i, rev in enumerate(revs):
773 773
774 774 if rev < firstrev:
775 775 raise util.Abort(_('revision %d is not managed') % rev)
776 776
777 777 ctx = repo[rev]
778 778 base = self.applied[i].node
779 779 if ctx.node() != base:
780 780 msg = _('cannot delete revision %d above applied patches')
781 781 raise util.Abort(msg % rev)
782 782
783 783 patch = self.applied[i].name
784 784 for fmt in ('[mq]: %s', 'imported patch %s'):
785 785 if ctx.description() == fmt % patch:
786 786 msg = _('patch %s finalized without changeset message\n')
787 787 repo.ui.status(msg % patch)
788 788 break
789 789
790 790 patches.append(patch)
791 791 return patches
792 792
793 793 def finish(self, repo, revs):
794 794 patches = self._revpatches(repo, sorted(revs))
795 795 self._cleanup(patches, len(patches))
796 796
797 797 def delete(self, repo, patches, opts):
798 798 if not patches and not opts.get('rev'):
799 799 raise util.Abort(_('qdelete requires at least one revision or '
800 800 'patch name'))
801 801
802 802 realpatches = []
803 803 for patch in patches:
804 804 patch = self.lookup(patch, strict=True)
805 805 info = self.isapplied(patch)
806 806 if info:
807 807 raise util.Abort(_("cannot delete applied patch %s") % patch)
808 808 if patch not in self.series:
809 809 raise util.Abort(_("patch %s not in series file") % patch)
810 810 if patch not in realpatches:
811 811 realpatches.append(patch)
812 812
813 813 numrevs = 0
814 814 if opts.get('rev'):
815 815 if not self.applied:
816 816 raise util.Abort(_('no patches applied'))
817 817 revs = scmutil.revrange(repo, opts.get('rev'))
818 818 if len(revs) > 1 and revs[0] > revs[1]:
819 819 revs.reverse()
820 820 revpatches = self._revpatches(repo, revs)
821 821 realpatches += revpatches
822 822 numrevs = len(revpatches)
823 823
824 824 self._cleanup(realpatches, numrevs, opts.get('keep'))
825 825
826 826 def checktoppatch(self, repo):
827 827 if self.applied:
828 828 top = self.applied[-1].node
829 829 patch = self.applied[-1].name
830 830 pp = repo.dirstate.parents()
831 831 if top not in pp:
832 832 raise util.Abort(_("working directory revision is not qtip"))
833 833 return top, patch
834 834 return None, None
835 835
836 836 def checksubstate(self, repo):
837 837 '''return list of subrepos at a different revision than substate.
838 838 Abort if any subrepos have uncommitted changes.'''
839 839 inclsubs = []
840 840 wctx = repo[None]
841 841 for s in wctx.substate:
842 842 if wctx.sub(s).dirty(True):
843 843 raise util.Abort(
844 844 _("uncommitted changes in subrepository %s") % s)
845 845 elif wctx.sub(s).dirty():
846 846 inclsubs.append(s)
847 847 return inclsubs
848 848
849 849 def localchangesfound(self, refresh=True):
850 850 if refresh:
851 851 raise util.Abort(_("local changes found, refresh first"))
852 852 else:
853 853 raise util.Abort(_("local changes found"))
854 854
855 855 def checklocalchanges(self, repo, force=False, refresh=True):
856 856 m, a, r, d = repo.status()[:4]
857 857 if (m or a or r or d) and not force:
858 858 self.localchangesfound(refresh)
859 859 return m, a, r, d
860 860
861 861 _reserved = ('series', 'status', 'guards', '.', '..')
862 862 def checkreservedname(self, name):
863 863 if name in self._reserved:
864 864 raise util.Abort(_('"%s" cannot be used as the name of a patch')
865 865 % name)
866 866 for prefix in ('.hg', '.mq'):
867 867 if name.startswith(prefix):
868 868 raise util.Abort(_('patch name cannot begin with "%s"')
869 869 % prefix)
870 870 for c in ('#', ':'):
871 871 if c in name:
872 872 raise util.Abort(_('"%s" cannot be used in the name of a patch')
873 873 % c)
874 874
875 875 def checkpatchname(self, name, force=False):
876 876 self.checkreservedname(name)
877 877 if not force and os.path.exists(self.join(name)):
878 878 if os.path.isdir(self.join(name)):
879 879 raise util.Abort(_('"%s" already exists as a directory')
880 880 % name)
881 881 else:
882 882 raise util.Abort(_('patch "%s" already exists') % name)
883 883
884 884 def new(self, repo, patchfn, *pats, **opts):
885 885 """options:
886 886 msg: a string or a no-argument function returning a string
887 887 """
888 888 msg = opts.get('msg')
889 889 user = opts.get('user')
890 890 date = opts.get('date')
891 891 if date:
892 892 date = util.parsedate(date)
893 893 diffopts = self.diffopts({'git': opts.get('git')})
894 894 if opts.get('checkname', True):
895 895 self.checkpatchname(patchfn)
896 896 inclsubs = self.checksubstate(repo)
897 897 if inclsubs:
898 898 inclsubs.append('.hgsubstate')
899 899 if opts.get('include') or opts.get('exclude') or pats:
900 900 if inclsubs:
901 901 pats = list(pats or []) + inclsubs
902 902 match = scmutil.match(repo[None], pats, opts)
903 903 # detect missing files in pats
904 904 def badfn(f, msg):
905 905 if f != '.hgsubstate': # .hgsubstate is auto-created
906 906 raise util.Abort('%s: %s' % (f, msg))
907 907 match.bad = badfn
908 908 m, a, r, d = repo.status(match=match)[:4]
909 909 else:
910 910 m, a, r, d = self.checklocalchanges(repo, force=True)
911 911 match = scmutil.matchfiles(repo, m + a + r + inclsubs)
912 912 if len(repo[None].parents()) > 1:
913 913 raise util.Abort(_('cannot manage merge changesets'))
914 914 commitfiles = m + a + r
915 915 self.checktoppatch(repo)
916 916 insert = self.fullseriesend()
917 917 wlock = repo.wlock()
918 918 try:
919 919 try:
920 920 # if patch file write fails, abort early
921 921 p = self.opener(patchfn, "w")
922 922 except IOError, e:
923 923 raise util.Abort(_('cannot write patch "%s": %s')
924 924 % (patchfn, e.strerror))
925 925 try:
926 926 if self.plainmode:
927 927 if user:
928 928 p.write("From: " + user + "\n")
929 929 if not date:
930 930 p.write("\n")
931 931 if date:
932 932 p.write("Date: %d %d\n\n" % date)
933 933 else:
934 934 p.write("# HG changeset patch\n")
935 935 p.write("# Parent "
936 936 + hex(repo[None].p1().node()) + "\n")
937 937 if user:
938 938 p.write("# User " + user + "\n")
939 939 if date:
940 940 p.write("# Date %s %s\n\n" % date)
941 941 if util.safehasattr(msg, '__call__'):
942 942 msg = msg()
943 943 commitmsg = msg and msg or ("[mq]: %s" % patchfn)
944 944 n = repo.commit(commitmsg, user, date, match=match, force=True)
945 945 if n is None:
946 946 raise util.Abort(_("repo commit failed"))
947 947 try:
948 948 self.fullseries[insert:insert] = [patchfn]
949 949 self.applied.append(statusentry(n, patchfn))
950 950 self.parseseries()
951 951 self.seriesdirty = 1
952 952 self.applieddirty = 1
953 953 if msg:
954 954 msg = msg + "\n\n"
955 955 p.write(msg)
956 956 if commitfiles:
957 957 parent = self.qparents(repo, n)
958 958 chunks = patchmod.diff(repo, node1=parent, node2=n,
959 959 match=match, opts=diffopts)
960 960 for chunk in chunks:
961 961 p.write(chunk)
962 962 p.close()
963 963 wlock.release()
964 964 wlock = None
965 965 r = self.qrepo()
966 966 if r:
967 967 r[None].add([patchfn])
968 968 except:
969 969 repo.rollback()
970 970 raise
971 971 except Exception:
972 972 patchpath = self.join(patchfn)
973 973 try:
974 974 os.unlink(patchpath)
975 975 except:
976 976 self.ui.warn(_('error unlinking %s\n') % patchpath)
977 977 raise
978 978 self.removeundo(repo)
979 979 finally:
980 980 release(wlock)
981 981
982 982 def strip(self, repo, revs, update=True, backup="all", force=None):
983 983 wlock = lock = None
984 984 try:
985 985 wlock = repo.wlock()
986 986 lock = repo.lock()
987 987
988 988 if update:
989 989 self.checklocalchanges(repo, force=force, refresh=False)
990 990 urev = self.qparents(repo, revs[0])
991 991 hg.clean(repo, urev)
992 992 repo.dirstate.write()
993 993
994 994 self.removeundo(repo)
995 995 for rev in revs:
996 996 repair.strip(self.ui, repo, rev, backup)
997 997 # strip may have unbundled a set of backed up revisions after
998 998 # the actual strip
999 999 self.removeundo(repo)
1000 1000 finally:
1001 1001 release(lock, wlock)
1002 1002
1003 1003 def isapplied(self, patch):
1004 1004 """returns (index, rev, patch)"""
1005 1005 for i, a in enumerate(self.applied):
1006 1006 if a.name == patch:
1007 1007 return (i, a.node, a.name)
1008 1008 return None
1009 1009
1010 1010 # if the exact patch name does not exist, we try a few
1011 1011 # variations. If strict is passed, we try only #1
1012 1012 #
1013 1013 # 1) a number to indicate an offset in the series file
1014 1014 # 2) a unique substring of the patch name was given
1015 1015 # 3) patchname[-+]num to indicate an offset in the series file
1016 1016 def lookup(self, patch, strict=False):
1017 1017 patch = patch and str(patch)
1018 1018
1019 1019 def partialname(s):
1020 1020 if s in self.series:
1021 1021 return s
1022 1022 matches = [x for x in self.series if s in x]
1023 1023 if len(matches) > 1:
1024 1024 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
1025 1025 for m in matches:
1026 1026 self.ui.warn(' %s\n' % m)
1027 1027 return None
1028 1028 if matches:
1029 1029 return matches[0]
1030 1030 if self.series and self.applied:
1031 1031 if s == 'qtip':
1032 1032 return self.series[self.seriesend(True)-1]
1033 1033 if s == 'qbase':
1034 1034 return self.series[0]
1035 1035 return None
1036 1036
1037 1037 if patch is None:
1038 1038 return None
1039 1039 if patch in self.series:
1040 1040 return patch
1041 1041
1042 1042 if not os.path.isfile(self.join(patch)):
1043 1043 try:
1044 1044 sno = int(patch)
1045 1045 except (ValueError, OverflowError):
1046 1046 pass
1047 1047 else:
1048 1048 if -len(self.series) <= sno < len(self.series):
1049 1049 return self.series[sno]
1050 1050
1051 1051 if not strict:
1052 1052 res = partialname(patch)
1053 1053 if res:
1054 1054 return res
1055 1055 minus = patch.rfind('-')
1056 1056 if minus >= 0:
1057 1057 res = partialname(patch[:minus])
1058 1058 if res:
1059 1059 i = self.series.index(res)
1060 1060 try:
1061 1061 off = int(patch[minus + 1:] or 1)
1062 1062 except (ValueError, OverflowError):
1063 1063 pass
1064 1064 else:
1065 1065 if i - off >= 0:
1066 1066 return self.series[i - off]
1067 1067 plus = patch.rfind('+')
1068 1068 if plus >= 0:
1069 1069 res = partialname(patch[:plus])
1070 1070 if res:
1071 1071 i = self.series.index(res)
1072 1072 try:
1073 1073 off = int(patch[plus + 1:] or 1)
1074 1074 except (ValueError, OverflowError):
1075 1075 pass
1076 1076 else:
1077 1077 if i + off < len(self.series):
1078 1078 return self.series[i + off]
1079 1079 raise util.Abort(_("patch %s not in series") % patch)
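
# Illustrative lookups (assuming series = ['a.patch', 'b.patch',
# 'c.patch'] and no file literally named '1' in the patch directory):
#
#   lookup('1')         -> 'b.patch'  (offset into the series file)
#   lookup('b')         -> 'b.patch'  (unique substring of a name)
#   lookup('c.patch-1') -> 'b.patch'  (patchname-num, one step back)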
1080 1080
1081 1081 def push(self, repo, patch=None, force=False, list=False,
1082 1082 mergeq=None, all=False, move=False, exact=False):
1083 1083 diffopts = self.diffopts()
1084 1084 wlock = repo.wlock()
1085 1085 try:
1086 1086 heads = []
1087 1087 for b, ls in repo.branchmap().iteritems():
1088 1088 heads += ls
1089 1089 if not heads:
1090 1090 heads = [nullid]
1091 1091 if repo.dirstate.p1() not in heads and not exact:
1092 1092 self.ui.status(_("(working directory not at a head)\n"))
1093 1093
1094 1094 if not self.series:
1095 1095 self.ui.warn(_('no patches in series\n'))
1096 1096 return 0
1097 1097
1098 1098 patch = self.lookup(patch)
1099 1099 # Suppose our series file is: A B C and the current 'top'
1100 1100 # patch is B. qpush C should be performed (moving forward);
1101 1101 # qpush B is a no-op (no change); qpush A is an error (can't
1102 1102 # go backwards with qpush).
1103 1103 if patch:
1104 1104 info = self.isapplied(patch)
1105 1105 if info and info[0] >= len(self.applied) - 1:
1106 1106 self.ui.warn(
1107 1107 _('qpush: %s is already at the top\n') % patch)
1108 1108 return 0
1109 1109
1110 1110 pushable, reason = self.pushable(patch)
1111 1111 if pushable:
1112 1112 if self.series.index(patch) < self.seriesend():
1113 1113 raise util.Abort(
1114 1114 _("cannot push to a previous patch: %s") % patch)
1115 1115 else:
1116 1116 if reason:
1117 1117 reason = _('guarded by %s') % reason
1118 1118 else:
1119 1119 reason = _('no matching guards')
1120 1120 self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
1121 1121 return 1
1122 1122 elif all:
1123 1123 patch = self.series[-1]
1124 1124 if self.isapplied(patch):
1125 1125 self.ui.warn(_('all patches are currently applied\n'))
1126 1126 return 0
1127 1127
1128 1128 # Following the above example, starting at 'top' of B:
1129 1129 # qpush should be performed (pushes C), but a subsequent
1130 1130 # qpush without an argument is an error (nothing to
1131 1131 # apply). This allows a loop of "...while hg qpush..." to
1132 1132 # work as it detects an error when done
1133 1133 start = self.seriesend()
1134 1134 if start == len(self.series):
1135 1135 self.ui.warn(_('patch series already fully applied\n'))
1136 1136 return 1
1137 1137 if not force:
1138 1138 self.checklocalchanges(repo, refresh=self.applied)
1139 1139
1140 1140 if exact:
1141 1141 if move:
1142 1142 raise util.Abort(_("cannot use --exact and --move together"))
1143 1143 if self.applied:
1144 1144 raise util.Abort(_("cannot push --exact with applied patches"))
1145 1145 root = self.series[start]
1146 1146 target = patchheader(self.join(root), self.plainmode).parent
1147 1147 if not target:
1148 1148 raise util.Abort(_("%s does not have a parent recorded" % root))
1149 1149 if not repo[target] == repo['.']:
1150 1150 hg.update(repo, target)
1151 1151
1152 1152 if move:
1153 1153 if not patch:
1154 1154 raise util.Abort(_("please specify the patch to move"))
1155 1155 for i, rpn in enumerate(self.fullseries[start:]):
1156 1156 # strip markers for patch guards
1157 1157 if self.guard_re.split(rpn, 1)[0] == patch:
1158 1158 break
1159 1159 index = start + i
1160 1160 assert index < len(self.fullseries)
1161 1161 fullpatch = self.fullseries[index]
1162 1162 del self.fullseries[index]
1163 1163 self.fullseries.insert(start, fullpatch)
1164 1164 self.parseseries()
1165 1165 self.seriesdirty = 1
1166 1166
1167 1167 self.applieddirty = 1
1168 1168 if start > 0:
1169 1169 self.checktoppatch(repo)
1170 1170 if not patch:
1171 1171 patch = self.series[start]
1172 1172 end = start + 1
1173 1173 else:
1174 1174 end = self.series.index(patch, start) + 1
1175 1175
1176 1176 s = self.series[start:end]
1177 1177 all_files = set()
1178 1178 try:
1179 1179 if mergeq:
1180 1180 ret = self.mergepatch(repo, mergeq, s, diffopts)
1181 1181 else:
1182 1182 ret = self.apply(repo, s, list, all_files=all_files)
1183 1183 except:
1184 1184 self.ui.warn(_('cleaning up working directory...'))
1185 1185 node = repo.dirstate.p1()
1186 1186 hg.revert(repo, node, None)
1187 1187 # only remove unknown files that we know we touched or
1188 1188 # created while patching
1189 1189 for f in all_files:
1190 1190 if f not in repo.dirstate:
1191 1191 try:
1192 1192 util.unlinkpath(repo.wjoin(f))
1193 1193 except OSError, inst:
1194 1194 if inst.errno != errno.ENOENT:
1195 1195 raise
1196 1196 self.ui.warn(_('done\n'))
1197 1197 raise
1198 1198
1199 1199 if not self.applied:
1200 1200 return ret[0]
1201 1201 top = self.applied[-1].name
1202 1202 if ret[0] and ret[0] > 1:
1203 1203 msg = _("errors during apply, please fix and refresh %s\n")
1204 1204 self.ui.write(msg % top)
1205 1205 else:
1206 1206 self.ui.write(_("now at: %s\n") % top)
1207 1207 return ret[0]
1208 1208
1209 1209 finally:
1210 1210 wlock.release()
1211 1211
1212 1212 def pop(self, repo, patch=None, force=False, update=True, all=False):
1213 1213 wlock = repo.wlock()
1214 1214 try:
1215 1215 if patch:
1216 1216 # index, rev, patch
1217 1217 info = self.isapplied(patch)
1218 1218 if not info:
1219 1219 patch = self.lookup(patch)
1220 1220 info = self.isapplied(patch)
1221 1221 if not info:
1222 1222 raise util.Abort(_("patch %s is not applied") % patch)
1223 1223
1224 1224 if not self.applied:
1225 1225 # Allow qpop -a to work repeatedly,
1226 1226 # but not qpop without an argument
1227 1227 self.ui.warn(_("no patches applied\n"))
1228 1228 return not all
1229 1229
1230 1230 if all:
1231 1231 start = 0
1232 1232 elif patch:
1233 1233 start = info[0] + 1
1234 1234 else:
1235 1235 start = len(self.applied) - 1
1236 1236
1237 1237 if start >= len(self.applied):
1238 1238 self.ui.warn(_("qpop: %s is already at the top\n") % patch)
1239 1239 return
1240 1240
1241 1241 if not update:
1242 1242 parents = repo.dirstate.parents()
1243 1243 rr = [x.node for x in self.applied]
1244 1244 for p in parents:
1245 1245 if p in rr:
1246 1246 self.ui.warn(_("qpop: forcing dirstate update\n"))
1247 1247 update = True
1248 1248 else:
1249 1249 parents = [p.node() for p in repo[None].parents()]
1250 1250 needupdate = False
1251 1251 for entry in self.applied[start:]:
1252 1252 if entry.node in parents:
1253 1253 needupdate = True
1254 1254 break
1255 1255 update = needupdate
1256 1256
1257 1257 if not force and update:
1258 1258 self.checklocalchanges(repo)
1259 1259
1260 1260 self.applieddirty = 1
1261 1261 end = len(self.applied)
1262 1262 rev = self.applied[start].node
1263 1263 if update:
1264 1264 top = self.checktoppatch(repo)[0]
1265 1265
1266 1266 try:
1267 1267 heads = repo.changelog.heads(rev)
1268 1268 except error.LookupError:
1269 1269 node = short(rev)
1270 1270 raise util.Abort(_('trying to pop unknown node %s') % node)
1271 1271
1272 1272 if heads != [self.applied[-1].node]:
1273 1273 raise util.Abort(_("popping would remove a revision not "
1274 1274 "managed by this patch queue"))
1275 1275
1276 1276 # we know there are no local changes, so we can make a simplified
1277 1277 # form of hg.update.
1278 1278 if update:
1279 1279 qp = self.qparents(repo, rev)
1280 1280 ctx = repo[qp]
1281 1281 m, a, r, d = repo.status(qp, top)[:4]
1282 1282 if d:
1283 1283 raise util.Abort(_("deletions found between repo revs"))
1284 1284 for f in a:
1285 1285 try:
1286 1286 util.unlinkpath(repo.wjoin(f))
1287 1287 except OSError, e:
1288 1288 if e.errno != errno.ENOENT:
1289 1289 raise
1290 1290 repo.dirstate.drop(f)
1291 1291 for f in m + r:
1292 1292 fctx = ctx[f]
1293 1293 repo.wwrite(f, fctx.data(), fctx.flags())
1294 1294 repo.dirstate.normal(f)
1295 1295 repo.dirstate.setparents(qp, nullid)
1296 1296 for patch in reversed(self.applied[start:end]):
1297 1297 self.ui.status(_("popping %s\n") % patch.name)
1298 1298 del self.applied[start:end]
1299 1299 self.strip(repo, [rev], update=False, backup='strip')
1300 1300 if self.applied:
1301 1301 self.ui.write(_("now at: %s\n") % self.applied[-1].name)
1302 1302 else:
1303 1303 self.ui.write(_("patch queue now empty\n"))
1304 1304 finally:
1305 1305 wlock.release()
1306 1306
1307 1307 def diff(self, repo, pats, opts):
1308 1308 top, patch = self.checktoppatch(repo)
1309 1309 if not top:
1310 1310 self.ui.write(_("no patches applied\n"))
1311 1311 return
1312 1312 qp = self.qparents(repo, top)
1313 1313 if opts.get('reverse'):
1314 1314 node1, node2 = None, qp
1315 1315 else:
1316 1316 node1, node2 = qp, None
1317 1317 diffopts = self.diffopts(opts, patch)
1318 1318 self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)
1319 1319
1320 1320 def refresh(self, repo, pats=None, **opts):
1321 1321 if not self.applied:
1322 1322 self.ui.write(_("no patches applied\n"))
1323 1323 return 1
1324 1324 msg = opts.get('msg', '').rstrip()
1325 1325 newuser = opts.get('user')
1326 1326 newdate = opts.get('date')
1327 1327 if newdate:
1328 1328 newdate = '%d %d' % util.parsedate(newdate)
1329 1329 wlock = repo.wlock()
1330 1330
1331 1331 try:
1332 1332 self.checktoppatch(repo)
1333 1333 (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
1334 1334 if repo.changelog.heads(top) != [top]:
1335 1335 raise util.Abort(_("cannot refresh a revision with children"))
1336 1336
1337 1337 inclsubs = self.checksubstate(repo)
1338 1338
1339 1339 cparents = repo.changelog.parents(top)
1340 1340 patchparent = self.qparents(repo, top)
1341 1341 ph = patchheader(self.join(patchfn), self.plainmode)
1342 1342 diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
1343 1343 if msg:
1344 1344 ph.setmessage(msg)
1345 1345 if newuser:
1346 1346 ph.setuser(newuser)
1347 1347 if newdate:
1348 1348 ph.setdate(newdate)
1349 1349 ph.setparent(hex(patchparent))
1350 1350
1351 1351 # only commit new patch when write is complete
1352 1352 patchf = self.opener(patchfn, 'w', atomictemp=True)
1353 1353
1354 1354 comments = str(ph)
1355 1355 if comments:
1356 1356 patchf.write(comments)
1357 1357
1358 1358 # update the dirstate in place, strip off the qtip commit
1359 1359 # and then commit.
1360 1360 #
1361 1361 # this should really read:
1362 1362 # mm, dd, aa = repo.status(top, patchparent)[:3]
1363 1363 # but we do it backwards to take advantage of manifest/chlog
1364 1364 # caching against the next repo.status call
1365 1365 mm, aa, dd = repo.status(patchparent, top)[:3]
1366 1366 changes = repo.changelog.read(top)
1367 1367 man = repo.manifest.read(changes[0])
1368 1368 aaa = aa[:]
1369 1369 matchfn = scmutil.match(repo[None], pats, opts)
1370 1370 # in short mode, we only diff the files included in the
1371 1371 # patch already plus specified files
1372 1372 if opts.get('short'):
1373 1373 # if amending a patch, we start with existing
1374 1374 # files plus specified files - unfiltered
1375 1375 match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1376 1376 # filter with inc/exl options
1377 1377 matchfn = scmutil.match(repo[None], opts=opts)
1378 1378 else:
1379 1379 match = scmutil.matchall(repo)
1380 1380 m, a, r, d = repo.status(match=match)[:4]
1381 1381 mm = set(mm)
1382 1382 aa = set(aa)
1383 1383 dd = set(dd)
1384 1384
1385 1385 # we might end up with files that were added between
1386 1386 # qtip and the dirstate parent, but then changed in the
1387 1387 # local dirstate. in this case, we want them to only
1388 1388 # show up in the added section
1389 1389 for x in m:
1390 1390 if x not in aa:
1391 1391 mm.add(x)
1392 1392 # we might end up with files added by the local dirstate that
1393 1393 # were deleted by the patch. In this case, they should only
1394 1394 # show up in the changed section.
1395 1395 for x in a:
1396 1396 if x in dd:
1397 1397 dd.remove(x)
1398 1398 mm.add(x)
1399 1399 else:
1400 1400 aa.add(x)
1401 1401 # make sure any files deleted in the local dirstate
1402 1402 # are not in the add or change column of the patch
1403 1403 forget = []
1404 1404 for x in d + r:
1405 1405 if x in aa:
1406 1406 aa.remove(x)
1407 1407 forget.append(x)
1408 1408 continue
1409 1409 else:
1410 1410 mm.discard(x)
1411 1411 dd.add(x)
1412 1412
1413 1413 m = list(mm)
1414 1414 r = list(dd)
1415 1415 a = list(aa)
1416 1416 c = [filter(matchfn, l) for l in (m, a, r)]
1417 1417 match = scmutil.matchfiles(repo, set(c[0] + c[1] + c[2] + inclsubs))
1418 1418 chunks = patchmod.diff(repo, patchparent, match=match,
1419 1419 changes=c, opts=diffopts)
1420 1420 for chunk in chunks:
1421 1421 patchf.write(chunk)
1422 1422
1423 1423 try:
1424 1424 if diffopts.git or diffopts.upgrade:
1425 1425 copies = {}
1426 1426 for dst in a:
1427 1427 src = repo.dirstate.copied(dst)
1428 1428 # during qfold, the source file for copies may
1429 1429 # be removed. Treat this as a simple add.
1430 1430 if src is not None and src in repo.dirstate:
1431 1431 copies.setdefault(src, []).append(dst)
1432 1432 repo.dirstate.add(dst)
1433 1433 # remember the copies between patchparent and qtip
1434 1434 for dst in aaa:
1435 1435 f = repo.file(dst)
1436 1436 src = f.renamed(man[dst])
1437 1437 if src:
1438 1438 copies.setdefault(src[0], []).extend(
1439 1439 copies.get(dst, []))
1440 1440 if dst in a:
1441 1441 copies[src[0]].append(dst)
1442 1442 # we can't copy a file created by the patch itself
1443 1443 if dst in copies:
1444 1444 del copies[dst]
1445 1445 for src, dsts in copies.iteritems():
1446 1446 for dst in dsts:
1447 1447 repo.dirstate.copy(src, dst)
1448 1448 else:
1449 1449 for dst in a:
1450 1450 repo.dirstate.add(dst)
1451 1451 # Drop useless copy information
1452 1452 for f in list(repo.dirstate.copies()):
1453 1453 repo.dirstate.copy(None, f)
1454 1454 for f in r:
1455 1455 repo.dirstate.remove(f)
1456 1456 # if the patch excludes a modified file, mark that
1457 1457 # file with mtime=0 so status can see it.
1458 1458 mm = []
1459 1459 for i in xrange(len(m)-1, -1, -1):
1460 1460 if not matchfn(m[i]):
1461 1461 mm.append(m[i])
1462 1462 del m[i]
1463 1463 for f in m:
1464 1464 repo.dirstate.normal(f)
1465 1465 for f in mm:
1466 1466 repo.dirstate.normallookup(f)
1467 1467 for f in forget:
1468 1468 repo.dirstate.drop(f)
1469 1469
1470 1470 if not msg:
1471 1471 if not ph.message:
1472 1472 message = "[mq]: %s\n" % patchfn
1473 1473 else:
1474 1474 message = "\n".join(ph.message)
1475 1475 else:
1476 1476 message = msg
1477 1477
1478 1478 user = ph.user or changes[1]
1479 1479
1480 1480 # assumes strip can roll itself back if interrupted
1481 1481 repo.dirstate.setparents(*cparents)
1482 1482 self.applied.pop()
1483 1483 self.applieddirty = 1
1484 1484 self.strip(repo, [top], update=False,
1485 1485 backup='strip')
1486 1486 except:
1487 1487 repo.dirstate.invalidate()
1488 1488 raise
1489 1489
1490 1490 try:
1491 1491 # might be nice to attempt to roll back strip after this
1492 1492 n = repo.commit(message, user, ph.date, match=match,
1493 1493 force=True)
1494 1494 # only write patch after a successful commit
1495 - patchf.rename()
1495 + patchf.close()
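# A sketch of the atomictempfile contract this change relies on (per the
# commit message; assuming mercurial.util's atomictempfile API):
#
#   patchf = self.opener(patchfn, 'w', atomictemp=True)
#   patchf.write(...)
#   patchf.close()     # now atomically renames the temp file into place
#   # patchf.discard() # would instead throw the pending contents away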
1496 1496 self.applied.append(statusentry(n, patchfn))
1497 1497 except:
1498 1498 ctx = repo[cparents[0]]
1499 1499 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1500 1500 self.savedirty()
1501 1501 self.ui.warn(_('refresh interrupted while patch was popped! '
1502 1502 '(revert --all, qpush to recover)\n'))
1503 1503 raise
1504 1504 finally:
1505 1505 wlock.release()
1506 1506 self.removeundo(repo)
1507 1507
1508 1508 def init(self, repo, create=False):
1509 1509 if not create and os.path.isdir(self.path):
1510 1510 raise util.Abort(_("patch queue directory already exists"))
1511 1511 try:
1512 1512 os.mkdir(self.path)
1513 1513 except OSError, inst:
1514 1514 if inst.errno != errno.EEXIST or not create:
1515 1515 raise
1516 1516 if create:
1517 1517 return self.qrepo(create=True)
1518 1518
1519 1519 def unapplied(self, repo, patch=None):
1520 1520 if patch and patch not in self.series:
1521 1521 raise util.Abort(_("patch %s is not in series file") % patch)
1522 1522 if not patch:
1523 1523 start = self.seriesend()
1524 1524 else:
1525 1525 start = self.series.index(patch) + 1
1526 1526 unapplied = []
1527 1527 for i in xrange(start, len(self.series)):
1528 1528 pushable, reason = self.pushable(i)
1529 1529 if pushable:
1530 1530 unapplied.append((i, self.series[i]))
1531 1531 self.explainpushable(i)
1532 1532 return unapplied
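
# For example (assumed state): with series ['a.patch', 'b.patch',
# 'c.patch'], only 'a.patch' applied, and no guards in effect,
# unapplied(repo) returns [(1, 'b.patch'), (2, 'c.patch')].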
1533 1533
1534 1534 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1535 1535 summary=False):
1536 1536 def displayname(pfx, patchname, state):
1537 1537 if pfx:
1538 1538 self.ui.write(pfx)
1539 1539 if summary:
1540 1540 ph = patchheader(self.join(patchname), self.plainmode)
1541 1541 msg = ph.message and ph.message[0] or ''
1542 1542 if self.ui.formatted():
1543 1543 width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
1544 1544 if width > 0:
1545 1545 msg = util.ellipsis(msg, width)
1546 1546 else:
1547 1547 msg = ''
1548 1548 self.ui.write(patchname, label='qseries.' + state)
1549 1549 self.ui.write(': ')
1550 1550 self.ui.write(msg, label='qseries.message.' + state)
1551 1551 else:
1552 1552 self.ui.write(patchname, label='qseries.' + state)
1553 1553 self.ui.write('\n')
1554 1554
1555 1555 applied = set([p.name for p in self.applied])
1556 1556 if length is None:
1557 1557 length = len(self.series) - start
1558 1558 if not missing:
1559 1559 if self.ui.verbose:
1560 1560 idxwidth = len(str(start + length - 1))
1561 1561 for i in xrange(start, start + length):
1562 1562 patch = self.series[i]
1563 1563 if patch in applied:
1564 1564 char, state = 'A', 'applied'
1565 1565 elif self.pushable(i)[0]:
1566 1566 char, state = 'U', 'unapplied'
1567 1567 else:
1568 1568 char, state = 'G', 'guarded'
1569 1569 pfx = ''
1570 1570 if self.ui.verbose:
1571 1571 pfx = '%*d %s ' % (idxwidth, i, char)
1572 1572 elif status and status != char:
1573 1573 continue
1574 1574 displayname(pfx, patch, state)
1575 1575 else:
1576 1576 msng_list = []
1577 1577 for root, dirs, files in os.walk(self.path):
1578 1578 d = root[len(self.path) + 1:]
1579 1579 for f in files:
1580 1580 fl = os.path.join(d, f)
1581 1581 if (fl not in self.series and
1582 1582 fl not in (self.statuspath, self.seriespath,
1583 1583 self.guardspath)
1584 1584 and not fl.startswith('.')):
1585 1585 msng_list.append(fl)
1586 1586 for x in sorted(msng_list):
1587 1587 pfx = self.ui.verbose and ('D ') or ''
1588 1588 displayname(pfx, x, 'missing')
1589 1589
1590 1590 def issaveline(self, l):
1591 1591 if l.name == '.hg.patches.save.line':
1592 1592 return True
1593 1593
1594 1594 def qrepo(self, create=False):
1595 1595 ui = self.ui.copy()
1596 1596 ui.setconfig('paths', 'default', '', overlay=False)
1597 1597 ui.setconfig('paths', 'default-push', '', overlay=False)
1598 1598 if create or os.path.isdir(self.join(".hg")):
1599 1599 return hg.repository(ui, path=self.path, create=create)
1600 1600
1601 1601 def restore(self, repo, rev, delete=None, qupdate=None):
1602 1602 desc = repo[rev].description().strip()
1603 1603 lines = desc.splitlines()
1604 1604 i = 0
1605 1605 datastart = None
1606 1606 series = []
1607 1607 applied = []
1608 1608 qpp = None
1609 1609 for i, line in enumerate(lines):
1610 1610 if line == 'Patch Data:':
1611 1611 datastart = i + 1
1612 1612 elif line.startswith('Dirstate:'):
1613 1613 l = line.rstrip()
1614 1614 l = l[10:].split(' ')
1615 1615 qpp = [bin(x) for x in l]
1616 1616 elif datastart is not None:
1617 1617 l = line.rstrip()
1618 1618 n, name = l.split(':', 1)
1619 1619 if n:
1620 1620 applied.append(statusentry(bin(n), name))
1621 1621 else:
1622 1622 series.append(l)
1623 1623 if datastart is None:
1624 1624 self.ui.warn(_("No saved patch data found\n"))
1625 1625 return 1
1626 1626 self.ui.warn(_("restoring status: %s\n") % lines[0])
1627 1627 self.fullseries = series
1628 1628 self.applied = applied
1629 1629 self.parseseries()
1630 1630 self.seriesdirty = 1
1631 1631 self.applieddirty = 1
1632 1632 heads = repo.changelog.heads()
1633 1633 if delete:
1634 1634 if rev not in heads:
1635 1635 self.ui.warn(_("save entry has children, leaving it alone\n"))
1636 1636 else:
1637 1637 self.ui.warn(_("removing save entry %s\n") % short(rev))
1638 1638 pp = repo.dirstate.parents()
1639 1639 if rev in pp:
1640 1640 update = True
1641 1641 else:
1642 1642 update = False
1643 1643 self.strip(repo, [rev], update=update, backup='strip')
1644 1644 if qpp:
1645 1645 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1646 1646 (short(qpp[0]), short(qpp[1])))
1647 1647 if qupdate:
1648 1648 self.ui.status(_("updating queue directory\n"))
1649 1649 r = self.qrepo()
1650 1650 if not r:
                self.ui.warn(_("unable to load queue repository\n"))
1652 1652 return 1
1653 1653 hg.clean(r, qpp[0])
1654 1654
1655 1655 def save(self, repo, msg=None):
1656 1656 if not self.applied:
1657 1657 self.ui.warn(_("save: no patches applied, exiting\n"))
1658 1658 return 1
1659 1659 if self.issaveline(self.applied[-1]):
1660 1660 self.ui.warn(_("status is already saved\n"))
1661 1661 return 1
1662 1662
1663 1663 if not msg:
1664 1664 msg = _("hg patches saved state")
1665 1665 else:
1666 1666 msg = "hg patches: " + msg.rstrip('\r\n')
1667 1667 r = self.qrepo()
1668 1668 if r:
1669 1669 pp = r.dirstate.parents()
1670 1670 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1671 1671 msg += "\n\nPatch Data:\n"
1672 1672 msg += ''.join('%s\n' % x for x in self.applied)
1673 1673 msg += ''.join(':%s\n' % x for x in self.fullseries)
1674 1674 n = repo.commit(msg, force=True)
1675 1675 if not n:
1676 1676 self.ui.warn(_("repo commit failed\n"))
1677 1677 return 1
1678 1678 self.applied.append(statusentry(n, '.hg.patches.save.line'))
        self.applieddirty = True
1680 1680 self.removeundo(repo)
1681 1681
1682 1682 def fullseriesend(self):
1683 1683 if self.applied:
1684 1684 p = self.applied[-1].name
1685 1685 end = self.findseries(p)
1686 1686 if end is None:
1687 1687 return len(self.fullseries)
1688 1688 return end + 1
1689 1689 return 0
1690 1690
1691 1691 def seriesend(self, all_patches=False):
1692 1692 """If all_patches is False, return the index of the next pushable patch
1693 1693 in the series, or the series length. If all_patches is True, return the
1694 1694 index of the first patch past the last applied one.
1695 1695 """
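        # For illustration: with series [p0, p1, p2] and only p0 applied,
        # seriesend() starts looking at index 1 and returns the index of
        # the first pushable patch from there (1 if p1 is unguarded).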
1696 1696 end = 0
1697 1697 def next(start):
1698 1698 if all_patches or start >= len(self.series):
1699 1699 return start
1700 1700 for i in xrange(start, len(self.series)):
1701 1701 p, reason = self.pushable(i)
1702 1702 if p:
1703 1703 break
1704 1704 self.explainpushable(i)
1705 1705 return i
1706 1706 if self.applied:
1707 1707 p = self.applied[-1].name
1708 1708 try:
1709 1709 end = self.series.index(p)
1710 1710 except ValueError:
1711 1711 return 0
1712 1712 return next(end + 1)
1713 1713 return next(end)
1714 1714
1715 1715 def appliedname(self, index):
1716 1716 pname = self.applied[index].name
1717 1717 if not self.ui.verbose:
1718 1718 p = pname
1719 1719 else:
1720 1720 p = str(self.series.index(pname)) + " " + pname
1721 1721 return p
1722 1722
1723 1723 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1724 1724 force=None, git=False):
1725 1725 def checkseries(patchname):
1726 1726 if patchname in self.series:
1727 1727 raise util.Abort(_('patch %s is already in the series file')
1728 1728 % patchname)
1729 1729
1730 1730 if rev:
1731 1731 if files:
1732 1732 raise util.Abort(_('option "-r" not valid when importing '
1733 1733 'files'))
1734 1734 rev = scmutil.revrange(repo, rev)
1735 1735 rev.sort(reverse=True)
1736 1736 if (len(files) > 1 or len(rev) > 1) and patchname:
1737 1737 raise util.Abort(_('option "-n" not valid when importing multiple '
1738 1738 'patches'))
1739 1739 if rev:
1740 1740 # If mq patches are applied, we can only import revisions
1741 1741 # that form a linear path to qbase.
1742 1742 # Otherwise, they should form a linear path to a head.
1743 1743 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1744 1744 if len(heads) > 1:
1745 1745 raise util.Abort(_('revision %d is the root of more than one '
1746 1746 'branch') % rev[-1])
1747 1747 if self.applied:
1748 1748 base = repo.changelog.node(rev[0])
1749 1749 if base in [n.node for n in self.applied]:
1750 1750 raise util.Abort(_('revision %d is already managed')
1751 1751 % rev[0])
1752 1752 if heads != [self.applied[-1].node]:
1753 1753 raise util.Abort(_('revision %d is not the parent of '
1754 1754 'the queue') % rev[0])
1755 1755 base = repo.changelog.rev(self.applied[0].node)
1756 1756 lastparent = repo.changelog.parentrevs(base)[0]
1757 1757 else:
1758 1758 if heads != [repo.changelog.node(rev[0])]:
1759 1759 raise util.Abort(_('revision %d has unmanaged children')
1760 1760 % rev[0])
1761 1761 lastparent = None
1762 1762
1763 1763 diffopts = self.diffopts({'git': git})
1764 1764 for r in rev:
1765 1765 p1, p2 = repo.changelog.parentrevs(r)
1766 1766 n = repo.changelog.node(r)
1767 1767 if p2 != nullrev:
1768 1768 raise util.Abort(_('cannot import merge revision %d') % r)
1769 1769 if lastparent and lastparent != r:
1770 1770 raise util.Abort(_('revision %d is not the parent of %d')
1771 1771 % (r, lastparent))
1772 1772 lastparent = p1
1773 1773
1774 1774 if not patchname:
1775 1775 patchname = normname('%d.diff' % r)
1776 1776 checkseries(patchname)
1777 1777 self.checkpatchname(patchname, force)
1778 1778 self.fullseries.insert(0, patchname)
1779 1779
1780 1780 patchf = self.opener(patchname, "w")
1781 1781 cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
1782 1782 patchf.close()
1783 1783
1784 1784 se = statusentry(n, patchname)
1785 1785 self.applied.insert(0, se)
1786 1786
1787 1787 self.added.append(patchname)
1788 1788 patchname = None
1789 1789 self.parseseries()
            self.applieddirty = True
1791 1791 self.seriesdirty = True
1792 1792
1793 1793 for i, filename in enumerate(files):
1794 1794 if existing:
1795 1795 if filename == '-':
1796 1796 raise util.Abort(_('-e is incompatible with import from -'))
1797 1797 filename = normname(filename)
1798 1798 self.checkreservedname(filename)
1799 1799 originpath = self.join(filename)
1800 1800 if not os.path.isfile(originpath):
1801 1801 raise util.Abort(_("patch %s does not exist") % filename)
1802 1802
1803 1803 if patchname:
1804 1804 self.checkpatchname(patchname, force)
1805 1805
1806 1806 self.ui.write(_('renaming %s to %s\n')
1807 1807 % (filename, patchname))
1808 1808 util.rename(originpath, self.join(patchname))
1809 1809 else:
1810 1810 patchname = filename
1811 1811
1812 1812 else:
1813 1813 if filename == '-' and not patchname:
1814 1814 raise util.Abort(_('need --name to import a patch from -'))
1815 1815 elif not patchname:
1816 1816 patchname = normname(os.path.basename(filename.rstrip('/')))
1817 1817 self.checkpatchname(patchname, force)
1818 1818 try:
1819 1819 if filename == '-':
1820 1820 text = self.ui.fin.read()
1821 1821 else:
1822 1822 fp = url.open(self.ui, filename)
1823 1823 text = fp.read()
1824 1824 fp.close()
1825 1825 except (OSError, IOError):
1826 1826 raise util.Abort(_("unable to read file %s") % filename)
1827 1827 patchf = self.opener(patchname, "w")
1828 1828 patchf.write(text)
1829 1829 patchf.close()
1830 1830 if not force:
1831 1831 checkseries(patchname)
1832 1832 if patchname not in self.series:
1833 1833 index = self.fullseriesend() + i
1834 1834 self.fullseries[index:index] = [patchname]
1835 1835 self.parseseries()
1836 1836 self.seriesdirty = True
1837 1837 self.ui.warn(_("adding %s to series file\n") % patchname)
1838 1838 self.added.append(patchname)
1839 1839 patchname = None
1840 1840
1841 1841 self.removeundo(repo)
1842 1842
1843 1843 @command("qdelete|qremove|qrm",
1844 1844 [('k', 'keep', None, _('keep patch file')),
1845 1845 ('r', 'rev', [],
1846 1846 _('stop managing a revision (DEPRECATED)'), _('REV'))],
1847 1847 _('hg qdelete [-k] [PATCH]...'))
1848 1848 def delete(ui, repo, *patches, **opts):
1849 1849 """remove patches from queue
1850 1850
1851 1851 The patches must not be applied, and at least one patch is required. With
1852 1852 -k/--keep, the patch files are preserved in the patch directory.
1853 1853
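    For example, to delete two unapplied patches (names illustrative)
    while keeping their patch files on disk::

      hg qdelete --keep old-fix.patch experiment.patch
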
1854 1854 To stop managing a patch and move it into permanent history,
1855 1855 use the :hg:`qfinish` command."""
1856 1856 q = repo.mq
1857 1857 q.delete(repo, patches, opts)
1858 1858 q.savedirty()
1859 1859 return 0
1860 1860
1861 1861 @command("qapplied",
1862 1862 [('1', 'last', None, _('show only the last patch'))
1863 1863 ] + seriesopts,
1864 1864 _('hg qapplied [-1] [-s] [PATCH]'))
1865 1865 def applied(ui, repo, patch=None, **opts):
1866 1866 """print the patches already applied
1867 1867
1868 1868 Returns 0 on success."""
1869 1869
1870 1870 q = repo.mq
1871 1871
1872 1872 if patch:
1873 1873 if patch not in q.series:
1874 1874 raise util.Abort(_("patch %s is not in series file") % patch)
1875 1875 end = q.series.index(patch) + 1
1876 1876 else:
1877 1877 end = q.seriesend(True)
1878 1878
1879 1879 if opts.get('last') and not end:
1880 1880 ui.write(_("no patches applied\n"))
1881 1881 return 1
1882 1882 elif opts.get('last') and end == 1:
1883 1883 ui.write(_("only one patch applied\n"))
1884 1884 return 1
1885 1885 elif opts.get('last'):
1886 1886 start = end - 2
1887 1887 end = 1
1888 1888 else:
1889 1889 start = 0
1890 1890
1891 1891 q.qseries(repo, length=end, start=start, status='A',
1892 1892 summary=opts.get('summary'))
1893 1893
1894 1894
1895 1895 @command("qunapplied",
1896 1896 [('1', 'first', None, _('show only the first patch'))] + seriesopts,
1897 1897 _('hg qunapplied [-1] [-s] [PATCH]'))
1898 1898 def unapplied(ui, repo, patch=None, **opts):
1899 1899 """print the patches not yet applied
1900 1900
1901 1901 Returns 0 on success."""
1902 1902
1903 1903 q = repo.mq
1904 1904 if patch:
1905 1905 if patch not in q.series:
1906 1906 raise util.Abort(_("patch %s is not in series file") % patch)
1907 1907 start = q.series.index(patch) + 1
1908 1908 else:
1909 1909 start = q.seriesend(True)
1910 1910
1911 1911 if start == len(q.series) and opts.get('first'):
1912 1912 ui.write(_("all patches applied\n"))
1913 1913 return 1
1914 1914
1915 1915 length = opts.get('first') and 1 or None
1916 1916 q.qseries(repo, start=start, length=length, status='U',
1917 1917 summary=opts.get('summary'))
1918 1918
1919 1919 @command("qimport",
1920 1920 [('e', 'existing', None, _('import file in patch directory')),
1921 1921 ('n', 'name', '',
1922 1922 _('name of patch file'), _('NAME')),
1923 1923 ('f', 'force', None, _('overwrite existing files')),
1924 1924 ('r', 'rev', [],
1925 1925 _('place existing revisions under mq control'), _('REV')),
1926 1926 ('g', 'git', None, _('use git extended diff format')),
1927 1927 ('P', 'push', None, _('qpush after importing'))],
1928 1928 _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...'))
1929 1929 def qimport(ui, repo, *filename, **opts):
1930 1930 """import a patch
1931 1931
1932 1932 The patch is inserted into the series after the last applied
1933 1933 patch. If no patches have been applied, qimport prepends the patch
1934 1934 to the series.
1935 1935
1936 1936 The patch will have the same name as its source file unless you
1937 1937 give it a new one with -n/--name.
1938 1938
1939 1939 You can register an existing patch inside the patch directory with
1940 1940 the -e/--existing flag.
1941 1941
1942 1942 With -f/--force, an existing patch of the same name will be
1943 1943 overwritten.
1944 1944
1945 1945 An existing changeset may be placed under mq control with -r/--rev
1946 1946 (e.g. qimport --rev tip -n patch will place tip under mq control).
1947 1947 With -g/--git, patches imported with --rev will use the git diff
1948 1948 format. See the diffs help topic for information on why this is
1949 1949 important for preserving rename/copy information and permission
1950 1950 changes. Use :hg:`qfinish` to remove changesets from mq control.
1951 1951
1952 1952 To import a patch from standard input, pass - as the patch file.
1953 1953 When importing from standard input, a patch name must be specified
1954 1954 using the --name flag.
1955 1955
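    For example (patch name illustrative), to read a patch from standard
    input under an explicit name::

      hg diff | hg qimport -n local-changes.patch -
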
1956 1956 To import an existing patch while renaming it::
1957 1957
1958 1958 hg qimport -e existing-patch -n new-name
1959 1959
1960 1960 Returns 0 if import succeeded.
1961 1961 """
1962 1962 q = repo.mq
1963 1963 try:
1964 1964 q.qimport(repo, filename, patchname=opts.get('name'),
1965 1965 existing=opts.get('existing'), force=opts.get('force'),
1966 1966 rev=opts.get('rev'), git=opts.get('git'))
1967 1967 finally:
1968 1968 q.savedirty()
1969 1969
1970 1970 if opts.get('push') and not opts.get('rev'):
1971 1971 return q.push(repo, None)
1972 1972 return 0
1973 1973
1974 1974 def qinit(ui, repo, create):
1975 1975 """initialize a new queue repository
1976 1976
1977 1977 This command also creates a series file for ordering patches, and
1978 1978 an mq-specific .hgignore file in the queue repository, to exclude
1979 1979 the status and guards files (these contain mostly transient state).
1980 1980
1981 1981 Returns 0 if initialization succeeded."""
1982 1982 q = repo.mq
1983 1983 r = q.init(repo, create)
1984 1984 q.savedirty()
1985 1985 if r:
1986 1986 if not os.path.exists(r.wjoin('.hgignore')):
1987 1987 fp = r.wopener('.hgignore', 'w')
1988 1988 fp.write('^\\.hg\n')
1989 1989 fp.write('^\\.mq\n')
1990 1990 fp.write('syntax: glob\n')
1991 1991 fp.write('status\n')
1992 1992 fp.write('guards\n')
1993 1993 fp.close()
1994 1994 if not os.path.exists(r.wjoin('series')):
1995 1995 r.wopener('series', 'w').close()
1996 1996 r[None].add(['.hgignore', 'series'])
1997 1997 commands.add(ui, r)
1998 1998 return 0
1999 1999
2000 2000 @command("^qinit",
2001 2001 [('c', 'create-repo', None, _('create queue repository'))],
2002 2002 _('hg qinit [-c]'))
2003 2003 def init(ui, repo, **opts):
2004 2004 """init a new queue repository (DEPRECATED)
2005 2005
2006 2006 The queue repository is unversioned by default. If
2007 2007 -c/--create-repo is specified, qinit will create a separate nested
2008 2008 repository for patches (qinit -c may also be run later to convert
2009 2009 an unversioned patch repository into a versioned one). You can use
2010 2010 qcommit to commit changes to this queue repository.
2011 2011
2012 2012 This command is deprecated. Without -c, it's implied by other relevant
2013 2013 commands. With -c, use :hg:`init --mq` instead."""
2014 2014 return qinit(ui, repo, create=opts.get('create_repo'))
2015 2015
2016 2016 @command("qclone",
2017 2017 [('', 'pull', None, _('use pull protocol to copy metadata')),
2018 2018 ('U', 'noupdate', None, _('do not update the new working directories')),
2019 2019 ('', 'uncompressed', None,
2020 2020 _('use uncompressed transfer (fast over LAN)')),
2021 2021 ('p', 'patches', '',
2022 2022 _('location of source patch repository'), _('REPO')),
2023 2023 ] + commands.remoteopts,
2024 2024 _('hg qclone [OPTION]... SOURCE [DEST]'))
2025 2025 def clone(ui, source, dest=None, **opts):
2026 2026 '''clone main and patch repository at same time
2027 2027
    If source is local, destination will have no patches applied. If
    source is remote, this command cannot check whether patches are
    applied there, so it cannot guarantee that patches are not applied
    in the destination. If you clone a remote repository, first make
    sure that it has no patches applied.
2033 2033
2034 2034 Source patch repository is looked for in <src>/.hg/patches by
2035 2035 default. Use -p <url> to change.
2036 2036
2037 2037 The patch directory must be a nested Mercurial repository, as
2038 2038 would be created by :hg:`init --mq`.
2039 2039
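    For example (URL illustrative), to clone a repository together with
    its versioned patch queue::

      hg qclone http://example.com/repo repo-with-mq
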
2040 2040 Return 0 on success.
2041 2041 '''
2042 2042 def patchdir(repo):
2043 2043 url = repo.url()
2044 2044 if url.endswith('/'):
2045 2045 url = url[:-1]
2046 2046 return url + '/.hg/patches'
2047 2047 if dest is None:
2048 2048 dest = hg.defaultdest(source)
2049 2049 sr = hg.repository(hg.remoteui(ui, opts), ui.expandpath(source))
2050 2050 if opts.get('patches'):
2051 2051 patchespath = ui.expandpath(opts.get('patches'))
2052 2052 else:
2053 2053 patchespath = patchdir(sr)
2054 2054 try:
2055 2055 hg.repository(ui, patchespath)
2056 2056 except error.RepoError:
2057 2057 raise util.Abort(_('versioned patch repository not found'
2058 2058 ' (see init --mq)'))
2059 2059 qbase, destrev = None, None
2060 2060 if sr.local():
2061 2061 if sr.mq.applied:
2062 2062 qbase = sr.mq.applied[0].node
2063 2063 if not hg.islocal(dest):
2064 2064 heads = set(sr.heads())
2065 2065 destrev = list(heads.difference(sr.heads(qbase)))
2066 2066 destrev.append(sr.changelog.parents(qbase)[0])
2067 2067 elif sr.capable('lookup'):
2068 2068 try:
2069 2069 qbase = sr.lookup('qbase')
2070 2070 except error.RepoError:
2071 2071 pass
2072 2072 ui.note(_('cloning main repository\n'))
2073 2073 sr, dr = hg.clone(ui, opts, sr.url(), dest,
2074 2074 pull=opts.get('pull'),
2075 2075 rev=destrev,
2076 2076 update=False,
2077 2077 stream=opts.get('uncompressed'))
2078 2078 ui.note(_('cloning patch repository\n'))
2079 2079 hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
2080 2080 pull=opts.get('pull'), update=not opts.get('noupdate'),
2081 2081 stream=opts.get('uncompressed'))
2082 2082 if dr.local():
2083 2083 if qbase:
2084 2084 ui.note(_('stripping applied patches from destination '
2085 2085 'repository\n'))
2086 2086 dr.mq.strip(dr, [qbase], update=False, backup=None)
2087 2087 if not opts.get('noupdate'):
2088 2088 ui.note(_('updating destination repository\n'))
2089 2089 hg.update(dr, dr.changelog.tip())
2090 2090
2091 2091 @command("qcommit|qci",
2092 2092 commands.table["^commit|ci"][1],
2093 2093 _('hg qcommit [OPTION]... [FILE]...'))
2094 2094 def commit(ui, repo, *pats, **opts):
2095 2095 """commit changes in the queue repository (DEPRECATED)
2096 2096
2097 2097 This command is deprecated; use :hg:`commit --mq` instead."""
2098 2098 q = repo.mq
2099 2099 r = q.qrepo()
2100 2100 if not r:
        raise util.Abort(_('no queue repository'))
2102 2102 commands.commit(r.ui, r, *pats, **opts)
2103 2103
2104 2104 @command("qseries",
2105 2105 [('m', 'missing', None, _('print patches not in series')),
2106 2106 ] + seriesopts,
2107 2107 _('hg qseries [-ms]'))
2108 2108 def series(ui, repo, **opts):
2109 2109 """print the entire series file
2110 2110
2111 2111 Returns 0 on success."""
    repo.mq.qseries(repo, missing=opts.get('missing'),
                    summary=opts.get('summary'))
2113 2113 return 0
2114 2114
2115 2115 @command("qtop", seriesopts, _('hg qtop [-s]'))
2116 2116 def top(ui, repo, **opts):
2117 2117 """print the name of the current patch
2118 2118
2119 2119 Returns 0 on success."""
2120 2120 q = repo.mq
2121 2121 t = q.applied and q.seriesend(True) or 0
2122 2122 if t:
2123 2123 q.qseries(repo, start=t - 1, length=1, status='A',
2124 2124 summary=opts.get('summary'))
2125 2125 else:
2126 2126 ui.write(_("no patches applied\n"))
2127 2127 return 1
2128 2128
2129 2129 @command("qnext", seriesopts, _('hg qnext [-s]'))
2130 2130 def next(ui, repo, **opts):
2131 2131 """print the name of the next patch
2132 2132
2133 2133 Returns 0 on success."""
2134 2134 q = repo.mq
2135 2135 end = q.seriesend()
2136 2136 if end == len(q.series):
2137 2137 ui.write(_("all patches applied\n"))
2138 2138 return 1
2139 2139 q.qseries(repo, start=end, length=1, summary=opts.get('summary'))
2140 2140
2141 2141 @command("qprev", seriesopts, _('hg qprev [-s]'))
2142 2142 def prev(ui, repo, **opts):
2143 2143 """print the name of the previous patch
2144 2144
2145 2145 Returns 0 on success."""
2146 2146 q = repo.mq
2147 2147 l = len(q.applied)
2148 2148 if l == 1:
2149 2149 ui.write(_("only one patch applied\n"))
2150 2150 return 1
2151 2151 if not l:
2152 2152 ui.write(_("no patches applied\n"))
2153 2153 return 1
2154 2154 q.qseries(repo, start=l - 2, length=1, status='A',
2155 2155 summary=opts.get('summary'))
2156 2156
2157 2157 def setupheaderopts(ui, opts):
2158 2158 if not opts.get('user') and opts.get('currentuser'):
2159 2159 opts['user'] = ui.username()
2160 2160 if not opts.get('date') and opts.get('currentdate'):
2161 2161 opts['date'] = "%d %d" % util.makedate()
2162 2162
2163 2163 @command("^qnew",
2164 2164 [('e', 'edit', None, _('edit commit message')),
2165 2165 ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
2166 2166 ('g', 'git', None, _('use git extended diff format')),
2167 2167 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2168 2168 ('u', 'user', '',
2169 2169 _('add "From: <USER>" to patch'), _('USER')),
2170 2170 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2171 2171 ('d', 'date', '',
2172 2172 _('add "Date: <DATE>" to patch'), _('DATE'))
2173 2173 ] + commands.walkopts + commands.commitopts,
2174 2174 _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'))
2175 2175 def new(ui, repo, patch, *args, **opts):
2176 2176 """create a new patch
2177 2177
2178 2178 qnew creates a new patch on top of the currently-applied patch (if
2179 2179 any). The patch will be initialized with any outstanding changes
2180 2180 in the working directory. You may also use -I/--include,
2181 2181 -X/--exclude, and/or a list of files after the patch name to add
2182 2182 only changes to matching files to the new patch, leaving the rest
2183 2183 as uncommitted modifications.
2184 2184
2185 2185 -u/--user and -d/--date can be used to set the (given) user and
2186 2186 date, respectively. -U/--currentuser and -D/--currentdate set user
2187 2187 to current user and date to current date.
2188 2188
2189 2189 -e/--edit, -m/--message or -l/--logfile set the patch header as
2190 2190 well as the commit message. If none is specified, the header is
2191 2191 empty and the commit message is '[mq]: PATCH'.
2192 2192
2193 2193 Use the -g/--git option to keep the patch in the git extended diff
2194 2194 format. Read the diffs help topic for more information on why this
2195 2195 is important for preserving permission changes and copy/rename
2196 2196 information.
2197 2197
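    For example (names illustrative), to create a patch from the pending
    changes to a single file, stamped with the current user and date::

      hg qnew -U -D -m "fix parser crash" fix-parser.patch src/parser.py
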
2198 2198 Returns 0 on successful creation of a new patch.
2199 2199 """
2200 2200 msg = cmdutil.logmessage(ui, opts)
2201 2201 def getmsg():
2202 2202 return ui.edit(msg, opts.get('user') or ui.username())
2203 2203 q = repo.mq
2205 2205 if opts.get('edit'):
2206 2206 opts['msg'] = getmsg
2207 2207 else:
2208 2208 opts['msg'] = msg
2209 2209 setupheaderopts(ui, opts)
2210 2210 q.new(repo, patch, *args, **opts)
2211 2211 q.savedirty()
2212 2212 return 0
2213 2213
2214 2214 @command("^qrefresh",
2215 2215 [('e', 'edit', None, _('edit commit message')),
2216 2216 ('g', 'git', None, _('use git extended diff format')),
2217 2217 ('s', 'short', None,
2218 2218 _('refresh only files already in the patch and specified files')),
2219 2219 ('U', 'currentuser', None,
2220 2220 _('add/update author field in patch with current user')),
2221 2221 ('u', 'user', '',
2222 2222 _('add/update author field in patch with given user'), _('USER')),
2223 2223 ('D', 'currentdate', None,
2224 2224 _('add/update date field in patch with current date')),
2225 2225 ('d', 'date', '',
2226 2226 _('add/update date field in patch with given date'), _('DATE'))
2227 2227 ] + commands.walkopts + commands.commitopts,
2228 2228 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'))
2229 2229 def refresh(ui, repo, *pats, **opts):
2230 2230 """update the current patch
2231 2231
2232 2232 If any file patterns are provided, the refreshed patch will
2233 2233 contain only the modifications that match those patterns; the
2234 2234 remaining modifications will remain in the working directory.
2235 2235
2236 2236 If -s/--short is specified, files currently included in the patch
2237 2237 will be refreshed just like matched files and remain in the patch.
2238 2238
2239 2239 If -e/--edit is specified, Mercurial will start your configured editor for
2240 2240 you to enter a message. In case qrefresh fails, you will find a backup of
2241 2241 your message in ``.hg/last-message.txt``.
2242 2242
2243 2243 hg add/remove/copy/rename work as usual, though you might want to
2244 2244 use git-style patches (-g/--git or [diff] git=1) to track copies
2245 2245 and renames. See the diffs help topic for more information on the
2246 2246 git diff format.
2247 2247
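    For example (file name illustrative), to limit the current patch to
    the changes in one file and edit its message at the same time::

      hg qrefresh -e src/parser.py
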
2248 2248 Returns 0 on success.
2249 2249 """
2250 2250 q = repo.mq
2251 2251 message = cmdutil.logmessage(ui, opts)
2252 2252 if opts.get('edit'):
2253 2253 if not q.applied:
2254 2254 ui.write(_("no patches applied\n"))
2255 2255 return 1
2256 2256 if message:
2257 2257 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2258 2258 patch = q.applied[-1].name
2259 2259 ph = patchheader(q.join(patch), q.plainmode)
2260 2260 message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
2261 2261 # We don't want to lose the patch message if qrefresh fails (issue2062)
2262 2262 repo.savecommitmessage(message)
2263 2263 setupheaderopts(ui, opts)
2264 2264 wlock = repo.wlock()
2265 2265 try:
2266 2266 ret = q.refresh(repo, pats, msg=message, **opts)
2267 2267 q.savedirty()
2268 2268 return ret
2269 2269 finally:
2270 2270 wlock.release()
2271 2271
2272 2272 @command("^qdiff",
2273 2273 commands.diffopts + commands.diffopts2 + commands.walkopts,
2274 2274 _('hg qdiff [OPTION]... [FILE]...'))
2275 2275 def diff(ui, repo, *pats, **opts):
2276 2276 """diff of the current patch and subsequent modifications
2277 2277
2278 2278 Shows a diff which includes the current patch as well as any
2279 2279 changes which have been made in the working directory since the
2280 2280 last refresh (thus showing what the current patch would become
2281 2281 after a qrefresh).
2282 2282
2283 2283 Use :hg:`diff` if you only want to see the changes made since the
2284 2284 last qrefresh, or :hg:`export qtip` if you want to see changes
2285 2285 made by the current patch without including changes made since the
2286 2286 qrefresh.
2287 2287
2288 2288 Returns 0 on success.
2289 2289 """
2290 2290 repo.mq.diff(repo, pats, opts)
2291 2291 return 0
2292 2292
2293 2293 @command('qfold',
2294 2294 [('e', 'edit', None, _('edit patch header')),
2295 2295 ('k', 'keep', None, _('keep folded patch files')),
2296 2296 ] + commands.commitopts,
2297 2297 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'))
2298 2298 def fold(ui, repo, *files, **opts):
2299 2299 """fold the named patches into the current patch
2300 2300
2301 2301 Patches must not yet be applied. Each patch will be successively
2302 2302 applied to the current patch in the order given. If all the
2303 2303 patches apply successfully, the current patch will be refreshed
2304 2304 with the new cumulative patch, and the folded patches will be
2305 2305 deleted. With -k/--keep, the folded patch files will not be
2306 2306 removed afterwards.
2307 2307
2308 2308 The header for each folded patch will be concatenated with the
2309 2309 current patch header, separated by a line of ``* * *``.
2310 2310
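    For example (names illustrative), to fold two unapplied patches into
    the current patch and edit the combined header::

      hg qfold -e part2.patch part3.patch
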
2311 2311 Returns 0 on success."""
2312 2312
2313 2313 q = repo.mq
2314 2314
2315 2315 if not files:
2316 2316 raise util.Abort(_('qfold requires at least one patch name'))
2317 2317 if not q.checktoppatch(repo)[0]:
2318 2318 raise util.Abort(_('no patches applied'))
2319 2319 q.checklocalchanges(repo)
2320 2320
2321 2321 message = cmdutil.logmessage(ui, opts)
2322 2322 if opts.get('edit'):
2323 2323 if message:
2324 2324 raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
2325 2325
2326 2326 parent = q.lookup('qtip')
2327 2327 patches = []
2328 2328 messages = []
2329 2329 for f in files:
2330 2330 p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_('skipping already folded patch %s\n') % p)
            continue
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s')
                             % p)
        patches.append(p)
2336 2336
2337 2337 for p in patches:
2338 2338 if not message:
2339 2339 ph = patchheader(q.join(p), q.plainmode)
2340 2340 if ph.message:
2341 2341 messages.append(ph.message)
2342 2342 pf = q.join(p)
2343 2343 (patchsuccess, files, fuzz) = q.patch(repo, pf)
2344 2344 if not patchsuccess:
2345 2345 raise util.Abort(_('error folding patch %s') % p)
2346 2346
2347 2347 if not message:
2348 2348 ph = patchheader(q.join(parent), q.plainmode)
2349 2349 message, user = ph.message, ph.user
2350 2350 for msg in messages:
2351 2351 message.append('* * *')
2352 2352 message.extend(msg)
2353 2353 message = '\n'.join(message)
2354 2354
2355 2355 if opts.get('edit'):
2356 2356 message = ui.edit(message, user or ui.username())
2357 2357
2358 2358 diffopts = q.patchopts(q.diffopts(), *patches)
2359 2359 wlock = repo.wlock()
2360 2360 try:
2361 2361 q.refresh(repo, msg=message, git=diffopts.git)
2362 2362 q.delete(repo, patches, opts)
2363 2363 q.savedirty()
2364 2364 finally:
2365 2365 wlock.release()
2366 2366
2367 2367 @command("qgoto",
2368 2368 [('f', 'force', None, _('overwrite any local changes'))],
2369 2369 _('hg qgoto [OPTION]... PATCH'))
2370 2370 def goto(ui, repo, patch, **opts):
2371 2371 '''push or pop patches until named patch is at top of stack
2372 2372
2373 2373 Returns 0 on success.'''
2374 2374 q = repo.mq
2375 2375 patch = q.lookup(patch)
2376 2376 if q.isapplied(patch):
2377 2377 ret = q.pop(repo, patch, force=opts.get('force'))
2378 2378 else:
2379 2379 ret = q.push(repo, patch, force=opts.get('force'))
2380 2380 q.savedirty()
2381 2381 return ret
2382 2382
2383 2383 @command("qguard",
2384 2384 [('l', 'list', None, _('list all patches and guards')),
2385 2385 ('n', 'none', None, _('drop all guards'))],
2386 2386 _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
2387 2387 def guard(ui, repo, *args, **opts):
2388 2388 '''set or print guards for a patch
2389 2389
2390 2390 Guards control whether a patch can be pushed. A patch with no
2391 2391 guards is always pushed. A patch with a positive guard ("+foo") is
2392 2392 pushed only if the :hg:`qselect` command has activated it. A patch with
2393 2393 a negative guard ("-foo") is never pushed if the :hg:`qselect` command
2394 2394 has activated it.
2395 2395
2396 2396 With no arguments, print the currently active guards.
2397 2397 With arguments, set guards for the named patch.
2398 2398
2399 2399 .. note::
2400 2400 Specifying negative guards now requires '--'.
2401 2401
2402 2402 To set guards on another patch::
2403 2403
2404 2404 hg qguard other.patch -- +2.6.17 -stable
2405 2405
2406 2406 Returns 0 on success.
2407 2407 '''
2408 2408 def status(idx):
2409 2409 guards = q.seriesguards[idx] or ['unguarded']
2410 2410 if q.series[idx] in applied:
2411 2411 state = 'applied'
2412 2412 elif q.pushable(idx)[0]:
2413 2413 state = 'unapplied'
2414 2414 else:
2415 2415 state = 'guarded'
2416 2416 label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
2417 2417 ui.write('%s: ' % ui.label(q.series[idx], label))
2418 2418
2419 2419 for i, guard in enumerate(guards):
2420 2420 if guard.startswith('+'):
2421 2421 ui.write(guard, label='qguard.positive')
2422 2422 elif guard.startswith('-'):
2423 2423 ui.write(guard, label='qguard.negative')
2424 2424 else:
2425 2425 ui.write(guard, label='qguard.unguarded')
2426 2426 if i != len(guards) - 1:
2427 2427 ui.write(' ')
2428 2428 ui.write('\n')
2429 2429 q = repo.mq
2430 2430 applied = set(p.name for p in q.applied)
2431 2431 patch = None
2432 2432 args = list(args)
2433 2433 if opts.get('list'):
2434 2434 if args or opts.get('none'):
2435 2435 raise util.Abort(_('cannot mix -l/--list with options or arguments'))
2436 2436 for i in xrange(len(q.series)):
2437 2437 status(i)
2438 2438 return
2439 2439 if not args or args[0][0:1] in '-+':
2440 2440 if not q.applied:
2441 2441 raise util.Abort(_('no patches applied'))
2442 2442 patch = q.applied[-1].name
2443 2443 if patch is None and args[0][0:1] not in '-+':
2444 2444 patch = args.pop(0)
2445 2445 if patch is None:
2446 2446 raise util.Abort(_('no patch to work with'))
2447 2447 if args or opts.get('none'):
2448 2448 idx = q.findseries(patch)
2449 2449 if idx is None:
2450 2450 raise util.Abort(_('no patch named %s') % patch)
2451 2451 q.setguards(idx, args)
2452 2452 q.savedirty()
2453 2453 else:
2454 2454 status(q.series.index(q.lookup(patch)))
2455 2455
2456 2456 @command("qheader", [], _('hg qheader [PATCH]'))
2457 2457 def header(ui, repo, patch=None):
2458 2458 """print the header of the topmost or specified patch
2459 2459
2460 2460 Returns 0 on success."""
2461 2461 q = repo.mq
2462 2462
2463 2463 if patch:
2464 2464 patch = q.lookup(patch)
2465 2465 else:
2466 2466 if not q.applied:
2467 2467 ui.write(_('no patches applied\n'))
2468 2468 return 1
2469 2469 patch = q.lookup('qtip')
2470 2470 ph = patchheader(q.join(patch), q.plainmode)
2471 2471
2472 2472 ui.write('\n'.join(ph.message) + '\n')
2473 2473
2474 2474 def lastsavename(path):
2475 2475 (directory, base) = os.path.split(path)
2476 2476 names = os.listdir(directory)
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
2478 2478 maxindex = None
2479 2479 maxname = None
2480 2480 for f in names:
2481 2481 m = namere.match(f)
2482 2482 if m:
2483 2483 index = int(m.group(1))
2484 2484 if maxindex is None or index > maxindex:
2485 2485 maxindex = index
2486 2486 maxname = f
2487 2487 if maxname:
2488 2488 return (os.path.join(directory, maxname), maxindex)
2489 2489 return (None, None)
2490 2490
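# For example, with save copies "patches.1" and "patches.3" on disk next
# to the "patches" directory, lastsavename picks the highest numbered
# copy, returning its path and the index 3; savename then returns the
# path for "patches.4".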
2491 2491 def savename(path):
2492 2492 (last, index) = lastsavename(path)
2493 2493 if last is None:
2494 2494 index = 0
2495 2495 newpath = path + ".%d" % (index + 1)
2496 2496 return newpath
2497 2497
2498 2498 @command("^qpush",
2499 2499 [('f', 'force', None, _('apply on top of local changes')),
2500 2500 ('e', 'exact', None, _('apply the target patch to its recorded parent')),
2501 2501 ('l', 'list', None, _('list patch name in commit text')),
2502 2502 ('a', 'all', None, _('apply all patches')),
2503 2503 ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
2504 2504 ('n', 'name', '',
2505 2505 _('merge queue name (DEPRECATED)'), _('NAME')),
2506 2506 ('', 'move', None, _('reorder patch series and apply only the patch'))],
2507 2507 _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
2508 2508 def push(ui, repo, patch=None, **opts):
2509 2509 """push the next patch onto the stack
2510 2510
2511 2511 When -f/--force is applied, all local changes in patched files
2512 2512 will be lost.
2513 2513
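    For example (patch name illustrative), to apply one specific patch
    out of series order, reordering the series so only that patch is
    pushed::

      hg qpush --move urgent-fix.patch
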
2514 2514 Return 0 on success.
2515 2515 """
2516 2516 q = repo.mq
2517 2517 mergeq = None
2518 2518
2519 2519 if opts.get('merge'):
2520 2520 if opts.get('name'):
2521 2521 newpath = repo.join(opts.get('name'))
2522 2522 else:
2523 2523 newpath, i = lastsavename(q.path)
2524 2524 if not newpath:
2525 2525 ui.warn(_("no saved queues found, please use -n\n"))
2526 2526 return 1
2527 2527 mergeq = queue(ui, repo.join(""), newpath)
2528 2528 ui.warn(_("merging with queue at: %s\n") % mergeq.path)
2529 2529 ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
2530 2530 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
2531 2531 exact=opts.get('exact'))
2532 2532 return ret
2533 2533
2534 2534 @command("^qpop",
2535 2535 [('a', 'all', None, _('pop all patches')),
2536 2536 ('n', 'name', '',
2537 2537 _('queue name to pop (DEPRECATED)'), _('NAME')),
2538 2538 ('f', 'force', None, _('forget any local changes to patched files'))],
2539 2539 _('hg qpop [-a] [-f] [PATCH | INDEX]'))
2540 2540 def pop(ui, repo, patch=None, **opts):
2541 2541 """pop the current patch off the stack
2542 2542
2543 2543 By default, pops off the top of the patch stack. If given a patch
2544 2544 name, keeps popping off patches until the named patch is at the
2545 2545 top of the stack.
2546 2546
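    For example, to pop every applied patch off the stack at once::

      hg qpop -a
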
2547 2547 Return 0 on success.
2548 2548 """
2549 2549 localupdate = True
2550 2550 if opts.get('name'):
2551 2551 q = queue(ui, repo.join(""), repo.join(opts.get('name')))
2552 2552 ui.warn(_('using patch queue: %s\n') % q.path)
2553 2553 localupdate = False
2554 2554 else:
2555 2555 q = repo.mq
2556 2556 ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
2557 2557 all=opts.get('all'))
2558 2558 q.savedirty()
2559 2559 return ret
2560 2560
2561 2561 @command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
2562 2562 def rename(ui, repo, patch, name=None, **opts):
2563 2563 """rename a patch
2564 2564
2565 2565 With one argument, renames the current patch to PATCH1.
2566 2566 With two arguments, renames PATCH1 to PATCH2.
2567 2567
2568 2568 Returns 0 on success."""
2569 2569
2570 2570 q = repo.mq
2571 2571
2572 2572 if not name:
2573 2573 name = patch
2574 2574 patch = None
2575 2575
2576 2576 if patch:
2577 2577 patch = q.lookup(patch)
2578 2578 else:
2579 2579 if not q.applied:
2580 2580 ui.write(_('no patches applied\n'))
2581 2581 return
2582 2582 patch = q.lookup('qtip')
2583 2583 absdest = q.join(name)
2584 2584 if os.path.isdir(absdest):
2585 2585 name = normname(os.path.join(name, os.path.basename(patch)))
2586 2586 absdest = q.join(name)
2587 2587 q.checkpatchname(name)
2588 2588
2589 2589 ui.note(_('renaming %s to %s\n') % (patch, name))
2590 2590 i = q.findseries(patch)
2591 2591 guards = q.guard_re.findall(q.fullseries[i])
2592 2592 q.fullseries[i] = name + ''.join([' #' + g for g in guards])
2593 2593 q.parseseries()
    q.seriesdirty = True
2595 2595
2596 2596 info = q.isapplied(patch)
2597 2597 if info:
2598 2598 q.applied[info[0]] = statusentry(info[1], name)
        q.applieddirty = True
2600 2600
2601 2601 destdir = os.path.dirname(absdest)
2602 2602 if not os.path.isdir(destdir):
2603 2603 os.makedirs(destdir)
2604 2604 util.rename(q.join(patch), absdest)
2605 2605 r = q.qrepo()
2606 2606 if r and patch in r.dirstate:
2607 2607 wctx = r[None]
2608 2608 wlock = r.wlock()
2609 2609 try:
2610 2610 if r.dirstate[patch] == 'a':
2611 2611 r.dirstate.drop(patch)
2612 2612 r.dirstate.add(name)
2613 2613 else:
2614 2614 if r.dirstate[name] == 'r':
2615 2615 wctx.undelete([name])
2616 2616 wctx.copy(patch, name)
2617 2617 wctx.forget([patch])
2618 2618 finally:
2619 2619 wlock.release()
2620 2620
2621 2621 q.savedirty()
2622 2622
2623 2623 @command("qrestore",
2624 2624 [('d', 'delete', None, _('delete save entry')),
2625 2625 ('u', 'update', None, _('update queue working directory'))],
2626 2626 _('hg qrestore [-d] [-u] REV'))
2627 2627 def restore(ui, repo, rev, **opts):
2628 2628 """restore the queue state saved by a revision (DEPRECATED)
2629 2629
2630 2630 This command is deprecated, use :hg:`rebase` instead."""
2631 2631 rev = repo.lookup(rev)
2632 2632 q = repo.mq
2633 2633 q.restore(repo, rev, delete=opts.get('delete'),
2634 2634 qupdate=opts.get('update'))
2635 2635 q.savedirty()
2636 2636 return 0
2637 2637
2638 2638 @command("qsave",
2639 2639 [('c', 'copy', None, _('copy patch directory')),
2640 2640 ('n', 'name', '',
2641 2641 _('copy directory name'), _('NAME')),
2642 2642 ('e', 'empty', None, _('clear queue status file')),
2643 2643 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2644 2644 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
2645 2645 def save(ui, repo, **opts):
2646 2646 """save current queue state (DEPRECATED)
2647 2647
2648 2648 This command is deprecated, use :hg:`rebase` instead."""
2649 2649 q = repo.mq
2650 2650 message = cmdutil.logmessage(ui, opts)
2651 2651 ret = q.save(repo, msg=message)
2652 2652 if ret:
2653 2653 return ret
2654 2654 q.savedirty()
2655 2655 if opts.get('copy'):
2656 2656 path = q.path
2657 2657 if opts.get('name'):
2658 2658 newpath = os.path.join(q.basepath, opts.get('name'))
2659 2659 if os.path.exists(newpath):
2660 2660 if not os.path.isdir(newpath):
2661 2661 raise util.Abort(_('destination %s exists and is not '
2662 2662 'a directory') % newpath)
2663 2663 if not opts.get('force'):
2664 2664 raise util.Abort(_('destination %s exists, '
2665 2665 'use -f to force') % newpath)
2666 2666 else:
2667 2667 newpath = savename(path)
2668 2668 ui.warn(_("copy %s to %s\n") % (path, newpath))
2669 2669 util.copyfiles(path, newpath)
2670 2670 if opts.get('empty'):
2671 2671 try:
2672 2672 os.unlink(q.join(q.statuspath))
        except OSError:
            pass
2675 2675 return 0
2676 2676
2677 2677 @command("strip",
2678 2678 [('f', 'force', None, _('force removal of changesets, discard '
2679 2679 'uncommitted changes (no backup)')),
2680 2680 ('b', 'backup', None, _('bundle only changesets with local revision'
2681 2681 ' number greater than REV which are not'
2682 2682 ' descendants of REV (DEPRECATED)')),
2683 2683 ('n', 'no-backup', None, _('no backups')),
2684 2684 ('', 'nobackup', None, _('no backups (DEPRECATED)')),
2685 2685 ('k', 'keep', None, _("do not modify working copy during strip"))],
2686 2686 _('hg strip [-k] [-f] [-n] REV...'))
2687 2687 def strip(ui, repo, *revs, **opts):
2688 2688 """strip changesets and all their descendants from the repository
2689 2689
2690 2690 The strip command removes the specified changesets and all their
2691 2691 descendants. If the working directory has uncommitted changes, the
2692 2692 operation is aborted unless the --force flag is supplied, in which
2693 2693 case changes will be discarded.
2694 2694
2695 2695 If a parent of the working directory is stripped, then the working
2696 2696 directory will automatically be updated to the most recent
2697 2697 available ancestor of the stripped parent after the operation
2698 2698 completes.
2699 2699
2700 2700 Any stripped changesets are stored in ``.hg/strip-backup`` as a
2701 2701 bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
2702 2702 be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
2703 2703 where BUNDLE is the bundle file created by the strip. Note that
2704 2704 the local revision numbers will in general be different after the
2705 2705 restore.
2706 2706
2707 2707 Use the --no-backup option to discard the backup bundle once the
2708 2708 operation completes.
2709 2709
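    For example, to strip a (hypothetical) revision 42 with its
    descendants and later restore them from the automatic backup::

      hg strip 42
      hg unbundle .hg/strip-backup/BUNDLE
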
2710 2710 Return 0 on success.
2711 2711 """
2712 2712 backup = 'all'
2713 2713 if opts.get('backup'):
2714 2714 backup = 'strip'
2715 2715 elif opts.get('no_backup') or opts.get('nobackup'):
2716 2716 backup = 'none'
2717 2717
2718 2718 cl = repo.changelog
2719 2719 revs = set(scmutil.revrange(repo, revs))
2720 2720 if not revs:
2721 2721 raise util.Abort(_('empty revision set'))
2722 2722
2723 2723 descendants = set(cl.descendants(*revs))
2724 2724 strippedrevs = revs.union(descendants)
2725 2725 roots = revs.difference(descendants)
2726 2726
2727 2727 update = False
2728 2728 # if one of the wdir parent is stripped we'll need
2729 2729 # to update away to an earlier revision
2730 2730 for p in repo.dirstate.parents():
2731 2731 if p != nullid and cl.rev(p) in strippedrevs:
2732 2732 update = True
2733 2733 break
2734 2734
2735 2735 rootnodes = set(cl.node(r) for r in roots)
2736 2736
2737 2737 q = repo.mq
2738 2738 if q.applied:
2739 2739 # refresh queue state if we're about to strip
2740 2740 # applied patches
2741 2741 if cl.rev(repo.lookup('qtip')) in strippedrevs:
2742 2742 q.applieddirty = True
2743 2743 start = 0
2744 2744 end = len(q.applied)
2745 2745 for i, statusentry in enumerate(q.applied):
2746 2746 if statusentry.node in rootnodes:
2747 2747 # if one of the stripped roots is an applied
2748 2748 # patch, only part of the queue is stripped
2749 2749 start = i
2750 2750 break
2751 2751 del q.applied[start:end]
2752 2752 q.savedirty()
2753 2753
2754 2754 revs = list(rootnodes)
2755 2755 if update and opts.get('keep'):
2756 2756 wlock = repo.wlock()
2757 2757 try:
2758 2758 urev = repo.mq.qparents(repo, revs[0])
2759 2759 repo.dirstate.rebuild(urev, repo[urev].manifest())
2760 2760 repo.dirstate.write()
2761 2761 update = False
2762 2762 finally:
2763 2763 wlock.release()
2764 2764
2765 2765 repo.mq.strip(repo, revs, backup=backup, update=update,
2766 2766 force=opts.get('force'))
2767 2767 return 0
2768 2768
2769 2769 @command("qselect",
2770 2770 [('n', 'none', None, _('disable all guards')),
2771 2771 ('s', 'series', None, _('list all guards in series file')),
2772 2772 ('', 'pop', None, _('pop to before first guarded applied patch')),
2773 2773 ('', 'reapply', None, _('pop, then reapply patches'))],
2774 2774 _('hg qselect [OPTION]... [GUARD]...'))
2775 2775 def select(ui, repo, *args, **opts):
2776 2776 '''set or print guarded patches to push
2777 2777
    Use the :hg:`qguard` command to set or print guards on a patch, then use
2779 2779 qselect to tell mq which guards to use. A patch will be pushed if
2780 2780 it has no guards or any positive guards match the currently
2781 2781 selected guard, but will not be pushed if any negative guards
2782 2782 match the current guard. For example::
2783 2783
2784 2784 qguard foo.patch -- -stable (negative guard)
2785 2785 qguard bar.patch +stable (positive guard)
2786 2786 qselect stable
2787 2787
2788 2788 This activates the "stable" guard. mq will skip foo.patch (because
2789 2789 it has a negative match) but push bar.patch (because it has a
2790 2790 positive match).
2791 2791
2792 2792 With no arguments, prints the currently active guards.
2793 2793 With one argument, sets the active guard.
2794 2794
2795 2795 Use -n/--none to deactivate guards (no other arguments needed).
2796 2796 When no guards are active, patches with positive guards are
2797 2797 skipped and patches with negative guards are pushed.
2798 2798
2799 2799 qselect can change the guards on applied patches. It does not pop
2800 2800 guarded patches by default. Use --pop to pop back to the last
2801 2801 applied patch that is not guarded. Use --reapply (which implies
2802 2802 --pop) to push back to the current patch afterwards, but skip
2803 2803 guarded patches.
2804 2804
2805 2805 Use -s/--series to print a list of all guards in the series file
2806 2806 (no other arguments needed). Use -v for more information.
2807 2807
2808 2808 Returns 0 on success.'''
2809 2809
2810 2810 q = repo.mq
2811 2811 guards = q.active()
2812 2812 if args or opts.get('none'):
2813 2813 old_unapplied = q.unapplied(repo)
2814 2814 old_guarded = [i for i in xrange(len(q.applied)) if
2815 2815 not q.pushable(i)[0]]
2816 2816 q.setactive(args)
2817 2817 q.savedirty()
2818 2818 if not args:
2819 2819 ui.status(_('guards deactivated\n'))
2820 2820 if not opts.get('pop') and not opts.get('reapply'):
2821 2821 unapplied = q.unapplied(repo)
2822 2822 guarded = [i for i in xrange(len(q.applied))
2823 2823 if not q.pushable(i)[0]]
2824 2824 if len(unapplied) != len(old_unapplied):
2825 2825 ui.status(_('number of unguarded, unapplied patches has '
2826 2826 'changed from %d to %d\n') %
2827 2827 (len(old_unapplied), len(unapplied)))
2828 2828 if len(guarded) != len(old_guarded):
2829 2829 ui.status(_('number of guarded, applied patches has changed '
2830 2830 'from %d to %d\n') %
2831 2831 (len(old_guarded), len(guarded)))
2832 2832 elif opts.get('series'):
2833 2833 guards = {}
2834 2834 noguards = 0
2835 2835 for gs in q.seriesguards:
2836 2836 if not gs:
2837 2837 noguards += 1
2838 2838 for g in gs:
2839 2839 guards.setdefault(g, 0)
2840 2840 guards[g] += 1
2841 2841 if ui.verbose:
2842 2842 guards['NONE'] = noguards
2843 2843 guards = guards.items()
2844 2844 guards.sort(key=lambda x: x[0][1:])
2845 2845 if guards:
2846 2846 ui.note(_('guards in series file:\n'))
2847 2847 for guard, count in guards:
2848 2848 ui.note('%2d ' % count)
2849 2849 ui.write(guard, '\n')
2850 2850 else:
2851 2851 ui.note(_('no guards in series file\n'))
2852 2852 else:
2853 2853 if guards:
2854 2854 ui.note(_('active guards:\n'))
2855 2855 for g in guards:
2856 2856 ui.write(g, '\n')
2857 2857 else:
2858 2858 ui.write(_('no active guards\n'))
2859 2859 reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
2860 2860 popped = False
2861 2861 if opts.get('pop') or opts.get('reapply'):
2862 2862 for i in xrange(len(q.applied)):
2863 2863 pushable, reason = q.pushable(i)
2864 2864 if not pushable:
2865 2865 ui.status(_('popping guarded patches\n'))
2866 2866 popped = True
2867 2867 if i == 0:
2868 2868 q.pop(repo, all=True)
2869 2869 else:
2870 2870 q.pop(repo, i - 1)
2871 2871 break
2872 2872 if popped:
2873 2873 try:
2874 2874 if reapply:
2875 2875 ui.status(_('reapplying unguarded patches\n'))
2876 2876 q.push(repo, reapply)
2877 2877 finally:
2878 2878 q.savedirty()
2879 2879
2880 2880 @command("qfinish",
2881 2881 [('a', 'applied', None, _('finish all applied changesets'))],
2882 2882 _('hg qfinish [-a] [REV]...'))
2883 2883 def finish(ui, repo, *revrange, **opts):
2884 2884 """move applied patches into repository history
2885 2885
2886 2886 Finishes the specified revisions (corresponding to applied
2887 2887 patches) by moving them out of mq control into regular repository
2888 2888 history.
2889 2889
2890 2890 Accepts a revision range or the -a/--applied option. If --applied
2891 2891 is specified, all applied mq revisions are removed from mq
2892 2892 control. Otherwise, the given revisions must be at the base of the
2893 2893 stack of applied patches.
2894 2894
2895 2895 This can be especially useful if your changes have been applied to
2896 2896 an upstream repository, or if you are about to push your changes
2897 2897 to upstream.
2898 2898
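    For example, to move every applied patch into regular history::

      hg qfinish -a
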
2899 2899 Returns 0 on success.
2900 2900 """
2901 2901 if not opts.get('applied') and not revrange:
2902 2902 raise util.Abort(_('no revisions specified'))
2903 2903 elif opts.get('applied'):
2904 2904 revrange = ('qbase::qtip',) + revrange
2905 2905
2906 2906 q = repo.mq
2907 2907 if not q.applied:
2908 2908 ui.status(_('no patches applied\n'))
2909 2909 return 0
2910 2910
2911 2911 revs = scmutil.revrange(repo, revrange)
2912 2912 q.finish(repo, revs)
2913 2913 q.savedirty()
2914 2914 return 0
2915 2915
2916 2916 @command("qqueue",
2917 2917 [('l', 'list', False, _('list all available queues')),
2918 2918 ('', 'active', False, _('print name of active queue')),
2919 2919 ('c', 'create', False, _('create new queue')),
2920 2920 ('', 'rename', False, _('rename active queue')),
2921 2921 ('', 'delete', False, _('delete reference to queue')),
2922 2922 ('', 'purge', False, _('delete queue, and remove patch dir')),
2923 2923 ],
2924 2924 _('[OPTION] [QUEUE]'))
2925 2925 def qqueue(ui, repo, name=None, **opts):
2926 2926 '''manage multiple patch queues
2927 2927
2928 2928 Supports switching between different patch queues, as well as creating
2929 2929 new patch queues and deleting existing ones.
2930 2930
2931 2931 Omitting a queue name or specifying -l/--list will show you the registered
2932 2932 queues - by default the "normal" patches queue is registered. The currently
2933 2933 active queue will be marked with "(active)". Specifying --active will print
2934 2934 only the name of the active queue.
2935 2935
    To create a new queue, use -c/--create. The queue is automatically made
    active, except when there are applied patches from the currently active
    queue in the repository; in that case the new queue is only created, and
    switching to it will fail.
2940 2940
2941 2941 To delete an existing queue, use --delete. You cannot delete the currently
2942 2942 active queue.
2943 2943
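    For example (queue name illustrative), to start work in a fresh queue
    and later switch back to the default queue::

      hg qqueue --create refactoring
      hg qqueue patches
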
2944 2944 Returns 0 on success.
2945 2945 '''
2946 2946
2947 2947 q = repo.mq
2948 2948
2949 2949 _defaultqueue = 'patches'
2950 2950 _allqueues = 'patches.queues'
2951 2951 _activequeue = 'patches.queue'
2952 2952
2953 2953 def _getcurrent():
2954 2954 cur = os.path.basename(q.path)
2955 2955 if cur.startswith('patches-'):
2956 2956 cur = cur[8:]
2957 2957 return cur
2958 2958
2959 2959 def _noqueues():
2960 2960 try:
2961 2961 fh = repo.opener(_allqueues, 'r')
2962 2962 fh.close()
2963 2963 except IOError:
2964 2964 return True
2965 2965
2966 2966 return False
2967 2967
2968 2968 def _getqueues():
2969 2969 current = _getcurrent()
2970 2970
2971 2971 try:
2972 2972 fh = repo.opener(_allqueues, 'r')
2973 2973 queues = [queue.strip() for queue in fh if queue.strip()]
2974 2974 fh.close()
2975 2975 if current not in queues:
2976 2976 queues.append(current)
2977 2977 except IOError:
2978 2978 queues = [_defaultqueue]
2979 2979
2980 2980 return sorted(queues)
2981 2981
2982 2982 def _setactive(name):
2983 2983 if q.applied:
2984 2984 raise util.Abort(_('patches applied - cannot set new queue active'))
2985 2985 _setactivenocheck(name)
2986 2986
2987 2987 def _setactivenocheck(name):
2988 2988 fh = repo.opener(_activequeue, 'w')
2989 2989 if name != 'patches':
2990 2990 fh.write(name)
2991 2991 fh.close()
2992 2992
2993 2993 def _addqueue(name):
2994 2994 fh = repo.opener(_allqueues, 'a')
2995 2995 fh.write('%s\n' % (name,))
2996 2996 fh.close()
2997 2997
2998 2998 def _queuedir(name):
2999 2999 if name == 'patches':
3000 3000 return repo.join('patches')
3001 3001 else:
3002 3002 return repo.join('patches-' + name)
3003 3003
3004 3004 def _validname(name):
3005 3005 for n in name:
3006 3006 if n in ':\\/.':
3007 3007 return False
3008 3008 return True
3009 3009
3010 3010 def _delete(name):
3011 3011 if name not in existing:
3012 3012 raise util.Abort(_('cannot delete queue that does not exist'))
3013 3013
3014 3014 current = _getcurrent()
3015 3015
3016 3016 if name == current:
3017 3017 raise util.Abort(_('cannot delete currently active queue'))
3018 3018
3019 3019 fh = repo.opener('patches.queues.new', 'w')
3020 3020 for queue in existing:
3021 3021 if queue == name:
3022 3022 continue
3023 3023 fh.write('%s\n' % (queue,))
3024 3024 fh.close()
3025 3025 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3026 3026
3027 3027 if not name or opts.get('list') or opts.get('active'):
3028 3028 current = _getcurrent()
3029 3029 if opts.get('active'):
3030 3030 ui.write('%s\n' % (current,))
3031 3031 return
3032 3032 for queue in _getqueues():
3033 3033 ui.write('%s' % (queue,))
3034 3034 if queue == current and not ui.quiet:
3035 3035 ui.write(_(' (active)\n'))
3036 3036 else:
3037 3037 ui.write('\n')
3038 3038 return
3039 3039
3040 3040 if not _validname(name):
3041 3041 raise util.Abort(
3042 3042 _('invalid queue name, may not contain the characters ":\\/."'))
3043 3043
3044 3044 existing = _getqueues()
3045 3045
3046 3046 if opts.get('create'):
3047 3047 if name in existing:
3048 3048 raise util.Abort(_('queue "%s" already exists') % name)
3049 3049 if _noqueues():
3050 3050 _addqueue(_defaultqueue)
3051 3051 _addqueue(name)
3052 3052 _setactive(name)
3053 3053 elif opts.get('rename'):
3054 3054 current = _getcurrent()
3055 3055 if name == current:
3056 3056 raise util.Abort(_('can\'t rename "%s" to its current name') % name)
3057 3057 if name in existing:
3058 3058 raise util.Abort(_('queue "%s" already exists') % name)
3059 3059
3060 3060 olddir = _queuedir(current)
3061 3061 newdir = _queuedir(name)
3062 3062
3063 3063 if os.path.exists(newdir):
3064 3064 raise util.Abort(_('non-queue directory "%s" already exists') %
3065 3065 newdir)
3066 3066
3067 3067 fh = repo.opener('patches.queues.new', 'w')
3068 3068 for queue in existing:
3069 3069 if queue == current:
3070 3070 fh.write('%s\n' % (name,))
3071 3071 if os.path.exists(olddir):
3072 3072 util.rename(olddir, newdir)
3073 3073 else:
3074 3074 fh.write('%s\n' % (queue,))
3075 3075 fh.close()
3076 3076 util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
3077 3077 _setactivenocheck(name)
3078 3078 elif opts.get('delete'):
3079 3079 _delete(name)
3080 3080 elif opts.get('purge'):
3081 3081 if name in existing:
3082 3082 _delete(name)
3083 3083 qdir = _queuedir(name)
3084 3084 if os.path.exists(qdir):
3085 3085 shutil.rmtree(qdir)
3086 3086 else:
3087 3087 if name not in existing:
3088 3088 raise util.Abort(_('use --create to create a new queue'))
3089 3089 _setactive(name)
3090 3090
3091 3091 def reposetup(ui, repo):
3092 3092 class mqrepo(repo.__class__):
3093 3093 @util.propertycache
3094 3094 def mq(self):
3095 3095 return queue(self.ui, self.join(""))
3096 3096
3097 3097 def abortifwdirpatched(self, errmsg, force=False):
3098 3098 if self.mq.applied and not force:
3099 3099 parents = self.dirstate.parents()
3100 3100 patches = [s.node for s in self.mq.applied]
3101 3101 if parents[0] in patches or parents[1] in patches:
3102 3102 raise util.Abort(errmsg)
3103 3103
3104 3104 def commit(self, text="", user=None, date=None, match=None,
3105 3105 force=False, editor=False, extra={}):
3106 3106 self.abortifwdirpatched(
3107 3107 _('cannot commit over an applied mq patch'),
3108 3108 force)
3109 3109
3110 3110 return super(mqrepo, self).commit(text, user, date, match, force,
3111 3111 editor, extra)
3112 3112
3113 3113 def checkpush(self, force, revs):
3114 3114 if self.mq.applied and not force:
3115 3115 haspatches = True
3116 3116 if revs:
3117 3117 # Assume applied patches have no non-patch descendants
3118 3118 # and are not on remote already. If they appear in the
3119 3119 # set of resolved 'revs', bail out.
3120 3120 applied = set(e.node for e in self.mq.applied)
3121 3121 haspatches = bool([n for n in revs if n in applied])
3122 3122 if haspatches:
3123 3123 raise util.Abort(_('source has mq patches applied'))
3124 3124 super(mqrepo, self).checkpush(force, revs)
3125 3125
3126 3126 def _findtags(self):
3127 3127 '''augment tags from base class with patch tags'''
3128 3128 result = super(mqrepo, self)._findtags()
3129 3129
3130 3130 q = self.mq
3131 3131 if not q.applied:
3132 3132 return result
3133 3133
3134 3134 mqtags = [(patch.node, patch.name) for patch in q.applied]
3135 3135
3136 3136 try:
3137 3137 self.changelog.rev(mqtags[-1][0])
3138 3138 except error.LookupError:
3139 3139 self.ui.warn(_('mq status file refers to unknown node %s\n')
3140 3140 % short(mqtags[-1][0]))
3141 3141 return result
3142 3142
3143 3143 mqtags.append((mqtags[-1][0], 'qtip'))
3144 3144 mqtags.append((mqtags[0][0], 'qbase'))
3145 3145 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
3146 3146 tags = result[0]
3147 3147 for patch in mqtags:
3148 3148 if patch[1] in tags:
3149 3149 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
3150 3150 % patch[1])
3151 3151 else:
3152 3152 tags[patch[1]] = patch[0]
3153 3153
3154 3154 return result
3155 3155
3156 3156 def _branchtags(self, partial, lrev):
3157 3157 q = self.mq
3158 3158 if not q.applied:
3159 3159 return super(mqrepo, self)._branchtags(partial, lrev)
3160 3160
3161 3161 cl = self.changelog
3162 3162 qbasenode = q.applied[0].node
3163 3163 try:
3164 3164 qbase = cl.rev(qbasenode)
3165 3165 except error.LookupError:
3166 3166 self.ui.warn(_('mq status file refers to unknown node %s\n')
3167 3167 % short(qbasenode))
3168 3168 return super(mqrepo, self)._branchtags(partial, lrev)
3169 3169
3170 3170 start = lrev + 1
3171 3171 if start < qbase:
3172 3172 # update the cache (excluding the patches) and save it
3173 3173 ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
3174 3174 self._updatebranchcache(partial, ctxgen)
3175 3175 self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
3176 3176 start = qbase
3177 3177 # if start == qbase, the cache is as updated as it should be.
3178 3178 # if start > qbase, the cache includes (part of) the patches.
3179 3179 # we might as well use it, but we won't save it.
3180 3180
3181 3181 # update the cache up to the tip
3182 3182 ctxgen = (self[r] for r in xrange(start, len(cl)))
3183 3183 self._updatebranchcache(partial, ctxgen)
3184 3184
3185 3185 return partial
3186 3186
3187 3187 if repo.local():
3188 3188 repo.__class__ = mqrepo
3189 3189
3190 3190 def mqimport(orig, ui, repo, *args, **kwargs):
3191 3191 if (hasattr(repo, 'abortifwdirpatched')
3192 3192 and not kwargs.get('no_commit', False)):
3193 3193 repo.abortifwdirpatched(_('cannot import over an applied patch'),
3194 3194 kwargs.get('force'))
3195 3195 return orig(ui, repo, *args, **kwargs)
3196 3196
3197 3197 def mqinit(orig, ui, *args, **kwargs):
3198 3198 mq = kwargs.pop('mq', None)
3199 3199
3200 3200 if not mq:
3201 3201 return orig(ui, *args, **kwargs)
3202 3202
3203 3203 if args:
3204 3204 repopath = args[0]
3205 3205 if not hg.islocal(repopath):
3206 3206 raise util.Abort(_('only a local queue repository '
3207 3207 'may be initialized'))
3208 3208 else:
3209 3209 repopath = cmdutil.findrepo(os.getcwd())
3210 3210 if not repopath:
3211 3211 raise util.Abort(_('there is no Mercurial repository here '
3212 3212 '(.hg not found)'))
3213 3213 repo = hg.repository(ui, repopath)
3214 3214 return qinit(ui, repo, True)
3215 3215
3216 3216 def mqcommand(orig, ui, repo, *args, **kwargs):
3217 3217 """Add --mq option to operate on patch repository instead of main"""
3218 3218
3219 3219 # some commands do not like getting unknown options
3220 3220 mq = kwargs.pop('mq', None)
3221 3221
3222 3222 if not mq:
3223 3223 return orig(ui, repo, *args, **kwargs)
3224 3224
3225 3225 q = repo.mq
3226 3226 r = q.qrepo()
3227 3227 if not r:
3228 3228 raise util.Abort(_('no queue repository'))
3229 3229 return orig(r.ui, r, *args, **kwargs)
3230 3230
3231 3231 def summary(orig, ui, repo, *args, **kwargs):
3232 3232 r = orig(ui, repo, *args, **kwargs)
3233 3233 q = repo.mq
3234 3234 m = []
3235 3235 a, u = len(q.applied), len(q.unapplied(repo))
3236 3236 if a:
3237 3237 m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
3238 3238 if u:
3239 3239 m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
3240 3240 if m:
3241 3241 ui.write("mq: %s\n" % ', '.join(m))
3242 3242 else:
3243 3243 ui.note(_("mq: (empty queue)\n"))
3244 3244 return r
3245 3245
3246 3246 def revsetmq(repo, subset, x):
3247 3247 """``mq()``
3248 3248 Changesets managed by MQ.
3249 3249 """
3250 3250 revset.getargs(x, 0, 0, _("mq takes no arguments"))
3251 3251 applied = set([repo[r.node].rev() for r in repo.mq.applied])
3252 3252 return [r for r in subset if r in applied]
3253 3253
3254 3254 def extsetup(ui):
3255 3255 revset.symbols['mq'] = revsetmq
3256 3256
3257 3257 # tell hggettext to extract docstrings from these functions:
3258 3258 i18nfunctions = [revsetmq]
3259 3259
3260 3260 def uisetup(ui):
3261 3261 mqopt = [('', 'mq', None, _("operate on patch repository"))]
3262 3262
3263 3263 extensions.wrapcommand(commands.table, 'import', mqimport)
3264 3264 extensions.wrapcommand(commands.table, 'summary', summary)
3265 3265
3266 3266 entry = extensions.wrapcommand(commands.table, 'init', mqinit)
3267 3267 entry[1].extend(mqopt)
3268 3268
3269 3269 nowrap = set(commands.norepo.split(" "))
3270 3270
3271 3271 def dotable(cmdtable):
3272 3272 for cmd in cmdtable.keys():
3273 3273 cmd = cmdutil.parsealiases(cmd)[0]
3274 3274 if cmd in nowrap:
3275 3275 continue
3276 3276 entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
3277 3277 entry[1].extend(mqopt)
3278 3278
3279 3279 dotable(commands.table)
3280 3280
3281 3281 for extname, extmodule in extensions.extensions():
3282 3282 if extmodule.__file__ != __file__:
3283 3283 dotable(getattr(extmodule, 'cmdtable', {}))
3284 3284
3285 3285
3286 3286 colortable = {'qguard.negative': 'red',
3287 3287 'qguard.positive': 'yellow',
3288 3288 'qguard.unguarded': 'green',
3289 3289 'qseries.applied': 'blue bold underline',
3290 3290 'qseries.guarded': 'black bold',
3291 3291 'qseries.missing': 'red bold',
3292 3292 'qseries.unapplied': 'black bold'}
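
The command-wrapping machinery above (mqcommand and dotable) follows a generic pattern: pop a private option before the wrapped command ever sees it, then either delegate to the original or redirect it to another repository. A minimal standalone sketch of that pattern; all names here are hypothetical and there are no Mercurial dependencies, only the control flow mirrors the extension:

    def wrapcommand(table, name, wrapper):
        # replace table[name] with a closure that receives the original
        # first, mirroring what extensions.wrapcommand() does above
        orig = table[name]
        def wrapped(*args, **kwargs):
            return wrapper(orig, *args, **kwargs)
        table[name] = wrapped

    def log(repo, **opts):
        return 'log of %s' % repo

    def mqoption(orig, repo, **opts):
        # pop the private flag so the original never sees an unknown option
        if opts.pop('mq', None):
            repo = repo + '/.hg/patches'   # redirect to the patch repository
        return orig(repo, **opts)

    table = {'log': log}
    wrapcommand(table, 'log', mqoption)
    assert table['log']('repo') == 'log of repo'
    assert table['log']('repo', mq=True) == 'log of repo/.hg/patches'

The real wrapper additionally refuses to run when no patch repository exists, as mqcommand does above.
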
@@ -1,284 +1,284 b''
1 1 # archival.py - revision archival for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 from node import hex
10 10 import cmdutil
11 11 import scmutil, util, encoding
12 12 import cStringIO, os, tarfile, time, zipfile
13 13 import zlib, gzip
14 14
15 15 def tidyprefix(dest, kind, prefix):
16 16 '''choose prefix to use for names in archive. make sure prefix is
17 17 safe for consumers.'''
18 18
19 19 if prefix:
20 20 prefix = util.normpath(prefix)
21 21 else:
22 22 if not isinstance(dest, str):
23 23 raise ValueError('dest must be string if no prefix')
24 24 prefix = os.path.basename(dest)
25 25 lower = prefix.lower()
26 26 for sfx in exts.get(kind, []):
27 27 if lower.endswith(sfx):
28 28 prefix = prefix[:-len(sfx)]
29 29 break
30 30 lpfx = os.path.normpath(util.localpath(prefix))
31 31 prefix = util.pconvert(lpfx)
32 32 if not prefix.endswith('/'):
33 33 prefix += '/'
34 34 if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
35 35 raise util.Abort(_('archive prefix contains illegal components'))
36 36 return prefix
37 37
38 38 exts = {
39 39 'tar': ['.tar'],
40 40 'tbz2': ['.tbz2', '.tar.bz2'],
41 41 'tgz': ['.tgz', '.tar.gz'],
42 42 'zip': ['.zip'],
43 43 }
44 44
45 45 def guesskind(dest):
46 46 for kind, extensions in exts.iteritems():
47 47 if util.any(dest.endswith(ext) for ext in extensions):
48 48 return kind
49 49 return None
50 50
51 51
52 52 class tarit(object):
53 53 '''write archive to tar file or stream. can write uncompressed,
54 54 or compressed with gzip or bzip2.'''
55 55
56 56 class GzipFileWithTime(gzip.GzipFile):
57 57
58 58 def __init__(self, *args, **kw):
59 59 timestamp = None
60 60 if 'timestamp' in kw:
61 61 timestamp = kw.pop('timestamp')
62 62 if timestamp is None:
63 63 self.timestamp = time.time()
64 64 else:
65 65 self.timestamp = timestamp
66 66 gzip.GzipFile.__init__(self, *args, **kw)
67 67
68 68 def _write_gzip_header(self):
69 69 self.fileobj.write('\037\213') # magic header
70 70 self.fileobj.write('\010') # compression method
71 71 # Python 2.6 deprecates self.filename
72 72 fname = getattr(self, 'name', None) or self.filename
73 73 if fname and fname.endswith('.gz'):
74 74 fname = fname[:-3]
75 75 flags = 0
76 76 if fname:
77 77 flags = gzip.FNAME
78 78 self.fileobj.write(chr(flags))
79 79 gzip.write32u(self.fileobj, long(self.timestamp))
80 80 self.fileobj.write('\002')
81 81 self.fileobj.write('\377')
82 82 if fname:
83 83 self.fileobj.write(fname + '\000')
84 84
85 85 def __init__(self, dest, mtime, kind=''):
86 86 self.mtime = mtime
87 87 self.fileobj = None
88 88
89 89 def taropen(name, mode, fileobj=None):
90 90 if kind == 'gz':
91 91 mode = mode[0]
92 92 if not fileobj:
93 93 fileobj = open(name, mode + 'b')
94 94 gzfileobj = self.GzipFileWithTime(name, mode + 'b',
95 95 zlib.Z_BEST_COMPRESSION,
96 96 fileobj, timestamp=mtime)
97 97 self.fileobj = gzfileobj
98 98 return tarfile.TarFile.taropen(name, mode, gzfileobj)
99 99 else:
100 100 self.fileobj = fileobj
101 101 return tarfile.open(name, mode + kind, fileobj)
102 102
103 103 if isinstance(dest, str):
104 104 self.z = taropen(dest, mode='w:')
105 105 else:
106 106 # Python 2.5-2.5.1 have a regression that requires a name arg
107 107 self.z = taropen(name='', mode='w|', fileobj=dest)
108 108
109 109 def addfile(self, name, mode, islink, data):
110 110 i = tarfile.TarInfo(name)
111 111 i.mtime = self.mtime
112 112 i.size = len(data)
113 113 if islink:
114 114 i.type = tarfile.SYMTYPE
115 115 i.mode = 0777
116 116 i.linkname = data
117 117 data = None
118 118 i.size = 0
119 119 else:
120 120 i.mode = mode
121 121 data = cStringIO.StringIO(data)
122 122 self.z.addfile(i, data)
123 123
124 124 def done(self):
125 125 self.z.close()
126 126 if self.fileobj:
127 127 self.fileobj.close()
128 128
129 129 class tellable(object):
130 130 '''provide tell method for zipfile.ZipFile when writing to http
131 131 response file object.'''
132 132
133 133 def __init__(self, fp):
134 134 self.fp = fp
135 135 self.offset = 0
136 136
137 137 def __getattr__(self, key):
138 138 return getattr(self.fp, key)
139 139
140 140 def write(self, s):
141 141 self.fp.write(s)
142 142 self.offset += len(s)
143 143
144 144 def tell(self):
145 145 return self.offset
146 146
147 147 class zipit(object):
148 148 '''write archive to zip file or stream. can write uncompressed,
149 149 or compressed with deflate.'''
150 150
151 151 def __init__(self, dest, mtime, compress=True):
152 152 if not isinstance(dest, str):
153 153 try:
154 154 dest.tell()
155 155 except (AttributeError, IOError):
156 156 dest = tellable(dest)
157 157 self.z = zipfile.ZipFile(dest, 'w',
158 158 compress and zipfile.ZIP_DEFLATED or
159 159 zipfile.ZIP_STORED)
160 160
161 161 # Python's zipfile module emits deprecation warnings if we try
162 162 # to store files with a date before 1980.
163 163 epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
164 164 if mtime < epoch:
165 165 mtime = epoch
166 166
167 167 self.date_time = time.gmtime(mtime)[:6]
168 168
169 169 def addfile(self, name, mode, islink, data):
170 170 i = zipfile.ZipInfo(name, self.date_time)
171 171 i.compress_type = self.z.compression
172 172 # unzip will not honor unix file modes unless file creator is
173 173 # set to unix (id 3).
174 174 i.create_system = 3
175 175 ftype = 0x8000 # UNX_IFREG in unzip source code
176 176 if islink:
177 177 mode = 0777
178 178 ftype = 0xa000 # UNX_IFLNK in unzip source code
179 179 i.external_attr = (mode | ftype) << 16L
180 180 self.z.writestr(i, data)
181 181
182 182 def done(self):
183 183 self.z.close()
184 184
185 185 class fileit(object):
186 186 '''write archive as files in directory.'''
187 187
188 188 def __init__(self, name, mtime):
189 189 self.basedir = name
190 190 self.opener = scmutil.opener(self.basedir)
191 191
192 192 def addfile(self, name, mode, islink, data):
193 193 if islink:
194 194 self.opener.symlink(data, name)
195 195 return
196 196 f = self.opener(name, "w", atomictemp=True)
197 197 f.write(data)
198 f.rename()
198 f.close()
199 199 destfile = os.path.join(self.basedir, name)
200 200 os.chmod(destfile, mode)
201 201
202 202 def done(self):
203 203 pass
204 204
205 205 archivers = {
206 206 'files': fileit,
207 207 'tar': tarit,
208 208 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
209 209 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
210 210 'uzip': lambda name, mtime: zipit(name, mtime, False),
211 211 'zip': zipit,
212 212 }
213 213
214 214 def archive(repo, dest, node, kind, decode=True, matchfn=None,
215 215 prefix=None, mtime=None, subrepos=False):
216 216 '''create archive of repo as it was at node.
217 217
218 218 dest can be name of directory, name of archive file, or file
219 219 object to write archive to.
220 220
221 221 kind is type of archive to create.
222 222
223 223 decode tells whether to put files through decode filters from
224 224 hgrc.
225 225
226 226 matchfn is function to filter names of files to write to archive.
227 227
228 228 prefix is name of path to put before every archive member.'''
229 229
230 230 if kind == 'files':
231 231 if prefix:
232 232 raise util.Abort(_('cannot give prefix when archiving to files'))
233 233 else:
234 234 prefix = tidyprefix(dest, kind, prefix)
235 235
236 236 def write(name, mode, islink, getdata):
237 237 if matchfn and not matchfn(name):
238 238 return
239 239 data = getdata()
240 240 if decode:
241 241 data = repo.wwritedata(name, data)
242 242 archiver.addfile(prefix + name, mode, islink, data)
243 243
244 244 if kind not in archivers:
245 245 raise util.Abort(_("unknown archive type '%s'") % kind)
246 246
247 247 ctx = repo[node]
248 248 archiver = archivers[kind](dest, mtime or ctx.date()[0])
249 249
250 250 if repo.ui.configbool("ui", "archivemeta", True):
251 251 def metadata():
252 252 base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
253 253 repo[0].hex(), hex(node), encoding.fromlocal(ctx.branch()))
254 254
255 255 tags = ''.join('tag: %s\n' % t for t in ctx.tags()
256 256 if repo.tagtype(t) == 'global')
257 257 if not tags:
258 258 repo.ui.pushbuffer()
259 259 opts = {'template': '{latesttag}\n{latesttagdistance}',
260 260 'style': '', 'patch': None, 'git': None}
261 261 cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
262 262 ltags, dist = repo.ui.popbuffer().split('\n')
263 263 tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
264 264 tags += 'latesttagdistance: %s\n' % dist
265 265
266 266 return base + tags
267 267
268 268 write('.hg_archival.txt', 0644, False, metadata)
269 269
270 270 total = len(ctx.manifest())
271 271 repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total)
272 272 for i, f in enumerate(ctx):
273 273 ff = ctx.flags(f)
274 274 write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)
275 275 repo.ui.progress(_('archiving'), i + 1, item=f,
276 276 unit=_('files'), total=total)
277 277 repo.ui.progress(_('archiving'), None)
278 278
279 279 if subrepos:
280 280 for subpath in ctx.substate:
281 281 sub = ctx.sub(subpath)
282 282 sub.archive(repo.ui, archiver, prefix)
283 283
284 284 archiver.done()
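
The one-line change in fileit.addfile() above (old line 198, rename() replaced by close()) reflects the atomictempfile contract named in the commit message: writes go to a temporary file, and close() now commits it into place, consistent with other file-like objects. A rough standard-library sketch of that contract; this is an illustration of the idea, not Mercurial's util.atomictempfile:

    import os, tempfile

    class atomicfile(object):
        # sketch: buffer writes in a temp file in the same directory, then
        # rename over the destination on close(), so readers never observe
        # a partially written file (rename is atomic on POSIX; Windows
        # needs extra care when the target already exists)
        def __init__(self, name):
            self._name = name
            fd, self._temp = tempfile.mkstemp(
                dir=os.path.dirname(name) or '.', prefix='.tmp-')
            self._fp = os.fdopen(fd, 'wb')

        def write(self, data):
            self._fp.write(data)

        def close(self):
            self._fp.close()
            os.rename(self._temp, self._name)

        def discard(self):
            self._fp.close()
            os.unlink(self._temp)

    f = atomicfile('example.txt')
    f.write(b'hello\n')
    f.close()            # only now does example.txt appear
    os.unlink('example.txt')

The same rename()-to-close() substitution recurs in bookmarks.py, dirstate.py and hbisect.py below.
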
@@ -1,213 +1,213 b''
1 1 # Mercurial bookmark support code
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from mercurial.i18n import _
9 9 from mercurial.node import hex
10 10 from mercurial import encoding, error, util
11 11 import errno, os
12 12
13 13 def valid(mark):
14 14 for c in (':', '\0', '\n', '\r'):
15 15 if c in mark:
16 16 return False
17 17 return True
18 18
19 19 def read(repo):
20 20 '''Parse .hg/bookmarks file and return a dictionary
21 21
22 22 Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
23 23 in the .hg/bookmarks file.
24 24 Read the file and return a (name=>nodeid) dictionary
25 25 '''
26 26 bookmarks = {}
27 27 try:
28 28 for line in repo.opener('bookmarks'):
29 29 line = line.strip()
30 30 if not line:
31 31 continue
32 32 if ' ' not in line:
33 33 repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line)
34 34 continue
35 35 sha, refspec = line.split(' ', 1)
36 36 refspec = encoding.tolocal(refspec)
37 37 try:
38 38 bookmarks[refspec] = repo.changelog.lookup(sha)
39 39 except error.RepoLookupError:
40 40 pass
41 41 except IOError, inst:
42 42 if inst.errno != errno.ENOENT:
43 43 raise
44 44 return bookmarks
45 45
46 46 def readcurrent(repo):
47 47 '''Get the current bookmark
48 48
49 49 If we use gittish branches we have a current bookmark that
50 50 we are on. This function returns the name of the bookmark. It
51 51 is stored in .hg/bookmarks.current
52 52 '''
53 53 mark = None
54 54 try:
55 55 file = repo.opener('bookmarks.current')
56 56 except IOError, inst:
57 57 if inst.errno != errno.ENOENT:
58 58 raise
59 59 return None
60 60 try:
61 61 # No readline() in posixfile_nt, reading everything is cheap
62 62 mark = encoding.tolocal((file.readlines() or [''])[0])
63 63 if mark == '' or mark not in repo._bookmarks:
64 64 mark = None
65 65 finally:
66 66 file.close()
67 67 return mark
68 68
69 69 def write(repo):
70 70 '''Write bookmarks
71 71
72 72 Write the given bookmark => hash dictionary to the .hg/bookmarks file
73 73 in a format equal to those of localtags.
74 74
75 75 We also store a backup of the previous state in undo.bookmarks that
76 76 can be copied back on rollback.
77 77 '''
78 78 refs = repo._bookmarks
79 79
80 80 if repo._bookmarkcurrent not in refs:
81 81 setcurrent(repo, None)
82 82 for mark in refs.keys():
83 83 if not valid(mark):
84 84 raise util.Abort(_("bookmark '%s' contains illegal "
85 85 "character" % mark))
86 86
87 87 wlock = repo.wlock()
88 88 try:
89 89
90 90 file = repo.opener('bookmarks', 'w', atomictemp=True)
91 91 for refspec, node in refs.iteritems():
92 92 file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
93 file.rename()
93 file.close()
94 94
95 95 # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
96 96 try:
97 97 os.utime(repo.sjoin('00changelog.i'), None)
98 98 except OSError:
99 99 pass
100 100
101 101 finally:
102 102 wlock.release()
103 103
104 104 def setcurrent(repo, mark):
105 105 '''Set the name of the bookmark that we are currently on
106 106
107 107 Set the name of the bookmark that we are on (hg update <bookmark>).
108 108 The name is recorded in .hg/bookmarks.current
109 109 '''
110 110 current = repo._bookmarkcurrent
111 111 if current == mark:
112 112 return
113 113
114 114 if mark not in repo._bookmarks:
115 115 mark = ''
116 116 if not valid(mark):
117 117 raise util.Abort(_("bookmark '%s' contains illegal "
118 118 "character" % mark))
119 119
120 120 wlock = repo.wlock()
121 121 try:
122 122 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
123 123 file.write(encoding.fromlocal(mark))
124 file.rename()
124 file.close()
125 125 finally:
126 126 wlock.release()
127 127 repo._bookmarkcurrent = mark
128 128
129 129 def updatecurrentbookmark(repo, oldnode, curbranch):
130 130 try:
131 131 update(repo, oldnode, repo.branchtags()[curbranch])
132 132 except KeyError:
133 133 if curbranch == "default": # no default branch!
134 134 update(repo, oldnode, repo.lookup("tip"))
135 135 else:
136 136 raise util.Abort(_("branch %s not found") % curbranch)
137 137
138 138 def update(repo, parents, node):
139 139 marks = repo._bookmarks
140 140 update = False
141 141 mark = repo._bookmarkcurrent
142 142 if mark and marks[mark] in parents:
143 143 old = repo[marks[mark]]
144 144 new = repo[node]
145 145 if new in old.descendants():
146 146 marks[mark] = new.node()
147 147 update = True
148 148 if update:
149 149 write(repo)
150 150
151 151 def listbookmarks(repo):
152 152 # We may try to list bookmarks on a repo type that does not
153 153 # support it (e.g., statichttprepository).
154 154 marks = getattr(repo, '_bookmarks', {})
155 155
156 156 d = {}
157 157 for k, v in marks.iteritems():
158 158 d[k] = hex(v)
159 159 return d
160 160
161 161 def pushbookmark(repo, key, old, new):
162 162 w = repo.wlock()
163 163 try:
164 164 marks = repo._bookmarks
165 165 if hex(marks.get(key, '')) != old:
166 166 return False
167 167 if new == '':
168 168 del marks[key]
169 169 else:
170 170 if new not in repo:
171 171 return False
172 172 marks[key] = repo[new].node()
173 173 write(repo)
174 174 return True
175 175 finally:
176 176 w.release()
177 177
178 178 def updatefromremote(ui, repo, remote):
179 179 ui.debug("checking for updated bookmarks\n")
180 180 rb = remote.listkeys('bookmarks')
181 181 changed = False
182 182 for k in rb.keys():
183 183 if k in repo._bookmarks:
184 184 nr, nl = rb[k], repo._bookmarks[k]
185 185 if nr in repo:
186 186 cr = repo[nr]
187 187 cl = repo[nl]
188 188 if cl.rev() >= cr.rev():
189 189 continue
190 190 if cr in cl.descendants():
191 191 repo._bookmarks[k] = cr.node()
192 192 changed = True
193 193 ui.status(_("updating bookmark %s\n") % k)
194 194 else:
195 195 ui.warn(_("not updating divergent"
196 196 " bookmark %s\n") % k)
197 197 if changed:
198 198 write(repo)
199 199
200 200 def diff(ui, repo, remote):
201 201 ui.status(_("searching for changed bookmarks\n"))
202 202
203 203 lmarks = repo.listkeys('bookmarks')
204 204 rmarks = remote.listkeys('bookmarks')
205 205
206 206 diff = sorted(set(rmarks) - set(lmarks))
207 207 for k in diff:
208 208 ui.write(" %-25s %s\n" % (k, rmarks[k][:12]))
209 209
210 210 if len(diff) <= 0:
211 211 ui.status(_("no changed bookmarks found\n"))
212 212 return 1
213 213 return 0
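
read() above parses one '<hex-node> <name>' record per line and tolerates blank or malformed lines. A self-contained round-trip sketch of that format using hypothetical data; the real code additionally resolves each hash via repo.changelog.lookup() and converts names with encoding.tolocal():

    def parsebookmarks(lines):
        marks = {}
        for line in lines:
            line = line.strip()
            if not line:
                continue
            if ' ' not in line:
                continue          # read() warns about such lines instead
            sha, name = line.split(' ', 1)
            marks[name] = sha
        return marks

    def formatbookmarks(marks):
        return ['%s %s\n' % (sha, name)
                for name, sha in sorted(marks.items())]

    sample = ['0123456789abcdef0123456789abcdef01234567 feature-x\n', '\n']
    marks = parsebookmarks(sample)
    assert marks == {'feature-x': '0123456789abcdef0123456789abcdef01234567'}
    assert parsebookmarks(formatbookmarks(marks)) == marks
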
@@ -1,721 +1,721 b''
1 1 # dirstate.py - working directory tracking for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import nullid
9 9 from i18n import _
10 10 import scmutil, util, ignore, osutil, parsers, encoding
11 11 import struct, os, stat, errno
12 12 import cStringIO
13 13
14 14 _format = ">cllll"
15 15 propertycache = util.propertycache
16 16
17 17 def _finddirs(path):
18 18 pos = path.rfind('/')
19 19 while pos != -1:
20 20 yield path[:pos]
21 21 pos = path.rfind('/', 0, pos)
22 22
23 23 def _incdirs(dirs, path):
24 24 for base in _finddirs(path):
25 25 if base in dirs:
26 26 dirs[base] += 1
27 27 return
28 28 dirs[base] = 1
29 29
30 30 def _decdirs(dirs, path):
31 31 for base in _finddirs(path):
32 32 if dirs[base] > 1:
33 33 dirs[base] -= 1
34 34 return
35 35 del dirs[base]
36 36
37 37 class dirstate(object):
38 38
39 39 def __init__(self, opener, ui, root, validate):
40 40 '''Create a new dirstate object.
41 41
42 42 opener is an open()-like callable that can be used to open the
43 43 dirstate file; root is the root of the directory tracked by
44 44 the dirstate.
45 45 '''
46 46 self._opener = opener
47 47 self._validate = validate
48 48 self._root = root
49 49 self._rootdir = os.path.join(root, '')
50 50 self._dirty = False
51 51 self._dirtypl = False
52 52 self._lastnormaltime = None
53 53 self._ui = ui
54 54
55 55 @propertycache
56 56 def _map(self):
57 57 '''Return the dirstate contents as a map from filename to
58 58 (state, mode, size, time).'''
59 59 self._read()
60 60 return self._map
61 61
62 62 @propertycache
63 63 def _copymap(self):
64 64 self._read()
65 65 return self._copymap
66 66
67 67 @propertycache
68 68 def _foldmap(self):
69 69 f = {}
70 70 for name in self._map:
71 71 f[os.path.normcase(name)] = name
72 72 return f
73 73
74 74 @propertycache
75 75 def _branch(self):
76 76 try:
77 77 return self._opener.read("branch").strip() or "default"
78 78 except IOError:
79 79 return "default"
80 80
81 81 @propertycache
82 82 def _pl(self):
83 83 try:
84 84 fp = self._opener("dirstate")
85 85 st = fp.read(40)
86 86 fp.close()
87 87 l = len(st)
88 88 if l == 40:
89 89 return st[:20], st[20:40]
90 90 elif l > 0 and l < 40:
91 91 raise util.Abort(_('working directory state appears damaged!'))
92 92 except IOError, err:
93 93 if err.errno != errno.ENOENT:
94 94 raise
95 95 return [nullid, nullid]
96 96
97 97 @propertycache
98 98 def _dirs(self):
99 99 dirs = {}
100 100 for f, s in self._map.iteritems():
101 101 if s[0] != 'r':
102 102 _incdirs(dirs, f)
103 103 return dirs
104 104
105 105 @propertycache
106 106 def _ignore(self):
107 107 files = [self._join('.hgignore')]
108 108 for name, path in self._ui.configitems("ui"):
109 109 if name == 'ignore' or name.startswith('ignore.'):
110 110 files.append(util.expandpath(path))
111 111 return ignore.ignore(self._root, files, self._ui.warn)
112 112
113 113 @propertycache
114 114 def _slash(self):
115 115 return self._ui.configbool('ui', 'slash') and os.sep != '/'
116 116
117 117 @propertycache
118 118 def _checklink(self):
119 119 return util.checklink(self._root)
120 120
121 121 @propertycache
122 122 def _checkexec(self):
123 123 return util.checkexec(self._root)
124 124
125 125 @propertycache
126 126 def _checkcase(self):
127 127 return not util.checkcase(self._join('.hg'))
128 128
129 129 def _join(self, f):
130 130 # much faster than os.path.join()
131 131 # it's safe because f is always a relative path
132 132 return self._rootdir + f
133 133
134 134 def flagfunc(self, fallback):
135 135 if self._checklink:
136 136 if self._checkexec:
137 137 def f(x):
138 138 p = self._join(x)
139 139 if os.path.islink(p):
140 140 return 'l'
141 141 if util.isexec(p):
142 142 return 'x'
143 143 return ''
144 144 return f
145 145 def f(x):
146 146 if os.path.islink(self._join(x)):
147 147 return 'l'
148 148 if 'x' in fallback(x):
149 149 return 'x'
150 150 return ''
151 151 return f
152 152 if self._checkexec:
153 153 def f(x):
154 154 if 'l' in fallback(x):
155 155 return 'l'
156 156 if util.isexec(self._join(x)):
157 157 return 'x'
158 158 return ''
159 159 return f
160 160 return fallback
161 161
162 162 def getcwd(self):
163 163 cwd = os.getcwd()
164 164 if cwd == self._root:
165 165 return ''
166 166 # self._root ends with a path separator if self._root is '/' or 'C:\'
167 167 rootsep = self._root
168 168 if not util.endswithsep(rootsep):
169 169 rootsep += os.sep
170 170 if cwd.startswith(rootsep):
171 171 return cwd[len(rootsep):]
172 172 else:
173 173 # we're outside the repo. return an absolute path.
174 174 return cwd
175 175
176 176 def pathto(self, f, cwd=None):
177 177 if cwd is None:
178 178 cwd = self.getcwd()
179 179 path = util.pathto(self._root, cwd, f)
180 180 if self._slash:
181 181 return util.normpath(path)
182 182 return path
183 183
184 184 def __getitem__(self, key):
185 185 '''Return the current state of key (a filename) in the dirstate.
186 186
187 187 States are:
188 188 n normal
189 189 m needs merging
190 190 r marked for removal
191 191 a marked for addition
192 192 ? not tracked
193 193 '''
194 194 return self._map.get(key, ("?",))[0]
195 195
196 196 def __contains__(self, key):
197 197 return key in self._map
198 198
199 199 def __iter__(self):
200 200 for x in sorted(self._map):
201 201 yield x
202 202
203 203 def parents(self):
204 204 return [self._validate(p) for p in self._pl]
205 205
206 206 def p1(self):
207 207 return self._validate(self._pl[0])
208 208
209 209 def p2(self):
210 210 return self._validate(self._pl[1])
211 211
212 212 def branch(self):
213 213 return encoding.tolocal(self._branch)
214 214
215 215 def setparents(self, p1, p2=nullid):
216 216 self._dirty = self._dirtypl = True
217 217 self._pl = p1, p2
218 218
219 219 def setbranch(self, branch):
220 220 if branch in ['tip', '.', 'null']:
221 221 raise util.Abort(_('the name \'%s\' is reserved') % branch)
222 222 self._branch = encoding.fromlocal(branch)
223 223 self._opener.write("branch", self._branch + '\n')
224 224
225 225 def _read(self):
226 226 self._map = {}
227 227 self._copymap = {}
228 228 try:
229 229 st = self._opener.read("dirstate")
230 230 except IOError, err:
231 231 if err.errno != errno.ENOENT:
232 232 raise
233 233 return
234 234 if not st:
235 235 return
236 236
237 237 p = parsers.parse_dirstate(self._map, self._copymap, st)
238 238 if not self._dirtypl:
239 239 self._pl = p
240 240
241 241 def invalidate(self):
242 242 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
243 243 "_ignore"):
244 244 if a in self.__dict__:
245 245 delattr(self, a)
246 246 self._lastnormaltime = None
247 247 self._dirty = False
248 248
249 249 def copy(self, source, dest):
250 250 """Mark dest as a copy of source. Unmark dest if source is None."""
251 251 if source == dest:
252 252 return
253 253 self._dirty = True
254 254 if source is not None:
255 255 self._copymap[dest] = source
256 256 elif dest in self._copymap:
257 257 del self._copymap[dest]
258 258
259 259 def copied(self, file):
260 260 return self._copymap.get(file, None)
261 261
262 262 def copies(self):
263 263 return self._copymap
264 264
265 265 def _droppath(self, f):
266 266 if self[f] not in "?r" and "_dirs" in self.__dict__:
267 267 _decdirs(self._dirs, f)
268 268
269 269 def _addpath(self, f, check=False):
270 270 oldstate = self[f]
271 271 if check or oldstate == "r":
272 272 scmutil.checkfilename(f)
273 273 if f in self._dirs:
274 274 raise util.Abort(_('directory %r already in dirstate') % f)
275 275 # shadows
276 276 for d in _finddirs(f):
277 277 if d in self._dirs:
278 278 break
279 279 if d in self._map and self[d] != 'r':
280 280 raise util.Abort(
281 281 _('file %r in dirstate clashes with %r') % (d, f))
282 282 if oldstate in "?r" and "_dirs" in self.__dict__:
283 283 _incdirs(self._dirs, f)
284 284
285 285 def normal(self, f):
286 286 '''Mark a file normal and clean.'''
287 287 self._dirty = True
288 288 self._addpath(f)
289 289 s = os.lstat(self._join(f))
290 290 mtime = int(s.st_mtime)
291 291 self._map[f] = ('n', s.st_mode, s.st_size, mtime)
292 292 if f in self._copymap:
293 293 del self._copymap[f]
294 294 if mtime > self._lastnormaltime:
295 295 # Remember the most recent modification timeslot for status(),
296 296 # to make sure we won't miss future size-preserving file content
297 297 # modifications that happen within the same timeslot.
298 298 self._lastnormaltime = mtime
299 299
300 300 def normallookup(self, f):
301 301 '''Mark a file normal, but possibly dirty.'''
302 302 if self._pl[1] != nullid and f in self._map:
303 303 # if there is a merge going on and the file was either
304 304 # in state 'm' (-1) or coming from other parent (-2) before
305 305 # being removed, restore that state.
306 306 entry = self._map[f]
307 307 if entry[0] == 'r' and entry[2] in (-1, -2):
308 308 source = self._copymap.get(f)
309 309 if entry[2] == -1:
310 310 self.merge(f)
311 311 elif entry[2] == -2:
312 312 self.otherparent(f)
313 313 if source:
314 314 self.copy(source, f)
315 315 return
316 316 if entry[0] == 'm' or (entry[0] == 'n' and entry[2] == -2):
317 317 return
318 318 self._dirty = True
319 319 self._addpath(f)
320 320 self._map[f] = ('n', 0, -1, -1)
321 321 if f in self._copymap:
322 322 del self._copymap[f]
323 323
324 324 def otherparent(self, f):
325 325 '''Mark as coming from the other parent, always dirty.'''
326 326 if self._pl[1] == nullid:
327 327 raise util.Abort(_("setting %r to other parent "
328 328 "only allowed in merges") % f)
329 329 self._dirty = True
330 330 self._addpath(f)
331 331 self._map[f] = ('n', 0, -2, -1)
332 332 if f in self._copymap:
333 333 del self._copymap[f]
334 334
335 335 def add(self, f):
336 336 '''Mark a file added.'''
337 337 self._dirty = True
338 338 self._addpath(f, True)
339 339 self._map[f] = ('a', 0, -1, -1)
340 340 if f in self._copymap:
341 341 del self._copymap[f]
342 342
343 343 def remove(self, f):
344 344 '''Mark a file removed.'''
345 345 self._dirty = True
346 346 self._droppath(f)
347 347 size = 0
348 348 if self._pl[1] != nullid and f in self._map:
349 349 # backup the previous state
350 350 entry = self._map[f]
351 351 if entry[0] == 'm': # merge
352 352 size = -1
353 353 elif entry[0] == 'n' and entry[2] == -2: # other parent
354 354 size = -2
355 355 self._map[f] = ('r', 0, size, 0)
356 356 if size == 0 and f in self._copymap:
357 357 del self._copymap[f]
358 358
359 359 def merge(self, f):
360 360 '''Mark a file merged.'''
361 361 self._dirty = True
362 362 s = os.lstat(self._join(f))
363 363 self._addpath(f)
364 364 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
365 365 if f in self._copymap:
366 366 del self._copymap[f]
367 367
368 368 def drop(self, f):
369 369 '''Drop a file from the dirstate'''
370 370 self._dirty = True
371 371 self._droppath(f)
372 372 del self._map[f]
373 373
374 374 def _normalize(self, path, isknown):
375 375 normed = os.path.normcase(path)
376 376 folded = self._foldmap.get(normed, None)
377 377 if folded is None:
378 378 if isknown or not os.path.lexists(os.path.join(self._root, path)):
379 379 folded = path
380 380 else:
381 381 folded = self._foldmap.setdefault(normed,
382 382 util.fspath(path, self._root))
383 383 return folded
384 384
385 385 def normalize(self, path, isknown=False):
386 386 '''
387 387 normalize the case of a pathname when on a casefolding filesystem
388 388
389 389 isknown specifies whether the filename came from walking the
390 390 disk, to avoid extra filesystem access
391 391
392 392 The normalized case is determined based on the following precedence:
393 393
394 394 - version of name already stored in the dirstate
395 395 - version of name stored on disk
396 396 - version provided via command arguments
397 397 '''
398 398
399 399 if self._checkcase:
400 400 return self._normalize(path, isknown)
401 401 return path
402 402
403 403 def clear(self):
404 404 self._map = {}
405 405 if "_dirs" in self.__dict__:
406 406 delattr(self, "_dirs")
407 407 self._copymap = {}
408 408 self._pl = [nullid, nullid]
409 409 self._lastnormaltime = None
410 410 self._dirty = True
411 411
412 412 def rebuild(self, parent, files):
413 413 self.clear()
414 414 for f in files:
415 415 if 'x' in files.flags(f):
416 416 self._map[f] = ('n', 0777, -1, 0)
417 417 else:
418 418 self._map[f] = ('n', 0666, -1, 0)
419 419 self._pl = (parent, nullid)
420 420 self._dirty = True
421 421
422 422 def write(self):
423 423 if not self._dirty:
424 424 return
425 425 st = self._opener("dirstate", "w", atomictemp=True)
426 426
427 427 # use the modification time of the newly created temporary file as the
428 428 # filesystem's notion of 'now'
429 429 now = int(util.fstat(st).st_mtime)
430 430
431 431 cs = cStringIO.StringIO()
432 432 copymap = self._copymap
433 433 pack = struct.pack
434 434 write = cs.write
435 435 write("".join(self._pl))
436 436 for f, e in self._map.iteritems():
437 437 if e[0] == 'n' and e[3] == now:
438 438 # The file was last modified "simultaneously" with the current
439 439 # write to dirstate (i.e. within the same second for file-
440 440 # systems with a granularity of 1 sec). This commonly happens
441 441 # for at least a couple of files on 'update'.
442 442 # The user could change the file without changing its size
443 443 # within the same second. Invalidate the file's stat data in
444 444 # dirstate, forcing future 'status' calls to compare the
445 445 # contents of the file. This prevents mistakenly treating such
446 446 # files as clean.
447 447 e = (e[0], 0, -1, -1) # mark entry as 'unset'
448 448 self._map[f] = e
449 449
450 450 if f in copymap:
451 451 f = "%s\0%s" % (f, copymap[f])
452 452 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
453 453 write(e)
454 454 write(f)
455 455 st.write(cs.getvalue())
456 st.rename()
456 st.close()
457 457 self._lastnormaltime = None
458 458 self._dirty = self._dirtypl = False
459 459
460 460 def _dirignore(self, f):
461 461 if f == '.':
462 462 return False
463 463 if self._ignore(f):
464 464 return True
465 465 for p in _finddirs(f):
466 466 if self._ignore(p):
467 467 return True
468 468 return False
469 469
470 470 def walk(self, match, subrepos, unknown, ignored):
471 471 '''
472 472 Walk recursively through the directory tree, finding all files
473 473 matched by match.
474 474
475 475 Return a dict mapping filename to stat-like object (either
476 476 mercurial.osutil.stat instance or return value of os.stat()).
477 477 '''
478 478
479 479 def fwarn(f, msg):
480 480 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
481 481 return False
482 482
483 483 def badtype(mode):
484 484 kind = _('unknown')
485 485 if stat.S_ISCHR(mode):
486 486 kind = _('character device')
487 487 elif stat.S_ISBLK(mode):
488 488 kind = _('block device')
489 489 elif stat.S_ISFIFO(mode):
490 490 kind = _('fifo')
491 491 elif stat.S_ISSOCK(mode):
492 492 kind = _('socket')
493 493 elif stat.S_ISDIR(mode):
494 494 kind = _('directory')
495 495 return _('unsupported file type (type is %s)') % kind
496 496
497 497 ignore = self._ignore
498 498 dirignore = self._dirignore
499 499 if ignored:
500 500 ignore = util.never
501 501 dirignore = util.never
502 502 elif not unknown:
503 503 # if unknown and ignored are False, skip step 2
504 504 ignore = util.always
505 505 dirignore = util.always
506 506
507 507 matchfn = match.matchfn
508 508 badfn = match.bad
509 509 dmap = self._map
510 510 normpath = util.normpath
511 511 listdir = osutil.listdir
512 512 lstat = os.lstat
513 513 getkind = stat.S_IFMT
514 514 dirkind = stat.S_IFDIR
515 515 regkind = stat.S_IFREG
516 516 lnkkind = stat.S_IFLNK
517 517 join = self._join
518 518 work = []
519 519 wadd = work.append
520 520
521 521 exact = skipstep3 = False
522 522 if matchfn == match.exact: # match.exact
523 523 exact = True
524 524 dirignore = util.always # skip step 2
525 525 elif match.files() and not match.anypats(): # match.match, no patterns
526 526 skipstep3 = True
527 527
528 528 if self._checkcase:
529 529 normalize = self._normalize
530 530 skipstep3 = False
531 531 else:
532 532 normalize = lambda x, y: x
533 533
534 534 files = sorted(match.files())
535 535 subrepos.sort()
536 536 i, j = 0, 0
537 537 while i < len(files) and j < len(subrepos):
538 538 subpath = subrepos[j] + "/"
539 539 if files[i] < subpath:
540 540 i += 1
541 541 continue
542 542 while i < len(files) and files[i].startswith(subpath):
543 543 del files[i]
544 544 j += 1
545 545
546 546 if not files or '.' in files:
547 547 files = ['']
548 548 results = dict.fromkeys(subrepos)
549 549 results['.hg'] = None
550 550
551 551 # step 1: find all explicit files
552 552 for ff in files:
553 553 nf = normalize(normpath(ff), False)
554 554 if nf in results:
555 555 continue
556 556
557 557 try:
558 558 st = lstat(join(nf))
559 559 kind = getkind(st.st_mode)
560 560 if kind == dirkind:
561 561 skipstep3 = False
562 562 if nf in dmap:
563 563 # file deleted on disk but still in dirstate
564 564 results[nf] = None
565 565 match.dir(nf)
566 566 if not dirignore(nf):
567 567 wadd(nf)
568 568 elif kind == regkind or kind == lnkkind:
569 569 results[nf] = st
570 570 else:
571 571 badfn(ff, badtype(kind))
572 572 if nf in dmap:
573 573 results[nf] = None
574 574 except OSError, inst:
575 575 if nf in dmap: # does it exactly match a file?
576 576 results[nf] = None
577 577 else: # does it match a directory?
578 578 prefix = nf + "/"
579 579 for fn in dmap:
580 580 if fn.startswith(prefix):
581 581 match.dir(nf)
582 582 skipstep3 = False
583 583 break
584 584 else:
585 585 badfn(ff, inst.strerror)
586 586
587 587 # step 2: visit subdirectories
588 588 while work:
589 589 nd = work.pop()
590 590 skip = None
591 591 if nd == '.':
592 592 nd = ''
593 593 else:
594 594 skip = '.hg'
595 595 try:
596 596 entries = listdir(join(nd), stat=True, skip=skip)
597 597 except OSError, inst:
598 598 if inst.errno == errno.EACCES:
599 599 fwarn(nd, inst.strerror)
600 600 continue
601 601 raise
602 602 for f, kind, st in entries:
603 603 nf = normalize(nd and (nd + "/" + f) or f, True)
604 604 if nf not in results:
605 605 if kind == dirkind:
606 606 if not ignore(nf):
607 607 match.dir(nf)
608 608 wadd(nf)
609 609 if nf in dmap and matchfn(nf):
610 610 results[nf] = None
611 611 elif kind == regkind or kind == lnkkind:
612 612 if nf in dmap:
613 613 if matchfn(nf):
614 614 results[nf] = st
615 615 elif matchfn(nf) and not ignore(nf):
616 616 results[nf] = st
617 617 elif nf in dmap and matchfn(nf):
618 618 results[nf] = None
619 619
620 620 # step 3: report unseen items in the dmap hash
621 621 if not skipstep3 and not exact:
622 622 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
623 623 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
624 624 if st is not None and getkind(st.st_mode) not in (regkind, lnkkind):
625 625 st = None
626 626 results[nf] = st
627 627 for s in subrepos:
628 628 del results[s]
629 629 del results['.hg']
630 630 return results
631 631
632 632 def status(self, match, subrepos, ignored, clean, unknown):
633 633 '''Determine the status of the working copy relative to the
634 634 dirstate and return a tuple of lists (unsure, modified, added,
635 635 removed, deleted, unknown, ignored, clean), where:
636 636
637 637 unsure:
638 638 files that might have been modified since the dirstate was
639 639 written, but need to be read to be sure (size is the same
640 640 but mtime differs)
641 641 modified:
642 642 files that have definitely been modified since the dirstate
643 643 was written (different size or mode)
644 644 added:
645 645 files that have been explicitly added with hg add
646 646 removed:
647 647 files that have been explicitly removed with hg remove
648 648 deleted:
649 649 files that have been deleted through other means ("missing")
650 650 unknown:
651 651 files not in the dirstate that are not ignored
652 652 ignored:
653 653 files not in the dirstate that are ignored
654 654 (by _dirignore())
655 655 clean:
656 656 files that have definitely not been modified since the
657 657 dirstate was written
658 658 '''
659 659 listignored, listclean, listunknown = ignored, clean, unknown
660 660 lookup, modified, added, unknown, ignored = [], [], [], [], []
661 661 removed, deleted, clean = [], [], []
662 662
663 663 dmap = self._map
664 664 ladd = lookup.append # aka "unsure"
665 665 madd = modified.append
666 666 aadd = added.append
667 667 uadd = unknown.append
668 668 iadd = ignored.append
669 669 radd = removed.append
670 670 dadd = deleted.append
671 671 cadd = clean.append
672 672
673 673 lnkkind = stat.S_IFLNK
674 674
675 675 for fn, st in self.walk(match, subrepos, listunknown,
676 676 listignored).iteritems():
677 677 if fn not in dmap:
678 678 if (listignored or match.exact(fn)) and self._dirignore(fn):
679 679 if listignored:
680 680 iadd(fn)
681 681 elif listunknown:
682 682 uadd(fn)
683 683 continue
684 684
685 685 state, mode, size, time = dmap[fn]
686 686
687 687 if not st and state in "nma":
688 688 dadd(fn)
689 689 elif state == 'n':
690 690 # The "mode & lnkkind != lnkkind or self._checklink"
691 691 # lines are an expansion of "islink => checklink"
692 692 # where islink means "is this a link?" and checklink
693 693 # means "can we check links?".
694 694 mtime = int(st.st_mtime)
695 695 if (size >= 0 and
696 696 (size != st.st_size
697 697 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
698 698 and (mode & lnkkind != lnkkind or self._checklink)
699 699 or size == -2 # other parent
700 700 or fn in self._copymap):
701 701 madd(fn)
702 702 elif (mtime != time
703 703 and (mode & lnkkind != lnkkind or self._checklink)):
704 704 ladd(fn)
705 705 elif mtime == self._lastnormaltime:
706 706 # fn may have been changed in the same timeslot without
707 707 # changing its size. This can happen if we quickly do
708 708 # multiple commits in a single transaction.
709 709 # Force lookup, so we don't miss such a racy file change.
710 710 ladd(fn)
711 711 elif listclean:
712 712 cadd(fn)
713 713 elif state == 'm':
714 714 madd(fn)
715 715 elif state == 'a':
716 716 aadd(fn)
717 717 elif state == 'r':
718 718 radd(fn)
719 719
720 720 return (lookup, modified, added, removed, deleted, unknown, ignored,
721 721 clean)
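
The comment block in write() above is the subtle part of this file: a file rewritten within the same second as the cached stat keeps its mtime, and if its size also matches, the cached data cannot prove it is clean, so the entry is stored 'unset' to force a later content comparison. A small demonstration of the ambiguity; timing-dependent, but the rewrite usually lands in the same second, and the file name is of course hypothetical:

    import os

    with open('racy.txt', 'w') as f:
        f.write('aaaa')
    cached = os.stat('racy.txt')          # what the dirstate would record

    with open('racy.txt', 'w') as f:      # same size, new content
        f.write('bbbb')
    st = os.stat('racy.txt')

    if (int(st.st_mtime) == int(cached.st_mtime)
            and st.st_size == cached.st_size):
        # stat alone cannot distinguish this from a clean file; storing
        # the entry as (state, 0, -1, -1) forces status() to read contents
        print('stat data inconclusive; contents must be compared')
    os.unlink('racy.txt')
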
@@ -1,156 +1,156 b''
1 1 # changelog bisection for mercurial
2 2 #
3 3 # Copyright 2007 Matt Mackall
4 4 # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
5 5 #
6 6 # Inspired by git bisect, extension skeleton taken from mq.py.
7 7 #
8 8 # This software may be used and distributed according to the terms of the
9 9 # GNU General Public License version 2 or any later version.
10 10
11 11 import os
12 12 from i18n import _
13 13 from node import short, hex
14 14 import util
15 15
16 16 def bisect(changelog, state):
17 17 """find the next node (if any) for testing during a bisect search.
18 18 returns a (nodes, number, good) tuple.
19 19
20 20 'nodes' is the final result of the bisect if 'number' is 0.
21 21 Otherwise 'number' indicates the remaining possible candidates for
22 22 the search and 'nodes' contains the next bisect target.
23 23 'good' is True if bisect is searching for a first good changeset, False
24 24 if searching for a first bad one.
25 25 """
26 26
27 27 clparents = changelog.parentrevs
28 28 skip = set([changelog.rev(n) for n in state['skip']])
29 29
30 30 def buildancestors(bad, good):
31 31 # only the earliest bad revision matters
32 32 badrev = min([changelog.rev(n) for n in bad])
33 33 goodrevs = [changelog.rev(n) for n in good]
34 34 goodrev = min(goodrevs)
35 35 # build visit array
36 36 ancestors = [None] * (len(changelog) + 1) # an extra for [-1]
37 37
38 38 # set nodes descended from goodrevs
39 39 for rev in goodrevs:
40 40 ancestors[rev] = []
41 41 for rev in xrange(goodrev + 1, len(changelog)):
42 42 for prev in clparents(rev):
43 43 if ancestors[prev] == []:
44 44 ancestors[rev] = []
45 45
46 46 # clear good revs from array
47 47 for rev in goodrevs:
48 48 ancestors[rev] = None
49 49 for rev in xrange(len(changelog), goodrev, -1):
50 50 if ancestors[rev] is None:
51 51 for prev in clparents(rev):
52 52 ancestors[prev] = None
53 53
54 54 if ancestors[badrev] is None:
55 55 return badrev, None
56 56 return badrev, ancestors
57 57
58 58 good = False
59 59 badrev, ancestors = buildancestors(state['bad'], state['good'])
60 60 if not ancestors: # looking for bad to good transition?
61 61 good = True
62 62 badrev, ancestors = buildancestors(state['good'], state['bad'])
63 63 bad = changelog.node(badrev)
64 64 if not ancestors: # now we're confused
65 65 if len(state['bad']) == 1 and len(state['good']) == 1:
66 66 raise util.Abort(_("starting revisions are not directly related"))
67 67 raise util.Abort(_("inconsistent state, %s:%s is good and bad")
68 68 % (badrev, short(bad)))
69 69
70 70 # build children dict
71 71 children = {}
72 72 visit = [badrev]
73 73 candidates = []
74 74 while visit:
75 75 rev = visit.pop(0)
76 76 if ancestors[rev] == []:
77 77 candidates.append(rev)
78 78 for prev in clparents(rev):
79 79 if prev != -1:
80 80 if prev in children:
81 81 children[prev].append(rev)
82 82 else:
83 83 children[prev] = [rev]
84 84 visit.append(prev)
85 85
86 86 candidates.sort()
87 87 # have we narrowed it down to one entry?
88 88 # or have all other possible candidates besides 'bad' been skipped?
89 89 tot = len(candidates)
90 90 unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
91 91 if tot == 1 or not unskipped:
92 92 return ([changelog.node(rev) for rev in candidates], 0, good)
93 93 perfect = tot // 2
94 94
95 95 # find the best node to test
96 96 best_rev = None
97 97 best_len = -1
98 98 poison = set()
99 99 for rev in candidates:
100 100 if rev in poison:
101 101 # poison children
102 102 poison.update(children.get(rev, []))
103 103 continue
104 104
105 105 a = ancestors[rev] or [rev]
106 106 ancestors[rev] = None
107 107
108 108 x = len(a) # number of ancestors
109 109 y = tot - x # number of non-ancestors
110 110 value = min(x, y) # how good is this test?
111 111 if value > best_len and rev not in skip:
112 112 best_len = value
113 113 best_rev = rev
114 114 if value == perfect: # found a perfect candidate? quit early
115 115 break
116 116
117 117 if y < perfect and rev not in skip: # all downhill from here?
118 118 # poison children
119 119 poison.update(children.get(rev, []))
120 120 continue
121 121
122 122 for c in children.get(rev, []):
123 123 if ancestors[c]:
124 124 ancestors[c] = list(set(ancestors[c] + a))
125 125 else:
126 126 ancestors[c] = a + [c]
127 127
128 128 assert best_rev is not None
129 129 best_node = changelog.node(best_rev)
130 130
131 131 return ([best_node], tot, good)
132 132
133 133
134 134 def load_state(repo):
135 135 state = {'good': [], 'bad': [], 'skip': []}
136 136 if os.path.exists(repo.join("bisect.state")):
137 137 for l in repo.opener("bisect.state"):
138 138 kind, node = l[:-1].split()
139 139 node = repo.lookup(node)
140 140 if kind not in state:
141 141 raise util.Abort(_("unknown bisect kind %s") % kind)
142 142 state[kind].append(node)
143 143 return state
144 144
145 145
146 146 def save_state(repo, state):
147 147 f = repo.opener("bisect.state", "w", atomictemp=True)
148 148 wlock = repo.wlock()
149 149 try:
150 150 for kind in state:
151 151 for node in state[kind]:
152 152 f.write("%s %s\n" % (kind, hex(node)))
153 f.rename()
153 f.close()
154 154 finally:
155 155 wlock.release()
156 156
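
The scoring loop in bisect() above picks the candidate maximizing value = min(x, y), where x is the number of candidate ancestors a test of that revision would confirm and y = tot - x the number it would rule out; min(x, y) is therefore the progress guaranteed whichever way the test comes out. A worked example with tot = 8 candidates:

    tot = 8
    perfect = tot // 2
    for x in range(1, tot):
        y = tot - x                   # non-ancestors among the candidates
        note = ' <- perfect split' if min(x, y) == perfect else ''
        print('x=%d y=%d value=%d%s' % (x, y, min(x, y), note))
    # value peaks at tot // 2, the even split of binary search, which is
    # why the loop above quits early when it finds such a candidate
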
@@ -1,2058 +1,2058 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error, revset
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20 filecache = scmutil.filecache
21 21
22 22 class localrepository(repo.repository):
23 23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 24 'known', 'getbundle'))
25 25 supportedformats = set(('revlogv1', 'generaldelta'))
26 26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 27 'dotencode'))
28 28
29 29 def __init__(self, baseui, path=None, create=False):
30 30 repo.repository.__init__(self)
31 31 self.root = os.path.realpath(util.expandpath(path))
32 32 self.path = os.path.join(self.root, ".hg")
33 33 self.origroot = path
34 34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 35 self.opener = scmutil.opener(self.path)
36 36 self.wopener = scmutil.opener(self.root)
37 37 self.baseui = baseui
38 38 self.ui = baseui.copy()
39 39
40 40 try:
41 41 self.ui.readconfig(self.join("hgrc"), self.root)
42 42 extensions.loadall(self.ui)
43 43 except IOError:
44 44 pass
45 45
46 46 if not os.path.isdir(self.path):
47 47 if create:
48 48 if not os.path.exists(path):
49 49 util.makedirs(path)
50 50 util.makedir(self.path, notindexed=True)
51 51 requirements = ["revlogv1"]
52 52 if self.ui.configbool('format', 'usestore', True):
53 53 os.mkdir(os.path.join(self.path, "store"))
54 54 requirements.append("store")
55 55 if self.ui.configbool('format', 'usefncache', True):
56 56 requirements.append("fncache")
57 57 if self.ui.configbool('format', 'dotencode', True):
58 58 requirements.append('dotencode')
59 59 # create an invalid changelog
60 60 self.opener.append(
61 61 "00changelog.i",
62 62 '\0\0\0\2' # represents revlogv2
63 63 ' dummy changelog to prevent using the old repo layout'
64 64 )
65 65 if self.ui.configbool('format', 'generaldelta', False):
66 66 requirements.append("generaldelta")
67 67 requirements = set(requirements)
68 68 else:
69 69 raise error.RepoError(_("repository %s not found") % path)
70 70 elif create:
71 71 raise error.RepoError(_("repository %s already exists") % path)
72 72 else:
73 73 try:
74 74 requirements = scmutil.readrequires(self.opener, self.supported)
75 75 except IOError, inst:
76 76 if inst.errno != errno.ENOENT:
77 77 raise
78 78 requirements = set()
79 79
80 80 self.sharedpath = self.path
81 81 try:
82 82 s = os.path.realpath(self.opener.read("sharedpath"))
83 83 if not os.path.exists(s):
84 84 raise error.RepoError(
85 85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 86 self.sharedpath = s
87 87 except IOError, inst:
88 88 if inst.errno != errno.ENOENT:
89 89 raise
90 90
91 91 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 92 self.spath = self.store.path
93 93 self.sopener = self.store.opener
94 94 self.sjoin = self.store.join
95 95 self.opener.createmode = self.store.createmode
96 96 self._applyrequirements(requirements)
97 97 if create:
98 98 self._writerequirements()
99 99
100 100
101 101 self._branchcache = None
102 102 self._branchcachetip = None
103 103 self.filterpats = {}
104 104 self._datafilters = {}
105 105 self._transref = self._lockref = self._wlockref = None
106 106
107 107 # A cache for various files under .hg/ that tracks file changes,
108 108 # (used by the filecache decorator)
109 109 #
110 110 # Maps a property name to its util.filecacheentry
111 111 self._filecache = {}
112 112
113 113 def _applyrequirements(self, requirements):
114 114 self.requirements = requirements
115 115 openerreqs = set(('revlogv1', 'generaldelta'))
116 116 self.sopener.options = dict((r, 1) for r in requirements
117 117 if r in openerreqs)
118 118
119 119 def _writerequirements(self):
120 120 reqfile = self.opener("requires", "w")
121 121 for r in self.requirements:
122 122 reqfile.write("%s\n" % r)
123 123 reqfile.close()
124 124
125 125 def _checknested(self, path):
126 126 """Determine if path is a legal nested repository."""
127 127 if not path.startswith(self.root):
128 128 return False
129 129 subpath = path[len(self.root) + 1:]
130 130
131 131 # XXX: Checking against the current working copy is wrong in
132 132 # the sense that it can reject things like
133 133 #
134 134 # $ hg cat -r 10 sub/x.txt
135 135 #
136 136 # if sub/ is no longer a subrepository in the working copy
137 137 # parent revision.
138 138 #
139 139 # However, it can of course also allow things that would have
140 140 # been rejected before, such as the above cat command if sub/
141 141 # is a subrepository now, but was a normal directory before.
142 142 # The old path auditor would have rejected by mistake since it
143 143 # panics when it sees sub/.hg/.
144 144 #
145 145 # All in all, checking against the working copy seems sensible
146 146 # since we want to prevent access to nested repositories on
147 147 # the filesystem *now*.
148 148 ctx = self[None]
149 149 parts = util.splitpath(subpath)
150 150 while parts:
151 151 prefix = os.sep.join(parts)
152 152 if prefix in ctx.substate:
153 153 if prefix == subpath:
154 154 return True
155 155 else:
156 156 sub = ctx.sub(prefix)
157 157 return sub.checknested(subpath[len(prefix) + 1:])
158 158 else:
159 159 parts.pop()
160 160 return False
161 161
162 162 @filecache('bookmarks')
163 163 def _bookmarks(self):
164 164 return bookmarks.read(self)
165 165
166 166 @filecache('bookmarks.current')
167 167 def _bookmarkcurrent(self):
168 168 return bookmarks.readcurrent(self)
169 169
170 170 @filecache('00changelog.i', True)
171 171 def changelog(self):
172 172 c = changelog.changelog(self.sopener)
173 173 if 'HG_PENDING' in os.environ:
174 174 p = os.environ['HG_PENDING']
175 175 if p.startswith(self.root):
176 176 c.readpending('00changelog.i.a')
177 177 return c
178 178
179 179 @filecache('00manifest.i', True)
180 180 def manifest(self):
181 181 return manifest.manifest(self.sopener)
182 182
183 183 @filecache('dirstate')
184 184 def dirstate(self):
185 185 warned = [0]
186 186 def validate(node):
187 187 try:
188 188 self.changelog.rev(node)
189 189 return node
190 190 except error.LookupError:
191 191 if not warned[0]:
192 192 warned[0] = True
193 193 self.ui.warn(_("warning: ignoring unknown"
194 194 " working parent %s!\n") % short(node))
195 195 return nullid
196 196
197 197 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
198 198
199 199 def __getitem__(self, changeid):
200 200 if changeid is None:
201 201 return context.workingctx(self)
202 202 return context.changectx(self, changeid)
203 203
204 204 def __contains__(self, changeid):
205 205 try:
206 206 return bool(self.lookup(changeid))
207 207 except error.RepoLookupError:
208 208 return False
209 209
210 210 def __nonzero__(self):
211 211 return True
212 212
213 213 def __len__(self):
214 214 return len(self.changelog)
215 215
216 216 def __iter__(self):
217 217 for i in xrange(len(self)):
218 218 yield i
219 219
220 220 def set(self, expr, *args):
221 221 '''
222 222 Yield a context for each matching revision, after doing arg
223 223 replacement via revset.formatspec
224 224 '''
225 225
226 226 expr = revset.formatspec(expr, *args)
227 227 m = revset.match(None, expr)
228 228 for r in m(self, range(len(self))):
229 229 yield self[r]
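
A hedged usage sketch of set(): the revset template is filled in by revset.formatspec, which quotes the arguments safely (the repository path here is an assumption; run from inside a repository):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '.')   # assumes cwd is a repository
    # '%ld' expands a list of integer revisions, '%s' a safely quoted
    # string; see revset.formatspec for the other specifiers
    for ctx in repo.set('%ld and not merge()', [0, 1, 2]):
        print ctx.rev(), ctx.description().split('\n')[0]
    for ctx in repo.set('branch(%s) and head()', 'default'):
        print ctx.hex()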
230 230
231 231 def url(self):
232 232 return 'file:' + self.root
233 233
234 234 def hook(self, name, throw=False, **args):
235 235 return hook.hook(self.ui, self, name, throw, **args)
236 236
237 237 tag_disallowed = ':\r\n'
238 238
239 239 def _tag(self, names, node, message, local, user, date, extra={}):
240 240 if isinstance(names, str):
241 241 allchars = names
242 242 names = (names,)
243 243 else:
244 244 allchars = ''.join(names)
245 245 for c in self.tag_disallowed:
246 246 if c in allchars:
247 247 raise util.Abort(_('%r cannot be used in a tag name') % c)
248 248
249 249 branches = self.branchmap()
250 250 for name in names:
251 251 self.hook('pretag', throw=True, node=hex(node), tag=name,
252 252 local=local)
253 253 if name in branches:
254 254 self.ui.warn(_("warning: tag %s conflicts with existing"
255 255 " branch name\n") % name)
256 256
257 257 def writetags(fp, names, munge, prevtags):
258 258 fp.seek(0, 2)
259 259 if prevtags and prevtags[-1] != '\n':
260 260 fp.write('\n')
261 261 for name in names:
262 262 m = munge and munge(name) or name
263 263 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
264 264 old = self.tags().get(name, nullid)
265 265 fp.write('%s %s\n' % (hex(old), m))
266 266 fp.write('%s %s\n' % (hex(node), m))
267 267 fp.close()
268 268
269 269 prevtags = ''
270 270 if local:
271 271 try:
272 272 fp = self.opener('localtags', 'r+')
273 273 except IOError:
274 274 fp = self.opener('localtags', 'a')
275 275 else:
276 276 prevtags = fp.read()
277 277
278 278 # local tags are stored in the current charset
279 279 writetags(fp, names, None, prevtags)
280 280 for name in names:
281 281 self.hook('tag', node=hex(node), tag=name, local=local)
282 282 return
283 283
284 284 try:
285 285 fp = self.wfile('.hgtags', 'rb+')
286 286 except IOError, e:
287 287 if e.errno != errno.ENOENT:
288 288 raise
289 289 fp = self.wfile('.hgtags', 'ab')
290 290 else:
291 291 prevtags = fp.read()
292 292
293 293 # committed tags are stored in UTF-8
294 294 writetags(fp, names, encoding.fromlocal, prevtags)
295 295
296 296 fp.close()
297 297
298 298 if '.hgtags' not in self.dirstate:
299 299 self[None].add(['.hgtags'])
300 300
301 301 m = matchmod.exact(self.root, '', ['.hgtags'])
302 302 tagnode = self.commit(message, user, date, extra=extra, match=m)
303 303
304 304 for name in names:
305 305 self.hook('tag', node=hex(node), tag=name, local=local)
306 306
307 307 return tagnode
308 308
309 309 def tag(self, names, node, message, local, user, date):
310 310 '''tag a revision with one or more symbolic names.
311 311
312 312 names is a list of strings or, when adding a single tag, a single
313 313 string.
314 314
315 315 if local is True, the tags are stored in a per-repository file.
316 316 otherwise, they are stored in the .hgtags file, and a new
317 317 changeset is committed with the change.
318 318
319 319 keyword arguments:
320 320
321 321 local: whether to store tags in non-version-controlled file
322 322 (default False)
323 323
324 324 message: commit message to use if committing
325 325
326 326 user: name of user to use if committing
327 327
328 328 date: date tuple to use if committing'''
329 329
330 330 if not local:
331 331 for x in self.status()[:5]:
332 332 if '.hgtags' in x:
333 333 raise util.Abort(_('working copy of .hgtags is changed '
334 334 '(please commit .hgtags manually)'))
335 335
336 336 self.tags() # instantiate the cache
337 337 self._tag(names, node, message, local, user, date)
338 338
339 339 @propertycache
340 340 def _tagscache(self):
341 341 '''Returns a tagscache object that contains various tag-related caches.'''
342 342
343 343 # This simplifies its cache management by having one decorated
344 344 # function (this one) and the rest simply fetch things from it.
345 345 class tagscache(object):
346 346 def __init__(self):
347 347 # These two define the set of tags for this repository. tags
348 348 # maps tag name to node; tagtypes maps tag name to 'global' or
349 349 # 'local'. (Global tags are defined by .hgtags across all
350 350 # heads, and local tags are defined in .hg/localtags.)
351 351 # They constitute the in-memory cache of tags.
352 352 self.tags = self.tagtypes = None
353 353
354 354 self.nodetagscache = self.tagslist = None
355 355
356 356 cache = tagscache()
357 357 cache.tags, cache.tagtypes = self._findtags()
358 358
359 359 return cache
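
propertycache computes the value once and then stores it on the instance, so later lookups bypass the descriptor entirely; deleting the stored attribute is what invalidation means. A minimal sketch of that pattern (a stand-in for util.propertycache, which works the same way):

    class propertycache(object):
        def __init__(self, func):
            self.func = func
            self.name = func.__name__
        def __get__(self, obj, type=None):
            # compute once, then shadow the descriptor with the result
            result = self.func(obj)
            obj.__dict__[self.name] = result
            return result

    class repo(object):
        @propertycache
        def _tagscache(self):
            print 'computing tags cache'
            return {'tip': 'a' * 40}

    r = repo()
    r._tagscache        # first access: prints, computes, caches
    r._tagscache        # second access: served from r.__dict__
    del r.__dict__['_tagscache']   # invalidation, as invalidatecaches() does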
360 360
361 361 def tags(self):
362 362 '''return a mapping of tag to node'''
363 363 return self._tagscache.tags
364 364
365 365 def _findtags(self):
366 366 '''Do the hard work of finding tags. Return a pair of dicts
367 367 (tags, tagtypes) where tags maps tag name to node, and tagtypes
368 368 maps tag name to a string like \'global\' or \'local\'.
369 369 Subclasses or extensions are free to add their own tags, but
370 370 should be aware that the returned dicts will be retained for the
371 371 duration of the localrepo object.'''
372 372
373 373 # XXX what tagtype should subclasses/extensions use? Currently
374 374 # mq and bookmarks add tags, but do not set the tagtype at all.
375 375 # Should each extension invent its own tag type? Should there
376 376 # be one tagtype for all such "virtual" tags? Or is the status
377 377 # quo fine?
378 378
379 379 alltags = {} # map tag name to (node, hist)
380 380 tagtypes = {}
381 381
382 382 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
383 383 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
384 384
385 385 # Build the return dicts. Have to re-encode tag names because
386 386 # the tags module always uses UTF-8 (in order not to lose info
387 387 # writing to the cache), but the rest of Mercurial wants them in
388 388 # local encoding.
389 389 tags = {}
390 390 for (name, (node, hist)) in alltags.iteritems():
391 391 if node != nullid:
392 392 try:
393 393 # ignore tags to unknown nodes
394 394 self.changelog.lookup(node)
395 395 tags[encoding.tolocal(name)] = node
396 396 except error.LookupError:
397 397 pass
398 398 tags['tip'] = self.changelog.tip()
399 399 tagtypes = dict([(encoding.tolocal(name), value)
400 400 for (name, value) in tagtypes.iteritems()])
401 401 return (tags, tagtypes)
402 402
403 403 def tagtype(self, tagname):
404 404 '''
405 405 return the type of the given tag. result can be:
406 406
407 407 'local' : a local tag
408 408 'global' : a global tag
409 409 None : tag does not exist
410 410 '''
411 411
412 412 return self._tagscache.tagtypes.get(tagname)
413 413
414 414 def tagslist(self):
415 415 '''return a list of tags ordered by revision'''
416 416 if not self._tagscache.tagslist:
417 417 l = []
418 418 for t, n in self.tags().iteritems():
419 419 r = self.changelog.rev(n)
420 420 l.append((r, t, n))
421 421 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
422 422
423 423 return self._tagscache.tagslist
424 424
425 425 def nodetags(self, node):
426 426 '''return the tags associated with a node'''
427 427 if not self._tagscache.nodetagscache:
428 428 nodetagscache = {}
429 429 for t, n in self.tags().iteritems():
430 430 nodetagscache.setdefault(n, []).append(t)
431 431 for tags in nodetagscache.itervalues():
432 432 tags.sort()
433 433 self._tagscache.nodetagscache = nodetagscache
434 434 return self._tagscache.nodetagscache.get(node, [])
435 435
436 436 def nodebookmarks(self, node):
437 437 marks = []
438 438 for bookmark, n in self._bookmarks.iteritems():
439 439 if n == node:
440 440 marks.append(bookmark)
441 441 return sorted(marks)
442 442
443 443 def _branchtags(self, partial, lrev):
444 444 # TODO: rename this function?
445 445 tiprev = len(self) - 1
446 446 if lrev != tiprev:
447 447 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
448 448 self._updatebranchcache(partial, ctxgen)
449 449 self._writebranchcache(partial, self.changelog.tip(), tiprev)
450 450
451 451 return partial
452 452
453 453 def updatebranchcache(self):
454 454 tip = self.changelog.tip()
455 455 if self._branchcache is not None and self._branchcachetip == tip:
456 456 return self._branchcache
457 457
458 458 oldtip = self._branchcachetip
459 459 self._branchcachetip = tip
460 460 if oldtip is None or oldtip not in self.changelog.nodemap:
461 461 partial, last, lrev = self._readbranchcache()
462 462 else:
463 463 lrev = self.changelog.rev(oldtip)
464 464 partial = self._branchcache
465 465
466 466 self._branchtags(partial, lrev)
467 467 # this private cache holds all heads (not just tips)
468 468 self._branchcache = partial
469 469
470 470 def branchmap(self):
471 471 '''returns a dictionary {branch: [branchheads]}'''
472 472 self.updatebranchcache()
473 473 return self._branchcache
474 474
475 475 def branchtags(self):
476 476 '''return a dict where branch names map to the tipmost head of
477 477 the branch; open heads take precedence over closed ones'''
478 478 bt = {}
479 479 for bn, heads in self.branchmap().iteritems():
480 480 tip = heads[-1]
481 481 for h in reversed(heads):
482 482 if 'close' not in self.changelog.read(h)[5]:
483 483 tip = h
484 484 break
485 485 bt[bn] = tip
486 486 return bt
487 487
488 488 def _readbranchcache(self):
489 489 partial = {}
490 490 try:
491 491 f = self.opener("cache/branchheads")
492 492 lines = f.read().split('\n')
493 493 f.close()
494 494 except (IOError, OSError):
495 495 return {}, nullid, nullrev
496 496
497 497 try:
498 498 last, lrev = lines.pop(0).split(" ", 1)
499 499 last, lrev = bin(last), int(lrev)
500 500 if lrev >= len(self) or self[lrev].node() != last:
501 501 # invalidate the cache
502 502 raise ValueError('invalidating branch cache (tip differs)')
503 503 for l in lines:
504 504 if not l:
505 505 continue
506 506 node, label = l.split(" ", 1)
507 507 label = encoding.tolocal(label.strip())
508 508 partial.setdefault(label, []).append(bin(node))
509 509 except KeyboardInterrupt:
510 510 raise
511 511 except Exception, inst:
512 512 if self.ui.debugflag:
513 513 self.ui.warn(str(inst), '\n')
514 514 partial, last, lrev = {}, nullid, nullrev
515 515 return partial, last, lrev
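
The cache file parsed here is one header line, 'tip-node-hex tip-rev', followed by one 'node-hex branchname' line per branch head; if the recorded tip no longer matches reality, the whole cache is discarded. A self-contained round-trip sketch of that format (fake all-'a'/'b' hashes and hypothetical branch labels):

    tiphex = 'a' * 40
    lines = ['%s 1200' % tiphex,          # header: tip node + tip rev
             '%s default' % tiphex,       # one 'node label' line per head
             '%s stable' % ('b' * 40)]
    data = '\n'.join(lines) + '\n'

    # parse it the same way _readbranchcache does
    rest = data.split('\n')
    last, lrev = rest.pop(0).split(' ', 1)
    heads = {}
    for l in rest:
        if not l:
            continue
        node, label = l.split(' ', 1)
        heads.setdefault(label.strip(), []).append(node)
    print lrev, sorted(heads.keys())      # -> 1200 ['default', 'stable']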
516 516
517 517 def _writebranchcache(self, branches, tip, tiprev):
518 518 try:
519 519 f = self.opener("cache/branchheads", "w", atomictemp=True)
520 520 f.write("%s %s\n" % (hex(tip), tiprev))
521 521 for label, nodes in branches.iteritems():
522 522 for node in nodes:
523 523 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
524 f.rename()
524 f.close()
525 525 except (IOError, OSError):
526 526 pass
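
The paired 524 lines above are this section's one real diff hunk: with atomictemp=True the opener hands back an atomictempfile, and per this changeset its close() now finishes the atomic write (previously done by rename()), consistent with other file-like objects. A standalone sketch of the underlying write-to-temp-then-rename pattern, using only os and tempfile rather than Mercurial's atomictempfile:

    import os, tempfile

    def atomicwrite(path, data):
        # write to a temp file in the target's directory, then rename
        # it over the target so readers never see a half-written file
        dirname = os.path.dirname(path) or '.'
        fd, tmp = tempfile.mkstemp(dir=dirname)
        try:
            os.write(fd, data)
            os.close(fd)
            os.rename(tmp, path)    # atomic on POSIX, same filesystem
        except:
            if os.path.exists(tmp):
                os.unlink(tmp)
            raise

    atomicwrite('branchheads.txt', 'deadbeef 42\n')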
527 527
528 528 def _updatebranchcache(self, partial, ctxgen):
529 529 # collect new branch entries
530 530 newbranches = {}
531 531 for c in ctxgen:
532 532 newbranches.setdefault(c.branch(), []).append(c.node())
533 533 # if older branchheads are reachable from new ones, they aren't
534 534 # really branchheads. Note checking parents is insufficient:
535 535 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
536 536 for branch, newnodes in newbranches.iteritems():
537 537 bheads = partial.setdefault(branch, [])
538 538 bheads.extend(newnodes)
539 539 if len(bheads) <= 1:
540 540 continue
541 541 bheads = sorted(bheads, key=lambda x: self[x].rev())
542 542 # starting from tip means fewer passes over reachable
543 543 while newnodes:
544 544 latest = newnodes.pop()
545 545 if latest not in bheads:
546 546 continue
547 547 minbhrev = self[bheads[0]].node()
548 548 reachable = self.changelog.reachable(latest, minbhrev)
549 549 reachable.remove(latest)
550 550 if reachable:
551 551 bheads = [b for b in bheads if b not in reachable]
552 552 partial[branch] = bheads
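
The comment's example is worth spelling out: in 1 (branch a) -> 2 (branch b) -> 3 (branch a), node 1 is not a parent of 3, yet it stops being a head of branch a once 3 arrives, which is why reachability rather than parenthood is checked. A toy sketch over an explicit DAG (plain dicts, not the changelog API):

    parents = {1: [], 2: [1], 3: [2]}    # branches: a, b, a

    def reachable(frm, par):
        # all ancestors of 'frm', inclusive
        seen, stack = set(), [frm]
        while stack:
            n = stack.pop()
            if n not in seen:
                seen.add(n)
                stack.extend(par[n])
        return seen

    bheads = [1, 3]                      # candidate heads of branch 'a'
    anc = reachable(3, parents)
    anc.remove(3)
    bheads = [b for b in bheads if b not in anc]
    print bheads                         # -> [3]: node 1 was pruned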
553 553
554 554 def lookup(self, key):
555 555 if isinstance(key, int):
556 556 return self.changelog.node(key)
557 557 elif key == '.':
558 558 return self.dirstate.p1()
559 559 elif key == 'null':
560 560 return nullid
561 561 elif key == 'tip':
562 562 return self.changelog.tip()
563 563 n = self.changelog._match(key)
564 564 if n:
565 565 return n
566 566 if key in self._bookmarks:
567 567 return self._bookmarks[key]
568 568 if key in self.tags():
569 569 return self.tags()[key]
570 570 if key in self.branchtags():
571 571 return self.branchtags()[key]
572 572 n = self.changelog._partialmatch(key)
573 573 if n:
574 574 return n
575 575
576 576 # can't find key, check if it might have come from damaged dirstate
577 577 if key in self.dirstate.parents():
578 578 raise error.Abort(_("working directory has unknown parent '%s'!")
579 579 % short(key))
580 580 try:
581 581 if len(key) == 20:
582 582 key = hex(key)
583 583 except TypeError:
584 584 pass
585 585 raise error.RepoLookupError(_("unknown revision '%s'") % key)
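
lookup() tries, in order: integer revision, the '.', 'null' and 'tip' keywords, an exact changelog match, bookmarks, tags, branch names, and finally a unique node-prefix match. A hedged usage sketch (the repository path is an assumption):

    from mercurial import error, hg, ui as uimod

    repo = hg.repository(uimod.ui(), '.')   # assumes cwd is a repository
    node = repo.lookup('tip')               # keyword
    node = repo.lookup(0)                   # integer revision
    node = repo.lookup('default')           # branch name, via branchtags()
    try:
        repo.lookup('no-such-name')
    except error.RepoLookupError:
        pass                    # unknown keys raise RepoLookupError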
586 586
587 587 def lookupbranch(self, key, remote=None):
588 588 repo = remote or self
589 589 if key in repo.branchmap():
590 590 return key
591 591
592 592 repo = (remote and remote.local()) and remote or self
593 593 return repo[key].branch()
594 594
595 595 def known(self, nodes):
596 596 nm = self.changelog.nodemap
597 597 return [(n in nm) for n in nodes]
598 598
599 599 def local(self):
600 600 return self
601 601
602 602 def join(self, f):
603 603 return os.path.join(self.path, f)
604 604
605 605 def wjoin(self, f):
606 606 return os.path.join(self.root, f)
607 607
608 608 def file(self, f):
609 609 if f[0] == '/':
610 610 f = f[1:]
611 611 return filelog.filelog(self.sopener, f)
612 612
613 613 def changectx(self, changeid):
614 614 return self[changeid]
615 615
616 616 def parents(self, changeid=None):
617 617 '''get list of changectxs for parents of changeid'''
618 618 return self[changeid].parents()
619 619
620 620 def filectx(self, path, changeid=None, fileid=None):
621 621 """changeid can be a changeset revision, node, or tag.
622 622 fileid can be a file revision or node."""
623 623 return context.filectx(self, path, changeid, fileid)
624 624
625 625 def getcwd(self):
626 626 return self.dirstate.getcwd()
627 627
628 628 def pathto(self, f, cwd=None):
629 629 return self.dirstate.pathto(f, cwd)
630 630
631 631 def wfile(self, f, mode='r'):
632 632 return self.wopener(f, mode)
633 633
634 634 def _link(self, f):
635 635 return os.path.islink(self.wjoin(f))
636 636
637 637 def _loadfilter(self, filter):
638 638 if filter not in self.filterpats:
639 639 l = []
640 640 for pat, cmd in self.ui.configitems(filter):
641 641 if cmd == '!':
642 642 continue
643 643 mf = matchmod.match(self.root, '', [pat])
644 644 fn = None
645 645 params = cmd
646 646 for name, filterfn in self._datafilters.iteritems():
647 647 if cmd.startswith(name):
648 648 fn = filterfn
649 649 params = cmd[len(name):].lstrip()
650 650 break
651 651 if not fn:
652 652 fn = lambda s, c, **kwargs: util.filter(s, c)
653 653 # Wrap old filters not supporting keyword arguments
654 654 if not inspect.getargspec(fn)[2]:
655 655 oldfn = fn
656 656 fn = lambda s, c, **kwargs: oldfn(s, c)
657 657 l.append((mf, fn, params))
658 658 self.filterpats[filter] = l
659 659 return self.filterpats[filter]
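
The [encode] and [decode] hgrc sections are what _loadfilter consumes: the left side is a filename pattern, the right side either starts with a registered data filter name or is treated as a shell command, and '!' disables a pattern. An illustrative fragment, assuming the stock 'pipe:' filter prefix as documented for hgrc (the commands themselves are hypothetical)::

    [encode]
    # working directory -> store, applied on commit
    **.gz = pipe: gunzip

    [decode]
    # store -> working directory, applied on checkout
    **.gz = pipe: gzip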
660 660
661 661 def _filter(self, filterpats, filename, data):
662 662 for mf, fn, cmd in filterpats:
663 663 if mf(filename):
664 664 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
665 665 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
666 666 break
667 667
668 668 return data
669 669
670 670 @propertycache
671 671 def _encodefilterpats(self):
672 672 return self._loadfilter('encode')
673 673
674 674 @propertycache
675 675 def _decodefilterpats(self):
676 676 return self._loadfilter('decode')
677 677
678 678 def adddatafilter(self, name, filter):
679 679 self._datafilters[name] = filter
680 680
681 681 def wread(self, filename):
682 682 if self._link(filename):
683 683 data = os.readlink(self.wjoin(filename))
684 684 else:
685 685 data = self.wopener.read(filename)
686 686 return self._filter(self._encodefilterpats, filename, data)
687 687
688 688 def wwrite(self, filename, data, flags):
689 689 data = self._filter(self._decodefilterpats, filename, data)
690 690 if 'l' in flags:
691 691 self.wopener.symlink(data, filename)
692 692 else:
693 693 self.wopener.write(filename, data)
694 694 if 'x' in flags:
695 695 util.setflags(self.wjoin(filename), False, True)
696 696
697 697 def wwritedata(self, filename, data):
698 698 return self._filter(self._decodefilterpats, filename, data)
699 699
700 700 def transaction(self, desc):
701 701 tr = self._transref and self._transref() or None
702 702 if tr and tr.running():
703 703 return tr.nest()
704 704
705 705 # abort here if the journal already exists
706 706 if os.path.exists(self.sjoin("journal")):
707 707 raise error.RepoError(
708 708 _("abandoned transaction found - run hg recover"))
709 709
710 710 journalfiles = self._writejournal(desc)
711 711 renames = [(x, undoname(x)) for x in journalfiles]
712 712
713 713 tr = transaction.transaction(self.ui.warn, self.sopener,
714 714 self.sjoin("journal"),
715 715 aftertrans(renames),
716 716 self.store.createmode)
717 717 self._transref = weakref.ref(tr)
718 718 return tr
719 719
720 720 def _writejournal(self, desc):
721 721 # save dirstate for rollback
722 722 try:
723 723 ds = self.opener.read("dirstate")
724 724 except IOError:
725 725 ds = ""
726 726 self.opener.write("journal.dirstate", ds)
727 727 self.opener.write("journal.branch",
728 728 encoding.fromlocal(self.dirstate.branch()))
729 729 self.opener.write("journal.desc",
730 730 "%d\n%s\n" % (len(self), desc))
731 731
732 732 bkname = self.join('bookmarks')
733 733 if os.path.exists(bkname):
734 734 util.copyfile(bkname, self.join('journal.bookmarks'))
735 735 else:
736 736 self.opener.write('journal.bookmarks', '')
737 737
738 738 return (self.sjoin('journal'), self.join('journal.dirstate'),
739 739 self.join('journal.branch'), self.join('journal.desc'),
740 740 self.join('journal.bookmarks'))
741 741
742 742 def recover(self):
743 743 lock = self.lock()
744 744 try:
745 745 if os.path.exists(self.sjoin("journal")):
746 746 self.ui.status(_("rolling back interrupted transaction\n"))
747 747 transaction.rollback(self.sopener, self.sjoin("journal"),
748 748 self.ui.warn)
749 749 self.invalidate()
750 750 return True
751 751 else:
752 752 self.ui.warn(_("no interrupted transaction available\n"))
753 753 return False
754 754 finally:
755 755 lock.release()
756 756
757 757 def rollback(self, dryrun=False):
758 758 wlock = lock = None
759 759 try:
760 760 wlock = self.wlock()
761 761 lock = self.lock()
762 762 if os.path.exists(self.sjoin("undo")):
763 763 try:
764 764 args = self.opener.read("undo.desc").splitlines()
765 765 if len(args) >= 3 and self.ui.verbose:
766 766 desc = _("repository tip rolled back to revision %s"
767 767 " (undo %s: %s)\n") % (
768 768 int(args[0]) - 1, args[1], args[2])
769 769 elif len(args) >= 2:
770 770 desc = _("repository tip rolled back to revision %s"
771 771 " (undo %s)\n") % (
772 772 int(args[0]) - 1, args[1])
773 773 except IOError:
774 774 desc = _("rolling back unknown transaction\n")
775 775 self.ui.status(desc)
776 776 if dryrun:
777 777 return
778 778 transaction.rollback(self.sopener, self.sjoin("undo"),
779 779 self.ui.warn)
780 780 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
781 781 if os.path.exists(self.join('undo.bookmarks')):
782 782 util.rename(self.join('undo.bookmarks'),
783 783 self.join('bookmarks'))
784 784 try:
785 785 branch = self.opener.read("undo.branch")
786 786 self.dirstate.setbranch(branch)
787 787 except IOError:
788 788 self.ui.warn(_("named branch could not be reset, "
789 789 "current branch is still: %s\n")
790 790 % self.dirstate.branch())
791 791 self.invalidate()
792 792 self.dirstate.invalidate()
793 793 self.destroyed()
794 794 parents = tuple([p.rev() for p in self.parents()])
795 795 if len(parents) > 1:
796 796 self.ui.status(_("working directory now based on "
797 797 "revisions %d and %d\n") % parents)
798 798 else:
799 799 self.ui.status(_("working directory now based on "
800 800 "revision %d\n") % parents)
801 801 else:
802 802 self.ui.warn(_("no rollback information available\n"))
803 803 return 1
804 804 finally:
805 805 release(lock, wlock)
806 806
807 807 def invalidatecaches(self):
808 808 try:
809 809 delattr(self, '_tagscache')
810 810 except AttributeError:
811 811 pass
812 812
813 813 self._branchcache = None # in UTF-8
814 814 self._branchcachetip = None
815 815
816 816 def invalidatedirstate(self):
817 817 '''Invalidates the dirstate, causing the next call to dirstate
818 818 to check if it was modified since the last time it was read,
819 819 rereading it if it has been.
820 820
821 821 This differs from dirstate.invalidate() in that it doesn't
822 822 always reread the dirstate. Use dirstate.invalidate() if you want
823 823 to explicitly reread the dirstate (i.e. to restore it to a
824 824 previously known good state).'''
825 825 try:
826 826 delattr(self, 'dirstate')
827 827 except AttributeError:
828 828 pass
829 829
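Both invalidation paths work by deleting the instance attribute that the caching decorator stashed, so the next access recomputes it; for file-backed caches, recomputation starts with a stat of the backing file. A minimal sketch of the stat-fingerprint idea behind util.filecacheentry (an assumption about its internals, not the real implementation):

    import os

    class filecacheentry(object):
        # remember a (size, mtime) fingerprint for a path; changed()
        # says whether the file was modified since the last refresh
        def __init__(self, path):
            self.path = path
            self.stat = self._fingerprint()
        def _fingerprint(self):
            try:
                st = os.stat(self.path)
                return (st.st_size, st.st_mtime)
            except OSError:
                return None
        def changed(self):
            return self._fingerprint() != self.stat
        def refresh(self):
            self.stat = self._fingerprint()
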
830 830 def invalidate(self):
831 831 for k in self._filecache:
832 832 # dirstate is invalidated separately in invalidatedirstate()
833 833 if k == 'dirstate':
834 834 continue
835 835
836 836 try:
837 837 delattr(self, k)
838 838 except AttributeError:
839 839 pass
840 840 self.invalidatecaches()
841 841
842 842 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
843 843 try:
844 844 l = lock.lock(lockname, 0, releasefn, desc=desc)
845 845 except error.LockHeld, inst:
846 846 if not wait:
847 847 raise
848 848 self.ui.warn(_("waiting for lock on %s held by %r\n") %
849 849 (desc, inst.locker))
850 850 # default to 600 seconds timeout
851 851 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
852 852 releasefn, desc=desc)
853 853 if acquirefn:
854 854 acquirefn()
855 855 return l
856 856
857 857 def lock(self, wait=True):
858 858 '''Lock the repository store (.hg/store) and return a weak reference
859 859 to the lock. Use this before modifying the store (e.g. committing or
860 860 stripping). If you are opening a transaction, get a lock as well.'''
861 861 l = self._lockref and self._lockref()
862 862 if l is not None and l.held:
863 863 l.lock()
864 864 return l
865 865
866 866 def unlock():
867 867 self.store.write()
868 868 for k, ce in self._filecache.items():
869 869 if k == 'dirstate':
870 870 continue
871 871 ce.refresh()
872 872
873 873 l = self._lock(self.sjoin("lock"), wait, unlock,
874 874 self.invalidate, _('repository %s') % self.origroot)
875 875 self._lockref = weakref.ref(l)
876 876 return l
877 877
878 878 def wlock(self, wait=True):
879 879 '''Lock the non-store parts of the repository (everything under
880 880 .hg except .hg/store) and return a weak reference to the lock.
881 881 Use this before modifying files in .hg.'''
882 882 l = self._wlockref and self._wlockref()
883 883 if l is not None and l.held:
884 884 l.lock()
885 885 return l
886 886
887 887 def unlock():
888 888 self.dirstate.write()
889 889 ce = self._filecache.get('dirstate')
890 890 if ce:
891 891 ce.refresh()
892 892
893 893 l = self._lock(self.join("wlock"), wait, unlock,
894 894 self.invalidatedirstate, _('working directory of %s') %
895 895 self.origroot)
896 896 self._wlockref = weakref.ref(l)
897 897 return l
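
By convention the working-directory lock is taken before the store lock when both are needed (commit() below does exactly this), and they are released in reverse order. A hedged usage sketch (the repository path is an assumption):

    from mercurial import hg, ui as uimod
    from mercurial.lock import release

    repo = hg.repository(uimod.ui(), '.')   # assumes cwd is a repository
    wlock = lock = None
    try:
        wlock = repo.wlock()    # working-directory lock first...
        lock = repo.lock()      # ...then the store lock
        # ... modify the store and working directory here ...
    finally:
        release(lock, wlock)    # reverse order; None entries are skipped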
898 898
899 899 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
900 900 """
901 901 commit an individual file as part of a larger transaction
902 902 """
903 903
904 904 fname = fctx.path()
905 905 text = fctx.data()
906 906 flog = self.file(fname)
907 907 fparent1 = manifest1.get(fname, nullid)
908 908 fparent2 = fparent2o = manifest2.get(fname, nullid)
909 909
910 910 meta = {}
911 911 copy = fctx.renamed()
912 912 if copy and copy[0] != fname:
913 913 # Mark the new revision of this file as a copy of another
914 914 # file. This copy data will effectively act as a parent
915 915 # of this new revision. If this is a merge, the first
916 916 # parent will be the nullid (meaning "look up the copy data")
917 917 # and the second one will be the other parent. For example:
918 918 #
919 919 # 0 --- 1 --- 3 rev1 changes file foo
920 920 # \ / rev2 renames foo to bar and changes it
921 921 # \- 2 -/ rev3 should have bar with all changes and
922 922 # should record that bar descends from
923 923 # bar in rev2 and foo in rev1
924 924 #
925 925 # this allows this merge to succeed:
926 926 #
927 927 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
928 928 # \ / merging rev3 and rev4 should use bar@rev2
929 929 # \- 2 --- 4 as the merge base
930 930 #
931 931
932 932 cfname = copy[0]
933 933 crev = manifest1.get(cfname)
934 934 newfparent = fparent2
935 935
936 936 if manifest2: # branch merge
937 937 if fparent2 == nullid or crev is None: # copied on remote side
938 938 if cfname in manifest2:
939 939 crev = manifest2[cfname]
940 940 newfparent = fparent1
941 941
942 942 # find source in nearest ancestor if we've lost track
943 943 if not crev:
944 944 self.ui.debug(" %s: searching for copy revision for %s\n" %
945 945 (fname, cfname))
946 946 for ancestor in self[None].ancestors():
947 947 if cfname in ancestor:
948 948 crev = ancestor[cfname].filenode()
949 949 break
950 950
951 951 if crev:
952 952 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
953 953 meta["copy"] = cfname
954 954 meta["copyrev"] = hex(crev)
955 955 fparent1, fparent2 = nullid, newfparent
956 956 else:
957 957 self.ui.warn(_("warning: can't find ancestor for '%s' "
958 958 "copied from '%s'!\n") % (fname, cfname))
959 959
960 960 elif fparent2 != nullid:
961 961 # is one parent an ancestor of the other?
962 962 fparentancestor = flog.ancestor(fparent1, fparent2)
963 963 if fparentancestor == fparent1:
964 964 fparent1, fparent2 = fparent2, nullid
965 965 elif fparentancestor == fparent2:
966 966 fparent2 = nullid
967 967
968 968 # is the file changed?
969 969 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
970 970 changelist.append(fname)
971 971 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
972 972
973 973 # are just the flags changed during merge?
974 974 if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
975 975 changelist.append(fname)
976 976
977 977 return fparent1
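
When the copy branch above fires, the new filelog revision carries the copy source in its metadata and fparent1 is nulled out. A sketch of the resulting values for a commit that renames foo to bar (hypothetical names, fake hash):

    # hypothetical metadata stored with bar's new filelog revision:
    meta = {'copy': 'foo',
            'copyrev': 'a' * 40}    # hex of foo's filenode in manifest1
    # fparent1 is then nullid: a reader that sees nullid as the first
    # parent knows to "look up the copy data" in this metadata instead.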
978 978
979 979 def commit(self, text="", user=None, date=None, match=None, force=False,
980 980 editor=False, extra={}):
981 981 """Add a new revision to current repository.
982 982
983 983 Revision information is gathered from the working directory,
984 984 match can be used to filter the committed files. If editor is
985 985 supplied, it is called to get a commit message.
986 986 """
987 987
988 988 def fail(f, msg):
989 989 raise util.Abort('%s: %s' % (f, msg))
990 990
991 991 if not match:
992 992 match = matchmod.always(self.root, '')
993 993
994 994 if not force:
995 995 vdirs = []
996 996 match.dir = vdirs.append
997 997 match.bad = fail
998 998
999 999 wlock = self.wlock()
1000 1000 try:
1001 1001 wctx = self[None]
1002 1002 merge = len(wctx.parents()) > 1
1003 1003
1004 1004 if (not force and merge and match and
1005 1005 (match.files() or match.anypats())):
1006 1006 raise util.Abort(_('cannot partially commit a merge '
1007 1007 '(do not specify files or patterns)'))
1008 1008
1009 1009 changes = self.status(match=match, clean=force)
1010 1010 if force:
1011 1011 changes[0].extend(changes[6]) # mq may commit unchanged files
1012 1012
1013 1013 # check subrepos
1014 1014 subs = []
1015 1015 removedsubs = set()
1016 1016 if '.hgsub' in wctx:
1017 1017 # only manage subrepos and .hgsubstate if .hgsub is present
1018 1018 for p in wctx.parents():
1019 1019 removedsubs.update(s for s in p.substate if match(s))
1020 1020 for s in wctx.substate:
1021 1021 removedsubs.discard(s)
1022 1022 if match(s) and wctx.sub(s).dirty():
1023 1023 subs.append(s)
1024 1024 if (subs or removedsubs):
1025 1025 if (not match('.hgsub') and
1026 1026 '.hgsub' in (wctx.modified() + wctx.added())):
1027 1027 raise util.Abort(
1028 1028 _("can't commit subrepos without .hgsub"))
1029 1029 if '.hgsubstate' not in changes[0]:
1030 1030 changes[0].insert(0, '.hgsubstate')
1031 1031 if '.hgsubstate' in changes[2]:
1032 1032 changes[2].remove('.hgsubstate')
1033 1033 elif '.hgsub' in changes[2]:
1034 1034 # clean up .hgsubstate when .hgsub is removed
1035 1035 if ('.hgsubstate' in wctx and
1036 1036 '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
1037 1037 changes[2].insert(0, '.hgsubstate')
1038 1038
1039 1039 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
1040 1040 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
1041 1041 if changedsubs:
1042 1042 raise util.Abort(_("uncommitted changes in subrepo %s")
1043 1043 % changedsubs[0])
1044 1044
1045 1045 # make sure all explicit patterns are matched
1046 1046 if not force and match.files():
1047 1047 matched = set(changes[0] + changes[1] + changes[2])
1048 1048
1049 1049 for f in match.files():
1050 1050 if f == '.' or f in matched or f in wctx.substate:
1051 1051 continue
1052 1052 if f in changes[3]: # missing
1053 1053 fail(f, _('file not found!'))
1054 1054 if f in vdirs: # visited directory
1055 1055 d = f + '/'
1056 1056 for mf in matched:
1057 1057 if mf.startswith(d):
1058 1058 break
1059 1059 else:
1060 1060 fail(f, _("no match under directory!"))
1061 1061 elif f not in self.dirstate:
1062 1062 fail(f, _("file not tracked!"))
1063 1063
1064 1064 if (not force and not extra.get("close") and not merge
1065 1065 and not (changes[0] or changes[1] or changes[2])
1066 1066 and wctx.branch() == wctx.p1().branch()):
1067 1067 return None
1068 1068
1069 1069 ms = mergemod.mergestate(self)
1070 1070 for f in changes[0]:
1071 1071 if f in ms and ms[f] == 'u':
1072 1072 raise util.Abort(_("unresolved merge conflicts "
1073 1073 "(see hg help resolve)"))
1074 1074
1075 1075 cctx = context.workingctx(self, text, user, date, extra, changes)
1076 1076 if editor:
1077 1077 cctx._text = editor(self, cctx, subs)
1078 1078 edited = (text != cctx._text)
1079 1079
1080 1080 # commit subs
1081 1081 if subs or removedsubs:
1082 1082 state = wctx.substate.copy()
1083 1083 for s in sorted(subs):
1084 1084 sub = wctx.sub(s)
1085 1085 self.ui.status(_('committing subrepository %s\n') %
1086 1086 subrepo.subrelpath(sub))
1087 1087 sr = sub.commit(cctx._text, user, date)
1088 1088 state[s] = (state[s][0], sr)
1089 1089 subrepo.writestate(self, state)
1090 1090
1091 1091 # Save commit message in case this transaction gets rolled back
1092 1092 # (e.g. by a pretxncommit hook). Leave the content alone on
1093 1093 # the assumption that the user will use the same editor again.
1094 1094 msgfn = self.savecommitmessage(cctx._text)
1095 1095
1096 1096 p1, p2 = self.dirstate.parents()
1097 1097 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1098 1098 try:
1099 1099 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1100 1100 ret = self.commitctx(cctx, True)
1101 1101 except:
1102 1102 if edited:
1103 1103 self.ui.write(
1104 1104 _('note: commit message saved in %s\n') % msgfn)
1105 1105 raise
1106 1106
1107 1107 # update bookmarks, dirstate and mergestate
1108 1108 bookmarks.update(self, p1, ret)
1109 1109 for f in changes[0] + changes[1]:
1110 1110 self.dirstate.normal(f)
1111 1111 for f in changes[2]:
1112 1112 self.dirstate.drop(f)
1113 1113 self.dirstate.setparents(ret)
1114 1114 ms.reset()
1115 1115 finally:
1116 1116 wlock.release()
1117 1117
1118 1118 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1119 1119 return ret
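
A hedged usage sketch of commit() (the repository path is an assumption; running it really commits if the working directory is dirty):

    from mercurial import hg, ui as uimod
    from mercurial.node import hex

    repo = hg.repository(uimod.ui(), '.')   # assumes cwd is a repository
    # commits whatever the dirstate reports as modified/added/removed;
    # returns the new changeset node, or None if there was nothing to do
    node = repo.commit(text='example commit',
                       user='Example <example@example.com>')
    print node and hex(node)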
1120 1120
1121 1121 def commitctx(self, ctx, error=False):
1122 1122 """Add a new revision to current repository.
1123 1123 Revision information is passed via the context argument.
1124 1124 """
1125 1125
1126 1126 tr = lock = None
1127 1127 removed = list(ctx.removed())
1128 1128 p1, p2 = ctx.p1(), ctx.p2()
1129 1129 user = ctx.user()
1130 1130
1131 1131 lock = self.lock()
1132 1132 try:
1133 1133 tr = self.transaction("commit")
1134 1134 trp = weakref.proxy(tr)
1135 1135
1136 1136 if ctx.files():
1137 1137 m1 = p1.manifest().copy()
1138 1138 m2 = p2.manifest()
1139 1139
1140 1140 # check in files
1141 1141 new = {}
1142 1142 changed = []
1143 1143 linkrev = len(self)
1144 1144 for f in sorted(ctx.modified() + ctx.added()):
1145 1145 self.ui.note(f + "\n")
1146 1146 try:
1147 1147 fctx = ctx[f]
1148 1148 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1149 1149 changed)
1150 1150 m1.set(f, fctx.flags())
1151 1151 except OSError, inst:
1152 1152 self.ui.warn(_("trouble committing %s!\n") % f)
1153 1153 raise
1154 1154 except IOError, inst:
1155 1155 errcode = getattr(inst, 'errno', errno.ENOENT)
1156 1156 if error or errcode and errcode != errno.ENOENT:
1157 1157 self.ui.warn(_("trouble committing %s!\n") % f)
1158 1158 raise
1159 1159 else:
1160 1160 removed.append(f)
1161 1161
1162 1162 # update manifest
1163 1163 m1.update(new)
1164 1164 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1165 1165 drop = [f for f in removed if f in m1]
1166 1166 for f in drop:
1167 1167 del m1[f]
1168 1168 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1169 1169 p2.manifestnode(), (new, drop))
1170 1170 files = changed + removed
1171 1171 else:
1172 1172 mn = p1.manifestnode()
1173 1173 files = []
1174 1174
1175 1175 # update changelog
1176 1176 self.changelog.delayupdate()
1177 1177 n = self.changelog.add(mn, files, ctx.description(),
1178 1178 trp, p1.node(), p2.node(),
1179 1179 user, ctx.date(), ctx.extra().copy())
1180 1180 p = lambda: self.changelog.writepending() and self.root or ""
1181 1181 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1182 1182 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1183 1183 parent2=xp2, pending=p)
1184 1184 self.changelog.finalize(trp)
1185 1185 tr.close()
1186 1186
1187 1187 if self._branchcache:
1188 1188 self.updatebranchcache()
1189 1189 return n
1190 1190 finally:
1191 1191 if tr:
1192 1192 tr.release()
1193 1193 lock.release()
1194 1194
1195 1195 def destroyed(self):
1196 1196 '''Inform the repository that nodes have been destroyed.
1197 1197 Intended for use by strip and rollback, so there's a common
1198 1198 place for anything that has to be done after destroying history.'''
1199 1199 # XXX it might be nice if we could take the list of destroyed
1200 1200 # nodes, but I don't see an easy way for rollback() to do that
1201 1201
1202 1202 # Ensure the persistent tag cache is updated. Doing it now
1203 1203 # means that the tag cache only has to worry about destroyed
1204 1204 # heads immediately after a strip/rollback. That in turn
1205 1205 # guarantees that "cachetip == currenttip" (comparing both rev
1206 1206 # and node) always means no nodes have been added or destroyed.
1207 1207
1208 1208 # XXX this is suboptimal when qrefresh'ing: we strip the current
1209 1209 # head, refresh the tag cache, then immediately add a new head.
1210 1210 # But I think doing it this way is necessary for the "instant
1211 1211 # tag cache retrieval" case to work.
1212 1212 self.invalidatecaches()
1213 1213
1214 1214 def walk(self, match, node=None):
1215 1215 '''
1216 1216 walk recursively through the directory tree or a given
1217 1217 changeset, finding all files matched by the match
1218 1218 function
1219 1219 '''
1220 1220 return self[node].walk(match)
1221 1221
1222 1222 def status(self, node1='.', node2=None, match=None,
1223 1223 ignored=False, clean=False, unknown=False,
1224 1224 listsubrepos=False):
1225 1225 """return status of files between two nodes or node and working directory
1226 1226
1227 1227 If node1 is None, use the first dirstate parent instead.
1228 1228 If node2 is None, compare node1 with working directory.
1229 1229 """
1230 1230
1231 1231 def mfmatches(ctx):
1232 1232 mf = ctx.manifest().copy()
1233 1233 for fn in mf.keys():
1234 1234 if not match(fn):
1235 1235 del mf[fn]
1236 1236 return mf
1237 1237
1238 1238 if isinstance(node1, context.changectx):
1239 1239 ctx1 = node1
1240 1240 else:
1241 1241 ctx1 = self[node1]
1242 1242 if isinstance(node2, context.changectx):
1243 1243 ctx2 = node2
1244 1244 else:
1245 1245 ctx2 = self[node2]
1246 1246
1247 1247 working = ctx2.rev() is None
1248 1248 parentworking = working and ctx1 == self['.']
1249 1249 match = match or matchmod.always(self.root, self.getcwd())
1250 1250 listignored, listclean, listunknown = ignored, clean, unknown
1251 1251
1252 1252 # load earliest manifest first for caching reasons
1253 1253 if not working and ctx2.rev() < ctx1.rev():
1254 1254 ctx2.manifest()
1255 1255
1256 1256 if not parentworking:
1257 1257 def bad(f, msg):
1258 1258 if f not in ctx1:
1259 1259 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1260 1260 match.bad = bad
1261 1261
1262 1262 if working: # we need to scan the working dir
1263 1263 subrepos = []
1264 1264 if '.hgsub' in self.dirstate:
1265 1265 subrepos = ctx2.substate.keys()
1266 1266 s = self.dirstate.status(match, subrepos, listignored,
1267 1267 listclean, listunknown)
1268 1268 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1269 1269
1270 1270 # check for any possibly clean files
1271 1271 if parentworking and cmp:
1272 1272 fixup = []
1273 1273 # do a full compare of any files that might have changed
1274 1274 for f in sorted(cmp):
1275 1275 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1276 1276 or ctx1[f].cmp(ctx2[f])):
1277 1277 modified.append(f)
1278 1278 else:
1279 1279 fixup.append(f)
1280 1280
1281 1281 # update dirstate for files that are actually clean
1282 1282 if fixup:
1283 1283 if listclean:
1284 1284 clean += fixup
1285 1285
1286 1286 try:
1287 1287 # updating the dirstate is optional
1288 1288 # so we don't wait on the lock
1289 1289 wlock = self.wlock(False)
1290 1290 try:
1291 1291 for f in fixup:
1292 1292 self.dirstate.normal(f)
1293 1293 finally:
1294 1294 wlock.release()
1295 1295 except error.LockError:
1296 1296 pass
1297 1297
1298 1298 if not parentworking:
1299 1299 mf1 = mfmatches(ctx1)
1300 1300 if working:
1301 1301 # we are comparing working dir against non-parent
1302 1302 # generate a pseudo-manifest for the working dir
1303 1303 mf2 = mfmatches(self['.'])
1304 1304 for f in cmp + modified + added:
1305 1305 mf2[f] = None
1306 1306 mf2.set(f, ctx2.flags(f))
1307 1307 for f in removed:
1308 1308 if f in mf2:
1309 1309 del mf2[f]
1310 1310 else:
1311 1311 # we are comparing two revisions
1312 1312 deleted, unknown, ignored = [], [], []
1313 1313 mf2 = mfmatches(ctx2)
1314 1314
1315 1315 modified, added, clean = [], [], []
1316 1316 for fn in mf2:
1317 1317 if fn in mf1:
1318 1318 if (fn not in deleted and
1319 1319 (mf1.flags(fn) != mf2.flags(fn) or
1320 1320 (mf1[fn] != mf2[fn] and
1321 1321 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1322 1322 modified.append(fn)
1323 1323 elif listclean:
1324 1324 clean.append(fn)
1325 1325 del mf1[fn]
1326 1326 elif fn not in deleted:
1327 1327 added.append(fn)
1328 1328 removed = mf1.keys()
1329 1329
1330 1330 r = modified, added, removed, deleted, unknown, ignored, clean
1331 1331
1332 1332 if listsubrepos:
1333 1333 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1334 1334 if working:
1335 1335 rev2 = None
1336 1336 else:
1337 1337 rev2 = ctx2.substate[subpath][1]
1338 1338 try:
1339 1339 submatch = matchmod.narrowmatcher(subpath, match)
1340 1340 s = sub.status(rev2, match=submatch, ignored=listignored,
1341 1341 clean=listclean, unknown=listunknown,
1342 1342 listsubrepos=True)
1343 1343 for rfiles, sfiles in zip(r, s):
1344 1344 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1345 1345 except error.LookupError:
1346 1346 self.ui.status(_("skipping missing subrepository: %s\n")
1347 1347 % subpath)
1348 1348
1349 1349 for l in r:
1350 1350 l.sort()
1351 1351 return r
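
status() returns a fixed 7-tuple that callers unpack positionally; the optional categories are empty unless requested. Hedged usage (same repository-path assumption as the earlier sketches):

    from mercurial import hg, ui as uimod

    repo = hg.repository(uimod.ui(), '.')   # assumes cwd is a repository
    st = repo.status(ignored=True, clean=True, unknown=True)
    modified, added, removed, deleted, unknown, ignored, clean = st
    for f in modified:
        print 'M', f
    for f in unknown:
        print '?', f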
1352 1352
1353 1353 def heads(self, start=None):
1354 1354 heads = self.changelog.heads(start)
1355 1355 # sort the output in rev descending order
1356 1356 return sorted(heads, key=self.changelog.rev, reverse=True)
1357 1357
1358 1358 def branchheads(self, branch=None, start=None, closed=False):
1359 1359 '''return a (possibly filtered) list of heads for the given branch
1360 1360
1361 1361 Heads are returned in topological order, from newest to oldest.
1362 1362 If branch is None, use the dirstate branch.
1363 1363 If start is not None, return only heads reachable from start.
1364 1364 If closed is True, return heads that are marked as closed as well.
1365 1365 '''
1366 1366 if branch is None:
1367 1367 branch = self[None].branch()
1368 1368 branches = self.branchmap()
1369 1369 if branch not in branches:
1370 1370 return []
1371 1371 # the cache returns heads ordered lowest to highest
1372 1372 bheads = list(reversed(branches[branch]))
1373 1373 if start is not None:
1374 1374 # filter out the heads that cannot be reached from startrev
1375 1375 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1376 1376 bheads = [h for h in bheads if h in fbheads]
1377 1377 if not closed:
1378 1378 bheads = [h for h in bheads if
1379 1379 ('close' not in self.changelog.read(h)[5])]
1380 1380 return bheads
1381 1381
1382 1382 def branches(self, nodes):
1383 1383 if not nodes:
1384 1384 nodes = [self.changelog.tip()]
1385 1385 b = []
1386 1386 for n in nodes:
1387 1387 t = n
1388 1388 while True:
1389 1389 p = self.changelog.parents(n)
1390 1390 if p[1] != nullid or p[0] == nullid:
1391 1391 b.append((t, n, p[0], p[1]))
1392 1392 break
1393 1393 n = p[0]
1394 1394 return b
1395 1395
1396 1396 def between(self, pairs):
1397 1397 r = []
1398 1398
1399 1399 for top, bottom in pairs:
1400 1400 n, l, i = top, [], 0
1401 1401 f = 1
1402 1402
1403 1403 while n != bottom and n != nullid:
1404 1404 p = self.changelog.parents(n)[0]
1405 1405 if i == f:
1406 1406 l.append(n)
1407 1407 f = f * 2
1408 1408 n = p
1409 1409 i += 1
1410 1410
1411 1411 r.append(l)
1412 1412
1413 1413 return r
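
between() samples nodes along the first-parent chain from top toward bottom at exponentially growing gaps: since f doubles each time it is hit, the kept nodes sit at distances 1, 2, 4, 8, ... from the top, which lets the old discovery protocol binary-search for a common ancestor. The kept distances, sketched directly:

    def sampledistances(chainlen):
        # distances from the top at which between() records a node
        l, f, i = [], 1, 0
        while i < chainlen:
            if i == f:
                l.append(i)
                f *= 2
            i += 1
        return l

    print sampledistances(20)   # -> [1, 2, 4, 8, 16]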
1414 1414
1415 1415 def pull(self, remote, heads=None, force=False):
1416 1416 lock = self.lock()
1417 1417 try:
1418 1418 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1419 1419 force=force)
1420 1420 common, fetch, rheads = tmp
1421 1421 if not fetch:
1422 1422 self.ui.status(_("no changes found\n"))
1423 1423 result = 0
1424 1424 else:
1425 1425 if heads is None and list(common) == [nullid]:
1426 1426 self.ui.status(_("requesting all changes\n"))
1427 1427 elif heads is None and remote.capable('changegroupsubset'):
1428 1428 # issue1320, avoid a race if remote changed after discovery
1429 1429 heads = rheads
1430 1430
1431 1431 if remote.capable('getbundle'):
1432 1432 cg = remote.getbundle('pull', common=common,
1433 1433 heads=heads or rheads)
1434 1434 elif heads is None:
1435 1435 cg = remote.changegroup(fetch, 'pull')
1436 1436 elif not remote.capable('changegroupsubset'):
1437 1437 raise util.Abort(_("partial pull cannot be done because "
1438 1438 "other repository doesn't support "
1439 1439 "changegroupsubset."))
1440 1440 else:
1441 1441 cg = remote.changegroupsubset(fetch, heads, 'pull')
1442 1442 result = self.addchangegroup(cg, 'pull', remote.url(),
1443 1443 lock=lock)
1444 1444 finally:
1445 1445 lock.release()
1446 1446
1447 1447 return result
1448 1448
1449 1449 def checkpush(self, force, revs):
1450 1450 """Extensions can override this function if additional checks have
1451 1451 to be performed before pushing, or call it if they override push
1452 1452 command.
1453 1453 """
1454 1454 pass
1455 1455
1456 1456 def push(self, remote, force=False, revs=None, newbranch=False):
1457 1457 '''Push outgoing changesets (limited by revs) from the current
1458 1458 repository to remote. Return an integer:
1459 1459 - 0 means HTTP error *or* nothing to push
1460 1460 - 1 means we pushed and remote head count is unchanged *or*
1461 1461 we have outgoing changesets but refused to push
1462 1462 - other values as described by addchangegroup()
1463 1463 '''
1464 1464 # there are two ways to push to remote repo:
1465 1465 #
1466 1466 # addchangegroup assumes local user can lock remote
1467 1467 # repo (local filesystem, old ssh servers).
1468 1468 #
1469 1469 # unbundle assumes local user cannot lock remote repo (new ssh
1470 1470 # servers, http servers).
1471 1471
1472 1472 self.checkpush(force, revs)
1473 1473 lock = None
1474 1474 unbundle = remote.capable('unbundle')
1475 1475 if not unbundle:
1476 1476 lock = remote.lock()
1477 1477 try:
1478 1478 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1479 1479 newbranch)
1480 1480 ret = remote_heads
1481 1481 if cg is not None:
1482 1482 if unbundle:
1483 1483 # local repo finds heads on server, finds out what
1484 1484 # revs it must push. once revs transferred, if server
1485 1485 # finds it has different heads (someone else won
1486 1486 # commit/push race), server aborts.
1487 1487 if force:
1488 1488 remote_heads = ['force']
1489 1489 # ssh: return remote's addchangegroup()
1490 1490 # http: return remote's addchangegroup() or 0 for error
1491 1491 ret = remote.unbundle(cg, remote_heads, 'push')
1492 1492 else:
1493 1493 # we return an integer indicating remote head count change
1494 1494 ret = remote.addchangegroup(cg, 'push', self.url(),
1495 1495 lock=lock)
1496 1496 finally:
1497 1497 if lock is not None:
1498 1498 lock.release()
1499 1499
1500 1500 self.ui.debug("checking for updated bookmarks\n")
1501 1501 rb = remote.listkeys('bookmarks')
1502 1502 for k in rb.keys():
1503 1503 if k in self._bookmarks:
1504 1504 nr, nl = rb[k], hex(self._bookmarks[k])
1505 1505 if nr in self:
1506 1506 cr = self[nr]
1507 1507 cl = self[nl]
1508 1508 if cl in cr.descendants():
1509 1509 r = remote.pushkey('bookmarks', k, nr, nl)
1510 1510 if r:
1511 1511 self.ui.status(_("updating bookmark %s\n") % k)
1512 1512 else:
1513 1513 self.ui.warn(_('updating bookmark %s'
1514 1514 ' failed!\n') % k)
1515 1515
1516 1516 return ret
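
The tri-state return makes "did anything go out" checks slightly subtle: 0 covers both an HTTP error and nothing to push, and 1 can mean either success with an unchanged head count or a refused push. A hedged usage sketch (both repository paths are assumptions, and opening the peer via hg.repository reflects this era's API):

    from mercurial import hg, ui as uimod

    u = uimod.ui()
    repo = hg.repository(u, '.')                      # local repository
    other = hg.repository(u, 'http://example.com/r')  # hypothetical peer
    ret = repo.push(other)
    if ret == 0:
        print 'HTTP error or nothing to push'
    elif ret == 1:
        print 'pushed, head count unchanged (or push was refused)'
    else:
        print 'pushed, addchangegroup() head-delta code:', ret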
1517 1517
1518 1518 def changegroupinfo(self, nodes, source):
1519 1519 if self.ui.verbose or source == 'bundle':
1520 1520 self.ui.status(_("%d changesets found\n") % len(nodes))
1521 1521 if self.ui.debugflag:
1522 1522 self.ui.debug("list of changesets:\n")
1523 1523 for node in nodes:
1524 1524 self.ui.debug("%s\n" % hex(node))
1525 1525
1526 1526 def changegroupsubset(self, bases, heads, source):
1527 1527 """Compute a changegroup consisting of all the nodes that are
1528 1528 descendants of any of the bases and ancestors of any of the heads.
1529 1529 Return a chunkbuffer object whose read() method will return
1530 1530 successive changegroup chunks.
1531 1531
1532 1532 It is fairly complex as determining which filenodes and which
1533 1533 manifest nodes need to be included for the changeset to be complete
1534 1534 is non-trivial.
1535 1535
1536 1536 Another wrinkle is doing the reverse, figuring out which changeset in
1537 1537 the changegroup a particular filenode or manifestnode belongs to.
1538 1538 """
1539 1539 cl = self.changelog
1540 1540 if not bases:
1541 1541 bases = [nullid]
1542 1542 csets, bases, heads = cl.nodesbetween(bases, heads)
1543 1543 # We assume that all ancestors of bases are known
1544 1544 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1545 1545 return self._changegroupsubset(common, csets, heads, source)
1546 1546
1547 1547 def getbundle(self, source, heads=None, common=None):
1548 1548 """Like changegroupsubset, but returns the set difference between the
1549 1549 ancestors of heads and the ancestors of common.
1550 1550
1551 1551 If heads is None, use the local heads. If common is None, use [nullid].
1552 1552
1553 1553 The nodes in common might not all be known locally due to the way the
1554 1554 current discovery protocol works.
1555 1555 """
1556 1556 cl = self.changelog
1557 1557 if common:
1558 1558 nm = cl.nodemap
1559 1559 common = [n for n in common if n in nm]
1560 1560 else:
1561 1561 common = [nullid]
1562 1562 if not heads:
1563 1563 heads = cl.heads()
1564 1564 common, missing = cl.findcommonmissing(common, heads)
1565 1565 if not missing:
1566 1566 return None
1567 1567 return self._changegroupsubset(common, missing, heads, source)
1568 1568
1569 1569 def _changegroupsubset(self, commonrevs, csets, heads, source):
1570 1570
1571 1571 cl = self.changelog
1572 1572 mf = self.manifest
1573 1573 mfs = {} # needed manifests
1574 1574 fnodes = {} # needed file nodes
1575 1575 changedfiles = set()
1576 1576 fstate = ['', {}]
1577 1577 count = [0]
1578 1578
1579 1579 # can we go through the fast path ?
1580 1580 heads.sort()
1581 1581 if heads == sorted(self.heads()):
1582 1582 return self._changegroup(csets, source)
1583 1583
1584 1584 # slow path
1585 1585 self.hook('preoutgoing', throw=True, source=source)
1586 1586 self.changegroupinfo(csets, source)
1587 1587
1588 1588 # filter any nodes that claim to be part of the known set
1589 1589 def prune(revlog, missing):
1590 1590 return [n for n in missing
1591 1591 if revlog.linkrev(revlog.rev(n)) not in commonrevs]
1592 1592
1593 1593 def lookup(revlog, x):
1594 1594 if revlog == cl:
1595 1595 c = cl.read(x)
1596 1596 changedfiles.update(c[3])
1597 1597 mfs.setdefault(c[0], x)
1598 1598 count[0] += 1
1599 1599 self.ui.progress(_('bundling'), count[0],
1600 1600 unit=_('changesets'), total=len(csets))
1601 1601 return x
1602 1602 elif revlog == mf:
1603 1603 clnode = mfs[x]
1604 1604 mdata = mf.readfast(x)
1605 1605 for f in changedfiles:
1606 1606 if f in mdata:
1607 1607 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1608 1608 count[0] += 1
1609 1609 self.ui.progress(_('bundling'), count[0],
1610 1610 unit=_('manifests'), total=len(mfs))
1611 1611 return mfs[x]
1612 1612 else:
1613 1613 self.ui.progress(
1614 1614 _('bundling'), count[0], item=fstate[0],
1615 1615 unit=_('files'), total=len(changedfiles))
1616 1616 return fstate[1][x]
1617 1617
1618 1618 bundler = changegroup.bundle10(lookup)
1619 1619 reorder = self.ui.config('bundle', 'reorder', 'auto')
1620 1620 if reorder == 'auto':
1621 1621 reorder = None
1622 1622 else:
1623 1623 reorder = util.parsebool(reorder)
1624 1624
1625 1625 def gengroup():
1626 1626 # Create a changenode group generator that will call our functions
1627 1627 # back to lookup the owning changenode and collect information.
1628 1628 for chunk in cl.group(csets, bundler, reorder=reorder):
1629 1629 yield chunk
1630 1630 self.ui.progress(_('bundling'), None)
1631 1631
1632 1632 # Create a generator for the manifestnodes that calls our lookup
1633 1633 # and data collection functions back.
1634 1634 count[0] = 0
1635 1635 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1636 1636 yield chunk
1637 1637 self.ui.progress(_('bundling'), None)
1638 1638
1639 1639 mfs.clear()
1640 1640
1641 1641 # Go through all our files in order sorted by name.
1642 1642 count[0] = 0
1643 1643 for fname in sorted(changedfiles):
1644 1644 filerevlog = self.file(fname)
1645 1645 if not len(filerevlog):
1646 1646 raise util.Abort(_("empty or missing revlog for %s") % fname)
1647 1647 fstate[0] = fname
1648 1648 fstate[1] = fnodes.pop(fname, {})
1649 1649
1650 1650 nodelist = prune(filerevlog, fstate[1])
1651 1651 if nodelist:
1652 1652 count[0] += 1
1653 1653 yield bundler.fileheader(fname)
1654 1654 for chunk in filerevlog.group(nodelist, bundler, reorder):
1655 1655 yield chunk
1656 1656
1657 1657 # Signal that no more groups are left.
1658 1658 yield bundler.close()
1659 1659 self.ui.progress(_('bundling'), None)
1660 1660
1661 1661 if csets:
1662 1662 self.hook('outgoing', node=hex(csets[0]), source=source)
1663 1663
1664 1664 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1665 1665
1666 1666 def changegroup(self, basenodes, source):
1667 1667 # to avoid a race we use changegroupsubset() (issue1320)
1668 1668 return self.changegroupsubset(basenodes, self.heads(), source)
1669 1669
1670 1670 def _changegroup(self, nodes, source):
1671 1671 """Compute the changegroup of all nodes that we have that a recipient
1672 1672 doesn't. Return a chunkbuffer object whose read() method will return
1673 1673 successive changegroup chunks.
1674 1674
1675 1675 This is much easier than _changegroupsubset() as we can assume that
1676 1676 the recipient has any changenode we aren't sending them.
1677 1677
1678 1678 nodes is the set of nodes to send"""
1679 1679
1680 1680 cl = self.changelog
1681 1681 mf = self.manifest
1682 1682 mfs = {}
1683 1683 changedfiles = set()
1684 1684 fstate = ['']
1685 1685 count = [0]
1686 1686
1687 1687 self.hook('preoutgoing', throw=True, source=source)
1688 1688 self.changegroupinfo(nodes, source)
1689 1689
1690 1690 revset = set([cl.rev(n) for n in nodes])
1691 1691
1692 1692 def gennodelst(log):
1693 1693 return [log.node(r) for r in log if log.linkrev(r) in revset]
1694 1694
1695 1695 def lookup(revlog, x):
1696 1696 if revlog == cl:
1697 1697 c = cl.read(x)
1698 1698 changedfiles.update(c[3])
1699 1699 mfs.setdefault(c[0], x)
1700 1700 count[0] += 1
1701 1701 self.ui.progress(_('bundling'), count[0],
1702 1702 unit=_('changesets'), total=len(nodes))
1703 1703 return x
1704 1704 elif revlog == mf:
1705 1705 count[0] += 1
1706 1706 self.ui.progress(_('bundling'), count[0],
1707 1707 unit=_('manifests'), total=len(mfs))
1708 1708 return cl.node(revlog.linkrev(revlog.rev(x)))
1709 1709 else:
1710 1710 self.ui.progress(
1711 1711 _('bundling'), count[0], item=fstate[0],
1712 1712 total=len(changedfiles), unit=_('files'))
1713 1713 return cl.node(revlog.linkrev(revlog.rev(x)))
1714 1714
1715 1715 bundler = changegroup.bundle10(lookup)
1716 1716 reorder = self.ui.config('bundle', 'reorder', 'auto')
1717 1717 if reorder == 'auto':
1718 1718 reorder = None
1719 1719 else:
1720 1720 reorder = util.parsebool(reorder)
1721 1721
1722 1722 def gengroup():
1723 1723 '''yield a sequence of changegroup chunks (strings)'''
1724 1724 # construct a list of all changed files
1725 1725
1726 1726 for chunk in cl.group(nodes, bundler, reorder=reorder):
1727 1727 yield chunk
1728 1728 self.ui.progress(_('bundling'), None)
1729 1729
1730 1730 count[0] = 0
1731 1731 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1732 1732 yield chunk
1733 1733 self.ui.progress(_('bundling'), None)
1734 1734
1735 1735 count[0] = 0
1736 1736 for fname in sorted(changedfiles):
1737 1737 filerevlog = self.file(fname)
1738 1738 if not len(filerevlog):
1739 1739 raise util.Abort(_("empty or missing revlog for %s") % fname)
1740 1740 fstate[0] = fname
1741 1741 nodelist = gennodelst(filerevlog)
1742 1742 if nodelist:
1743 1743 count[0] += 1
1744 1744 yield bundler.fileheader(fname)
1745 1745 for chunk in filerevlog.group(nodelist, bundler, reorder):
1746 1746 yield chunk
1747 1747 yield bundler.close()
1748 1748 self.ui.progress(_('bundling'), None)
1749 1749
1750 1750 if nodes:
1751 1751 self.hook('outgoing', node=hex(nodes[0]), source=source)
1752 1752
1753 1753 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1754 1754
1755 1755 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1756 1756 """Add the changegroup returned by source.read() to this repo.
1757 1757 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1758 1758 the URL of the repo where this changegroup is coming from.
1759 1759 If lock is not None, the function takes ownership of the lock
1760 1760 and releases it after the changegroup is added.
1761 1761
1762 1762 Return an integer summarizing the change to this repo:
1763 1763 - nothing changed or no source: 0
1764 1764 - more heads than before: 1+added heads (2..n)
1765 1765 - fewer heads than before: -1-removed heads (-2..-n)
1766 1766 - number of heads stays the same: 1
1767 1767 """
1768 1768 def csmap(x):
1769 1769 self.ui.debug("add changeset %s\n" % short(x))
1770 1770 return len(cl)
1771 1771
1772 1772 def revmap(x):
1773 1773 return cl.rev(x)
1774 1774
1775 1775 if not source:
1776 1776 return 0
1777 1777
1778 1778 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1779 1779
1780 1780 changesets = files = revisions = 0
1781 1781 efiles = set()
1782 1782
1783 1783 # write changelog data to temp files so concurrent readers will not see
1784 1784 # inconsistent view
1785 1785 cl = self.changelog
1786 1786 cl.delayupdate()
1787 1787 oldheads = cl.heads()
1788 1788
1789 1789 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1790 1790 try:
1791 1791 trp = weakref.proxy(tr)
1792 1792 # pull off the changeset group
1793 1793 self.ui.status(_("adding changesets\n"))
1794 1794 clstart = len(cl)
1795 1795 class prog(object):
1796 1796 step = _('changesets')
1797 1797 count = 1
1798 1798 ui = self.ui
1799 1799 total = None
1800 1800 def __call__(self):
1801 1801 self.ui.progress(self.step, self.count, unit=_('chunks'),
1802 1802 total=self.total)
1803 1803 self.count += 1
1804 1804 pr = prog()
1805 1805 source.callback = pr
1806 1806
1807 1807 source.changelogheader()
1808 1808 if (cl.addgroup(source, csmap, trp) is None
1809 1809 and not emptyok):
1810 1810 raise util.Abort(_("received changelog group is empty"))
1811 1811 clend = len(cl)
1812 1812 changesets = clend - clstart
1813 1813 for c in xrange(clstart, clend):
1814 1814 efiles.update(self[c].files())
1815 1815 efiles = len(efiles)
1816 1816 self.ui.progress(_('changesets'), None)
1817 1817
1818 1818 # pull off the manifest group
1819 1819 self.ui.status(_("adding manifests\n"))
1820 1820 pr.step = _('manifests')
1821 1821 pr.count = 1
1822 1822 pr.total = changesets # manifests <= changesets
1823 1823 # no need to check for empty manifest group here:
1824 1824 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1825 1825 # no new manifest will be created and the manifest group will
1826 1826 # be empty during the pull
1827 1827 source.manifestheader()
1828 1828 self.manifest.addgroup(source, revmap, trp)
1829 1829 self.ui.progress(_('manifests'), None)
1830 1830
1831 1831 needfiles = {}
1832 1832 if self.ui.configbool('server', 'validate', default=False):
1833 1833 # validate incoming csets have their manifests
1834 1834 for cset in xrange(clstart, clend):
1835 1835 mfest = self.changelog.read(self.changelog.node(cset))[0]
1836 1836 mfest = self.manifest.readdelta(mfest)
1837 1837 # store file nodes we must see
1838 1838 for f, n in mfest.iteritems():
1839 1839 needfiles.setdefault(f, set()).add(n)
1840 1840
1841 1841 # process the files
1842 1842 self.ui.status(_("adding file changes\n"))
1843 1843 pr.step = _('files')
1844 1844 pr.count = 1
1845 1845 pr.total = efiles
1846 1846 source.callback = None
1847 1847
1848 1848 while True:
1849 1849 chunkdata = source.filelogheader()
1850 1850 if not chunkdata:
1851 1851 break
1852 1852 f = chunkdata["filename"]
1853 1853 self.ui.debug("adding %s revisions\n" % f)
1854 1854 pr()
1855 1855 fl = self.file(f)
1856 1856 o = len(fl)
1857 1857 if fl.addgroup(source, revmap, trp) is None:
1858 1858 raise util.Abort(_("received file revlog group is empty"))
1859 1859 revisions += len(fl) - o
1860 1860 files += 1
1861 1861 if f in needfiles:
1862 1862 needs = needfiles[f]
1863 1863 for new in xrange(o, len(fl)):
1864 1864 n = fl.node(new)
1865 1865 if n in needs:
1866 1866 needs.remove(n)
1867 1867 if not needs:
1868 1868 del needfiles[f]
1869 1869 self.ui.progress(_('files'), None)
1870 1870
1871 1871 for f, needs in needfiles.iteritems():
1872 1872 fl = self.file(f)
1873 1873 for n in needs:
1874 1874 try:
1875 1875 fl.rev(n)
1876 1876 except error.LookupError:
1877 1877 raise util.Abort(
1878 1878 _('missing file data for %s:%s - run hg verify') %
1879 1879 (f, hex(n)))
1880 1880
1881 1881 dh = 0
1882 1882 if oldheads:
1883 1883 heads = cl.heads()
1884 1884 dh = len(heads) - len(oldheads)
1885 1885 for h in heads:
1886 1886 if h not in oldheads and 'close' in self[h].extra():
1887 1887 dh -= 1
1888 1888 htext = ""
1889 1889 if dh:
1890 1890 htext = _(" (%+d heads)") % dh
1891 1891
1892 1892 self.ui.status(_("added %d changesets"
1893 1893 " with %d changes to %d files%s\n")
1894 1894 % (changesets, revisions, files, htext))
1895 1895
1896 1896 if changesets > 0:
1897 1897 p = lambda: cl.writepending() and self.root or ""
1898 1898 self.hook('pretxnchangegroup', throw=True,
1899 1899 node=hex(cl.node(clstart)), source=srctype,
1900 1900 url=url, pending=p)
1901 1901
1902 1902 # make changelog see real files again
1903 1903 cl.finalize(trp)
1904 1904
1905 1905 tr.close()
1906 1906 finally:
1907 1907 tr.release()
1908 1908 if lock:
1909 1909 lock.release()
1910 1910
1911 1911 if changesets > 0:
1912 1912 # forcefully update the on-disk branch cache
1913 1913 self.ui.debug("updating the branch cache\n")
1914 1914 self.updatebranchcache()
1915 1915 self.hook("changegroup", node=hex(cl.node(clstart)),
1916 1916 source=srctype, url=url)
1917 1917
1918 1918 for i in xrange(clstart, clend):
1919 1919 self.hook("incoming", node=hex(cl.node(i)),
1920 1920 source=srctype, url=url)
1921 1921
1922 1922 # never return 0 here:
1923 1923 if dh < 0:
1924 1924 return dh - 1
1925 1925 else:
1926 1926 return dh + 1
1927 1927
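The return value encodes the head-count delta described in the docstring, with zero reserved for "nothing happened". A small hypothetical helper that decodes it:

def describe_result(ret):
    # hypothetical decoder for addchangegroup()'s return value
    if ret == 0:
        return 'no changes found'
    if ret == 1:
        return 'changes added, head count unchanged'
    if ret > 1:
        return '%d new head(s)' % (ret - 1)     # ret = dh + 1
    return '%d head(s) removed' % (-ret - 1)    # ret = dh - 1
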
1928 1928 def stream_in(self, remote, requirements):
1929 1929 lock = self.lock()
1930 1930 try:
1931 1931 fp = remote.stream_out()
1932 1932 l = fp.readline()
1933 1933 try:
1934 1934 resp = int(l)
1935 1935 except ValueError:
1936 1936 raise error.ResponseError(
1937 1937 _('Unexpected response from remote server:'), l)
1938 1938 if resp == 1:
1939 1939 raise util.Abort(_('operation forbidden by server'))
1940 1940 elif resp == 2:
1941 1941 raise util.Abort(_('locking the remote repository failed'))
1942 1942 elif resp != 0:
1943 1943 raise util.Abort(_('the server sent an unknown error code'))
1944 1944 self.ui.status(_('streaming all changes\n'))
1945 1945 l = fp.readline()
1946 1946 try:
1947 1947 total_files, total_bytes = map(int, l.split(' ', 1))
1948 1948 except (ValueError, TypeError):
1949 1949 raise error.ResponseError(
1950 1950 _('Unexpected response from remote server:'), l)
1951 1951 self.ui.status(_('%d files to transfer, %s of data\n') %
1952 1952 (total_files, util.bytecount(total_bytes)))
1953 1953 start = time.time()
1954 1954 for i in xrange(total_files):
1955 1955 # XXX doesn't support '\n' or '\r' in filenames
1956 1956 l = fp.readline()
1957 1957 try:
1958 1958 name, size = l.split('\0', 1)
1959 1959 size = int(size)
1960 1960 except (ValueError, TypeError):
1961 1961 raise error.ResponseError(
1962 1962 _('Unexpected response from remote server:'), l)
1963 1963 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1964 1964 # for backwards compat, name was partially encoded
1965 1965 ofp = self.sopener(store.decodedir(name), 'w')
1966 1966 for chunk in util.filechunkiter(fp, limit=size):
1967 1967 ofp.write(chunk)
1968 1968 ofp.close()
1969 1969 elapsed = time.time() - start
1970 1970 if elapsed <= 0:
1971 1971 elapsed = 0.001
1972 1972 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1973 1973 (util.bytecount(total_bytes), elapsed,
1974 1974 util.bytecount(total_bytes / elapsed)))
1975 1975
1976 1976 # new requirements = old non-format requirements + new format-related
1977 1977 # requirements from the streamed-in repository
1978 1978 requirements.update(set(self.requirements) - self.supportedformats)
1979 1979 self._applyrequirements(requirements)
1980 1980 self._writerequirements()
1981 1981
1982 1982 self.invalidate()
1983 1983 return len(self.heads()) + 1
1984 1984 finally:
1985 1985 lock.release()
1986 1986
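stream_in() expects a simple framed response: a status line, a "<files> <bytes>" summary line, then for each file a "<name>\0<size>" header followed by exactly size bytes of raw content. A hedged standalone parser for that framing (error handling and chunked reads elided):

def parse_stream_response(fp):
    resp = int(fp.readline())              # 0 = ok, 1/2 = refused
    if resp != 0:
        raise ValueError('server refused stream clone: %d' % resp)
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for _i in xrange(total_files):
        name, size = fp.readline().split('\0', 1)
        yield name, fp.read(int(size))     # raw file contents follow
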
1987 1987 def clone(self, remote, heads=[], stream=False):
1988 1988 '''clone remote repository.
1989 1989
1990 1990 keyword arguments:
1991 1991 heads: list of revs to clone (forces use of pull)
1992 1992 stream: use streaming clone if possible'''
1993 1993
1994 1994 # now, all clients that can request uncompressed clones can
1995 1995 # read repo formats supported by all servers that can serve
1996 1996 # them.
1997 1997
1998 1998 # if revlog format changes, client will have to check version
1999 1999 # and format flags on "stream" capability, and use
2000 2000 # uncompressed only if compatible.
2001 2001
2002 2002 if stream and not heads:
2003 2003 # 'stream' means remote revlog format is revlogv1 only
2004 2004 if remote.capable('stream'):
2005 2005 return self.stream_in(remote, set(('revlogv1',)))
2006 2006 # otherwise, 'streamreqs' contains the remote revlog format
2007 2007 streamreqs = remote.capable('streamreqs')
2008 2008 if streamreqs:
2009 2009 streamreqs = set(streamreqs.split(','))
2010 2010 # if we support it, stream in and adjust our requirements
2011 2011 if not streamreqs - self.supportedformats:
2012 2012 return self.stream_in(remote, streamreqs)
2013 2013 return self.pull(remote, heads)
2014 2014
2015 2015 def pushkey(self, namespace, key, old, new):
2016 2016 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
2017 2017 old=old, new=new)
2018 2018 ret = pushkey.push(self, namespace, key, old, new)
2019 2019 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
2020 2020 ret=ret)
2021 2021 return ret
2022 2022
2023 2023 def listkeys(self, namespace):
2024 2024 self.hook('prelistkeys', throw=True, namespace=namespace)
2025 2025 values = pushkey.list(self, namespace)
2026 2026 self.hook('listkeys', namespace=namespace, values=values)
2027 2027 return values
2028 2028
2029 2029 def debugwireargs(self, one, two, three=None, four=None, five=None):
2030 2030 '''used to test argument passing over the wire'''
2031 2031 return "%s %s %s %s %s" % (one, two, three, four, five)
2032 2032
2033 2033 def savecommitmessage(self, text):
2034 2034 fp = self.opener('last-message.txt', 'wb')
2035 2035 try:
2036 2036 fp.write(text)
2037 2037 finally:
2038 2038 fp.close()
2039 2039 return self.pathto(fp.name[len(self.root)+1:])
2040 2040
2041 2041 # used to avoid circular references so destructors work
2042 2042 def aftertrans(files):
2043 2043 renamefiles = [tuple(t) for t in files]
2044 2044 def a():
2045 2045 for src, dest in renamefiles:
2046 2046 util.rename(src, dest)
2047 2047 return a
2048 2048
2049 2049 def undoname(fn):
2050 2050 base, name = os.path.split(fn)
2051 2051 assert name.startswith('journal')
2052 2052 return os.path.join(base, name.replace('journal', 'undo', 1))
2053 2053
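aftertrans() returns a plain closure precisely so the transaction can hold it without keeping a reference back to the repository, and undoname() derives the post-transaction name. Hypothetical usage (the file names are illustrative):

post = aftertrans([('journal', 'undo'),
                   ('journal.dirstate', 'undo.dirstate')])
# post() would now perform util.rename(src, dest) for each pair,
# without the closure holding any repository reference
assert undoname('journal.dirstate') == 'undo.dirstate'
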
2054 2054 def instance(ui, path, create):
2055 2055 return localrepository(ui, util.urllocalpath(path), create)
2056 2056
2057 2057 def islocal(path):
2058 2058 return True
@@ -1,1279 +1,1279 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 # import stuff from node for others to import from revlog
15 15 from node import bin, hex, nullid, nullrev
16 16 from i18n import _
17 17 import ancestor, mdiff, parsers, error, util, dagutil
18 18 import struct, zlib, errno
19 19
20 20 _pack = struct.pack
21 21 _unpack = struct.unpack
22 22 _compress = zlib.compress
23 23 _decompress = zlib.decompress
24 24 _sha = util.sha1
25 25
26 26 # revlog header flags
27 27 REVLOGV0 = 0
28 28 REVLOGNG = 1
29 29 REVLOGNGINLINEDATA = (1 << 16)
30 30 REVLOGGENERALDELTA = (1 << 17)
31 31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
32 32 REVLOG_DEFAULT_FORMAT = REVLOGNG
33 33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
34 34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
35 35
36 36 # revlog index flags
37 37 REVIDX_KNOWN_FLAGS = 0
38 38
39 39 # max size of revlog with inline data
40 40 _maxinline = 131072
41 41 _chunksize = 1048576
42 42
43 43 RevlogError = error.RevlogError
44 44 LookupError = error.LookupError
45 45
46 46 def getoffset(q):
47 47 return int(q >> 16)
48 48
49 49 def gettype(q):
50 50 return int(q & 0xFFFF)
51 51
52 52 def offset_type(offset, type):
53 53 return long(long(offset) << 16 | type)
54 54
55 55 nullhash = _sha(nullid)
56 56
57 57 def hash(text, p1, p2):
58 58 """generate a hash from the given text and its parent hashes
59 59
60 60 This hash combines both the current file contents and its history
61 61 in a manner that makes it easy to distinguish nodes with the same
62 62 content in the revision graph.
63 63 """
64 64 # As of now, if one of the parent nodes is null, p2 is null
65 65 if p2 == nullid:
66 66 # deep copy of a hash is faster than creating one
67 67 s = nullhash.copy()
68 68 s.update(p1)
69 69 else:
70 70 # none of the parent nodes are nullid
71 71 l = [p1, p2]
72 72 l.sort()
73 73 s = _sha(l[0])
74 74 s.update(l[1])
75 75 s.update(text)
76 76 return s.digest()
77 77
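Ignoring the null-parent fast path (which produces an identical digest), the node hash above reduces to one SHA-1 over the sorted parent ids followed by the text. An equivalent standalone form:

import hashlib

def nodeid(text, p1, p2):
    a, b = sorted([p1, p2])    # order-independent over parents
    return hashlib.sha1(a + b + text).digest()
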
78 78 def compress(text):
79 79 """ generate a possibly-compressed representation of text """
80 80 if not text:
81 81 return ("", text)
82 82 l = len(text)
83 83 bin = None
84 84 if l < 44:
85 85 pass
86 86 elif l > 1000000:
87 87 # zlib makes an internal copy, thus doubling memory usage for
88 88 # large files, so let's do this in pieces
89 89 z = zlib.compressobj()
90 90 p = []
91 91 pos = 0
92 92 while pos < l:
93 93 pos2 = pos + 2**20
94 94 p.append(z.compress(text[pos:pos2]))
95 95 pos = pos2
96 96 p.append(z.flush())
97 97 if sum(map(len, p)) < l:
98 98 bin = "".join(p)
99 99 else:
100 100 bin = _compress(text)
101 101 if bin is None or len(bin) > l:
102 102 if text[0] == '\0':
103 103 return ("", text)
104 104 return ('u', text)
105 105 return ("", bin)
106 106
107 107 def decompress(bin):
108 108 """ decompress the given input """
109 109 if not bin:
110 110 return bin
111 111 t = bin[0]
112 112 if t == '\0':
113 113 return bin
114 114 if t == 'x':
115 115 return _decompress(bin)
116 116 if t == 'u':
117 117 return bin[1:]
118 118 raise RevlogError(_("unknown compression type %r") % t)
119 119
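The first byte of the stored blob acts as the type tag decompress() dispatches on: '\0' for literal text that happens to start with NUL, 'u' for text stored uncompressed, and 'x' (the zlib header byte) for deflated data. A quick roundtrip check, assuming the two functions above are importable from a Mercurial checkout:

from mercurial.revlog import compress, decompress  # assumed on path

for text in ('', 'short', 'long text ' * 200, '\0binary-ish'):
    header, body = compress(text)
    assert decompress(header + body) == text
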
120 120 indexformatv0 = ">4l20s20s20s"
121 121 v0shaoffset = 56
122 122
123 123 class revlogoldio(object):
124 124 def __init__(self):
125 125 self.size = struct.calcsize(indexformatv0)
126 126
127 127 def parseindex(self, data, inline):
128 128 s = self.size
129 129 index = []
130 130 nodemap = {nullid: nullrev}
131 131 n = off = 0
132 132 l = len(data)
133 133 while off + s <= l:
134 134 cur = data[off:off + s]
135 135 off += s
136 136 e = _unpack(indexformatv0, cur)
137 137 # transform to revlogv1 format
138 138 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
139 139 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
140 140 index.append(e2)
141 141 nodemap[e[6]] = n
142 142 n += 1
143 143
144 144 # add the magic null revision at -1
145 145 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
146 146
147 147 return index, nodemap, None
148 148
149 149 def packentry(self, entry, node, version, rev):
150 150 if gettype(entry[0]):
151 151 raise RevlogError(_("index entry flags need RevlogNG"))
152 152 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
153 153 node(entry[5]), node(entry[6]), entry[7])
154 154 return _pack(indexformatv0, *e2)
155 155
156 156 # index ng:
157 157 # 6 bytes: offset
158 158 # 2 bytes: flags
159 159 # 4 bytes: compressed length
160 160 # 4 bytes: uncompressed length
161 161 # 4 bytes: base rev
162 162 # 4 bytes: link rev
163 163 # 4 bytes: parent 1 rev
164 164 # 4 bytes: parent 2 rev
165 165 # 32 bytes: nodeid
166 166 indexformatng = ">Qiiiiii20s12x"
167 167 ngshaoffset = 32
168 168 versionformat = ">I"
169 169
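Decoding one 64-byte RevlogNG record with the format string above; the offset and flags share the first quadword, split at bit 16 exactly as getoffset()/gettype() do. A sketch (note that for revision 0 packentry() below overwrites the first four bytes with the version header):

import struct

indexformatng = ">Qiiiiii20s12x"

def parseentry(record):
    # record: one 64-byte row from the index file
    offset_flags, clen, rlen, base, link, p1, p2, node = \
        struct.unpack(indexformatng, record)
    return (offset_flags >> 16, offset_flags & 0xFFFF,  # offset, flags
            clen, rlen, base, link, p1, p2, node)
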
170 170 class revlogio(object):
171 171 def __init__(self):
172 172 self.size = struct.calcsize(indexformatng)
173 173
174 174 def parseindex(self, data, inline):
175 175 # call the C implementation to parse the index data
176 176 index, cache = parsers.parse_index2(data, inline)
177 177 return index, None, cache
178 178
179 179 def packentry(self, entry, node, version, rev):
180 180 p = _pack(indexformatng, *entry)
181 181 if rev == 0:
182 182 p = _pack(versionformat, version) + p[4:]
183 183 return p
184 184
185 185 class revlog(object):
186 186 """
187 187 the underlying revision storage object
188 188
189 189 A revlog consists of two parts, an index and the revision data.
190 190
191 191 The index is a file with a fixed record size containing
192 192 information on each revision, including its nodeid (hash), the
193 193 nodeids of its parents, the position and offset of its data within
194 194 the data file, and the revision it's based on. Finally, each entry
195 195 contains a linkrev entry that can serve as a pointer to external
196 196 data.
197 197
198 198 The revision data itself is a linear collection of data chunks.
199 199 Each chunk represents a revision and is usually represented as a
200 200 delta against the previous chunk. To bound lookup time, runs of
201 201 deltas are limited to about 2 times the length of the original
202 202 version data. This makes retrieval of a version proportional to
203 203 its size, or O(1) relative to the number of revisions.
204 204
205 205 Both pieces of the revlog are written to in an append-only
206 206 fashion, which means we never need to rewrite a file to insert or
207 207 remove data, and can use some simple techniques to avoid the need
208 208 for locking while reading.
209 209 """
210 210 def __init__(self, opener, indexfile):
211 211 """
212 212 create a revlog object
213 213
214 214 opener is a function that abstracts the file opening operation
215 215 and can be used to implement COW semantics or the like.
216 216 """
217 217 self.indexfile = indexfile
218 218 self.datafile = indexfile[:-2] + ".d"
219 219 self.opener = opener
220 220 self._cache = None
221 221 self._basecache = (0, 0)
222 222 self._chunkcache = (0, '')
223 223 self.index = []
224 224 self._pcache = {}
225 225 self._nodecache = {nullid: nullrev}
226 226 self._nodepos = None
227 227
228 228 v = REVLOG_DEFAULT_VERSION
229 229 opts = getattr(opener, 'options', None)
230 230 if opts is not None:
231 231 if 'revlogv1' in opts:
232 232 if 'generaldelta' in opts:
233 233 v |= REVLOGGENERALDELTA
234 234 else:
235 235 v = 0
236 236
237 237 i = ''
238 238 self._initempty = True
239 239 try:
240 240 f = self.opener(self.indexfile)
241 241 i = f.read()
242 242 f.close()
243 243 if len(i) > 0:
244 244 v = struct.unpack(versionformat, i[:4])[0]
245 245 self._initempty = False
246 246 except IOError, inst:
247 247 if inst.errno != errno.ENOENT:
248 248 raise
249 249
250 250 self.version = v
251 251 self._inline = v & REVLOGNGINLINEDATA
252 252 self._generaldelta = v & REVLOGGENERALDELTA
253 253 flags = v & ~0xFFFF
254 254 fmt = v & 0xFFFF
255 255 if fmt == REVLOGV0 and flags:
256 256 raise RevlogError(_("index %s unknown flags %#04x for format v0")
257 257 % (self.indexfile, flags >> 16))
258 258 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
259 259 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
260 260 % (self.indexfile, flags >> 16))
261 261 elif fmt > REVLOGNG:
262 262 raise RevlogError(_("index %s unknown format %d")
263 263 % (self.indexfile, fmt))
264 264
265 265 self._io = revlogio()
266 266 if self.version == REVLOGV0:
267 267 self._io = revlogoldio()
268 268 try:
269 269 d = self._io.parseindex(i, self._inline)
270 270 except (ValueError, IndexError):
271 271 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
272 272 self.index, nodemap, self._chunkcache = d
273 273 if nodemap is not None:
274 274 self.nodemap = self._nodecache = nodemap
275 275 if not self._chunkcache:
276 276 self._chunkclear()
277 277
278 278 def tip(self):
279 279 return self.node(len(self.index) - 2)
280 280 def __len__(self):
281 281 return len(self.index) - 1
282 282 def __iter__(self):
283 283 for i in xrange(len(self)):
284 284 yield i
285 285
286 286 @util.propertycache
287 287 def nodemap(self):
288 288 self.rev(self.node(0))
289 289 return self._nodecache
290 290
291 291 def rev(self, node):
292 292 try:
293 293 return self._nodecache[node]
294 294 except KeyError:
295 295 n = self._nodecache
296 296 i = self.index
297 297 p = self._nodepos
298 298 if p is None:
299 299 p = len(i) - 2
300 300 for r in xrange(p, -1, -1):
301 301 v = i[r][7]
302 302 n[v] = r
303 303 if v == node:
304 304 self._nodepos = r - 1
305 305 return r
306 306 raise LookupError(node, self.indexfile, _('no node'))
307 307
308 308 def node(self, rev):
309 309 return self.index[rev][7]
310 310 def linkrev(self, rev):
311 311 return self.index[rev][4]
312 312 def parents(self, node):
313 313 i = self.index
314 314 d = i[self.rev(node)]
315 315 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
316 316 def parentrevs(self, rev):
317 317 return self.index[rev][5:7]
318 318 def start(self, rev):
319 319 return int(self.index[rev][0] >> 16)
320 320 def end(self, rev):
321 321 return self.start(rev) + self.length(rev)
322 322 def length(self, rev):
323 323 return self.index[rev][1]
324 324 def chainbase(self, rev):
325 325 index = self.index
326 326 base = index[rev][3]
327 327 while base != rev:
328 328 rev = base
329 329 base = index[rev][3]
330 330 return base
331 331 def flags(self, rev):
332 332 return self.index[rev][0] & 0xFFFF
333 333 def rawsize(self, rev):
334 334 """return the length of the uncompressed text for a given revision"""
335 335 l = self.index[rev][2]
336 336 if l >= 0:
337 337 return l
338 338
339 339 t = self.revision(self.node(rev))
340 340 return len(t)
341 341 size = rawsize
342 342
343 343 def reachable(self, node, stop=None):
344 344 """return the set of all nodes ancestral to a given node, including
345 345 the node itself, stopping when stop is matched"""
346 346 reachable = set((node,))
347 347 visit = [node]
348 348 if stop:
349 349 stopn = self.rev(stop)
350 350 else:
351 351 stopn = 0
352 352 while visit:
353 353 n = visit.pop(0)
354 354 if n == stop:
355 355 continue
356 356 if n == nullid:
357 357 continue
358 358 for p in self.parents(n):
359 359 if self.rev(p) < stopn:
360 360 continue
361 361 if p not in reachable:
362 362 reachable.add(p)
363 363 visit.append(p)
364 364 return reachable
365 365
366 366 def ancestors(self, *revs):
367 367 """Generate the ancestors of 'revs' in reverse topological order.
368 368
369 369 Yield a sequence of revision numbers starting with the parents
370 370 of each revision in revs, i.e., each revision is *not* considered
371 371 an ancestor of itself. Results are in breadth-first order:
372 372 parents of each rev in revs, then parents of those, etc. Result
373 373 does not include the null revision."""
374 374 visit = list(revs)
375 375 seen = set([nullrev])
376 376 while visit:
377 377 for parent in self.parentrevs(visit.pop(0)):
378 378 if parent not in seen:
379 379 visit.append(parent)
380 380 seen.add(parent)
381 381 yield parent
382 382
383 383 def descendants(self, *revs):
384 384 """Generate the descendants of 'revs' in revision order.
385 385
386 386 Yield a sequence of revision numbers starting with a child of
387 387 some rev in revs, i.e., each revision is *not* considered a
388 388 descendant of itself. Results are ordered by revision number (a
389 389 topological sort)."""
390 390 first = min(revs)
391 391 if first == nullrev:
392 392 for i in self:
393 393 yield i
394 394 return
395 395
396 396 seen = set(revs)
397 397 for i in xrange(first + 1, len(self)):
398 398 for x in self.parentrevs(i):
399 399 if x != nullrev and x in seen:
400 400 seen.add(i)
401 401 yield i
402 402 break
403 403
404 404 def findcommonmissing(self, common=None, heads=None):
405 405 """Return a tuple of the ancestors of common and the ancestors of heads
406 406 that are not ancestors of common.
407 407
408 408 More specifically, the second element is a list of nodes N such that
409 409 every N satisfies the following constraints:
410 410
411 411 1. N is an ancestor of some node in 'heads'
412 412 2. N is not an ancestor of any node in 'common'
413 413
414 414 The list is sorted by revision number, meaning it is
415 415 topologically sorted.
416 416
417 417 'heads' and 'common' are both lists of node IDs. If heads is
418 418 not supplied, uses all of the revlog's heads. If common is not
419 419 supplied, uses nullid."""
420 420 if common is None:
421 421 common = [nullid]
422 422 if heads is None:
423 423 heads = self.heads()
424 424
425 425 common = [self.rev(n) for n in common]
426 426 heads = [self.rev(n) for n in heads]
427 427
428 428 # we want the ancestors, but inclusive
429 429 has = set(self.ancestors(*common))
430 430 has.add(nullrev)
431 431 has.update(common)
432 432
433 433 # take all ancestors from heads that aren't in has
434 434 missing = set()
435 435 visit = [r for r in heads if r not in has]
436 436 while visit:
437 437 r = visit.pop(0)
438 438 if r in missing:
439 439 continue
440 440 else:
441 441 missing.add(r)
442 442 for p in self.parentrevs(r):
443 443 if p not in has:
444 444 visit.append(p)
445 445 missing = list(missing)
446 446 missing.sort()
447 447 return has, [self.node(r) for r in missing]
448 448
449 449 def findmissing(self, common=None, heads=None):
450 450 """Return the ancestors of heads that are not ancestors of common.
451 451
452 452 More specifically, return a list of nodes N such that every N
453 453 satisfies the following constraints:
454 454
455 455 1. N is an ancestor of some node in 'heads'
456 456 2. N is not an ancestor of any node in 'common'
457 457
458 458 The list is sorted by revision number, meaning it is
459 459 topologically sorted.
460 460
461 461 'heads' and 'common' are both lists of node IDs. If heads is
462 462 not supplied, uses all of the revlog's heads. If common is not
463 463 supplied, uses nullid."""
464 464 _common, missing = self.findcommonmissing(common, heads)
465 465 return missing
466 466
467 467 def nodesbetween(self, roots=None, heads=None):
468 468 """Return a topological path from 'roots' to 'heads'.
469 469
470 470 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
471 471 topologically sorted list of all nodes N that satisfy both of
472 472 these constraints:
473 473
474 474 1. N is a descendant of some node in 'roots'
475 475 2. N is an ancestor of some node in 'heads'
476 476
477 477 Every node is considered to be both a descendant and an ancestor
478 478 of itself, so every reachable node in 'roots' and 'heads' will be
479 479 included in 'nodes'.
480 480
481 481 'outroots' is the list of reachable nodes in 'roots', i.e., the
482 482 subset of 'roots' that is returned in 'nodes'. Likewise,
483 483 'outheads' is the subset of 'heads' that is also in 'nodes'.
484 484
485 485 'roots' and 'heads' are both lists of node IDs. If 'roots' is
486 486 unspecified, uses nullid as the only root. If 'heads' is
487 487 unspecified, uses list of all of the revlog's heads."""
488 488 nonodes = ([], [], [])
489 489 if roots is not None:
490 490 roots = list(roots)
491 491 if not roots:
492 492 return nonodes
493 493 lowestrev = min([self.rev(n) for n in roots])
494 494 else:
495 495 roots = [nullid] # Everybody's a descendant of nullid
496 496 lowestrev = nullrev
497 497 if (lowestrev == nullrev) and (heads is None):
498 498 # We want _all_ the nodes!
499 499 return ([self.node(r) for r in self], [nullid], list(self.heads()))
500 500 if heads is None:
501 501 # All nodes are ancestors, so the latest ancestor is the last
502 502 # node.
503 503 highestrev = len(self) - 1
504 504 # Set ancestors to None to signal that every node is an ancestor.
505 505 ancestors = None
506 506 # Set heads to an empty dictionary for later discovery of heads
507 507 heads = {}
508 508 else:
509 509 heads = list(heads)
510 510 if not heads:
511 511 return nonodes
512 512 ancestors = set()
513 513 # Turn heads into a dictionary so we can remove 'fake' heads.
514 514 # Also, later we will be using it to filter out the heads we can't
515 515 # find from roots.
516 516 heads = dict.fromkeys(heads, False)
517 517 # Start at the top and keep marking parents until we're done.
518 518 nodestotag = set(heads)
519 519 # Remember where the top was so we can use it as a limit later.
520 520 highestrev = max([self.rev(n) for n in nodestotag])
521 521 while nodestotag:
522 522 # grab a node to tag
523 523 n = nodestotag.pop()
524 524 # Never tag nullid
525 525 if n == nullid:
526 526 continue
527 527 # A node's revision number represents its place in a
528 528 # topologically sorted list of nodes.
529 529 r = self.rev(n)
530 530 if r >= lowestrev:
531 531 if n not in ancestors:
532 532 # If we are possibly a descendant of one of the roots
533 533 # and we haven't already been marked as an ancestor
534 534 ancestors.add(n) # Mark as ancestor
535 535 # Add non-nullid parents to list of nodes to tag.
536 536 nodestotag.update([p for p in self.parents(n) if
537 537 p != nullid])
538 538 elif n in heads: # We've seen it before, is it a fake head?
539 539 # So it is, real heads should not be the ancestors of
540 540 # any other heads.
541 541 heads.pop(n)
542 542 if not ancestors:
543 543 return nonodes
544 544 # Now that we have our set of ancestors, we want to remove any
545 545 # roots that are not ancestors.
546 546
547 547 # If one of the roots was nullid, everything is included anyway.
548 548 if lowestrev > nullrev:
549 549 # But, since we weren't, let's recompute the lowest rev to not
550 550 # include roots that aren't ancestors.
551 551
552 552 # Filter out roots that aren't ancestors of heads
553 553 roots = [n for n in roots if n in ancestors]
554 554 # Recompute the lowest revision
555 555 if roots:
556 556 lowestrev = min([self.rev(n) for n in roots])
557 557 else:
558 558 # No more roots? Return empty list
559 559 return nonodes
560 560 else:
561 561 # We are descending from nullid, and don't need to care about
562 562 # any other roots.
563 563 lowestrev = nullrev
564 564 roots = [nullid]
565 565 # Transform our roots list into a set.
566 566 descendants = set(roots)
567 567 # Also, keep the original roots so we can filter out roots that aren't
568 568 # 'real' roots (i.e. are descended from other roots).
569 569 roots = descendants.copy()
570 570 # Our topologically sorted list of output nodes.
571 571 orderedout = []
572 572 # Don't start at nullid since we don't want nullid in our output list,
573 573 # and if nullid shows up in descendants, empty parents will look like
574 574 # they're descendants.
575 575 for r in xrange(max(lowestrev, 0), highestrev + 1):
576 576 n = self.node(r)
577 577 isdescendant = False
578 578 if lowestrev == nullrev: # Everybody is a descendant of nullid
579 579 isdescendant = True
580 580 elif n in descendants:
581 581 # n is already a descendant
582 582 isdescendant = True
583 583 # This check only needs to be done here because all the roots
584 584 # will start being marked as descendants before the loop.
585 585 if n in roots:
586 586 # If n was a root, check if it's a 'real' root.
587 587 p = tuple(self.parents(n))
588 588 # If any of its parents are descendants, it's not a root.
589 589 if (p[0] in descendants) or (p[1] in descendants):
590 590 roots.remove(n)
591 591 else:
592 592 p = tuple(self.parents(n))
593 593 # A node is a descendant if either of its parents is a
594 594 # descendant. (We seeded the descendants set with the roots
595 595 # up there, remember?)
596 596 if (p[0] in descendants) or (p[1] in descendants):
597 597 descendants.add(n)
598 598 isdescendant = True
599 599 if isdescendant and ((ancestors is None) or (n in ancestors)):
600 600 # Only include nodes that are both descendants and ancestors.
601 601 orderedout.append(n)
602 602 if (ancestors is not None) and (n in heads):
603 603 # We're trying to figure out which heads are reachable
604 604 # from roots.
605 605 # Mark this head as having been reached
606 606 heads[n] = True
607 607 elif ancestors is None:
608 608 # Otherwise, we're trying to discover the heads.
609 609 # Assume this is a head because if it isn't, the next step
610 610 # will eventually remove it.
611 611 heads[n] = True
612 612 # But, obviously its parents aren't.
613 613 for p in self.parents(n):
614 614 heads.pop(p, None)
615 615 heads = [n for n, flag in heads.iteritems() if flag]
616 616 roots = list(roots)
617 617 assert orderedout
618 618 assert roots
619 619 assert heads
620 620 return (orderedout, roots, heads)
621 621
622 622 def headrevs(self):
623 623 count = len(self)
624 624 if not count:
625 625 return [nullrev]
626 626 ishead = [1] * (count + 1)
627 627 index = self.index
628 628 for r in xrange(count):
629 629 e = index[r]
630 630 ishead[e[5]] = ishead[e[6]] = 0
631 631 return [r for r in xrange(count) if ishead[r]]
632 632
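headrevs() computes heads in one linear pass: every revision starts as a candidate and is struck out as soon as it appears as somebody's parent. The same idea on a bare parent table:

def heads_of(parentrevs):
    # parentrevs: list of (p1, p2) revision pairs, -1 for null
    ishead = [True] * len(parentrevs)
    for p1, p2 in parentrevs:
        for p in (p1, p2):
            if p >= 0:
                ishead[p] = False
    return [r for r in range(len(parentrevs)) if ishead[r]]

# e.g. a two-headed graph: 0 <- 1, 0 <- 2
assert heads_of([(-1, -1), (0, -1), (0, -1)]) == [1, 2]
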
633 633 def heads(self, start=None, stop=None):
634 634 """return the list of all nodes that have no children
635 635
636 636 if start is specified, only heads that are descendants of
637 637 start will be returned
638 638 if stop is specified, it will consider all the revs from stop
639 639 as if they had no children
640 640 """
641 641 if start is None and stop is None:
642 642 if not len(self):
643 643 return [nullid]
644 644 return [self.node(r) for r in self.headrevs()]
645 645
646 646 if start is None:
647 647 start = nullid
648 648 if stop is None:
649 649 stop = []
650 650 stoprevs = set([self.rev(n) for n in stop])
651 651 startrev = self.rev(start)
652 652 reachable = set((startrev,))
653 653 heads = set((startrev,))
654 654
655 655 parentrevs = self.parentrevs
656 656 for r in xrange(startrev + 1, len(self)):
657 657 for p in parentrevs(r):
658 658 if p in reachable:
659 659 if r not in stoprevs:
660 660 reachable.add(r)
661 661 heads.add(r)
662 662 if p in heads and p not in stoprevs:
663 663 heads.remove(p)
664 664
665 665 return [self.node(r) for r in heads]
666 666
667 667 def children(self, node):
668 668 """find the children of a given node"""
669 669 c = []
670 670 p = self.rev(node)
671 671 for r in range(p + 1, len(self)):
672 672 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
673 673 if prevs:
674 674 for pr in prevs:
675 675 if pr == p:
676 676 c.append(self.node(r))
677 677 elif p == nullrev:
678 678 c.append(self.node(r))
679 679 return c
680 680
681 681 def descendant(self, start, end):
682 682 if start == nullrev:
683 683 return True
684 684 for i in self.descendants(start):
685 685 if i == end:
686 686 return True
687 687 elif i > end:
688 688 break
689 689 return False
690 690
691 691 def ancestor(self, a, b):
692 692 """calculate the least common ancestor of nodes a and b"""
693 693
694 694 # fast path, check if it is a descendant
695 695 a, b = self.rev(a), self.rev(b)
696 696 start, end = sorted((a, b))
697 697 if self.descendant(start, end):
698 698 return self.node(start)
699 699
700 700 def parents(rev):
701 701 return [p for p in self.parentrevs(rev) if p != nullrev]
702 702
703 703 c = ancestor.ancestor(a, b, parents)
704 704 if c is None:
705 705 return nullid
706 706
707 707 return self.node(c)
708 708
709 709 def _match(self, id):
710 710 if isinstance(id, (long, int)):
711 711 # rev
712 712 return self.node(id)
713 713 if len(id) == 20:
714 714 # possibly a binary node
715 715 # odds of a binary node being all hex in ASCII are 1 in 10**25
716 716 try:
717 717 node = id
718 718 self.rev(node) # quick search the index
719 719 return node
720 720 except LookupError:
721 721 pass # may be partial hex id
722 722 try:
723 723 # str(rev)
724 724 rev = int(id)
725 725 if str(rev) != id:
726 726 raise ValueError
727 727 if rev < 0:
728 728 rev = len(self) + rev
729 729 if rev < 0 or rev >= len(self):
730 730 raise ValueError
731 731 return self.node(rev)
732 732 except (ValueError, OverflowError):
733 733 pass
734 734 if len(id) == 40:
735 735 try:
736 736 # a full hex nodeid?
737 737 node = bin(id)
738 738 self.rev(node)
739 739 return node
740 740 except (TypeError, LookupError):
741 741 pass
742 742
743 743 def _partialmatch(self, id):
744 744 if id in self._pcache:
745 745 return self._pcache[id]
746 746
747 747 if len(id) < 40:
748 748 try:
749 749 # hex(node)[:...]
750 750 l = len(id) // 2 # grab an even number of digits
751 751 prefix = bin(id[:l * 2])
752 752 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
753 753 nl = [n for n in nl if hex(n).startswith(id)]
754 754 if len(nl) > 0:
755 755 if len(nl) == 1:
756 756 self._pcache[id] = nl[0]
757 757 return nl[0]
758 758 raise LookupError(id, self.indexfile,
759 759 _('ambiguous identifier'))
760 760 return None
761 761 except TypeError:
762 762 pass
763 763
764 764 def lookup(self, id):
765 765 """locate a node based on:
766 766 - revision number or str(revision number)
767 767 - nodeid or subset of hex nodeid
768 768 """
769 769 n = self._match(id)
770 770 if n is not None:
771 771 return n
772 772 n = self._partialmatch(id)
773 773 if n:
774 774 return n
775 775
776 776 raise LookupError(id, self.indexfile, _('no match found'))
777 777
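Hypothetical use of the resolution order above against a real changelog (assumes a Mercurial checkout of this era on the path and a non-empty repository at the given path):

from mercurial import hg, ui
from mercurial.node import hex

repo = hg.repository(ui.ui(), '/path/to/repo')   # hypothetical path
cl = repo.changelog
node = cl.lookup('0')                    # str(rev)
assert cl.lookup(0) == node              # rev number
assert cl.lookup(hex(node)) == node      # full 40-char hex
assert cl.lookup(hex(node)[:12]) == node # unambiguous prefix
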
778 778 def cmp(self, node, text):
779 779 """compare text with a given file revision
780 780
781 781 returns True if text is different than what is stored.
782 782 """
783 783 p1, p2 = self.parents(node)
784 784 return hash(text, p1, p2) != node
785 785
786 786 def _addchunk(self, offset, data):
787 787 o, d = self._chunkcache
788 788 # try to add to existing cache
789 789 if o + len(d) == offset and len(d) + len(data) < _chunksize:
790 790 self._chunkcache = o, d + data
791 791 else:
792 792 self._chunkcache = offset, data
793 793
794 794 def _loadchunk(self, offset, length):
795 795 if self._inline:
796 796 df = self.opener(self.indexfile)
797 797 else:
798 798 df = self.opener(self.datafile)
799 799
800 800 readahead = max(65536, length)
801 801 df.seek(offset)
802 802 d = df.read(readahead)
803 803 self._addchunk(offset, d)
804 804 if readahead > length:
805 805 return d[:length]
806 806 return d
807 807
808 808 def _getchunk(self, offset, length):
809 809 o, d = self._chunkcache
810 810 l = len(d)
811 811
812 812 # is it in the cache?
813 813 cachestart = offset - o
814 814 cacheend = cachestart + length
815 815 if cachestart >= 0 and cacheend <= l:
816 816 if cachestart == 0 and cacheend == l:
817 817 return d # avoid a copy
818 818 return d[cachestart:cacheend]
819 819
820 820 return self._loadchunk(offset, length)
821 821
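The three methods above implement a single-window read cache: _getchunk() serves a slice when the request falls inside the cached (offset, data) pair, otherwise _loadchunk() reads at least 64KB and replaces the window. A distilled standalone version:

class ReadWindow(object):
    """Single-window cache over a read(offset, length) callable."""
    def __init__(self, read, minread=65536):
        self.read = read
        self.offset, self.data = 0, ''
        self.minread = minread

    def get(self, offset, length):
        start = offset - self.offset
        if 0 <= start and start + length <= len(self.data):
            return self.data[start:start + length]   # cache hit
        self.offset = offset
        self.data = self.read(offset, max(self.minread, length))
        return self.data[:length]
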
822 822 def _chunkraw(self, startrev, endrev):
823 823 start = self.start(startrev)
824 824 length = self.end(endrev) - start
825 825 if self._inline:
826 826 start += (startrev + 1) * self._io.size
827 827 return self._getchunk(start, length)
828 828
829 829 def _chunk(self, rev):
830 830 return decompress(self._chunkraw(rev, rev))
831 831
832 832 def _chunkbase(self, rev):
833 833 return self._chunk(rev)
834 834
835 835 def _chunkclear(self):
836 836 self._chunkcache = (0, '')
837 837
838 838 def deltaparent(self, rev):
839 839 """return deltaparent of the given revision"""
840 840 base = self.index[rev][3]
841 841 if base == rev:
842 842 return nullrev
843 843 elif self._generaldelta:
844 844 return base
845 845 else:
846 846 return rev - 1
847 847
848 848 def revdiff(self, rev1, rev2):
849 849 """return or calculate a delta between two revisions"""
850 850 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
851 851 return self._chunk(rev2)
852 852
853 853 return mdiff.textdiff(self.revision(self.node(rev1)),
854 854 self.revision(self.node(rev2)))
855 855
856 856 def revision(self, node):
857 857 """return an uncompressed revision of a given node"""
858 858 cachedrev = None
859 859 if node == nullid:
860 860 return ""
861 861 if self._cache:
862 862 if self._cache[0] == node:
863 863 return self._cache[2]
864 864 cachedrev = self._cache[1]
865 865
866 866 # look up what we need to read
867 867 text = None
868 868 rev = self.rev(node)
869 869
870 870 # check rev flags
871 871 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
872 872 raise RevlogError(_('incompatible revision flag %x') %
873 873 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
874 874
875 875 # build delta chain
876 876 chain = []
877 877 index = self.index # for performance
878 878 generaldelta = self._generaldelta
879 879 iterrev = rev
880 880 e = index[iterrev]
881 881 while iterrev != e[3] and iterrev != cachedrev:
882 882 chain.append(iterrev)
883 883 if generaldelta:
884 884 iterrev = e[3]
885 885 else:
886 886 iterrev -= 1
887 887 e = index[iterrev]
888 888 chain.reverse()
889 889 base = iterrev
890 890
891 891 if iterrev == cachedrev:
892 892 # cache hit
893 893 text = self._cache[2]
894 894
895 895 # drop cache to save memory
896 896 self._cache = None
897 897
898 898 self._chunkraw(base, rev)
899 899 if text is None:
900 900 text = self._chunkbase(base)
901 901
902 902 bins = [self._chunk(r) for r in chain]
903 903 text = mdiff.patches(text, bins)
904 904
905 905 text = self._checkhash(text, node, rev)
906 906
907 907 self._cache = (node, rev, text)
908 908 return text
909 909
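revision() first rebuilds the delta chain by walking index entries back to a base that points at itself; with generaldelta the parent of a delta is the stored base column, otherwise it is simply the previous revision. The walk in isolation (ignoring the cached-revision shortcut used above):

def deltachain(index, rev, generaldelta):
    chain = []
    e = index[rev]
    while rev != e[3]:            # an entry whose base is itself ends it
        chain.append(rev)
        rev = e[3] if generaldelta else rev - 1
        e = index[rev]
    chain.reverse()
    return rev, chain             # (full-text base rev, revs to patch in)
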
910 910 def _checkhash(self, text, node, rev):
911 911 p1, p2 = self.parents(node)
912 912 if node != hash(text, p1, p2):
913 913 raise RevlogError(_("integrity check failed on %s:%d")
914 914 % (self.indexfile, rev))
915 915 return text
916 916
917 917 def checkinlinesize(self, tr, fp=None):
918 918 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
919 919 return
920 920
921 921 trinfo = tr.find(self.indexfile)
922 922 if trinfo is None:
923 923 raise RevlogError(_("%s not found in the transaction")
924 924 % self.indexfile)
925 925
926 926 trindex = trinfo[2]
927 927 dataoff = self.start(trindex)
928 928
929 929 tr.add(self.datafile, dataoff)
930 930
931 931 if fp:
932 932 fp.flush()
933 933 fp.close()
934 934
935 935 df = self.opener(self.datafile, 'w')
936 936 try:
937 937 for r in self:
938 938 df.write(self._chunkraw(r, r))
939 939 finally:
940 940 df.close()
941 941
942 942 fp = self.opener(self.indexfile, 'w', atomictemp=True)
943 943 self.version &= ~(REVLOGNGINLINEDATA)
944 944 self._inline = False
945 945 for i in self:
946 946 e = self._io.packentry(self.index[i], self.node, self.version, i)
947 947 fp.write(e)
948 948
949 # if we don't call rename, the temp file will never replace the
949 # if we don't call close, the temp file will never replace the
950 950 # real index
951 fp.rename()
951 fp.close()
952 952
953 953 tr.replace(self.indexfile, trindex * self._io.size)
954 954 self._chunkclear()
955 955
956 956 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
957 957 """add a revision to the log
958 958
959 959 text - the revision data to add
960 960 transaction - the transaction object used for rollback
961 961 link - the linkrev data to add
962 962 p1, p2 - the parent nodeids of the revision
963 963 cachedelta - an optional precomputed delta
964 964 """
965 965 node = hash(text, p1, p2)
966 966 if node in self.nodemap:
967 967 return node
968 968
969 969 dfh = None
970 970 if not self._inline:
971 971 dfh = self.opener(self.datafile, "a")
972 972 ifh = self.opener(self.indexfile, "a+")
973 973 try:
974 974 return self._addrevision(node, text, transaction, link, p1, p2,
975 975 cachedelta, ifh, dfh)
976 976 finally:
977 977 if dfh:
978 978 dfh.close()
979 979 ifh.close()
980 980
981 981 def _addrevision(self, node, text, transaction, link, p1, p2,
982 982 cachedelta, ifh, dfh):
983 983 """internal function to add revisions to the log
984 984
985 985 see addrevision for argument descriptions.
986 986 invariants:
987 987 - text is optional (can be None); if not set, cachedelta must be set.
988 988 if both are set, they must correspond to each other.
989 989 """
990 990 btext = [text]
991 991 def buildtext():
992 992 if btext[0] is not None:
993 993 return btext[0]
994 994 # flush any pending writes here so we can read it in revision
995 995 if dfh:
996 996 dfh.flush()
997 997 ifh.flush()
998 998 basetext = self.revision(self.node(cachedelta[0]))
999 999 btext[0] = mdiff.patch(basetext, cachedelta[1])
1000 1000 chk = hash(btext[0], p1, p2)
1001 1001 if chk != node:
1002 1002 raise RevlogError(_("consistency error in delta"))
1003 1003 return btext[0]
1004 1004
1005 1005 def builddelta(rev):
1006 1006 # can we use the cached delta?
1007 1007 if cachedelta and cachedelta[0] == rev:
1008 1008 delta = cachedelta[1]
1009 1009 else:
1010 1010 t = buildtext()
1011 1011 ptext = self.revision(self.node(rev))
1012 1012 delta = mdiff.textdiff(ptext, t)
1013 1013 data = compress(delta)
1014 1014 l = len(data[1]) + len(data[0])
1015 1015 if basecache[0] == rev:
1016 1016 chainbase = basecache[1]
1017 1017 else:
1018 1018 chainbase = self.chainbase(rev)
1019 1019 dist = l + offset - self.start(chainbase)
1020 1020 if self._generaldelta:
1021 1021 base = rev
1022 1022 else:
1023 1023 base = chainbase
1024 1024 return dist, l, data, base, chainbase
1025 1025
1026 1026 curr = len(self)
1027 1027 prev = curr - 1
1028 1028 base = chainbase = curr
1029 1029 offset = self.end(prev)
1030 1030 flags = 0
1031 1031 d = None
1032 1032 basecache = self._basecache
1033 1033 p1r, p2r = self.rev(p1), self.rev(p2)
1034 1034
1035 1035 # should we try to build a delta?
1036 1036 if prev != nullrev:
1037 1037 if self._generaldelta:
1038 1038 if p1r >= basecache[1]:
1039 1039 d = builddelta(p1r)
1040 1040 elif p2r >= basecache[1]:
1041 1041 d = builddelta(p2r)
1042 1042 else:
1043 1043 d = builddelta(prev)
1044 1044 else:
1045 1045 d = builddelta(prev)
1046 1046 dist, l, data, base, chainbase = d
1047 1047
1048 1048 # full versions are inserted when the needed deltas
1049 1049 # become comparable to the uncompressed text
1050 1050 if text is None:
1051 1051 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1052 1052 cachedelta[1])
1053 1053 else:
1054 1054 textlen = len(text)
1055 1055 if d is None or dist > textlen * 2:
1056 1056 text = buildtext()
1057 1057 data = compress(text)
1058 1058 l = len(data[1]) + len(data[0])
1059 1059 base = chainbase = curr
1060 1060
1061 1061 e = (offset_type(offset, flags), l, textlen,
1062 1062 base, link, p1r, p2r, node)
1063 1063 self.index.insert(-1, e)
1064 1064 self.nodemap[node] = curr
1065 1065
1066 1066 entry = self._io.packentry(e, self.node, self.version, curr)
1067 1067 if not self._inline:
1068 1068 transaction.add(self.datafile, offset)
1069 1069 transaction.add(self.indexfile, curr * len(entry))
1070 1070 if data[0]:
1071 1071 dfh.write(data[0])
1072 1072 dfh.write(data[1])
1073 1073 dfh.flush()
1074 1074 ifh.write(entry)
1075 1075 else:
1076 1076 offset += curr * self._io.size
1077 1077 transaction.add(self.indexfile, offset, curr)
1078 1078 ifh.write(entry)
1079 1079 ifh.write(data[0])
1080 1080 ifh.write(data[1])
1081 1081 self.checkinlinesize(transaction, ifh)
1082 1082
1083 1083 if type(text) == str: # only accept immutable objects
1084 1084 self._cache = (node, curr, text)
1085 1085 self._basecache = (curr, chainbase)
1086 1086 return node
1087 1087
1088 1088 def group(self, nodelist, bundler, reorder=None):
1089 1089 """Calculate a delta group, yielding a sequence of changegroup chunks
1090 1090 (strings).
1091 1091
1092 1092 Given a list of changeset revs, return a set of deltas and
1093 1093 metadata corresponding to nodes. The first delta is
1094 1094 first parent(nodelist[0]) -> nodelist[0], the receiver is
1095 1095 guaranteed to have this parent as it has all history before
1096 1096 these changesets. If the first parent is nullrev, the
1097 1097 changegroup starts with a full revision.
1098 1098 """
1099 1099
1100 1100 # if we don't have any revisions touched by these changesets, bail
1101 1101 if len(nodelist) == 0:
1102 1102 yield bundler.close()
1103 1103 return
1104 1104
1105 1105 # for generaldelta revlogs, we linearize the revs; this will both be
1106 1106 # much quicker and generate a much smaller bundle
1107 1107 if (self._generaldelta and reorder is not False) or reorder:
1108 1108 dag = dagutil.revlogdag(self)
1109 1109 revs = set(self.rev(n) for n in nodelist)
1110 1110 revs = dag.linearize(revs)
1111 1111 else:
1112 1112 revs = sorted([self.rev(n) for n in nodelist])
1113 1113
1114 1114 # add the parent of the first rev
1115 1115 p = self.parentrevs(revs[0])[0]
1116 1116 revs.insert(0, p)
1117 1117
1118 1118 # build deltas
1119 1119 for r in xrange(len(revs) - 1):
1120 1120 prev, curr = revs[r], revs[r + 1]
1121 1121 for c in bundler.revchunk(self, curr, prev):
1122 1122 yield c
1123 1123
1124 1124 yield bundler.close()
1125 1125
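After optional linearization, group() prepends the first parent and then emits one delta per adjacent pair, so the receiver always applies each delta against something it already has. The pairing itself, with assumed revision numbers:

revs = [4, 5, 7]                  # assumed linearized revs to send
revs.insert(0, 2)                 # first parent of revs[0] (assumed)
pairs = zip(revs, revs[1:])       # [(2, 4), (4, 5), (5, 7)]
# each pair becomes one revchunk: a delta from base to target
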
1126 1126 def addgroup(self, bundle, linkmapper, transaction):
1127 1127 """
1128 1128 add a delta group
1129 1129
1130 1130 given a set of deltas, add them to the revision log. the
1131 1131 first delta is against its parent, which should be in our
1132 1132 log, the rest are against the previous delta.
1133 1133 """
1134 1134
1135 1135 # track the base of the current delta log
1136 1136 node = None
1137 1137
1138 1138 r = len(self)
1139 1139 end = 0
1140 1140 if r:
1141 1141 end = self.end(r - 1)
1142 1142 ifh = self.opener(self.indexfile, "a+")
1143 1143 isize = r * self._io.size
1144 1144 if self._inline:
1145 1145 transaction.add(self.indexfile, end + isize, r)
1146 1146 dfh = None
1147 1147 else:
1148 1148 transaction.add(self.indexfile, isize, r)
1149 1149 transaction.add(self.datafile, end)
1150 1150 dfh = self.opener(self.datafile, "a")
1151 1151
1152 1152 try:
1153 1153 # loop through our set of deltas
1154 1154 chain = None
1155 1155 while True:
1156 1156 chunkdata = bundle.deltachunk(chain)
1157 1157 if not chunkdata:
1158 1158 break
1159 1159 node = chunkdata['node']
1160 1160 p1 = chunkdata['p1']
1161 1161 p2 = chunkdata['p2']
1162 1162 cs = chunkdata['cs']
1163 1163 deltabase = chunkdata['deltabase']
1164 1164 delta = chunkdata['delta']
1165 1165
1166 1166 link = linkmapper(cs)
1167 1167 if node in self.nodemap:
1168 1168 # this can happen if two branches make the same change
1169 1169 chain = node
1170 1170 continue
1171 1171
1172 1172 for p in (p1, p2):
1173 1173 if p not in self.nodemap:
1174 1174 raise LookupError(p, self.indexfile,
1175 1175 _('unknown parent'))
1176 1176
1177 1177 if deltabase not in self.nodemap:
1178 1178 raise LookupError(deltabase, self.indexfile,
1179 1179 _('unknown delta base'))
1180 1180
1181 1181 baserev = self.rev(deltabase)
1182 1182 chain = self._addrevision(node, None, transaction, link,
1183 1183 p1, p2, (baserev, delta), ifh, dfh)
1184 1184 if not dfh and not self._inline:
1185 1185 # addrevision switched from inline to conventional
1186 1186 # reopen the index
1187 1187 ifh.close()
1188 1188 dfh = self.opener(self.datafile, "a")
1189 1189 ifh = self.opener(self.indexfile, "a")
1190 1190 finally:
1191 1191 if dfh:
1192 1192 dfh.close()
1193 1193 ifh.close()
1194 1194
1195 1195 return node
1196 1196
1197 1197 def strip(self, minlink, transaction):
1198 1198 """truncate the revlog on the first revision with a linkrev >= minlink
1199 1199
1200 1200 This function is called when we're stripping revision minlink and
1201 1201 its descendants from the repository.
1202 1202
1203 1203 We have to remove all revisions with linkrev >= minlink, because
1204 1204 the equivalent changelog revisions will be renumbered after the
1205 1205 strip.
1206 1206
1207 1207 So we truncate the revlog on the first of these revisions, and
1208 1208 trust that the caller has saved the revisions that shouldn't be
1209 1209 removed and that it'll readd them after this truncation.
1210 1210 """
1211 1211 if len(self) == 0:
1212 1212 return
1213 1213
1214 1214 for rev in self:
1215 1215 if self.index[rev][4] >= minlink:
1216 1216 break
1217 1217 else:
1218 1218 return
1219 1219
1220 1220 # first truncate the files on disk
1221 1221 end = self.start(rev)
1222 1222 if not self._inline:
1223 1223 transaction.add(self.datafile, end)
1224 1224 end = rev * self._io.size
1225 1225 else:
1226 1226 end += rev * self._io.size
1227 1227
1228 1228 transaction.add(self.indexfile, end)
1229 1229
1230 1230 # then reset internal state in memory to forget those revisions
1231 1231 self._cache = None
1232 1232 self._chunkclear()
1233 1233 for x in xrange(rev, len(self)):
1234 1234 del self.nodemap[self.node(x)]
1235 1235
1236 1236 del self.index[rev:-1]
1237 1237
1238 1238 def checksize(self):
1239 1239 expected = 0
1240 1240 if len(self):
1241 1241 expected = max(0, self.end(len(self) - 1))
1242 1242
1243 1243 try:
1244 1244 f = self.opener(self.datafile)
1245 1245 f.seek(0, 2)
1246 1246 actual = f.tell()
1247 1247 f.close()
1248 1248 dd = actual - expected
1249 1249 except IOError, inst:
1250 1250 if inst.errno != errno.ENOENT:
1251 1251 raise
1252 1252 dd = 0
1253 1253
1254 1254 try:
1255 1255 f = self.opener(self.indexfile)
1256 1256 f.seek(0, 2)
1257 1257 actual = f.tell()
1258 1258 f.close()
1259 1259 s = self._io.size
1260 1260 i = max(0, actual // s)
1261 1261 di = actual - (i * s)
1262 1262 if self._inline:
1263 1263 databytes = 0
1264 1264 for r in self:
1265 1265 databytes += max(0, self.length(r))
1266 1266 dd = 0
1267 1267 di = actual - len(self) * s - databytes
1268 1268 except IOError, inst:
1269 1269 if inst.errno != errno.ENOENT:
1270 1270 raise
1271 1271 di = 0
1272 1272
1273 1273 return (dd, di)
1274 1274
1275 1275 def files(self):
1276 1276 res = [self.indexfile]
1277 1277 if not self._inline:
1278 1278 res.append(self.datafile)
1279 1279 return res
@@ -1,453 +1,453 b''
1 1 # Copyright (C) 2004, 2005 Canonical Ltd
2 2 #
3 3 # This program is free software; you can redistribute it and/or modify
4 4 # it under the terms of the GNU General Public License as published by
5 5 # the Free Software Foundation; either version 2 of the License, or
6 6 # (at your option) any later version.
7 7 #
8 8 # This program is distributed in the hope that it will be useful,
9 9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 11 # GNU General Public License for more details.
12 12 #
13 13 # You should have received a copy of the GNU General Public License
14 14 # along with this program; if not, write to the Free Software
15 15 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 16
17 17 # mbp: "you know that thing where cvs gives you conflict markers?"
18 18 # s: "i hate that."
19 19
20 20 from i18n import _
21 21 import scmutil, util, mdiff
22 22 import sys, os
23 23
24 24 class CantReprocessAndShowBase(Exception):
25 25 pass
26 26
27 27 def intersect(ra, rb):
28 28 """Given two ranges return the range where they intersect or None.
29 29
30 30 >>> intersect((0, 10), (0, 6))
31 31 (0, 6)
32 32 >>> intersect((0, 10), (5, 15))
33 33 (5, 10)
34 34 >>> intersect((0, 10), (10, 15))
35 35 >>> intersect((0, 9), (10, 15))
36 36 >>> intersect((0, 9), (7, 15))
37 37 (7, 9)
38 38 """
39 39 assert ra[0] <= ra[1]
40 40 assert rb[0] <= rb[1]
41 41
42 42 sa = max(ra[0], rb[0])
43 43 sb = min(ra[1], rb[1])
44 44 if sa < sb:
45 45 return sa, sb
46 46 else:
47 47 return None
48 48
49 49 def compare_range(a, astart, aend, b, bstart, bend):
50 50 """Compare a[astart:aend] == b[bstart:bend], without slicing.
51 51 """
52 52 if (aend - astart) != (bend - bstart):
53 53 return False
54 54 for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
55 55 if a[ia] != b[ib]:
56 56 return False
57 57 else:
58 58 return True
59 59
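compare_range() is the primitive the merger applies to each gap between sync regions (see merge_regions() below): comparing both sides against the base, and against each other, yields one of four verdicts. Sketched in isolation:

def classify(equal_a, equal_b, same):
    # equal_a: a[gap] == base[gap]; equal_b: b[gap] == base[gap];
    # same: a[gap] == b[gap]
    if same:
        return 'same'        # both sides made the identical change
    if equal_a and not equal_b:
        return 'b'           # only b diverged from base: take b
    if equal_b and not equal_a:
        return 'a'           # only a diverged from base: take a
    return 'conflict'        # both diverged, differently
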
60 60 class Merge3Text(object):
61 61 """3-way merge of texts.
62 62
63 63 Given strings BASE, OTHER, THIS, tries to produce a combined text
64 64 incorporating the changes from both BASE->OTHER and BASE->THIS."""
65 65 def __init__(self, basetext, atext, btext, base=None, a=None, b=None):
66 66 self.basetext = basetext
67 67 self.atext = atext
68 68 self.btext = btext
69 69 if base is None:
70 70 base = mdiff.splitnewlines(basetext)
71 71 if a is None:
72 72 a = mdiff.splitnewlines(atext)
73 73 if b is None:
74 74 b = mdiff.splitnewlines(btext)
75 75 self.base = base
76 76 self.a = a
77 77 self.b = b
78 78
79 79 def merge_lines(self,
80 80 name_a=None,
81 81 name_b=None,
82 82 name_base=None,
83 83 start_marker='<<<<<<<',
84 84 mid_marker='=======',
85 85 end_marker='>>>>>>>',
86 86 base_marker=None,
87 87 reprocess=False):
88 88 """Return merge in cvs-like form.
89 89 """
90 90 self.conflicts = False
91 91 newline = '\n'
92 92 if len(self.a) > 0:
93 93 if self.a[0].endswith('\r\n'):
94 94 newline = '\r\n'
95 95 elif self.a[0].endswith('\r'):
96 96 newline = '\r'
97 97 if base_marker and reprocess:
98 98 raise CantReprocessAndShowBase()
99 99 if name_a:
100 100 start_marker = start_marker + ' ' + name_a
101 101 if name_b:
102 102 end_marker = end_marker + ' ' + name_b
103 103 if name_base and base_marker:
104 104 base_marker = base_marker + ' ' + name_base
105 105 merge_regions = self.merge_regions()
106 106 if reprocess is True:
107 107 merge_regions = self.reprocess_merge_regions(merge_regions)
108 108 for t in merge_regions:
109 109 what = t[0]
110 110 if what == 'unchanged':
111 111 for i in range(t[1], t[2]):
112 112 yield self.base[i]
113 113 elif what == 'a' or what == 'same':
114 114 for i in range(t[1], t[2]):
115 115 yield self.a[i]
116 116 elif what == 'b':
117 117 for i in range(t[1], t[2]):
118 118 yield self.b[i]
119 119 elif what == 'conflict':
120 120 self.conflicts = True
121 121 yield start_marker + newline
122 122 for i in range(t[3], t[4]):
123 123 yield self.a[i]
124 124 if base_marker is not None:
125 125 yield base_marker + newline
126 126 for i in range(t[1], t[2]):
127 127 yield self.base[i]
128 128 yield mid_marker + newline
129 129 for i in range(t[5], t[6]):
130 130 yield self.b[i]
131 131 yield end_marker + newline
132 132 else:
133 133 raise ValueError(what)
134 134
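A hypothetical end-to-end use of merge_lines(): both sides append a different line after the common base, which produces a single conflict region with cvs-style markers:

m3 = Merge3Text('base\n', 'base\nmine\n', 'base\ntheirs\n')
merged = ''.join(m3.merge_lines(name_a='local', name_b='other'))
# merged ==
# base
# <<<<<<< local
# mine
# =======
# theirs
# >>>>>>> other
assert m3.conflicts
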
135 135 def merge_annotated(self):
136 136 """Return merge with conflicts, showing origin of lines.
137 137
138 138 Most useful for debugging merge.
139 139 """
140 140 for t in self.merge_regions():
141 141 what = t[0]
142 142 if what == 'unchanged':
143 143 for i in range(t[1], t[2]):
144 144 yield 'u | ' + self.base[i]
145 145 elif what == 'a' or what == 'same':
146 146 for i in range(t[1], t[2]):
147 147 yield what[0] + ' | ' + self.a[i]
148 148 elif what == 'b':
149 149 for i in range(t[1], t[2]):
150 150 yield 'b | ' + self.b[i]
151 151 elif what == 'conflict':
152 152 yield '<<<<\n'
153 153 for i in range(t[3], t[4]):
154 154 yield 'A | ' + self.a[i]
155 155 yield '----\n'
156 156 for i in range(t[5], t[6]):
157 157 yield 'B | ' + self.b[i]
158 158 yield '>>>>\n'
159 159 else:
160 160 raise ValueError(what)
161 161
162 162 def merge_groups(self):
163 163 """Yield sequence of line groups. Each one is a tuple:
164 164
165 165 'unchanged', lines
166 166 Lines unchanged from base
167 167
168 168 'a', lines
169 169 Lines taken from a
170 170
171 171 'same', lines
172 172 Lines taken from a (and equal to b)
173 173
174 174 'b', lines
175 175 Lines taken from b
176 176
177 177 'conflict', base_lines, a_lines, b_lines
178 178 Lines from base were changed differently in a and in b, so they conflict.
179 179 """
180 180 for t in self.merge_regions():
181 181 what = t[0]
182 182 if what == 'unchanged':
183 183 yield what, self.base[t[1]:t[2]]
184 184 elif what == 'a' or what == 'same':
185 185 yield what, self.a[t[1]:t[2]]
186 186 elif what == 'b':
187 187 yield what, self.b[t[1]:t[2]]
188 188 elif what == 'conflict':
189 189 yield (what,
190 190 self.base[t[1]:t[2]],
191 191 self.a[t[3]:t[4]],
192 192 self.b[t[5]:t[6]])
193 193 else:
194 194 raise ValueError(what)
195 195
196 196 def merge_regions(self):
197 197 """Return sequences of matching and conflicting regions.
198 198
199 199 This returns tuples, where the first value says what kind we
200 200 have:
201 201
202 202 'unchanged', start, end
203 203 Take a region of base[start:end]
204 204
205 205 'same', astart, aend
206 206 b and a are different from base but give the same result
207 207
208 208 'a', start, end
209 209 Non-clashing insertion from a[start:end]

'b', start, end
Non-clashing insertion from b[start:end]

'conflict', zstart, zend, astart, aend, bstart, bend
base[zstart:zend] was changed to a[astart:aend] on one side and to
b[bstart:bend] on the other, and the two changes clash
210 210
211 211 Method is as follows:
212 212
213 213 The two sequences align only on regions which match the base
214 214 and both descendants. These are found by doing a two-way diff
215 215 of each one against the base, and then finding the
216 216 intersections between those regions. These "sync regions"
217 217 are by definition unchanged in both and easily dealt with.
218 218
219 219 The regions in between can be changed on only one side (a or b),
220 220 changed identically on both sides ('same'), or conflicting.
221 221 """
222 222
223 223 # section a[0:ia] has been disposed of, etc
224 224 iz = ia = ib = 0
225 225
226 226 for zmatch, zend, amatch, aend, bmatch, bend in self.find_sync_regions():
227 227 #print 'match base [%d:%d]' % (zmatch, zend)
228 228
229 229 matchlen = zend - zmatch
230 230 assert matchlen >= 0
231 231 assert matchlen == (aend - amatch)
232 232 assert matchlen == (bend - bmatch)
233 233
234 234 len_a = amatch - ia
235 235 len_b = bmatch - ib
236 236 len_base = zmatch - iz
237 237 assert len_a >= 0
238 238 assert len_b >= 0
239 239 assert len_base >= 0
240 240
241 241 #print 'unmatched a=%d, b=%d' % (len_a, len_b)
242 242
243 243 if len_a or len_b:
244 244 # try to avoid actually slicing the lists
245 245 equal_a = compare_range(self.a, ia, amatch,
246 246 self.base, iz, zmatch)
247 247 equal_b = compare_range(self.b, ib, bmatch,
248 248 self.base, iz, zmatch)
249 249 same = compare_range(self.a, ia, amatch,
250 250 self.b, ib, bmatch)
251 251
252 252 if same:
253 253 yield 'same', ia, amatch
254 254 elif equal_a and not equal_b:
255 255 yield 'b', ib, bmatch
256 256 elif equal_b and not equal_a:
257 257 yield 'a', ia, amatch
258 258 elif not equal_a and not equal_b:
259 259 yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch
260 260 else:
261 261 raise AssertionError("can't handle a=b=base but unmatched")
262 262
263 263 ia = amatch
264 264 ib = bmatch
265 265 iz = zmatch
266 266
267 267 # if the same part of the base was deleted on both sides
268 268 # that's OK, we can just skip it.
269 269
270 270
271 271 if matchlen > 0:
272 272 assert ia == amatch
273 273 assert ib == bmatch
274 274 assert iz == zmatch
275 275
276 276 yield 'unchanged', zmatch, zend
277 277 iz = zend
278 278 ia = aend
279 279 ib = bend
280 280
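For the conflicting example shown above under merge_lines(), merge_regions() yields these raw tuples (a sketch):

    ('unchanged', 0, 1)                  # base line 0 survives untouched
    ('conflict', 1, 2, 1, 2, 1, 2)       # base[1:2] vs a[1:2] vs b[1:2]
    ('unchanged', 2, 3)                  # base line 2 survives untouched
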
281 281 def reprocess_merge_regions(self, merge_regions):
282 282 """Where there are conflict regions, remove the agreed lines.
283 283
284 284 Lines where both A and B have made the same changes are
285 285 eliminated.
286 286 """
287 287 for region in merge_regions:
288 288 if region[0] != "conflict":
289 289 yield region
290 290 continue
291 291 type, iz, zmatch, ia, amatch, ib, bmatch = region
292 292 a_region = self.a[ia:amatch]
293 293 b_region = self.b[ib:bmatch]
294 294 matches = mdiff.get_matching_blocks(''.join(a_region),
295 295 ''.join(b_region))
296 296 next_a = ia
297 297 next_b = ib
298 298 for region_ia, region_ib, region_len in matches[:-1]:
299 299 region_ia += ia
300 300 region_ib += ib
301 301 reg = self.mismatch_region(next_a, region_ia, next_b,
302 302 region_ib)
303 303 if reg is not None:
304 304 yield reg
305 305 yield 'same', region_ia, region_len + region_ia
306 306 next_a = region_ia + region_len
307 307 next_b = region_ib + region_len
308 308 reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
309 309 if reg is not None:
310 310 yield reg
311 311
312 312 def mismatch_region(next_a, region_ia, next_b, region_ib):
313 313 if next_a < region_ia or next_b < region_ib:
314 314 return 'conflict', None, None, next_a, region_ia, next_b, region_ib
315 315 mismatch_region = staticmethod(mismatch_region)
316 316
317 317 def find_sync_regions(self):
318 318 """Return a list of sync regions, where both descendants match the base.
319 319
320 320 Generates a list of (base1, base2, a1, a2, b1, b2). There is
321 321 always a zero-length sync region at the end of all the files.
322 322 """
323 323
324 324 ia = ib = 0
325 325 amatches = mdiff.get_matching_blocks(self.basetext, self.atext)
326 326 bmatches = mdiff.get_matching_blocks(self.basetext, self.btext)
327 327 len_a = len(amatches)
328 328 len_b = len(bmatches)
329 329
330 330 sl = []
331 331
332 332 while ia < len_a and ib < len_b:
333 333 abase, amatch, alen = amatches[ia]
334 334 bbase, bmatch, blen = bmatches[ib]
335 335
336 336 # there is an unconflicted block at i; how long does it
337 337 # extend? until whichever one ends earlier.
338 338 i = intersect((abase, abase + alen), (bbase, bbase + blen))
339 339 if i:
340 340 intbase = i[0]
341 341 intend = i[1]
342 342 intlen = intend - intbase
343 343
344 344 # found a match of base[i[0], i[1]]; this may be less than
345 345 # the region that matches in either one
346 346 assert intlen <= alen
347 347 assert intlen <= blen
348 348 assert abase <= intbase
349 349 assert bbase <= intbase
350 350
351 351 asub = amatch + (intbase - abase)
352 352 bsub = bmatch + (intbase - bbase)
353 353 aend = asub + intlen
354 354 bend = bsub + intlen
355 355
356 356 assert self.base[intbase:intend] == self.a[asub:aend], \
357 357 (self.base[intbase:intend], self.a[asub:aend])
358 358
359 359 assert self.base[intbase:intend] == self.b[bsub:bend]
360 360
361 361 sl.append((intbase, intend,
362 362 asub, aend,
363 363 bsub, bend))
364 364
365 365 # advance whichever one ends first in the base text
366 366 if (abase + alen) < (bbase + blen):
367 367 ia += 1
368 368 else:
369 369 ib += 1
370 370
371 371 intbase = len(self.base)
372 372 abase = len(self.a)
373 373 bbase = len(self.b)
374 374 sl.append((intbase, intbase, abase, abase, bbase, bbase))
375 375
376 376 return sl
377 377
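Continuing the same example, the sync regions are exactly the lines both sides left alone, plus the zero-length sentinel at the end (a sketch):

    m3.find_sync_regions()
    # [(0, 1, 0, 1, 0, 1),      # 'a\n' matches in base, a and b
    #  (2, 3, 2, 3, 2, 3),      # 'c\n' matches in base, a and b
    #  (3, 3, 3, 3, 3, 3)]      # zero-length terminator
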
378 378 def find_unconflicted(self):
379 379 """Return a list of ranges in base that are not conflicted."""
380 380 am = mdiff.get_matching_blocks(self.basetext, self.atext)
381 381 bm = mdiff.get_matching_blocks(self.basetext, self.btext)
382 382
383 383 unc = []
384 384
385 385 while am and bm:
386 386 # there is an unconflicted block at i; how long does it
387 387 # extend? until whichever one ends earlier.
388 388 a1 = am[0][0]
389 389 a2 = a1 + am[0][2]
390 390 b1 = bm[0][0]
391 391 b2 = b1 + bm[0][2]
392 392 i = intersect((a1, a2), (b1, b2))
393 393 if i:
394 394 unc.append(i)
395 395
396 396 if a2 < b2:
397 397 del am[0]
398 398 else:
399 399 del bm[0]
400 400
401 401 return unc
402 402
403 403 def simplemerge(ui, local, base, other, **opts):
404 404 def readfile(filename):
405 405 f = open(filename, "rb")
406 406 text = f.read()
407 407 f.close()
408 408 if util.binary(text):
409 409 msg = _("%s looks like a binary file.") % filename
410 410 if not opts.get('quiet'):
411 411 ui.warn(_('warning: %s\n') % msg)
412 412 if not opts.get('text'):
413 413 raise util.Abort(msg)
414 414 return text
415 415
416 416 name_a = local
417 417 name_b = other
418 418 labels = opts.get('label', [])
419 419 if labels:
420 420 name_a = labels.pop(0)
421 421 if labels:
422 422 name_b = labels.pop(0)
423 423 if labels:
424 424 raise util.Abort(_("can only specify two labels."))
425 425
426 426 try:
427 427 localtext = readfile(local)
428 428 basetext = readfile(base)
429 429 othertext = readfile(other)
430 430 except util.Abort:
431 431 return 1
432 432
433 433 local = os.path.realpath(local)
434 434 if not opts.get('print'):
435 435 opener = scmutil.opener(os.path.dirname(local))
436 436 out = opener(os.path.basename(local), "w", atomictemp=True)
437 437 else:
438 438 out = sys.stdout
439 439
440 440 reprocess = not opts.get('no_minimal')
441 441
442 442 m3 = Merge3Text(basetext, localtext, othertext)
443 443 for line in m3.merge_lines(name_a=name_a, name_b=name_b,
444 444 reprocess=reprocess):
445 445 out.write(line)
446 446
447 447 if not opts.get('print'):
448 out.rename()
448 out.close()
449 449
450 450 if m3.conflicts:
451 451 if not opts.get('quiet'):
452 452 ui.warn(_("warning: conflicts during merge.\n"))
453 453 return 1
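A hypothetical driver for the function above (a sketch; the filenames are placeholders): merge 'local.txt' against the common ancestor 'base.txt' and the other head 'other.txt', writing the result back into 'local.txt' and returning 1 if conflict markers were emitted:

    from mercurial import ui as uimod
    ret = simplemerge(uimod.ui(), 'local.txt', 'base.txt', 'other.txt',
                      label=['local', 'other'])

Passing print=True would instead send the merged text to stdout and leave 'local.txt' alone.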
@@ -1,427 +1,427 b''
1 1 # store.py - repository store handling for Mercurial
2 2 #
3 3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import osutil, scmutil, util
10 10 import os, stat
11 11
12 12 _sha = util.sha1
13 13
14 14 # This avoids a collision between a file named foo and a dir named
15 15 # foo.i or foo.d
16 16 def encodedir(path):
17 17 '''
18 18 >>> encodedir('data/foo.i')
19 19 'data/foo.i'
20 20 >>> encodedir('data/foo.i/bla.i')
21 21 'data/foo.i.hg/bla.i'
22 22 >>> encodedir('data/foo.i.hg/bla.i')
23 23 'data/foo.i.hg.hg/bla.i'
24 24 '''
25 25 if not path.startswith('data/'):
26 26 return path
27 27 return (path
28 28 .replace(".hg/", ".hg.hg/")
29 29 .replace(".i/", ".i.hg/")
30 30 .replace(".d/", ".d.hg/"))
31 31
32 32 def decodedir(path):
33 33 '''
34 34 >>> decodedir('data/foo.i')
35 35 'data/foo.i'
36 36 >>> decodedir('data/foo.i.hg/bla.i')
37 37 'data/foo.i/bla.i'
38 38 >>> decodedir('data/foo.i.hg.hg/bla.i')
39 39 'data/foo.i.hg/bla.i'
40 40 '''
41 41 if not path.startswith('data/') or ".hg/" not in path:
42 42 return path
43 43 return (path
44 44 .replace(".d.hg/", ".d/")
45 45 .replace(".i.hg/", ".i/")
46 46 .replace(".hg.hg/", ".hg/"))
47 47
48 48 def _buildencodefun():
49 49 '''
50 50 >>> enc, dec = _buildencodefun()
51 51
52 52 >>> enc('nothing/special.txt')
53 53 'nothing/special.txt'
54 54 >>> dec('nothing/special.txt')
55 55 'nothing/special.txt'
56 56
57 57 >>> enc('HELLO')
58 58 '_h_e_l_l_o'
59 59 >>> dec('_h_e_l_l_o')
60 60 'HELLO'
61 61
62 62 >>> enc('hello:world?')
63 63 'hello~3aworld~3f'
64 64 >>> dec('hello~3aworld~3f')
65 65 'hello:world?'
66 66
67 67 >>> enc('the\x07quick\xADshot')
68 68 'the~07quick~adshot'
69 69 >>> dec('the~07quick~adshot')
70 70 'the\\x07quick\\xadshot'
71 71 '''
72 72 e = '_'
73 73 winreserved = [ord(x) for x in '\\:*?"<>|']
74 74 cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
75 75 for x in (range(32) + range(126, 256) + winreserved):
76 76 cmap[chr(x)] = "~%02x" % x
77 77 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
78 78 cmap[chr(x)] = e + chr(x).lower()
79 79 dmap = {}
80 80 for k, v in cmap.iteritems():
81 81 dmap[v] = k
82 82 def decode(s):
83 83 i = 0
84 84 while i < len(s):
85 85 for l in xrange(1, 4):
86 86 try:
87 87 yield dmap[s[i:i + l]]
88 88 i += l
89 89 break
90 90 except KeyError:
91 91 pass
92 92 else:
93 93 raise KeyError
94 94 return (lambda s: "".join([cmap[c] for c in encodedir(s)]),
95 95 lambda s: decodedir("".join(list(decode(s)))))
96 96
97 97 encodefilename, decodefilename = _buildencodefun()
98 98
99 99 def _buildlowerencodefun():
100 100 '''
101 101 >>> f = _buildlowerencodefun()
102 102 >>> f('nothing/special.txt')
103 103 'nothing/special.txt'
104 104 >>> f('HELLO')
105 105 'hello'
106 106 >>> f('hello:world?')
107 107 'hello~3aworld~3f'
108 108 >>> f('the\x07quick\xADshot')
109 109 'the~07quick~adshot'
110 110 '''
111 111 winreserved = [ord(x) for x in '\\:*?"<>|']
112 112 cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
113 113 for x in (range(32) + range(126, 256) + winreserved):
114 114 cmap[chr(x)] = "~%02x" % x
115 115 for x in range(ord("A"), ord("Z")+1):
116 116 cmap[chr(x)] = chr(x).lower()
117 117 return lambda s: "".join([cmap[c] for c in s])
118 118
119 119 lowerencode = _buildlowerencodefun()
120 120
121 121 _winreservednames = '''con prn aux nul
122 122 com1 com2 com3 com4 com5 com6 com7 com8 com9
123 123 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
124 124 def _auxencode(path, dotencode):
125 125 '''
126 126 Encodes filenames containing names reserved by Windows or ending in
127 127 a period or space. Does not touch other single reserved characters c;
128 128 specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
129 129 Additionally encodes space or period at the beginning, if dotencode is
130 130 True.
131 131 path is assumed to be all lowercase.
132 132
133 133 >>> _auxencode('.foo/aux.txt/txt.aux/con/prn/nul/foo.', True)
134 134 '~2efoo/au~78.txt/txt.aux/co~6e/pr~6e/nu~6c/foo~2e'
135 135 >>> _auxencode('.com1com2/lpt9.lpt4.lpt1/conprn/foo.', False)
136 136 '.com1com2/lp~749.lpt4.lpt1/conprn/foo~2e'
137 137 >>> _auxencode('foo. ', True)
138 138 'foo.~20'
139 139 >>> _auxencode(' .foo', True)
140 140 '~20.foo'
141 141 '''
142 142 res = []
143 143 for n in path.split('/'):
144 144 if n:
145 145 base = n.split('.')[0]
146 146 if base and (base in _winreservednames):
147 147 # encode third letter ('aux' -> 'au~78')
148 148 ec = "~%02x" % ord(n[2])
149 149 n = n[0:2] + ec + n[3:]
150 150 if n[-1] in '. ':
151 151 # encode last period or space ('foo...' -> 'foo..~2e')
152 152 n = n[:-1] + "~%02x" % ord(n[-1])
153 153 if dotencode and n[0] in '. ':
154 154 n = "~%02x" % ord(n[0]) + n[1:]
155 155 res.append(n)
156 156 return '/'.join(res)
157 157
158 158 _maxstorepathlen = 120
159 159 _dirprefixlen = 8
160 160 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
161 161 def _hybridencode(path, auxencode):
162 162 '''encodes path with a length limit
163 163
164 164 Encodes all paths that begin with 'data/', according to the following.
165 165
166 166 Default encoding (reversible):
167 167
168 168 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
169 169 characters are encoded as '~xx', where xx is the two digit hex code
170 170 of the character (see encodefilename).
171 171 Relevant path components consisting of Windows reserved filenames are
172 172 masked by encoding the third character ('aux' -> 'au~78', see auxencode).
173 173
174 174 Hashed encoding (not reversible):
175 175
176 176 If the default-encoded path is longer than _maxstorepathlen, a
177 177 non-reversible hybrid hashing of the path is done instead.
178 178 This encoding uses up to _dirprefixlen characters of all directory
179 179 levels of the lowerencoded path, but not more levels than can fit into
180 180 _maxshortdirslen.
181 181 Then comes the filler, followed by the sha digest of the full path.
182 182 The filler is the beginning of the basename of the lowerencoded path
183 183 (the basename is everything after the last path separator). The filler
184 184 is as long as possible, filling in characters from the basename until
185 185 the encoded path has _maxstorepathlen characters (or all chars of the
186 186 basename have been taken).
187 187 The extension (e.g. '.i' or '.d') is preserved.
188 188
189 189 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
190 190 encoding was used.
191 191 '''
192 192 if not path.startswith('data/'):
193 193 return path
194 194 # escape directories ending with .i and .d
195 195 path = encodedir(path)
196 196 ndpath = path[len('data/'):]
197 197 res = 'data/' + auxencode(encodefilename(ndpath))
198 198 if len(res) > _maxstorepathlen:
199 199 digest = _sha(path).hexdigest()
200 200 aep = auxencode(lowerencode(ndpath))
201 201 _root, ext = os.path.splitext(aep)
202 202 parts = aep.split('/')
203 203 basename = parts[-1]
204 204 sdirs = []
205 205 for p in parts[:-1]:
206 206 d = p[:_dirprefixlen]
207 207 if d[-1] in '. ':
208 208 # Windows can't access dirs ending in period or space
209 209 d = d[:-1] + '_'
210 210 t = '/'.join(sdirs) + '/' + d
211 211 if len(t) > _maxshortdirslen:
212 212 break
213 213 sdirs.append(d)
214 214 dirs = '/'.join(sdirs)
215 215 if len(dirs) > 0:
216 216 dirs += '/'
217 217 res = 'dh/' + dirs + digest + ext
218 218 spaceleft = _maxstorepathlen - len(res)
219 219 if spaceleft > 0:
220 220 filler = basename[:spaceleft]
221 221 res = 'dh/' + dirs + filler + digest + ext
222 222 return res
223 223
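How the pieces compose (a sketch mirroring what store() does at the bottom of this file): short paths keep the reversible default encoding, while over-long ones drop into the hashed 'dh/' form:

    auxenc = lambda f: _auxencode(f, True)        # dotencode enabled
    hybrid = lambda f: _hybridencode(f, auxenc)
    hybrid('data/FOO/bar.i')                      # -> 'data/_f_o_o/bar.i'
    hybrid('data/' + 'x' * 200 + '.i')[:3]        # -> 'dh/' (hashed form)
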
224 224 def _calcmode(path):
225 225 try:
226 226 # files in .hg/ will be created using this mode
227 227 mode = os.stat(path).st_mode
228 228 # avoid some useless chmods
229 229 if (0777 & ~util.umask) == (0777 & mode):
230 230 mode = None
231 231 except OSError:
232 232 mode = None
233 233 return mode
234 234
235 235 _data = 'data 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
236 236
237 237 class basicstore(object):
238 238 '''base class for local repository stores'''
239 239 def __init__(self, path, openertype):
240 240 self.path = path
241 241 self.createmode = _calcmode(path)
242 242 op = openertype(self.path)
243 243 op.createmode = self.createmode
244 244 self.opener = scmutil.filteropener(op, encodedir)
245 245
246 246 def join(self, f):
247 247 return self.path + '/' + encodedir(f)
248 248
249 249 def _walk(self, relpath, recurse):
250 250 '''yields (unencoded, encoded, size)'''
251 251 path = self.path
252 252 if relpath:
253 253 path += '/' + relpath
254 254 striplen = len(self.path) + 1
255 255 l = []
256 256 if os.path.isdir(path):
257 257 visit = [path]
258 258 while visit:
259 259 p = visit.pop()
260 260 for f, kind, st in osutil.listdir(p, stat=True):
261 261 fp = p + '/' + f
262 262 if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
263 263 n = util.pconvert(fp[striplen:])
264 264 l.append((decodedir(n), n, st.st_size))
265 265 elif kind == stat.S_IFDIR and recurse:
266 266 visit.append(fp)
267 267 return sorted(l)
268 268
269 269 def datafiles(self):
270 270 return self._walk('data', True)
271 271
272 272 def walk(self):
273 273 '''yields (unencoded, encoded, size)'''
274 274 # yield data files first
275 275 for x in self.datafiles():
276 276 yield x
277 277 # yield manifest before changelog
278 278 for x in reversed(self._walk('', False)):
279 279 yield x
280 280
281 281 def copylist(self):
282 282 return ['requires'] + _data.split()
283 283
284 284 def write(self):
285 285 pass
286 286
287 287 class encodedstore(basicstore):
288 288 def __init__(self, path, openertype):
289 289 self.path = path + '/store'
290 290 self.createmode = _calcmode(self.path)
291 291 op = openertype(self.path)
292 292 op.createmode = self.createmode
293 293 self.opener = scmutil.filteropener(op, encodefilename)
294 294
295 295 def datafiles(self):
296 296 for a, b, size in self._walk('data', True):
297 297 try:
298 298 a = decodefilename(a)
299 299 except KeyError:
300 300 a = None
301 301 yield a, b, size
302 302
303 303 def join(self, f):
304 304 return self.path + '/' + encodefilename(f)
305 305
306 306 def copylist(self):
307 307 return (['requires', '00changelog.i'] +
308 308 ['store/' + f for f in _data.split()])
309 309
310 310 class fncache(object):
311 311 # the filename used to be partially encoded
312 312 # hence the encodedir/decodedir dance
313 313 def __init__(self, opener):
314 314 self.opener = opener
315 315 self.entries = None
316 316 self._dirty = False
317 317
318 318 def _load(self):
319 319 '''fill the entries from the fncache file'''
320 320 self.entries = set()
321 321 self._dirty = False
322 322 try:
323 323 fp = self.opener('fncache', mode='rb')
324 324 except IOError:
325 325 # skip nonexistent file
326 326 return
327 327 for n, line in enumerate(fp):
328 328 if (len(line) < 2) or (line[-1] != '\n'):
329 329 t = _('invalid entry in fncache, line %s') % (n + 1)
330 330 raise util.Abort(t)
331 331 self.entries.add(decodedir(line[:-1]))
332 332 fp.close()
333 333
334 334 def rewrite(self, files):
335 335 fp = self.opener('fncache', mode='wb')
336 336 for p in files:
337 337 fp.write(encodedir(p) + '\n')
338 338 fp.close()
339 339 self.entries = set(files)
340 340 self._dirty = False
341 341
342 342 def write(self):
343 343 if not self._dirty:
344 344 return
345 345 fp = self.opener('fncache', mode='wb', atomictemp=True)
346 346 for p in self.entries:
347 347 fp.write(encodedir(p) + '\n')
348 fp.rename()
348 fp.close()
349 349 self._dirty = False
350 350
351 351 def add(self, fn):
352 352 if self.entries is None:
353 353 self._load()
354 354 if fn not in self.entries:
355 355 self._dirty = True
356 356 self.entries.add(fn)
357 357
358 358 def __contains__(self, fn):
359 359 if self.entries is None:
360 360 self._load()
361 361 return fn in self.entries
362 362
363 363 def __iter__(self):
364 364 if self.entries is None:
365 365 self._load()
366 366 return iter(self.entries)
367 367
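A lifecycle sketch (op stands for whatever scmutil opener the store was built with): adding a path marks the cache dirty, and write() persists it atomically:

    fnc = fncache(op)          # op: an opener rooted at .hg/store
    fnc.add('data/foo.i')      # lazily loads, then marks dirty if new
    fnc.write()                # rewrites the fncache file via atomictemp
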
368 368 class _fncacheopener(scmutil.abstractopener):
369 369 def __init__(self, op, fnc, encode):
370 370 self.opener = op
371 371 self.fncache = fnc
372 372 self.encode = encode
373 373
374 374 def __call__(self, path, mode='r', *args, **kw):
375 375 if mode not in ('r', 'rb') and path.startswith('data/'):
376 376 self.fncache.add(path)
377 377 return self.opener(self.encode(path), mode, *args, **kw)
378 378
379 379 class fncachestore(basicstore):
380 380 def __init__(self, path, openertype, encode):
381 381 self.encode = encode
382 382 self.path = path + '/store'
383 383 self.createmode = _calcmode(self.path)
384 384 op = openertype(self.path)
385 385 op.createmode = self.createmode
386 386 fnc = fncache(op)
387 387 self.fncache = fnc
388 388 self.opener = _fncacheopener(op, fnc, encode)
389 389
390 390 def join(self, f):
391 391 return self.path + '/' + self.encode(f)
392 392
393 393 def datafiles(self):
394 394 rewrite = False
395 395 existing = []
396 396 spath = self.path
397 397 for f in self.fncache:
398 398 ef = self.encode(f)
399 399 try:
400 400 st = os.stat(spath + '/' + ef)
401 401 yield f, ef, st.st_size
402 402 existing.append(f)
403 403 except OSError:
404 404 # nonexistent entry
405 405 rewrite = True
406 406 if rewrite:
407 407 # rewrite fncache to remove nonexistent entries
408 408 # (may be caused by rollback / strip)
409 409 self.fncache.rewrite(existing)
410 410
411 411 def copylist(self):
412 412 d = ('data dh fncache'
413 413 ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
414 414 return (['requires', '00changelog.i'] +
415 415 ['store/' + f for f in d.split()])
416 416
417 417 def write(self):
418 418 self.fncache.write()
419 419
420 420 def store(requirements, path, openertype):
421 421 if 'store' in requirements:
422 422 if 'fncache' in requirements:
423 423 auxencode = lambda f: _auxencode(f, 'dotencode' in requirements)
424 424 encode = lambda f: _hybridencode(f, auxencode)
425 425 return fncachestore(path, openertype, encode)
426 426 return encodedstore(path, openertype)
427 427 return basicstore(path, openertype)
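In other words, the requirements flags pick the store flavour (a sketch):

    store(['store', 'fncache', 'dotencode'], path, opener)  # fncachestore
    store(['store'], path, opener)                          # encodedstore
    store([], path, opener)                                 # basicstore (old repos)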
@@ -1,292 +1,292 b''
1 1 # tags.py - read tag info from local repository
2 2 #
3 3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 # Currently this module only deals with reading and caching tags.
10 10 # Eventually, it could take care of updating (adding/removing/moving)
11 11 # tags too.
12 12
13 13 from node import nullid, bin, hex, short
14 14 from i18n import _
15 15 import encoding
16 16 import error
17 17 import errno
18 18
19 19 def findglobaltags(ui, repo, alltags, tagtypes):
20 20 '''Find global tags in repo by reading .hgtags from every head that
21 21 has a distinct version of it, using a cache to avoid excess work.
22 22 Updates the dicts alltags, tagtypes in place: alltags maps tag name
23 23 to (node, hist) pair (see _readtags() below), and tagtypes maps tag
24 24 name to tag type ("global" in this case).'''
25 25 # This is so we can be lazy and assume alltags contains only global
26 26 # tags when we pass it to _writetagcache().
27 27 assert len(alltags) == len(tagtypes) == 0, \
28 28 "findglobaltags() should be called first"
29 29
30 30 (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
31 31 if cachetags is not None:
32 32 assert not shouldwrite
33 33 # XXX is this really 100% correct? are there oddball special
34 34 # cases where a global tag should outrank a local tag but won't,
35 35 # because cachetags does not contain rank info?
36 36 _updatetags(cachetags, 'global', alltags, tagtypes)
37 37 return
38 38
39 39 seen = set() # set of fnode
40 40 fctx = None
41 41 for head in reversed(heads): # oldest to newest
42 42 assert head in repo.changelog.nodemap, \
43 43 "tag cache returned bogus head %s" % short(head)
44 44
45 45 fnode = tagfnode.get(head)
46 46 if fnode and fnode not in seen:
47 47 seen.add(fnode)
48 48 if not fctx:
49 49 fctx = repo.filectx('.hgtags', fileid=fnode)
50 50 else:
51 51 fctx = fctx.filectx(fnode)
52 52
53 53 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
54 54 _updatetags(filetags, 'global', alltags, tagtypes)
55 55
56 56 # and update the cache (if necessary)
57 57 if shouldwrite:
58 58 _writetagcache(ui, repo, heads, tagfnode, alltags)
59 59
60 60 def readlocaltags(ui, repo, alltags, tagtypes):
61 61 '''Read local tags in repo. Update alltags and tagtypes.'''
62 62 try:
63 63 data = repo.opener.read("localtags")
64 64 except IOError, inst:
65 65 if inst.errno != errno.ENOENT:
66 66 raise
67 67 return
68 68
69 69 # localtags is in the local encoding; re-encode to UTF-8 on
70 70 # input for consistency with the rest of this module.
71 71 filetags = _readtags(
72 72 ui, repo, data.splitlines(), "localtags",
73 73 recode=encoding.fromlocal)
74 74 _updatetags(filetags, "local", alltags, tagtypes)
75 75
76 76 def _readtags(ui, repo, lines, fn, recode=None):
77 77 '''Read tag definitions from a file (or any source of lines).
78 78 Return a mapping from tag name to (node, hist): node is the node id
79 79 from the last line read for that name, and hist is the list of node
80 80 ids previously associated with it (in file order). All node ids are
81 81 binary, not hex.'''
82 82
83 83 filetags = {} # map tag name to (node, hist)
84 84 count = 0
85 85
86 86 def warn(msg):
87 87 ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
88 88
89 89 for line in lines:
90 90 count += 1
91 91 if not line:
92 92 continue
93 93 try:
94 94 (nodehex, name) = line.split(" ", 1)
95 95 except ValueError:
96 96 warn(_("cannot parse entry"))
97 97 continue
98 98 name = name.strip()
99 99 if recode:
100 100 name = recode(name)
101 101 try:
102 102 nodebin = bin(nodehex)
103 103 except TypeError:
104 104 warn(_("node '%s' is not well formed") % nodehex)
105 105 continue
106 106
107 107 # update filetags
108 108 hist = []
109 109 if name in filetags:
110 110 n, hist = filetags[name]
111 111 hist.append(n)
112 112 filetags[name] = (nodebin, hist)
113 113 return filetags
114 114
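A minimal sketch of the parsing (the node hashes are made-up placeholders; ui is only used for warnings and repo is unused by the parsing itself): two lines that move tag 'v1' yield the last node plus the history of earlier ones:

    lines = ['1111111111111111111111111111111111111111 v1',
             '2222222222222222222222222222222222222222 v1']
    _readtags(ui, repo, lines, 'example')
    # {'v1': (bin('2222...'), [bin('1111...')])}
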
115 115 def _updatetags(filetags, tagtype, alltags, tagtypes):
116 116 '''Incorporate the tag info read from one file into the two
117 117 dictionaries, alltags and tagtypes, that contain all tag
118 118 info (global across all heads plus local).'''
119 119
120 120 for name, nodehist in filetags.iteritems():
121 121 if name not in alltags:
122 122 alltags[name] = nodehist
123 123 tagtypes[name] = tagtype
124 124 continue
125 125
126 126 # we prefer alltags[name] if:
127 127 # it supersedes us OR
128 128 # mutual supersedes and it has a higher rank
129 129 # otherwise we win because we're tip-most
130 130 anode, ahist = nodehist
131 131 bnode, bhist = alltags[name]
132 132 if (bnode != anode and anode in bhist and
133 133 (bnode not in ahist or len(bhist) > len(ahist))):
134 134 anode = bnode
135 135 ahist.extend([n for n in bhist if n not in ahist])
136 136 alltags[name] = anode, ahist
137 137 tagtypes[name] = tagtype
138 138
139 139
140 140 # The tag cache only stores info about heads, not the tag contents
141 141 # from each head. I.e. it doesn't try to squeeze out the maximum
142 142 # performance, but is simpler and has a better chance of actually
143 143 # working correctly. And this gives the biggest performance win: it
144 144 # avoids looking up .hgtags in the manifest for every head, and it
145 145 # can avoid calling heads() at all if there have been no changes to
146 146 # the repo.
147 147
148 148 def _readtagcache(ui, repo):
149 149 '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
150 150 shouldwrite). If the cache is completely up-to-date, cachetags is a
151 151 dict of the form returned by _readtags(); otherwise, it is None and
152 152 heads and fnodes are set. In that case, heads is the list of all
153 153 heads currently in the repository (ordered from tip to oldest) and
154 154 fnodes is a mapping from head to .hgtags filenode. If those two are
155 155 set, caller is responsible for reading tag info from each head.'''
156 156
157 157 try:
158 158 cachefile = repo.opener('cache/tags', 'r')
159 159 # force reading the file for static-http
160 160 cachelines = iter(cachefile)
161 161 except IOError:
162 162 cachefile = None
163 163
164 164 # The cache file consists of lines like
165 165 # <headrev> <headnode> [<tagnode>]
166 166 # where <headrev> and <headnode> redundantly identify a repository
167 167 # head from the time the cache was written, and <tagnode> is the
168 168 # filenode of .hgtags on that head. Heads with no .hgtags file will
169 169 # have no <tagnode>. The cache is ordered from tip to oldest (which
170 170 # is part of why <headrev> is there: a quick visual check is all
171 171 # that's required to ensure correct order).
172 172 #
173 173 # This information is enough to let us avoid the most expensive part
174 174 # of finding global tags, which is looking up <tagnode> in the
175 175 # manifest for each head.
176 176 cacherevs = [] # list of headrev
177 177 cacheheads = [] # list of headnode
178 178 cachefnode = {} # map headnode to filenode
179 179 if cachefile:
180 180 try:
181 181 for line in cachelines:
182 182 if line == "\n":
183 183 break
184 184 line = line.rstrip().split()
185 185 cacherevs.append(int(line[0]))
186 186 headnode = bin(line[1])
187 187 cacheheads.append(headnode)
188 188 if len(line) == 3:
189 189 fnode = bin(line[2])
190 190 cachefnode[headnode] = fnode
191 191 except Exception:
192 192 # corruption of the tags cache, just recompute it
193 193 ui.warn(_('.hg/cache/tags is corrupt, rebuilding it\n'))
194 194 cacheheads = []
195 195 cacherevs = []
196 196 cachefnode = {}
197 197
198 198 tipnode = repo.changelog.tip()
199 199 tiprev = len(repo.changelog) - 1
200 200
201 201 # Case 1 (common): tip is the same, so nothing has changed.
202 202 # (Unchanged tip trivially means no changesets have been added.
203 203 # But, thanks to localrepository.destroyed(), it also means none
204 204 # have been destroyed by strip or rollback.)
205 205 if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
206 206 tags = _readtags(ui, repo, cachelines, cachefile.name)
207 207 cachefile.close()
208 208 return (None, None, tags, False)
209 209 if cachefile:
210 210 cachefile.close() # ignore rest of file
211 211
212 212 repoheads = repo.heads()
213 213 # Case 2 (uncommon): empty repo; get out quickly and don't bother
214 214 # writing an empty cache.
215 215 if repoheads == [nullid]:
216 216 return ([], {}, {}, False)
217 217
218 218 # Case 3 (uncommon): cache file missing or empty.
219 219
220 220 # Case 4 (uncommon): tip rev decreased. This should only happen
221 221 # when we're called from localrepository.destroyed(). Refresh the
222 222 # cache so future invocations will not see disappeared heads in the
223 223 # cache.
224 224
225 225 # Case 5 (common): tip has changed, so we've added/replaced heads.
226 226
227 227 # As it happens, the code to handle cases 3, 4, 5 is the same.
228 228
229 229 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
230 230 # exposed".
231 231 newheads = [head
232 232 for head in repoheads
233 233 if head not in set(cacheheads)]
234 234
235 235 # Now we have to lookup the .hgtags filenode for every new head.
236 236 # This is the most expensive part of finding tags, so performance
237 237 # depends primarily on the size of newheads. Worst case: no cache
238 238 # file, so newheads == repoheads.
239 239 for head in newheads:
240 240 cctx = repo[head]
241 241 try:
242 242 fnode = cctx.filenode('.hgtags')
243 243 cachefnode[head] = fnode
244 244 except error.LookupError:
245 245 # no .hgtags file on this head
246 246 pass
247 247
248 248 # Caller has to iterate over all heads, but can use the filenodes in
249 249 # cachefnode to get to each .hgtags revision quickly.
250 250 return (repoheads, cachefnode, None, True)
251 251
252 252 def _writetagcache(ui, repo, heads, tagfnode, cachetags):
253 253
254 254 try:
255 255 cachefile = repo.opener('cache/tags', 'w', atomictemp=True)
256 256 except (OSError, IOError):
257 257 return
258 258
259 259 realheads = repo.heads() # for sanity checks below
260 260 for head in heads:
261 261 # temporary sanity checks; these can probably be removed
262 262 # once this code has been in crew for a few weeks
263 263 assert head in repo.changelog.nodemap, \
264 264 'trying to write non-existent node %s to tag cache' % short(head)
265 265 assert head in realheads, \
266 266 'trying to write non-head %s to tag cache' % short(head)
267 267 assert head != nullid, \
268 268 'trying to write nullid to tag cache'
269 269
270 270 # This can't fail because of the first assert above. When/if we
271 271 # remove that assert, we might want to catch LookupError here
272 272 # and downgrade it to a warning.
273 273 rev = repo.changelog.rev(head)
274 274
275 275 fnode = tagfnode.get(head)
276 276 if fnode:
277 277 cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
278 278 else:
279 279 cachefile.write('%d %s\n' % (rev, hex(head)))
280 280
281 281 # Tag names in the cache are in UTF-8 -- which is the whole reason
282 282 # we keep them in UTF-8 throughout this module. If we converted
283 283 # them local encoding on input, we would lose info writing them to
284 284 # the cache.
285 285 cachefile.write('\n')
286 286 for (name, (node, hist)) in cachetags.iteritems():
287 287 cachefile.write("%s %s\n" % (hex(node), name))
288 288
289 289 try:
290 cachefile.rename()
290 cachefile.close()
291 291 except (OSError, IOError):
292 292 pass
@@ -1,1648 +1,1647 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specfic implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from i18n import _
17 17 import error, osutil, encoding
18 18 import errno, re, shutil, sys, tempfile, traceback
19 19 import os, time, calendar, textwrap, unicodedata, signal
20 20 import imp, socket, urllib
21 21
22 22 if os.name == 'nt':
23 23 import windows as platform
24 24 else:
25 25 import posix as platform
26 26
27 27 cachestat = platform.cachestat
28 28 checkexec = platform.checkexec
29 29 checklink = platform.checklink
30 30 copymode = platform.copymode
31 31 executablepath = platform.executablepath
32 32 expandglobs = platform.expandglobs
33 33 explainexit = platform.explainexit
34 34 findexe = platform.findexe
35 35 gethgcmd = platform.gethgcmd
36 36 getuser = platform.getuser
37 37 groupmembers = platform.groupmembers
38 38 groupname = platform.groupname
39 39 hidewindow = platform.hidewindow
40 40 isexec = platform.isexec
41 41 isowner = platform.isowner
42 42 localpath = platform.localpath
43 43 lookupreg = platform.lookupreg
44 44 makedir = platform.makedir
45 45 nlinks = platform.nlinks
46 46 normpath = platform.normpath
47 47 nulldev = platform.nulldev
48 48 openhardlinks = platform.openhardlinks
49 49 oslink = platform.oslink
50 50 parsepatchoutput = platform.parsepatchoutput
51 51 pconvert = platform.pconvert
52 52 popen = platform.popen
53 53 posixfile = platform.posixfile
54 54 quotecommand = platform.quotecommand
55 55 realpath = platform.realpath
56 56 rename = platform.rename
57 57 samedevice = platform.samedevice
58 58 samefile = platform.samefile
59 59 samestat = platform.samestat
60 60 setbinary = platform.setbinary
61 61 setflags = platform.setflags
62 62 setsignalhandler = platform.setsignalhandler
63 63 shellquote = platform.shellquote
64 64 spawndetached = platform.spawndetached
65 65 sshargs = platform.sshargs
66 66 statfiles = platform.statfiles
67 67 termwidth = platform.termwidth
68 68 testpid = platform.testpid
69 69 umask = platform.umask
70 70 unlink = platform.unlink
71 71 unlinkpath = platform.unlinkpath
72 72 username = platform.username
73 73
74 74 # Python compatibility
75 75
76 76 def sha1(s):
77 77 return _fastsha1(s)
78 78
79 79 _notset = object()
80 80 def safehasattr(thing, attr):
81 81 return getattr(thing, attr, _notset) is not _notset
82 82
83 83 def _fastsha1(s):
84 84 # This function will import sha1 from hashlib or sha (whichever is
85 85 # available) and overwrite itself with it on the first call.
86 86 # Subsequent calls will go directly to the imported function.
87 87 if sys.version_info >= (2, 5):
88 88 from hashlib import sha1 as _sha1
89 89 else:
90 90 from sha import sha as _sha1
91 91 global _fastsha1, sha1
92 92 _fastsha1 = sha1 = _sha1
93 93 return _sha1(s)
94 94
95 95 import __builtin__
96 96
97 97 if sys.version_info[0] < 3:
98 98 def fakebuffer(sliceable, offset=0):
99 99 return sliceable[offset:]
100 100 else:
101 101 def fakebuffer(sliceable, offset=0):
102 102 return memoryview(sliceable)[offset:]
103 103 try:
104 104 buffer
105 105 except NameError:
106 106 __builtin__.buffer = fakebuffer
107 107
108 108 import subprocess
109 109 closefds = os.name == 'posix'
110 110
111 111 def popen2(cmd, env=None, newlines=False):
112 112 # Setting bufsize to -1 lets the system decide the buffer size.
113 113 # The default for bufsize is 0, meaning unbuffered. This leads to
114 114 # poor performance on Mac OS X: http://bugs.python.org/issue4194
115 115 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
116 116 close_fds=closefds,
117 117 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
118 118 universal_newlines=newlines,
119 119 env=env)
120 120 return p.stdin, p.stdout
121 121
122 122 def popen3(cmd, env=None, newlines=False):
123 123 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
124 124 close_fds=closefds,
125 125 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
126 126 stderr=subprocess.PIPE,
127 127 universal_newlines=newlines,
128 128 env=env)
129 129 return p.stdin, p.stdout, p.stderr
130 130
131 131 def version():
132 132 """Return version information if available."""
133 133 try:
134 134 import __version__
135 135 return __version__.version
136 136 except ImportError:
137 137 return 'unknown'
138 138
139 139 # used by parsedate
140 140 defaultdateformats = (
141 141 '%Y-%m-%d %H:%M:%S',
142 142 '%Y-%m-%d %I:%M:%S%p',
143 143 '%Y-%m-%d %H:%M',
144 144 '%Y-%m-%d %I:%M%p',
145 145 '%Y-%m-%d',
146 146 '%m-%d',
147 147 '%m/%d',
148 148 '%m/%d/%y',
149 149 '%m/%d/%Y',
150 150 '%a %b %d %H:%M:%S %Y',
151 151 '%a %b %d %I:%M:%S%p %Y',
152 152 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
153 153 '%b %d %H:%M:%S %Y',
154 154 '%b %d %I:%M:%S%p %Y',
155 155 '%b %d %H:%M:%S',
156 156 '%b %d %I:%M:%S%p',
157 157 '%b %d %H:%M',
158 158 '%b %d %I:%M%p',
159 159 '%b %d %Y',
160 160 '%b %d',
161 161 '%H:%M:%S',
162 162 '%I:%M:%S%p',
163 163 '%H:%M',
164 164 '%I:%M%p',
165 165 )
166 166
167 167 extendeddateformats = defaultdateformats + (
168 168 "%Y",
169 169 "%Y-%m",
170 170 "%b",
171 171 "%b %Y",
172 172 )
173 173
174 174 def cachefunc(func):
175 175 '''cache the result of function calls'''
176 176 # XXX doesn't handle keywords args
177 177 cache = {}
178 178 if func.func_code.co_argcount == 1:
179 179 # we gain a small amount of time because
180 180 # we don't need to pack/unpack the list
181 181 def f(arg):
182 182 if arg not in cache:
183 183 cache[arg] = func(arg)
184 184 return cache[arg]
185 185 else:
186 186 def f(*args):
187 187 if args not in cache:
188 188 cache[args] = func(*args)
189 189 return cache[args]
190 190
191 191 return f
192 192
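A usage sketch: wrap any positional-argument function, and repeated calls with the same arguments hit the cache:

    def square(x):
        return x * x
    square = cachefunc(square)
    square(4)    # computed
    square(4)    # returned from the cache

lrucachefunc below is used the same way, but evicts the oldest entry once the cache grows past 20.
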
193 193 def lrucachefunc(func):
194 194 '''cache most recent results of function calls'''
195 195 cache = {}
196 196 order = []
197 197 if func.func_code.co_argcount == 1:
198 198 def f(arg):
199 199 if arg not in cache:
200 200 if len(cache) > 20:
201 201 del cache[order.pop(0)]
202 202 cache[arg] = func(arg)
203 203 else:
204 204 order.remove(arg)
205 205 order.append(arg)
206 206 return cache[arg]
207 207 else:
208 208 def f(*args):
209 209 if args not in cache:
210 210 if len(cache) > 20:
211 211 del cache[order.pop(0)]
212 212 cache[args] = func(*args)
213 213 else:
214 214 order.remove(args)
215 215 order.append(args)
216 216 return cache[args]
217 217
218 218 return f
219 219
220 220 class propertycache(object):
221 221 def __init__(self, func):
222 222 self.func = func
223 223 self.name = func.__name__
224 224 def __get__(self, obj, type=None):
225 225 result = self.func(obj)
226 226 setattr(obj, self.name, result)
227 227 return result
228 228
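propertycache is a non-data descriptor: the first attribute access runs the function, and the setattr() in __get__ then shadows the descriptor with a plain instance attribute, so later accesses cost nothing. A sketch with a hypothetical class:

    class example(object):
        @propertycache
        def answer(self):
            print 'computing...'
            return 42

    e = example()
    e.answer     # prints 'computing...', returns 42
    e.answer     # now a plain attribute; no recompute
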
229 229 def pipefilter(s, cmd):
230 230 '''filter string S through command CMD, returning its output'''
231 231 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
232 232 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
233 233 pout, perr = p.communicate(s)
234 234 return pout
235 235
236 236 def tempfilter(s, cmd):
237 237 '''filter string S through a pair of temporary files with CMD.
238 238 CMD is used as a template to create the real command to be run,
239 239 with the strings INFILE and OUTFILE replaced by the real names of
240 240 the temporary files generated.'''
241 241 inname, outname = None, None
242 242 try:
243 243 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
244 244 fp = os.fdopen(infd, 'wb')
245 245 fp.write(s)
246 246 fp.close()
247 247 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
248 248 os.close(outfd)
249 249 cmd = cmd.replace('INFILE', inname)
250 250 cmd = cmd.replace('OUTFILE', outname)
251 251 code = os.system(cmd)
252 252 if sys.platform == 'OpenVMS' and code & 1:
253 253 code = 0
254 254 if code:
255 255 raise Abort(_("command '%s' failed: %s") %
256 256 (cmd, explainexit(code)))
257 257 fp = open(outname, 'rb')
258 258 r = fp.read()
259 259 fp.close()
260 260 return r
261 261 finally:
262 262 try:
263 263 if inname:
264 264 os.unlink(inname)
265 265 except OSError:
266 266 pass
267 267 try:
268 268 if outname:
269 269 os.unlink(outname)
270 270 except OSError:
271 271 pass
272 272
273 273 filtertable = {
274 274 'tempfile:': tempfilter,
275 275 'pipe:': pipefilter,
276 276 }
277 277
278 278 def filter(s, cmd):
279 279 "filter a string through a command that transforms its input to its output"
280 280 for name, fn in filtertable.iteritems():
281 281 if cmd.startswith(name):
282 282 return fn(s, cmd[len(name):].lstrip())
283 283 return pipefilter(s, cmd)
284 284
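A sketch of the dispatch (POSIX shell assumed): a 'pipe:' prefix or a bare command goes through pipefilter, while 'tempfile:' routes through tempfilter:

    filter('hello\n', 'pipe: tr a-z A-Z')    # -> 'HELLO\n'
    filter('hello\n', 'tr a-z A-Z')          # same; falls back to pipefilter
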
285 285 def binary(s):
286 286 """return true if a string is binary data"""
287 287 return bool(s and '\0' in s)
288 288
289 289 def increasingchunks(source, min=1024, max=65536):
290 290 '''return no less than min bytes per chunk while data remains,
291 291 doubling min after each chunk until it reaches max'''
292 292 def log2(x):
293 293 if not x:
294 294 return 0
295 295 i = 0
296 296 while x:
297 297 x >>= 1
298 298 i += 1
299 299 return i - 1
300 300
301 301 buf = []
302 302 blen = 0
303 303 for chunk in source:
304 304 buf.append(chunk)
305 305 blen += len(chunk)
306 306 if blen >= min:
307 307 if min < max:
308 308 min = min << 1
309 309 nmin = 1 << log2(blen)
310 310 if nmin > min:
311 311 min = nmin
312 312 if min > max:
313 313 min = max
314 314 yield ''.join(buf)
315 315 blen = 0
316 316 buf = []
317 317 if buf:
318 318 yield ''.join(buf)
319 319
320 320 Abort = error.Abort
321 321
322 322 def always(fn):
323 323 return True
324 324
325 325 def never(fn):
326 326 return False
327 327
328 328 def pathto(root, n1, n2):
329 329 '''return the relative path from one place to another.
330 330 root should use os.sep to separate directories
331 331 n1 should use os.sep to separate directories
332 332 n2 should use "/" to separate directories
333 333 returns an os.sep-separated path.
334 334
335 335 If n1 is a relative path, it's assumed it's
336 336 relative to root.
337 337 n2 should always be relative to root.
338 338 '''
339 339 if not n1:
340 340 return localpath(n2)
341 341 if os.path.isabs(n1):
342 342 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
343 343 return os.path.join(root, localpath(n2))
344 344 n2 = '/'.join((pconvert(root), n2))
345 345 a, b = splitpath(n1), n2.split('/')
346 346 a.reverse()
347 347 b.reverse()
348 348 while a and b and a[-1] == b[-1]:
349 349 a.pop()
350 350 b.pop()
351 351 b.reverse()
352 352 return os.sep.join((['..'] * len(a)) + b) or '.'
353 353
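A sketch on POSIX separators: going from one subdirectory of root to a sibling path climbs with '..' components:

    pathto('/repo', 'sub/dir', 'sub/other/file')   # -> '../other/file'
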
354 354 _hgexecutable = None
355 355
356 356 def mainfrozen():
357 357 """return True if we are a frozen executable.
358 358
359 359 The code supports py2exe (most common, Windows only) and tools/freeze
360 360 (portable, not much used).
361 361 """
362 362 return (safehasattr(sys, "frozen") or # new py2exe
363 363 safehasattr(sys, "importers") or # old py2exe
364 364 imp.is_frozen("__main__")) # tools/freeze
365 365
366 366 def hgexecutable():
367 367 """return location of the 'hg' executable.
368 368
369 369 Defaults to $HG or 'hg' in the search path.
370 370 """
371 371 if _hgexecutable is None:
372 372 hg = os.environ.get('HG')
373 373 if hg:
374 374 _sethgexecutable(hg)
375 375 elif mainfrozen():
376 376 _sethgexecutable(sys.executable)
377 377 else:
378 378 exe = findexe('hg') or os.path.basename(sys.argv[0])
379 379 _sethgexecutable(exe)
380 380 return _hgexecutable
381 381
382 382 def _sethgexecutable(path):
383 383 """set location of the 'hg' executable"""
384 384 global _hgexecutable
385 385 _hgexecutable = path
386 386
387 387 def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
388 388 '''enhanced shell command execution.
389 389 run with environment maybe modified, maybe in different dir.
390 390
391 391 if command fails and onerr is None, return status. if ui object,
392 392 print error message and return status, else raise onerr object as
393 393 exception.
394 394
395 395 if out is specified, it is assumed to be a file-like object that has a
396 396 write() method. stdout and stderr will be redirected to out.'''
397 397 try:
398 398 sys.stdout.flush()
399 399 except Exception:
400 400 pass
401 401 def py2shell(val):
402 402 'convert python object into string that is useful to shell'
403 403 if val is None or val is False:
404 404 return '0'
405 405 if val is True:
406 406 return '1'
407 407 return str(val)
408 408 origcmd = cmd
409 409 cmd = quotecommand(cmd)
410 410 env = dict(os.environ)
411 411 env.update((k, py2shell(v)) for k, v in environ.iteritems())
412 412 env['HG'] = hgexecutable()
413 413 if out is None or out == sys.__stdout__:
414 414 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
415 415 env=env, cwd=cwd)
416 416 else:
417 417 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
418 418 env=env, cwd=cwd, stdout=subprocess.PIPE,
419 419 stderr=subprocess.STDOUT)
420 420 for line in proc.stdout:
421 421 out.write(line)
422 422 proc.wait()
423 423 rc = proc.returncode
424 424 if sys.platform == 'OpenVMS' and rc & 1:
425 425 rc = 0
426 426 if rc and onerr:
427 427 errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
428 428 explainexit(rc)[0])
429 429 if errprefix:
430 430 errmsg = '%s: %s' % (errprefix, errmsg)
431 431 try:
432 432 onerr.warn(errmsg + '\n')
433 433 except AttributeError:
434 434 raise onerr(errmsg)
435 435 return rc
436 436
437 437 def checksignature(func):
438 438 '''wrap a function with code to check for calling errors'''
439 439 def check(*args, **kwargs):
440 440 try:
441 441 return func(*args, **kwargs)
442 442 except TypeError:
443 443 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
444 444 raise error.SignatureError
445 445 raise
446 446
447 447 return check
448 448
449 449 def copyfile(src, dest):
450 450 "copy a file, preserving mode and atime/mtime"
451 451 if os.path.islink(src):
452 452 try:
453 453 os.unlink(dest)
454 454 except OSError:
455 455 pass
456 456 os.symlink(os.readlink(src), dest)
457 457 else:
458 458 try:
459 459 shutil.copyfile(src, dest)
460 460 shutil.copymode(src, dest)
461 461 except shutil.Error, inst:
462 462 raise Abort(str(inst))
463 463
464 464 def copyfiles(src, dst, hardlink=None):
465 465 """Copy a directory tree using hardlinks if possible"""
466 466
467 467 if hardlink is None:
468 468 hardlink = (os.stat(src).st_dev ==
469 469 os.stat(os.path.dirname(dst)).st_dev)
470 470
471 471 num = 0
472 472 if os.path.isdir(src):
473 473 os.mkdir(dst)
474 474 for name, kind in osutil.listdir(src):
475 475 srcname = os.path.join(src, name)
476 476 dstname = os.path.join(dst, name)
477 477 hardlink, n = copyfiles(srcname, dstname, hardlink)
478 478 num += n
479 479 else:
480 480 if hardlink:
481 481 try:
482 482 oslink(src, dst)
483 483 except (IOError, OSError):
484 484 hardlink = False
485 485 shutil.copy(src, dst)
486 486 else:
487 487 shutil.copy(src, dst)
488 488 num += 1
489 489
490 490 return hardlink, num
491 491
492 492 _winreservednames = '''con prn aux nul
493 493 com1 com2 com3 com4 com5 com6 com7 com8 com9
494 494 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
495 495 _winreservedchars = ':*?"<>|'
496 496 def checkwinfilename(path):
497 497 '''Check that the base-relative path is a valid filename on Windows.
498 498 Returns None if the path is ok, or a UI string describing the problem.
499 499
500 500 >>> checkwinfilename("just/a/normal/path")
501 501 >>> checkwinfilename("foo/bar/con.xml")
502 502 "filename contains 'con', which is reserved on Windows"
503 503 >>> checkwinfilename("foo/con.xml/bar")
504 504 "filename contains 'con', which is reserved on Windows"
505 505 >>> checkwinfilename("foo/bar/xml.con")
506 506 >>> checkwinfilename("foo/bar/AUX/bla.txt")
507 507 "filename contains 'AUX', which is reserved on Windows"
508 508 >>> checkwinfilename("foo/bar/bla:.txt")
509 509 "filename contains ':', which is reserved on Windows"
510 510 >>> checkwinfilename("foo/bar/b\07la.txt")
511 511 "filename contains '\\\\x07', which is invalid on Windows"
512 512 >>> checkwinfilename("foo/bar/bla ")
513 513 "filename ends with ' ', which is not allowed on Windows"
514 514 '''
515 515 for n in path.replace('\\', '/').split('/'):
516 516 if not n:
517 517 continue
518 518 for c in n:
519 519 if c in _winreservedchars:
520 520 return _("filename contains '%s', which is reserved "
521 521 "on Windows") % c
522 522 if ord(c) <= 31:
523 523 return _("filename contains %r, which is invalid "
524 524 "on Windows") % c
525 525 base = n.split('.')[0]
526 526 if base and base.lower() in _winreservednames:
527 527 return _("filename contains '%s', which is reserved "
528 528 "on Windows") % base
529 529 t = n[-1]
530 530 if t in '. ':
531 531 return _("filename ends with '%s', which is not allowed "
532 532 "on Windows") % t
533 533
534 534 if os.name == 'nt':
535 535 checkosfilename = checkwinfilename
536 536 else:
537 537 checkosfilename = platform.checkosfilename
538 538
539 539 def makelock(info, pathname):
540 540 try:
541 541 return os.symlink(info, pathname)
542 542 except OSError, why:
543 543 if why.errno == errno.EEXIST:
544 544 raise
545 545 except AttributeError: # no symlink in os
546 546 pass
547 547
548 548 ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
549 549 os.write(ld, info)
550 550 os.close(ld)
551 551
552 552 def readlock(pathname):
553 553 try:
554 554 return os.readlink(pathname)
555 555 except OSError, why:
556 556 if why.errno not in (errno.EINVAL, errno.ENOSYS):
557 557 raise
558 558 except AttributeError: # no symlink in os
559 559 pass
560 560 fp = posixfile(pathname)
561 561 r = fp.read()
562 562 fp.close()
563 563 return r
564 564
565 565 def fstat(fp):
566 566 '''stat file object that may not have fileno method.'''
567 567 try:
568 568 return os.fstat(fp.fileno())
569 569 except AttributeError:
570 570 return os.stat(fp.name)
571 571
572 572 # File system features
573 573
574 574 def checkcase(path):
575 575 """
576 576 Check whether the given path is on a case-sensitive filesystem
577 577
578 578 Requires a path (like /foo/.hg) ending with a foldable final
579 579 directory component.
580 580 """
581 581 s1 = os.stat(path)
582 582 d, b = os.path.split(path)
583 583 p2 = os.path.join(d, b.upper())
584 584 if path == p2:
585 585 p2 = os.path.join(d, b.lower())
586 586 try:
587 587 s2 = os.stat(p2)
588 588 if s2 == s1:
589 589 return False
590 590 return True
591 591 except OSError:
592 592 return True
593 593
594 594 _fspathcache = {}
595 595 def fspath(name, root):
596 596 '''Get name in the case stored in the filesystem
597 597
598 598 The name is either relative to root, or it is an absolute path starting
599 599 with root. Note that this function is unnecessary, and should not be
600 600 called, for case-sensitive filesystems (simply because it's expensive).
601 601 '''
602 602 # If name is absolute, make it relative
603 603 if name.lower().startswith(root.lower()):
604 604 l = len(root)
605 605 if name[l] == os.sep or name[l] == os.altsep:
606 606 l = l + 1
607 607 name = name[l:]
608 608
609 609 if not os.path.lexists(os.path.join(root, name)):
610 610 return None
611 611
612 612 seps = os.sep
613 613 if os.altsep:
614 614 seps = seps + os.altsep
615 615 # Protect backslashes. This gets silly very quickly.
616 616 seps.replace('\\','\\\\')
617 617 pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
618 618 dir = os.path.normcase(os.path.normpath(root))
619 619 result = []
620 620 for part, sep in pattern.findall(name):
621 621 if sep:
622 622 result.append(sep)
623 623 continue
624 624
625 625 if dir not in _fspathcache:
626 626 _fspathcache[dir] = os.listdir(dir)
627 627 contents = _fspathcache[dir]
628 628
629 629 lpart = part.lower()
630 630 lenp = len(part)
631 631 for n in contents:
632 632 if lenp == len(n) and n.lower() == lpart:
633 633 result.append(n)
634 634 break
635 635 else:
636 636 # Cannot happen, as the file exists!
637 637 result.append(part)
638 638 dir = os.path.join(dir, lpart)
639 639
640 640 return ''.join(result)
641 641
642 642 def checknlink(testfile):
643 643 '''check whether hardlink count reporting works properly'''
644 644
645 645 # testfile may be open, so we need a separate file for checking to
646 646 # work around issue2543 (or testfile may get lost on Samba shares)
647 647 f1 = testfile + ".hgtmp1"
648 648 if os.path.lexists(f1):
649 649 return False
650 650 try:
651 651 posixfile(f1, 'w').close()
652 652 except IOError:
653 653 return False
654 654
655 655 f2 = testfile + ".hgtmp2"
656 656 fd = None
657 657 try:
658 658 try:
659 659 oslink(f1, f2)
660 660 except OSError:
661 661 return False
662 662
663 663 # nlinks() may behave differently for files on Windows shares if
664 664 # the file is open.
665 665 fd = posixfile(f2)
666 666 return nlinks(f2) > 1
667 667 finally:
668 668 if fd is not None:
669 669 fd.close()
670 670 for f in (f1, f2):
671 671 try:
672 672 os.unlink(f)
673 673 except OSError:
674 674 pass
675 675
676 676 return False
677 677
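A sketch of the intended call pattern (the probe path is hypothetical; it only needs to name a writable directory):

    import os
    # probe once, then decide between hardlinking and plain copying
    probe = os.path.join('/path/to/repo/.hg/store', 'probe')
    usehardlinks = checknlink(probe)
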
678 678 def endswithsep(path):
679 679 '''Check whether path ends with os.sep or os.altsep.'''
680 680 return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)
681 681
682 682 def splitpath(path):
683 683 '''Split path by os.sep.
684 684 Note that this function does not use os.altsep because it is
685 685 intended as a simple alternative to "xxx.split(os.sep)".
686 686 It is recommended to run os.path.normpath() on the path
687 687 beforehand if needed.'''
688 688 return path.split(os.sep)
689 689
690 690 def gui():
691 691 '''Are we running in a GUI?'''
692 692 if sys.platform == 'darwin':
693 693 if 'SSH_CONNECTION' in os.environ:
694 694 # handle SSH access to a box where the user is logged in
695 695 return False
696 696 elif getattr(osutil, 'isgui', None):
697 697 # check if a CoreGraphics session is available
698 698 return osutil.isgui()
699 699 else:
700 700 # pure build; use a safe default
701 701 return True
702 702 else:
703 703 return os.name == "nt" or os.environ.get("DISPLAY")
704 704
705 705 def mktempcopy(name, emptyok=False, createmode=None):
706 706 """Create a temporary file with the same contents as name
707 707
708 708 The permission bits are copied from the original file.
709 709
710 710 If the temporary file is going to be truncated immediately, you
711 711 can use emptyok=True as an optimization.
712 712
713 713 Returns the name of the temporary file.
714 714 """
715 715 d, fn = os.path.split(name)
716 716 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
717 717 os.close(fd)
718 718 # Temporary files are created with mode 0600, which is usually not
719 719 # what we want. If the original file already exists, just copy
720 720 # its mode. Otherwise, manually obey umask.
721 721 copymode(name, temp, createmode)
722 722 if emptyok:
723 723 return temp
724 724 try:
725 725 try:
726 726 ifp = posixfile(name, "rb")
727 727 except IOError, inst:
728 728 if inst.errno == errno.ENOENT:
729 729 return temp
730 730 if not getattr(inst, 'filename', None):
731 731 inst.filename = name
732 732 raise
733 733 ofp = posixfile(temp, "wb")
734 734 for chunk in filechunkiter(ifp):
735 735 ofp.write(chunk)
736 736 ifp.close()
737 737 ofp.close()
738 738 except:
739 739 try: os.unlink(temp)
740 740 except: pass
741 741 raise
742 742 return temp
743 743
744 744 class atomictempfile(object):
745 745 '''writeable file object that atomically updates a file
746 746
747 747 All writes will go to a temporary copy of the original file. Call
748 rename() when you are done writing, and atomictempfile will rename
749 the temporary copy to the original name, making the changes visible.
750
751 Unlike other file-like objects, close() discards your writes by
752 simply deleting the temporary file.
748 close() when you are done writing, and atomictempfile will rename
749 the temporary copy to the original name, making the changes
750 visible. If the object is destroyed without being closed, all your
751 writes are discarded.
753 752 '''
754 753 def __init__(self, name, mode='w+b', createmode=None):
755 754 self.__name = name # permanent name
756 755 self._tempname = mktempcopy(name, emptyok=('w' in mode),
757 756 createmode=createmode)
758 757 self._fp = posixfile(self._tempname, mode)
759 758
760 759 # delegated methods
761 760 self.write = self._fp.write
762 761 self.fileno = self._fp.fileno
763 762
764 def rename(self):
763 def close(self):
765 764 if not self._fp.closed:
766 765 self._fp.close()
767 766 rename(self._tempname, localpath(self.__name))
768 767
769 def close(self):
768 def discard(self):
770 769 if not self._fp.closed:
771 770 try:
772 771 os.unlink(self._tempname)
773 772 except OSError:
774 773 pass
775 774 self._fp.close()
776 775
777 776 def __del__(self):
778 777 if safehasattr(self, '_fp'): # constructor actually did something
779 self.close()
778 self.discard()
780 779
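This class is the heart of the change. A minimal caller-side sketch of the new contract (the target file name is illustrative):

    from mercurial.util import atomictempfile

    f = atomictempfile('output')
    f.write('all or nothing\n')
    f.close()      # commit: temp file atomically renamed to 'output'

    g = atomictempfile('output')
    g.write('half-finished junk\n')
    g.discard()    # abort: temp file unlinked, 'output' left untouched
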
781 780 def makedirs(name, mode=None):
782 781 """recursive directory creation with parent mode inheritance"""
783 782 parent = os.path.abspath(os.path.dirname(name))
784 783 try:
785 784 os.mkdir(name)
786 785 except OSError, err:
787 786 if err.errno == errno.EEXIST:
788 787 return
789 788 if not name or parent == name or err.errno != errno.ENOENT:
790 789 raise
791 790 makedirs(parent, mode)
792 791 os.mkdir(name)
793 792 if mode is not None:
794 793 os.chmod(name, mode)
795 794
796 795 def readfile(path):
797 796 fp = open(path, 'rb')
798 797 try:
799 798 return fp.read()
800 799 finally:
801 800 fp.close()
802 801
803 802 def writefile(path, text):
804 803 fp = open(path, 'wb')
805 804 try:
806 805 fp.write(text)
807 806 finally:
808 807 fp.close()
809 808
810 809 def appendfile(path, text):
811 810 fp = open(path, 'ab')
812 811 try:
813 812 fp.write(text)
814 813 finally:
815 814 fp.close()
816 815
817 816 class chunkbuffer(object):
818 817 """Allow arbitrary sized chunks of data to be efficiently read from an
819 818 iterator over chunks of arbitrary size."""
820 819
821 820 def __init__(self, in_iter):
822 821 """in_iter is the iterator that's iterating over the input
823 822 chunks."""
824 823 def splitbig(chunks):
825 824 for chunk in chunks:
826 825 if len(chunk) > 2**20:
827 826 pos = 0
828 827 while pos < len(chunk):
829 828 end = pos + 2 ** 18
830 829 yield chunk[pos:end]
831 830 pos = end
832 831 else:
833 832 yield chunk
834 833 self.iter = splitbig(in_iter)
835 834 self._queue = []
836 835
837 836 def read(self, l):
838 837 """Read L bytes of data from the iterator of chunks of data.
839 838 Returns less than L bytes if the iterator runs dry."""
840 839 left = l
841 840 buf = ''
842 841 queue = self._queue
843 842 while left > 0:
844 843 # refill the queue
845 844 if not queue:
846 845 target = 2**18
847 846 for chunk in self.iter:
848 847 queue.append(chunk)
849 848 target -= len(chunk)
850 849 if target <= 0:
851 850 break
852 851 if not queue:
853 852 break
854 853
855 854 chunk = queue.pop(0)
856 855 left -= len(chunk)
857 856 if left < 0:
858 857 queue.insert(0, chunk[left:])
859 858 buf += chunk[:left]
860 859 else:
861 860 buf += chunk
862 861
863 862 return buf
864 863
865 864 def filechunkiter(f, size=65536, limit=None):
866 865 """Create a generator that produces the data in the file, size
867 866 (default 65536) bytes at a time, up to optional limit (default is
868 867 to read all data). Chunks may be less than size bytes if the
869 868 chunk is the last chunk in the file, or the file is a socket or
870 869 some other type of file that sometimes reads less data than is
871 870 requested."""
872 871 assert size >= 0
873 872 assert limit is None or limit >= 0
874 873 while True:
875 874 if limit is None:
876 875 nbytes = size
877 876 else:
878 877 nbytes = min(limit, size)
879 878 s = nbytes and f.read(nbytes)
880 879 if not s:
881 880 break
882 881 if limit:
883 882 limit -= len(s)
884 883 yield s
885 884
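The two helpers above are designed to compose: filechunkiter() turns a file into variable-size chunks, and chunkbuffer() re-slices any chunk iterator into reads of a caller-chosen size. A sketch (file name hypothetical):

    f = open('somefile', 'rb')
    buf = chunkbuffer(filechunkiter(f, size=65536))
    while True:
        data = buf.read(4096)     # fixed-size reads, short only at EOF
        if not data:
            break
        # ... process data ...
    f.close()
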
886 885 def makedate():
887 886 lt = time.localtime()
888 887 if lt[8] == 1 and time.daylight:
889 888 tz = time.altzone
890 889 else:
891 890 tz = time.timezone
892 891 t = time.mktime(lt)
893 892 if t < 0:
894 893 hint = _("check your clock")
895 894 raise Abort(_("negative timestamp: %d") % t, hint=hint)
896 895 return t, tz
897 896
898 897 def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
899 898 """represent a (unixtime, offset) tuple as a localized time.
900 899 unixtime is seconds since the epoch, and offset is the time zone's
901 900 number of seconds away from UTC. in the format string, "%1"
902 901 expands to the offset's sign and hours, "%2" to its minutes."""
903 902 t, tz = date or makedate()
904 903 if t < 0:
905 904 t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
906 905 tz = 0
907 906 if "%1" in format or "%2" in format:
908 907 sign = (tz > 0) and "-" or "+"
909 908 minutes = abs(tz) // 60
910 909 format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
911 910 format = format.replace("%2", "%02d" % (minutes % 60))
912 911 s = time.strftime(format, time.gmtime(float(t) - tz))
913 912 return s
914 913
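A couple of doctest-style examples of the "%1"/"%2" expansion (assuming a C locale for the day and month names):

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((0, -3600), format='%Y-%m-%d %1%2')
    '1970-01-01 +0100'
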
915 914 def shortdate(date=None):
916 915 """turn (timestamp, tzoff) tuple into iso 8601 date."""
917 916 return datestr(date, format='%Y-%m-%d')
918 917
919 918 def strdate(string, format, defaults=[]):
920 919 """parse a localized time string and return a (unixtime, offset) tuple.
921 920 if the string cannot be parsed, ValueError is raised."""
922 921 def timezone(string):
923 922 tz = string.split()[-1]
924 923 if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
925 924 sign = (tz[0] == "+") and 1 or -1
926 925 hours = int(tz[1:3])
927 926 minutes = int(tz[3:5])
928 927 return -sign * (hours * 60 + minutes) * 60
929 928 if tz == "GMT" or tz == "UTC":
930 929 return 0
931 930 return None
932 931
933 932 # NOTE: unixtime = localunixtime + offset
934 933 offset, date = timezone(string), string
935 934 if offset is not None:
936 935 date = " ".join(string.split()[:-1])
937 936
938 937 # add missing elements from defaults
939 938 usenow = False # default to using biased defaults
940 939 for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
941 940 found = [True for p in part if ("%"+p) in format]
942 941 if not found:
943 942 date += "@" + defaults[part][usenow]
944 943 format += "@%" + part[0]
945 944 else:
946 945 # We've found a specific time element, less specific time
947 946 # elements are relative to today
948 947 usenow = True
949 948
950 949 timetuple = time.strptime(date, format)
951 950 localunixtime = int(calendar.timegm(timetuple))
952 951 if offset is None:
953 952 # local timezone
954 953 unixtime = int(time.mktime(timetuple))
955 954 offset = unixtime - localunixtime
956 955 else:
957 956 unixtime = localunixtime + offset
958 957 return unixtime, offset
959 958
960 959 def parsedate(date, formats=None, bias={}):
961 960 """parse a localized date/time and return a (unixtime, offset) tuple.
962 961
963 962 The date may be a "unixtime offset" string or in one of the specified
964 963 formats. If the date already is a (unixtime, offset) tuple, it is returned.
965 964 """
966 965 if not date:
967 966 return 0, 0
968 967 if isinstance(date, tuple) and len(date) == 2:
969 968 return date
970 969 if not formats:
971 970 formats = defaultdateformats
972 971 date = date.strip()
973 972 try:
974 973 when, offset = map(int, date.split(' '))
975 974 except ValueError:
976 975 # fill out defaults
977 976 now = makedate()
978 977 defaults = {}
979 978 for part in ("d", "mb", "yY", "HI", "M", "S"):
980 979 # this piece is for rounding the specific end of unknowns
981 980 b = bias.get(part)
982 981 if b is None:
983 982 if part[0] in "HMS":
984 983 b = "00"
985 984 else:
986 985 b = "0"
987 986
988 987 # this piece is for matching the generic end to today's date
989 988 n = datestr(now, "%" + part[0])
990 989
991 990 defaults[part] = (b, n)
992 991
993 992 for format in formats:
994 993 try:
995 994 when, offset = strdate(date, format, defaults)
996 995 except (ValueError, OverflowError):
997 996 pass
998 997 else:
999 998 break
1000 999 else:
1001 1000 raise Abort(_('invalid date: %r') % date)
1002 1001 # validate explicit (probably user-specified) date and
1003 1002 # time zone offset. values must fit in signed 32 bits for
1004 1003 # current 32-bit linux runtimes. timezones go from UTC-12
1005 1004 # to UTC+14
1006 1005 if abs(when) > 0x7fffffff:
1007 1006 raise Abort(_('date exceeds 32 bits: %d') % when)
1008 1007 if when < 0:
1009 1008 raise Abort(_('negative date value: %d') % when)
1010 1009 if offset < -50400 or offset > 43200:
1011 1010 raise Abort(_('impossible time zone offset: %d') % offset)
1012 1011 return when, offset
1013 1012
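Doctest-style examples of the two fast paths (a "unixtime offset" string, and a tuple passed through unchanged):

    >>> parsedate('1234567890 0')
    (1234567890, 0)
    >>> parsedate((1234567890, 0))
    (1234567890, 0)
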
1014 1013 def matchdate(date):
1015 1014 """Return a function that matches a given date match specifier
1016 1015
1017 1016 Formats include:
1018 1017
1019 1018 '{date}' match a given date to the accuracy provided
1020 1019
1021 1020 '<{date}' on or before a given date
1022 1021
1023 1022 '>{date}' on or after a given date
1024 1023
1025 1024 >>> p1 = parsedate("10:29:59")
1026 1025 >>> p2 = parsedate("10:30:00")
1027 1026 >>> p3 = parsedate("10:30:59")
1028 1027 >>> p4 = parsedate("10:31:00")
1029 1028 >>> p5 = parsedate("Sep 15 10:30:00 1999")
1030 1029 >>> f = matchdate("10:30")
1031 1030 >>> f(p1[0])
1032 1031 False
1033 1032 >>> f(p2[0])
1034 1033 True
1035 1034 >>> f(p3[0])
1036 1035 True
1037 1036 >>> f(p4[0])
1038 1037 False
1039 1038 >>> f(p5[0])
1040 1039 False
1041 1040 """
1042 1041
1043 1042 def lower(date):
1044 1043 d = dict(mb="1", d="1")
1045 1044 return parsedate(date, extendeddateformats, d)[0]
1046 1045
1047 1046 def upper(date):
1048 1047 d = dict(mb="12", HI="23", M="59", S="59")
1049 1048 for days in ("31", "30", "29"):
1050 1049 try:
1051 1050 d["d"] = days
1052 1051 return parsedate(date, extendeddateformats, d)[0]
1053 1052 except:
1054 1053 pass
1055 1054 d["d"] = "28"
1056 1055 return parsedate(date, extendeddateformats, d)[0]
1057 1056
1058 1057 date = date.strip()
1059 1058
1060 1059 if not date:
1061 1060 raise Abort(_("dates cannot consist entirely of whitespace"))
1062 1061 elif date[0] == "<":
1063 1062 if not date[1:]:
1064 1063 raise Abort(_("invalid day spec, use '<DATE'"))
1065 1064 when = upper(date[1:])
1066 1065 return lambda x: x <= when
1067 1066 elif date[0] == ">":
1068 1067 if not date[1:]:
1069 1068 raise Abort(_("invalid day spec, use '>DATE'"))
1070 1069 when = lower(date[1:])
1071 1070 return lambda x: x >= when
1072 1071 elif date[0] == "-":
1073 1072 try:
1074 1073 days = int(date[1:])
1075 1074 except ValueError:
1076 1075 raise Abort(_("invalid day spec: %s") % date[1:])
1077 1076 if days < 0:
1078 1077 raise Abort(_("%s must be nonnegative (see 'hg help dates')")
1079 1078 % date[1:])
1080 1079 when = makedate()[0] - days * 3600 * 24
1081 1080 return lambda x: x >= when
1082 1081 elif " to " in date:
1083 1082 a, b = date.split(" to ")
1084 1083 start, stop = lower(a), upper(b)
1085 1084 return lambda x: x >= start and x <= stop
1086 1085 else:
1087 1086 start, stop = lower(date), upper(date)
1088 1087 return lambda x: x >= start and x <= stop
1089 1088
1090 1089 def shortuser(user):
1091 1090 """Return a short representation of a user name or email address."""
1092 1091 f = user.find('@')
1093 1092 if f >= 0:
1094 1093 user = user[:f]
1095 1094 f = user.find('<')
1096 1095 if f >= 0:
1097 1096 user = user[f + 1:]
1098 1097 f = user.find(' ')
1099 1098 if f >= 0:
1100 1099 user = user[:f]
1101 1100 f = user.find('.')
1102 1101 if f >= 0:
1103 1102 user = user[:f]
1104 1103 return user
1105 1104
1106 1105 def email(author):
1107 1106 '''get email of author.'''
1108 1107 r = author.find('>')
1109 1108 if r == -1:
1110 1109 r = None
1111 1110 return author[author.find('<') + 1:r]
1112 1111
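Doctest-style examples for the two helpers above (the address is illustrative):

    >>> shortuser('Joe User <joe.user@example.com>')
    'joe'
    >>> email('Joe User <joe.user@example.com>')
    'joe.user@example.com'
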
1113 1112 def _ellipsis(text, maxlength):
1114 1113 if len(text) <= maxlength:
1115 1114 return text, False
1116 1115 else:
1117 1116 return "%s..." % (text[:maxlength - 3]), True
1118 1117
1119 1118 def ellipsis(text, maxlength=400):
1120 1119 """Trim string to at most maxlength (default: 400) characters."""
1121 1120 try:
1122 1121 # use unicode not to split at intermediate multi-byte sequence
1123 1122 utext, truncated = _ellipsis(text.decode(encoding.encoding),
1124 1123 maxlength)
1125 1124 if not truncated:
1126 1125 return text
1127 1126 return utext.encode(encoding.encoding)
1128 1127 except (UnicodeDecodeError, UnicodeEncodeError):
1129 1128 return _ellipsis(text, maxlength)[0]
1130 1129
1131 1130 def bytecount(nbytes):
1132 1131 '''return byte count formatted as readable string, with units'''
1133 1132
1134 1133 units = (
1135 1134 (100, 1 << 30, _('%.0f GB')),
1136 1135 (10, 1 << 30, _('%.1f GB')),
1137 1136 (1, 1 << 30, _('%.2f GB')),
1138 1137 (100, 1 << 20, _('%.0f MB')),
1139 1138 (10, 1 << 20, _('%.1f MB')),
1140 1139 (1, 1 << 20, _('%.2f MB')),
1141 1140 (100, 1 << 10, _('%.0f KB')),
1142 1141 (10, 1 << 10, _('%.1f KB')),
1143 1142 (1, 1 << 10, _('%.2f KB')),
1144 1143 (1, 1, _('%.0f bytes')),
1145 1144 )
1146 1145
1147 1146 for multiplier, divisor, format in units:
1148 1147 if nbytes >= divisor * multiplier:
1149 1148 return format % (nbytes / float(divisor))
1150 1149 return units[-1][2] % nbytes
1151 1150
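The table is ordered so that the first matching (multiplier, divisor) pair picks both the unit and the precision; for example:

    >>> bytecount(512)
    '512 bytes'
    >>> bytecount(1234567)
    '1.18 MB'
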
1152 1151 def uirepr(s):
1153 1152 # Avoid double backslash in Windows path repr()
1154 1153 return repr(s).replace('\\\\', '\\')
1155 1154
1156 1155 # delay import of textwrap
1157 1156 def MBTextWrapper(**kwargs):
1158 1157 class tw(textwrap.TextWrapper):
1159 1158 """
1160 1159 Extend TextWrapper for double-width characters.
1161 1160
1162 1161 Some Asian characters use two terminal columns instead of one.
1163 1162 A good example of this behavior can be seen with u'\u65e5\u672c',
1164 1163 the two Japanese characters for "Japan":
1165 1164 len() returns 2, but when printed to a terminal, they eat 4 columns.
1166 1165
1167 1166 (Note that this has nothing to do whatsoever with unicode
1168 1167 representation, or encoding of the underlying string)
1169 1168 """
1170 1169 def __init__(self, **kwargs):
1171 1170 textwrap.TextWrapper.__init__(self, **kwargs)
1172 1171
1173 1172 def _cutdown(self, ucstr, space_left):
1174 1173 l = 0
1175 1174 colwidth = unicodedata.east_asian_width
1176 1175 for i in xrange(len(ucstr)):
1177 1176 l += colwidth(ucstr[i]) in 'WFA' and 2 or 1
1178 1177 if space_left < l:
1179 1178 return (ucstr[:i], ucstr[i:])
1180 1179 return ucstr, ''
1181 1180
1182 1181 # overriding of base class
1183 1182 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
1184 1183 space_left = max(width - cur_len, 1)
1185 1184
1186 1185 if self.break_long_words:
1187 1186 cut, res = self._cutdown(reversed_chunks[-1], space_left)
1188 1187 cur_line.append(cut)
1189 1188 reversed_chunks[-1] = res
1190 1189 elif not cur_line:
1191 1190 cur_line.append(reversed_chunks.pop())
1192 1191
1193 1192 global MBTextWrapper
1194 1193 MBTextWrapper = tw
1195 1194 return tw(**kwargs)
1196 1195
1197 1196 def wrap(line, width, initindent='', hangindent=''):
1198 1197 maxindent = max(len(hangindent), len(initindent))
1199 1198 if width <= maxindent:
1200 1199 # adjust for weird terminal size
1201 1200 width = max(78, maxindent + 1)
1202 1201 line = line.decode(encoding.encoding, encoding.encodingmode)
1203 1202 initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
1204 1203 hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
1205 1204 wrapper = MBTextWrapper(width=width,
1206 1205 initial_indent=initindent,
1207 1206 subsequent_indent=hangindent)
1208 1207 return wrapper.fill(line).encode(encoding.encoding)
1209 1208
1210 1209 def iterlines(iterator):
1211 1210 for chunk in iterator:
1212 1211 for line in chunk.splitlines():
1213 1212 yield line
1214 1213
1215 1214 def expandpath(path):
1216 1215 return os.path.expanduser(os.path.expandvars(path))
1217 1216
1218 1217 def hgcmd():
1219 1218 """Return the command used to execute current hg
1220 1219
1221 1220 This is different from hgexecutable() because on Windows we want
1222 1221 to avoid things opening new shell windows like batch files, so we
1223 1222 get either the python call or current executable.
1224 1223 """
1225 1224 if mainfrozen():
1226 1225 return [sys.executable]
1227 1226 return gethgcmd()
1228 1227
1229 1228 def rundetached(args, condfn):
1230 1229 """Execute the argument list in a detached process.
1231 1230
1232 1231 condfn is a callable which is called repeatedly and should return
1233 1232 True once the child process is known to have started successfully.
1234 1233 At this point, the child process PID is returned. If the child
1235 1234 process fails to start or finishes before condfn() evaluates to
1236 1235 True, return -1.
1237 1236 """
1238 1237 # Windows case is easier because the child process is either
1239 1238 # successfully starting and validating the condition or exiting
1240 1239 # on failure. We just poll on its PID. On Unix, if the child
1241 1240 # process fails to start, it will be left in a zombie state until
1242 1241 the parent waits on it, which we cannot do since we expect a long
1243 1242 # running process on success. Instead we listen for SIGCHLD telling
1244 1243 # us our child process terminated.
1245 1244 terminated = set()
1246 1245 def handler(signum, frame):
1247 1246 terminated.add(os.wait())
1248 1247 prevhandler = None
1249 1248 SIGCHLD = getattr(signal, 'SIGCHLD', None)
1250 1249 if SIGCHLD is not None:
1251 1250 prevhandler = signal.signal(SIGCHLD, handler)
1252 1251 try:
1253 1252 pid = spawndetached(args)
1254 1253 while not condfn():
1255 1254 if ((pid in terminated or not testpid(pid))
1256 1255 and not condfn()):
1257 1256 return -1
1258 1257 time.sleep(0.1)
1259 1258 return pid
1260 1259 finally:
1261 1260 if prevhandler is not None:
1262 1261 signal.signal(signal.SIGCHLD, prevhandler)
1263 1262
1264 1263 try:
1265 1264 any, all = any, all
1266 1265 except NameError:
1267 1266 def any(iterable):
1268 1267 for i in iterable:
1269 1268 if i:
1270 1269 return True
1271 1270 return False
1272 1271
1273 1272 def all(iterable):
1274 1273 for i in iterable:
1275 1274 if not i:
1276 1275 return False
1277 1276 return True
1278 1277
1279 1278 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
1280 1279 """Return the result of interpolating items in the mapping into string s.
1281 1280
1282 1281 prefix is a single character string, or a two character string with
1283 1282 a backslash as the first character if the prefix needs to be escaped in
1284 1283 a regular expression.
1285 1284
1286 1285 fn is an optional function that will be applied to the replacement text
1287 1286 just before replacement.
1288 1287
1289 1288 escape_prefix is an optional flag that allows using doubled prefix for
1290 1289 its escaping.
1291 1290 """
1292 1291 fn = fn or (lambda s: s)
1293 1292 patterns = '|'.join(mapping.keys())
1294 1293 if escape_prefix:
1295 1294 patterns += '|' + prefix
1296 1295 if len(prefix) > 1:
1297 1296 prefix_char = prefix[1:]
1298 1297 else:
1299 1298 prefix_char = prefix
1300 1299 mapping[prefix_char] = prefix_char
1301 1300 r = re.compile(r'%s(%s)' % (prefix, patterns))
1302 1301 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1303 1302
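Doctest-style examples, including the doubled-prefix escape (mapping keys and values are illustrative):

    >>> interpolate('%', {'foo': 'bar'}, 'say %foo here')
    'say bar here'
    >>> interpolate(r'\$', {'x': '1'}, 'cost: $x, escaped: $$', escape_prefix=True)
    'cost: 1, escaped: $'
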
1304 1303 def getport(port):
1305 1304 """Return the port for a given network service.
1306 1305
1307 1306 If port is an integer, it's returned as is. If it's a string, it's
1308 1307 looked up using socket.getservbyname(). If there's no matching
1309 1308 service, util.Abort is raised.
1310 1309 """
1311 1310 try:
1312 1311 return int(port)
1313 1312 except ValueError:
1314 1313 pass
1315 1314
1316 1315 try:
1317 1316 return socket.getservbyname(port)
1318 1317 except socket.error:
1319 1318 raise Abort(_("no port number associated with service '%s'") % port)
1320 1319
1321 1320 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
1322 1321 '0': False, 'no': False, 'false': False, 'off': False,
1323 1322 'never': False}
1324 1323
1325 1324 def parsebool(s):
1326 1325 """Parse s into a boolean.
1327 1326
1328 1327 If s is not a valid boolean, returns None.
1329 1328 """
1330 1329 return _booleans.get(s.lower(), None)
1331 1330
1332 1331 _hexdig = '0123456789ABCDEFabcdef'
1333 1332 _hextochr = dict((a + b, chr(int(a + b, 16)))
1334 1333 for a in _hexdig for b in _hexdig)
1335 1334
1336 1335 def _urlunquote(s):
1337 1336 """unquote('abc%20def') -> 'abc def'."""
1338 1337 res = s.split('%')
1339 1338 # fastpath
1340 1339 if len(res) == 1:
1341 1340 return s
1342 1341 s = res[0]
1343 1342 for item in res[1:]:
1344 1343 try:
1345 1344 s += _hextochr[item[:2]] + item[2:]
1346 1345 except KeyError:
1347 1346 s += '%' + item
1348 1347 except UnicodeDecodeError:
1349 1348 s += unichr(int(item[:2], 16)) + item[2:]
1350 1349 return s
1351 1350
1352 1351 class url(object):
1353 1352 r"""Reliable URL parser.
1354 1353
1355 1354 This parses URLs and provides attributes for the following
1356 1355 components:
1357 1356
1358 1357 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
1359 1358
1360 1359 Missing components are set to None. The only exception is
1361 1360 fragment, which is set to '' if present but empty.
1362 1361
1363 1362 If parsefragment is False, fragment is included in query. If
1364 1363 parsequery is False, query is included in path. If both are
1365 1364 False, both fragment and query are included in path.
1366 1365
1367 1366 See http://www.ietf.org/rfc/rfc2396.txt for more information.
1368 1367
1369 1368 Note that for backward compatibility reasons, bundle URLs do not
1370 1369 take host names. That means 'bundle://../' has a path of '../'.
1371 1370
1372 1371 Examples:
1373 1372
1374 1373 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
1375 1374 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
1376 1375 >>> url('ssh://[::1]:2200//home/joe/repo')
1377 1376 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
1378 1377 >>> url('file:///home/joe/repo')
1379 1378 <url scheme: 'file', path: '/home/joe/repo'>
1380 1379 >>> url('file:///c:/temp/foo/')
1381 1380 <url scheme: 'file', path: 'c:/temp/foo/'>
1382 1381 >>> url('bundle:foo')
1383 1382 <url scheme: 'bundle', path: 'foo'>
1384 1383 >>> url('bundle://../foo')
1385 1384 <url scheme: 'bundle', path: '../foo'>
1386 1385 >>> url(r'c:\foo\bar')
1387 1386 <url path: 'c:\\foo\\bar'>
1388 1387 >>> url(r'\\blah\blah\blah')
1389 1388 <url path: '\\\\blah\\blah\\blah'>
1390 1389
1391 1390 Authentication credentials:
1392 1391
1393 1392 >>> url('ssh://joe:xyz@x/repo')
1394 1393 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
1395 1394 >>> url('ssh://joe@x/repo')
1396 1395 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
1397 1396
1398 1397 Query strings and fragments:
1399 1398
1400 1399 >>> url('http://host/a?b#c')
1401 1400 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
1402 1401 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
1403 1402 <url scheme: 'http', host: 'host', path: 'a?b#c'>
1404 1403 """
1405 1404
1406 1405 _safechars = "!~*'()+"
1407 1406 _safepchars = "/!~*'()+"
1408 1407 _matchscheme = re.compile(r'^[a-zA-Z0-9+.\-]+:').match
1409 1408
1410 1409 def __init__(self, path, parsequery=True, parsefragment=True):
1411 1410 # We slowly chomp away at path until we have only the path left
1412 1411 self.scheme = self.user = self.passwd = self.host = None
1413 1412 self.port = self.path = self.query = self.fragment = None
1414 1413 self._localpath = True
1415 1414 self._hostport = ''
1416 1415 self._origpath = path
1417 1416
1418 1417 # special case for Windows drive letters and UNC paths
1419 1418 if hasdriveletter(path) or path.startswith(r'\\'):
1420 1419 self.path = path
1421 1420 return
1422 1421
1423 1422 # For compatibility reasons, we can't handle bundle paths as
1424 1423 # normal URLS
1425 1424 if path.startswith('bundle:'):
1426 1425 self.scheme = 'bundle'
1427 1426 path = path[7:]
1428 1427 if path.startswith('//'):
1429 1428 path = path[2:]
1430 1429 self.path = path
1431 1430 return
1432 1431
1433 1432 if self._matchscheme(path):
1434 1433 parts = path.split(':', 1)
1435 1434 if parts[0]:
1436 1435 self.scheme, path = parts
1437 1436 self._localpath = False
1438 1437
1439 1438 if not path:
1440 1439 path = None
1441 1440 if self._localpath:
1442 1441 self.path = ''
1443 1442 return
1444 1443 else:
1445 1444 if parsefragment and '#' in path:
1446 1445 path, self.fragment = path.split('#', 1)
1447 1446 if not path:
1448 1447 path = None
1449 1448 if self._localpath:
1450 1449 self.path = path
1451 1450 return
1452 1451
1453 1452 if parsequery and '?' in path:
1454 1453 path, self.query = path.split('?', 1)
1455 1454 if not path:
1456 1455 path = None
1457 1456 if not self.query:
1458 1457 self.query = None
1459 1458
1460 1459 # // is required to specify a host/authority
1461 1460 if path and path.startswith('//'):
1462 1461 parts = path[2:].split('/', 1)
1463 1462 if len(parts) > 1:
1464 1463 self.host, path = parts
1465 1464 path = path
1466 1465 else:
1467 1466 self.host = parts[0]
1468 1467 path = None
1469 1468 if not self.host:
1470 1469 self.host = None
1471 1470 # path of file:///d is /d
1472 1471 # path of file:///d:/ is d:/, not /d:/
1473 1472 if path and not hasdriveletter(path):
1474 1473 path = '/' + path
1475 1474
1476 1475 if self.host and '@' in self.host:
1477 1476 self.user, self.host = self.host.rsplit('@', 1)
1478 1477 if ':' in self.user:
1479 1478 self.user, self.passwd = self.user.split(':', 1)
1480 1479 if not self.host:
1481 1480 self.host = None
1482 1481
1483 1482 # Don't split on colons in IPv6 addresses without ports
1484 1483 if (self.host and ':' in self.host and
1485 1484 not (self.host.startswith('[') and self.host.endswith(']'))):
1486 1485 self._hostport = self.host
1487 1486 self.host, self.port = self.host.rsplit(':', 1)
1488 1487 if not self.host:
1489 1488 self.host = None
1490 1489
1491 1490 if (self.host and self.scheme == 'file' and
1492 1491 self.host not in ('localhost', '127.0.0.1', '[::1]')):
1493 1492 raise Abort(_('file:// URLs can only refer to localhost'))
1494 1493
1495 1494 self.path = path
1496 1495
1497 1496 # leave the query string escaped
1498 1497 for a in ('user', 'passwd', 'host', 'port',
1499 1498 'path', 'fragment'):
1500 1499 v = getattr(self, a)
1501 1500 if v is not None:
1502 1501 setattr(self, a, _urlunquote(v))
1503 1502
1504 1503 def __repr__(self):
1505 1504 attrs = []
1506 1505 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
1507 1506 'query', 'fragment'):
1508 1507 v = getattr(self, a)
1509 1508 if v is not None:
1510 1509 attrs.append('%s: %r' % (a, v))
1511 1510 return '<url %s>' % ', '.join(attrs)
1512 1511
1513 1512 def __str__(self):
1514 1513 r"""Join the URL's components back into a URL string.
1515 1514
1516 1515 Examples:
1517 1516
1518 1517 >>> str(url('http://user:pw@host:80/?foo#bar'))
1519 1518 'http://user:pw@host:80/?foo#bar'
1520 1519 >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
1521 1520 'http://user:pw@host:80/?foo=bar&baz=42'
1522 1521 >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
1523 1522 'http://user:pw@host:80/?foo=bar%3dbaz'
1524 1523 >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
1525 1524 'ssh://user:pw@[::1]:2200//home/joe#'
1526 1525 >>> str(url('http://localhost:80//'))
1527 1526 'http://localhost:80//'
1528 1527 >>> str(url('http://localhost:80/'))
1529 1528 'http://localhost:80/'
1530 1529 >>> str(url('http://localhost:80'))
1531 1530 'http://localhost:80/'
1532 1531 >>> str(url('bundle:foo'))
1533 1532 'bundle:foo'
1534 1533 >>> str(url('bundle://../foo'))
1535 1534 'bundle:../foo'
1536 1535 >>> str(url('path'))
1537 1536 'path'
1538 1537 >>> str(url('file:///tmp/foo/bar'))
1539 1538 'file:///tmp/foo/bar'
1540 1539 >>> print url(r'bundle:foo\bar')
1541 1540 bundle:foo\bar
1542 1541 """
1543 1542 if self._localpath:
1544 1543 s = self.path
1545 1544 if self.scheme == 'bundle':
1546 1545 s = 'bundle:' + s
1547 1546 if self.fragment:
1548 1547 s += '#' + self.fragment
1549 1548 return s
1550 1549
1551 1550 s = self.scheme + ':'
1552 1551 if self.user or self.passwd or self.host:
1553 1552 s += '//'
1554 1553 elif self.scheme and (not self.path or self.path.startswith('/')):
1555 1554 s += '//'
1556 1555 if self.user:
1557 1556 s += urllib.quote(self.user, safe=self._safechars)
1558 1557 if self.passwd:
1559 1558 s += ':' + urllib.quote(self.passwd, safe=self._safechars)
1560 1559 if self.user or self.passwd:
1561 1560 s += '@'
1562 1561 if self.host:
1563 1562 if not (self.host.startswith('[') and self.host.endswith(']')):
1564 1563 s += urllib.quote(self.host)
1565 1564 else:
1566 1565 s += self.host
1567 1566 if self.port:
1568 1567 s += ':' + urllib.quote(self.port)
1569 1568 if self.host:
1570 1569 s += '/'
1571 1570 if self.path:
1572 1571 # TODO: similar to the query string, we should not unescape the
1573 1572 # path when we store it, the path might contain '%2f' = '/',
1574 1573 # which we should *not* escape.
1575 1574 s += urllib.quote(self.path, safe=self._safepchars)
1576 1575 if self.query:
1577 1576 # we store the query in escaped form.
1578 1577 s += '?' + self.query
1579 1578 if self.fragment is not None:
1580 1579 s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
1581 1580 return s
1582 1581
1583 1582 def authinfo(self):
1584 1583 user, passwd = self.user, self.passwd
1585 1584 try:
1586 1585 self.user, self.passwd = None, None
1587 1586 s = str(self)
1588 1587 finally:
1589 1588 self.user, self.passwd = user, passwd
1590 1589 if not self.user:
1591 1590 return (s, None)
1592 1591 # authinfo[1] is passed to urllib2 password manager, and its
1593 1592 # URIs must not contain credentials. The host is passed in the
1594 1593 # URIs list because Python < 2.4.3 uses only that to search for
1595 1594 # a password.
1596 1595 return (s, (None, (s, self.host),
1597 1596 self.user, self.passwd or ''))
1598 1597
1599 1598 def isabs(self):
1600 1599 if self.scheme and self.scheme != 'file':
1601 1600 return True # remote URL
1602 1601 if hasdriveletter(self.path):
1603 1602 return True # absolute for our purposes - can't be joined()
1604 1603 if self.path.startswith(r'\\'):
1605 1604 return True # Windows UNC path
1606 1605 if self.path.startswith('/'):
1607 1606 return True # POSIX-style
1608 1607 return False
1609 1608
1610 1609 def localpath(self):
1611 1610 if self.scheme == 'file' or self.scheme == 'bundle':
1612 1611 path = self.path or '/'
1613 1612 # For Windows, we need to promote hosts containing drive
1614 1613 # letters to paths with drive letters.
1615 1614 if hasdriveletter(self._hostport):
1616 1615 path = self._hostport + '/' + self.path
1617 1616 elif self.host is not None and self.path:
1618 1617 path = '/' + path
1619 1618 return path
1620 1619 return self._origpath
1621 1620
1622 1621 def hasscheme(path):
1623 1622 return bool(url(path).scheme)
1624 1623
1625 1624 def hasdriveletter(path):
1626 1625 return path[1:2] == ':' and path[0:1].isalpha()
1627 1626
1628 1627 def urllocalpath(path):
1629 1628 return url(path, parsequery=False, parsefragment=False).localpath()
1630 1629
1631 1630 def hidepassword(u):
1632 1631 '''hide user credential in a url string'''
1633 1632 u = url(u)
1634 1633 if u.passwd:
1635 1634 u.passwd = '***'
1636 1635 return str(u)
1637 1636
1638 1637 def removeauth(u):
1639 1638 '''remove all authentication information from a url string'''
1640 1639 u = url(u)
1641 1640 u.user = u.passwd = None
1642 1641 return str(u)
1643 1642
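Doctest-style examples for the two helpers above (the URL is illustrative):

    >>> hidepassword('http://jane:secret@example.com/repo')
    'http://jane:***@example.com/repo'
    >>> removeauth('http://jane:secret@example.com/repo')
    'http://example.com/repo'
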
1644 1643 def isatty(fd):
1645 1644 try:
1646 1645 return fd.isatty()
1647 1646 except AttributeError:
1648 1647 return False
@@ -1,49 +1,48 b''
1 1 import os
2 2 import glob
3 3 from mercurial.util import atomictempfile
4 4
5 5 # basic usage
6 6 def test1_simple():
7 7 if os.path.exists('foo'):
8 8 os.remove('foo')
9 9 file = atomictempfile('foo')
10 10 (dir, basename) = os.path.split(file._tempname)
11 11 assert not os.path.isfile('foo')
12 12 assert basename in glob.glob('.foo-*')
13 13
14 14 file.write('argh\n')
15 file.rename()
15 file.close()
16 16
17 17 assert os.path.isfile('foo')
18 18 assert basename not in glob.glob('.foo-*')
19 19 print 'OK'
20 20
21 # close() removes the temp file but does not make the write
22 # permanent -- essentially discards your work (WTF?!)
23 def test2_close():
21 # discard() removes the temp file without making the write permanent
22 def test2_discard():
24 23 if os.path.exists('foo'):
25 24 os.remove('foo')
26 25 file = atomictempfile('foo')
27 26 (dir, basename) = os.path.split(file._tempname)
28 27
29 28 file.write('yo\n')
30 file.close()
29 file.discard()
31 30
32 31 assert not os.path.isfile('foo')
33 32 assert basename not in os.listdir('.')
34 33 print 'OK'
35 34
36 35 # if a programmer screws up and passes bad args to atomictempfile, they
37 36 # get a plain ordinary TypeError, not infinite recursion
38 37 def test3_oops():
39 38 try:
40 39 file = atomictempfile()
41 40 except TypeError:
42 41 print "OK"
43 42 else:
44 43 print "expected TypeError"
45 44
46 45 if __name__ == '__main__':
47 46 test1_simple()
48 test2_close()
47 test2_discard()
49 48 test3_oops()
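Taken together with the util.py hunk, the test changes above spell out the caller-side migration; as a sketch (path and data are hypothetical):

    # before this change:
    f = atomictempfile(path)
    f.write(data)
    f.rename()     # commit
    # ...or f.close() to throw the write away (the old, surprising behaviour)

    # after this change:
    f = atomictempfile(path)
    f.write(data)
    f.close()      # commit (renamed from rename())
    # ...or f.discard() to throw the write away
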
@@ -1,94 +1,94 b''
1 1 import sys, os, subprocess
2 2
3 3 if subprocess.call(['%s/hghave' % os.environ['TESTDIR'], 'cacheable']):
4 4 sys.exit(80)
5 5
6 6 from mercurial import util, scmutil, extensions
7 7
8 8 filecache = scmutil.filecache
9 9
10 10 class fakerepo(object):
11 11 def __init__(self):
12 12 self._filecache = {}
13 13
14 14 def join(self, p):
15 15 return p
16 16
17 17 def sjoin(self, p):
18 18 return p
19 19
20 20 @filecache('x')
21 21 def cached(self):
22 22 print 'creating'
23 23
24 24 def invalidate(self):
25 25 for k in self._filecache:
26 26 try:
27 27 delattr(self, k)
28 28 except AttributeError:
29 29 pass
30 30
31 31 def basic(repo):
32 32 # file doesn't exist, calls function
33 33 repo.cached
34 34
35 35 repo.invalidate()
36 36 # file still doesn't exist, uses cache
37 37 repo.cached
38 38
39 39 # create empty file
40 40 f = open('x', 'w')
41 41 f.close()
42 42 repo.invalidate()
43 43 # should recreate the object
44 44 repo.cached
45 45
46 46 f = open('x', 'w')
47 47 f.write('a')
48 48 f.close()
49 49 repo.invalidate()
50 50 # should recreate the object
51 51 repo.cached
52 52
53 53 repo.invalidate()
54 54 # stats file again, nothing changed, reuses object
55 55 repo.cached
56 56
57 57 # atomic replace file, size doesn't change
58 58 # hopefully st_mtime doesn't change as well so this doesn't use the cache
59 59 # because of inode change
60 60 f = scmutil.opener('.')('x', 'w', atomictemp=True)
61 61 f.write('b')
62 f.rename()
62 f.close()
63 63
64 64 repo.invalidate()
65 65 repo.cached
66 66
67 67 def fakeuncacheable():
68 68 def wrapcacheable(orig, *args, **kwargs):
69 69 return False
70 70
71 71 def wrapinit(orig, *args, **kwargs):
72 72 pass
73 73
74 74 originit = extensions.wrapfunction(util.cachestat, '__init__', wrapinit)
75 75 origcacheable = extensions.wrapfunction(util.cachestat, 'cacheable',
76 76 wrapcacheable)
77 77
78 78 try:
79 79 os.remove('x')
80 80 except:
81 81 pass
82 82
83 83 basic(fakerepo())
84 84
85 85 util.cachestat.cacheable = origcacheable
86 86 util.cachestat.__init__ = originit
87 87
88 88 print 'basic:'
89 89 print
90 90 basic(fakerepo())
91 91 print
92 92 print 'fakeuncacheable:'
93 93 print
94 94 fakeuncacheable()