##// END OF EJS Templates
Merge with stable
Matt Mackall -
r4335:f4a1eac5 merge default
parent child Browse files
Show More
@@ -0,0 +1,39
1 #!/bin/sh
2
3 # Test issue 529 - mq aborts when merging patch deleting files
4
5 rewrite_path()
6 {
7 sed -e 's:\\:/:g' -e 's:[^ ]*/t/::g'
8 }
9
10 echo "[extensions]" >> $HGRCPATH
11 echo "hgext.mq=" >> $HGRCPATH
12
13 # Commit two dummy files in "init" changeset
14 hg init t
15 cd t
16 echo a > a
17 echo b > b
18 hg ci -Am init
19 hg tag -l init
20
21 # Create a patch removing a
22 hg qnew rm_a
23 hg rm a
24 hg qrefresh -m "rm a"
25
26 # Save the patch queue so we can merge it later
27 hg qsave -c -e 2>&1 | rewrite_path
28
29 # Update b and commit in an "update" changeset
30 hg up -C init
31 echo b >> b
32 hg st
33 hg ci -m update
34
35 # Here, qpush used to abort with :
36 # The system cannot find the file specified => a
37 hg manifest
38 hg qpush -a -m 2>&1 | rewrite_path
39 hg manifest
@@ -0,0 +1,11
1 adding a
2 adding b
3 copy .hg/patches to .hg/patches.1
4 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
5 M b
6 a
7 b
8 merging with queue at: .hg/patches.1
9 applying rm_a
10 Now at: rm_a
11 b
@@ -1,2238 +1,2246
1 1 # queue.py - patch queues for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 '''patch management and development
9 9
10 10 This extension lets you work with a stack of patches in a Mercurial
11 11 repository. It manages two stacks of patches - all known patches, and
12 12 applied patches (subset of known patches).
13 13
14 14 Known patches are represented as patch files in the .hg/patches
15 15 directory. Applied patches are both patch files and changesets.
16 16
17 17 Common tasks (use "hg help command" for more details):
18 18
19 19 prepare repository to work with patches qinit
20 20 create new patch qnew
21 21 import existing patch qimport
22 22
23 23 print patch series qseries
24 24 print applied patches qapplied
25 25 print name of top applied patch qtop
26 26
27 27 add known patch to applied stack qpush
28 28 remove patch from applied stack qpop
29 29 refresh contents of top applied patch qrefresh
30 30 '''
31 31
32 32 from mercurial.i18n import _
33 33 from mercurial import commands, cmdutil, hg, patch, revlog, util, changegroup
34 34 import os, sys, re, errno
35 35
36 36 commands.norepo += " qclone qversion"
37 37
38 38 # Patch names looks like unix-file names.
39 39 # They must be joinable with queue directory and result in the patch path.
40 40 normname = util.normpath
41 41
42 42 class statusentry:
43 43 def __init__(self, rev, name=None):
44 44 if not name:
45 45 fields = rev.split(':', 1)
46 46 if len(fields) == 2:
47 47 self.rev, self.name = fields
48 48 else:
49 49 self.rev, self.name = None, None
50 50 else:
51 51 self.rev, self.name = rev, name
52 52
53 53 def __str__(self):
54 54 return self.rev + ':' + self.name
55 55
56 56 class queue:
57 57 def __init__(self, ui, path, patchdir=None):
58 58 self.basepath = path
59 59 self.path = patchdir or os.path.join(path, "patches")
60 60 self.opener = util.opener(self.path)
61 61 self.ui = ui
62 62 self.applied = []
63 63 self.full_series = []
64 64 self.applied_dirty = 0
65 65 self.series_dirty = 0
66 66 self.series_path = "series"
67 67 self.status_path = "status"
68 68 self.guards_path = "guards"
69 69 self.active_guards = None
70 70 self.guards_dirty = False
71 71 self._diffopts = None
72 72
73 73 if os.path.exists(self.join(self.series_path)):
74 74 self.full_series = self.opener(self.series_path).read().splitlines()
75 75 self.parse_series()
76 76
77 77 if os.path.exists(self.join(self.status_path)):
78 78 lines = self.opener(self.status_path).read().splitlines()
79 79 self.applied = [statusentry(l) for l in lines]
80 80
81 81 def diffopts(self):
82 82 if self._diffopts is None:
83 83 self._diffopts = patch.diffopts(self.ui)
84 84 return self._diffopts
85 85
86 86 def join(self, *p):
87 87 return os.path.join(self.path, *p)
88 88
89 89 def find_series(self, patch):
90 90 pre = re.compile("(\s*)([^#]+)")
91 91 index = 0
92 92 for l in self.full_series:
93 93 m = pre.match(l)
94 94 if m:
95 95 s = m.group(2)
96 96 s = s.rstrip()
97 97 if s == patch:
98 98 return index
99 99 index += 1
100 100 return None
101 101
102 102 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
103 103
104 104 def parse_series(self):
105 105 self.series = []
106 106 self.series_guards = []
107 107 for l in self.full_series:
108 108 h = l.find('#')
109 109 if h == -1:
110 110 patch = l
111 111 comment = ''
112 112 elif h == 0:
113 113 continue
114 114 else:
115 115 patch = l[:h]
116 116 comment = l[h:]
117 117 patch = patch.strip()
118 118 if patch:
119 119 if patch in self.series:
120 120 raise util.Abort(_('%s appears more than once in %s') %
121 121 (patch, self.join(self.series_path)))
122 122 self.series.append(patch)
123 123 self.series_guards.append(self.guard_re.findall(comment))
124 124
125 125 def check_guard(self, guard):
126 126 bad_chars = '# \t\r\n\f'
127 127 first = guard[0]
128 128 for c in '-+':
129 129 if first == c:
130 130 return (_('guard %r starts with invalid character: %r') %
131 131 (guard, c))
132 132 for c in bad_chars:
133 133 if c in guard:
134 134 return _('invalid character in guard %r: %r') % (guard, c)
135 135
136 136 def set_active(self, guards):
137 137 for guard in guards:
138 138 bad = self.check_guard(guard)
139 139 if bad:
140 140 raise util.Abort(bad)
141 141 guards = dict.fromkeys(guards).keys()
142 142 guards.sort()
143 143 self.ui.debug('active guards: %s\n' % ' '.join(guards))
144 144 self.active_guards = guards
145 145 self.guards_dirty = True
146 146
147 147 def active(self):
148 148 if self.active_guards is None:
149 149 self.active_guards = []
150 150 try:
151 151 guards = self.opener(self.guards_path).read().split()
152 152 except IOError, err:
153 153 if err.errno != errno.ENOENT: raise
154 154 guards = []
155 155 for i, guard in enumerate(guards):
156 156 bad = self.check_guard(guard)
157 157 if bad:
158 158 self.ui.warn('%s:%d: %s\n' %
159 159 (self.join(self.guards_path), i + 1, bad))
160 160 else:
161 161 self.active_guards.append(guard)
162 162 return self.active_guards
163 163
164 164 def set_guards(self, idx, guards):
165 165 for g in guards:
166 166 if len(g) < 2:
167 167 raise util.Abort(_('guard %r too short') % g)
168 168 if g[0] not in '-+':
169 169 raise util.Abort(_('guard %r starts with invalid char') % g)
170 170 bad = self.check_guard(g[1:])
171 171 if bad:
172 172 raise util.Abort(bad)
173 173 drop = self.guard_re.sub('', self.full_series[idx])
174 174 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
175 175 self.parse_series()
176 176 self.series_dirty = True
177 177
178 178 def pushable(self, idx):
179 179 if isinstance(idx, str):
180 180 idx = self.series.index(idx)
181 181 patchguards = self.series_guards[idx]
182 182 if not patchguards:
183 183 return True, None
184 184 default = False
185 185 guards = self.active()
186 186 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
187 187 if exactneg:
188 188 return False, exactneg[0]
189 189 pos = [g for g in patchguards if g[0] == '+']
190 190 exactpos = [g for g in pos if g[1:] in guards]
191 191 if pos:
192 192 if exactpos:
193 193 return True, exactpos[0]
194 194 return False, pos
195 195 return True, ''
196 196
197 197 def explain_pushable(self, idx, all_patches=False):
198 198 write = all_patches and self.ui.write or self.ui.warn
199 199 if all_patches or self.ui.verbose:
200 200 if isinstance(idx, str):
201 201 idx = self.series.index(idx)
202 202 pushable, why = self.pushable(idx)
203 203 if all_patches and pushable:
204 204 if why is None:
205 205 write(_('allowing %s - no guards in effect\n') %
206 206 self.series[idx])
207 207 else:
208 208 if not why:
209 209 write(_('allowing %s - no matching negative guards\n') %
210 210 self.series[idx])
211 211 else:
212 212 write(_('allowing %s - guarded by %r\n') %
213 213 (self.series[idx], why))
214 214 if not pushable:
215 215 if why:
216 216 write(_('skipping %s - guarded by %r\n') %
217 217 (self.series[idx], why))
218 218 else:
219 219 write(_('skipping %s - no matching guards\n') %
220 220 self.series[idx])
221 221
222 222 def save_dirty(self):
223 223 def write_list(items, path):
224 224 fp = self.opener(path, 'w')
225 225 for i in items:
226 226 print >> fp, i
227 227 fp.close()
228 228 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
229 229 if self.series_dirty: write_list(self.full_series, self.series_path)
230 230 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
231 231
232 232 def readheaders(self, patch):
233 233 def eatdiff(lines):
234 234 while lines:
235 235 l = lines[-1]
236 236 if (l.startswith("diff -") or
237 237 l.startswith("Index:") or
238 238 l.startswith("===========")):
239 239 del lines[-1]
240 240 else:
241 241 break
242 242 def eatempty(lines):
243 243 while lines:
244 244 l = lines[-1]
245 245 if re.match('\s*$', l):
246 246 del lines[-1]
247 247 else:
248 248 break
249 249
250 250 pf = self.join(patch)
251 251 message = []
252 252 comments = []
253 253 user = None
254 254 date = None
255 255 format = None
256 256 subject = None
257 257 diffstart = 0
258 258
259 259 for line in file(pf):
260 260 line = line.rstrip()
261 261 if line.startswith('diff --git'):
262 262 diffstart = 2
263 263 break
264 264 if diffstart:
265 265 if line.startswith('+++ '):
266 266 diffstart = 2
267 267 break
268 268 if line.startswith("--- "):
269 269 diffstart = 1
270 270 continue
271 271 elif format == "hgpatch":
272 272 # parse values when importing the result of an hg export
273 273 if line.startswith("# User "):
274 274 user = line[7:]
275 275 elif line.startswith("# Date "):
276 276 date = line[7:]
277 277 elif not line.startswith("# ") and line:
278 278 message.append(line)
279 279 format = None
280 280 elif line == '# HG changeset patch':
281 281 format = "hgpatch"
282 282 elif (format != "tagdone" and (line.startswith("Subject: ") or
283 283 line.startswith("subject: "))):
284 284 subject = line[9:]
285 285 format = "tag"
286 286 elif (format != "tagdone" and (line.startswith("From: ") or
287 287 line.startswith("from: "))):
288 288 user = line[6:]
289 289 format = "tag"
290 290 elif format == "tag" and line == "":
291 291 # when looking for tags (subject: from: etc) they
292 292 # end once you find a blank line in the source
293 293 format = "tagdone"
294 294 elif message or line:
295 295 message.append(line)
296 296 comments.append(line)
297 297
298 298 eatdiff(message)
299 299 eatdiff(comments)
300 300 eatempty(message)
301 301 eatempty(comments)
302 302
303 303 # make sure message isn't empty
304 304 if format and format.startswith("tag") and subject:
305 305 message.insert(0, "")
306 306 message.insert(0, subject)
307 307 return (message, comments, user, date, diffstart > 1)
308 308
309 309 def removeundo(self, repo):
310 310 undo = repo.sjoin('undo')
311 311 if not os.path.exists(undo):
312 312 return
313 313 try:
314 314 os.unlink(undo)
315 315 except OSError, inst:
316 316 self.ui.warn('error removing undo: %s\n' % str(inst))
317 317
318 318 def printdiff(self, repo, node1, node2=None, files=None,
319 319 fp=None, changes=None, opts={}):
320 320 fns, matchfn, anypats = cmdutil.matchpats(repo, files, opts)
321 321
322 322 patch.diff(repo, node1, node2, fns, match=matchfn,
323 323 fp=fp, changes=changes, opts=self.diffopts())
324 324
325 325 def mergeone(self, repo, mergeq, head, patch, rev, wlock):
326 326 # first try just applying the patch
327 327 (err, n) = self.apply(repo, [ patch ], update_status=False,
328 328 strict=True, merge=rev, wlock=wlock)
329 329
330 330 if err == 0:
331 331 return (err, n)
332 332
333 333 if n is None:
334 334 raise util.Abort(_("apply failed for patch %s") % patch)
335 335
336 336 self.ui.warn("patch didn't work out, merging %s\n" % patch)
337 337
338 338 # apply failed, strip away that rev and merge.
339 339 hg.clean(repo, head, wlock=wlock)
340 340 self.strip(repo, n, update=False, backup='strip', wlock=wlock)
341 341
342 342 ctx = repo.changectx(rev)
343 343 ret = hg.merge(repo, rev, wlock=wlock)
344 344 if ret:
345 345 raise util.Abort(_("update returned %d") % ret)
346 346 n = repo.commit(None, ctx.description(), ctx.user(),
347 347 force=1, wlock=wlock)
348 348 if n == None:
349 349 raise util.Abort(_("repo commit failed"))
350 350 try:
351 351 message, comments, user, date, patchfound = mergeq.readheaders(patch)
352 352 except:
353 353 raise util.Abort(_("unable to read %s") % patch)
354 354
355 355 patchf = self.opener(patch, "w")
356 356 if comments:
357 357 comments = "\n".join(comments) + '\n\n'
358 358 patchf.write(comments)
359 359 self.printdiff(repo, head, n, fp=patchf)
360 360 patchf.close()
361 361 self.removeundo(repo)
362 362 return (0, n)
363 363
364 364 def qparents(self, repo, rev=None):
365 365 if rev is None:
366 366 (p1, p2) = repo.dirstate.parents()
367 367 if p2 == revlog.nullid:
368 368 return p1
369 369 if len(self.applied) == 0:
370 370 return None
371 371 return revlog.bin(self.applied[-1].rev)
372 372 pp = repo.changelog.parents(rev)
373 373 if pp[1] != revlog.nullid:
374 374 arevs = [ x.rev for x in self.applied ]
375 375 p0 = revlog.hex(pp[0])
376 376 p1 = revlog.hex(pp[1])
377 377 if p0 in arevs:
378 378 return pp[0]
379 379 if p1 in arevs:
380 380 return pp[1]
381 381 return pp[0]
382 382
383 383 def mergepatch(self, repo, mergeq, series, wlock):
384 384 if len(self.applied) == 0:
385 385 # each of the patches merged in will have two parents. This
386 386 # can confuse the qrefresh, qdiff, and strip code because it
387 387 # needs to know which parent is actually in the patch queue.
388 388 # so, we insert a merge marker with only one parent. This way
389 389 # the first patch in the queue is never a merge patch
390 390 #
391 391 pname = ".hg.patches.merge.marker"
392 392 n = repo.commit(None, '[mq]: merge marker', user=None, force=1,
393 393 wlock=wlock)
394 394 self.removeundo(repo)
395 395 self.applied.append(statusentry(revlog.hex(n), pname))
396 396 self.applied_dirty = 1
397 397
398 398 head = self.qparents(repo)
399 399
400 400 for patch in series:
401 401 patch = mergeq.lookup(patch, strict=True)
402 402 if not patch:
403 403 self.ui.warn("patch %s does not exist\n" % patch)
404 404 return (1, None)
405 405 pushable, reason = self.pushable(patch)
406 406 if not pushable:
407 407 self.explain_pushable(patch, all_patches=True)
408 408 continue
409 409 info = mergeq.isapplied(patch)
410 410 if not info:
411 411 self.ui.warn("patch %s is not applied\n" % patch)
412 412 return (1, None)
413 413 rev = revlog.bin(info[1])
414 414 (err, head) = self.mergeone(repo, mergeq, head, patch, rev, wlock)
415 415 if head:
416 416 self.applied.append(statusentry(revlog.hex(head), patch))
417 417 self.applied_dirty = 1
418 418 if err:
419 419 return (err, head)
420 420 return (0, head)
421 421
422 422 def patch(self, repo, patchfile):
423 423 '''Apply patchfile to the working directory.
424 424 patchfile: file name of patch'''
425 425 files = {}
426 426 try:
427 427 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
428 428 files=files)
429 429 except Exception, inst:
430 430 self.ui.note(str(inst) + '\n')
431 431 if not self.ui.verbose:
432 432 self.ui.warn("patch failed, unable to continue (try -v)\n")
433 433 return (False, files, False)
434 434
435 435 return (True, files, fuzz)
436 436
437 437 def apply(self, repo, series, list=False, update_status=True,
438 438 strict=False, patchdir=None, merge=None, wlock=None):
439 439 # TODO unify with commands.py
440 440 if not patchdir:
441 441 patchdir = self.path
442 442 err = 0
443 443 if not wlock:
444 444 wlock = repo.wlock()
445 445 lock = repo.lock()
446 446 tr = repo.transaction()
447 447 n = None
448 448 for patchname in series:
449 449 pushable, reason = self.pushable(patchname)
450 450 if not pushable:
451 451 self.explain_pushable(patchname, all_patches=True)
452 452 continue
453 453 self.ui.warn("applying %s\n" % patchname)
454 454 pf = os.path.join(patchdir, patchname)
455 455
456 456 try:
457 457 message, comments, user, date, patchfound = self.readheaders(patchname)
458 458 except:
459 459 self.ui.warn("Unable to read %s\n" % patchname)
460 460 err = 1
461 461 break
462 462
463 463 if not message:
464 464 message = "imported patch %s\n" % patchname
465 465 else:
466 466 if list:
467 467 message.append("\nimported patch %s" % patchname)
468 468 message = '\n'.join(message)
469 469
470 470 (patcherr, files, fuzz) = self.patch(repo, pf)
471 471 patcherr = not patcherr
472 472
473 473 if merge and files:
474 # Mark as merged and update dirstate parent info
475 repo.dirstate.update(repo.dirstate.filterfiles(files.keys()), 'm')
474 # Mark as removed/merged and update dirstate parent info
475 removed = []
476 merged = []
477 for f in files:
478 if os.path.exists(repo.dirstate.wjoin(f)):
479 merged.append(f)
480 else:
481 removed.append(f)
482 repo.dirstate.update(repo.dirstate.filterfiles(removed), 'r')
483 repo.dirstate.update(repo.dirstate.filterfiles(merged), 'm')
476 484 p1, p2 = repo.dirstate.parents()
477 485 repo.dirstate.setparents(p1, merge)
478 486 files = patch.updatedir(self.ui, repo, files, wlock=wlock)
479 487 n = repo.commit(files, message, user, date, force=1, lock=lock,
480 488 wlock=wlock)
481 489
482 490 if n == None:
483 491 raise util.Abort(_("repo commit failed"))
484 492
485 493 if update_status:
486 494 self.applied.append(statusentry(revlog.hex(n), patchname))
487 495
488 496 if patcherr:
489 497 if not patchfound:
490 498 self.ui.warn("patch %s is empty\n" % patchname)
491 499 err = 0
492 500 else:
493 501 self.ui.warn("patch failed, rejects left in working dir\n")
494 502 err = 1
495 503 break
496 504
497 505 if fuzz and strict:
498 506 self.ui.warn("fuzz found when applying patch, stopping\n")
499 507 err = 1
500 508 break
501 509 tr.close()
502 510 self.removeundo(repo)
503 511 return (err, n)
504 512
505 513 def delete(self, repo, patches, opts):
506 514 realpatches = []
507 515 for patch in patches:
508 516 patch = self.lookup(patch, strict=True)
509 517 info = self.isapplied(patch)
510 518 if info:
511 519 raise util.Abort(_("cannot delete applied patch %s") % patch)
512 520 if patch not in self.series:
513 521 raise util.Abort(_("patch %s not in series file") % patch)
514 522 realpatches.append(patch)
515 523
516 524 appliedbase = 0
517 525 if opts.get('rev'):
518 526 if not self.applied:
519 527 raise util.Abort(_('no patches applied'))
520 528 revs = cmdutil.revrange(repo, opts['rev'])
521 529 if len(revs) > 1 and revs[0] > revs[1]:
522 530 revs.reverse()
523 531 for rev in revs:
524 532 if appliedbase >= len(self.applied):
525 533 raise util.Abort(_("revision %d is not managed") % rev)
526 534
527 535 base = revlog.bin(self.applied[appliedbase].rev)
528 536 node = repo.changelog.node(rev)
529 537 if node != base:
530 538 raise util.Abort(_("cannot delete revision %d above "
531 539 "applied patches") % rev)
532 540 realpatches.append(self.applied[appliedbase].name)
533 541 appliedbase += 1
534 542
535 543 if not opts.get('keep'):
536 544 r = self.qrepo()
537 545 if r:
538 546 r.remove(realpatches, True)
539 547 else:
540 548 for p in realpatches:
541 549 os.unlink(self.join(p))
542 550
543 551 if appliedbase:
544 552 del self.applied[:appliedbase]
545 553 self.applied_dirty = 1
546 554 indices = [self.find_series(p) for p in realpatches]
547 555 indices.sort()
548 556 for i in indices[-1::-1]:
549 557 del self.full_series[i]
550 558 self.parse_series()
551 559 self.series_dirty = 1
552 560
553 561 def check_toppatch(self, repo):
554 562 if len(self.applied) > 0:
555 563 top = revlog.bin(self.applied[-1].rev)
556 564 pp = repo.dirstate.parents()
557 565 if top not in pp:
558 566 raise util.Abort(_("queue top not at same revision as working directory"))
559 567 return top
560 568 return None
561 569 def check_localchanges(self, repo, force=False, refresh=True):
562 570 m, a, r, d = repo.status()[:4]
563 571 if m or a or r or d:
564 572 if not force:
565 573 if refresh:
566 574 raise util.Abort(_("local changes found, refresh first"))
567 575 else:
568 576 raise util.Abort(_("local changes found"))
569 577 return m, a, r, d
570 578 def new(self, repo, patch, msg=None, force=None):
571 579 if os.path.exists(self.join(patch)):
572 580 raise util.Abort(_('patch "%s" already exists') % patch)
573 581 m, a, r, d = self.check_localchanges(repo, force)
574 582 commitfiles = m + a + r
575 583 self.check_toppatch(repo)
576 584 wlock = repo.wlock()
577 585 insert = self.full_series_end()
578 586 if msg:
579 587 n = repo.commit(commitfiles, "[mq]: %s" % msg, force=True,
580 588 wlock=wlock)
581 589 else:
582 590 n = repo.commit(commitfiles,
583 591 "New patch: %s" % patch, force=True, wlock=wlock)
584 592 if n == None:
585 593 raise util.Abort(_("repo commit failed"))
586 594 self.full_series[insert:insert] = [patch]
587 595 self.applied.append(statusentry(revlog.hex(n), patch))
588 596 self.parse_series()
589 597 self.series_dirty = 1
590 598 self.applied_dirty = 1
591 599 p = self.opener(patch, "w")
592 600 if msg:
593 601 msg = msg + "\n"
594 602 p.write(msg)
595 603 p.close()
596 604 wlock = None
597 605 r = self.qrepo()
598 606 if r: r.add([patch])
599 607 if commitfiles:
600 608 self.refresh(repo, short=True)
601 609 self.removeundo(repo)
602 610
603 611 def strip(self, repo, rev, update=True, backup="all", wlock=None):
604 612 def limitheads(chlog, stop):
605 613 """return the list of all nodes that have no children"""
606 614 p = {}
607 615 h = []
608 616 stoprev = 0
609 617 if stop in chlog.nodemap:
610 618 stoprev = chlog.rev(stop)
611 619
612 620 for r in xrange(chlog.count() - 1, -1, -1):
613 621 n = chlog.node(r)
614 622 if n not in p:
615 623 h.append(n)
616 624 if n == stop:
617 625 break
618 626 if r < stoprev:
619 627 break
620 628 for pn in chlog.parents(n):
621 629 p[pn] = 1
622 630 return h
623 631
624 632 def bundle(cg):
625 633 backupdir = repo.join("strip-backup")
626 634 if not os.path.isdir(backupdir):
627 635 os.mkdir(backupdir)
628 636 name = os.path.join(backupdir, "%s" % revlog.short(rev))
629 637 name = savename(name)
630 638 self.ui.warn("saving bundle to %s\n" % name)
631 639 return changegroup.writebundle(cg, name, "HG10BZ")
632 640
633 641 def stripall(revnum):
634 642 mm = repo.changectx(rev).manifest()
635 643 seen = {}
636 644
637 645 for x in xrange(revnum, repo.changelog.count()):
638 646 for f in repo.changectx(x).files():
639 647 if f in seen:
640 648 continue
641 649 seen[f] = 1
642 650 if f in mm:
643 651 filerev = mm[f]
644 652 else:
645 653 filerev = 0
646 654 seen[f] = filerev
647 655 # we go in two steps here so the strip loop happens in a
648 656 # sensible order. When stripping many files, this helps keep
649 657 # our disk access patterns under control.
650 658 seen_list = seen.keys()
651 659 seen_list.sort()
652 660 for f in seen_list:
653 661 ff = repo.file(f)
654 662 filerev = seen[f]
655 663 if filerev != 0:
656 664 if filerev in ff.nodemap:
657 665 filerev = ff.rev(filerev)
658 666 else:
659 667 filerev = 0
660 668 ff.strip(filerev, revnum)
661 669
662 670 if not wlock:
663 671 wlock = repo.wlock()
664 672 lock = repo.lock()
665 673 chlog = repo.changelog
666 674 # TODO delete the undo files, and handle undo of merge sets
667 675 pp = chlog.parents(rev)
668 676 revnum = chlog.rev(rev)
669 677
670 678 if update:
671 679 self.check_localchanges(repo, refresh=False)
672 680 urev = self.qparents(repo, rev)
673 681 hg.clean(repo, urev, wlock=wlock)
674 682 repo.dirstate.write()
675 683
676 684 # save is a list of all the branches we are truncating away
677 685 # that we actually want to keep. changegroup will be used
678 686 # to preserve them and add them back after the truncate
679 687 saveheads = []
680 688 savebases = {}
681 689
682 690 heads = limitheads(chlog, rev)
683 691 seen = {}
684 692
685 693 # search through all the heads, finding those where the revision
686 694 # we want to strip away is an ancestor. Also look for merges
687 695 # that might be turned into new heads by the strip.
688 696 while heads:
689 697 h = heads.pop()
690 698 n = h
691 699 while True:
692 700 seen[n] = 1
693 701 pp = chlog.parents(n)
694 702 if pp[1] != revlog.nullid:
695 703 for p in pp:
696 704 if chlog.rev(p) > revnum and p not in seen:
697 705 heads.append(p)
698 706 if pp[0] == revlog.nullid:
699 707 break
700 708 if chlog.rev(pp[0]) < revnum:
701 709 break
702 710 n = pp[0]
703 711 if n == rev:
704 712 break
705 713 r = chlog.reachable(h, rev)
706 714 if rev not in r:
707 715 saveheads.append(h)
708 716 for x in r:
709 717 if chlog.rev(x) > revnum:
710 718 savebases[x] = 1
711 719
712 720 # create a changegroup for all the branches we need to keep
713 721 if backup == "all":
714 722 backupch = repo.changegroupsubset([rev], chlog.heads(), 'strip')
715 723 bundle(backupch)
716 724 if saveheads:
717 725 backupch = repo.changegroupsubset(savebases.keys(), saveheads, 'strip')
718 726 chgrpfile = bundle(backupch)
719 727
720 728 stripall(revnum)
721 729
722 730 change = chlog.read(rev)
723 731 chlog.strip(revnum, revnum)
724 732 repo.manifest.strip(repo.manifest.rev(change[0]), revnum)
725 733 self.removeundo(repo)
726 734 if saveheads:
727 735 self.ui.status("adding branch\n")
728 736 commands.unbundle(self.ui, repo, "file:%s" % chgrpfile,
729 737 update=False)
730 738 if backup != "strip":
731 739 os.unlink(chgrpfile)
732 740
733 741 def isapplied(self, patch):
734 742 """returns (index, rev, patch)"""
735 743 for i in xrange(len(self.applied)):
736 744 a = self.applied[i]
737 745 if a.name == patch:
738 746 return (i, a.rev, a.name)
739 747 return None
740 748
741 749 # if the exact patch name does not exist, we try a few
742 750 # variations. If strict is passed, we try only #1
743 751 #
744 752 # 1) a number to indicate an offset in the series file
745 753 # 2) a unique substring of the patch name was given
746 754 # 3) patchname[-+]num to indicate an offset in the series file
747 755 def lookup(self, patch, strict=False):
748 756 patch = patch and str(patch)
749 757
750 758 def partial_name(s):
751 759 if s in self.series:
752 760 return s
753 761 matches = [x for x in self.series if s in x]
754 762 if len(matches) > 1:
755 763 self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
756 764 for m in matches:
757 765 self.ui.warn(' %s\n' % m)
758 766 return None
759 767 if matches:
760 768 return matches[0]
761 769 if len(self.series) > 0 and len(self.applied) > 0:
762 770 if s == 'qtip':
763 771 return self.series[self.series_end(True)-1]
764 772 if s == 'qbase':
765 773 return self.series[0]
766 774 return None
767 775 if patch == None:
768 776 return None
769 777
770 778 # we don't want to return a partial match until we make
771 779 # sure the file name passed in does not exist (checked below)
772 780 res = partial_name(patch)
773 781 if res and res == patch:
774 782 return res
775 783
776 784 if not os.path.isfile(self.join(patch)):
777 785 try:
778 786 sno = int(patch)
779 787 except(ValueError, OverflowError):
780 788 pass
781 789 else:
782 790 if sno < len(self.series):
783 791 return self.series[sno]
784 792 if not strict:
785 793 # return any partial match made above
786 794 if res:
787 795 return res
788 796 minus = patch.rfind('-')
789 797 if minus >= 0:
790 798 res = partial_name(patch[:minus])
791 799 if res:
792 800 i = self.series.index(res)
793 801 try:
794 802 off = int(patch[minus+1:] or 1)
795 803 except(ValueError, OverflowError):
796 804 pass
797 805 else:
798 806 if i - off >= 0:
799 807 return self.series[i - off]
800 808 plus = patch.rfind('+')
801 809 if plus >= 0:
802 810 res = partial_name(patch[:plus])
803 811 if res:
804 812 i = self.series.index(res)
805 813 try:
806 814 off = int(patch[plus+1:] or 1)
807 815 except(ValueError, OverflowError):
808 816 pass
809 817 else:
810 818 if i + off < len(self.series):
811 819 return self.series[i + off]
812 820 raise util.Abort(_("patch %s not in series") % patch)
813 821
814 822 def push(self, repo, patch=None, force=False, list=False,
815 823 mergeq=None, wlock=None):
816 824 if not wlock:
817 825 wlock = repo.wlock()
818 826 patch = self.lookup(patch)
819 827 # Suppose our series file is: A B C and the current 'top' patch is B.
820 828 # qpush C should be performed (moving forward)
821 829 # qpush B is a NOP (no change)
822 830 # qpush A is an error (can't go backwards with qpush)
823 831 if patch:
824 832 info = self.isapplied(patch)
825 833 if info:
826 834 if info[0] < len(self.applied) - 1:
827 835 raise util.Abort(_("cannot push to a previous patch: %s") %
828 836 patch)
829 837 if info[0] < len(self.series) - 1:
830 838 self.ui.warn(_('qpush: %s is already at the top\n') % patch)
831 839 else:
832 840 self.ui.warn(_('all patches are currently applied\n'))
833 841 return
834 842
835 843 # Following the above example, starting at 'top' of B:
836 844 # qpush should be performed (pushes C), but a subsequent qpush without
837 845 # an argument is an error (nothing to apply). This allows a loop
838 846 # of "...while hg qpush..." to work as it detects an error when done
839 847 if self.series_end() == len(self.series):
840 848 self.ui.warn(_('patch series already fully applied\n'))
841 849 return 1
842 850 if not force:
843 851 self.check_localchanges(repo)
844 852
845 853 self.applied_dirty = 1;
846 854 start = self.series_end()
847 855 if start > 0:
848 856 self.check_toppatch(repo)
849 857 if not patch:
850 858 patch = self.series[start]
851 859 end = start + 1
852 860 else:
853 861 end = self.series.index(patch, start) + 1
854 862 s = self.series[start:end]
855 863 if mergeq:
856 864 ret = self.mergepatch(repo, mergeq, s, wlock)
857 865 else:
858 866 ret = self.apply(repo, s, list, wlock=wlock)
859 867 top = self.applied[-1].name
860 868 if ret[0]:
861 869 self.ui.write("Errors during apply, please fix and refresh %s\n" %
862 870 top)
863 871 else:
864 872 self.ui.write("Now at: %s\n" % top)
865 873 return ret[0]
866 874
    def pop(self, repo, patch=None, force=False, update=True, all=False,
            wlock=None):
        """Pop applied patches off the stack.

        With no patch argument, pop only the top patch (or everything if
        all is set).  With a patch name, pop down to (and including) that
        patch.  When update is true, working directory files are reverted
        to the new queue parent; local changes abort unless force is set.
        Returns a true value on the "no patches applied" warning path.
        """
        def getfile(f, rev):
            # overwrite the working copy of f with its contents at rev
            t = repo.file(f).read(rev)
            repo.wfile(f, "w").write(t)

        if not wlock:
            wlock = repo.wlock()
        if patch:
            # index, rev, patch
            info = self.isapplied(patch)
            if not info:
                # resolve names/indices like "qbase" or "2" first
                patch = self.lookup(patch)
                info = self.isapplied(patch)
            if not info:
                raise util.Abort(_("patch %s is not applied") % patch)

        if len(self.applied) == 0:
            # Allow qpop -a to work repeatedly,
            # but not qpop without an argument
            self.ui.warn(_("no patches applied\n"))
            return not all

        if not update:
            # if a dirstate parent is among the revisions being stripped,
            # the working dir must be updated regardless of what the
            # caller asked for
            parents = repo.dirstate.parents()
            rr = [ revlog.bin(x.rev) for x in self.applied ]
            for p in parents:
                if p in rr:
                    self.ui.warn("qpop: forcing dirstate update\n")
                    update = True

        if not force and update:
            self.check_localchanges(repo)

        self.applied_dirty = 1;
        end = len(self.applied)
        if not patch:
            if all:
                popi = 0
            else:
                popi = len(self.applied) - 1
        else:
            popi = info[0] + 1
            if popi >= end:
                self.ui.warn("qpop: %s is already at the top\n" % patch)
                return
            # re-point info at the first patch that will be popped
            info = [ popi ] + [self.applied[popi].rev, self.applied[popi].name]

        start = info[0]
        rev = revlog.bin(info[1])

        # we know there are no local changes, so we can make a simplified
        # form of hg.update.
        if update:
            top = self.check_toppatch(repo)
            qp = self.qparents(repo, rev)
            changes = repo.changelog.read(qp)
            mmap = repo.manifest.read(changes[0])
            m, a, r, d, u = repo.status(qp, top)[:5]
            if d:
                raise util.Abort("deletions found between repo revs")
            for f in m:
                getfile(f, mmap[f])
            for f in r:
                getfile(f, mmap[f])
                util.set_exec(repo.wjoin(f), mmap.execf(f))
            repo.dirstate.update(m + r, 'n')
            for f in a:
                # files added by the popped patches vanish from the
                # working directory; empty parent dirs are pruned too
                try:
                    os.unlink(repo.wjoin(f))
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise
                try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                except: pass
            if a:
                repo.dirstate.forget(a)
            repo.dirstate.setparents(qp, revlog.nullid)
        self.strip(repo, rev, update=False, backup='strip', wlock=wlock)
        del self.applied[start:end]
        if len(self.applied):
            self.ui.write("Now at: %s\n" % self.applied[-1].name)
        else:
            self.ui.write("Patch queue now empty\n")
951 959
952 960 def diff(self, repo, pats, opts):
953 961 top = self.check_toppatch(repo)
954 962 if not top:
955 963 self.ui.write("No patches applied\n")
956 964 return
957 965 qp = self.qparents(repo, top)
958 966 if opts.get('git'):
959 967 self.diffopts().git = True
960 968 self.printdiff(repo, qp, files=pats, opts=opts)
961 969
    def refresh(self, repo, pats=None, **opts):
        """Rewrite the topmost applied patch from the working directory.

        The patch file is regenerated (optionally restricted to files
        matching pats / --short), its commit message is replaced by msg
        when given, and the corresponding changeset is re-created.  When
        the top patch is not the repository tip, the patch is instead
        popped and re-pushed.  Returns 1 when no patches are applied.
        """
        if len(self.applied) == 0:
            self.ui.write("No patches applied\n")
            return 1
        wlock = repo.wlock()
        self.check_toppatch(repo)
        (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
        top = revlog.bin(top)
        cparents = repo.changelog.parents(top)
        patchparent = self.qparents(repo, top)
        message, comments, user, date, patchfound = self.readheaders(patchfn)

        patchf = self.opener(patchfn, "w")
        msg = opts.get('msg', '').rstrip()
        if msg:
            if comments:
                # Remove existing message.
                ci = 0
                for mi in xrange(len(message)):
                    while message[mi] != comments[ci]:
                        ci += 1
                    del comments[ci]
            comments.append(msg)
        if comments:
            comments = "\n".join(comments) + '\n\n'
            patchf.write(comments)

        if opts.get('git'):
            self.diffopts().git = True
        fns, matchfn, anypats = cmdutil.matchpats(repo, pats, opts)
        tip = repo.changelog.tip()
        if top == tip:
            # if the top of our patch queue is also the tip, there is an
            # optimization here.  We update the dirstate in place and strip
            # off the tip commit.  Then just commit the current directory
            # tree.  We can also send repo.commit the list of files
            # changed to speed up the diff
            #
            # in short mode, we only diff the files included in the
            # patch already
            #
            # this should really read:
            #   mm, dd, aa, aa2, uu = repo.status(tip, patchparent)[:5]
            # but we do it backwards to take advantage of manifest/chlog
            # caching against the next repo.status call
            #
            mm, aa, dd, aa2, uu = repo.status(patchparent, tip)[:5]
            changes = repo.changelog.read(tip)
            man = repo.manifest.read(changes[0])
            aaa = aa[:]
            if opts.get('short'):
                filelist = mm + aa + dd
            else:
                filelist = None
            m, a, r, d, u = repo.status(files=filelist)[:5]

            # we might end up with files that were added between tip and
            # the dirstate parent, but then changed in the local dirstate.
            # in this case, we want them to only show up in the added section
            for x in m:
                if x not in aa:
                    mm.append(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch.  In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    del dd[dd.index(x)]
                    mm.append(x)
                else:
                    aa.append(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    del aa[aa.index(x)]
                    forget.append(x)
                    continue
                elif x in mm:
                    del mm[mm.index(x)]
                dd.append(x)

            m = util.unique(mm)
            r = util.unique(dd)
            a = util.unique(aa)
            c = [filter(matchfn, l) for l in (m, a, r, [], u)]
            filelist = util.unique(c[0] + c[1] + c[2])
            patch.diff(repo, patchparent, files=filelist, match=matchfn,
                       fp=patchf, changes=c, opts=self.diffopts())
            patchf.close()

            # reparent the dirstate on the patch's parent so the commit
            # below recreates the patch changeset in place
            repo.dirstate.setparents(*cparents)
            copies = {}
            for dst in a:
                src = repo.dirstate.copied(dst)
                if src is None:
                    continue
                copies.setdefault(src, []).append(dst)
            repo.dirstate.update(a, 'a')
            # remember the copies between patchparent and tip
            # this may be slow, so don't do it if we're not tracking copies
            if self.diffopts().git:
                for dst in aaa:
                    f = repo.file(dst)
                    src = f.renamed(man[dst])
                    if src:
                        copies[src[0]] = copies.get(dst, [])
                        if dst in a:
                            copies[src[0]].append(dst)
                    # we can't copy a file created by the patch itself
                    if dst in copies:
                        del copies[dst]
            for src, dsts in copies.iteritems():
                for dst in dsts:
                    repo.dirstate.copy(src, dst)
            repo.dirstate.update(r, 'r')
            # if the patch excludes a modified file, mark that file with mtime=0
            # so status can see it.
            mm = []
            for i in xrange(len(m)-1, -1, -1):
                if not matchfn(m[i]):
                    mm.append(m[i])
                    del m[i]
            repo.dirstate.update(m, 'n')
            repo.dirstate.update(mm, 'n', st_mtime=-1, st_size=-1)
            repo.dirstate.forget(forget)

            if not msg:
                if not message:
                    message = "patch queue: %s\n" % patchfn
                else:
                    message = "\n".join(message)
            else:
                message = msg

            self.strip(repo, top, update=False, backup='strip', wlock=wlock)
            n = repo.commit(filelist, message, changes[1], match=matchfn,
                            force=1, wlock=wlock)
            self.applied[-1] = statusentry(revlog.hex(n), patchfn)
            self.applied_dirty = 1
            self.removeundo(repo)
        else:
            # top patch is not tip: regenerate the patch file, then pop
            # and re-push it to rebuild the changeset
            self.printdiff(repo, patchparent, fp=patchf)
            patchf.close()
            added = repo.status()[1]
            for a in added:
                f = repo.wjoin(a)
                try:
                    os.unlink(f)
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise
                try: os.removedirs(os.path.dirname(f))
                except: pass
            # forget the file copies in the dirstate
            # push should readd the files later on
            repo.dirstate.forget(added)
            self.pop(repo, force=True, wlock=wlock)
            self.push(repo, force=True, wlock=wlock)
1122 1130
1123 1131 def init(self, repo, create=False):
1124 1132 if not create and os.path.isdir(self.path):
1125 1133 raise util.Abort(_("patch queue directory already exists"))
1126 1134 try:
1127 1135 os.mkdir(self.path)
1128 1136 except OSError, inst:
1129 1137 if inst.errno != errno.EEXIST or not create:
1130 1138 raise
1131 1139 if create:
1132 1140 return self.qrepo(create=True)
1133 1141
1134 1142 def unapplied(self, repo, patch=None):
1135 1143 if patch and patch not in self.series:
1136 1144 raise util.Abort(_("patch %s is not in series file") % patch)
1137 1145 if not patch:
1138 1146 start = self.series_end()
1139 1147 else:
1140 1148 start = self.series.index(patch) + 1
1141 1149 unapplied = []
1142 1150 for i in xrange(start, len(self.series)):
1143 1151 pushable, reason = self.pushable(i)
1144 1152 if pushable:
1145 1153 unapplied.append((i, self.series[i]))
1146 1154 self.explain_pushable(i)
1147 1155 return unapplied
1148 1156
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        """Print a slice of the patch series.

        Patches are flagged A(pplied), U(napplied but pushable) or
        G(uarded); with status set, only entries with that flag are shown
        unless the ui is verbose.  With missing, list files in the patch
        directory that are not in the series file instead.  summary
        appends the first line of each patch's message.
        """
        def displayname(patchname):
            # "name: first message line" in summary mode, bare name otherwise
            if summary:
                msg = self.readheaders(patchname)[0]
                msg = msg and ': ' + msg[0] or ': '
            else:
                msg = ''
            return '%s%s' % (patchname, msg)

        applied = dict.fromkeys([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            for i in xrange(start, start+length):
                patch = self.series[i]
                if patch in applied:
                    stat = 'A'
                elif self.pushable(i)[0]:
                    stat = 'U'
                else:
                    stat = 'G'
                pfx = ''
                if self.ui.verbose:
                    # verbose shows every patch with its index and flag
                    pfx = '%d %s ' % (i, stat)
                elif status and status != stat:
                    continue
                self.ui.write('%s%s\n' % (pfx, displayname(patch)))
        else:
            # walk the patch directory for files unknown to the series,
            # skipping mq's own bookkeeping files and dotfiles
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.status_path, self.series_path,
                                   self.guards_path)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            msng_list.sort()
            for x in msng_list:
                pfx = self.ui.verbose and ('D ') or ''
                self.ui.write("%s%s\n" % (pfx, displayname(x)))
1192 1200
1193 1201 def issaveline(self, l):
1194 1202 if l.name == '.hg.patches.save.line':
1195 1203 return True
1196 1204
1197 1205 def qrepo(self, create=False):
1198 1206 if create or os.path.isdir(self.join(".hg")):
1199 1207 return hg.repository(self.ui, path=self.path, create=create)
1200 1208
1201 1209 def restore(self, repo, rev, delete=None, qupdate=None):
1202 1210 c = repo.changelog.read(rev)
1203 1211 desc = c[4].strip()
1204 1212 lines = desc.splitlines()
1205 1213 i = 0
1206 1214 datastart = None
1207 1215 series = []
1208 1216 applied = []
1209 1217 qpp = None
1210 1218 for i in xrange(0, len(lines)):
1211 1219 if lines[i] == 'Patch Data:':
1212 1220 datastart = i + 1
1213 1221 elif lines[i].startswith('Dirstate:'):
1214 1222 l = lines[i].rstrip()
1215 1223 l = l[10:].split(' ')
1216 1224 qpp = [ hg.bin(x) for x in l ]
1217 1225 elif datastart != None:
1218 1226 l = lines[i].rstrip()
1219 1227 se = statusentry(l)
1220 1228 file_ = se.name
1221 1229 if se.rev:
1222 1230 applied.append(se)
1223 1231 else:
1224 1232 series.append(file_)
1225 1233 if datastart == None:
1226 1234 self.ui.warn("No saved patch data found\n")
1227 1235 return 1
1228 1236 self.ui.warn("restoring status: %s\n" % lines[0])
1229 1237 self.full_series = series
1230 1238 self.applied = applied
1231 1239 self.parse_series()
1232 1240 self.series_dirty = 1
1233 1241 self.applied_dirty = 1
1234 1242 heads = repo.changelog.heads()
1235 1243 if delete:
1236 1244 if rev not in heads:
1237 1245 self.ui.warn("save entry has children, leaving it alone\n")
1238 1246 else:
1239 1247 self.ui.warn("removing save entry %s\n" % hg.short(rev))
1240 1248 pp = repo.dirstate.parents()
1241 1249 if rev in pp:
1242 1250 update = True
1243 1251 else:
1244 1252 update = False
1245 1253 self.strip(repo, rev, update=update, backup='strip')
1246 1254 if qpp:
1247 1255 self.ui.warn("saved queue repository parents: %s %s\n" %
1248 1256 (hg.short(qpp[0]), hg.short(qpp[1])))
1249 1257 if qupdate:
1250 1258 print "queue directory updating"
1251 1259 r = self.qrepo()
1252 1260 if not r:
1253 1261 self.ui.warn("Unable to load queue repository\n")
1254 1262 return 1
1255 1263 hg.clean(r, qpp[0])
1256 1264
1257 1265 def save(self, repo, msg=None):
1258 1266 if len(self.applied) == 0:
1259 1267 self.ui.warn("save: no patches applied, exiting\n")
1260 1268 return 1
1261 1269 if self.issaveline(self.applied[-1]):
1262 1270 self.ui.warn("status is already saved\n")
1263 1271 return 1
1264 1272
1265 1273 ar = [ ':' + x for x in self.full_series ]
1266 1274 if not msg:
1267 1275 msg = "hg patches saved state"
1268 1276 else:
1269 1277 msg = "hg patches: " + msg.rstrip('\r\n')
1270 1278 r = self.qrepo()
1271 1279 if r:
1272 1280 pp = r.dirstate.parents()
1273 1281 msg += "\nDirstate: %s %s" % (hg.hex(pp[0]), hg.hex(pp[1]))
1274 1282 msg += "\n\nPatch Data:\n"
1275 1283 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1276 1284 "\n".join(ar) + '\n' or "")
1277 1285 n = repo.commit(None, text, user=None, force=1)
1278 1286 if not n:
1279 1287 self.ui.warn("repo commit failed\n")
1280 1288 return 1
1281 1289 self.applied.append(statusentry(revlog.hex(n),'.hg.patches.save.line'))
1282 1290 self.applied_dirty = 1
1283 1291 self.removeundo(repo)
1284 1292
1285 1293 def full_series_end(self):
1286 1294 if len(self.applied) > 0:
1287 1295 p = self.applied[-1].name
1288 1296 end = self.find_series(p)
1289 1297 if end == None:
1290 1298 return len(self.full_series)
1291 1299 return end + 1
1292 1300 return 0
1293 1301
1294 1302 def series_end(self, all_patches=False):
1295 1303 end = 0
1296 1304 def next(start):
1297 1305 if all_patches:
1298 1306 return start
1299 1307 i = start
1300 1308 while i < len(self.series):
1301 1309 p, reason = self.pushable(i)
1302 1310 if p:
1303 1311 break
1304 1312 self.explain_pushable(i)
1305 1313 i += 1
1306 1314 return i
1307 1315 if len(self.applied) > 0:
1308 1316 p = self.applied[-1].name
1309 1317 try:
1310 1318 end = self.series.index(p)
1311 1319 except ValueError:
1312 1320 return 0
1313 1321 return next(end + 1)
1314 1322 return next(end)
1315 1323
1316 1324 def appliedname(self, index):
1317 1325 pname = self.applied[index].name
1318 1326 if not self.ui.verbose:
1319 1327 p = pname
1320 1328 else:
1321 1329 p = str(self.series.index(pname)) + " " + pname
1322 1330 return p
1323 1331
1324 1332 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1325 1333 force=None, git=False):
1326 1334 def checkseries(patchname):
1327 1335 if patchname in self.series:
1328 1336 raise util.Abort(_('patch %s is already in the series file')
1329 1337 % patchname)
1330 1338 def checkfile(patchname):
1331 1339 if not force and os.path.exists(self.join(patchname)):
1332 1340 raise util.Abort(_('patch "%s" already exists')
1333 1341 % patchname)
1334 1342
1335 1343 if rev:
1336 1344 if files:
1337 1345 raise util.Abort(_('option "-r" not valid when importing '
1338 1346 'files'))
1339 1347 rev = cmdutil.revrange(repo, rev)
1340 1348 rev.sort(lambda x, y: cmp(y, x))
1341 1349 if (len(files) > 1 or len(rev) > 1) and patchname:
1342 1350 raise util.Abort(_('option "-n" not valid when importing multiple '
1343 1351 'patches'))
1344 1352 i = 0
1345 1353 added = []
1346 1354 if rev:
1347 1355 # If mq patches are applied, we can only import revisions
1348 1356 # that form a linear path to qbase.
1349 1357 # Otherwise, they should form a linear path to a head.
1350 1358 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1351 1359 if len(heads) > 1:
1352 1360 raise util.Abort(_('revision %d is the root of more than one '
1353 1361 'branch') % rev[-1])
1354 1362 if self.applied:
1355 1363 base = revlog.hex(repo.changelog.node(rev[0]))
1356 1364 if base in [n.rev for n in self.applied]:
1357 1365 raise util.Abort(_('revision %d is already managed')
1358 1366 % rev[0])
1359 1367 if heads != [revlog.bin(self.applied[-1].rev)]:
1360 1368 raise util.Abort(_('revision %d is not the parent of '
1361 1369 'the queue') % rev[0])
1362 1370 base = repo.changelog.rev(revlog.bin(self.applied[0].rev))
1363 1371 lastparent = repo.changelog.parentrevs(base)[0]
1364 1372 else:
1365 1373 if heads != [repo.changelog.node(rev[0])]:
1366 1374 raise util.Abort(_('revision %d has unmanaged children')
1367 1375 % rev[0])
1368 1376 lastparent = None
1369 1377
1370 1378 if git:
1371 1379 self.diffopts().git = True
1372 1380
1373 1381 for r in rev:
1374 1382 p1, p2 = repo.changelog.parentrevs(r)
1375 1383 n = repo.changelog.node(r)
1376 1384 if p2 != revlog.nullrev:
1377 1385 raise util.Abort(_('cannot import merge revision %d') % r)
1378 1386 if lastparent and lastparent != r:
1379 1387 raise util.Abort(_('revision %d is not the parent of %d')
1380 1388 % (r, lastparent))
1381 1389 lastparent = p1
1382 1390
1383 1391 if not patchname:
1384 1392 patchname = normname('%d.diff' % r)
1385 1393 checkseries(patchname)
1386 1394 checkfile(patchname)
1387 1395 self.full_series.insert(0, patchname)
1388 1396
1389 1397 patchf = self.opener(patchname, "w")
1390 1398 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1391 1399 patchf.close()
1392 1400
1393 1401 se = statusentry(revlog.hex(n), patchname)
1394 1402 self.applied.insert(0, se)
1395 1403
1396 1404 added.append(patchname)
1397 1405 patchname = None
1398 1406 self.parse_series()
1399 1407 self.applied_dirty = 1
1400 1408
1401 1409 for filename in files:
1402 1410 if existing:
1403 1411 if filename == '-':
1404 1412 raise util.Abort(_('-e is incompatible with import from -'))
1405 1413 if not patchname:
1406 1414 patchname = normname(filename)
1407 1415 if not os.path.isfile(self.join(patchname)):
1408 1416 raise util.Abort(_("patch %s does not exist") % patchname)
1409 1417 else:
1410 1418 try:
1411 1419 if filename == '-':
1412 1420 if not patchname:
1413 1421 raise util.Abort(_('need --name to import a patch from -'))
1414 1422 text = sys.stdin.read()
1415 1423 else:
1416 1424 text = file(filename).read()
1417 1425 except IOError:
1418 1426 raise util.Abort(_("unable to read %s") % patchname)
1419 1427 if not patchname:
1420 1428 patchname = normname(os.path.basename(filename))
1421 1429 checkfile(patchname)
1422 1430 patchf = self.opener(patchname, "w")
1423 1431 patchf.write(text)
1424 1432 checkseries(patchname)
1425 1433 index = self.full_series_end() + i
1426 1434 self.full_series[index:index] = [patchname]
1427 1435 self.parse_series()
1428 1436 self.ui.warn("adding %s to series file\n" % patchname)
1429 1437 i += 1
1430 1438 added.append(patchname)
1431 1439 patchname = None
1432 1440 self.series_dirty = 1
1433 1441 qrepo = self.qrepo()
1434 1442 if qrepo:
1435 1443 qrepo.add(added)
1436 1444
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    With --rev, mq will stop managing the named revisions. The
    patches must be applied and at the base of the stack. This option
    is useful when the patches have been applied upstream.

    Otherwise, the patches must not be applied.

    With --keep, the patch files are preserved in the patch directory."""
    mq = repo.mq
    mq.delete(repo, patches, opts)
    # persist series/status changes immediately
    mq.save_dirty()
    return 0
1451 1459
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    if not patch:
        last = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        last = q.series.index(patch) + 1
    # every applied patch lies before 'last' in the series
    return q.qseries(repo, length=last, status='A', summary=opts.get('summary'))
1462 1470
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.series_end(True)
    # propagate qseries' result, matching the sibling 'applied' command
    return q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1473 1481
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch will have the same name as its source file unless you
    give it a new one with --name.

    You can register an existing patch inside the patch directory
    with the --existing flag.

    With --force, an existing patch of the same name will be overwritten.

    An existing changeset may be placed under mq control with --rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With --git, patches imported with --rev will use the git diff
    format.
    """
    q = repo.mq
    q.qimport(repo, filename, patchname=opts['name'], rev=opts['rev'],
              existing=opts['existing'], force=opts['force'],
              git=opts['git'])
    q.save_dirty()
    return 0
1496 1504
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If -c is
    specified, qinit will create a separate nested repository
    for patches. Use qcommit to commit changes to this queue
    repository."""
    q = repo.mq
    r = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    if not r:
        return 0
    # seed the new queue repository with an ignore file and an empty
    # series file, then register them
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wopener('.hgignore', 'w')
        fp.write('syntax: glob\n')
        fp.write('status\n')
        fp.write('guards\n')
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wopener('series', 'w').close()
    r.add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
1519 1527
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied.  If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination.  If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default.  Use -p <url> to change.
    '''
    commands.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            # qbase is the first applied patch's changeset; for a remote
            # destination we restrict the clone to revisions below it
            qbase = revlog.bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                heads = dict.fromkeys(sr.heads())
                for h in sr.heads(qbase):
                    del heads[h]
                destrev = heads.keys()
                destrev.append(sr.changelog.parents(qbase)[0])
    ui.note(_('cloning main repo\n'))
    sr, dr = hg.clone(ui, sr, dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repo\n'))
    spr, dpr = hg.clone(ui, opts['patches'] or (sr.url() + '/.hg/patches'),
                        dr.url() + '/.hg/patches',
                        pull=opts['pull'],
                        update=not opts['noupdate'],
                        stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            # a local destination got everything; strip the patch
            # changesets so the clone starts with no patches applied
            ui.note(_('stripping applied patches from destination repo\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repo\n'))
            hg.update(dr, dr.changelog.tip())
1565 1573
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    r = repo.mq.qrepo()
    if not r:
        raise util.Abort('no queue repository')
    # delegate to the regular commit command, run against the queue repo
    commands.commit(r.ui, r, *pats, **opts)
1572 1580
def series(ui, repo, **opts):
    """print the entire series file"""
    q = repo.mq
    q.qseries(repo, missing=opts['missing'], summary=opts['summary'])
    return 0
1577 1585
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    napplied = len(q.applied)
    if not napplied:
        ui.write("No patches applied\n")
        return 1
    # the top patch is the last applied entry
    return q.qseries(repo, start=napplied - 1, length=1, status='A',
                     summary=opts.get('summary'))
1588 1596
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    pos = q.series_end()
    if pos == len(q.series):
        ui.write("All patches applied\n")
        return 1
    return q.qseries(repo, start=pos, length=1, summary=opts.get('summary'))
1597 1605
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    count = len(q.applied)
    if not count:
        ui.write("No patches applied\n")
        return 1
    if count == 1:
        ui.write("Only one patch applied\n")
        return 1
    # next-to-last applied entry is the previous patch
    return q.qseries(repo, start=count - 2, length=1, status='A',
                     summary=opts.get('summary'))
1610 1618
def new(ui, repo, patch, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch
    (if any). It will refuse to run if there are any outstanding
    changes unless -f is specified, in which case the patch will
    be initialised with them.

    -e, -m or -l set the patch header as well as the commit message.
    If none is specified, the patch header is empty and the
    commit message is 'New patch: PATCH'"""
    q = repo.mq
    msg = commands.logmessage(opts)
    if opts['edit']:
        # let the user compose the message interactively
        msg = ui.edit(msg, ui.username())
    q.new(repo, patch, msg=msg, force=opts['force'])
    q.save_dirty()
    return 0
1629 1637
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will contain only
    the modifications that match those patterns; the remaining modifications
    will remain in the working directory.

    hg add/remove/copy/rename work as usual, though you might want to use
    git-style patches (--git or [diff] git=1) to track copies and renames.
    """
    q = repo.mq
    msg = commands.logmessage(opts)
    if opts['edit']:
        if msg:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # seed the editor with the current top patch's header
        top = q.applied[-1].name
        (msg, comment, user, date, hasdiff) = q.readheaders(top)
        msg = ui.edit('\n'.join(msg), user or ui.username())
    ret = q.refresh(repo, pats, msg=msg, **opts)
    q.save_dirty()
    return ret
1651 1659
def diff(ui, repo, *pats, **opts):
    """diff of the current patch"""
    q = repo.mq
    q.diff(repo, pats, opts)
    return 0
1656 1664
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will
    be deleted. With -k/--keep, the folded patch files will not
    be removed afterwards.

    The header for each folded patch will be concatenated with
    the current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = commands.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            # really skip duplicates: the warning used to lack its
            # newline and, without the continue, the duplicate patch
            # was folded twice anyway
            ui.warn(_('Skipping already folded patch %s\n') % p)
            continue
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # collect each folded patch's header for the combined message
            messages.append(q.readheaders(p)[0])
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # concatenate headers, separated by '* * *' lines
        message, comments, user = q.readheaders(parent)[0:3]
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1715 1723
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    To set a negative guard "-foo" on topmost patch ("--" is needed so
    hg will not interpret "-foo" as an option):
        hg qguard -- -foo

    To set guards on another patch:
        hg qguard other.patch +2.6.17 -stable
    '''
    def status(idx):
        # print "name: guard guard ..." for series index idx
        guards = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(guards)))
    q = repo.mq
    patch = None
    args = list(args)
    if opts['list']:
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    # a first argument starting with '-' or '+' is a guard spec, not a
    # patch name: in that case the top applied patch is implied
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # set (or with --none, clear) the guards on the patch
        idx = q.find_series(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(idx, args)
        q.save_dirty()
    else:
        status(q.series.index(q.lookup(patch)))
1763 1771
def header(ui, repo, patch=None):
    """Print the header of the topmost or specified patch"""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write('No patches applied\n')
            return 1
        patch = q.lookup('qtip')
    # use the local q handle rather than reaching through repo.mq again
    message = q.readheaders(patch)[0]

    ui.write('\n'.join(message) + '\n')
1778 1786
def lastsavename(path):
    """Return (fullpath, index) of the highest-numbered saved queue for
    PATH, or (None, None) when no save exists.

    Saved queues live next to PATH as "<base>.<N>".  The base name is
    regex-escaped and the numeric suffix anchored; previously the bare
    pattern "%s.([0-9]+)" let the unescaped '.' match any character, so
    unrelated names like "<base>X3" or "<base>.3bak" matched too.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    namere = re.compile(r"%s\.([0-9]+)$" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
1795 1803
1796 1804 def savename(path):
1797 1805 (last, index) = lastsavename(path)
1798 1806 if last is None:
1799 1807 index = 0
1800 1808 newpath = path + ".%d" % (index + 1)
1801 1809 return newpath
1802 1810
1803 1811 def push(ui, repo, patch=None, **opts):
1804 1812 """push the next patch onto the stack"""
1805 1813 q = repo.mq
1806 1814 mergeq = None
1807 1815
1808 1816 if opts['all']:
1809 1817 if not q.series:
1810 1818 ui.warn(_('no patches in series\n'))
1811 1819 return 0
1812 1820 patch = q.series[-1]
1813 1821 if opts['merge']:
1814 1822 if opts['name']:
1815 1823 newpath = opts['name']
1816 1824 else:
1817 1825 newpath, i = lastsavename(q.path)
1818 1826 if not newpath:
1819 1827 ui.warn("no saved queues found, please use -n\n")
1820 1828 return 1
1821 1829 mergeq = queue(ui, repo.join(""), newpath)
1822 1830 ui.warn("merging with queue at: %s\n" % mergeq.path)
1823 1831 ret = q.push(repo, patch, force=opts['force'], list=opts['list'],
1824 1832 mergeq=mergeq)
1825 1833 q.save_dirty()
1826 1834 return ret
1827 1835
1828 1836 def pop(ui, repo, patch=None, **opts):
1829 1837 """pop the current patch off the stack"""
1830 1838 localupdate = True
1831 1839 if opts['name']:
1832 1840 q = queue(ui, repo.join(""), repo.join(opts['name']))
1833 1841 ui.warn('using patch queue: %s\n' % q.path)
1834 1842 localupdate = False
1835 1843 else:
1836 1844 q = repo.mq
1837 1845 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
1838 1846 all=opts['all'])
1839 1847 q.save_dirty()
1840 1848 return ret
1841 1849
1842 1850 def rename(ui, repo, patch, name=None, **opts):
1843 1851 """rename a patch
1844 1852
1845 1853 With one argument, renames the current patch to PATCH1.
1846 1854 With two arguments, renames PATCH1 to PATCH2."""
1847 1855
1848 1856 q = repo.mq
1849 1857
1850 1858 if not name:
1851 1859 name = patch
1852 1860 patch = None
1853 1861
1854 1862 if patch:
1855 1863 patch = q.lookup(patch)
1856 1864 else:
1857 1865 if not q.applied:
1858 1866 ui.write(_('No patches applied\n'))
1859 1867 return
1860 1868 patch = q.lookup('qtip')
1861 1869 absdest = q.join(name)
1862 1870 if os.path.isdir(absdest):
1863 1871 name = normname(os.path.join(name, os.path.basename(patch)))
1864 1872 absdest = q.join(name)
1865 1873 if os.path.exists(absdest):
1866 1874 raise util.Abort(_('%s already exists') % absdest)
1867 1875
1868 1876 if name in q.series:
1869 1877 raise util.Abort(_('A patch named %s already exists in the series file') % name)
1870 1878
1871 1879 if ui.verbose:
1872 1880 ui.write('Renaming %s to %s\n' % (patch, name))
1873 1881 i = q.find_series(patch)
1874 1882 guards = q.guard_re.findall(q.full_series[i])
1875 1883 q.full_series[i] = name + ''.join([' #' + g for g in guards])
1876 1884 q.parse_series()
1877 1885 q.series_dirty = 1
1878 1886
1879 1887 info = q.isapplied(patch)
1880 1888 if info:
1881 1889 q.applied[info[0]] = statusentry(info[1], name)
1882 1890 q.applied_dirty = 1
1883 1891
1884 1892 util.rename(q.join(patch), absdest)
1885 1893 r = q.qrepo()
1886 1894 if r:
1887 1895 wlock = r.wlock()
1888 1896 if r.dirstate.state(name) == 'r':
1889 1897 r.undelete([name], wlock)
1890 1898 r.copy(patch, name, wlock)
1891 1899 r.remove([patch], False, wlock)
1892 1900
1893 1901 q.save_dirty()
1894 1902
1895 1903 def restore(ui, repo, rev, **opts):
1896 1904 """restore the queue state saved by a rev"""
1897 1905 rev = repo.lookup(rev)
1898 1906 q = repo.mq
1899 1907 q.restore(repo, rev, delete=opts['delete'],
1900 1908 qupdate=opts['update'])
1901 1909 q.save_dirty()
1902 1910 return 0
1903 1911
1904 1912 def save(ui, repo, **opts):
1905 1913 """save current queue state"""
1906 1914 q = repo.mq
1907 1915 message = commands.logmessage(opts)
1908 1916 ret = q.save(repo, msg=message)
1909 1917 if ret:
1910 1918 return ret
1911 1919 q.save_dirty()
1912 1920 if opts['copy']:
1913 1921 path = q.path
1914 1922 if opts['name']:
1915 1923 newpath = os.path.join(q.basepath, opts['name'])
1916 1924 if os.path.exists(newpath):
1917 1925 if not os.path.isdir(newpath):
1918 1926 raise util.Abort(_('destination %s exists and is not '
1919 1927 'a directory') % newpath)
1920 1928 if not opts['force']:
1921 1929 raise util.Abort(_('destination %s exists, '
1922 1930 'use -f to force') % newpath)
1923 1931 else:
1924 1932 newpath = savename(path)
1925 1933 ui.warn("copy %s to %s\n" % (path, newpath))
1926 1934 util.copyfiles(path, newpath)
1927 1935 if opts['empty']:
1928 1936 try:
1929 1937 os.unlink(q.join(q.status_path))
1930 1938 except:
1931 1939 pass
1932 1940 return 0
1933 1941
1934 1942 def strip(ui, repo, rev, **opts):
1935 1943 """strip a revision and all later revs on the same branch"""
1936 1944 rev = repo.lookup(rev)
1937 1945 backup = 'all'
1938 1946 if opts['backup']:
1939 1947 backup = 'strip'
1940 1948 elif opts['nobackup']:
1941 1949 backup = 'none'
1942 1950 update = repo.dirstate.parents()[0] != revlog.nullid
1943 1951 repo.mq.strip(repo, rev, backup=backup, update=update)
1944 1952 return 0
1945 1953
1946 1954 def select(ui, repo, *args, **opts):
1947 1955 '''set or print guarded patches to push
1948 1956
1949 1957 Use the qguard command to set or print guards on patch, then use
1950 1958 qselect to tell mq which guards to use. A patch will be pushed if it
1951 1959 has no guards or any positive guards match the currently selected guard,
1952 1960 but will not be pushed if any negative guards match the current guard.
1953 1961 For example:
1954 1962
1955 1963 qguard foo.patch -stable (negative guard)
1956 1964 qguard bar.patch +stable (positive guard)
1957 1965 qselect stable
1958 1966
1959 1967 This activates the "stable" guard. mq will skip foo.patch (because
1960 1968 it has a negative match) but push bar.patch (because it
1961 1969 has a positive match).
1962 1970
1963 1971 With no arguments, prints the currently active guards.
1964 1972 With one argument, sets the active guard.
1965 1973
1966 1974 Use -n/--none to deactivate guards (no other arguments needed).
1967 1975 When no guards are active, patches with positive guards are skipped
1968 1976 and patches with negative guards are pushed.
1969 1977
1970 1978 qselect can change the guards on applied patches. It does not pop
1971 1979 guarded patches by default. Use --pop to pop back to the last applied
1972 1980 patch that is not guarded. Use --reapply (which implies --pop) to push
1973 1981 back to the current patch afterwards, but skip guarded patches.
1974 1982
1975 1983 Use -s/--series to print a list of all guards in the series file (no
1976 1984 other arguments needed). Use -v for more information.'''
1977 1985
1978 1986 q = repo.mq
1979 1987 guards = q.active()
1980 1988 if args or opts['none']:
1981 1989 old_unapplied = q.unapplied(repo)
1982 1990 old_guarded = [i for i in xrange(len(q.applied)) if
1983 1991 not q.pushable(i)[0]]
1984 1992 q.set_active(args)
1985 1993 q.save_dirty()
1986 1994 if not args:
1987 1995 ui.status(_('guards deactivated\n'))
1988 1996 if not opts['pop'] and not opts['reapply']:
1989 1997 unapplied = q.unapplied(repo)
1990 1998 guarded = [i for i in xrange(len(q.applied))
1991 1999 if not q.pushable(i)[0]]
1992 2000 if len(unapplied) != len(old_unapplied):
1993 2001 ui.status(_('number of unguarded, unapplied patches has '
1994 2002 'changed from %d to %d\n') %
1995 2003 (len(old_unapplied), len(unapplied)))
1996 2004 if len(guarded) != len(old_guarded):
1997 2005 ui.status(_('number of guarded, applied patches has changed '
1998 2006 'from %d to %d\n') %
1999 2007 (len(old_guarded), len(guarded)))
2000 2008 elif opts['series']:
2001 2009 guards = {}
2002 2010 noguards = 0
2003 2011 for gs in q.series_guards:
2004 2012 if not gs:
2005 2013 noguards += 1
2006 2014 for g in gs:
2007 2015 guards.setdefault(g, 0)
2008 2016 guards[g] += 1
2009 2017 if ui.verbose:
2010 2018 guards['NONE'] = noguards
2011 2019 guards = guards.items()
2012 2020 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2013 2021 if guards:
2014 2022 ui.note(_('guards in series file:\n'))
2015 2023 for guard, count in guards:
2016 2024 ui.note('%2d ' % count)
2017 2025 ui.write(guard, '\n')
2018 2026 else:
2019 2027 ui.note(_('no guards in series file\n'))
2020 2028 else:
2021 2029 if guards:
2022 2030 ui.note(_('active guards:\n'))
2023 2031 for g in guards:
2024 2032 ui.write(g, '\n')
2025 2033 else:
2026 2034 ui.write(_('no active guards\n'))
2027 2035 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2028 2036 popped = False
2029 2037 if opts['pop'] or opts['reapply']:
2030 2038 for i in xrange(len(q.applied)):
2031 2039 pushable, reason = q.pushable(i)
2032 2040 if not pushable:
2033 2041 ui.status(_('popping guarded patches\n'))
2034 2042 popped = True
2035 2043 if i == 0:
2036 2044 q.pop(repo, all=True)
2037 2045 else:
2038 2046 q.pop(repo, i-1)
2039 2047 break
2040 2048 if popped:
2041 2049 try:
2042 2050 if reapply:
2043 2051 ui.status(_('reapplying unguarded patches\n'))
2044 2052 q.push(repo, reapply)
2045 2053 finally:
2046 2054 q.save_dirty()
2047 2055
2048 2056 def reposetup(ui, repo):
2049 2057 class mqrepo(repo.__class__):
2050 2058 def abort_if_wdir_patched(self, errmsg, force=False):
2051 2059 if self.mq.applied and not force:
2052 2060 parent = revlog.hex(self.dirstate.parents()[0])
2053 2061 if parent in [s.rev for s in self.mq.applied]:
2054 2062 raise util.Abort(errmsg)
2055 2063
2056 2064 def commit(self, *args, **opts):
2057 2065 if len(args) >= 6:
2058 2066 force = args[5]
2059 2067 else:
2060 2068 force = opts.get('force')
2061 2069 self.abort_if_wdir_patched(
2062 2070 _('cannot commit over an applied mq patch'),
2063 2071 force)
2064 2072
2065 2073 return super(mqrepo, self).commit(*args, **opts)
2066 2074
2067 2075 def push(self, remote, force=False, revs=None):
2068 2076 if self.mq.applied and not force and not revs:
2069 2077 raise util.Abort(_('source has mq patches applied'))
2070 2078 return super(mqrepo, self).push(remote, force, revs)
2071 2079
2072 2080 def tags(self):
2073 2081 if self.tagscache:
2074 2082 return self.tagscache
2075 2083
2076 2084 tagscache = super(mqrepo, self).tags()
2077 2085
2078 2086 q = self.mq
2079 2087 if not q.applied:
2080 2088 return tagscache
2081 2089
2082 2090 mqtags = [(revlog.bin(patch.rev), patch.name) for patch in q.applied]
2083 2091 mqtags.append((mqtags[-1][0], 'qtip'))
2084 2092 mqtags.append((mqtags[0][0], 'qbase'))
2085 2093 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2086 2094 for patch in mqtags:
2087 2095 if patch[1] in tagscache:
2088 2096 self.ui.warn('Tag %s overrides mq patch of the same name\n' % patch[1])
2089 2097 else:
2090 2098 tagscache[patch[1]] = patch[0]
2091 2099
2092 2100 return tagscache
2093 2101
2094 2102 def _branchtags(self):
2095 2103 q = self.mq
2096 2104 if not q.applied:
2097 2105 return super(mqrepo, self)._branchtags()
2098 2106
2099 2107 self.branchcache = {} # avoid recursion in changectx
2100 2108 cl = self.changelog
2101 2109 partial, last, lrev = self._readbranchcache()
2102 2110
2103 2111 qbase = cl.rev(revlog.bin(q.applied[0].rev))
2104 2112 start = lrev + 1
2105 2113 if start < qbase:
2106 2114 # update the cache (excluding the patches) and save it
2107 2115 self._updatebranchcache(partial, lrev+1, qbase)
2108 2116 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2109 2117 start = qbase
2110 2118 # if start = qbase, the cache is as updated as it should be.
2111 2119 # if start > qbase, the cache includes (part of) the patches.
2112 2120 # we might as well use it, but we won't save it.
2113 2121
2114 2122 # update the cache up to the tip
2115 2123 self._updatebranchcache(partial, start, cl.count())
2116 2124
2117 2125 return partial
2118 2126
2119 2127 if repo.local():
2120 2128 repo.__class__ = mqrepo
2121 2129 repo.mq = queue(ui, repo.join(""))
2122 2130
2123 2131 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2124 2132
2125 2133 cmdtable = {
2126 2134 "qapplied": (applied, [] + seriesopts, 'hg qapplied [-s] [PATCH]'),
2127 2135 "qclone": (clone,
2128 2136 [('', 'pull', None, _('use pull protocol to copy metadata')),
2129 2137 ('U', 'noupdate', None, _('do not update the new working directories')),
2130 2138 ('', 'uncompressed', None,
2131 2139 _('use uncompressed transfer (fast over LAN)')),
2132 2140 ('e', 'ssh', '', _('specify ssh command to use')),
2133 2141 ('p', 'patches', '', _('location of source patch repo')),
2134 2142 ('', 'remotecmd', '',
2135 2143 _('specify hg command to run on the remote side'))],
2136 2144 'hg qclone [OPTION]... SOURCE [DEST]'),
2137 2145 "qcommit|qci":
2138 2146 (commit,
2139 2147 commands.table["^commit|ci"][1],
2140 2148 'hg qcommit [OPTION]... [FILE]...'),
2141 2149 "^qdiff": (diff,
2142 2150 [('g', 'git', None, _('use git extended diff format')),
2143 2151 ('I', 'include', [], _('include names matching the given patterns')),
2144 2152 ('X', 'exclude', [], _('exclude names matching the given patterns'))],
2145 2153 'hg qdiff [-I] [-X] [FILE]...'),
2146 2154 "qdelete|qremove|qrm":
2147 2155 (delete,
2148 2156 [('k', 'keep', None, _('keep patch file')),
2149 2157 ('r', 'rev', [], _('stop managing a revision'))],
2150 2158 'hg qdelete [-k] [-r REV]... PATCH...'),
2151 2159 'qfold':
2152 2160 (fold,
2153 2161 [('e', 'edit', None, _('edit patch header')),
2154 2162 ('k', 'keep', None, _('keep folded patch files'))
2155 2163 ] + commands.commitopts,
2156 2164 'hg qfold [-e] [-m <text>] [-l <file] PATCH...'),
2157 2165 'qguard': (guard, [('l', 'list', None, _('list all patches and guards')),
2158 2166 ('n', 'none', None, _('drop all guards'))],
2159 2167 'hg qguard [PATCH] [+GUARD]... [-GUARD]...'),
2160 2168 'qheader': (header, [],
2161 2169 _('hg qheader [PATCH]')),
2162 2170 "^qimport":
2163 2171 (qimport,
2164 2172 [('e', 'existing', None, 'import file in patch dir'),
2165 2173 ('n', 'name', '', 'patch file name'),
2166 2174 ('f', 'force', None, 'overwrite existing files'),
2167 2175 ('r', 'rev', [], 'place existing revisions under mq control'),
2168 2176 ('g', 'git', None, _('use git extended diff format'))],
2169 2177 'hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...'),
2170 2178 "^qinit":
2171 2179 (init,
2172 2180 [('c', 'create-repo', None, 'create queue repository')],
2173 2181 'hg qinit [-c]'),
2174 2182 "qnew":
2175 2183 (new,
2176 2184 [('e', 'edit', None, _('edit commit message')),
2177 2185 ('f', 'force', None, _('import uncommitted changes into patch'))
2178 2186 ] + commands.commitopts,
2179 2187 'hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH'),
2180 2188 "qnext": (next, [] + seriesopts, 'hg qnext [-s]'),
2181 2189 "qprev": (prev, [] + seriesopts, 'hg qprev [-s]'),
2182 2190 "^qpop":
2183 2191 (pop,
2184 2192 [('a', 'all', None, 'pop all patches'),
2185 2193 ('n', 'name', '', 'queue name to pop'),
2186 2194 ('f', 'force', None, 'forget any local changes')],
2187 2195 'hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]'),
2188 2196 "^qpush":
2189 2197 (push,
2190 2198 [('f', 'force', None, 'apply if the patch has rejects'),
2191 2199 ('l', 'list', None, 'list patch name in commit text'),
2192 2200 ('a', 'all', None, 'apply all patches'),
2193 2201 ('m', 'merge', None, 'merge from another queue'),
2194 2202 ('n', 'name', '', 'merge queue name')],
2195 2203 'hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]'),
2196 2204 "^qrefresh":
2197 2205 (refresh,
2198 2206 [('e', 'edit', None, _('edit commit message')),
2199 2207 ('g', 'git', None, _('use git extended diff format')),
2200 2208 ('s', 'short', None, 'refresh only files already in the patch'),
2201 2209 ('I', 'include', [], _('include names matching the given patterns')),
2202 2210 ('X', 'exclude', [], _('exclude names matching the given patterns'))
2203 2211 ] + commands.commitopts,
2204 2212 'hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'),
2205 2213 'qrename|qmv':
2206 2214 (rename, [], 'hg qrename PATCH1 [PATCH2]'),
2207 2215 "qrestore":
2208 2216 (restore,
2209 2217 [('d', 'delete', None, 'delete save entry'),
2210 2218 ('u', 'update', None, 'update queue working dir')],
2211 2219 'hg qrestore [-d] [-u] REV'),
2212 2220 "qsave":
2213 2221 (save,
2214 2222 [('c', 'copy', None, 'copy patch directory'),
2215 2223 ('n', 'name', '', 'copy directory name'),
2216 2224 ('e', 'empty', None, 'clear queue status file'),
2217 2225 ('f', 'force', None, 'force copy')] + commands.commitopts,
2218 2226 'hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'),
2219 2227 "qselect": (select,
2220 2228 [('n', 'none', None, _('disable all guards')),
2221 2229 ('s', 'series', None, _('list all guards in series file')),
2222 2230 ('', 'pop', None,
2223 2231 _('pop to before first guarded applied patch')),
2224 2232 ('', 'reapply', None, _('pop, then reapply patches'))],
2225 2233 'hg qselect [OPTION]... [GUARD]...'),
2226 2234 "qseries":
2227 2235 (series,
2228 2236 [('m', 'missing', None, 'print patches not in series')] + seriesopts,
2229 2237 'hg qseries [-ms]'),
2230 2238 "^strip":
2231 2239 (strip,
2232 2240 [('f', 'force', None, 'force multi-head removal'),
2233 2241 ('b', 'backup', None, 'bundle unrelated changesets'),
2234 2242 ('n', 'nobackup', None, 'no backups')],
2235 2243 'hg strip [-f] [-b] [-n] REV'),
2236 2244 "qtop": (top, [] + seriesopts, 'hg qtop [-s]'),
2237 2245 "qunapplied": (unapplied, [] + seriesopts, 'hg qunapplied [-s] [PATCH]'),
2238 2246 }
@@ -1,554 +1,555
1 1 """
2 2 dirstate.py - working directory tracking for mercurial
3 3
4 4 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
5 5
6 6 This software may be used and distributed according to the terms
7 7 of the GNU General Public License, incorporated herein by reference.
8 8 """
9 9
10 10 from node import *
11 11 from i18n import _
12 12 import struct, os, time, bisect, stat, strutil, util, re, errno
13 13
14 14 class dirstate(object):
15 15 format = ">cllll"
16 16
17 17 def __init__(self, opener, ui, root):
18 18 self.opener = opener
19 19 self.root = root
20 20 self.dirty = 0
21 21 self.ui = ui
22 22 self.map = None
23 23 self.pl = None
24 24 self.dirs = None
25 25 self.copymap = {}
26 26 self.ignorefunc = None
27 27 self._branch = None
28 28
29 29 def wjoin(self, f):
30 30 return os.path.join(self.root, f)
31 31
32 32 def getcwd(self):
33 33 cwd = os.getcwd()
34 34 if cwd == self.root: return ''
35 35 # self.root ends with a path separator if self.root is '/' or 'C:\'
36 36 rootsep = self.root
37 37 if not rootsep.endswith(os.sep):
38 38 rootsep += os.sep
39 39 if cwd.startswith(rootsep):
40 40 return cwd[len(rootsep):]
41 41 else:
42 42 # we're outside the repo. return an absolute path.
43 43 return cwd
44 44
45 45 def hgignore(self):
46 46 '''return the contents of .hgignore files as a list of patterns.
47 47
48 48 the files parsed for patterns include:
49 49 .hgignore in the repository root
50 50 any additional files specified in the [ui] section of ~/.hgrc
51 51
52 52 trailing white space is dropped.
53 53 the escape character is backslash.
54 54 comments start with #.
55 55 empty lines are skipped.
56 56
57 57 lines can be of the following formats:
58 58
59 59 syntax: regexp # defaults following lines to non-rooted regexps
60 60 syntax: glob # defaults following lines to non-rooted globs
61 61 re:pattern # non-rooted regular expression
62 62 glob:pattern # non-rooted glob
63 63 pattern # pattern of the current default type'''
64 64 syntaxes = {'re': 'relre:', 'regexp': 'relre:', 'glob': 'relglob:'}
65 65 def parselines(fp):
66 66 for line in fp:
67 67 escape = False
68 68 for i in xrange(len(line)):
69 69 if escape: escape = False
70 70 elif line[i] == '\\': escape = True
71 71 elif line[i] == '#': break
72 72 line = line[:i].rstrip()
73 73 if line: yield line
74 74 repoignore = self.wjoin('.hgignore')
75 75 files = [repoignore]
76 76 files.extend(self.ui.hgignorefiles())
77 77 pats = {}
78 78 for f in files:
79 79 try:
80 80 pats[f] = []
81 81 fp = open(f)
82 82 syntax = 'relre:'
83 83 for line in parselines(fp):
84 84 if line.startswith('syntax:'):
85 85 s = line[7:].strip()
86 86 try:
87 87 syntax = syntaxes[s]
88 88 except KeyError:
89 89 self.ui.warn(_("%s: ignoring invalid "
90 90 "syntax '%s'\n") % (f, s))
91 91 continue
92 92 pat = syntax + line
93 93 for s in syntaxes.values():
94 94 if line.startswith(s):
95 95 pat = line
96 96 break
97 97 pats[f].append(pat)
98 98 except IOError, inst:
99 99 if f != repoignore:
100 100 self.ui.warn(_("skipping unreadable ignore file"
101 101 " '%s': %s\n") % (f, inst.strerror))
102 102 return pats
103 103
104 104 def ignore(self, fn):
105 105 '''default match function used by dirstate and
106 106 localrepository. this honours the repository .hgignore file
107 107 and any other files specified in the [ui] section of .hgrc.'''
108 108 if not self.ignorefunc:
109 109 ignore = self.hgignore()
110 110 allpats = []
111 111 [allpats.extend(patlist) for patlist in ignore.values()]
112 112 if allpats:
113 113 try:
114 114 files, self.ignorefunc, anypats = (
115 115 util.matcher(self.root, inc=allpats, src='.hgignore'))
116 116 except util.Abort:
117 117 # Re-raise an exception where the src is the right file
118 118 for f, patlist in ignore.items():
119 119 files, self.ignorefunc, anypats = (
120 120 util.matcher(self.root, inc=patlist, src=f))
121 121 else:
122 122 self.ignorefunc = util.never
123 123 return self.ignorefunc(fn)
124 124
125 125 def __del__(self):
126 126 if self.dirty:
127 127 self.write()
128 128
129 129 def __getitem__(self, key):
130 130 try:
131 131 return self.map[key]
132 132 except TypeError:
133 133 self.lazyread()
134 134 return self[key]
135 135
136 136 def __contains__(self, key):
137 137 self.lazyread()
138 138 return key in self.map
139 139
140 140 def parents(self):
141 141 self.lazyread()
142 142 return self.pl
143 143
144 144 def branch(self):
145 145 if not self._branch:
146 146 try:
147 147 self._branch = self.opener("branch").read().strip()\
148 148 or "default"
149 149 except IOError:
150 150 self._branch = "default"
151 151 return self._branch
152 152
153 153 def markdirty(self):
154 154 if not self.dirty:
155 155 self.dirty = 1
156 156
157 157 def setparents(self, p1, p2=nullid):
158 158 self.lazyread()
159 159 self.markdirty()
160 160 self.pl = p1, p2
161 161
162 162 def setbranch(self, branch):
163 163 self._branch = branch
164 164 self.opener("branch", "w").write(branch + '\n')
165 165
166 166 def state(self, key):
167 167 try:
168 168 return self[key][0]
169 169 except KeyError:
170 170 return "?"
171 171
172 172 def lazyread(self):
173 173 if self.map is None:
174 174 self.read()
175 175
176 176 def parse(self, st):
177 177 self.pl = [st[:20], st[20: 40]]
178 178
179 179 # deref fields so they will be local in loop
180 180 map = self.map
181 181 copymap = self.copymap
182 182 format = self.format
183 183 unpack = struct.unpack
184 184
185 185 pos = 40
186 186 e_size = struct.calcsize(format)
187 187
188 188 while pos < len(st):
189 189 newpos = pos + e_size
190 190 e = unpack(format, st[pos:newpos])
191 191 l = e[4]
192 192 pos = newpos
193 193 newpos = pos + l
194 194 f = st[pos:newpos]
195 195 if '\0' in f:
196 196 f, c = f.split('\0')
197 197 copymap[f] = c
198 198 map[f] = e[:4]
199 199 pos = newpos
200 200
201 201 def read(self):
202 202 self.map = {}
203 203 self.pl = [nullid, nullid]
204 204 try:
205 205 st = self.opener("dirstate").read()
206 206 if st:
207 207 self.parse(st)
208 208 except IOError, err:
209 209 if err.errno != errno.ENOENT: raise
210 210
211 211 def copy(self, source, dest):
212 212 self.lazyread()
213 213 self.markdirty()
214 214 self.copymap[dest] = source
215 215
216 216 def copied(self, file):
217 217 return self.copymap.get(file, None)
218 218
219 219 def copies(self):
220 220 return self.copymap
221 221
222 222 def initdirs(self):
223 223 if self.dirs is None:
224 224 self.dirs = {}
225 225 for f in self.map:
226 226 self.updatedirs(f, 1)
227 227
228 228 def updatedirs(self, path, delta):
229 229 if self.dirs is not None:
230 230 for c in strutil.findall(path, '/'):
231 231 pc = path[:c]
232 232 self.dirs.setdefault(pc, 0)
233 233 self.dirs[pc] += delta
234 234
235 235 def checkinterfering(self, files):
236 236 def prefixes(f):
237 237 for c in strutil.rfindall(f, '/'):
238 238 yield f[:c]
239 239 self.lazyread()
240 240 self.initdirs()
241 241 seendirs = {}
242 242 for f in files:
243 243 # shadows
244 244 if self.dirs.get(f):
245 245 raise util.Abort(_('directory named %r already in dirstate') %
246 246 f)
247 247 for d in prefixes(f):
248 248 if d in seendirs:
249 249 break
250 250 if d in self.map:
251 251 raise util.Abort(_('file named %r already in dirstate') %
252 252 d)
253 253 seendirs[d] = True
254 254 # disallowed
255 255 if '\r' in f or '\n' in f:
256 256 raise util.Abort(_("'\\n' and '\\r' disallowed in filenames"))
257 257
258 258 def update(self, files, state, **kw):
259 259 ''' current states:
260 260 n normal
261 261 m needs merging
262 262 r marked for removal
263 263 a marked for addition'''
264 264
265 265 if not files: return
266 266 self.lazyread()
267 267 self.markdirty()
268 268 if state == "a":
269 269 self.initdirs()
270 270 self.checkinterfering(files)
271 271 for f in files:
272 272 if state == "r":
273 273 self.map[f] = ('r', 0, 0, 0)
274 274 self.updatedirs(f, -1)
275 275 else:
276 276 if state == "a":
277 277 self.updatedirs(f, 1)
278 278 s = os.lstat(self.wjoin(f))
279 279 st_size = kw.get('st_size', s.st_size)
280 280 st_mtime = kw.get('st_mtime', s.st_mtime)
281 281 self.map[f] = (state, s.st_mode, st_size, st_mtime)
282 282 if self.copymap.has_key(f):
283 283 del self.copymap[f]
284 284
285 285 def forget(self, files):
286 286 if not files: return
287 287 self.lazyread()
288 288 self.markdirty()
289 289 self.initdirs()
290 290 for f in files:
291 291 try:
292 292 del self.map[f]
293 293 self.updatedirs(f, -1)
294 294 except KeyError:
295 295 self.ui.warn(_("not in dirstate: %s!\n") % f)
296 296 pass
297 297
298 298 def clear(self):
299 299 self.map = {}
300 300 self.copymap = {}
301 301 self.dirs = None
302 302 self.markdirty()
303 303
304 304 def rebuild(self, parent, files):
305 305 self.clear()
306 306 for f in files:
307 307 if files.execf(f):
308 308 self.map[f] = ('n', 0777, -1, 0)
309 309 else:
310 310 self.map[f] = ('n', 0666, -1, 0)
311 311 self.pl = (parent, nullid)
312 312 self.markdirty()
313 313
314 314 def write(self):
315 315 if not self.dirty:
316 316 return
317 st = self.opener("dirstate", "w", atomic=True)
317 st = self.opener("dirstate", "w", atomictemp=True)
318 318 st.write("".join(self.pl))
319 319 for f, e in self.map.items():
320 320 c = self.copied(f)
321 321 if c:
322 322 f = f + "\0" + c
323 323 e = struct.pack(self.format, e[0], e[1], e[2], e[3], len(f))
324 324 st.write(e + f)
325 st.rename()
325 326 self.dirty = 0
326 327
327 328 def filterfiles(self, files):
328 329 ret = {}
329 330 unknown = []
330 331
331 332 for x in files:
332 333 if x == '.':
333 334 return self.map.copy()
334 335 if x not in self.map:
335 336 unknown.append(x)
336 337 else:
337 338 ret[x] = self.map[x]
338 339
339 340 if not unknown:
340 341 return ret
341 342
342 343 b = self.map.keys()
343 344 b.sort()
344 345 blen = len(b)
345 346
346 347 for x in unknown:
347 348 bs = bisect.bisect(b, "%s%s" % (x, '/'))
348 349 while bs < blen:
349 350 s = b[bs]
350 351 if len(s) > len(x) and s.startswith(x):
351 352 ret[s] = self.map[s]
352 353 else:
353 354 break
354 355 bs += 1
355 356 return ret
356 357
357 358 def supported_type(self, f, st, verbose=False):
358 359 if stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode):
359 360 return True
360 361 if verbose:
361 362 kind = 'unknown'
362 363 if stat.S_ISCHR(st.st_mode): kind = _('character device')
363 364 elif stat.S_ISBLK(st.st_mode): kind = _('block device')
364 365 elif stat.S_ISFIFO(st.st_mode): kind = _('fifo')
365 366 elif stat.S_ISSOCK(st.st_mode): kind = _('socket')
366 367 elif stat.S_ISDIR(st.st_mode): kind = _('directory')
367 368 self.ui.warn(_('%s: unsupported file type (type is %s)\n') % (
368 369 util.pathto(self.root, self.getcwd(), f),
369 370 kind))
370 371 return False
371 372
372 373 def walk(self, files=None, match=util.always, badmatch=None):
373 374 # filter out the stat
374 375 for src, f, st in self.statwalk(files, match, badmatch=badmatch):
375 376 yield src, f
376 377
377 378 def statwalk(self, files=None, match=util.always, ignored=False,
378 379 badmatch=None, directories=False):
379 380 '''
380 381 walk recursively through the directory tree, finding all files
381 382 matched by the match function
382 383
383 384 results are yielded in a tuple (src, filename, st), where src
384 385 is one of:
385 386 'f' the file was found in the directory tree
386 387 'd' the file is a directory of the tree
387 388 'm' the file was only in the dirstate and not in the tree
388 389 'b' file was not found and matched badmatch
389 390
390 391 and st is the stat result if the file was found in the directory.
391 392 '''
392 393 self.lazyread()
393 394
394 395 # walk all files by default
395 396 if not files:
396 397 files = ['.']
397 398 dc = self.map.copy()
398 399 else:
399 400 files = util.unique(files)
400 401 dc = self.filterfiles(files)
401 402
402 403 def imatch(file_):
403 404 if file_ not in dc and self.ignore(file_):
404 405 return False
405 406 return match(file_)
406 407
407 408 ignore = self.ignore
408 409 if ignored:
409 410 imatch = match
410 411 ignore = util.never
411 412
412 413 # self.root may end with a path separator when self.root == '/'
413 414 common_prefix_len = len(self.root)
414 415 if not self.root.endswith(os.sep):
415 416 common_prefix_len += 1
416 417 # recursion free walker, faster than os.walk.
417 418 def findfiles(s):
418 419 work = [s]
419 420 if directories:
420 421 yield 'd', util.normpath(s[common_prefix_len:]), os.lstat(s)
421 422 while work:
422 423 top = work.pop()
423 424 names = os.listdir(top)
424 425 names.sort()
425 426 # nd is the top of the repository dir tree
426 427 nd = util.normpath(top[common_prefix_len:])
427 428 if nd == '.':
428 429 nd = ''
429 430 else:
430 431 # do not recurse into a repo contained in this
431 432 # one. use bisect to find .hg directory so speed
432 433 # is good on big directory.
433 434 hg = bisect.bisect_left(names, '.hg')
434 435 if hg < len(names) and names[hg] == '.hg':
435 436 if os.path.isdir(os.path.join(top, '.hg')):
436 437 continue
437 438 for f in names:
438 439 np = util.pconvert(os.path.join(nd, f))
439 440 if seen(np):
440 441 continue
441 442 p = os.path.join(top, f)
442 443 # don't trip over symlinks
443 444 st = os.lstat(p)
444 445 if stat.S_ISDIR(st.st_mode):
445 446 if not ignore(np):
446 447 work.append(p)
447 448 if directories:
448 449 yield 'd', np, st
449 450 if imatch(np) and np in dc:
450 451 yield 'm', np, st
451 452 elif imatch(np):
452 453 if self.supported_type(np, st):
453 454 yield 'f', np, st
454 455 elif np in dc:
455 456 yield 'm', np, st
456 457
457 458 known = {'.hg': 1}
458 459 def seen(fn):
459 460 if fn in known: return True
460 461 known[fn] = 1
461 462
462 463 # step one, find all files that match our criteria
463 464 files.sort()
464 465 for ff in files:
465 466 nf = util.normpath(ff)
466 467 f = self.wjoin(ff)
467 468 try:
468 469 st = os.lstat(f)
469 470 except OSError, inst:
470 471 found = False
471 472 for fn in dc:
472 473 if nf == fn or (fn.startswith(nf) and fn[len(nf)] == '/'):
473 474 found = True
474 475 break
475 476 if not found:
476 477 if inst.errno != errno.ENOENT or not badmatch:
477 478 self.ui.warn('%s: %s\n' % (
478 479 util.pathto(self.root, self.getcwd(), ff),
479 480 inst.strerror))
480 481 elif badmatch and badmatch(ff) and imatch(nf):
481 482 yield 'b', ff, None
482 483 continue
483 484 if stat.S_ISDIR(st.st_mode):
484 485 cmp1 = (lambda x, y: cmp(x[1], y[1]))
485 486 sorted_ = [ x for x in findfiles(f) ]
486 487 sorted_.sort(cmp1)
487 488 for e in sorted_:
488 489 yield e
489 490 else:
490 491 if not seen(nf) and match(nf):
491 492 if self.supported_type(ff, st, verbose=True):
492 493 yield 'f', nf, st
493 494 elif ff in dc:
494 495 yield 'm', nf, st
495 496
496 497 # step two run through anything left in the dc hash and yield
497 498 # if we haven't already seen it
498 499 ks = dc.keys()
499 500 ks.sort()
500 501 for k in ks:
501 502 if not seen(k) and imatch(k):
502 503 yield 'm', k, None
503 504
504 505 def status(self, files=None, match=util.always, list_ignored=False,
505 506 list_clean=False):
506 507 lookup, modified, added, unknown, ignored = [], [], [], [], []
507 508 removed, deleted, clean = [], [], []
508 509
509 510 for src, fn, st in self.statwalk(files, match, ignored=list_ignored):
510 511 try:
511 512 type_, mode, size, time = self[fn]
512 513 except KeyError:
513 514 if list_ignored and self.ignore(fn):
514 515 ignored.append(fn)
515 516 else:
516 517 unknown.append(fn)
517 518 continue
518 519 if src == 'm':
519 520 nonexistent = True
520 521 if not st:
521 522 try:
522 523 st = os.lstat(self.wjoin(fn))
523 524 except OSError, inst:
524 525 if inst.errno != errno.ENOENT:
525 526 raise
526 527 st = None
527 528 # We need to re-check that it is a valid file
528 529 if st and self.supported_type(fn, st):
529 530 nonexistent = False
530 531 # XXX: what to do with file no longer present in the fs
531 532 # who are not removed in the dirstate ?
532 533 if nonexistent and type_ in "nm":
533 534 deleted.append(fn)
534 535 continue
535 536 # check the common case first
536 537 if type_ == 'n':
537 538 if not st:
538 539 st = os.lstat(self.wjoin(fn))
539 540 if size >= 0 and (size != st.st_size
540 541 or (mode ^ st.st_mode) & 0100):
541 542 modified.append(fn)
542 543 elif time != int(st.st_mtime):
543 544 lookup.append(fn)
544 545 elif list_clean:
545 546 clean.append(fn)
546 547 elif type_ == 'm':
547 548 modified.append(fn)
548 549 elif type_ == 'a':
549 550 added.append(fn)
550 551 elif type_ == 'r':
551 552 removed.append(fn)
552 553
553 554 return (lookup, modified, added, removed, deleted, unknown, ignored,
554 555 clean)
@@ -1,1933 +1,1934
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from node import *
9 9 from i18n import _
10 10 import repo, changegroup
11 11 import changelog, dirstate, filelog, manifest, context
12 12 import re, lock, transaction, tempfile, stat, mdiff, errno, ui
13 13 import os, revlog, time, util
14 14
15 15 class localrepository(repo.repository):
16 16 capabilities = ('lookup', 'changegroupsubset')
17 17 supported = ('revlogv1', 'store')
18 18
    def __del__(self):
        # drop the reference to any pending transaction object so it is
        # not kept alive by this repository instance
        self.transhandle = None
    def __init__(self, parentui, path=None, create=0):
        """Open (or, with create set, create) the repository at path.

        If path is None, search upward from the cwd for a directory
        containing ".hg".  Raises repo.RepoError when no repository is
        found, when it already exists and create is set, or when it
        declares a requirement not listed in self.supported.
        """
        repo.repository.__init__(self)
        if not path:
            # walk up until a .hg directory is found
            p = os.getcwd()
            while not os.path.isdir(os.path.join(p, ".hg")):
                oldp = p
                p = os.path.dirname(p)
                if p == oldp:
                    raise repo.RepoError(_("There is no Mercurial repository"
                                           " here (.hg not found)"))
            path = p

        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # old repos have no requires file
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store: encoded filenames under .hg/store, or the legacy
        # flat layout directly under .hg
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path
        self.sopener = util.encodedopener(util.opener(self.spath), self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
        except IOError:
            pass

        self.changelog = changelog.changelog(self.sopener)
        self.sopener.defversion = self.changelog.version
        self.manifest = manifest.manifest(self.sopener)

        fallback = self.ui.config('ui', 'fallbackencoding')
        if fallback:
            util._fallbackencoding = fallback

        # lazily populated caches
        self.tagscache = None
        self.branchcache = None
        self.nodetagscache = None
        self.filterpats = {}
        self.transhandle = None

        self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
107 107 def url(self):
108 108 return 'file:' + self.root
109 109
    def hook(self, name, throw=False, **args):
        """Run all configured hooks whose key matches 'name'.

        Hooks may be Python callables, 'python:' dotted names, or shell
        commands; keyword args are passed through (shell hooks see them
        as HG_* environment variables).  Returns a true value if any
        hook reported failure; with throw=True a failure raises
        util.Abort instead.
        """
        def callhook(hname, funcname):
            '''call python hook. hook is callable object, looked up as
            name in python module. if callable returns "true", hook
            fails, else passes. if hook raises exception, treated as
            hook failure. exception propagates if throw is "true".

            reason for "true" meaning "hook failed" is so that
            unmodified commands (e.g. mercurial.commands.update) can
            be run as hooks without wrappers to convert return values.'''

            self.ui.note(_("calling hook %s: %s\n") % (hname, funcname))
            obj = funcname
            if not callable(obj):
                # resolve "module.attr..." to a callable
                d = funcname.rfind('.')
                if d == -1:
                    raise util.Abort(_('%s hook is invalid ("%s" not in '
                                       'a module)') % (hname, funcname))
                modname = funcname[:d]
                try:
                    obj = __import__(modname)
                except ImportError:
                    try:
                        # extensions are loaded with hgext_ prefix
                        obj = __import__("hgext_%s" % modname)
                    except ImportError:
                        raise util.Abort(_('%s hook is invalid '
                                           '(import of "%s" failed)') %
                                         (hname, modname))
                try:
                    for p in funcname.split('.')[1:]:
                        obj = getattr(obj, p)
                except AttributeError, err:
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not defined)') %
                                     (hname, funcname))
                if not callable(obj):
                    raise util.Abort(_('%s hook is invalid '
                                       '("%s" is not callable)') %
                                     (hname, funcname))
            try:
                r = obj(ui=self.ui, repo=self, hooktype=name, **args)
            except (KeyboardInterrupt, util.SignalInterrupt):
                raise
            except Exception, exc:
                if isinstance(exc, util.Abort):
                    self.ui.warn(_('error: %s hook failed: %s\n') %
                                 (hname, exc.args[0]))
                else:
                    self.ui.warn(_('error: %s hook raised an exception: '
                                   '%s\n') % (hname, exc))
                if throw:
                    raise
                self.ui.print_exc()
                return True
            if r:
                if throw:
                    raise util.Abort(_('%s hook failed') % hname)
                self.ui.warn(_('warning: %s hook failed\n') % hname)
            return r

        def runhook(name, cmd):
            # shell hook: keyword args are exported as HG_* env vars
            self.ui.note(_("running hook %s: %s\n") % (name, cmd))
            env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
            r = util.system(cmd, environ=env, cwd=self.root)
            if r:
                desc, r = util.explain_exit(r)
                if throw:
                    raise util.Abort(_('%s hook %s') % (name, desc))
                self.ui.warn(_('warning: %s hook %s\n') % (name, desc))
            return r

        r = False
        # hooks fire in sorted key order: "name" and any "name.suffix"
        hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
                 if hname.split(".", 1)[0] == name and cmd]
        hooks.sort()
        for hname, cmd in hooks:
            if callable(cmd):
                r = callhook(hname, cmd) or r
            elif cmd.startswith('python:'):
                r = callhook(hname, cmd[7:].strip()) or r
            else:
                r = runhook(hname, cmd) or r
        return r
194 194
195 195 tag_disallowed = ':\r\n'
196 196
    def _tag(self, name, node, message, local, user, date, parent=None):
        """Low-level tagging: write the tag and, for global tags, commit
        .hgtags.  With parent set, commit against that revision instead
        of the dirstate.  Returns the tag commit node (None for local
        tags)."""
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        if local:
            # local tags are stored in the current charset
            self.opener('localtags', 'a').write('%s %s\n' % (hex(node), name))
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        # committed tags are stored in UTF-8
        line = '%s %s\n' % (hex(node), util.fromlocal(name))
        if use_dirstate:
            self.wfile('.hgtags', 'ab').write(line)
        else:
            # base the new .hgtags content on the parent revision's copy
            ntags = self.filectx('.hgtags', parent).data()
            self.wfile('.hgtags', 'ab').write(ntags + line)
        if use_dirstate and self.dirstate.state('.hgtags') == '?':
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
227 227
228 228 def tag(self, name, node, message, local, user, date):
229 229 '''tag a revision with a symbolic name.
230 230
231 231 if local is True, the tag is stored in a per-repository file.
232 232 otherwise, it is stored in the .hgtags file, and a new
233 233 changeset is committed with the change.
234 234
235 235 keyword arguments:
236 236
237 237 local: whether to store tag in non-version-controlled file
238 238 (default False)
239 239
240 240 message: commit message to use if committing
241 241
242 242 user: name of user to use if committing
243 243
244 244 date: date tuple to use if committing'''
245 245
246 246 for x in self.status()[:5]:
247 247 if '.hgtags' in x:
248 248 raise util.Abort(_('working copy of .hgtags is changed '
249 249 '(please commit .hgtags manually)'))
250 250
251 251
252 252 self._tag(name, node, message, local, user, date)
253 253
254 254 def tags(self):
255 255 '''return a mapping of tag to node'''
256 256 if self.tagscache:
257 257 return self.tagscache
258 258
259 259 globaltags = {}
260 260
261 261 def readtags(lines, fn):
262 262 filetags = {}
263 263 count = 0
264 264
265 265 def warn(msg):
266 266 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
267 267
268 268 for l in lines:
269 269 count += 1
270 270 if not l:
271 271 continue
272 272 s = l.split(" ", 1)
273 273 if len(s) != 2:
274 274 warn(_("cannot parse entry"))
275 275 continue
276 276 node, key = s
277 277 key = util.tolocal(key.strip()) # stored in UTF-8
278 278 try:
279 279 bin_n = bin(node)
280 280 except TypeError:
281 281 warn(_("node '%s' is not well formed") % node)
282 282 continue
283 283 if bin_n not in self.changelog.nodemap:
284 284 warn(_("tag '%s' refers to unknown node") % key)
285 285 continue
286 286
287 287 h = []
288 288 if key in filetags:
289 289 n, h = filetags[key]
290 290 h.append(n)
291 291 filetags[key] = (bin_n, h)
292 292
293 293 for k,nh in filetags.items():
294 294 if k not in globaltags:
295 295 globaltags[k] = nh
296 296 continue
297 297 # we prefer the global tag if:
298 298 # it supercedes us OR
299 299 # mutual supercedes and it has a higher rank
300 300 # otherwise we win because we're tip-most
301 301 an, ah = nh
302 302 bn, bh = globaltags[k]
303 303 if bn != an and an in bh and \
304 304 (bn not in ah or len(bh) > len(ah)):
305 305 an = bn
306 306 ah.append([n for n in bh if n not in ah])
307 307 globaltags[k] = an, ah
308 308
309 309 # read the tags file from each head, ending with the tip
310 310 f = None
311 311 for rev, node, fnode in self._hgtagsnodes():
312 312 f = (f and f.filectx(fnode) or
313 313 self.filectx('.hgtags', fileid=fnode))
314 314 readtags(f.data().splitlines(), f)
315 315
316 316 try:
317 317 data = util.fromlocal(self.opener("localtags").read())
318 318 # localtags are stored in the local character set
319 319 # while the internal tag table is stored in UTF-8
320 320 readtags(data.splitlines(), "localtags")
321 321 except IOError:
322 322 pass
323 323
324 324 self.tagscache = {}
325 325 for k,nh in globaltags.items():
326 326 n = nh[0]
327 327 if n != nullid:
328 328 self.tagscache[k] = n
329 329 self.tagscache['tip'] = self.changelog.tip()
330 330
331 331 return self.tagscache
332 332
    def _hgtagsnodes(self):
        """Return (rev, node, fnode) for each head's .hgtags file,
        ending with the tip.  When several heads share the same .hgtags
        filenode, only the tip-most occurrence is kept."""
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # an earlier head had the same .hgtags; drop the duplicate
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
350 350
351 351 def tagslist(self):
352 352 '''return a list of tags ordered by revision'''
353 353 l = []
354 354 for t, n in self.tags().items():
355 355 try:
356 356 r = self.changelog.rev(n)
357 357 except:
358 358 r = -2 # sort to the beginning of the list if unknown
359 359 l.append((r, t, n))
360 360 l.sort()
361 361 return [(t, n) for r, t, n in l]
362 362
363 363 def nodetags(self, node):
364 364 '''return the tags associated with a node'''
365 365 if not self.nodetagscache:
366 366 self.nodetagscache = {}
367 367 for t, n in self.tags().items():
368 368 self.nodetagscache.setdefault(n, []).append(t)
369 369 return self.nodetagscache.get(node, [])
370 370
    def _branchtags(self):
        """Return the branch-head map, refreshing and rewriting the
        on-disk cache if it is behind the current tip."""
        partial, last, lrev = self._readbranchcache()

        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
380 380
381 381 def branchtags(self):
382 382 if self.branchcache is not None:
383 383 return self.branchcache
384 384
385 385 self.branchcache = {} # avoid recursion in changectx
386 386 partial = self._branchtags()
387 387
388 388 # the branch cache is stored on disk as UTF-8, but in the local
389 389 # charset internally
390 390 for k, v in partial.items():
391 391 self.branchcache[util.tolocal(k)] = v
392 392 return self.branchcache
393 393
    def _readbranchcache(self):
        """Parse .hg/branch.cache.

        Returns (partial, last, lrev): a map of branch name (UTF-8) to
        head node, plus the tip node/rev the cache was computed for.
        Any parse error or stale tip invalidates the cache, yielding
        ({}, nullid, nullrev).
        """
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
            # first line records the tip the cache was valid for
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('Invalid branch cache: unknown tip')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            # a broken cache is not fatal; fall back to a full rebuild
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
417 417
418 418 def _writebranchcache(self, branches, tip, tiprev):
419 419 try:
420 f = self.opener("branch.cache", "w")
420 f = self.opener("branch.cache", "w", atomictemp=True)
421 421 f.write("%s %s\n" % (hex(tip), tiprev))
422 422 for label, node in branches.iteritems():
423 423 f.write("%s %s\n" % (hex(node), label))
424 f.rename()
424 425 except IOError:
425 426 pass
426 427
427 428 def _updatebranchcache(self, partial, start, end):
428 429 for r in xrange(start, end):
429 430 c = self.changectx(r)
430 431 b = c.branch()
431 432 partial[b] = c.node()
432 433
433 434 def lookup(self, key):
434 435 if key == '.':
435 436 key = self.dirstate.parents()[0]
436 437 if key == nullid:
437 438 raise repo.RepoError(_("no revision checked out"))
438 439 elif key == 'null':
439 440 return nullid
440 441 n = self.changelog._match(key)
441 442 if n:
442 443 return n
443 444 if key in self.tags():
444 445 return self.tags()[key]
445 446 if key in self.branchtags():
446 447 return self.branchtags()[key]
447 448 n = self.changelog._partialmatch(key)
448 449 if n:
449 450 return n
450 451 raise repo.RepoError(_("unknown revision '%s'") % key)
451 452
    def dev(self):
        # device number of the .hg directory's filesystem
        return os.lstat(self.path).st_dev
454 455
    def local(self):
        # this repository class is always local (cf. remote repo classes)
        return True
457 458
    def join(self, f):
        # path of f inside the .hg metadata directory
        return os.path.join(self.path, f)
460 461
461 462 def sjoin(self, f):
462 463 f = self.encodefn(f)
463 464 return os.path.join(self.spath, f)
464 465
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
467 468
    def file(self, f):
        # return the filelog for tracked file f; a single leading '/'
        # is stripped for convenience
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
472 473
    def changectx(self, changeid=None):
        # context object for the given changeset (default: current)
        return context.changectx(self, changeid)
475 476
    def workingctx(self):
        # context object for the working directory
        return context.workingctx(self)
478 479
479 480 def parents(self, changeid=None):
480 481 '''
481 482 get list of changectxs for parents of changeid or working directory
482 483 '''
483 484 if changeid is None:
484 485 pl = self.dirstate.parents()
485 486 else:
486 487 n = self.changelog.lookup(changeid)
487 488 pl = self.changelog.parents(n)
488 489 if pl[1] == nullid:
489 490 return [self.changectx(pl[0])]
490 491 return [self.changectx(pl[0]), self.changectx(pl[1])]
491 492
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
496 497
    def getcwd(self):
        # cwd relative to the repository root, as tracked by the dirstate
        return self.dirstate.getcwd()
499 500
    def wfile(self, f, mode='r'):
        # open file f in the working directory
        return self.wopener(f, mode)
502 503
    def _link(self, f):
        # is working-directory file f a symlink?
        return os.path.islink(self.wjoin(f))
505 506
506 507 def _filter(self, filter, filename, data):
507 508 if filter not in self.filterpats:
508 509 l = []
509 510 for pat, cmd in self.ui.configitems(filter):
510 511 mf = util.matcher(self.root, "", [pat], [], [])[1]
511 512 l.append((mf, cmd))
512 513 self.filterpats[filter] = l
513 514
514 515 for mf, cmd in self.filterpats[filter]:
515 516 if mf(filename):
516 517 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
517 518 data = util.filter(data, cmd)
518 519 break
519 520
520 521 return data
521 522
522 523 def wread(self, filename):
523 524 if self._link(filename):
524 525 data = os.readlink(self.wjoin(filename))
525 526 else:
526 527 data = self.wopener(filename, 'r').read()
527 528 return self._filter("encode", filename, data)
528 529
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working dir, applying decode
        filters and honoring flags ('l' = symlink, 'x' = executable)."""
        data = self._filter("decode", filename, data)
        if "l" in flags:
            f = self.wjoin(filename)
            try:
                # replace whatever is there with a fresh symlink
                os.unlink(f)
            except OSError:
                pass
            d = os.path.dirname(f)
            if not os.path.exists(d):
                os.makedirs(d)
            os.symlink(data, f)
        else:
            try:
                # an existing symlink must go before writing a regular file
                if self._link(filename):
                    os.unlink(self.wjoin(filename))
            except OSError:
                pass
            self.wopener(filename, 'w').write(data)
            util.set_exec(self.wjoin(filename), "x" in flags)
549 550
    def wwritedata(self, filename, data):
        # apply decode filters only, without touching the filesystem
        return self._filter("decode", filename, data)
552 553
    def transaction(self):
        """Return a new store transaction, nesting inside a running one.

        The pre-transaction dirstate is saved to journal.dirstate so a
        later rollback() can restore it; on close the journal files are
        renamed to undo files (see aftertrans).
        """
        tr = self.transhandle
        if tr != None and tr.running():
            return tr.nest()

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames))
        self.transhandle = tr
        return tr
572 573
573 574 def recover(self):
574 575 l = self.lock()
575 576 if os.path.exists(self.sjoin("journal")):
576 577 self.ui.status(_("rolling back interrupted transaction\n"))
577 578 transaction.rollback(self.sopener, self.sjoin("journal"))
578 579 self.reload()
579 580 return True
580 581 else:
581 582 self.ui.warn(_("no interrupted transaction available\n"))
582 583 return False
583 584
    def rollback(self, wlock=None):
        """Undo the last completed transaction, restoring the store and
        dirstate from the undo files written by transaction()."""
        if not wlock:
            wlock = self.wlock()
        l = self.lock()
        if os.path.exists(self.sjoin("undo")):
            self.ui.status(_("rolling back last transaction\n"))
            transaction.rollback(self.sopener, self.sjoin("undo"))
            util.rename(self.join("undo.dirstate"), self.join("dirstate"))
            self.reload()
            self.wreload()
        else:
            self.ui.warn(_("no rollback information available\n"))
596 597
    def wreload(self):
        # re-read the dirstate from disk
        self.dirstate.read()
599 600
    def reload(self):
        # re-read store data and drop caches derived from it
        self.changelog.load()
        self.manifest.load()
        self.tagscache = None
        self.nodetagscache = None
        # NOTE(review): branchcache is not cleared here -- confirm that
        # callers do not expect it to be refreshed after a reload
605 606
    def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
                desc=None):
        """Acquire the lock file 'lockname' and return the lock object.

        If the lock is held elsewhere and wait is true, retry with the
        ui.timeout setting (default 600s) instead of raising LockHeld.
        acquirefn, when given, runs after the lock is taken.
        """
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
621 622
    def lock(self, wait=1):
        # store lock; reload store data once acquired
        return self.do_lock(self.sjoin("lock"), wait, acquirefn=self.reload,
                            desc=_('repository %s') % self.origroot)
625 626
    def wlock(self, wait=1):
        # working-dir lock; the dirstate is written on release and
        # re-read on acquisition
        return self.do_lock(self.join("wlock"), wait, self.dirstate.write,
                            self.wreload,
                            desc=_('working directory of %s') % self.origroot)
630 631
    def filecommit(self, fn, manifest1, manifest2, linkrev, transaction, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the new filelog node (or the existing parent node when
        the content is unchanged).  Files actually committed are
        appended to changelist.
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            else: # directory rename
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t):
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, transaction, linkrev, fp1, fp2)
690 691
691 692 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None, extra={}):
692 693 if p1 is None:
693 694 p1, p2 = self.dirstate.parents()
694 695 return self.commit(files=files, text=text, user=user, date=date,
695 696 p1=p1, p2=p2, wlock=wlock, extra=extra)
696 697
    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, lock=None, wlock=None,
               force_editor=False, p1=None, p2=None, extra={}):
        """Create a new changeset and return its node.

        Returns None when there is nothing to commit or the edited
        message is empty.  With p1 supplied this is a rawcommit: the
        dirstate is bypassed and files are committed as given;
        otherwise the file list (or the full dirstate status when files
        is empty) determines what to commit.
        """
        commit = []
        remove = []
        changed = []
        use_dirstate = (p1 is None) # not rawcommit
        extra = extra.copy()

        if use_dirstate:
            if files:
                # classify the named files by dirstate state
                for f in files:
                    s = self.dirstate.state(f)
                    if s in 'nmai':
                        commit.append(f)
                    elif s == 'r':
                        remove.append(f)
                    else:
                        self.ui.warn(_("%s not tracked!\n") % f)
            else:
                changes = self.status(match=match)[:5]
                modified, added, removed, deleted, unknown = changes
                commit = modified + added
                remove = removed
        else:
            commit = files

        if use_dirstate:
            p1, p2 = self.dirstate.parents()
            update_dirstate = True
        else:
            p1, p2 = p1, p2 or nullid
            update_dirstate = (self.dirstate.parents()[0] == p1)

        c1 = self.changelog.read(p1)
        c2 = self.changelog.read(p2)
        m1 = self.manifest.read(c1[0]).copy()
        m2 = self.manifest.read(c2[0])

        if use_dirstate:
            branchname = self.workingctx().branch()
            try:
                branchname = branchname.decode('UTF-8').encode('UTF-8')
            except UnicodeDecodeError:
                raise util.Abort(_('branch name not in UTF-8!'))
        else:
            branchname = ""

        if use_dirstate:
            oldname = c1[5].get("branch") # stored in UTF-8
            # a mere branch change (or a merge) still warrants a commit
            if not commit and not remove and not force and p2 == nullid and \
                   branchname == oldname:
                self.ui.status(_("nothing changed\n"))
                return None

        xp1 = hex(p1)
        if p2 == nullid: xp2 = ''
        else: xp2 = hex(p2)

        self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

        if not wlock:
            wlock = self.wlock()
        if not lock:
            lock = self.lock()
        tr = self.transaction()

        # check in files
        new = {}
        linkrev = self.changelog.count()
        commit.sort()
        is_exec = util.execfunc(self.root, m1.execf)
        is_link = util.linkfunc(self.root, m1.linkf)
        for f in commit:
            self.ui.note(f + "\n")
            try:
                new[f] = self.filecommit(f, m1, m2, linkrev, tr, changed)
                m1.set(f, is_exec(f), is_link(f))
            except (OSError, IOError):
                # rawcommit tolerates unreadable files by removing them
                if use_dirstate:
                    self.ui.warn(_("trouble committing %s!\n") % f)
                    raise
                else:
                    remove.append(f)

        # update manifest
        m1.update(new)
        remove.sort()
        removed = []

        for f in remove:
            if f in m1:
                del m1[f]
                removed.append(f)
        mn = self.manifest.add(m1, tr, linkrev, c1[0], c2[0], (new, removed))

        # add changeset
        new = new.keys()
        new.sort()

        user = user or self.ui.username()
        if not text or force_editor:
            edittext = []
            if text:
                edittext.append(text)
            edittext.append("")
            edittext.append("HG: user: %s" % user)
            if p2 != nullid:
                edittext.append("HG: branch merge")
            if branchname:
                edittext.append("HG: branch %s" % util.tolocal(branchname))
            edittext.extend(["HG: changed %s" % f for f in changed])
            edittext.extend(["HG: removed %s" % f for f in removed])
            if not changed and not remove:
                edittext.append("HG: no files changed")
            edittext.append("")
            # run editor in the repository root
            olddir = os.getcwd()
            os.chdir(self.root)
            text = self.ui.edit("\n".join(edittext), user)
            os.chdir(olddir)

        # an empty message (after stripping) aborts the commit
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        if not lines:
            return None
        text = '\n'.join(lines)
        if branchname:
            extra["branch"] = branchname
        n = self.changelog.add(mn, changed + removed, text, tr, p1, p2,
                               user, date, extra)
        self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                  parent2=xp2)
        tr.close()

        if self.branchcache and "branch" in extra:
            self.branchcache[util.tolocal(extra["branch"])] = n

        if use_dirstate or update_dirstate:
            self.dirstate.setparents(n)
            if use_dirstate:
                self.dirstate.update(new, "n")
                self.dirstate.forget(removed)

        self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
        return n
845 846
    def walk(self, node=None, files=[], match=util.always, badmatch=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function

        results are yielded in a tuple (src, filename), where src
        is one of:
        'f' the file was found in the directory tree
        'm' the file was only in the dirstate and not in the tree
        'b' file was not found and matched badmatch
        '''

        if node:
            # walk the manifest of the given changeset
            fdict = dict.fromkeys(files)
            # for dirstate.walk, files=['.'] means "walk the whole tree".
            # follow that here, too
            fdict.pop('.', None)
            mdict = self.manifest.read(self.changelog.read(node)[0])
            mfiles = mdict.keys()
            mfiles.sort()
            for fn in mfiles:
                for ffn in fdict:
                    # match if the file is the exact name or a directory
                    if ffn == fn or fn.startswith("%s/" % ffn):
                        del fdict[ffn]
                        break
                if match(fn):
                    yield 'm', fn
            # anything left in fdict was explicitly named but absent
            # from the manifest
            ffiles = fdict.keys()
            ffiles.sort()
            for fn in ffiles:
                if badmatch and badmatch(fn):
                    if match(fn):
                        yield 'b', fn
                else:
                    self.ui.warn(_('%s: No such file in rev %s\n') % (
                        util.pathto(self.root, self.getcwd(), fn), short(node)))
        else:
            for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
                yield src, fn
887 888
    def status(self, node1=None, node2=None, files=[], match=util.always,
               wlock=None, list_ignored=False, list_clean=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.

        Returns (modified, added, removed, deleted, unknown, ignored,
        clean), each a sorted list of file names.
        """

        def fcmp(fn, getnode):
            # full content comparison against the revision from getnode
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            # manifest of node, restricted to files accepted by match
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            # a best-effort wlock lets us record verified-clean files
            if not wlock:
                try:
                    wlock = self.wlock(wait=0)
                except lock.LockException:
                    wlock = None
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    # do a full compare of any files that might have changed
                    mnode = self.changelog.read(self.dirstate.parents()[0])[0]
                    getnode = lambda fn: (self.manifest.find(mnode, fn)[0] or
                                          nullid)
                    for f in lookup:
                        if fcmp(f, getnode):
                            modified.append(f)
                        else:
                            clean.append(f)
                            if wlock is not None:
                                self.dirstate.update([f], "n")
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if mf1.has_key(fn):
                    if mf1.flags(fn) != mf2.flags(fn) or \
                       (mf1[fn] != mf2[fn] and (mf2[fn] != "" or
                                                fcmp(fn, getnode))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            # whatever remains in mf1 is absent from mf2, hence removed
            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)
990 991
991 992 def add(self, list, wlock=None):
992 993 if not wlock:
993 994 wlock = self.wlock()
994 995 for f in list:
995 996 p = self.wjoin(f)
996 997 islink = os.path.islink(p)
997 998 if not islink and not os.path.exists(p):
998 999 self.ui.warn(_("%s does not exist!\n") % f)
999 1000 elif not islink and not os.path.isfile(p):
1000 1001 self.ui.warn(_("%s not added: only files and symlinks "
1001 1002 "supported currently\n") % f)
1002 1003 elif self.dirstate.state(f) in 'an':
1003 1004 self.ui.warn(_("%s already tracked!\n") % f)
1004 1005 else:
1005 1006 self.dirstate.update([f], "a")
1006 1007
1007 1008 def forget(self, list, wlock=None):
1008 1009 if not wlock:
1009 1010 wlock = self.wlock()
1010 1011 for f in list:
1011 1012 if self.dirstate.state(f) not in 'ai':
1012 1013 self.ui.warn(_("%s not added!\n") % f)
1013 1014 else:
1014 1015 self.dirstate.forget([f])
1015 1016
1016 1017 def remove(self, list, unlink=False, wlock=None):
1017 1018 if unlink:
1018 1019 for f in list:
1019 1020 try:
1020 1021 util.unlink(self.wjoin(f))
1021 1022 except OSError, inst:
1022 1023 if inst.errno != errno.ENOENT:
1023 1024 raise
1024 1025 if not wlock:
1025 1026 wlock = self.wlock()
1026 1027 for f in list:
1027 1028 p = self.wjoin(f)
1028 1029 if os.path.exists(p):
1029 1030 self.ui.warn(_("%s still exists!\n") % f)
1030 1031 elif self.dirstate.state(f) == 'a':
1031 1032 self.dirstate.forget([f])
1032 1033 elif f not in self.dirstate:
1033 1034 self.ui.warn(_("%s not tracked!\n") % f)
1034 1035 else:
1035 1036 self.dirstate.update([f], "r")
1036 1037
1037 1038 def undelete(self, list, wlock=None):
1038 1039 p = self.dirstate.parents()[0]
1039 1040 mn = self.changelog.read(p)[0]
1040 1041 m = self.manifest.read(mn)
1041 1042 if not wlock:
1042 1043 wlock = self.wlock()
1043 1044 for f in list:
1044 1045 if self.dirstate.state(f) not in "r":
1045 1046 self.ui.warn("%s not removed!\n" % f)
1046 1047 else:
1047 1048 t = self.file(f).read(m[f])
1048 1049 self.wwrite(f, t, m.flags(f))
1049 1050 self.dirstate.update([f], "n")
1050 1051
1051 1052 def copy(self, source, dest, wlock=None):
1052 1053 p = self.wjoin(dest)
1053 1054 if not (os.path.exists(p) or os.path.islink(p)):
1054 1055 self.ui.warn(_("%s does not exist!\n") % dest)
1055 1056 elif not (os.path.isfile(p) or os.path.islink(p)):
1056 1057 self.ui.warn(_("copy failed: %s is not a file or a "
1057 1058 "symbolic link\n") % dest)
1058 1059 else:
1059 1060 if not wlock:
1060 1061 wlock = self.wlock()
1061 1062 if self.dirstate.state(dest) == '?':
1062 1063 self.dirstate.update([dest], "a")
1063 1064 self.dirstate.copy(source, dest)
1064 1065
1065 1066 def heads(self, start=None):
1066 1067 heads = self.changelog.heads(start)
1067 1068 # sort the output in rev descending order
1068 1069 heads = [(-self.changelog.rev(h), h) for h in heads]
1069 1070 heads.sort()
1070 1071 return [n for (r, n) in heads]
1071 1072
1072 1073 def branches(self, nodes):
1073 1074 if not nodes:
1074 1075 nodes = [self.changelog.tip()]
1075 1076 b = []
1076 1077 for n in nodes:
1077 1078 t = n
1078 1079 while 1:
1079 1080 p = self.changelog.parents(n)
1080 1081 if p[1] != nullid or p[0] == nullid:
1081 1082 b.append((t, n, p[0], p[1]))
1082 1083 break
1083 1084 n = p[0]
1084 1085 return b
1085 1086
1086 1087 def between(self, pairs):
1087 1088 r = []
1088 1089
1089 1090 for top, bottom in pairs:
1090 1091 n, l, i = top, [], 0
1091 1092 f = 1
1092 1093
1093 1094 while n != bottom:
1094 1095 p = self.changelog.parents(n)[0]
1095 1096 if i == f:
1096 1097 l.append(n)
1097 1098 f = f * 2
1098 1099 n = p
1099 1100 i += 1
1100 1101
1101 1102 r.append(l)
1102 1103
1103 1104 return r
1104 1105
1105 1106 def findincoming(self, remote, base=None, heads=None, force=False):
1106 1107 """Return list of roots of the subsets of missing nodes from remote
1107 1108
1108 1109 If base dict is specified, assume that these nodes and their parents
1109 1110 exist on the remote side and that no child of a node of base exists
1110 1111 in both remote and self.
1111 1112 Furthermore base will be updated to include the nodes that exists
1112 1113 in self and remote but no children exists in self and remote.
1113 1114 If a list of heads is specified, return only nodes which are heads
1114 1115 or ancestors of these heads.
1115 1116
1116 1117 All the ancestors of base are in self and in remote.
1117 1118 All the descendants of the list returned are missing in self.
1118 1119 (and so we know that the rest of the nodes are missing in remote, see
1119 1120 outgoing)
1120 1121 """
1121 1122 m = self.changelog.nodemap
1122 1123 search = []
1123 1124 fetch = {}
1124 1125 seen = {}
1125 1126 seenbranch = {}
1126 1127 if base == None:
1127 1128 base = {}
1128 1129
1129 1130 if not heads:
1130 1131 heads = remote.heads()
1131 1132
1132 1133 if self.changelog.tip() == nullid:
1133 1134 base[nullid] = 1
1134 1135 if heads != [nullid]:
1135 1136 return [nullid]
1136 1137 return []
1137 1138
1138 1139 # assume we're closer to the tip than the root
1139 1140 # and start by examining the heads
1140 1141 self.ui.status(_("searching for changes\n"))
1141 1142
1142 1143 unknown = []
1143 1144 for h in heads:
1144 1145 if h not in m:
1145 1146 unknown.append(h)
1146 1147 else:
1147 1148 base[h] = 1
1148 1149
1149 1150 if not unknown:
1150 1151 return []
1151 1152
1152 1153 req = dict.fromkeys(unknown)
1153 1154 reqcnt = 0
1154 1155
1155 1156 # search through remote branches
1156 1157 # a 'branch' here is a linear segment of history, with four parts:
1157 1158 # head, root, first parent, second parent
1158 1159 # (a branch always has two parents (or none) by definition)
1159 1160 unknown = remote.branches(unknown)
1160 1161 while unknown:
1161 1162 r = []
1162 1163 while unknown:
1163 1164 n = unknown.pop(0)
1164 1165 if n[0] in seen:
1165 1166 continue
1166 1167
1167 1168 self.ui.debug(_("examining %s:%s\n")
1168 1169 % (short(n[0]), short(n[1])))
1169 1170 if n[0] == nullid: # found the end of the branch
1170 1171 pass
1171 1172 elif n in seenbranch:
1172 1173 self.ui.debug(_("branch already found\n"))
1173 1174 continue
1174 1175 elif n[1] and n[1] in m: # do we know the base?
1175 1176 self.ui.debug(_("found incomplete branch %s:%s\n")
1176 1177 % (short(n[0]), short(n[1])))
1177 1178 search.append(n) # schedule branch range for scanning
1178 1179 seenbranch[n] = 1
1179 1180 else:
1180 1181 if n[1] not in seen and n[1] not in fetch:
1181 1182 if n[2] in m and n[3] in m:
1182 1183 self.ui.debug(_("found new changeset %s\n") %
1183 1184 short(n[1]))
1184 1185 fetch[n[1]] = 1 # earliest unknown
1185 1186 for p in n[2:4]:
1186 1187 if p in m:
1187 1188 base[p] = 1 # latest known
1188 1189
1189 1190 for p in n[2:4]:
1190 1191 if p not in req and p not in m:
1191 1192 r.append(p)
1192 1193 req[p] = 1
1193 1194 seen[n[0]] = 1
1194 1195
1195 1196 if r:
1196 1197 reqcnt += 1
1197 1198 self.ui.debug(_("request %d: %s\n") %
1198 1199 (reqcnt, " ".join(map(short, r))))
1199 1200 for p in xrange(0, len(r), 10):
1200 1201 for b in remote.branches(r[p:p+10]):
1201 1202 self.ui.debug(_("received %s:%s\n") %
1202 1203 (short(b[0]), short(b[1])))
1203 1204 unknown.append(b)
1204 1205
1205 1206 # do binary search on the branches we found
1206 1207 while search:
1207 1208 n = search.pop(0)
1208 1209 reqcnt += 1
1209 1210 l = remote.between([(n[0], n[1])])[0]
1210 1211 l.append(n[1])
1211 1212 p = n[0]
1212 1213 f = 1
1213 1214 for i in l:
1214 1215 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1215 1216 if i in m:
1216 1217 if f <= 2:
1217 1218 self.ui.debug(_("found new branch changeset %s\n") %
1218 1219 short(p))
1219 1220 fetch[p] = 1
1220 1221 base[i] = 1
1221 1222 else:
1222 1223 self.ui.debug(_("narrowed branch search to %s:%s\n")
1223 1224 % (short(p), short(i)))
1224 1225 search.append((p, i))
1225 1226 break
1226 1227 p, f = i, f * 2
1227 1228
1228 1229 # sanity check our fetch list
1229 1230 for f in fetch.keys():
1230 1231 if f in m:
1231 1232 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
1232 1233
1233 1234 if base.keys() == [nullid]:
1234 1235 if force:
1235 1236 self.ui.warn(_("warning: repository is unrelated\n"))
1236 1237 else:
1237 1238 raise util.Abort(_("repository is unrelated"))
1238 1239
1239 1240 self.ui.debug(_("found new changesets starting at ") +
1240 1241 " ".join([short(f) for f in fetch]) + "\n")
1241 1242
1242 1243 self.ui.debug(_("%d total queries\n") % reqcnt)
1243 1244
1244 1245 return fetch.keys()
1245 1246
1246 1247 def findoutgoing(self, remote, base=None, heads=None, force=False):
1247 1248 """Return list of nodes that are roots of subsets not in remote
1248 1249
1249 1250 If base dict is specified, assume that these nodes and their parents
1250 1251 exist on the remote side.
1251 1252 If a list of heads is specified, return only nodes which are heads
1252 1253 or ancestors of these heads, and return a second element which
1253 1254 contains all remote heads which get new children.
1254 1255 """
1255 1256 if base == None:
1256 1257 base = {}
1257 1258 self.findincoming(remote, base, heads, force=force)
1258 1259
1259 1260 self.ui.debug(_("common changesets up to ")
1260 1261 + " ".join(map(short, base.keys())) + "\n")
1261 1262
1262 1263 remain = dict.fromkeys(self.changelog.nodemap)
1263 1264
1264 1265 # prune everything remote has from the tree
1265 1266 del remain[nullid]
1266 1267 remove = base.keys()
1267 1268 while remove:
1268 1269 n = remove.pop(0)
1269 1270 if n in remain:
1270 1271 del remain[n]
1271 1272 for p in self.changelog.parents(n):
1272 1273 remove.append(p)
1273 1274
1274 1275 # find every node whose parents have been pruned
1275 1276 subset = []
1276 1277 # find every remote head that will get new children
1277 1278 updated_heads = {}
1278 1279 for n in remain:
1279 1280 p1, p2 = self.changelog.parents(n)
1280 1281 if p1 not in remain and p2 not in remain:
1281 1282 subset.append(n)
1282 1283 if heads:
1283 1284 if p1 in heads:
1284 1285 updated_heads[p1] = True
1285 1286 if p2 in heads:
1286 1287 updated_heads[p2] = True
1287 1288
1288 1289 # this is the set of all roots we have to push
1289 1290 if heads:
1290 1291 return subset, updated_heads.keys()
1291 1292 else:
1292 1293 return subset
1293 1294
1294 1295 def pull(self, remote, heads=None, force=False, lock=None):
1295 1296 mylock = False
1296 1297 if not lock:
1297 1298 lock = self.lock()
1298 1299 mylock = True
1299 1300
1300 1301 try:
1301 1302 fetch = self.findincoming(remote, force=force)
1302 1303 if fetch == [nullid]:
1303 1304 self.ui.status(_("requesting all changes\n"))
1304 1305
1305 1306 if not fetch:
1306 1307 self.ui.status(_("no changes found\n"))
1307 1308 return 0
1308 1309
1309 1310 if heads is None:
1310 1311 cg = remote.changegroup(fetch, 'pull')
1311 1312 else:
1312 1313 if 'changegroupsubset' not in remote.capabilities:
1313 1314 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1314 1315 cg = remote.changegroupsubset(fetch, heads, 'pull')
1315 1316 return self.addchangegroup(cg, 'pull', remote.url())
1316 1317 finally:
1317 1318 if mylock:
1318 1319 lock.release()
1319 1320
1320 1321 def push(self, remote, force=False, revs=None):
1321 1322 # there are two ways to push to remote repo:
1322 1323 #
1323 1324 # addchangegroup assumes local user can lock remote
1324 1325 # repo (local filesystem, old ssh servers).
1325 1326 #
1326 1327 # unbundle assumes local user cannot lock remote repo (new ssh
1327 1328 # servers, http servers).
1328 1329
1329 1330 if remote.capable('unbundle'):
1330 1331 return self.push_unbundle(remote, force, revs)
1331 1332 return self.push_addchangegroup(remote, force, revs)
1332 1333
    def prepush(self, remote, force, revs):
        """Compute the changegroup to push and check for new remote heads.

        Returns (changegroup, remote_heads) on success, or (None, 1)
        when there is nothing to push or the push would create new
        remote heads and force is not set.
        """
        # find what we are missing from remote (roots into base) so we can
        # warn about unsynced changes, and what remote is missing (update)
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            # restrict the outgoing set to ancestors of the requested revs
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                # remote is empty: a push can never create an extra head
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                # pushing everything and we already have more heads locally
                warn = 1
            else:
                # count how many heads the remote would end up with:
                # every outgoing head, plus each remote head that no
                # outgoing head descends from
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            # r stays a head: nothing we push descends from it
                            newheads.append(r)
                    else:
                        # unknown remote head: assume it stays a head
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote branches!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 1
            elif inc:
                self.ui.warn(_("note: unsynced remote changes!\n"))


        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads
1388 1389
1389 1390 def push_addchangegroup(self, remote, force, revs):
1390 1391 lock = remote.lock()
1391 1392
1392 1393 ret = self.prepush(remote, force, revs)
1393 1394 if ret[0] is not None:
1394 1395 cg, remote_heads = ret
1395 1396 return remote.addchangegroup(cg, 'push', self.url())
1396 1397 return ret[1]
1397 1398
1398 1399 def push_unbundle(self, remote, force, revs):
1399 1400 # local repo finds heads on server, finds out what revs it
1400 1401 # must push. once revs transferred, if server finds it has
1401 1402 # different heads (someone else won commit/push race), server
1402 1403 # aborts.
1403 1404
1404 1405 ret = self.prepush(remote, force, revs)
1405 1406 if ret[0] is not None:
1406 1407 cg, remote_heads = ret
1407 1408 if force: remote_heads = ['force']
1408 1409 return remote.unbundle(cg, remote_heads, 'push')
1409 1410 return ret[1]
1410 1411
1411 1412 def changegroupinfo(self, nodes):
1412 1413 self.ui.note(_("%d changesets found\n") % len(nodes))
1413 1414 if self.ui.debugflag:
1414 1415 self.ui.debug(_("List of changesets:\n"))
1415 1416 for node in nodes:
1416 1417 self.ui.debug("%s\n" % hex(node))
1417 1418
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        Returns a chunkbuffer streaming the changelog group, then the
        manifest group, then one group per changed file.
        """

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        # Force the index to be loaded up to the last entry.
        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.genchunk(fname)
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1689 1690
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        Returns a chunkbuffer streaming the changelog group, then the
        manifest group, then one group per changed file.
        """

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # all descendants of the bases are going out
        nodes = cl.nodesbetween(basenodes, None)[0]
        # ersatz set of the outgoing revision numbers, for fast membership
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes)

        # a changeset 'owns' itself, so the changenode lookup is identity
        def identity(x):
            return x

        # yield, in revision order, the nodes of a revlog whose linked
        # changeset is part of the outgoing set
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # collect (as a side effect of the changelog group walk) every
        # filename touched by an outgoing changeset
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # map a revlog node back to the changeset node it was added by
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                # only emit a file group when there is something to send
                if nodeiter:
                    yield changegroup.genchunk(fname)
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1756 1757
    def addchangegroup(self, source, srctype, url):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        # changelog callback: report progress and link new entries to
        # themselves (the next changelog index)
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        # manifest/filelog callback: link entries to their changeset rev
        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        tr = self.transaction()

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        cor = cl.count() - 1
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, tr, 1) is None:
            raise util.Abort(_("received changelog group is empty"))
        cnr = cl.count() - 1
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # each file group is preceded by a chunk naming the file; an
            # empty chunk terminates the stream
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, tr) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += fl.count() - o
            files += 1

        # make changelog see real files again
        cl.finalize(tr)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            # last chance to abort before the transaction is committed
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()

        if changesets > 0:
            # notify about the group as a whole, then each changeset
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
1853 1854
1854 1855
1855 1856 def stream_in(self, remote):
1856 1857 fp = remote.stream_out()
1857 1858 l = fp.readline()
1858 1859 try:
1859 1860 resp = int(l)
1860 1861 except ValueError:
1861 1862 raise util.UnexpectedOutput(
1862 1863 _('Unexpected response from remote server:'), l)
1863 1864 if resp == 1:
1864 1865 raise util.Abort(_('operation forbidden by server'))
1865 1866 elif resp == 2:
1866 1867 raise util.Abort(_('locking the remote repository failed'))
1867 1868 elif resp != 0:
1868 1869 raise util.Abort(_('the server sent an unknown error code'))
1869 1870 self.ui.status(_('streaming all changes\n'))
1870 1871 l = fp.readline()
1871 1872 try:
1872 1873 total_files, total_bytes = map(int, l.split(' ', 1))
1873 1874 except ValueError, TypeError:
1874 1875 raise util.UnexpectedOutput(
1875 1876 _('Unexpected response from remote server:'), l)
1876 1877 self.ui.status(_('%d files to transfer, %s of data\n') %
1877 1878 (total_files, util.bytecount(total_bytes)))
1878 1879 start = time.time()
1879 1880 for i in xrange(total_files):
1880 1881 # XXX doesn't support '\n' or '\r' in filenames
1881 1882 l = fp.readline()
1882 1883 try:
1883 1884 name, size = l.split('\0', 1)
1884 1885 size = int(size)
1885 1886 except ValueError, TypeError:
1886 1887 raise util.UnexpectedOutput(
1887 1888 _('Unexpected response from remote server:'), l)
1888 1889 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1889 1890 ofp = self.sopener(name, 'w')
1890 1891 for chunk in util.filechunkiter(fp, limit=size):
1891 1892 ofp.write(chunk)
1892 1893 ofp.close()
1893 1894 elapsed = time.time() - start
1894 1895 if elapsed <= 0:
1895 1896 elapsed = 0.001
1896 1897 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1897 1898 (util.bytecount(total_bytes), elapsed,
1898 1899 util.bytecount(total_bytes / elapsed)))
1899 1900 self.reload()
1900 1901 return len(self.heads()) + 1
1901 1902
1902 1903 def clone(self, remote, heads=[], stream=False):
1903 1904 '''clone remote repository.
1904 1905
1905 1906 keyword arguments:
1906 1907 heads: list of revs to clone (forces use of pull)
1907 1908 stream: use streaming clone if possible'''
1908 1909
1909 1910 # now, all clients that can request uncompressed clones can
1910 1911 # read repo formats supported by all servers that can serve
1911 1912 # them.
1912 1913
1913 1914 # if revlog format changes, client will have to check version
1914 1915 # and format flags on "stream" capability, and use
1915 1916 # uncompressed only if compatible.
1916 1917
1917 1918 if stream and not heads and remote.capable('stream'):
1918 1919 return self.stream_in(remote)
1919 1920 return self.pull(remote, heads)
1920 1921
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames every (src, dest) pair in files.

    The pairs are snapshotted as tuples immediately, so later mutation
    of the caller's entries cannot change what gets renamed.
    """
    pending = [tuple(pair) for pair in files]

    def a():
        for src, dest in pending:
            util.rename(src, dest)

    return a
1928 1929
def instance(ui, path, create):
    """Open (or create) the local repository at path, dropping any
    leading 'file:' scheme from the location first."""
    local_path = util.drop_scheme('file', path)
    return localrepository(ui, local_path, create)
1931 1932
def islocal(path):
    """Repositories handled by this module are always local."""
    return True
@@ -1,290 +1,290
1 1 # templater.py - template expansion for output
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 from i18n import _
9 9 from node import *
10 10 import cgi, re, sys, os, time, urllib, util, textwrap
11 11
def parsestring(s, quoted=True):
    '''parse a string using simple c-like syntax.
    string must be in quotes if quoted is True.'''
    if not quoted:
        return s.decode('string_escape')
    # a quoted string must be at least two chars with matching delimiters
    if len(s) < 2 or s[0] != s[-1]:
        raise SyntaxError(_('unmatched quotes'))
    return s[1:-1].decode('string_escape')
21 21
class templater(object):
    '''template expansion engine.

    template expansion works like this. a map file contains key=value
    pairs. if value is quoted, it is treated as string. otherwise, it
    is treated as name of template file.

    templater is asked to expand a key in map. it looks up key, and
    looks for strings like this: {foo}. it expands {foo} by looking up
    foo in map, and substituting it. expansion is recursive: it stops
    when there is no more {foo} to replace.

    expansion also allows formatting and filtering.

    format uses key to expand each item in list. syntax is
    {key%format}.

    filter uses function to transform value. syntax is
    {key|filter1|filter2|...}.'''

    # matches #key# or {key}, with an optional %format or |filter chain;
    # the lookaheads keep a lone '#'/'{' from being treated as a template
    template_re = re.compile(r"(?:(?:#(?=[\w\|%]+#))|(?:{(?=[\w\|%]+})))"
                             r"(\w+)(?:(?:%(\w+))|((?:\|\w+)*))[#}]")

    def __init__(self, mapfile, filters={}, defaults={}, cache={}):
        '''set up template engine.
        mapfile is name of file to read map definitions from.
        filters is dict of functions. each transforms a value into another.
        defaults is dict of default map definitions.'''
        self.mapfile = mapfile or 'template'
        self.cache = cache.copy()
        self.map = {}
        # template file names in the map file are relative to it
        self.base = (mapfile and os.path.dirname(mapfile)) or ''
        self.filters = filters
        self.defaults = defaults

        if not mapfile:
            return
        i = 0
        for l in file(mapfile):
            l = l.strip()
            i += 1
            if not l or l[0] in '#;': continue
            m = re.match(r'([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.+)$', l)
            if m:
                key, val = m.groups()
                if val[0] in "'\"":
                    # quoted value: an inline template string
                    try:
                        self.cache[key] = parsestring(val)
                    except SyntaxError, inst:
                        raise SyntaxError('%s:%s: %s' %
                                          (mapfile, i, inst.args[0]))
                else:
                    # unquoted value: name of a template file
                    self.map[key] = os.path.join(self.base, val)
            else:
                raise SyntaxError(_("%s:%s: parse error") % (mapfile, i))

    def __contains__(self, key):
        return key in self.cache or key in self.map

    def __call__(self, t, **map):
        '''perform expansion.
        t is name of map element to expand.
        map is added elements to use during expansion.'''
        # load and memoize the template body on first use
        if not self.cache.has_key(t):
            try:
                self.cache[t] = file(self.map[t]).read()
            except IOError, inst:
                raise IOError(inst.args[0], _('template file %s: %s') %
                              (self.map[t], inst.args[1]))
        tmpl = self.cache[t]

        # generator: yields alternating literal text and expanded values
        while tmpl:
            m = self.template_re.search(tmpl)
            if not m:
                yield tmpl
                break

            start, end = m.span(0)
            key, format, fl = m.groups()

            # emit the literal text before the match, then consume it
            if start:
                yield tmpl[:start]
            tmpl = tmpl[end:]

            if key in map:
                v = map[key]
            else:
                v = self.defaults.get(key, "")
            if callable(v):
                v = v(**map)
            if format:
                # {key%format}: expand template 'format' once per item
                if not hasattr(v, '__iter__'):
                    raise SyntaxError(_("Error expanding '%s%s'")
                                      % (key, format))
                lm = map.copy()
                for i in v:
                    lm.update(i)
                    yield self(format, **lm)
            else:
                # {key|f1|f2}: pipe the value through each named filter
                if fl:
                    for f in fl.split("|")[1:]:
                        v = self.filters[f](v)
                yield v
125 125
# time-unit scales ordered largest first, as (name, seconds) pairs
agescales = [("year", 3600 * 24 * 365),
             ("month", 3600 * 24 * 30),
             ("week", 3600 * 24 * 7),
             ("day", 3600 * 24),
             ("hour", 3600),
             ("minute", 60),
             ("second", 1)]

def age(date):
    '''turn a (timestamp, tzoff) tuple into an age string.'''

    def fmt(unit, count):
        # pluralize the unit name for any count other than exactly one
        if count == 1:
            return "%d %s" % (count, unit)
        return "%d %ss" % (count, unit)

    # clamp to at least one second so the loop always terminates with
    # a sensible answer
    delta = max(1, int(time.time() - date[0]))

    for unit, seconds in agescales:
        count = delta / seconds
        # report in the largest unit we have at least two of,
        # falling back to seconds
        if count >= 2 or seconds == 1:
            return fmt(unit, count)
154 154
def stringify(thing):
    '''turn nested template iterator into string.'''
    if not hasattr(thing, '__iter__'):
        return str(thing)
    # recursively flatten, skipping None entries
    parts = []
    for item in thing:
        if item is not None:
            parts.append(stringify(item))
    return "".join(parts)
160 160
# lazily-compiled module-level regexes used by fill()
para_re = None
space_re = None

def fill(text, width):
    '''fill many paragraphs.'''
    global para_re, space_re
    # compile the paragraph/space patterns once per process
    if para_re is None:
        # a blank line or a bullet ("- " / "* ") starts a new paragraph
        para_re = re.compile('(\n\n|\n\\s*[-*]\\s*)', re.M)
        space_re = re.compile(r' +')

    def findparas():
        # yield (paragraph, separator) pairs covering all of text
        start = 0
        while True:
            m = para_re.search(text, start)
            if not m:
                # last paragraph: split off its trailing whitespace so it
                # is preserved verbatim after wrapping
                w = len(text)
                while w > start and text[w-1].isspace(): w -= 1
                yield text[start:w], text[w:]
                break
            yield text[start:m.start(0)], m.group(1)
            start = m.end(1)

    # wrap each paragraph, collapse runs of spaces, and re-attach the
    # original separators
    return "".join([space_re.sub(' ', textwrap.fill(para, width)) + rest
                    for para, rest in findparas()])
185 185
def firstline(text):
    '''return the first line of text'''
    lines = text.splitlines(True)
    if not lines:
        return ''
    return lines[0].rstrip('\r\n')
192 192
def isodate(date):
    '''turn a (timestamp, tzoff) tuple into an iso 8601 date and time.'''
    return util.datestr(date, format='%Y-%m-%d %H:%M')
196 196
def hgdate(date):
    '''turn a (timestamp, tzoff) tuple into an hg cset timestamp.'''
    timestamp, tzoff = date
    return "%d %d" % (timestamp, tzoff)
200 200
def nl2br(text):
    '''replace raw newlines with xhtml line breaks.'''
    return '<br/>\n'.join(text.split('\n'))
204 204
def obfuscate(text):
    # decode with the configured local encoding (replacing undecodable
    # bytes), then emit every character as an XML numeric entity;
    # presumably meant to hide e.g. email addresses from scrapers in
    # web output - confirm with template-map users
    text = unicode(text, util._encoding, 'replace')
    return ''.join(['&#%d;' % ord(c) for c in text])
208 208
def domain(author):
    '''get domain of author, or empty string if none.'''
    at = author.find('@')
    if at == -1:
        return ''
    rest = author[at + 1:]
    # strip a trailing '>' (and anything after) from "Name <user@host>"
    gt = rest.find('>')
    if gt >= 0:
        rest = rest[:gt]
    return rest
217 217
def email(author):
    '''get email of author.'''
    # take the text between '<' and '>' if present; with neither bracket
    # the whole string is returned unchanged
    end = author.find('>')
    if end == -1:
        end = None
    return author[author.find('<') + 1:end]
223 223
def person(author):
    '''get name of author, or else username.'''
    lt = author.find('<')
    if lt == -1:
        # no "Name <email>" form: fall back to the short user name
        return util.shortuser(author)
    return author[:lt].rstrip()
229 229
def shortdate(date):
    '''turn (timestamp, tzoff) tuple into iso 8601 date.'''
    return util.datestr(date, format='%Y-%m-%d', timezone=False)
233 233
def indent(text, prefix):
    '''indent each non-empty line of text after first with prefix.'''
    lines = text.splitlines()

    def indenter():
        last = len(lines) - 1
        for pos, line in enumerate(lines):
            # the first line and blank lines are emitted unprefixed
            if pos and line.strip():
                yield prefix
            yield line
            # keep interior newlines, and the trailing one if present
            if pos < last or text.endswith('\n'):
                yield '\n'

    return "".join(indenter())
247 247
# filters available during template expansion, keyed by the name used
# in "{key|filter}" syntax; each maps one value to another
common_filters = {
    "addbreaks": nl2br,
    "basename": os.path.basename,
    "age": age,
    "date": lambda x: util.datestr(x),
    "domain": domain,
    "email": email,
    "escape": lambda x: cgi.escape(x, True),
    "fill68": lambda x: fill(x, width=68),
    "fill76": lambda x: fill(x, width=76),
    "firstline": firstline,
    "tabindent": lambda x: indent(x, '\t'),
    "hgdate": hgdate,
    "isodate": isodate,
    "obfuscate": obfuscate,
    "permissions": lambda x: x and "-rwxr-xr-x" or "-rw-r--r--",
    "person": person,
    "rfc822date": lambda x: util.datestr(x, "%a, %d %b %Y %H:%M:%S"),
    "short": lambda x: x[:12],
    "shortdate": shortdate,
    "stringify": stringify,
    "strip": lambda x: x.strip(),
    "urlescape": lambda x: urllib.quote(x),
    "user": lambda x: util.shortuser(x),
    "stringescape": lambda x: x.encode('string_escape'),
    }
274 274
def templatepath(name=None):
    '''return location of template file or directory (if no name).
    returns None if not found.'''

    # py2exe builds have no __file__; anchor on the executable instead
    if hasattr(sys, 'frozen'):
        anchor = sys.executable
    else:
        anchor = __file__
    for subdir in ('templates', '../templates'):
        parts = subdir.split('/')
        if name:
            parts.append(name)
        candidate = os.path.join(os.path.dirname(anchor), *parts)
        if (name and os.path.exists(candidate)) or os.path.isdir(candidate):
            return os.path.normpath(candidate)
290 290
@@ -1,1484 +1,1499
1 1 """
2 2 util.py - Mercurial utility functions and platform specfic implementations
3 3
4 4 Copyright 2005 K. Thananchayan <thananck@yahoo.com>
5 5 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
6 6 Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
7 7
8 8 This software may be used and distributed according to the terms
9 9 of the GNU General Public License, incorporated herein by reference.
10 10
11 11 This contains helper routines that are independent of the SCM core and hide
12 12 platform-specific details from the core.
13 13 """
14 14
15 15 from i18n import _
16 16 import cStringIO, errno, getpass, popen2, re, shutil, sys, tempfile
17 17 import os, threading, time, calendar, ConfigParser, locale, glob
18 18
try:
    # honor HGENCODING if set, else the locale, else plain ascii
    _encoding = os.environ.get("HGENCODING") or locale.getpreferredencoding() \
                or "ascii"
except locale.Error:
    _encoding = 'ascii'
# how undecodable bytes are handled: "strict", "replace" or "ignore"
_encodingmode = os.environ.get("HGENCODINGMODE", "strict")
# tried after UTF-8 when decoding repo data of unknown encoding
_fallbackencoding = 'ISO-8859-1'
26 26
def tolocal(s):
    """
    Convert a string from internal UTF-8 to local encoding

    All internal strings should be UTF-8 but some repos before the
    implementation of locale support may contain latin1 or possibly
    other character sets. We attempt to decode everything strictly
    using UTF-8, then Latin-1, and failing that, we use UTF-8 and
    replace unknown characters.
    """
    for e in ('UTF-8', _fallbackencoding):
        try:
            u = s.decode(e) # attempt strict decoding
            return u.encode(_encoding, "replace")
        except LookupError, k:
            # the configured local encoding itself is unknown
            raise Abort(_("%s, please check your locale settings") % k)
        except UnicodeDecodeError:
            # not valid in this encoding; try the next candidate
            pass
    u = s.decode("utf-8", "replace") # last ditch
    return u.encode(_encoding, "replace")
47 47
def fromlocal(s):
    """
    Convert a string from the local character encoding to UTF-8

    We attempt to decode strings using the encoding mode set by
    HG_ENCODINGMODE, which defaults to 'strict'. In this mode, unknown
    characters will cause an error message. Other modes include
    'replace', which replaces unknown characters with a special
    Unicode character, and 'ignore', which drops the character.
    """
    try:
        return s.decode(_encoding, _encodingmode).encode("utf-8")
    except UnicodeDecodeError, inst:
        # show ten characters of context around the offending byte
        sub = s[max(0, inst.start-10):inst.start+10]
        raise Abort("decoding near '%s': %s!" % (sub, inst))
    except LookupError, k:
        # the configured local encoding itself is unknown
        raise Abort(_("%s, please check your locale settings") % k)
65 65
def locallen(s):
    """Find the length in characters of a local string"""
    # decode with 'replace' so even malformed byte sequences get a length
    return len(s.decode(_encoding, "replace"))
69 69
70 70 def localsub(s, a, b=None):
71 71 try:
72 72 u = s.decode(_encoding, _encodingmode)
73 73 if b is not None:
74 74 u = u[a:b]
75 75 else:
76 76 u = u[:a]
77 77 return u.encode(_encoding, _encodingmode)
78 78 except UnicodeDecodeError, inst:
79 79 sub = s[max(0, inst.start-10), inst.start+10]
80 80 raise Abort(_("decoding near '%s': %s!\n") % (sub, inst))
81 81
# used by parsedate: formats tried in order when parsing a user date
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    # fix: was '%I:%M:%SP' - a literal 'P' instead of the %p AM/PM
    # directive that every other 12-hour format here uses
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
    )

# additionally accepted for date range endpoints (coarser granularity)
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
115 115
class SignalInterrupt(Exception):
    """Exception raised on SIGTERM and SIGHUP."""
    # NOTE(review): presumably raised from signal handlers installed by
    # the command dispatcher so blocking calls unwind - confirm at caller
118 118
# differences from SafeConfigParser:
# - case-sensitive keys
# - allows values that are not strings (this means that you may not
#   be able to save the configuration to a file)
class configparser(ConfigParser.SafeConfigParser):
    def optionxform(self, optionstr):
        # keep option names exactly as written (the base class lowercases)
        return optionstr

    def set(self, section, option, value):
        # bypass SafeConfigParser.set, which rejects non-string values
        return ConfigParser.ConfigParser.set(self, section, option, value)

    def _interpolate(self, section, option, rawval, vars):
        # only strings can carry %(...)s interpolations; pass other
        # values through untouched
        if not isinstance(rawval, basestring):
            return rawval
        return ConfigParser.SafeConfigParser._interpolate(self, section,
                                                          option, rawval, vars)
135 135
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # memoizing wrapper: arguments must be hashable; the cache is
    # unbounded and lives as long as the returned function
    cache = {}
    if func.func_code.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
154 154
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    (pout, pin) = popen2.popen2(cmd, -1, 'b')
    # feed the input from a separate thread so reading the output here
    # cannot deadlock against a full pipe buffer
    def writer():
        try:
            pin.write(s)
            pin.close()
        except IOError, inst:
            # the command may exit before consuming all input; EPIPE is
            # therefore not an error for a filter
            if inst.errno != errno.EPIPE:
                raise
    # we should use select instead on UNIX, but this will work on most
    # systems, including Windows
    w = threading.Thread(target=writer)
    w.start()
    f = pout.read()
    pout.close()
    w.join()
    return f
174 174
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if code: raise Abort(_("command '%s' failed: %s") %
                             (cmd, explain_exit(code)))
        return open(outname, 'rb').read()
    finally:
        # best-effort removal of the temp files; fix: the bare "except:"
        # clauses also swallowed KeyboardInterrupt/SystemExit during
        # cleanup - narrowed to OSError, the error unlink raises
        try:
            if inname: os.unlink(inname)
        except OSError: pass
        try:
            if outname: os.unlink(outname)
        except OSError: pass
201 201
# maps a command prefix to the filter strategy that runs it; consulted
# by filter() below, with pipefilter as the fallback
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
206 206
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on an explicit "tempfile:"/"pipe:" prefix; default to pipes
    for prefix, run in filtertable.items():
        if cmd.startswith(prefix):
            return run(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
213 213
def find_in_path(name, path, default=None):
    '''find name in search path. path can be string (will be split
    with os.pathsep), or iterable thing that returns strings. if name
    found, return path to name. else return default.'''
    if isinstance(path, str):
        path = path.split(os.pathsep)
    for directory in path:
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    return default
225 225
def binary(s):
    """return true if a string is binary data using diff's heuristic"""
    # a NUL byte in the first 4k marks the content as binary
    return bool(s) and '\0' in s[:4096]
231 231
def unique(g):
    """return the uniq elements of iterable g"""
    # preserves first-occurrence order; elements must be hashable
    seen = {}
    result = []
    for item in g:
        if item in seen:
            continue
        seen[item] = 1
        result.append(item)
    return result
241 241
# base class for expected failures: callers print the message and exit
class Abort(Exception):
    """Raised if a command needs to print an error and exit."""

# subclasses Abort so generic Abort handling treats it the same way
class UnexpectedOutput(Abort):
    """Raised to print an error with part of output and exit."""
247 247
def always(fn):
    """match predicate that accepts every file name"""
    return True

def never(fn):
    """match predicate that rejects every file name"""
    return False
250 250
def expand_glob(pats):
    '''On Windows, expand the implicit globs in a list of patterns'''
    if os.name != 'nt':
        # elsewhere the shell has already expanded globs for us
        return list(pats)
    expanded = []
    for p in pats:
        kind, name = patkind(p, None)
        if kind is None:
            matches = glob.glob(name)
            if matches:
                expanded.extend(matches)
                continue
        # if we couldn't expand the glob, just keep it around
        expanded.append(p)
    return expanded
266 266
def patkind(name, dflt_pat='glob'):
    """Split a string into an optional pattern kind prefix and the
    actual pattern."""
    for kind in ('re', 'glob', 'path', 'relglob', 'relpath', 'relre'):
        if name.startswith(kind + ':'):
            # note: yields a two-element list here, a tuple below;
            # callers unpack either form
            return name.split(':', 1)
    return dflt_pat, name
273 273
def globre(pat, head='^', tail='$'):
    "convert a glob pattern into a regexp"
    i, n = 0, len(pat)
    res = ''
    group = False
    # one-character lookahead; returns False at end of pattern
    def peek(): return i < n and pat[i]
    while i < n:
        c = pat[i]
        i = i+1
        if c == '*':
            if peek() == '*':
                # '**' crosses directory boundaries, '*' does not
                i += 1
                res += '.*'
            else:
                res += '[^/]*'
        elif c == '?':
            res += '.'
        elif c == '[':
            # character class: find the closing ']', allowing a literal
            # ']' or '!' immediately after the '['
            j = i
            if j < n and pat[j] in '!]':
                j += 1
            while j < n and pat[j] != ']':
                j += 1
            if j >= n:
                # unterminated class: treat '[' as a literal
                res += '\\['
            else:
                stuff = pat[i:j].replace('\\','\\\\')
                i = j + 1
                if stuff[0] == '!':
                    # glob negation becomes regexp complement
                    stuff = '^' + stuff[1:]
                elif stuff[0] == '^':
                    # protect a literal leading '^'
                    stuff = '\\' + stuff
                res = '%s[%s]' % (res, stuff)
        elif c == '{':
            # {a,b} alternation becomes a (?:a|b) group
            group = True
            res += '(?:'
        elif c == '}' and group:
            res += ')'
            group = False
        elif c == ',' and group:
            res += '|'
        elif c == '\\':
            # escape the next character, or a trailing backslash itself
            p = peek()
            if p:
                i += 1
                res += re.escape(p)
            else:
                res += re.escape(c)
        else:
            res += re.escape(c)
    return head + res + tail
325 325
# characters whose presence marks a pattern as a glob rather than a
# literal path (used by _matcher's contains_glob)
_globchars = {'[': 1, '{': 1, '*': 1, '?': 1}
327 327
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1: return localpath(n2)
    if os.path.isabs(n1):
        # on windows a different drive means no relative path exists
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = n1.split(os.sep), n2.split('/')
    a.reverse()
    b.reverse()
    # drop the common leading components, then climb out of what remains
    # of n1 ('..' per leftover component) and descend into n2
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b)
352 352
def canonpath(root, cwd, myname):
    """return the canonical path of myname, given cwd and root"""
    # normalize root so it always carries exactly one trailing separator
    if root == os.sep:
        rootsep = os.sep
    elif root.endswith(os.sep):
        rootsep = root
    else:
        rootsep = root + os.sep
    name = myname
    if not os.path.isabs(name):
        name = os.path.join(root, cwd, name)
    name = os.path.normpath(name)
    if name != rootsep and name.startswith(rootsep):
        # fast path: name is lexically inside root
        name = name[len(rootsep):]
        audit_path(name)
        return pconvert(name)
    elif name == root:
        return ''
    else:
        # Determine whether `name' is in the hierarchy at or beneath `root',
        # by iterating name=dirname(name) until that causes no change (can't
        # check name == '/', because that doesn't work on windows). For each
        # `name', compare dev/inode numbers. If they match, the list `rel'
        # holds the reversed list of components making up the relative file
        # name we want.
        root_st = os.stat(root)
        rel = []
        while True:
            try:
                name_st = os.stat(name)
            except OSError:
                break
            if samestat(name_st, root_st):
                if not rel:
                    # name was actually the same as root (maybe a symlink)
                    return ''
                rel.reverse()
                name = os.path.join(*rel)
                audit_path(name)
                return pconvert(name)
            dirname, basename = os.path.split(name)
            rel.append(basename)
            if dirname == name:
                break
            name = dirname

        raise Abort('%s not under root' % myname)
400 400
def matcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None):
    # convenience wrapper around _matcher: untyped patterns are globs
    return _matcher(canonroot, cwd, names, inc, exc, 'glob', src)
403 403
def cmdmatcher(canonroot, cwd='', names=[], inc=[], exc=[], src=None,
               globbed=False, default=None):
    # matcher for command-line file arguments: untyped patterns default
    # to relative paths, and implicit globs are expanded here (Windows
    # shells don't) unless the caller already did so (globbed=True)
    default = default or 'relpath'
    if default == 'relpath' and not globbed:
        names = expand_glob(names)
    return _matcher(canonroot, cwd, names, inc, exc, default, src)
410 410
def _matcher(canonroot, cwd, names, inc, exc, dflt_pat, src):
    """build a function to match a set of file patterns

    arguments:
    canonroot - the canonical root of the tree you're matching against
    cwd - the current working directory, if relevant
    names - patterns to find
    inc - patterns to include
    exc - patterns to exclude
    dflt_pat - if a pattern in names has no explicit type, assume this one
    src - where these patterns came from (e.g. .hgignore)

    a pattern is one of:
    'glob:<glob>' - a glob relative to cwd
    're:<regexp>' - a regular expression
    'path:<path>' - a path relative to canonroot
    'relglob:<glob>' - an unrooted glob (*.c matches C files in all dirs)
    'relpath:<path>' - a path relative to cwd
    'relre:<regexp>' - a regexp that doesn't have to match the start of a name
    '<something>' - one of the cases above, selected by the dflt_pat argument

    returns:
    a 3-tuple containing
    - list of roots (places where one should start a recursive walk of the fs);
      this often matches the explicit non-pattern names passed in, but also
      includes the initial part of glob: patterns that has no glob characters
    - a bool match(filename) function
    - a bool indicating if any patterns were passed in
    """

    # a common case: no patterns at all
    if not names and not inc and not exc:
        return [], always, False

    # true if name contains any glob metacharacter (see _globchars)
    def contains_glob(name):
        for c in name:
            if c in _globchars: return True
        return False

    def regex(kind, name, tail):
        '''convert a pattern into a regular expression'''
        if not name:
            return ''
        if kind == 're':
            return name
        elif kind == 'path':
            # match the path itself or anything beneath it
            return '^' + re.escape(name) + '(?:/|$)'
        elif kind == 'relglob':
            # unrooted: allow any directory prefix
            return globre(name, '(?:|.*/)', tail)
        elif kind == 'relpath':
            return re.escape(name) + '(?:/|$)'
        elif kind == 'relre':
            if name.startswith('^'):
                return name
            return '.*' + name
        return globre(name, '', tail)

    def matchfn(pats, tail):
        """build a matching function from a set of patterns"""
        if not pats:
            return
        # pre-compile every pattern; match succeeds if any one does
        matches = []
        for k, p in pats:
            try:
                pat = '(?:%s)' % regex(k, p, tail)
                matches.append(re.compile(pat).match)
            except re.error:
                if src: raise Abort("%s: invalid pattern (%s): %s" % (src, k, p))
                else: raise Abort("invalid pattern (%s): %s" % (k, p))

        def buildfn(text):
            for m in matches:
                r = m(text)
                if r:
                    return r

        return buildfn

    def globprefix(pat):
        '''return the non-glob prefix of a path, e.g. foo/* -> foo'''
        root = []
        for p in pat.split('/'):
            if contains_glob(p): break
            root.append(p)
        return '/'.join(root) or '.'

    def normalizepats(names, default):
        # canonicalize each pattern and collect walk roots; returns
        # (roots, [(kind, name)], any-non-literal-patterns?)
        pats = []
        roots = []
        anypats = False
        for kind, name in [patkind(p, default) for p in names]:
            if kind in ('glob', 'relpath'):
                name = canonpath(canonroot, cwd, name)
            elif kind in ('relglob', 'path'):
                name = normpath(name)

            pats.append((kind, name))

            if kind in ('glob', 're', 'relglob', 'relre'):
                anypats = True

            if kind == 'glob':
                root = globprefix(name)
                roots.append(root)
            elif kind in ('relpath', 'path'):
                roots.append(name or '.')
            elif kind == 'relglob':
                roots.append('.')
        return roots, pats, anypats

    roots, pats, anypats = normalizepats(names, dflt_pat)

    # name patterns must match whole names ('$'); include/exclude
    # patterns also match any directory prefix ('(?:/|$)')
    patmatch = matchfn(pats, '$') or always
    incmatch = always
    if inc:
        dummy, inckinds, dummy = normalizepats(inc, 'glob')
        incmatch = matchfn(inckinds, '(?:/|$)')
    excmatch = lambda fn: False
    if exc:
        dummy, exckinds, dummy = normalizepats(exc, 'glob')
        excmatch = matchfn(exckinds, '(?:/|$)')

    if not names and inc and not exc:
        # common case: hgignore patterns
        match = incmatch
    else:
        match = lambda fn: incmatch(fn) and not excmatch(fn) and patmatch(fn)

    return (roots, match, (inc or exc or anypats) and True)
540 540
def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status.  if ui object,
    print error message and return status, else raise onerr object as
    exception.'''
    # NOTE: the mutable default `environ` is only read, never mutated
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val in (None, False):
            return '0'
        if val == True:
            return '1'
        return str(val)
    # remember prior values of every env var we are about to override,
    # so the finally block can restore them exactly
    oldenv = {}
    for k in environ:
        oldenv[k] = os.environ.get(k)
    if cwd is not None:
        oldcwd = os.getcwd()
    origcmd = cmd
    if os.name == 'nt':
        # cmd.exe needs the whole command requoted
        cmd = '"%s"' % cmd
    try:
        for k, v in environ.iteritems():
            os.environ[k] = py2shell(v)
        if cwd is not None and oldcwd != cwd:
            os.chdir(cwd)
        rc = os.system(cmd)
        if rc and onerr:
            errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                                explain_exit(rc)[0])
            if errprefix:
                errmsg = '%s: %s' % (errprefix, errmsg)
            # duck-type onerr: a ui-like object gets warn(), anything
            # else is treated as an exception class to raise
            try:
                onerr.warn(errmsg + '\n')
            except AttributeError:
                raise onerr(errmsg)
        return rc
    finally:
        # restore environment and working directory even on failure
        for k, v in oldenv.iteritems():
            if v is None:
                del os.environ[k]
            else:
                os.environ[k] = v
        if cwd is not None and oldcwd != cwd:
            os.chdir(oldcwd)
587 587
# os.path.lexists is not available on python2.3
def lexists(filename):
    """test whether a file with this name exists. does not follow symlinks"""
    try:
        os.lstat(filename)
    except OSError:
        # was a bare except: narrowed to OSError so KeyboardInterrupt,
        # SystemExit etc. are not silently swallowed
        return False
    return True
596 596
def rename(src, dst):
    """forcibly rename a file, clobbering dst if it exists"""
    try:
        os.rename(src, dst)
    except OSError:
        # dropped the unused exception binding.
        # on windows, rename to existing file is not allowed, so we
        # must delete destination first. but if file is open, unlink
        # schedules it for delete but does not delete it. rename
        # happens immediately even for open files, so we create
        # temporary file, delete it, rename destination to that name,
        # then delete that. then rename is safe to do.
        fd, temp = tempfile.mkstemp(dir=os.path.dirname(dst) or '.')
        os.close(fd)
        os.unlink(temp)
        os.rename(dst, temp)
        os.unlink(temp)
        os.rename(src, dst)
614 614
def unlink(f):
    """unlink and remove the directory if it is empty"""
    os.unlink(f)
    parent = os.path.dirname(f)
    # opportunistically prune now-empty ancestor directories; a
    # non-empty parent raises OSError, which is not an error here
    try:
        os.removedirs(parent)
    except OSError:
        pass
623 623
624 624 def copyfile(src, dest):
625 625 "copy a file, preserving mode"
626 626 if os.path.islink(src):
627 627 try:
628 628 os.unlink(dest)
629 629 except:
630 630 pass
631 631 os.symlink(os.readlink(src), dest)
632 632 else:
633 633 try:
634 634 shutil.copyfile(src, dest)
635 635 shutil.copymode(src, dest)
636 636 except shutil.Error, inst:
637 637 raise Abort(str(inst))
638 638
def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        # default to hardlinking only when src and dst share a device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    if os.path.isdir(src):
        os.mkdir(dst)
        for entry in os.listdir(src):
            copyfiles(os.path.join(src, entry),
                      os.path.join(dst, entry), hardlink)
    else:
        if not hardlink:
            shutil.copy(src, dst)
            return
        try:
            os_link(src, dst)
        except (IOError, OSError):
            # linking failed (e.g. unsupported fs): fall back to a copy
            hardlink = False
            shutil.copy(src, dst)
661 661
def audit_path(path):
    """Abort if path contains dangerous components"""
    parts = os.path.normcase(path).split(os.sep)
    # reject: drive-qualified paths, a leading '.hg' or empty component
    # (absolute path), and any parent-directory traversal
    drive = os.path.splitdrive(path)[0]
    if drive or parts[0] in ('.hg', '') or os.pardir in parts:
        raise Abort(_("path contains illegal component: %s\n") % path)
668 668
def _makelock_file(info, pathname):
    """create a lock file containing info; fail if it already exists

    O_EXCL guarantees we error out instead of clobbering a live lock.
    """
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    try:
        os.write(ld, info)
    finally:
        # was unguarded: a failed write leaked the descriptor
        os.close(ld)
673 673
def _readlock_file(pathname):
    # plain-file variant of readlock(); counterpart of _makelock_file
    return posixfile(pathname).read()
676 676
def nlinks(pathname):
    """Return number of hardlinks for the given file."""
    # lstat: count links of the entry itself, do not follow symlinks
    st = os.lstat(pathname)
    return st.st_nlink
680 680
if hasattr(os, 'link'):
    os_link = os.link
else:
    # platform without hardlink support: keep the same call signature
    # but always fail
    def os_link(src, dst):
        raise OSError(0, _("Hardlinks not supported"))
686 686
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fileno = fp.fileno
    except AttributeError:
        # file-like object without a descriptor: fall back to its name
        return os.stat(fp.name)
    return os.fstat(fileno())
693 693
694 694 posixfile = file
695 695
def is_win_9x():
    '''return true if run on windows 95, 98 or me.'''
    winver = getattr(sys, 'getwindowsversion', None)
    if winver is None:
        # non-windows python: sniff the legacy shell name instead
        return os.name == 'nt' and 'command' in os.environ.get('comspec', '')
    # platform id 1 corresponds to the 9x/ME product family
    return winver()[3] == 1
702 702
# hook point: platform code may install a fallback user-name getter here
getuser_fallback = None

def getuser():
    '''return name of current user'''
    try:
        return getpass.getuser()
    except ImportError:
        # import of pwd will fail on windows - try fallback
        if getuser_fallback:
            return getuser_fallback()
    # raised if win32api not available
    raise Abort(_('user name not available - set USERNAME '
                  'environment variable'))
716 716
def username(uid=None):
    """Return the name of the user with the given uid.

    If uid is None, return the name of the current user."""
    try:
        import pwd
    except ImportError:
        # no pwd database (e.g. windows)
        return None
    if uid is None:
        uid = os.getuid()
    try:
        return pwd.getpwuid(uid)[0]
    except KeyError:
        # unknown uid: fall back to its numeric form
        return str(uid)
731 731
def groupname(gid=None):
    """Return the name of the group with the given gid.

    If gid is None, return the name of the current group."""
    try:
        import grp
    except ImportError:
        # no group database (e.g. windows)
        return None
    if gid is None:
        gid = os.getgid()
    try:
        return grp.getgrgid(gid)[0]
    except KeyError:
        # unknown gid: fall back to its numeric form
        return str(gid)
746 746
747 747 # File system features
748 748
def checkfolding(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    p2 = os.path.join(d, b.upper())
    if path == p2:
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
        if s2 == s1:
            # case-flipped name reaches the same file -> case-folding fs
            return False
        return True
    except OSError:
        # was a bare except: only a stat failure (flipped name absent)
        # means case-sensitive; other exceptions now propagate
        return True
768 768
# capture the process umask once at import time; the only way to query
# it is to set it, so set and immediately restore
_umask = os.umask(0)
os.umask(_umask)
def checkexec(path):
    """
    Check whether the given path is on a filesystem with UNIX-like exec flags

    Requires a directory (like /foo/.hg)
    """
    # create a scratch file in path, flip its exec bits, and see whether
    # the mode change actually sticks
    fh, fn = tempfile.mkstemp("", "", path)
    os.close(fh)
    m = os.stat(fn).st_mode
    os.chmod(fn, m ^ 0111)
    r = (os.stat(fn).st_mode != m)
    os.unlink(fn)
    return r
782 785
def execfunc(path, fallback):
    '''return an is_exec() function with default to fallback'''
    if not checkexec(path):
        # filesystem cannot store exec bits: use the caller's fallback
        return fallback
    return lambda x: is_exec(os.path.join(path, x))
788 791
def checklink(path):
    """check whether the given path is on a symlink-capable filesystem"""
    # mktemp is not racy because symlink creation will fail if the
    # chosen name already exists
    probe = tempfile.mktemp(dir=path)
    try:
        os.symlink(".", probe)
        os.unlink(probe)
        return True
    except (OSError, AttributeError):
        # AttributeError: python build without os.symlink
        return False
800 803
def linkfunc(path, fallback):
    '''return an is_link() function with default to fallback'''
    if not checklink(path):
        # symlinks unsupported here: use the caller's fallback
        return fallback
    return lambda x: os.path.islink(os.path.join(path, x))
806 809
# Platform specific variants
if os.name == 'nt':
    import msvcrt
    nulldev = 'NUL:'

    class winstdout:
        '''stdout on windows misbehaves if sent through a pipe'''

        def __init__(self, fp):
            self.fp = fp

        def __getattr__(self, key):
            # delegate everything else to the wrapped stream
            return getattr(self.fp, key)

        def close(self):
            try:
                self.fp.close()
            except: pass

        def write(self, s):
            try:
                return self.fp.write(s)
            except IOError, inst:
                # errno 0 is how windows reports a closed pipe here;
                # translate it into a normal EPIPE
                if inst.errno != 0: raise
                self.close()
                raise IOError(errno.EPIPE, 'Broken pipe')

        def flush(self):
            try:
                return self.fp.flush()
            except IOError, inst:
                if inst.errno != errno.EINVAL: raise
                self.close()
                raise IOError(errno.EPIPE, 'Broken pipe')

    sys.stdout = winstdout(sys.stdout)

    def system_rcpath():
        # best effort: fall back to a fixed path when the win32
        # registry helper is unavailable (deliberate broad except)
        try:
            return system_rcpath_win32()
        except:
            return [r'c:\mercurial\mercurial.ini']

    def user_rcpath():
        '''return os-specific hgrc search path to the user dir'''
        # best effort: fall back to ~/mercurial.ini when the win32
        # helper is unavailable (deliberate broad except)
        try:
            userrc = user_rcpath_win32()
        except:
            userrc = os.path.join(os.path.expanduser('~'), 'mercurial.ini')
        path = [userrc]
        userprofile = os.environ.get('USERPROFILE')
        if userprofile:
            path.append(os.path.join(userprofile, 'mercurial.ini'))
        return path

    def parse_patch_output(output_line):
        """parses the output produced by patch and returns the file name"""
        pf = output_line[14:]
        if pf[0] == '`':
            pf = pf[1:-1] # Remove the quotes
        return pf

    def testpid(pid):
        '''return False if pid dead, True if running or not known'''
        return True

    def set_exec(f, mode):
        # no exec bits on windows
        pass

    def set_link(f, mode):
        # no symlinks on windows
        pass

    def set_binary(fd):
        msvcrt.setmode(fd.fileno(), os.O_BINARY)

    def pconvert(path):
        return path.replace("\\", "/")

    def localpath(path):
        return path.replace('/', '\\')

    def normpath(path):
        return pconvert(os.path.normpath(path))

    # symlink-based locks are not possible: use plain lock files
    makelock = _makelock_file
    readlock = _readlock_file

    def samestat(s1, s2):
        return False

    # A sequence of backslashes is special iff it precedes a double quote:
    # - if there's an even number of backslashes, the double quote is not
    #   quoted (i.e. it ends the quoted region)
    # - if there's an odd number of backslashes, the double quote is quoted
    # - in both cases, every pair of backslashes is unquoted into a single
    #   backslash
    # (See http://msdn2.microsoft.com/en-us/library/a1y7w461.aspx )
    # So, to quote a string, we must surround it in double quotes, double
    # the number of backslashes that preceed double quotes and add another
    # backslash before every double quote (being careful with the double
    # quote we've appended to the end)
    _quotere = None
    def shellquote(s):
        global _quotere
        if _quotere is None:
            # compiled lazily and cached at module level
            _quotere = re.compile(r'(\\*)("|\\$)')
        return '"%s"' % _quotere.sub(r'\1\1\\\2', s)

    def explain_exit(code):
        return _("exited with status %d") % code, code

    # if you change this stub into a real check, please try to implement the
    # username and groupname functions above, too.
    def isowner(fp, st=None):
        return True

    try:
        # override functions with win32 versions if possible
        from util_win32 import *
        if not is_win_9x():
            posixfile = posixfile_nt
    except ImportError:
        pass

else:
    nulldev = '/dev/null'
    _umask = os.umask(0)
    os.umask(_umask)

    def rcfiles(path):
        # path/hgrc plus every *.rc file in path/hgrc.d
        rcs = [os.path.join(path, 'hgrc')]
        rcdir = os.path.join(path, 'hgrc.d')
        try:
            rcs.extend([os.path.join(rcdir, f) for f in os.listdir(rcdir)
                        if f.endswith(".rc")])
        except OSError:
            pass
        return rcs

    def system_rcpath():
        path = []
        # old mod_python does not set sys.argv
        if len(getattr(sys, 'argv', [])) > 0:
            path.extend(rcfiles(os.path.dirname(sys.argv[0]) +
                                '/../etc/mercurial'))
        path.extend(rcfiles('/etc/mercurial'))
        return path

    def user_rcpath():
        return [os.path.expanduser('~/.hgrc')]

    def parse_patch_output(output_line):
        """parses the output produced by patch and returns the file name"""
        pf = output_line[14:]
        # GNU patch quotes names containing spaces with single quotes
        if pf.startswith("'") and pf.endswith("'") and " " in pf:
            pf = pf[1:-1] # Remove the quotes
        return pf

    def is_exec(f):
        """check whether a file is executable"""
        return (os.lstat(f).st_mode & 0100 != 0)

    def set_exec(f, mode):
        s = os.lstat(f).st_mode
        if (s & 0100 != 0) == mode:
            return
        if mode:
            # Turn on +x for every +r bit when making a file executable
            # and obey umask.
            os.chmod(f, s | (s & 0444) >> 2 & ~_umask)
        else:
            os.chmod(f, s & 0666)

    def set_link(f, mode):
        """make a file a symbolic link/regular file

        if a file is changed to a link, its contents become the link data
        if a link is changed to a file, its link data become its contents
        """

        m = os.path.islink(f)
        if m == bool(mode):
            return

        if mode: # switch file to link
            data = file(f).read()
            os.unlink(f)
            os.symlink(data, f)
        else:
            data = os.readlink(f)
            os.unlink(f)
            file(f, "w").write(data)

    def set_binary(fd):
        # text/binary distinction only matters on windows
        pass

    def pconvert(path):
        return path

    def localpath(path):
        return path

    normpath = os.path.normpath
    samestat = os.path.samestat

    def makelock(info, pathname):
        # preferred form: a symlink whose target encodes the lock info
        try:
            os.symlink(info, pathname)
        except OSError, why:
            if why.errno == errno.EEXIST:
                # lock already held
                raise
            else:
                # symlinks unsupported: fall back to a plain lock file
                _makelock_file(info, pathname)

    def readlock(pathname):
        try:
            return os.readlink(pathname)
        except OSError, why:
            if why.errno == errno.EINVAL:
                # not a symlink: must be a plain lock file
                return _readlock_file(pathname)
            else:
                raise

    def shellquote(s):
        return "'%s'" % s.replace("'", "'\\''")

    def testpid(pid):
        '''return False if pid dead, True if running or not sure'''
        try:
            # signal 0 checks for existence without actually signalling
            os.kill(pid, 0)
            return True
        except OSError, inst:
            return inst.errno != errno.ESRCH

    def explain_exit(code):
        """return a 2-tuple (desc, code) describing a process's status"""
        if os.WIFEXITED(code):
            val = os.WEXITSTATUS(code)
            return _("exited with status %d") % val, val
        elif os.WIFSIGNALED(code):
            val = os.WTERMSIG(code)
            return _("killed by signal %d") % val, val
        elif os.WIFSTOPPED(code):
            val = os.WSTOPSIG(code)
            return _("stopped by signal %d") % val, val
        raise ValueError(_("invalid exit code"))

    def isowner(fp, st=None):
        """Return True if the file object f belongs to the current user.

        The return value of a util.fstat(f) may be passed as the st argument.
        """
        if st is None:
            st = fstat(fp)
        return st.st_uid == os.getuid()
1062 1065
def _buildencodefun():
    """build the store filename encoder/decoder pair

    The encoding escapes control bytes, high bytes and windows-reserved
    characters as ~XX, and uppercase letters (plus '_' itself) as
    '_' + lowercase, so encoded names are safe on case-folding and
    windows filesystems.
    """
    e = '_'
    win_reserved = [ord(x) for x in '\\:*?"<>|']
    cmap = dict([ (chr(x), chr(x)) for x in xrange(127) ])
    for x in (range(32) + range(126, 256) + win_reserved):
        cmap[chr(x)] = "~%02x" % x
    for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
        cmap[chr(x)] = e + chr(x).lower()
    # dmap is the exact inverse of cmap
    dmap = {}
    for k, v in cmap.iteritems():
        dmap[v] = k
    def decode(s):
        i = 0
        while i < len(s):
            # encoded tokens are 1-3 characters long; try shortest first
            for l in xrange(1, 4):
                try:
                    yield dmap[s[i:i+l]]
                    i += l
                    break
                except KeyError:
                    pass
            else:
                # no token matched: the input is not a valid encoding
                raise KeyError
    return (lambda s: "".join([cmap[c] for c in s]),
            lambda s: "".join(list(decode(s))))

encodefilename, decodefilename = _buildencodefun()
1090 1093
def encodedopener(openerfn, fn):
    """wrap an opener so every path is passed through fn before opening"""
    def wrapped(path, *args, **kw):
        return openerfn(fn(path), *args, **kw)
    return wrapped
1095 1098
def opener(base, audit=True):
    """
    return a function that opens files relative to base

    this function is used to hide the details of COW semantics and
    remote file access from higher level code.
    """
    p = base
    audit_p = audit

    def mktempcopy(name, emptyok=False):
        # create a temp file next to name; with emptyok, return it
        # empty, otherwise fill it with name's contents
        d, fn = os.path.split(name)
        fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
        os.close(fd)
        # Temporary files are created with mode 0600, which is usually not
        # what we want.  If the original file already exists, just copy
        # its mode.  Otherwise, manually obey umask.
        try:
            st_mode = os.lstat(name).st_mode
        except OSError, inst:
            if inst.errno != errno.ENOENT:
                raise
            st_mode = 0666 & ~_umask
        os.chmod(temp, st_mode)
        if emptyok:
            return temp
        try:
            try:
                ifp = posixfile(name, "rb")
            except IOError, inst:
                # missing source simply means an empty copy
                if inst.errno == errno.ENOENT:
                    return temp
                if not getattr(inst, 'filename', None):
                    inst.filename = name
                raise
            ofp = posixfile(temp, "wb")
            for chunk in filechunkiter(ifp):
                ofp.write(chunk)
            ifp.close()
            ofp.close()
        except:
            # deliberate bare except: clean up the temp file, then
            # re-raise whatever went wrong
            try: os.unlink(temp)
            except: pass
            raise
        return temp

    class atomictempfile(posixfile):
        """the file will only be copied when rename is called"""
        def __init__(self, name, mode):
            self.__name = name
            # writing ('w') truncates anyway, so skip copying the data
            self.temp = mktempcopy(name, emptyok=('w' in mode))
            posixfile.__init__(self, self.temp, mode)
        def rename(self):
            if not self.closed:
                posixfile.close(self)
                rename(self.temp, localpath(self.__name))
        def __del__(self):
            # never renamed: discard the temp file
            if not self.closed:
                try:
                    os.unlink(self.temp)
                except: pass
                posixfile.close(self)

    class atomicfile(atomictempfile):
        """the file will only be copied on close"""
        def __init__(self, name, mode):
            atomictempfile.__init__(self, name, mode)
        def close(self):
            self.rename()
        def __del__(self):
            self.rename()

    def o(path, mode="r", text=False, atomic=False, atomictemp=False):
        if audit_p:
            audit_path(path)
        f = os.path.join(p, path)

        if not text:
            mode += "b" # for that other OS

        if mode[0] != "r":
            try:
                nlink = nlinks(f)
            except OSError:
                # new file: make sure its directory exists
                nlink = 0
                d = os.path.dirname(f)
                if not os.path.isdir(d):
                    os.makedirs(d)
            if atomic:
                return atomicfile(f, mode)
            elif atomictemp:
                return atomictempfile(f, mode)
            if nlink > 1:
                # break up hardlinks before writing (copy-on-write)
                rename(mktempcopy(f), f)
        return posixfile(f, mode)

    return o
1181 1196
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter, targetsize = 2**16):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        self.in_iter = iter(in_iter)
        self.buf = ''
        self.targetsize = int(targetsize)
        if self.targetsize <= 0:
            raise ValueError(_("targetsize must be greater than 0, was %d") %
                             targetsize)
        # set once in_iter has been exhausted
        self.iterempty = False

    def fillbuf(self):
        """Ignore target size; read every chunk from iterator until empty."""
        if not self.iterempty:
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            for ch in self.in_iter:
                collector.write(ch)
            self.buf = collector.getvalue()
            self.iterempty = True

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        if l > len(self.buf) and not self.iterempty:
            # Clamp to a multiple of self.targetsize
            targetsize = self.targetsize * ((l // self.targetsize) + 1)
            collector = cStringIO.StringIO()
            collector.write(self.buf)
            collected = len(self.buf)
            for chunk in self.in_iter:
                collector.write(chunk)
                collected += len(chunk)
                if collected >= targetsize:
                    break
            if collected < targetsize:
                self.iterempty = True
            self.buf = collector.getvalue()
        # buffer() keeps the remainder without copying the string
        s, self.buf = self.buf[:l], buffer(self.buf, l)
        return s
1226 1241
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data).  Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        # short-circuit on nbytes == 0 so we never issue a zero-byte read
        chunk = nbytes and f.read(nbytes)
        if not chunk:
            break
        if limit:
            limit -= len(chunk)
        yield chunk
1243 1258
def makedate():
    '''return (unixtime, tz-offset) for the current local time'''
    lt = time.localtime()
    # use the DST-adjusted offset when DST is in effect (tm_isdst == 1)
    if time.daylight and lt[8] == 1:
        tz = time.altzone
    else:
        tz = time.timezone
    return time.mktime(lt), tz
1251 1266
def datestr(date=None, format='%a %b %d %H:%M:%S %Y', timezone=True):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    when, offset = date or makedate()
    # shift into the zone's local time, then format as if it were UTC
    s = time.strftime(format, time.gmtime(float(when) - offset))
    if not timezone:
        return s
    return s + " %+03d%02d" % (-offset / 3600, ((-offset % 3600) / 60))
1262 1277
def strdate(string, format, defaults):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def timezone(string):
        # recognize a trailing "+HHMM"/"-HHMM" numeric zone, or GMT/UTC;
        # return the offset in seconds, or None if no zone is present
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            tz = int(tz)
            offset = - 3600 * (tz / 100) - 60 * (tz % 100)
            return offset
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset != None:
        # strip the zone word before handing the rest to strptime
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    for part in defaults:
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # append "@value" and a matching "@%x" so strptime sees a
            # complete date
            date += "@" + defaults[part]
            format += "@%" + part[0]

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1297 1312
def parsedate(string, formats=None, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    The date may be a "unixtime offset" string or in one of the specified
    formats."""
    if not string:
        return 0, 0
    if not formats:
        formats = defaultdateformats
    string = string.strip()
    try:
        # fast path: the internal "unixtime offset" representation
        when, offset = map(int, string.split(' '))
    except ValueError:
        # fill out defaults
        if not defaults:
            defaults = {}
        now = makedate()
        for part in "d mb yY HI M S".split():
            if part not in defaults:
                # times default to 00, day/month to 1, year to now
                if part[0] in "HMS":
                    defaults[part] = "00"
                elif part[0] in "dm":
                    defaults[part] = "1"
                else:
                    defaults[part] = datestr(now, "%" + part[0], False)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(string, format, defaults)
            except ValueError:
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r ') % string)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1341 1356
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    """

    def lower(date):
        # earliest timestamp the spec can mean
        return parsedate(date, extendeddateformats)[0]

    def upper(date):
        # latest timestamp the spec can mean: fill missing fields with
        # their maxima, probing month lengths from 31 days downwards
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in "31 30 29".split():
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except:
                # NOTE(review): deliberate broad except — an invalid day
                # (strptime ValueError) means "try a shorter month";
                # narrowing this would also change when Abort surfaces
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    if date[0] == "<":
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range between two specs
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anywhere within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1389 1404
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the mail domain
    pos = user.find('@')
    if pos >= 0:
        user = user[:pos]
    # drop a leading real-name portion ("Real Name <addr")
    pos = user.find('<')
    if pos >= 0:
        user = user[pos + 1:]
    # keep only the first word
    pos = user.find(' ')
    if pos >= 0:
        user = user[:pos]
    # keep only the part before the first dot
    pos = user.find('.')
    if pos >= 0:
        user = user[:pos]
    return user
1405 1420
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    if len(text) > maxlength:
        # reserve three characters for the trailing dots
        return "%s..." % (text[:maxlength-3])
    return text
1412 1427
def walkrepos(path):
    '''yield every hg repository under path, recursively.'''
    def errhandler(err):
        # only errors on the root itself are fatal
        if err.filename == path:
            raise err

    for root, dirs, files in os.walk(path, onerror=errhandler):
        if '.hg' in dirs:
            yield root
            # do not descend into a repository's own subdirectories
            dirs[:] = []
1425 1440
1426 1441 _rcpath = None
1427 1442
def os_rcpath():
    '''return default os-specific hgrc search path'''
    # system-wide files first, then the per-user ones
    candidates = system_rcpath() + user_rcpath()
    return [os.path.normpath(f) for f in candidates]
1434 1449
def rcpath():
    '''return hgrc search path. if env var HGRCPATH is set, use it.
    for each item in path, if directory, use files ending in .rc,
    else use item.
    make HGRCPATH empty to only look in .hg/hgrc of current repo.
    if no HGRCPATH, use default os-specific path.'''
    global _rcpath
    # computed once per process, then served from the module-level cache
    if _rcpath is None:
        if 'HGRCPATH' in os.environ:
            _rcpath = []
            for p in os.environ['HGRCPATH'].split(os.pathsep):
                if not p: continue
                if os.path.isdir(p):
                    # directories contribute all their *.rc files
                    for f in os.listdir(p):
                        if f.endswith('.rc'):
                            _rcpath.append(os.path.join(p, f))
                else:
                    _rcpath.append(p)
        else:
            _rcpath = os_rcpath()
    return _rcpath
1456 1471
def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    # (threshold multiplier, divisor, format), largest unit first; the
    # first entry whose threshold is met determines the precision used
    units = (
        (100, 1<<30, _('%.0f GB')),
        (10, 1<<30, _('%.1f GB')),
        (1, 1<<30, _('%.2f GB')),
        (100, 1<<20, _('%.0f MB')),
        (10, 1<<20, _('%.1f MB')),
        (1, 1<<20, _('%.2f MB')),
        (100, 1<<10, _('%.0f KB')),
        (10, 1<<10, _('%.1f KB')),
        (1, 1<<10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for multiplier, divisor, format in units:
        if nbytes >= divisor * multiplier:
            return format % (nbytes / float(divisor))
    # smaller than every threshold: plain byte count
    return units[-1][2] % nbytes
1477 1492
def drop_scheme(scheme, path):
    """strip a leading 'scheme:' (and optional '//' marker) from path"""
    prefix = scheme + ':'
    if path.startswith(prefix):
        path = path[len(prefix):]
        # also drop the authority marker, e.g. file:///x -> /x
        if path.startswith('//'):
            path = path[2:]
    return path
General Comments 0
You need to be logged in to leave comments. Login now