hgext: replace uses of hasattr with util.safehasattr
Augie Fackler
r14945:11aad09a default
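Python 2's built-in hasattr() returns False whenever *any* exception escapes the attribute lookup, not just AttributeError, so a buggy property silently reads as "attribute missing". util.safehasattr sidesteps that by probing with getattr() and a sentinel. A minimal sketch of the idiom this commit switches to (the demo class and its property are illustrative, not from the patch):

```python
_notset = object()

def safehasattr(thing, attr):
    # A genuinely absent attribute reads as False; any other exception
    # raised while computing the attribute propagates to the caller.
    return getattr(thing, attr, _notset) is not _notset

class demo(object):
    @property
    def broken(self):
        raise RuntimeError('bug hidden by plain hasattr')  # hypothetical

d = demo()
assert not hasattr(d, 'broken')   # Python 2 swallows the RuntimeError
try:
    safehasattr(d, 'broken')
except RuntimeError:
    pass                          # the bug now surfaces instead
```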
@@ -1,847 +1,848 @@
1 1 # Mercurial built-in replacement for cvsps.
2 2 #
3 3 # Copyright 2008, Frank Kingswood <frank@kingswood-consulting.co.uk>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import os
9 9 import re
10 10 import cPickle as pickle
11 11 from mercurial import util
12 12 from mercurial.i18n import _
13 13 from mercurial import hook
14 from mercurial import util
14 15
15 16 class logentry(object):
16 17 '''Class logentry has the following attributes:
17 18 .author - author name as CVS knows it
18 19 .branch - name of branch this revision is on
19 20 .branches - revision tuple of branches starting at this revision
20 21 .comment - commit message
21 22 .date - the commit date as a (time, tz) tuple
22 23 .dead - true if file revision is dead
23 24 .file - Name of file
24 25 .lines - a tuple (+lines, -lines) or None
25 26 .parent - Previous revision of this entry
26 27 .rcs - name of file as returned from CVS
27 28 .revision - revision number as tuple
28 29 .tags - list of tags on the file
29 30 .synthetic - is this a synthetic "file ... added on ..." revision?
30 31 .mergepoint- the branch that has been merged from
31 32 (if present in rlog output)
32 33 .branchpoints- the branches that start at the current entry
33 34 '''
34 35 def __init__(self, **entries):
35 36 self.synthetic = False
36 37 self.__dict__.update(entries)
37 38
38 39 def __repr__(self):
39 40 return "<%s at 0x%x: %s %s>" % (self.__class__.__name__,
40 41 id(self),
41 42 self.file,
42 43 ".".join(map(str, self.revision)))
43 44
44 45 class logerror(Exception):
45 46 pass
46 47
47 48 def getrepopath(cvspath):
48 49 """Return the repository path from a CVS path.
49 50
50 51 >>> getrepopath('/foo/bar')
51 52 '/foo/bar'
52 53 >>> getrepopath('c:/foo/bar')
53 54 'c:/foo/bar'
54 55 >>> getrepopath(':pserver:10/foo/bar')
55 56 '/foo/bar'
56 57 >>> getrepopath(':pserver:10c:/foo/bar')
57 58 '/foo/bar'
58 59 >>> getrepopath(':pserver:/foo/bar')
59 60 '/foo/bar'
60 61 >>> getrepopath(':pserver:c:/foo/bar')
61 62 'c:/foo/bar'
62 63 >>> getrepopath(':pserver:truc@foo.bar:/foo/bar')
63 64 '/foo/bar'
64 65 >>> getrepopath(':pserver:truc@foo.bar:c:/foo/bar')
65 66 'c:/foo/bar'
66 67 """
67 68 # According to CVS manual, CVS paths are expressed like:
68 69 # [:method:][[user][:password]@]hostname[:[port]]/path/to/repository
69 70 #
70 71 # Unfortunately, Windows absolute paths start with a drive letter
71 72 # like 'c:' making it harder to parse. Here we assume that drive
72 73 # letters are only one character long and any CVS component before
73 74 # the repository path is at least 2 characters long, and use this
74 75 # to disambiguate.
75 76 parts = cvspath.split(':')
76 77 if len(parts) == 1:
77 78 return parts[0]
78 79 # Here there is an ambiguous case if we have a port number
79 80 # immediately followed by a Windows drive letter. We assume this
80 81 # never happens and decide it must be a CVS path component,
81 82 # therefore ignoring it.
82 83 if len(parts[-2]) > 1:
83 84 return parts[-1].lstrip('0123456789')
84 85 return parts[-2] + ':' + parts[-1]
85 86
86 87 def createlog(ui, directory=None, root="", rlog=True, cache=None):
87 88 '''Collect the CVS rlog'''
88 89
89 90 # Because we store many duplicate commit log messages, reusing strings
90 91 # saves a lot of memory and pickle storage space.
91 92 _scache = {}
92 93 def scache(s):
93 94 "return a shared version of a string"
94 95 return _scache.setdefault(s, s)
95 96
96 97 ui.status(_('collecting CVS rlog\n'))
97 98
98 99 log = [] # list of logentry objects containing the CVS state
99 100
100 101 # patterns to match in CVS (r)log output, by state of use
101 102 re_00 = re.compile('RCS file: (.+)$')
102 103 re_01 = re.compile('cvs \\[r?log aborted\\]: (.+)$')
103 104 re_02 = re.compile('cvs (r?log|server): (.+)\n$')
104 105 re_03 = re.compile("(Cannot access.+CVSROOT)|"
105 106 "(can't create temporary directory.+)$")
106 107 re_10 = re.compile('Working file: (.+)$')
107 108 re_20 = re.compile('symbolic names:')
108 109 re_30 = re.compile('\t(.+): ([\\d.]+)$')
109 110 re_31 = re.compile('----------------------------$')
110 111 re_32 = re.compile('======================================='
111 112 '======================================$')
112 113 re_50 = re.compile('revision ([\\d.]+)(\s+locked by:\s+.+;)?$')
113 114 re_60 = re.compile(r'date:\s+(.+);\s+author:\s+(.+);\s+state:\s+(.+?);'
114 115 r'(\s+lines:\s+(\+\d+)?\s+(-\d+)?;)?'
115 116 r'(.*mergepoint:\s+([^;]+);)?')
116 117 re_70 = re.compile('branches: (.+);$')
117 118
118 119 file_added_re = re.compile(r'file [^/]+ was (initially )?added on branch')
119 120
120 121 prefix = '' # leading path to strip off what we get from CVS
121 122
122 123 if directory is None:
123 124 # Current working directory
124 125
125 126 # Get the real directory in the repository
126 127 try:
127 128 prefix = open(os.path.join('CVS','Repository')).read().strip()
128 129 directory = prefix
129 130 if prefix == ".":
130 131 prefix = ""
131 132 except IOError:
132 133 raise logerror(_('not a CVS sandbox'))
133 134
134 135 if prefix and not prefix.endswith(os.sep):
135 136 prefix += os.sep
136 137
137 138 # Use the Root file in the sandbox, if it exists
138 139 try:
139 140 root = open(os.path.join('CVS','Root')).read().strip()
140 141 except IOError:
141 142 pass
142 143
143 144 if not root:
144 145 root = os.environ.get('CVSROOT', '')
145 146
146 147 # read log cache if one exists
147 148 oldlog = []
148 149 date = None
149 150
150 151 if cache:
151 152 cachedir = os.path.expanduser('~/.hg.cvsps')
152 153 if not os.path.exists(cachedir):
153 154 os.mkdir(cachedir)
154 155
155 156 # The cvsps cache pickle needs a uniquified name, based on the
156 157 # repository location. The address may have all sorts of nasties
157 158 # in it, slashes, colons and such. So here we take just the
158 159 # alphanumerics, concatenated in a way that does not mix up the
159 160 # various components, so that
160 161 # :pserver:user@server:/path
161 162 # and
162 163 # /pserver/user/server/path
163 164 # are mapped to different cache file names.
164 165 cachefile = root.split(":") + [directory, "cache"]
165 166 cachefile = ['-'.join(re.findall(r'\w+', s)) for s in cachefile if s]
166 167 cachefile = os.path.join(cachedir,
167 168 '.'.join([s for s in cachefile if s]))
168 169
169 170 if cache == 'update':
170 171 try:
171 172 ui.note(_('reading cvs log cache %s\n') % cachefile)
172 173 oldlog = pickle.load(open(cachefile))
173 174 ui.note(_('cache has %d log entries\n') % len(oldlog))
174 175 except Exception, e:
175 176 ui.note(_('error reading cache: %r\n') % e)
176 177
177 178 if oldlog:
178 179 date = oldlog[-1].date # last commit date as a (time,tz) tuple
179 180 date = util.datestr(date, '%Y/%m/%d %H:%M:%S %1%2')
180 181
181 182 # build the CVS commandline
182 183 cmd = ['cvs', '-q']
183 184 if root:
184 185 cmd.append('-d%s' % root)
185 186 p = util.normpath(getrepopath(root))
186 187 if not p.endswith('/'):
187 188 p += '/'
188 189 if prefix:
189 190 # looks like normpath replaces "" by "."
190 191 prefix = p + util.normpath(prefix)
191 192 else:
192 193 prefix = p
193 194 cmd.append(['log', 'rlog'][rlog])
194 195 if date:
195 196 # no space between option and date string
196 197 cmd.append('-d>%s' % date)
197 198 cmd.append(directory)
198 199
199 200 # state machine begins here
200 201 tags = {} # dictionary of revisions on current file with their tags
201 202 branchmap = {} # mapping between branch names and revision numbers
202 203 state = 0
203 204 store = False # set when a new record can be appended
204 205
205 206 cmd = [util.shellquote(arg) for arg in cmd]
206 207 ui.note(_("running %s\n") % (' '.join(cmd)))
207 208 ui.debug("prefix=%r directory=%r root=%r\n" % (prefix, directory, root))
208 209
209 210 pfp = util.popen(' '.join(cmd))
210 211 peek = pfp.readline()
211 212 while True:
212 213 line = peek
213 214 if line == '':
214 215 break
215 216 peek = pfp.readline()
216 217 if line.endswith('\n'):
217 218 line = line[:-1]
218 219 #ui.debug('state=%d line=%r\n' % (state, line))
219 220
220 221 if state == 0:
221 222 # initial state, consume input until we see 'RCS file'
222 223 match = re_00.match(line)
223 224 if match:
224 225 rcs = match.group(1)
225 226 tags = {}
226 227 if rlog:
227 228 filename = util.normpath(rcs[:-2])
228 229 if filename.startswith(prefix):
229 230 filename = filename[len(prefix):]
230 231 if filename.startswith('/'):
231 232 filename = filename[1:]
232 233 if filename.startswith('Attic/'):
233 234 filename = filename[6:]
234 235 else:
235 236 filename = filename.replace('/Attic/', '/')
236 237 state = 2
237 238 continue
238 239 state = 1
239 240 continue
240 241 match = re_01.match(line)
241 242 if match:
242 243 raise logerror(match.group(1))
243 244 match = re_02.match(line)
244 245 if match:
245 246 raise logerror(match.group(2))
246 247 if re_03.match(line):
247 248 raise logerror(line)
248 249
249 250 elif state == 1:
250 251 # expect 'Working file' (only when using log instead of rlog)
251 252 match = re_10.match(line)
252 253 assert match, _('RCS file must be followed by working file')
253 254 filename = util.normpath(match.group(1))
254 255 state = 2
255 256
256 257 elif state == 2:
257 258 # expect 'symbolic names'
258 259 if re_20.match(line):
259 260 branchmap = {}
260 261 state = 3
261 262
262 263 elif state == 3:
263 264 # read the symbolic names and store as tags
264 265 match = re_30.match(line)
265 266 if match:
266 267 rev = [int(x) for x in match.group(2).split('.')]
267 268
268 269 # Convert magic branch number to an odd-numbered one
269 270 revn = len(rev)
270 271 if revn > 3 and (revn % 2) == 0 and rev[-2] == 0:
271 272 rev = rev[:-2] + rev[-1:]
272 273 rev = tuple(rev)
273 274
274 275 if rev not in tags:
275 276 tags[rev] = []
276 277 tags[rev].append(match.group(1))
277 278 branchmap[match.group(1)] = match.group(2)
278 279
279 280 elif re_31.match(line):
280 281 state = 5
281 282 elif re_32.match(line):
282 283 state = 0
283 284
284 285 elif state == 4:
285 286 # expecting '------' separator before first revision
286 287 if re_31.match(line):
287 288 state = 5
288 289 else:
289 290 assert not re_32.match(line), _('must have at least '
290 291 'some revisions')
291 292
292 293 elif state == 5:
293 294 # expecting revision number and possibly (ignored) lock indication
294 295 # we create the logentry here from values stored in states 0 to 4,
295 296 # as this state is re-entered for subsequent revisions of a file.
296 297 match = re_50.match(line)
297 298 assert match, _('expected revision number')
298 299 e = logentry(rcs=scache(rcs), file=scache(filename),
299 300 revision=tuple([int(x) for x in match.group(1).split('.')]),
300 301 branches=[], parent=None)
301 302 state = 6
302 303
303 304 elif state == 6:
304 305 # expecting date, author, state, lines changed
305 306 match = re_60.match(line)
306 307 assert match, _('revision must be followed by date line')
307 308 d = match.group(1)
308 309 if d[2] == '/':
309 310 # Y2K
310 311 d = '19' + d
311 312
312 313 if len(d.split()) != 3:
313 314 # cvs log dates always in GMT
314 315 d = d + ' UTC'
315 316 e.date = util.parsedate(d, ['%y/%m/%d %H:%M:%S',
316 317 '%Y/%m/%d %H:%M:%S',
317 318 '%Y-%m-%d %H:%M:%S'])
318 319 e.author = scache(match.group(2))
319 320 e.dead = match.group(3).lower() == 'dead'
320 321
321 322 if match.group(5):
322 323 if match.group(6):
323 324 e.lines = (int(match.group(5)), int(match.group(6)))
324 325 else:
325 326 e.lines = (int(match.group(5)), 0)
326 327 elif match.group(6):
327 328 e.lines = (0, int(match.group(6)))
328 329 else:
329 330 e.lines = None
330 331
331 332 if match.group(7): # cvsnt mergepoint
332 333 myrev = match.group(8).split('.')
333 334 if len(myrev) == 2: # head
334 335 e.mergepoint = 'HEAD'
335 336 else:
336 337 myrev = '.'.join(myrev[:-2] + ['0', myrev[-2]])
337 338 branches = [b for b in branchmap if branchmap[b] == myrev]
338 339 assert len(branches) == 1, 'unknown branch: %s' % e.mergepoint
339 340 e.mergepoint = branches[0]
340 341 else:
341 342 e.mergepoint = None
342 343 e.comment = []
343 344 state = 7
344 345
345 346 elif state == 7:
346 347 # read the revision numbers of branches that start at this revision
347 348 # or store the commit log message otherwise
348 349 m = re_70.match(line)
349 350 if m:
350 351 e.branches = [tuple([int(y) for y in x.strip().split('.')])
351 352 for x in m.group(1).split(';')]
352 353 state = 8
353 354 elif re_31.match(line) and re_50.match(peek):
354 355 state = 5
355 356 store = True
356 357 elif re_32.match(line):
357 358 state = 0
358 359 store = True
359 360 else:
360 361 e.comment.append(line)
361 362
362 363 elif state == 8:
363 364 # store commit log message
364 365 if re_31.match(line):
365 366 state = 5
366 367 store = True
367 368 elif re_32.match(line):
368 369 state = 0
369 370 store = True
370 371 else:
371 372 e.comment.append(line)
372 373
373 374 # When a file is added on a branch B1, CVS creates a synthetic
374 375 # dead trunk revision 1.1 so that the branch has a root.
375 376 # Likewise, if you merge such a file to a later branch B2 (one
376 377 # that already existed when the file was added on B1), CVS
377 378 # creates a synthetic dead revision 1.1.x.1 on B2. Don't drop
378 379 # these revisions now, but mark them synthetic so
379 380 # createchangeset() can take care of them.
380 381 if (store and
381 382 e.dead and
382 383 e.revision[-1] == 1 and # 1.1 or 1.1.x.1
383 384 len(e.comment) == 1 and
384 385 file_added_re.match(e.comment[0])):
385 386 ui.debug('found synthetic revision in %s: %r\n'
386 387 % (e.rcs, e.comment[0]))
387 388 e.synthetic = True
388 389
389 390 if store:
390 391 # clean up the results and save in the log.
391 392 store = False
392 393 e.tags = sorted([scache(x) for x in tags.get(e.revision, [])])
393 394 e.comment = scache('\n'.join(e.comment))
394 395
395 396 revn = len(e.revision)
396 397 if revn > 3 and (revn % 2) == 0:
397 398 e.branch = tags.get(e.revision[:-1], [None])[0]
398 399 else:
399 400 e.branch = None
400 401
401 402 # find the branches starting from this revision
402 403 branchpoints = set()
403 404 for branch, revision in branchmap.iteritems():
404 405 revparts = tuple([int(i) for i in revision.split('.')])
405 406 if len(revparts) < 2: # bad tags
406 407 continue
407 408 if revparts[-2] == 0 and revparts[-1] % 2 == 0:
408 409 # normal branch
409 410 if revparts[:-2] == e.revision:
410 411 branchpoints.add(branch)
411 412 elif revparts == (1, 1, 1): # vendor branch
412 413 if revparts in e.branches:
413 414 branchpoints.add(branch)
414 415 e.branchpoints = branchpoints
415 416
416 417 log.append(e)
417 418
418 419 if len(log) % 100 == 0:
419 420 ui.status(util.ellipsis('%d %s' % (len(log), e.file), 80)+'\n')
420 421
421 422 log.sort(key=lambda x: (x.rcs, x.revision))
422 423
423 424 # find parent revisions of individual files
424 425 versions = {}
425 426 for e in log:
426 427 branch = e.revision[:-1]
427 428 p = versions.get((e.rcs, branch), None)
428 429 if p is None:
429 430 p = e.revision[:-2]
430 431 e.parent = p
431 432 versions[(e.rcs, branch)] = e.revision
432 433
433 434 # update the log cache
434 435 if cache:
435 436 if log:
436 437 # join up the old and new logs
437 438 log.sort(key=lambda x: x.date)
438 439
439 440 if oldlog and oldlog[-1].date >= log[0].date:
440 441 raise logerror(_('log cache overlaps with new log entries,'
441 442 ' re-run without cache.'))
442 443
443 444 log = oldlog + log
444 445
445 446 # write the new cachefile
446 447 ui.note(_('writing cvs log cache %s\n') % cachefile)
447 448 pickle.dump(log, open(cachefile, 'w'))
448 449 else:
449 450 log = oldlog
450 451
451 452 ui.status(_('%d log entries\n') % len(log))
452 453
453 454 hook.hook(ui, None, "cvslog", True, log=log)
454 455
455 456 return log
456 457
457 458
458 459 class changeset(object):
459 460 '''Class changeset has the following attributes:
460 461 .id - integer identifying this changeset (list index)
461 462 .author - author name as CVS knows it
462 463 .branch - name of branch this changeset is on, or None
463 464 .comment - commit message
464 465 .date - the commit date as a (time,tz) tuple
465 466 .entries - list of logentry objects in this changeset
466 467 .parents - list of one or two parent changesets
467 468 .tags - list of tags on this changeset
468 469 .synthetic - from synthetic revision "file ... added on branch ..."
469 470 .mergepoint- the branch that has been merged from
470 471 (if present in rlog output)
471 472 .branchpoints- the branches that start at the current entry
472 473 '''
473 474 def __init__(self, **entries):
474 475 self.synthetic = False
475 476 self.__dict__.update(entries)
476 477
477 478 def __repr__(self):
478 479 return "<%s at 0x%x: %s>" % (self.__class__.__name__,
479 480 id(self),
480 481 getattr(self, 'id', "(no id)"))
481 482
482 483 def createchangeset(ui, log, fuzz=60, mergefrom=None, mergeto=None):
483 484 '''Convert log into changesets.'''
484 485
485 486 ui.status(_('creating changesets\n'))
486 487
487 488 # Merge changesets
488 489
489 490 log.sort(key=lambda x: (x.comment, x.author, x.branch, x.date))
490 491
491 492 changesets = []
492 493 files = set()
493 494 c = None
494 495 for i, e in enumerate(log):
495 496
496 497 # Check if log entry belongs to the current changeset or not.
497 498
498 499 # Since CVS is file centric, two different file revisions with
499 500 # different branchpoints should be treated as belonging to two
500 501 # different changesets (and the ordering is important and not
501 502 # honoured by cvsps at this point).
502 503 #
503 504 # Consider the following case:
504 505 # foo 1.1 branchpoints: [MYBRANCH]
505 506 # bar 1.1 branchpoints: [MYBRANCH, MYBRANCH2]
506 507 #
507 508 # Here foo is only part of MYBRANCH, but not of MYBRANCH2, e.g. a
508 509 # later version of foo may be in MYBRANCH2, so foo should be the
509 510 # first changeset and bar the next and MYBRANCH and MYBRANCH2
510 511 # should both start off of the bar changeset. No provisions are
511 512 # made to ensure that this is, in fact, what happens.
512 513 if not (c and
513 514 e.comment == c.comment and
514 515 e.author == c.author and
515 516 e.branch == c.branch and
516 (not hasattr(e, 'branchpoints') or
517 not hasattr (c, 'branchpoints') or
517 (not util.safehasattr(e, 'branchpoints') or
518 not util.safehasattr (c, 'branchpoints') or
518 519 e.branchpoints == c.branchpoints) and
519 520 ((c.date[0] + c.date[1]) <=
520 521 (e.date[0] + e.date[1]) <=
521 522 (c.date[0] + c.date[1]) + fuzz) and
522 523 e.file not in files):
523 524 c = changeset(comment=e.comment, author=e.author,
524 525 branch=e.branch, date=e.date, entries=[],
525 526 mergepoint=getattr(e, 'mergepoint', None),
526 527 branchpoints=getattr(e, 'branchpoints', set()))
527 528 changesets.append(c)
528 529 files = set()
529 530 if len(changesets) % 100 == 0:
530 531 t = '%d %s' % (len(changesets), repr(e.comment)[1:-1])
531 532 ui.status(util.ellipsis(t, 80) + '\n')
532 533
533 534 c.entries.append(e)
534 535 files.add(e.file)
535 536 c.date = e.date # changeset date is date of latest commit in it
536 537
537 538 # Mark synthetic changesets
538 539
539 540 for c in changesets:
540 541 # Synthetic revisions always get their own changeset, because
541 542 # the log message includes the filename. E.g. if you add file3
542 543 # and file4 on a branch, you get four log entries and three
543 544 # changesets:
544 545 # "File file3 was added on branch ..." (synthetic, 1 entry)
545 546 # "File file4 was added on branch ..." (synthetic, 1 entry)
546 547 # "Add file3 and file4 to fix ..." (real, 2 entries)
547 548 # Hence the check for 1 entry here.
548 549 c.synthetic = len(c.entries) == 1 and c.entries[0].synthetic
549 550
550 551 # Sort files in each changeset
551 552
552 553 for c in changesets:
553 554 def pathcompare(l, r):
554 555 'Mimic cvsps sorting order'
555 556 l = l.split('/')
556 557 r = r.split('/')
557 558 nl = len(l)
558 559 nr = len(r)
559 560 n = min(nl, nr)
560 561 for i in range(n):
561 562 if i + 1 == nl and nl < nr:
562 563 return -1
563 564 elif i + 1 == nr and nl > nr:
564 565 return +1
565 566 elif l[i] < r[i]:
566 567 return -1
567 568 elif l[i] > r[i]:
568 569 return +1
569 570 return 0
570 571 def entitycompare(l, r):
571 572 return pathcompare(l.file, r.file)
572 573
573 574 c.entries.sort(entitycompare)
574 575
575 576 # Sort changesets by date
576 577
577 578 def cscmp(l, r):
578 579 d = sum(l.date) - sum(r.date)
579 580 if d:
580 581 return d
581 582
582 583 # detect vendor branches and initial commits on a branch
583 584 le = {}
584 585 for e in l.entries:
585 586 le[e.rcs] = e.revision
586 587 re = {}
587 588 for e in r.entries:
588 589 re[e.rcs] = e.revision
589 590
590 591 d = 0
591 592 for e in l.entries:
592 593 if re.get(e.rcs, None) == e.parent:
593 594 assert not d
594 595 d = 1
595 596 break
596 597
597 598 for e in r.entries:
598 599 if le.get(e.rcs, None) == e.parent:
599 600 assert not d
600 601 d = -1
601 602 break
602 603
603 604 return d
604 605
605 606 changesets.sort(cscmp)
606 607
607 608 # Collect tags
608 609
609 610 globaltags = {}
610 611 for c in changesets:
611 612 for e in c.entries:
612 613 for tag in e.tags:
613 614 # remember which is the latest changeset to have this tag
614 615 globaltags[tag] = c
615 616
616 617 for c in changesets:
617 618 tags = set()
618 619 for e in c.entries:
619 620 tags.update(e.tags)
620 621 # remember tags only if this is the latest changeset to have it
621 622 c.tags = sorted(tag for tag in tags if globaltags[tag] is c)
622 623
623 624 # Find parent changesets, handle {{mergetobranch BRANCHNAME}}
624 625 # by inserting dummy changesets with two parents, and handle
625 626 # {{mergefrombranch BRANCHNAME}} by setting two parents.
626 627
627 628 if mergeto is None:
628 629 mergeto = r'{{mergetobranch ([-\w]+)}}'
629 630 if mergeto:
630 631 mergeto = re.compile(mergeto)
631 632
632 633 if mergefrom is None:
633 634 mergefrom = r'{{mergefrombranch ([-\w]+)}}'
634 635 if mergefrom:
635 636 mergefrom = re.compile(mergefrom)
636 637
637 638 versions = {} # changeset index where we saw any particular file version
638 639 branches = {} # changeset index where we saw a branch
639 640 n = len(changesets)
640 641 i = 0
641 642 while i < n:
642 643 c = changesets[i]
643 644
644 645 for f in c.entries:
645 646 versions[(f.rcs, f.revision)] = i
646 647
647 648 p = None
648 649 if c.branch in branches:
649 650 p = branches[c.branch]
650 651 else:
651 652 # first changeset on a new branch
652 653 # the parent is a changeset with the branch in its
653 654 # branchpoints such that it is the latest possible
654 655 # commit without any intervening, unrelated commits.
655 656
656 657 for candidate in xrange(i):
657 658 if c.branch not in changesets[candidate].branchpoints:
658 659 if p is not None:
659 660 break
660 661 continue
661 662 p = candidate
662 663
663 664 c.parents = []
664 665 if p is not None:
665 666 p = changesets[p]
666 667
667 668 # Ensure no changeset has a synthetic changeset as a parent.
668 669 while p.synthetic:
669 670 assert len(p.parents) <= 1, \
670 671 _('synthetic changeset cannot have multiple parents')
671 672 if p.parents:
672 673 p = p.parents[0]
673 674 else:
674 675 p = None
675 676 break
676 677
677 678 if p is not None:
678 679 c.parents.append(p)
679 680
680 681 if c.mergepoint:
681 682 if c.mergepoint == 'HEAD':
682 683 c.mergepoint = None
683 684 c.parents.append(changesets[branches[c.mergepoint]])
684 685
685 686 if mergefrom:
686 687 m = mergefrom.search(c.comment)
687 688 if m:
688 689 m = m.group(1)
689 690 if m == 'HEAD':
690 691 m = None
691 692 try:
692 693 candidate = changesets[branches[m]]
693 694 except KeyError:
694 695 ui.warn(_("warning: CVS commit message references "
695 696 "non-existent branch %r:\n%s\n")
696 697 % (m, c.comment))
697 698 if m in branches and c.branch != m and not candidate.synthetic:
698 699 c.parents.append(candidate)
699 700
700 701 if mergeto:
701 702 m = mergeto.search(c.comment)
702 703 if m:
703 704 try:
704 705 m = m.group(1)
705 706 if m == 'HEAD':
706 707 m = None
707 708 except:
708 709 m = None # if no group found then merge to HEAD
709 710 if m in branches and c.branch != m:
710 711 # insert empty changeset for merge
711 712 cc = changeset(
712 713 author=c.author, branch=m, date=c.date,
713 714 comment='convert-repo: CVS merge from branch %s'
714 715 % c.branch,
715 716 entries=[], tags=[],
716 717 parents=[changesets[branches[m]], c])
717 718 changesets.insert(i + 1, cc)
718 719 branches[m] = i + 1
719 720
720 721 # adjust our loop counters now we have inserted a new entry
721 722 n += 1
722 723 i += 2
723 724 continue
724 725
725 726 branches[c.branch] = i
726 727 i += 1
727 728
728 729 # Drop synthetic changesets (safe now that we have ensured no other
729 730 # changesets can have them as parents).
730 731 i = 0
731 732 while i < len(changesets):
732 733 if changesets[i].synthetic:
733 734 del changesets[i]
734 735 else:
735 736 i += 1
736 737
737 738 # Number changesets
738 739
739 740 for i, c in enumerate(changesets):
740 741 c.id = i + 1
741 742
742 743 ui.status(_('%d changeset entries\n') % len(changesets))
743 744
744 745 hook.hook(ui, None, "cvschangesets", True, changesets=changesets)
745 746
746 747 return changesets
747 748
748 749
749 750 def debugcvsps(ui, *args, **opts):
750 751 '''Read CVS rlog for current directory or named path in
751 752 repository, and convert the log to changesets based on matching
752 753 commit log entries and dates.
753 754 '''
754 755 if opts["new_cache"]:
755 756 cache = "write"
756 757 elif opts["update_cache"]:
757 758 cache = "update"
758 759 else:
759 760 cache = None
760 761
761 762 revisions = opts["revisions"]
762 763
763 764 try:
764 765 if args:
765 766 log = []
766 767 for d in args:
767 768 log += createlog(ui, d, root=opts["root"], cache=cache)
768 769 else:
769 770 log = createlog(ui, root=opts["root"], cache=cache)
770 771 except logerror, e:
771 772 ui.write("%r\n"%e)
772 773 return
773 774
774 775 changesets = createchangeset(ui, log, opts["fuzz"])
775 776 del log
776 777
777 778 # Print changesets (optionally filtered)
778 779
779 780 off = len(revisions)
780 781 branches = {} # latest version number in each branch
781 782 ancestors = {} # parent branch
782 783 for cs in changesets:
783 784
784 785 if opts["ancestors"]:
785 786 if cs.branch not in branches and cs.parents and cs.parents[0].id:
786 787 ancestors[cs.branch] = (changesets[cs.parents[0].id - 1].branch,
787 788 cs.parents[0].id)
788 789 branches[cs.branch] = cs.id
789 790
790 791 # limit by branches
791 792 if opts["branches"] and (cs.branch or 'HEAD') not in opts["branches"]:
792 793 continue
793 794
794 795 if not off:
795 796 # Note: trailing spaces on several lines here are needed to have
796 797 # bug-for-bug compatibility with cvsps.
797 798 ui.write('---------------------\n')
798 799 ui.write('PatchSet %d \n' % cs.id)
799 800 ui.write('Date: %s\n' % util.datestr(cs.date,
800 801 '%Y/%m/%d %H:%M:%S %1%2'))
801 802 ui.write('Author: %s\n' % cs.author)
802 803 ui.write('Branch: %s\n' % (cs.branch or 'HEAD'))
803 804 ui.write('Tag%s: %s \n' % (['', 's'][len(cs.tags) > 1],
804 805 ','.join(cs.tags) or '(none)'))
805 806 branchpoints = getattr(cs, 'branchpoints', None)
806 807 if branchpoints:
807 808 ui.write('Branchpoints: %s \n' % ', '.join(branchpoints))
808 809 if opts["parents"] and cs.parents:
809 810 if len(cs.parents) > 1:
810 811 ui.write('Parents: %s\n' %
811 812 (','.join([str(p.id) for p in cs.parents])))
812 813 else:
813 814 ui.write('Parent: %d\n' % cs.parents[0].id)
814 815
815 816 if opts["ancestors"]:
816 817 b = cs.branch
817 818 r = []
818 819 while b:
819 820 b, c = ancestors[b]
820 821 r.append('%s:%d:%d' % (b or "HEAD", c, branches[b]))
821 822 if r:
822 823 ui.write('Ancestors: %s\n' % (','.join(r)))
823 824
824 825 ui.write('Log:\n')
825 826 ui.write('%s\n\n' % cs.comment)
826 827 ui.write('Members: \n')
827 828 for f in cs.entries:
828 829 fn = f.file
829 830 if fn.startswith(opts["prefix"]):
830 831 fn = fn[len(opts["prefix"]):]
831 832 ui.write('\t%s:%s->%s%s \n' % (
832 833 fn, '.'.join([str(x) for x in f.parent]) or 'INITIAL',
833 834 '.'.join([str(x) for x in f.revision]),
834 835 ['', '(DEAD)'][f.dead]))
835 836 ui.write('\n')
836 837
837 838 # have we seen the start tag?
838 839 if revisions and off:
839 840 if revisions[0] == str(cs.id) or \
840 841 revisions[0] in cs.tags:
841 842 off = False
842 843
843 844 # see if we reached the end tag
844 845 if len(revisions) > 1 and not off:
845 846 if revisions[1] == str(cs.id) or \
846 847 revisions[1] in cs.tags:
847 848 break
@@ -1,205 +1,205 @@
1 1 # git.py - git support for the convert extension
2 2 #
3 3 # Copyright 2005-2009 Matt Mackall <mpm@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 import os
9 9 from mercurial import util
10 10 from mercurial.node import hex, nullid
11 11 from mercurial.i18n import _
12 12
13 13 from common import NoRepo, commit, converter_source, checktool
14 14
15 15 class convert_git(converter_source):
16 16 # Windows does not support the GIT_DIR= construct while other systems
17 17 # cannot remove an environment variable. Just assume no system has
18 18 # both issues.
19 if hasattr(os, 'unsetenv'):
19 if util.safehasattr(os, 'unsetenv'):
20 20 def gitopen(self, s, noerr=False):
21 21 prevgitdir = os.environ.get('GIT_DIR')
22 22 os.environ['GIT_DIR'] = self.path
23 23 try:
24 24 if noerr:
25 25 (stdin, stdout, stderr) = util.popen3(s)
26 26 return stdout
27 27 else:
28 28 return util.popen(s, 'rb')
29 29 finally:
30 30 if prevgitdir is None:
31 31 del os.environ['GIT_DIR']
32 32 else:
33 33 os.environ['GIT_DIR'] = prevgitdir
34 34 else:
35 35 def gitopen(self, s, noerr=False):
36 36 if noerr:
37 37 (sin, so, se) = util.popen3('GIT_DIR=%s %s' % (self.path, s))
38 38 return so
39 39 else:
40 40 return util.popen('GIT_DIR=%s %s' % (self.path, s), 'rb')
41 41
42 42 def gitread(self, s):
43 43 fh = self.gitopen(s)
44 44 data = fh.read()
45 45 return data, fh.close()
46 46
47 47 def __init__(self, ui, path, rev=None):
48 48 super(convert_git, self).__init__(ui, path, rev=rev)
49 49
50 50 if os.path.isdir(path + "/.git"):
51 51 path += "/.git"
52 52 if not os.path.exists(path + "/objects"):
53 53 raise NoRepo(_("%s does not look like a Git repository") % path)
54 54
55 55 checktool('git', 'git')
56 56
57 57 self.path = path
58 58
59 59 def getheads(self):
60 60 if not self.rev:
61 61 heads, ret = self.gitread('git rev-parse --branches --remotes')
62 62 heads = heads.splitlines()
63 63 else:
64 64 heads, ret = self.gitread("git rev-parse --verify %s" % self.rev)
65 65 heads = [heads[:-1]]
66 66 if ret:
67 67 raise util.Abort(_('cannot retrieve git heads'))
68 68 return heads
69 69
70 70 def catfile(self, rev, type):
71 71 if rev == hex(nullid):
72 72 raise IOError()
73 73 data, ret = self.gitread("git cat-file %s %s" % (type, rev))
74 74 if ret:
75 75 raise util.Abort(_('cannot read %r object at %s') % (type, rev))
76 76 return data
77 77
78 78 def getfile(self, name, rev):
79 79 data = self.catfile(rev, "blob")
80 80 mode = self.modecache[(name, rev)]
81 81 return data, mode
82 82
83 83 def getchanges(self, version):
84 84 self.modecache = {}
85 85 fh = self.gitopen("git diff-tree -z --root -m -r %s" % version)
86 86 changes = []
87 87 seen = set()
88 88 entry = None
89 89 for l in fh.read().split('\x00'):
90 90 if not entry:
91 91 if not l.startswith(':'):
92 92 continue
93 93 entry = l
94 94 continue
95 95 f = l
96 96 if f not in seen:
97 97 seen.add(f)
98 98 entry = entry.split()
99 99 h = entry[3]
100 100 p = (entry[1] == "100755")
101 101 s = (entry[1] == "120000")
102 102 self.modecache[(f, h)] = (p and "x") or (s and "l") or ""
103 103 changes.append((f, h))
104 104 entry = None
105 105 if fh.close():
106 106 raise util.Abort(_('cannot read changes in %s') % version)
107 107 return (changes, {})
108 108
109 109 def getcommit(self, version):
110 110 c = self.catfile(version, "commit") # read the commit hash
111 111 end = c.find("\n\n")
112 112 message = c[end + 2:]
113 113 message = self.recode(message)
114 114 l = c[:end].splitlines()
115 115 parents = []
116 116 author = committer = None
117 117 for e in l[1:]:
118 118 n, v = e.split(" ", 1)
119 119 if n == "author":
120 120 p = v.split()
121 121 tm, tz = p[-2:]
122 122 author = " ".join(p[:-2])
123 123 if author[0] == "<": author = author[1:-1]
124 124 author = self.recode(author)
125 125 if n == "committer":
126 126 p = v.split()
127 127 tm, tz = p[-2:]
128 128 committer = " ".join(p[:-2])
129 129 if committer[0] == "<": committer = committer[1:-1]
130 130 committer = self.recode(committer)
131 131 if n == "parent":
132 132 parents.append(v)
133 133
134 134 if committer and committer != author:
135 135 message += "\ncommitter: %s\n" % committer
136 136 tzs, tzh, tzm = tz[-5:-4] + "1", tz[-4:-2], tz[-2:]
137 137 tz = -int(tzs) * (int(tzh) * 3600 + int(tzm))
138 138 date = tm + " " + str(tz)
139 139
140 140 c = commit(parents=parents, date=date, author=author, desc=message,
141 141 rev=version)
142 142 return c
143 143
144 144 def gettags(self):
145 145 tags = {}
146 146 fh = self.gitopen('git ls-remote --tags "%s"' % self.path)
147 147 prefix = 'refs/tags/'
148 148 for line in fh:
149 149 line = line.strip()
150 150 if not line.endswith("^{}"):
151 151 continue
152 152 node, tag = line.split(None, 1)
153 153 if not tag.startswith(prefix):
154 154 continue
155 155 tag = tag[len(prefix):-3]
156 156 tags[tag] = node
157 157 if fh.close():
158 158 raise util.Abort(_('cannot read tags from %s') % self.path)
159 159
160 160 return tags
161 161
162 162 def getchangedfiles(self, version, i):
163 163 changes = []
164 164 if i is None:
165 165 fh = self.gitopen("git diff-tree --root -m -r %s" % version)
166 166 for l in fh:
167 167 if "\t" not in l:
168 168 continue
169 169 m, f = l[:-1].split("\t")
170 170 changes.append(f)
171 171 else:
172 172 fh = self.gitopen('git diff-tree --name-only --root -r %s "%s^%s" --'
173 173 % (version, version, i + 1))
174 174 changes = [f.rstrip('\n') for f in fh]
175 175 if fh.close():
176 176 raise util.Abort(_('cannot read changes in %s') % version)
177 177
178 178 return changes
179 179
180 180 def getbookmarks(self):
181 181 bookmarks = {}
182 182
183 183 # Interesting references in git are prefixed
184 184 prefix = 'refs/heads/'
185 185 prefixlen = len(prefix)
186 186
187 188 # factor out the two commands
188 188 gitcmd = { 'remote/': 'git ls-remote --heads origin',
189 189 '': 'git show-ref'}
190 190
191 191 # Origin heads
192 192 for reftype in gitcmd:
193 193 try:
194 194 fh = self.gitopen(gitcmd[reftype], noerr=True)
195 195 for line in fh:
196 196 line = line.strip()
197 197 rev, name = line.split(None, 1)
198 198 if not name.startswith(prefix):
199 199 continue
200 200 name = '%s%s' % (reftype, name[prefixlen:])
201 201 bookmarks[name] = rev
202 202 except:
203 203 pass
204 204
205 205 return bookmarks
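The git.py hunk moves the feature test to util.safehasattr while keeping an unusual but deliberate shape: the if runs in the class body, so the right gitopen is chosen once at import time rather than on every call. A sketch of that pattern under an assumed helper class name:

```python
import os

_notset = object()

def safehasattr(thing, attr):
    return getattr(thing, attr, _notset) is not _notset

class runner(object):
    # Evaluated while the class body executes: every instance ends up with
    # the one method appropriate for this platform, with no per-call check.
    if safehasattr(os, 'unsetenv'):
        def gitopen(self, cmd):
            return 'set GIT_DIR in os.environ, restore it in finally'
    else:
        def gitopen(self, cmd):
            return 'prefix the command string with GIT_DIR=... instead'

assert runner().gitopen('git show-ref')
```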
@@ -1,128 +1,128 @@
1 1 # -*- coding: utf-8 -*-
2 2
3 3 # Copyright (C) 2007 Daniel Holth <dholth@fastmail.fm>
4 4 # This is a stripped-down version of the original bzr-svn transport.py,
5 5 # Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>
6 6
7 7 # This program is free software; you can redistribute it and/or modify
8 8 # it under the terms of the GNU General Public License as published by
9 9 # the Free Software Foundation; either version 2 of the License, or
10 10 # (at your option) any later version.
11 11
12 12 # This program is distributed in the hope that it will be useful,
13 13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 15 # GNU General Public License for more details.
16 16
17 17 # You should have received a copy of the GNU General Public License
18 18 # along with this program; if not, write to the Free Software
19 19 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 20
21 21 from svn.core import SubversionException, Pool
22 22 import svn.ra
23 23 import svn.client
24 24 import svn.core
25 25
26 26 # Some older versions of the Python bindings need to be
27 27 # explicitly initialized. But what we want to do probably
28 28 # won't work worth a darn against those libraries anyway!
29 29 svn.ra.initialize()
30 30
31 31 svn_config = svn.core.svn_config_get_config(None)
32 32
33 33
34 34 def _create_auth_baton(pool):
35 35 """Create a Subversion authentication baton. """
36 36 import svn.client
37 37 # Give the client context baton a suite of authentication
38 38 # providers.
39 39 providers = [
40 40 svn.client.get_simple_provider(pool),
41 41 svn.client.get_username_provider(pool),
42 42 svn.client.get_ssl_client_cert_file_provider(pool),
43 43 svn.client.get_ssl_client_cert_pw_file_provider(pool),
44 44 svn.client.get_ssl_server_trust_file_provider(pool),
45 45 ]
46 46 # Platform-dependent authentication methods
47 47 getprovider = getattr(svn.core, 'svn_auth_get_platform_specific_provider',
48 48 None)
49 49 if getprovider:
50 50 # Available in svn >= 1.6
51 51 for name in ('gnome_keyring', 'keychain', 'kwallet', 'windows'):
52 52 for type in ('simple', 'ssl_client_cert_pw', 'ssl_server_trust'):
53 53 p = getprovider(name, type, pool)
54 54 if p:
55 55 providers.append(p)
56 56 else:
57 if hasattr(svn.client, 'get_windows_simple_provider'):
57 if util.safehasattr(svn.client, 'get_windows_simple_provider'):
58 58 providers.append(svn.client.get_windows_simple_provider(pool))
59 59
60 60 return svn.core.svn_auth_open(providers, pool)
61 61
62 62 class NotBranchError(SubversionException):
63 63 pass
64 64
65 65 class SvnRaTransport(object):
66 66 """
67 67 Open an ra connection to a Subversion repository.
68 68 """
69 69 def __init__(self, url="", ra=None):
70 70 self.pool = Pool()
71 71 self.svn_url = url
72 72 self.username = ''
73 73 self.password = ''
74 74
75 75 # Only Subversion 1.4 has reparent()
76 if ra is None or not hasattr(svn.ra, 'reparent'):
76 if ra is None or not util.safehasattr(svn.ra, 'reparent'):
77 77 self.client = svn.client.create_context(self.pool)
78 78 ab = _create_auth_baton(self.pool)
79 79 if False:
80 80 svn.core.svn_auth_set_parameter(
81 81 ab, svn.core.SVN_AUTH_PARAM_DEFAULT_USERNAME, self.username)
82 82 svn.core.svn_auth_set_parameter(
83 83 ab, svn.core.SVN_AUTH_PARAM_DEFAULT_PASSWORD, self.password)
84 84 self.client.auth_baton = ab
85 85 self.client.config = svn_config
86 86 try:
87 87 self.ra = svn.client.open_ra_session(
88 88 self.svn_url.encode('utf8'),
89 89 self.client, self.pool)
90 90 except SubversionException, (inst, num):
91 91 if num in (svn.core.SVN_ERR_RA_ILLEGAL_URL,
92 92 svn.core.SVN_ERR_RA_LOCAL_REPOS_OPEN_FAILED,
93 93 svn.core.SVN_ERR_BAD_URL):
94 94 raise NotBranchError(url)
95 95 raise
96 96 else:
97 97 self.ra = ra
98 98 svn.ra.reparent(self.ra, self.svn_url.encode('utf8'))
99 99
100 100 class Reporter(object):
101 101 def __init__(self, reporter_data):
102 102 self._reporter, self._baton = reporter_data
103 103
104 104 def set_path(self, path, revnum, start_empty, lock_token, pool=None):
105 105 svn.ra.reporter2_invoke_set_path(self._reporter, self._baton,
106 106 path, revnum, start_empty, lock_token, pool)
107 107
108 108 def delete_path(self, path, pool=None):
109 109 svn.ra.reporter2_invoke_delete_path(self._reporter, self._baton,
110 110 path, pool)
111 111
112 112 def link_path(self, path, url, revision, start_empty, lock_token,
113 113 pool=None):
114 114 svn.ra.reporter2_invoke_link_path(self._reporter, self._baton,
115 115 path, url, revision, start_empty, lock_token,
116 116 pool)
117 117
118 118 def finish_report(self, pool=None):
119 119 svn.ra.reporter2_invoke_finish_report(self._reporter,
120 120 self._baton, pool)
121 121
122 122 def abort_report(self, pool=None):
123 123 svn.ra.reporter2_invoke_abort_report(self._reporter,
124 124 self._baton, pool)
125 125
126 126 def do_update(self, revnum, path, *args, **kwargs):
127 127 return self.Reporter(svn.ra.do_update(self.ra, revnum, path,
128 128 *args, **kwargs))
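One thing to note about the transport.py hunk: it now calls util.safehasattr, but the imports visible at the top of the file do not include mercurial's util, and unlike the cvsps.py and inotify hunks this one adds no `from mercurial import util` line, so the module presumably needs that import as well. The check itself is plain feature detection: probe the binding for an API that only newer Subversion releases provide, instead of parsing version strings. A sketch with a stand-in module object:

```python
_notset = object()

def safehasattr(thing, attr):
    return getattr(thing, attr, _notset) is not _notset

class _rabinding(object):      # illustrative stand-in for the svn.ra module
    pass                       # pretend this build predates Subversion 1.4

def connect(url, ra=None):
    # Mirrors the constructor: reuse the session only when the binding
    # offers reparent(); otherwise open a fresh RA session.
    if ra is None or not safehasattr(_rabinding, 'reparent'):
        return 'open new RA session for %s' % url
    return 'reparent existing session to %s' % url

assert connect('svn://example/repo').startswith('open')
```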
@@ -1,89 +1,90 @@
1 1 # __init__.py - inotify-based status acceleration for Linux
2 2 #
3 3 # Copyright 2006, 2007, 2008 Bryan O'Sullivan <bos@serpentine.com>
4 4 # Copyright 2007, 2008 Brendan Cully <brendan@kublai.com>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''accelerate status report using Linux's inotify service'''
10 10
11 11 # todo: socket permissions
12 12
13 13 from mercurial.i18n import _
14 from mercurial import util
14 15 import server
15 16 from client import client, QueryFailed
16 17
17 18 def serve(ui, repo, **opts):
18 19 '''start an inotify server for this repository'''
19 20 server.start(ui, repo.dirstate, repo.root, opts)
20 21
21 22 def debuginotify(ui, repo, **opts):
22 23 '''debugging information for inotify extension
23 24
24 25 Prints the list of directories being watched by the inotify server.
25 26 '''
26 27 cli = client(ui, repo)
27 28 response = cli.debugquery()
28 29
29 30 ui.write(_('directories being watched:\n'))
30 31 for path in response:
31 32 ui.write((' %s/\n') % path)
32 33
33 34 def reposetup(ui, repo):
34 if not hasattr(repo, 'dirstate'):
35 if not util.safehasattr(repo, 'dirstate'):
35 36 return
36 37
37 38 class inotifydirstate(repo.dirstate.__class__):
38 39
39 40 # We'll set this to false after an unsuccessful attempt so that
40 41 # next calls of status() within the same instance don't try again
41 42 # to start an inotify server if it won't start.
42 43 _inotifyon = True
43 44
44 45 def status(self, match, subrepos, ignored, clean, unknown):
45 46 files = match.files()
46 47 if '.' in files:
47 48 files = []
48 49 if self._inotifyon and not ignored and not subrepos and not self._dirty:
49 50 cli = client(ui, repo)
50 51 try:
51 52 result = cli.statusquery(files, match, False,
52 53 clean, unknown)
53 54 except QueryFailed, instr:
54 55 ui.debug(str(instr))
55 56 # don't retry within the same hg instance
56 57 inotifydirstate._inotifyon = False
57 58 pass
58 59 else:
59 60 if ui.config('inotify', 'debug'):
60 61 r2 = super(inotifydirstate, self).status(
61 62 match, [], False, clean, unknown)
62 63 for c, a, b in zip('LMARDUIC', result, r2):
63 64 for f in a:
64 65 if f not in b:
65 66 ui.warn('*** inotify: %s +%s\n' % (c, f))
66 67 for f in b:
67 68 if f not in a:
68 69 ui.warn('*** inotify: %s -%s\n' % (c, f))
69 70 result = r2
70 71 return result
71 72 return super(inotifydirstate, self).status(
72 73 match, subrepos, ignored, clean, unknown)
73 74
74 75 repo.dirstate.__class__ = inotifydirstate
75 76
76 77 cmdtable = {
77 78 'debuginotify':
78 79 (debuginotify, [], ('hg debuginotify')),
79 80 '^inserve':
80 81 (serve,
81 82 [('d', 'daemon', None, _('run server in background')),
82 83 ('', 'daemon-pipefds', '',
83 84 _('used internally by daemon mode'), _('NUM')),
84 85 ('t', 'idle-timeout', '',
85 86 _('minutes to sit idle before exiting'), _('NUM')),
86 87 ('', 'pid-file', '',
87 88 _('name of file to write process ID to'), _('FILE'))],
88 89 _('hg inserve [OPTION]...')),
89 90 }
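The inotify hunk guards reposetup() because that hook runs for every repository object Mercurial constructs, and remote peers expose no dirstate to wrap. A sketch of the guard with stand-in classes (not mercurial API):

```python
_notset = object()

def safehasattr(thing, attr):
    return getattr(thing, attr, _notset) is not _notset

class localrepo(object):       # has a working directory to watch
    dirstate = object()

class httppeer(object):        # remote peer: nothing to accelerate
    pass

def reposetup(repo):
    if not safehasattr(repo, 'dirstate'):
        return 'skipped'
    return 'wrapped'

assert reposetup(localrepo()) == 'wrapped'
assert reposetup(httppeer()) == 'skipped'
```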
@@ -1,117 +1,117 @@
1 1 # pager.py - display output using a pager
2 2 #
3 3 # Copyright 2008 David Soria Parra <dsp@php.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # To load the extension, add it to your configuration file:
9 9 #
10 10 # [extensions]
11 11 # pager =
12 12 #
13 13 # Run "hg help pager" to get info on configuration.
14 14
15 15 '''browse command output with an external pager
16 16
17 17 To set the pager that should be used, set the application variable::
18 18
19 19 [pager]
20 20 pager = less -FRSX
21 21
22 22 If no pager is set, the pager extension uses the environment variable
23 23 $PAGER. If neither pager.pager nor $PAGER is set, no pager is used.
24 24
25 25 If you notice "BROKEN PIPE" error messages, you can disable them by
26 26 setting::
27 27
28 28 [pager]
29 29 quiet = True
30 30
31 31 You can disable the pager for certain commands by adding them to the
32 32 pager.ignore list::
33 33
34 34 [pager]
35 35 ignore = version, help, update
36 36
37 37 You can also enable the pager only for certain commands using
38 38 pager.attend. Below is the default list of commands to be paged::
39 39
40 40 [pager]
41 41 attend = annotate, cat, diff, export, glog, log, qdiff
42 42
43 43 Setting pager.attend to an empty value will cause all commands to be
44 44 paged.
45 45
46 46 If pager.attend is present, pager.ignore will be ignored.
47 47
48 48 To ignore global commands like :hg:`version` or :hg:`help`, you have
49 49 to specify them in your user configuration file.
50 50
51 51 The --pager=... option can also be used to control when the pager is
52 52 used. Use a boolean value like yes, no, on, off, or use auto for
53 53 normal behavior.
54 54 '''
55 55
56 56 import sys, os, signal, shlex, errno
57 57 from mercurial import commands, dispatch, util, extensions
58 58 from mercurial.i18n import _
59 59
60 60 def _runpager(p):
61 if not hasattr(os, 'fork'):
61 if not util.safehasattr(os, 'fork'):
62 62 sys.stdout = util.popen(p, 'wb')
63 63 if util.isatty(sys.stderr):
64 64 sys.stderr = sys.stdout
65 65 return
66 66 fdin, fdout = os.pipe()
67 67 pid = os.fork()
68 68 if pid == 0:
69 69 os.close(fdin)
70 70 os.dup2(fdout, sys.stdout.fileno())
71 71 if util.isatty(sys.stderr):
72 72 os.dup2(fdout, sys.stderr.fileno())
73 73 os.close(fdout)
74 74 return
75 75 os.dup2(fdin, sys.stdin.fileno())
76 76 os.close(fdin)
77 77 os.close(fdout)
78 78 try:
79 79 os.execvp('/bin/sh', ['/bin/sh', '-c', p])
80 80 except OSError, e:
81 81 if e.errno == errno.ENOENT:
82 82 # no /bin/sh, try executing the pager directly
83 83 args = shlex.split(p)
84 84 os.execvp(args[0], args)
85 85 else:
86 86 raise
87 87
88 88 def uisetup(ui):
89 89 if ui.plain() or '--debugger' in sys.argv or not util.isatty(sys.stdout):
90 90 return
91 91
92 92 def pagecmd(orig, ui, options, cmd, cmdfunc):
93 93 p = ui.config("pager", "pager", os.environ.get("PAGER"))
94 94
95 95 if p:
96 96 attend = ui.configlist('pager', 'attend', attended)
97 97 auto = options['pager'] == 'auto'
98 98 always = util.parsebool(options['pager'])
99 99 if (always or auto and
100 100 (cmd in attend or
101 101 (cmd not in ui.configlist('pager', 'ignore') and not attend))):
102 102 ui.setconfig('ui', 'formatted', ui.formatted())
103 103 ui.setconfig('ui', 'interactive', False)
104 104 _runpager(p)
105 105 if ui.configbool('pager', 'quiet'):
106 106 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
107 107 return orig(ui, options, cmd, cmdfunc)
108 108
109 109 extensions.wrapfunction(dispatch, '_runcommand', pagecmd)
110 110
111 111 def extsetup(ui):
112 112 commands.globalopts.append(
113 113 ('', 'pager', 'auto',
114 114 _("when to paginate (boolean, always, auto, or never)"),
115 115 _('TYPE')))
116 116
117 117 attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
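_runpager() needs os.fork to splice the pager in front of the running command, and fork is exactly the kind of attribute that varies by platform, hence the safehasattr probe. A sketch of the two strategies the function chooses between (descriptive strings only; the real code wires file descriptors with os.pipe and os.dup2):

```python
import os

_notset = object()

def safehasattr(thing, attr):
    return getattr(thing, attr, _notset) is not _notset

def pagerstrategy():
    if not safehasattr(os, 'fork'):
        # No fork (e.g. Windows): point sys.stdout at a pipe to the pager.
        return 'sys.stdout = util.popen(pager, "wb")'
    # With fork: the child returns and keeps running hg, writing into the
    # pipe; the parent dups the read end onto stdin and execs the pager.
    return 'os.fork() + os.dup2() + os.execvp("/bin/sh", ...)'

assert pagerstrategy()
```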
@@ -1,183 +1,184 @@
1 1 # Mercurial extension to provide 'hg relink' command
2 2 #
3 3 # Copyright (C) 2007 Brendan Cully <brendan@kublai.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """recreates hardlinks between repository clones"""
9 9
10 10 from mercurial import hg, util
11 11 from mercurial.i18n import _
12 12 import os, stat
13 13
14 14 def relink(ui, repo, origin=None, **opts):
15 15 """recreate hardlinks between two repositories
16 16
17 17 When repositories are cloned locally, their data files will be
18 18 hardlinked so that they only use the space of a single repository.
19 19
20 20 Unfortunately, subsequent pulls into either repository will break
21 21 hardlinks for any files touched by the new changesets, even if
22 22 both repositories end up pulling the same changes.
23 23
24 24 Similarly, passing --rev to "hg clone" will fail to use any
25 25 hardlinks, falling back to a complete copy of the source
26 26 repository.
27 27
28 28 This command lets you recreate those hardlinks and reclaim that
29 29 wasted space.
30 30
31 31 This repository will be relinked to share space with ORIGIN, which
32 32 must be on the same local disk. If ORIGIN is omitted, looks for
33 33 "default-relink", then "default", in [paths].
34 34
35 35 Do not attempt any read operations on this repository while the
36 36 command is running. (Both repositories will be locked against
37 37 writes.)
38 38 """
39 if not hasattr(util, 'samefile') or not hasattr(util, 'samedevice'):
39 if (not util.safehasattr(util, 'samefile') or
40 not util.safehasattr(util, 'samedevice')):
40 41 raise util.Abort(_('hardlinks are not supported on this system'))
41 42 src = hg.repository(ui, ui.expandpath(origin or 'default-relink',
42 43 origin or 'default'))
43 44 if not src.local():
44 45 raise util.Abort(_('must specify local origin repository'))
45 46 ui.status(_('relinking %s to %s\n') % (src.store.path, repo.store.path))
46 47 if repo.root == src.root:
47 48 ui.status(_('there is nothing to relink\n'))
48 49 return
49 50
50 51 locallock = repo.lock()
51 52 try:
52 53 remotelock = src.lock()
53 54 try:
54 55 candidates = sorted(collect(src, ui))
55 56 targets = prune(candidates, src.store.path, repo.store.path, ui)
56 57 do_relink(src.store.path, repo.store.path, targets, ui)
57 58 finally:
58 59 remotelock.release()
59 60 finally:
60 61 locallock.release()
61 62
62 63 def collect(src, ui):
63 64 seplen = len(os.path.sep)
64 65 candidates = []
65 66 live = len(src['tip'].manifest())
66 67 # Your average repository has some files which were deleted before
67 68 # the tip revision. We account for that by assuming that there are
68 69 # 3 tracked files for every 2 live files as of the tip version of
69 70 # the repository.
70 71 #
71 72 # mozilla-central as of 2010-06-10 had a ratio of just over 7:5.
72 73 total = live * 3 // 2
73 74 src = src.store.path
74 75 pos = 0
75 76 ui.status(_("tip has %d files, estimated total number of files: %s\n")
76 77 % (live, total))
77 78 for dirpath, dirnames, filenames in os.walk(src):
78 79 dirnames.sort()
79 80 relpath = dirpath[len(src) + seplen:]
80 81 for filename in sorted(filenames):
81 82 if not filename[-2:] in ('.d', '.i'):
82 83 continue
83 84 st = os.stat(os.path.join(dirpath, filename))
84 85 if not stat.S_ISREG(st.st_mode):
85 86 continue
86 87 pos += 1
87 88 candidates.append((os.path.join(relpath, filename), st))
88 89 ui.progress(_('collecting'), pos, filename, _('files'), total)
89 90
90 91 ui.progress(_('collecting'), None)
91 92 ui.status(_('collected %d candidate storage files\n') % len(candidates))
92 93 return candidates
93 94
94 95 def prune(candidates, src, dst, ui):
95 96 def linkfilter(src, dst, st):
96 97 try:
97 98 ts = os.stat(dst)
98 99 except OSError:
99 100 # Destination doesn't have this file?
100 101 return False
101 102 if util.samefile(src, dst):
102 103 return False
103 104 if not util.samedevice(src, dst):
104 105 # No point in continuing
105 106 raise util.Abort(
106 107 _('source and destination are on different devices'))
107 108 if st.st_size != ts.st_size:
108 109 return False
109 110 return st
110 111
111 112 targets = []
112 113 total = len(candidates)
113 114 pos = 0
114 115 for fn, st in candidates:
115 116 pos += 1
116 117 srcpath = os.path.join(src, fn)
117 118 tgt = os.path.join(dst, fn)
118 119 ts = linkfilter(srcpath, tgt, st)
119 120 if not ts:
120 121 ui.debug('not linkable: %s\n' % fn)
121 122 continue
122 123 targets.append((fn, ts.st_size))
123 124 ui.progress(_('pruning'), pos, fn, _('files'), total)
124 125
125 126 ui.progress(_('pruning'), None)
126 127 ui.status(_('pruned down to %d probably relinkable files\n') % len(targets))
127 128 return targets
128 129
129 130 def do_relink(src, dst, files, ui):
130 131 def relinkfile(src, dst):
131 132 bak = dst + '.bak'
132 133 os.rename(dst, bak)
133 134 try:
134 135 util.oslink(src, dst)
135 136 except OSError:
136 137 os.rename(bak, dst)
137 138 raise
138 139 os.remove(bak)
139 140
140 141 CHUNKLEN = 65536
141 142 relinked = 0
142 143 savedbytes = 0
143 144
144 145 pos = 0
145 146 total = len(files)
146 147 for f, sz in files:
147 148 pos += 1
148 149 source = os.path.join(src, f)
149 150 tgt = os.path.join(dst, f)
150 151 # Binary mode, so that read() works correctly, especially on Windows
151 152 sfp = file(source, 'rb')
152 153 dfp = file(tgt, 'rb')
153 154 sin = sfp.read(CHUNKLEN)
154 155 while sin:
155 156 din = dfp.read(CHUNKLEN)
156 157 if sin != din:
157 158 break
158 159 sin = sfp.read(CHUNKLEN)
159 160 sfp.close()
160 161 dfp.close()
161 162 if sin:
162 163 ui.debug('not linkable: %s\n' % f)
163 164 continue
164 165 try:
165 166 relinkfile(source, tgt)
166 167 ui.progress(_('relinking'), pos, f, _('files'), total)
167 168 relinked += 1
168 169 savedbytes += sz
169 170 except OSError, inst:
170 171 ui.warn('%s: %s\n' % (tgt, str(inst)))
171 172
172 173 ui.progress(_('relinking'), None)
173 174
174 175 ui.status(_('relinked %d files (%s reclaimed)\n') %
175 176 (relinked, util.bytecount(savedbytes)))
176 177
177 178 cmdtable = {
178 179 'relink': (
179 180 relink,
180 181 [],
181 182 _('[ORIGIN]')
182 183 )
183 184 }
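relink refuses to run unless util.samefile and util.samedevice exist, because both predicates are prerequisites for safe hardlinking. Conceptually (POSIX terms; mercurial's util versions also cover Windows, and these reimplementations are only a sketch):

```python
import os

def samefile(a, b):
    # Same inode on the same device: the two names are already one file.
    sa, sb = os.stat(a), os.stat(b)
    return sa.st_ino == sb.st_ino and sa.st_dev == sb.st_dev

def samedevice(a, b):
    # Hardlinks cannot cross filesystems, so the devices must match.
    return os.stat(a).st_dev == os.stat(b).st_dev
```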