##// END OF EJS Templates
extensions: change magic "shipped with hg" string...
Augie Fackler -
r29841:d5883fd0 default
parent child Browse files
Show More

The requested changes are too big and content was truncated. Show full diff

@@ -1,516 +1,516 b''
1 1 # synthrepo.py - repo synthesis
2 2 #
3 3 # Copyright 2012 Facebook
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''synthesize structurally interesting change history
9 9
10 10 This extension is useful for creating a repository with properties
11 11 that are statistically similar to an existing repository. During
12 12 analysis, a simple probability table is constructed from the history
13 13 of an existing repository. During synthesis, these properties are
14 14 reconstructed.
15 15
16 16 Properties that are analyzed and synthesized include the following:
17 17
18 18 - Lines added or removed when an existing file is modified
19 19 - Number and sizes of files added
20 20 - Number of files removed
21 21 - Line lengths
22 22 - Topological distance to parent changeset(s)
23 23 - Probability of a commit being a merge
24 24 - Probability of a newly added file being added to a new directory
25 25 - Interarrival time, and time zone, of commits
26 26 - Number of files in each directory
27 27
28 28 A few obvious properties that are not currently handled realistically:
29 29
30 30 - Merges are treated as regular commits with two parents, which is not
31 31 realistic
32 32 - Modifications are not treated as operations on hunks of lines, but
33 33 as insertions and deletions of randomly chosen single lines
34 34 - Committer ID (always random)
35 35 - Executability of files
36 36 - Symlinks and binary files are ignored
37 37 '''
38 38
39 39 from __future__ import absolute_import
40 40 import bisect
41 41 import collections
42 42 import itertools
43 43 import json
44 44 import os
45 45 import random
46 46 import sys
47 47 import time
48 48
49 49 from mercurial.i18n import _
50 50 from mercurial.node import (
51 51 nullid,
52 52 nullrev,
53 53 short,
54 54 )
55 55 from mercurial import (
56 56 cmdutil,
57 57 context,
58 58 error,
59 59 hg,
60 60 patch,
61 61 scmutil,
62 62 util,
63 63 )
64 64
65 # Note for extension authors: ONLY specify testedwith = 'internal' for
65 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
66 66 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
67 67 # be specifying the version(s) of Mercurial they are tested with, or
68 68 # leave the attribute unspecified.
69 testedwith = 'internal'
69 testedwith = 'ships-with-hg-core'
70 70
71 71 cmdtable = {}
72 72 command = cmdutil.command(cmdtable)
73 73
74 74 newfile = set(('new fi', 'rename', 'copy f', 'copy t'))
75 75
76 76 def zerodict():
77 77 return collections.defaultdict(lambda: 0)
78 78
79 79 def roundto(x, k):
80 80 if x > k * 2:
81 81 return int(round(x / float(k)) * k)
82 82 return int(round(x))
83 83
84 84 def parsegitdiff(lines):
85 85 filename, mar, lineadd, lineremove = None, None, zerodict(), 0
86 86 binary = False
87 87 for line in lines:
88 88 start = line[:6]
89 89 if start == 'diff -':
90 90 if filename:
91 91 yield filename, mar, lineadd, lineremove, binary
92 92 mar, lineadd, lineremove, binary = 'm', zerodict(), 0, False
93 93 filename = patch.gitre.match(line).group(1)
94 94 elif start in newfile:
95 95 mar = 'a'
96 96 elif start == 'GIT bi':
97 97 binary = True
98 98 elif start == 'delete':
99 99 mar = 'r'
100 100 elif start:
101 101 s = start[0]
102 102 if s == '-' and not line.startswith('--- '):
103 103 lineremove += 1
104 104 elif s == '+' and not line.startswith('+++ '):
105 105 lineadd[roundto(len(line) - 1, 5)] += 1
106 106 if filename:
107 107 yield filename, mar, lineadd, lineremove, binary
108 108
109 109 @command('analyze',
110 110 [('o', 'output', '', _('write output to given file'), _('FILE')),
111 111 ('r', 'rev', [], _('analyze specified revisions'), _('REV'))],
112 112 _('hg analyze'), optionalrepo=True)
113 113 def analyze(ui, repo, *revs, **opts):
114 114 '''create a simple model of a repository to use for later synthesis
115 115
116 116 This command examines every changeset in the given range (or all
117 117 of history if none are specified) and creates a simple statistical
118 118 model of the history of the repository. It also measures the directory
119 119 structure of the repository as checked out.
120 120
121 121 The model is written out to a JSON file, and can be used by
122 122 :hg:`synthesize` to create or augment a repository with synthetic
123 123 commits that have a structure that is statistically similar to the
124 124 analyzed repository.
125 125 '''
126 126 root = repo.root
127 127 if not root.endswith(os.path.sep):
128 128 root += os.path.sep
129 129
130 130 revs = list(revs)
131 131 revs.extend(opts['rev'])
132 132 if not revs:
133 133 revs = [':']
134 134
135 135 output = opts['output']
136 136 if not output:
137 137 output = os.path.basename(root) + '.json'
138 138
139 139 if output == '-':
140 140 fp = sys.stdout
141 141 else:
142 142 fp = open(output, 'w')
143 143
144 144 # Always obtain file counts of each directory in the given root directory.
145 145 def onerror(e):
146 146 ui.warn(_('error walking directory structure: %s\n') % e)
147 147
148 148 dirs = {}
149 149 rootprefixlen = len(root)
150 150 for dirpath, dirnames, filenames in os.walk(root, onerror=onerror):
151 151 dirpathfromroot = dirpath[rootprefixlen:]
152 152 dirs[dirpathfromroot] = len(filenames)
153 153 if '.hg' in dirnames:
154 154 dirnames.remove('.hg')
155 155
156 156 lineschanged = zerodict()
157 157 children = zerodict()
158 158 p1distance = zerodict()
159 159 p2distance = zerodict()
160 160 linesinfilesadded = zerodict()
161 161 fileschanged = zerodict()
162 162 filesadded = zerodict()
163 163 filesremoved = zerodict()
164 164 linelengths = zerodict()
165 165 interarrival = zerodict()
166 166 parents = zerodict()
167 167 dirsadded = zerodict()
168 168 tzoffset = zerodict()
169 169
170 170 # If a mercurial repo is available, also model the commit history.
171 171 if repo:
172 172 revs = scmutil.revrange(repo, revs)
173 173 revs.sort()
174 174
175 175 progress = ui.progress
176 176 _analyzing = _('analyzing')
177 177 _changesets = _('changesets')
178 178 _total = len(revs)
179 179
180 180 for i, rev in enumerate(revs):
181 181 progress(_analyzing, i, unit=_changesets, total=_total)
182 182 ctx = repo[rev]
183 183 pl = ctx.parents()
184 184 pctx = pl[0]
185 185 prev = pctx.rev()
186 186 children[prev] += 1
187 187 p1distance[rev - prev] += 1
188 188 parents[len(pl)] += 1
189 189 tzoffset[ctx.date()[1]] += 1
190 190 if len(pl) > 1:
191 191 p2distance[rev - pl[1].rev()] += 1
192 192 if prev == rev - 1:
193 193 lastctx = pctx
194 194 else:
195 195 lastctx = repo[rev - 1]
196 196 if lastctx.rev() != nullrev:
197 197 timedelta = ctx.date()[0] - lastctx.date()[0]
198 198 interarrival[roundto(timedelta, 300)] += 1
199 199 diff = sum((d.splitlines() for d in ctx.diff(pctx, git=True)), [])
200 200 fileadds, diradds, fileremoves, filechanges = 0, 0, 0, 0
201 201 for filename, mar, lineadd, lineremove, isbin in parsegitdiff(diff):
202 202 if isbin:
203 203 continue
204 204 added = sum(lineadd.itervalues(), 0)
205 205 if mar == 'm':
206 206 if added and lineremove:
207 207 lineschanged[roundto(added, 5),
208 208 roundto(lineremove, 5)] += 1
209 209 filechanges += 1
210 210 elif mar == 'a':
211 211 fileadds += 1
212 212 if '/' in filename:
213 213 filedir = filename.rsplit('/', 1)[0]
214 214 if filedir not in pctx.dirs():
215 215 diradds += 1
216 216 linesinfilesadded[roundto(added, 5)] += 1
217 217 elif mar == 'r':
218 218 fileremoves += 1
219 219 for length, count in lineadd.iteritems():
220 220 linelengths[length] += count
221 221 fileschanged[filechanges] += 1
222 222 filesadded[fileadds] += 1
223 223 dirsadded[diradds] += 1
224 224 filesremoved[fileremoves] += 1
225 225
226 226 invchildren = zerodict()
227 227
228 228 for rev, count in children.iteritems():
229 229 invchildren[count] += 1
230 230
231 231 if output != '-':
232 232 ui.status(_('writing output to %s\n') % output)
233 233
234 234 def pronk(d):
235 235 return sorted(d.iteritems(), key=lambda x: x[1], reverse=True)
236 236
237 237 json.dump({'revs': len(revs),
238 238 'initdirs': pronk(dirs),
239 239 'lineschanged': pronk(lineschanged),
240 240 'children': pronk(invchildren),
241 241 'fileschanged': pronk(fileschanged),
242 242 'filesadded': pronk(filesadded),
243 243 'linesinfilesadded': pronk(linesinfilesadded),
244 244 'dirsadded': pronk(dirsadded),
245 245 'filesremoved': pronk(filesremoved),
246 246 'linelengths': pronk(linelengths),
247 247 'parents': pronk(parents),
248 248 'p1distance': pronk(p1distance),
249 249 'p2distance': pronk(p2distance),
250 250 'interarrival': pronk(interarrival),
251 251 'tzoffset': pronk(tzoffset),
252 252 },
253 253 fp)
254 254 fp.close()
255 255
256 256 @command('synthesize',
257 257 [('c', 'count', 0, _('create given number of commits'), _('COUNT')),
258 258 ('', 'dict', '', _('path to a dictionary of words'), _('FILE')),
259 259 ('', 'initfiles', 0, _('initial file count to create'), _('COUNT'))],
260 260 _('hg synthesize [OPTION].. DESCFILE'))
261 261 def synthesize(ui, repo, descpath, **opts):
262 262 '''synthesize commits based on a model of an existing repository
263 263
264 264 The model must have been generated by :hg:`analyze`. Commits will
265 265 be generated randomly according to the probabilities described in
266 266 the model. If --initfiles is set, the repository will be seeded with
267 267 the given number files following the modeled repository's directory
268 268 structure.
269 269
270 270 When synthesizing new content, commit descriptions, and user
271 271 names, words will be chosen randomly from a dictionary that is
272 272 presumed to contain one word per line. Use --dict to specify the
273 273 path to an alternate dictionary to use.
274 274 '''
275 275 try:
276 276 fp = hg.openpath(ui, descpath)
277 277 except Exception as err:
278 278 raise error.Abort('%s: %s' % (descpath, err[0].strerror))
279 279 desc = json.load(fp)
280 280 fp.close()
281 281
282 282 def cdf(l):
283 283 if not l:
284 284 return [], []
285 285 vals, probs = zip(*sorted(l, key=lambda x: x[1], reverse=True))
286 286 t = float(sum(probs, 0))
287 287 s, cdfs = 0, []
288 288 for v in probs:
289 289 s += v
290 290 cdfs.append(s / t)
291 291 return vals, cdfs
292 292
293 293 lineschanged = cdf(desc['lineschanged'])
294 294 fileschanged = cdf(desc['fileschanged'])
295 295 filesadded = cdf(desc['filesadded'])
296 296 dirsadded = cdf(desc['dirsadded'])
297 297 filesremoved = cdf(desc['filesremoved'])
298 298 linelengths = cdf(desc['linelengths'])
299 299 parents = cdf(desc['parents'])
300 300 p1distance = cdf(desc['p1distance'])
301 301 p2distance = cdf(desc['p2distance'])
302 302 interarrival = cdf(desc['interarrival'])
303 303 linesinfilesadded = cdf(desc['linesinfilesadded'])
304 304 tzoffset = cdf(desc['tzoffset'])
305 305
306 306 dictfile = opts.get('dict') or '/usr/share/dict/words'
307 307 try:
308 308 fp = open(dictfile, 'rU')
309 309 except IOError as err:
310 310 raise error.Abort('%s: %s' % (dictfile, err.strerror))
311 311 words = fp.read().splitlines()
312 312 fp.close()
313 313
314 314 initdirs = {}
315 315 if desc['initdirs']:
316 316 for k, v in desc['initdirs']:
317 317 initdirs[k.encode('utf-8').replace('.hg', '_hg')] = v
318 318 initdirs = renamedirs(initdirs, words)
319 319 initdirscdf = cdf(initdirs)
320 320
321 321 def pick(cdf):
322 322 return cdf[0][bisect.bisect_left(cdf[1], random.random())]
323 323
324 324 def pickpath():
325 325 return os.path.join(pick(initdirscdf), random.choice(words))
326 326
327 327 def makeline(minimum=0):
328 328 total = max(minimum, pick(linelengths))
329 329 c, l = 0, []
330 330 while c < total:
331 331 w = random.choice(words)
332 332 c += len(w) + 1
333 333 l.append(w)
334 334 return ' '.join(l)
335 335
336 336 wlock = repo.wlock()
337 337 lock = repo.lock()
338 338
339 339 nevertouch = set(('.hgsub', '.hgignore', '.hgtags'))
340 340
341 341 progress = ui.progress
342 342 _synthesizing = _('synthesizing')
343 343 _files = _('initial files')
344 344 _changesets = _('changesets')
345 345
346 346 # Synthesize a single initial revision adding files to the repo according
347 347 # to the modeled directory structure.
348 348 initcount = int(opts['initfiles'])
349 349 if initcount and initdirs:
350 350 pctx = repo[None].parents()[0]
351 351 dirs = set(pctx.dirs())
352 352 files = {}
353 353
354 354 def validpath(path):
355 355 # Don't pick filenames which are already directory names.
356 356 if path in dirs:
357 357 return False
358 358 # Don't pick directories which were used as file names.
359 359 while path:
360 360 if path in files:
361 361 return False
362 362 path = os.path.dirname(path)
363 363 return True
364 364
365 365 for i in xrange(0, initcount):
366 366 ui.progress(_synthesizing, i, unit=_files, total=initcount)
367 367
368 368 path = pickpath()
369 369 while not validpath(path):
370 370 path = pickpath()
371 371 data = '%s contents\n' % path
372 372 files[path] = context.memfilectx(repo, path, data)
373 373 dir = os.path.dirname(path)
374 374 while dir and dir not in dirs:
375 375 dirs.add(dir)
376 376 dir = os.path.dirname(dir)
377 377
378 378 def filectxfn(repo, memctx, path):
379 379 return files[path]
380 380
381 381 ui.progress(_synthesizing, None)
382 382 message = 'synthesized wide repo with %d files' % (len(files),)
383 383 mc = context.memctx(repo, [pctx.node(), nullid], message,
384 384 files.iterkeys(), filectxfn, ui.username(),
385 385 '%d %d' % util.makedate())
386 386 initnode = mc.commit()
387 387 if ui.debugflag:
388 388 hexfn = hex
389 389 else:
390 390 hexfn = short
391 391 ui.status(_('added commit %s with %d files\n')
392 392 % (hexfn(initnode), len(files)))
393 393
394 394 # Synthesize incremental revisions to the repository, adding repo depth.
395 395 count = int(opts['count'])
396 396 heads = set(map(repo.changelog.rev, repo.heads()))
397 397 for i in xrange(count):
398 398 progress(_synthesizing, i, unit=_changesets, total=count)
399 399
400 400 node = repo.changelog.node
401 401 revs = len(repo)
402 402
403 403 def pickhead(heads, distance):
404 404 if heads:
405 405 lheads = sorted(heads)
406 406 rev = revs - min(pick(distance), revs)
407 407 if rev < lheads[-1]:
408 408 rev = lheads[bisect.bisect_left(lheads, rev)]
409 409 else:
410 410 rev = lheads[-1]
411 411 return rev, node(rev)
412 412 return nullrev, nullid
413 413
414 414 r1 = revs - min(pick(p1distance), revs)
415 415 p1 = node(r1)
416 416
417 417 # the number of heads will grow without bound if we use a pure
418 418 # model, so artificially constrain their proliferation
419 419 toomanyheads = len(heads) > random.randint(1, 20)
420 420 if p2distance[0] and (pick(parents) == 2 or toomanyheads):
421 421 r2, p2 = pickhead(heads.difference([r1]), p2distance)
422 422 else:
423 423 r2, p2 = nullrev, nullid
424 424
425 425 pl = [p1, p2]
426 426 pctx = repo[r1]
427 427 mf = pctx.manifest()
428 428 mfk = mf.keys()
429 429 changes = {}
430 430 if mfk:
431 431 for __ in xrange(pick(fileschanged)):
432 432 for __ in xrange(10):
433 433 fctx = pctx.filectx(random.choice(mfk))
434 434 path = fctx.path()
435 435 if not (path in nevertouch or fctx.isbinary() or
436 436 'l' in fctx.flags()):
437 437 break
438 438 lines = fctx.data().splitlines()
439 439 add, remove = pick(lineschanged)
440 440 for __ in xrange(remove):
441 441 if not lines:
442 442 break
443 443 del lines[random.randrange(0, len(lines))]
444 444 for __ in xrange(add):
445 445 lines.insert(random.randint(0, len(lines)), makeline())
446 446 path = fctx.path()
447 447 changes[path] = context.memfilectx(repo, path,
448 448 '\n'.join(lines) + '\n')
449 449 for __ in xrange(pick(filesremoved)):
450 450 path = random.choice(mfk)
451 451 for __ in xrange(10):
452 452 path = random.choice(mfk)
453 453 if path not in changes:
454 454 changes[path] = None
455 455 break
456 456 if filesadded:
457 457 dirs = list(pctx.dirs())
458 458 dirs.insert(0, '')
459 459 for __ in xrange(pick(filesadded)):
460 460 pathstr = ''
461 461 while pathstr in dirs:
462 462 path = [random.choice(dirs)]
463 463 if pick(dirsadded):
464 464 path.append(random.choice(words))
465 465 path.append(random.choice(words))
466 466 pathstr = '/'.join(filter(None, path))
467 467 data = '\n'.join(makeline()
468 468 for __ in xrange(pick(linesinfilesadded))) + '\n'
469 469 changes[pathstr] = context.memfilectx(repo, pathstr, data)
470 470 def filectxfn(repo, memctx, path):
471 471 return changes[path]
472 472 if not changes:
473 473 continue
474 474 if revs:
475 475 date = repo['tip'].date()[0] + pick(interarrival)
476 476 else:
477 477 date = time.time() - (86400 * count)
478 478 # dates in mercurial must be positive, fit in 32-bit signed integers.
479 479 date = min(0x7fffffff, max(0, date))
480 480 user = random.choice(words) + '@' + random.choice(words)
481 481 mc = context.memctx(repo, pl, makeline(minimum=2),
482 482 sorted(changes.iterkeys()),
483 483 filectxfn, user, '%d %d' % (date, pick(tzoffset)))
484 484 newnode = mc.commit()
485 485 heads.add(repo.changelog.rev(newnode))
486 486 heads.discard(r1)
487 487 heads.discard(r2)
488 488
489 489 lock.release()
490 490 wlock.release()
491 491
492 492 def renamedirs(dirs, words):
493 493 '''Randomly rename the directory names in the per-dir file count dict.'''
494 494 wordgen = itertools.cycle(words)
495 495 replacements = {'': ''}
496 496 def rename(dirpath):
497 497 '''Recursively rename the directory and all path prefixes.
498 498
499 499 The mapping from path to renamed path is stored for all path prefixes
500 500 as in dynamic programming, ensuring linear runtime and consistent
501 501 renaming regardless of iteration order through the model.
502 502 '''
503 503 if dirpath in replacements:
504 504 return replacements[dirpath]
505 505 head, _ = os.path.split(dirpath)
506 506 if head:
507 507 head = rename(head)
508 508 else:
509 509 head = ''
510 510 renamed = os.path.join(head, next(wordgen))
511 511 replacements[dirpath] = renamed
512 512 return renamed
513 513 result = []
514 514 for dirpath, count in dirs.iteritems():
515 515 result.append([rename(dirpath.lstrip(os.sep)), count])
516 516 return result
@@ -1,330 +1,330 b''
1 1 # acl.py - changeset access control for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''hooks for controlling repository access
9 9
10 10 This hook makes it possible to allow or deny write access to given
11 11 branches and paths of a repository when receiving incoming changesets
12 12 via pretxnchangegroup and pretxncommit.
13 13
14 14 The authorization is matched based on the local user name on the
15 15 system where the hook runs, and not the committer of the original
16 16 changeset (since the latter is merely informative).
17 17
18 18 The acl hook is best used along with a restricted shell like hgsh,
19 19 preventing authenticating users from doing anything other than pushing
20 20 or pulling. The hook is not safe to use if users have interactive
21 21 shell access, as they can then disable the hook. Nor is it safe if
22 22 remote users share an account, because then there is no way to
23 23 distinguish them.
24 24
25 25 The order in which access checks are performed is:
26 26
27 27 1) Deny list for branches (section ``acl.deny.branches``)
28 28 2) Allow list for branches (section ``acl.allow.branches``)
29 29 3) Deny list for paths (section ``acl.deny``)
30 30 4) Allow list for paths (section ``acl.allow``)
31 31
32 32 The allow and deny sections take key-value pairs.
33 33
34 34 Branch-based Access Control
35 35 ---------------------------
36 36
37 37 Use the ``acl.deny.branches`` and ``acl.allow.branches`` sections to
38 38 have branch-based access control. Keys in these sections can be
39 39 either:
40 40
41 41 - a branch name, or
42 42 - an asterisk, to match any branch;
43 43
44 44 The corresponding values can be either:
45 45
46 46 - a comma-separated list containing users and groups, or
47 47 - an asterisk, to match anyone;
48 48
49 49 You can add the "!" prefix to a user or group name to invert the sense
50 50 of the match.
51 51
52 52 Path-based Access Control
53 53 -------------------------
54 54
55 55 Use the ``acl.deny`` and ``acl.allow`` sections to have path-based
56 56 access control. Keys in these sections accept a subtree pattern (with
57 57 a glob syntax by default). The corresponding values follow the same
58 58 syntax as the other sections above.
59 59
60 60 Groups
61 61 ------
62 62
63 63 Group names must be prefixed with an ``@`` symbol. Specifying a group
64 64 name has the same effect as specifying all the users in that group.
65 65
66 66 You can define group members in the ``acl.groups`` section.
67 67 If a group name is not defined there, and Mercurial is running under
68 68 a Unix-like system, the list of users will be taken from the OS.
69 69 Otherwise, an exception will be raised.
70 70
71 71 Example Configuration
72 72 ---------------------
73 73
74 74 ::
75 75
76 76 [hooks]
77 77
78 78 # Use this if you want to check access restrictions at commit time
79 79 pretxncommit.acl = python:hgext.acl.hook
80 80
81 81 # Use this if you want to check access restrictions for pull, push,
82 82 # bundle and serve.
83 83 pretxnchangegroup.acl = python:hgext.acl.hook
84 84
85 85 [acl]
86 86 # Allow or deny access for incoming changes only if their source is
87 87 # listed here, let them pass otherwise. Source is "serve" for all
88 88 # remote access (http or ssh), "push", "pull" or "bundle" when the
89 89 # related commands are run locally.
90 90 # Default: serve
91 91 sources = serve
92 92
93 93 [acl.deny.branches]
94 94
95 95 # Everyone is denied to the frozen branch:
96 96 frozen-branch = *
97 97
98 98 # A bad user is denied on all branches:
99 99 * = bad-user
100 100
101 101 [acl.allow.branches]
102 102
103 103 # A few users are allowed on branch-a:
104 104 branch-a = user-1, user-2, user-3
105 105
106 106 # Only one user is allowed on branch-b:
107 107 branch-b = user-1
108 108
109 109 # The super user is allowed on any branch:
110 110 * = super-user
111 111
112 112 # Everyone is allowed on branch-for-tests:
113 113 branch-for-tests = *
114 114
115 115 [acl.deny]
116 116 # This list is checked first. If a match is found, acl.allow is not
117 117 # checked. All users are granted access if acl.deny is not present.
118 118 # Format for both lists: glob pattern = user, ..., @group, ...
119 119
120 120 # To match everyone, use an asterisk for the user:
121 121 # my/glob/pattern = *
122 122
123 123 # user6 will not have write access to any file:
124 124 ** = user6
125 125
126 126 # Group "hg-denied" will not have write access to any file:
127 127 ** = @hg-denied
128 128
129 129 # Nobody will be able to change "DONT-TOUCH-THIS.txt", despite
130 130 # everyone being able to change all other files. See below.
131 131 src/main/resources/DONT-TOUCH-THIS.txt = *
132 132
133 133 [acl.allow]
134 134 # if acl.allow is not present, all users are allowed by default
135 135 # empty acl.allow = no users allowed
136 136
137 137 # User "doc_writer" has write access to any file under the "docs"
138 138 # folder:
139 139 docs/** = doc_writer
140 140
141 141 # User "jack" and group "designers" have write access to any file
142 142 # under the "images" folder:
143 143 images/** = jack, @designers
144 144
145 145 # Everyone (except for "user6" and "@hg-denied" - see acl.deny above)
146 146 # will have write access to any file under the "resources" folder
147 147 # (except for 1 file. See acl.deny):
148 148 src/main/resources/** = *
149 149
150 150 .hgtags = release_engineer
151 151
152 152 Examples using the "!" prefix
153 153 .............................
154 154
155 155 Suppose there's a branch that only a given user (or group) should be able to
156 156 push to, and you don't want to restrict access to any other branch that may
157 157 be created.
158 158
159 159 The "!" prefix allows you to prevent anyone except a given user or group to
160 160 push changesets in a given branch or path.
161 161
162 162 In the examples below, we will:
163 163 1) Deny access to branch "ring" to anyone but user "gollum"
164 164 2) Deny access to branch "lake" to anyone but members of the group "hobbit"
165 165 3) Deny access to a file to anyone but user "gollum"
166 166
167 167 ::
168 168
169 169 [acl.allow.branches]
170 170 # Empty
171 171
172 172 [acl.deny.branches]
173 173
174 174 # 1) only 'gollum' can commit to branch 'ring';
175 175 # 'gollum' and anyone else can still commit to any other branch.
176 176 ring = !gollum
177 177
178 178 # 2) only members of the group 'hobbit' can commit to branch 'lake';
179 179 # 'hobbit' members and anyone else can still commit to any other branch.
180 180 lake = !@hobbit
181 181
182 182 # You can also deny access based on file paths:
183 183
184 184 [acl.allow]
185 185 # Empty
186 186
187 187 [acl.deny]
188 188 # 3) only 'gollum' can change the file below;
189 189 # 'gollum' and anyone else can still change any other file.
190 190 /misty/mountains/cave/ring = !gollum
191 191
192 192 '''
193 193
194 194 from __future__ import absolute_import
195 195
196 196 import getpass
197 197
198 198 from mercurial.i18n import _
199 199 from mercurial import (
200 200 error,
201 201 match,
202 202 util,
203 203 )
204 204
205 205 urlreq = util.urlreq
206 206
207 # Note for extension authors: ONLY specify testedwith = 'internal' for
207 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
208 208 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
209 209 # be specifying the version(s) of Mercurial they are tested with, or
210 210 # leave the attribute unspecified.
211 testedwith = 'internal'
211 testedwith = 'ships-with-hg-core'
212 212
213 213 def _getusers(ui, group):
214 214
215 215 # First, try to use group definition from section [acl.groups]
216 216 hgrcusers = ui.configlist('acl.groups', group)
217 217 if hgrcusers:
218 218 return hgrcusers
219 219
220 220 ui.debug('acl: "%s" not defined in [acl.groups]\n' % group)
221 221 # If no users found in group definition, get users from OS-level group
222 222 try:
223 223 return util.groupmembers(group)
224 224 except KeyError:
225 225 raise error.Abort(_("group '%s' is undefined") % group)
226 226
227 227 def _usermatch(ui, user, usersorgroups):
228 228
229 229 if usersorgroups == '*':
230 230 return True
231 231
232 232 for ug in usersorgroups.replace(',', ' ').split():
233 233
234 234 if ug.startswith('!'):
235 235 # Test for excluded user or group. Format:
236 236 # if ug is a user name: !username
237 237 # if ug is a group name: !@groupname
238 238 ug = ug[1:]
239 239 if not ug.startswith('@') and user != ug \
240 240 or ug.startswith('@') and user not in _getusers(ui, ug[1:]):
241 241 return True
242 242
243 243 # Test for user or group. Format:
244 244 # if ug is a user name: username
245 245 # if ug is a group name: @groupname
246 246 elif user == ug \
247 247 or ug.startswith('@') and user in _getusers(ui, ug[1:]):
248 248 return True
249 249
250 250 return False
251 251
252 252 def buildmatch(ui, repo, user, key):
253 253 '''return tuple of (match function, list enabled).'''
254 254 if not ui.has_section(key):
255 255 ui.debug('acl: %s not enabled\n' % key)
256 256 return None
257 257
258 258 pats = [pat for pat, users in ui.configitems(key)
259 259 if _usermatch(ui, user, users)]
260 260 ui.debug('acl: %s enabled, %d entries for user %s\n' %
261 261 (key, len(pats), user))
262 262
263 263 # Branch-based ACL
264 264 if not repo:
265 265 if pats:
266 266 # If there's an asterisk (meaning "any branch"), always return True;
267 267 # Otherwise, test if b is in pats
268 268 if '*' in pats:
269 269 return util.always
270 270 return lambda b: b in pats
271 271 return util.never
272 272
273 273 # Path-based ACL
274 274 if pats:
275 275 return match.match(repo.root, '', pats)
276 276 return util.never
277 277
278 278 def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
279 279 if hooktype not in ['pretxnchangegroup', 'pretxncommit']:
280 280 raise error.Abort(_('config error - hook type "%s" cannot stop '
281 281 'incoming changesets nor commits') % hooktype)
282 282 if (hooktype == 'pretxnchangegroup' and
283 283 source not in ui.config('acl', 'sources', 'serve').split()):
284 284 ui.debug('acl: changes have source "%s" - skipping\n' % source)
285 285 return
286 286
287 287 user = None
288 288 if source == 'serve' and 'url' in kwargs:
289 289 url = kwargs['url'].split(':')
290 290 if url[0] == 'remote' and url[1].startswith('http'):
291 291 user = urlreq.unquote(url[3])
292 292
293 293 if user is None:
294 294 user = getpass.getuser()
295 295
296 296 ui.debug('acl: checking access for user "%s"\n' % user)
297 297
298 298 # deprecated config: acl.config
299 299 cfg = ui.config('acl', 'config')
300 300 if cfg:
301 301 ui.readconfig(cfg, sections=['acl.groups', 'acl.allow.branches',
302 302 'acl.deny.branches', 'acl.allow', 'acl.deny'])
303 303
304 304 allowbranches = buildmatch(ui, None, user, 'acl.allow.branches')
305 305 denybranches = buildmatch(ui, None, user, 'acl.deny.branches')
306 306 allow = buildmatch(ui, repo, user, 'acl.allow')
307 307 deny = buildmatch(ui, repo, user, 'acl.deny')
308 308
309 309 for rev in xrange(repo[node], len(repo)):
310 310 ctx = repo[rev]
311 311 branch = ctx.branch()
312 312 if denybranches and denybranches(branch):
313 313 raise error.Abort(_('acl: user "%s" denied on branch "%s"'
314 314 ' (changeset "%s")')
315 315 % (user, branch, ctx))
316 316 if allowbranches and not allowbranches(branch):
317 317 raise error.Abort(_('acl: user "%s" not allowed on branch "%s"'
318 318 ' (changeset "%s")')
319 319 % (user, branch, ctx))
320 320 ui.debug('acl: branch access granted: "%s" on branch "%s"\n'
321 321 % (ctx, branch))
322 322
323 323 for f in ctx.files():
324 324 if deny and deny(f):
325 325 raise error.Abort(_('acl: user "%s" denied on "%s"'
326 326 ' (changeset "%s")') % (user, f, ctx))
327 327 if allow and not allow(f):
328 328 raise error.Abort(_('acl: user "%s" not allowed on "%s"'
329 329 ' (changeset "%s")') % (user, f, ctx))
330 330 ui.debug('acl: path access granted: "%s"\n' % ctx)
@@ -1,250 +1,250 b''
1 1 # blackbox.py - log repository events to a file for post-mortem debugging
2 2 #
3 3 # Copyright 2010 Nicolas Dumazet
4 4 # Copyright 2013 Facebook, Inc.
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 """log repository events to a blackbox for debugging
10 10
11 11 Logs event information to .hg/blackbox.log to help debug and diagnose problems.
12 12 The events that get logged can be configured via the blackbox.track config key.
13 13
14 14 Examples::
15 15
16 16 [blackbox]
17 17 track = *
18 18 # dirty is *EXPENSIVE* (slow);
19 19 # each log entry indicates `+` if the repository is dirty, like :hg:`id`.
20 20 dirty = True
21 21 # record the source of log messages
22 22 logsource = True
23 23
24 24 [blackbox]
25 25 track = command, commandfinish, commandexception, exthook, pythonhook
26 26
27 27 [blackbox]
28 28 track = incoming
29 29
30 30 [blackbox]
31 31 # limit the size of a log file
32 32 maxsize = 1.5 MB
33 33 # rotate up to N log files when the current one gets too big
34 34 maxfiles = 3
35 35
36 36 """
37 37
38 38 from __future__ import absolute_import
39 39
40 40 import errno
41 41 import re
42 42
43 43 from mercurial.i18n import _
44 44 from mercurial.node import hex
45 45
46 46 from mercurial import (
47 47 cmdutil,
48 48 ui as uimod,
49 49 util,
50 50 )
51 51
52 52 cmdtable = {}
53 53 command = cmdutil.command(cmdtable)
54 # Note for extension authors: ONLY specify testedwith = 'internal' for
54 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
55 55 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
56 56 # be specifying the version(s) of Mercurial they are tested with, or
57 57 # leave the attribute unspecified.
58 testedwith = 'internal'
58 testedwith = 'ships-with-hg-core'
59 59 lastui = None
60 60
61 61 filehandles = {}
62 62
63 63 def _openlog(vfs):
64 64 path = vfs.join('blackbox.log')
65 65 if path in filehandles:
66 66 return filehandles[path]
67 67 filehandles[path] = fp = vfs('blackbox.log', 'a')
68 68 return fp
69 69
70 70 def _closelog(vfs):
71 71 path = vfs.join('blackbox.log')
72 72 fp = filehandles[path]
73 73 del filehandles[path]
74 74 fp.close()
75 75
76 76 def wrapui(ui):
77 77 class blackboxui(ui.__class__):
78 78 def __init__(self, src=None):
79 79 super(blackboxui, self).__init__(src)
80 80 if src is None:
81 81 self._partialinit()
82 82 else:
83 83 self._bbfp = getattr(src, '_bbfp', None)
84 84 self._bbinlog = False
85 85 self._bbrepo = getattr(src, '_bbrepo', None)
86 86 self._bbvfs = getattr(src, '_bbvfs', None)
87 87
88 88 def _partialinit(self):
89 89 if util.safehasattr(self, '_bbvfs'):
90 90 return
91 91 self._bbfp = None
92 92 self._bbinlog = False
93 93 self._bbrepo = None
94 94 self._bbvfs = None
95 95
96 96 def copy(self):
97 97 self._partialinit()
98 98 return self.__class__(self)
99 99
100 100 @util.propertycache
101 101 def track(self):
102 102 return self.configlist('blackbox', 'track', ['*'])
103 103
104 104 def _openlogfile(self):
105 105 def rotate(oldpath, newpath):
106 106 try:
107 107 self._bbvfs.unlink(newpath)
108 108 except OSError as err:
109 109 if err.errno != errno.ENOENT:
110 110 self.debug("warning: cannot remove '%s': %s\n" %
111 111 (newpath, err.strerror))
112 112 try:
113 113 if newpath:
114 114 self._bbvfs.rename(oldpath, newpath)
115 115 except OSError as err:
116 116 if err.errno != errno.ENOENT:
117 117 self.debug("warning: cannot rename '%s' to '%s': %s\n" %
118 118 (newpath, oldpath, err.strerror))
119 119
120 120 fp = _openlog(self._bbvfs)
121 121 maxsize = self.configbytes('blackbox', 'maxsize', 1048576)
122 122 if maxsize > 0:
123 123 st = self._bbvfs.fstat(fp)
124 124 if st.st_size >= maxsize:
125 125 path = fp.name
126 126 _closelog(self._bbvfs)
127 127 maxfiles = self.configint('blackbox', 'maxfiles', 7)
128 128 for i in xrange(maxfiles - 1, 1, -1):
129 129 rotate(oldpath='%s.%d' % (path, i - 1),
130 130 newpath='%s.%d' % (path, i))
131 131 rotate(oldpath=path,
132 132 newpath=maxfiles > 0 and path + '.1')
133 133 fp = _openlog(self._bbvfs)
134 134 return fp
135 135
136 136 def _bbwrite(self, fmt, *args):
137 137 self._bbfp.write(fmt % args)
138 138 self._bbfp.flush()
139 139
140 140 def log(self, event, *msg, **opts):
141 141 global lastui
142 142 super(blackboxui, self).log(event, *msg, **opts)
143 143 self._partialinit()
144 144
145 145 if not '*' in self.track and not event in self.track:
146 146 return
147 147
148 148 if self._bbfp:
149 149 ui = self
150 150 elif self._bbvfs:
151 151 try:
152 152 self._bbfp = self._openlogfile()
153 153 except (IOError, OSError) as err:
154 154 self.debug('warning: cannot write to blackbox.log: %s\n' %
155 155 err.strerror)
156 156 del self._bbvfs
157 157 self._bbfp = None
158 158 ui = self
159 159 else:
160 160 # certain ui instances exist outside the context of
161 161 # a repo, so just default to the last blackbox that
162 162 # was seen.
163 163 ui = lastui
164 164
165 165 if not ui or not ui._bbfp:
166 166 return
167 167 if not lastui or ui._bbrepo:
168 168 lastui = ui
169 169 if ui._bbinlog:
170 170 # recursion guard
171 171 return
172 172 try:
173 173 ui._bbinlog = True
174 174 date = util.datestr(None, '%Y/%m/%d %H:%M:%S')
175 175 user = util.getuser()
176 176 pid = str(util.getpid())
177 177 formattedmsg = msg[0] % msg[1:]
178 178 rev = '(unknown)'
179 179 changed = ''
180 180 if ui._bbrepo:
181 181 ctx = ui._bbrepo[None]
182 182 parents = ctx.parents()
183 183 rev = ('+'.join([hex(p.node()) for p in parents]))
184 184 if (ui.configbool('blackbox', 'dirty', False) and (
185 185 any(ui._bbrepo.status()) or
186 186 any(ctx.sub(s).dirty() for s in ctx.substate)
187 187 )):
188 188 changed = '+'
189 189 if ui.configbool('blackbox', 'logsource', False):
190 190 src = ' [%s]' % event
191 191 else:
192 192 src = ''
193 193 try:
194 194 ui._bbwrite('%s %s @%s%s (%s)%s> %s',
195 195 date, user, rev, changed, pid, src, formattedmsg)
196 196 except IOError as err:
197 197 self.debug('warning: cannot write to blackbox.log: %s\n' %
198 198 err.strerror)
199 199 finally:
200 200 ui._bbinlog = False
201 201
202 202 def setrepo(self, repo):
203 203 self._bbfp = None
204 204 self._bbinlog = False
205 205 self._bbrepo = repo
206 206 self._bbvfs = repo.vfs
207 207
208 208 ui.__class__ = blackboxui
209 209 uimod.ui = blackboxui
210 210
211 211 def uisetup(ui):
212 212 wrapui(ui)
213 213
214 214 def reposetup(ui, repo):
215 215 # During 'hg pull' a httppeer repo is created to represent the remote repo.
216 216 # It doesn't have a .hg directory to put a blackbox in, so we don't do
217 217 # the blackbox setup for it.
218 218 if not repo.local():
219 219 return
220 220
221 221 if util.safehasattr(ui, 'setrepo'):
222 222 ui.setrepo(repo)
223 223
224 224 @command('^blackbox',
225 225 [('l', 'limit', 10, _('the number of events to show')),
226 226 ],
227 227 _('hg blackbox [OPTION]...'))
228 228 def blackbox(ui, repo, *revs, **opts):
229 229 '''view the recent repository events
230 230 '''
231 231
232 232 if not repo.vfs.exists('blackbox.log'):
233 233 return
234 234
235 235 limit = opts.get('limit')
236 236 fp = repo.vfs('blackbox.log', 'r')
237 237 lines = fp.read().split('\n')
238 238
239 239 count = 0
240 240 output = []
241 241 for line in reversed(lines):
242 242 if count >= limit:
243 243 break
244 244
245 245 # count the commands by matching lines like: 2013/01/23 19:13:36 root>
246 246 if re.match('^\d{4}/\d{2}/\d{2} \d{2}:\d{2}:\d{2} .*> .*', line):
247 247 count += 1
248 248 output.append(line)
249 249
250 250 ui.status('\n'.join(reversed(output)))
@@ -1,928 +1,928 b''
1 1 # bugzilla.py - bugzilla integration for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 # Copyright 2011-4 Jim Hague <jim.hague@acm.org>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''hooks for integrating with the Bugzilla bug tracker
10 10
11 11 This hook extension adds comments on bugs in Bugzilla when changesets
12 12 that refer to bugs by Bugzilla ID are seen. The comment is formatted using
13 13 the Mercurial template mechanism.
14 14
15 15 The bug references can optionally include an update for Bugzilla of the
16 16 hours spent working on the bug. Bugs can also be marked fixed.
17 17
18 18 Three basic modes of access to Bugzilla are provided:
19 19
20 20 1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
21 21
22 22 2. Check data via the Bugzilla XMLRPC interface and submit bug change
23 23 via email to Bugzilla email interface. Requires Bugzilla 3.4 or later.
24 24
25 25 3. Writing directly to the Bugzilla database. Only Bugzilla installations
26 26 using MySQL are supported. Requires Python MySQLdb.
27 27
28 28 Writing directly to the database is susceptible to schema changes, and
29 29 relies on a Bugzilla contrib script to send out bug change
30 30 notification emails. This script runs as the user running Mercurial,
31 31 must be run on the host with the Bugzilla install, and requires
32 32 permission to read Bugzilla configuration details and the necessary
33 33 MySQL user and password to have full access rights to the Bugzilla
34 34 database. For these reasons this access mode is now considered
35 35 deprecated, and will not be updated for new Bugzilla versions going
36 36 forward. Only adding comments is supported in this access mode.
37 37
38 38 Access via XMLRPC needs a Bugzilla username and password to be specified
39 39 in the configuration. Comments are added under that username. Since the
40 40 configuration must be readable by all Mercurial users, it is recommended
41 41 that the rights of that user are restricted in Bugzilla to the minimum
42 42 necessary to add comments. Marking bugs fixed requires Bugzilla 4.0 and later.
43 43
44 44 Access via XMLRPC/email uses XMLRPC to query Bugzilla, but sends
45 45 email to the Bugzilla email interface to submit comments to bugs.
46 46 The From: address in the email is set to the email address of the Mercurial
47 47 user, so the comment appears to come from the Mercurial user. In the event
48 48 that the Mercurial user email is not recognized by Bugzilla as a Bugzilla
49 49 user, the email associated with the Bugzilla username used to log into
50 50 Bugzilla is used instead as the source of the comment. Marking bugs fixed
51 51 works on all supported Bugzilla versions.
52 52
53 53 Configuration items common to all access modes:
54 54
55 55 bugzilla.version
56 56 The access type to use. Values recognized are:
57 57
58 58 :``xmlrpc``: Bugzilla XMLRPC interface.
59 59 :``xmlrpc+email``: Bugzilla XMLRPC and email interfaces.
60 60 :``3.0``: MySQL access, Bugzilla 3.0 and later.
61 61 :``2.18``: MySQL access, Bugzilla 2.18 and up to but not
62 62 including 3.0.
63 63 :``2.16``: MySQL access, Bugzilla 2.16 and up to but not
64 64 including 2.18.
65 65
66 66 bugzilla.regexp
67 67 Regular expression to match bug IDs for update in changeset commit message.
68 68 It must contain one "()" named group ``<ids>`` containing the bug
69 69 IDs separated by non-digit characters. It may also contain
70 70 a named group ``<hours>`` with a floating-point number giving the
71 71 hours worked on the bug. If no named groups are present, the first
72 72 "()" group is assumed to contain the bug IDs, and work time is not
73 73 updated. The default expression matches ``Bug 1234``, ``Bug no. 1234``,
74 74 ``Bug number 1234``, ``Bugs 1234,5678``, ``Bug 1234 and 5678`` and
75 75 variations thereof, followed by an hours number prefixed by ``h`` or
76 76 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
77 77
78 78 bugzilla.fixregexp
79 79 Regular expression to match bug IDs for marking fixed in changeset
80 80 commit message. This must contain a "()" named group ``<ids>` containing
81 81 the bug IDs separated by non-digit characters. It may also contain
82 82 a named group ``<hours>`` with a floating-point number giving the
83 83 hours worked on the bug. If no named groups are present, the first
84 84 "()" group is assumed to contain the bug IDs, and work time is not
85 85 updated. The default expression matches ``Fixes 1234``, ``Fixes bug 1234``,
86 86 ``Fixes bugs 1234,5678``, ``Fixes 1234 and 5678`` and
87 87 variations thereof, followed by an hours number prefixed by ``h`` or
88 88 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
89 89
90 90 bugzilla.fixstatus
91 91 The status to set a bug to when marking fixed. Default ``RESOLVED``.
92 92
93 93 bugzilla.fixresolution
94 94 The resolution to set a bug to when marking fixed. Default ``FIXED``.
95 95
96 96 bugzilla.style
97 97 The style file to use when formatting comments.
98 98
99 99 bugzilla.template
100 100 Template to use when formatting comments. Overrides style if
101 101 specified. In addition to the usual Mercurial keywords, the
102 102 extension specifies:
103 103
104 104 :``{bug}``: The Bugzilla bug ID.
105 105 :``{root}``: The full pathname of the Mercurial repository.
106 106 :``{webroot}``: Stripped pathname of the Mercurial repository.
107 107 :``{hgweb}``: Base URL for browsing Mercurial repositories.
108 108
109 109 Default ``changeset {node|short} in repo {root} refers to bug
110 110 {bug}.\\ndetails:\\n\\t{desc|tabindent}``
111 111
112 112 bugzilla.strip
113 113 The number of path separator characters to strip from the front of
114 114 the Mercurial repository path (``{root}`` in templates) to produce
115 115 ``{webroot}``. For example, a repository with ``{root}``
116 116 ``/var/local/my-project`` with a strip of 2 gives a value for
117 117 ``{webroot}`` of ``my-project``. Default 0.
118 118
119 119 web.baseurl
120 120 Base URL for browsing Mercurial repositories. Referenced from
121 121 templates as ``{hgweb}``.
122 122
123 123 Configuration items common to XMLRPC+email and MySQL access modes:
124 124
125 125 bugzilla.usermap
126 126 Path of file containing Mercurial committer email to Bugzilla user email
127 127 mappings. If specified, the file should contain one mapping per
128 128 line::
129 129
130 130 committer = Bugzilla user
131 131
132 132 See also the ``[usermap]`` section.
133 133
134 134 The ``[usermap]`` section is used to specify mappings of Mercurial
135 135 committer email to Bugzilla user email. See also ``bugzilla.usermap``.
136 136 Contains entries of the form ``committer = Bugzilla user``.
137 137
138 138 XMLRPC access mode configuration:
139 139
140 140 bugzilla.bzurl
141 141 The base URL for the Bugzilla installation.
142 142 Default ``http://localhost/bugzilla``.
143 143
144 144 bugzilla.user
145 145 The username to use to log into Bugzilla via XMLRPC. Default
146 146 ``bugs``.
147 147
148 148 bugzilla.password
149 149 The password for Bugzilla login.
150 150
151 151 XMLRPC+email access mode uses the XMLRPC access mode configuration items,
152 152 and also:
153 153
154 154 bugzilla.bzemail
155 155 The Bugzilla email address.
156 156
157 157 In addition, the Mercurial email settings must be configured. See the
158 158 documentation in hgrc(5), sections ``[email]`` and ``[smtp]``.
159 159
160 160 MySQL access mode configuration:
161 161
162 162 bugzilla.host
163 163 Hostname of the MySQL server holding the Bugzilla database.
164 164 Default ``localhost``.
165 165
166 166 bugzilla.db
167 167 Name of the Bugzilla database in MySQL. Default ``bugs``.
168 168
169 169 bugzilla.user
170 170 Username to use to access MySQL server. Default ``bugs``.
171 171
172 172 bugzilla.password
173 173 Password to use to access MySQL server.
174 174
175 175 bugzilla.timeout
176 176 Database connection timeout (seconds). Default 5.
177 177
178 178 bugzilla.bzuser
179 179 Fallback Bugzilla user name to record comments with, if changeset
180 180 committer cannot be found as a Bugzilla user.
181 181
182 182 bugzilla.bzdir
183 183 Bugzilla install directory. Used by default notify. Default
184 184 ``/var/www/html/bugzilla``.
185 185
186 186 bugzilla.notify
187 187 The command to run to get Bugzilla to send bug change notification
188 188 emails. Substitutes from a map with 3 keys, ``bzdir``, ``id`` (bug
189 189 id) and ``user`` (committer bugzilla email). Default depends on
190 190 version; from 2.18 it is "cd %(bzdir)s && perl -T
191 191 contrib/sendbugmail.pl %(id)s %(user)s".
192 192
193 193 Activating the extension::
194 194
195 195 [extensions]
196 196 bugzilla =
197 197
198 198 [hooks]
199 199 # run bugzilla hook on every change pulled or pushed in here
200 200 incoming.bugzilla = python:hgext.bugzilla.hook
201 201
202 202 Example configurations:
203 203
204 204 XMLRPC example configuration. This uses the Bugzilla at
205 205 ``http://my-project.org/bugzilla``, logging in as user
206 206 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
207 207 collection of Mercurial repositories in ``/var/local/hg/repos/``,
208 208 with a web interface at ``http://my-project.org/hg``. ::
209 209
210 210 [bugzilla]
211 211 bzurl=http://my-project.org/bugzilla
212 212 user=bugmail@my-project.org
213 213 password=plugh
214 214 version=xmlrpc
215 215 template=Changeset {node|short} in {root|basename}.
216 216 {hgweb}/{webroot}/rev/{node|short}\\n
217 217 {desc}\\n
218 218 strip=5
219 219
220 220 [web]
221 221 baseurl=http://my-project.org/hg
222 222
223 223 XMLRPC+email example configuration. This uses the Bugzilla at
224 224 ``http://my-project.org/bugzilla``, logging in as user
225 225 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
226 226 collection of Mercurial repositories in ``/var/local/hg/repos/``,
227 227 with a web interface at ``http://my-project.org/hg``. Bug comments
228 228 are sent to the Bugzilla email address
229 229 ``bugzilla@my-project.org``. ::
230 230
231 231 [bugzilla]
232 232 bzurl=http://my-project.org/bugzilla
233 233 user=bugmail@my-project.org
234 234 password=plugh
235 235 version=xmlrpc+email
236 236 bzemail=bugzilla@my-project.org
237 237 template=Changeset {node|short} in {root|basename}.
238 238 {hgweb}/{webroot}/rev/{node|short}\\n
239 239 {desc}\\n
240 240 strip=5
241 241
242 242 [web]
243 243 baseurl=http://my-project.org/hg
244 244
245 245 [usermap]
246 246 user@emaildomain.com=user.name@bugzilladomain.com
247 247
248 248 MySQL example configuration. This has a local Bugzilla 3.2 installation
249 249 in ``/opt/bugzilla-3.2``. The MySQL database is on ``localhost``,
250 250 the Bugzilla database name is ``bugs`` and MySQL is
251 251 accessed with MySQL username ``bugs`` password ``XYZZY``. It is used
252 252 with a collection of Mercurial repositories in ``/var/local/hg/repos/``,
253 253 with a web interface at ``http://my-project.org/hg``. ::
254 254
255 255 [bugzilla]
256 256 host=localhost
257 257 password=XYZZY
258 258 version=3.0
259 259 bzuser=unknown@domain.com
260 260 bzdir=/opt/bugzilla-3.2
261 261 template=Changeset {node|short} in {root|basename}.
262 262 {hgweb}/{webroot}/rev/{node|short}\\n
263 263 {desc}\\n
264 264 strip=5
265 265
266 266 [web]
267 267 baseurl=http://my-project.org/hg
268 268
269 269 [usermap]
270 270 user@emaildomain.com=user.name@bugzilladomain.com
271 271
272 272 All the above add a comment to the Bugzilla bug record of the form::
273 273
274 274 Changeset 3b16791d6642 in repository-name.
275 275 http://my-project.org/hg/repository-name/rev/3b16791d6642
276 276
277 277 Changeset commit comment. Bug 1234.
278 278 '''
279 279
280 280 from __future__ import absolute_import
281 281
282 282 import re
283 283 import time
284 284
285 285 from mercurial.i18n import _
286 286 from mercurial.node import short
287 287 from mercurial import (
288 288 cmdutil,
289 289 error,
290 290 mail,
291 291 util,
292 292 )
293 293
294 294 urlparse = util.urlparse
295 295 xmlrpclib = util.xmlrpclib
296 296
297 # Note for extension authors: ONLY specify testedwith = 'internal' for
297 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
298 298 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
299 299 # be specifying the version(s) of Mercurial they are tested with, or
300 300 # leave the attribute unspecified.
301 testedwith = 'internal'
301 testedwith = 'ships-with-hg-core'
302 302
303 303 class bzaccess(object):
304 304 '''Base class for access to Bugzilla.'''
305 305
306 306 def __init__(self, ui):
307 307 self.ui = ui
308 308 usermap = self.ui.config('bugzilla', 'usermap')
309 309 if usermap:
310 310 self.ui.readconfig(usermap, sections=['usermap'])
311 311
312 312 def map_committer(self, user):
313 313 '''map name of committer to Bugzilla user name.'''
314 314 for committer, bzuser in self.ui.configitems('usermap'):
315 315 if committer.lower() == user.lower():
316 316 return bzuser
317 317 return user
318 318
319 319 # Methods to be implemented by access classes.
320 320 #
321 321 # 'bugs' is a dict keyed on bug id, where values are a dict holding
322 322 # updates to bug state. Recognized dict keys are:
323 323 #
324 324 # 'hours': Value, float containing work hours to be updated.
325 325 # 'fix': If key present, bug is to be marked fixed. Value ignored.
326 326
327 327 def filter_real_bug_ids(self, bugs):
328 328 '''remove bug IDs that do not exist in Bugzilla from bugs.'''
329 329 pass
330 330
331 331 def filter_cset_known_bug_ids(self, node, bugs):
332 332 '''remove bug IDs where node occurs in comment text from bugs.'''
333 333 pass
334 334
335 335 def updatebug(self, bugid, newstate, text, committer):
336 336 '''update the specified bug. Add comment text and set new states.
337 337
338 338 If possible add the comment as being from the committer of
339 339 the changeset. Otherwise use the default Bugzilla user.
340 340 '''
341 341 pass
342 342
343 343 def notify(self, bugs, committer):
344 344 '''Force sending of Bugzilla notification emails.
345 345
346 346 Only required if the access method does not trigger notification
347 347 emails automatically.
348 348 '''
349 349 pass
350 350
351 351 # Bugzilla via direct access to MySQL database.
352 352 class bzmysql(bzaccess):
353 353 '''Support for direct MySQL access to Bugzilla.
354 354
355 355 The earliest Bugzilla version this is tested with is version 2.16.
356 356
357 357 If your Bugzilla is version 3.4 or above, you are strongly
358 358 recommended to use the XMLRPC access method instead.
359 359 '''
360 360
361 361 @staticmethod
362 362 def sql_buglist(ids):
363 363 '''return SQL-friendly list of bug ids'''
364 364 return '(' + ','.join(map(str, ids)) + ')'
365 365
366 366 _MySQLdb = None
367 367
368 368 def __init__(self, ui):
369 369 try:
370 370 import MySQLdb as mysql
371 371 bzmysql._MySQLdb = mysql
372 372 except ImportError as err:
373 373 raise error.Abort(_('python mysql support not available: %s') % err)
374 374
375 375 bzaccess.__init__(self, ui)
376 376
377 377 host = self.ui.config('bugzilla', 'host', 'localhost')
378 378 user = self.ui.config('bugzilla', 'user', 'bugs')
379 379 passwd = self.ui.config('bugzilla', 'password')
380 380 db = self.ui.config('bugzilla', 'db', 'bugs')
381 381 timeout = int(self.ui.config('bugzilla', 'timeout', 5))
382 382 self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
383 383 (host, db, user, '*' * len(passwd)))
384 384 self.conn = bzmysql._MySQLdb.connect(host=host,
385 385 user=user, passwd=passwd,
386 386 db=db,
387 387 connect_timeout=timeout)
388 388 self.cursor = self.conn.cursor()
389 389 self.longdesc_id = self.get_longdesc_id()
390 390 self.user_ids = {}
391 391 self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
392 392
393 393 def run(self, *args, **kwargs):
394 394 '''run a query.'''
395 395 self.ui.note(_('query: %s %s\n') % (args, kwargs))
396 396 try:
397 397 self.cursor.execute(*args, **kwargs)
398 398 except bzmysql._MySQLdb.MySQLError:
399 399 self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
400 400 raise
401 401
402 402 def get_longdesc_id(self):
403 403 '''get identity of longdesc field'''
404 404 self.run('select fieldid from fielddefs where name = "longdesc"')
405 405 ids = self.cursor.fetchall()
406 406 if len(ids) != 1:
407 407 raise error.Abort(_('unknown database schema'))
408 408 return ids[0][0]
409 409
410 410 def filter_real_bug_ids(self, bugs):
411 411 '''filter not-existing bugs from set.'''
412 412 self.run('select bug_id from bugs where bug_id in %s' %
413 413 bzmysql.sql_buglist(bugs.keys()))
414 414 existing = [id for (id,) in self.cursor.fetchall()]
415 415 for id in bugs.keys():
416 416 if id not in existing:
417 417 self.ui.status(_('bug %d does not exist\n') % id)
418 418 del bugs[id]
419 419
420 420 def filter_cset_known_bug_ids(self, node, bugs):
421 421 '''filter bug ids that already refer to this changeset from set.'''
422 422 self.run('''select bug_id from longdescs where
423 423 bug_id in %s and thetext like "%%%s%%"''' %
424 424 (bzmysql.sql_buglist(bugs.keys()), short(node)))
425 425 for (id,) in self.cursor.fetchall():
426 426 self.ui.status(_('bug %d already knows about changeset %s\n') %
427 427 (id, short(node)))
428 428 del bugs[id]
429 429
430 430 def notify(self, bugs, committer):
431 431 '''tell bugzilla to send mail.'''
432 432 self.ui.status(_('telling bugzilla to send mail:\n'))
433 433 (user, userid) = self.get_bugzilla_user(committer)
434 434 for id in bugs.keys():
435 435 self.ui.status(_(' bug %s\n') % id)
436 436 cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
437 437 bzdir = self.ui.config('bugzilla', 'bzdir',
438 438 '/var/www/html/bugzilla')
439 439 try:
440 440 # Backwards-compatible with old notify string, which
441 441 # took one string. This will throw with a new format
442 442 # string.
443 443 cmd = cmdfmt % id
444 444 except TypeError:
445 445 cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
446 446 self.ui.note(_('running notify command %s\n') % cmd)
447 447 fp = util.popen('(%s) 2>&1' % cmd)
448 448 out = fp.read()
449 449 ret = fp.close()
450 450 if ret:
451 451 self.ui.warn(out)
452 452 raise error.Abort(_('bugzilla notify command %s') %
453 453 util.explainexit(ret)[0])
454 454 self.ui.status(_('done\n'))
455 455
456 456 def get_user_id(self, user):
457 457 '''look up numeric bugzilla user id.'''
458 458 try:
459 459 return self.user_ids[user]
460 460 except KeyError:
461 461 try:
462 462 userid = int(user)
463 463 except ValueError:
464 464 self.ui.note(_('looking up user %s\n') % user)
465 465 self.run('''select userid from profiles
466 466 where login_name like %s''', user)
467 467 all = self.cursor.fetchall()
468 468 if len(all) != 1:
469 469 raise KeyError(user)
470 470 userid = int(all[0][0])
471 471 self.user_ids[user] = userid
472 472 return userid
473 473
474 474 def get_bugzilla_user(self, committer):
475 475 '''See if committer is a registered bugzilla user. Return
476 476 bugzilla username and userid if so. If not, return default
477 477 bugzilla username and userid.'''
478 478 user = self.map_committer(committer)
479 479 try:
480 480 userid = self.get_user_id(user)
481 481 except KeyError:
482 482 try:
483 483 defaultuser = self.ui.config('bugzilla', 'bzuser')
484 484 if not defaultuser:
485 485 raise error.Abort(_('cannot find bugzilla user id for %s') %
486 486 user)
487 487 userid = self.get_user_id(defaultuser)
488 488 user = defaultuser
489 489 except KeyError:
490 490 raise error.Abort(_('cannot find bugzilla user id for %s or %s')
491 491 % (user, defaultuser))
492 492 return (user, userid)
493 493
494 494 def updatebug(self, bugid, newstate, text, committer):
495 495 '''update bug state with comment text.
496 496
497 497 Try adding comment as committer of changeset, otherwise as
498 498 default bugzilla user.'''
499 499 if len(newstate) > 0:
500 500 self.ui.warn(_("Bugzilla/MySQL cannot update bug state\n"))
501 501
502 502 (user, userid) = self.get_bugzilla_user(committer)
503 503 now = time.strftime('%Y-%m-%d %H:%M:%S')
504 504 self.run('''insert into longdescs
505 505 (bug_id, who, bug_when, thetext)
506 506 values (%s, %s, %s, %s)''',
507 507 (bugid, userid, now, text))
508 508 self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
509 509 values (%s, %s, %s, %s)''',
510 510 (bugid, userid, now, self.longdesc_id))
511 511 self.conn.commit()
512 512
513 513 class bzmysql_2_18(bzmysql):
514 514 '''support for bugzilla 2.18 series.'''
515 515
516 516 def __init__(self, ui):
517 517 bzmysql.__init__(self, ui)
518 518 self.default_notify = \
519 519 "cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s"
520 520
521 521 class bzmysql_3_0(bzmysql_2_18):
522 522 '''support for bugzilla 3.0 series.'''
523 523
524 524 def __init__(self, ui):
525 525 bzmysql_2_18.__init__(self, ui)
526 526
527 527 def get_longdesc_id(self):
528 528 '''get identity of longdesc field'''
529 529 self.run('select id from fielddefs where name = "longdesc"')
530 530 ids = self.cursor.fetchall()
531 531 if len(ids) != 1:
532 532 raise error.Abort(_('unknown database schema'))
533 533 return ids[0][0]
534 534
535 535 # Bugzilla via XMLRPC interface.
536 536
537 537 class cookietransportrequest(object):
538 538 """A Transport request method that retains cookies over its lifetime.
539 539
540 540 The regular xmlrpclib transports ignore cookies. Which causes
541 541 a bit of a problem when you need a cookie-based login, as with
542 542 the Bugzilla XMLRPC interface prior to 4.4.3.
543 543
544 544 So this is a helper for defining a Transport which looks for
545 545 cookies being set in responses and saves them to add to all future
546 546 requests.
547 547 """
548 548
549 549 # Inspiration drawn from
550 550 # http://blog.godson.in/2010/09/how-to-make-python-xmlrpclib-client.html
551 551 # http://www.itkovian.net/base/transport-class-for-pythons-xml-rpc-lib/
552 552
553 553 cookies = []
554 554 def send_cookies(self, connection):
555 555 if self.cookies:
556 556 for cookie in self.cookies:
557 557 connection.putheader("Cookie", cookie)
558 558
559 559 def request(self, host, handler, request_body, verbose=0):
560 560 self.verbose = verbose
561 561 self.accept_gzip_encoding = False
562 562
563 563 # issue XML-RPC request
564 564 h = self.make_connection(host)
565 565 if verbose:
566 566 h.set_debuglevel(1)
567 567
568 568 self.send_request(h, handler, request_body)
569 569 self.send_host(h, host)
570 570 self.send_cookies(h)
571 571 self.send_user_agent(h)
572 572 self.send_content(h, request_body)
573 573
574 574 # Deal with differences between Python 2.4-2.6 and 2.7.
575 575 # In the former h is a HTTP(S). In the latter it's a
576 576 # HTTP(S)Connection. Luckily, the 2.4-2.6 implementation of
577 577 # HTTP(S) has an underlying HTTP(S)Connection, so extract
578 578 # that and use it.
579 579 try:
580 580 response = h.getresponse()
581 581 except AttributeError:
582 582 response = h._conn.getresponse()
583 583
584 584 # Add any cookie definitions to our list.
585 585 for header in response.msg.getallmatchingheaders("Set-Cookie"):
586 586 val = header.split(": ", 1)[1]
587 587 cookie = val.split(";", 1)[0]
588 588 self.cookies.append(cookie)
589 589
590 590 if response.status != 200:
591 591 raise xmlrpclib.ProtocolError(host + handler, response.status,
592 592 response.reason, response.msg.headers)
593 593
594 594 payload = response.read()
595 595 parser, unmarshaller = self.getparser()
596 596 parser.feed(payload)
597 597 parser.close()
598 598
599 599 return unmarshaller.close()
600 600
601 601 # The explicit calls to the underlying xmlrpclib __init__() methods are
602 602 # necessary. The xmlrpclib.Transport classes are old-style classes, and
603 603 # it turns out their __init__() doesn't get called when doing multiple
604 604 # inheritance with a new-style class.
605 605 class cookietransport(cookietransportrequest, xmlrpclib.Transport):
606 606 def __init__(self, use_datetime=0):
607 607 if util.safehasattr(xmlrpclib.Transport, "__init__"):
608 608 xmlrpclib.Transport.__init__(self, use_datetime)
609 609
class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
    # same old-style-class workaround as cookietransport, but for the
    # HTTPS (SafeTransport) flavor
    def __init__(self, use_datetime=0):
        if util.safehasattr(xmlrpclib.Transport, "__init__"):
            xmlrpclib.SafeTransport.__init__(self, use_datetime)
614 614
class bzxmlrpc(bzaccess):
    """Support for access to Bugzilla via the Bugzilla XMLRPC API.

    Requires a minimum Bugzilla version 3.4.
    """

    def __init__(self, ui):
        bzaccess.__init__(self, ui)

        # locate Bugzilla's XML-RPC endpoint
        baseurl = self.ui.config('bugzilla', 'bzurl',
                                 'http://localhost/bugzilla/')
        rpcurl = baseurl.rstrip("/") + "/xmlrpc.cgi"

        user = self.ui.config('bugzilla', 'user', 'bugs')
        passwd = self.ui.config('bugzilla', 'password')

        self.fixstatus = self.ui.config('bugzilla', 'fixstatus', 'RESOLVED')
        self.fixresolution = self.ui.config('bugzilla', 'fixresolution',
                                            'FIXED')

        self.bzproxy = xmlrpclib.ServerProxy(rpcurl, self.transport(rpcurl))
        vparts = self.bzproxy.Bugzilla.version()['version'].split('.')
        self.bzvermajor = int(vparts[0])
        self.bzverminor = int(vparts[1])
        login = self.bzproxy.User.login({'login': user, 'password': passwd,
                                         'restrict_login': True})
        # token-based auth appeared in later Bugzilla releases; default to ''
        self.bztoken = login.get('token', '')

    def transport(self, uri):
        # https endpoints need the SSL-capable cookie transport
        if urlparse.urlparse(uri, "http")[0] == "https":
            return cookiesafetransport()
        return cookietransport()

    def get_bug_comments(self, id):
        """Return a string with all comment text for a bug."""
        data = self.bzproxy.Bug.comments({'ids': [id],
                                          'include_fields': ['text'],
                                          'token': self.bztoken})
        return ''.join(comment['text']
                       for comment in data['bugs'][str(id)]['comments'])

    def filter_real_bug_ids(self, bugs):
        # probe every candidate at once; with permissive=True Bugzilla
        # reports unknown ids in 'faults' instead of raising
        probe = self.bzproxy.Bug.get({'ids': sorted(bugs.keys()),
                                      'include_fields': [],
                                      'permissive': True,
                                      'token': self.bztoken,
                                      })
        for badbug in probe['faults']:
            badid = badbug['id']
            self.ui.status(_('bug %d does not exist\n') % badid)
            del bugs[badid]

    def filter_cset_known_bug_ids(self, node, bugs):
        # drop bugs whose comments already mention this changeset
        for bugid in sorted(bugs.keys()):
            if self.get_bug_comments(bugid).find(short(node)) != -1:
                self.ui.status(_('bug %d already knows about changeset %s\n') %
                               (bugid, short(node)))
                del bugs[bugid]

    def updatebug(self, bugid, newstate, text, committer):
        args = {}
        if 'hours' in newstate:
            args['work_time'] = newstate['hours']

        if self.bzvermajor < 4:
            # pre-4.0 API can add comments but cannot change bug status
            if 'fix' in newstate:
                self.ui.warn(_("Bugzilla/XMLRPC needs Bugzilla 4.0 or later "
                               "to mark bugs fixed\n"))
            args['id'] = bugid
            args['comment'] = text
            self.bzproxy.Bug.add_comment(args)
        else:
            args['ids'] = [bugid]
            args['comment'] = {'body' : text}
            if 'fix' in newstate:
                args['status'] = self.fixstatus
                args['resolution'] = self.fixresolution
            args['token'] = self.bztoken
            self.bzproxy.Bug.update(args)
694 694
class bzxmlrpcemail(bzxmlrpc):
    """Read data from Bugzilla via XMLRPC, send updates via email.

    Advantages of sending updates via email:
    1. Comments can be added as any user, not just logged in user.
    2. Bug statuses or other fields not accessible via XMLRPC can
       potentially be updated.

    There is no XMLRPC function to change bug status before Bugzilla
    4.0, so bugs cannot be marked fixed via XMLRPC before Bugzilla 4.0.
    But bugs can be marked fixed via email from 3.4 onwards.
    """

    # The in-email command format changed subtly between Bugzilla 3.4
    # and 3.6: 3.4 expects '@<fieldname> = <value>' and calls the id
    # field @bug_id, while 3.6 expects '@<fieldname> <value>' with @id.
    # Both 3.6 and 4.0 keep backwards compatibility, but rather than
    # rely on that, use the new format from 4.0 onwards.

    def __init__(self, ui):
        bzxmlrpc.__init__(self, ui)

        self.bzemail = self.ui.config('bugzilla', 'bzemail')
        if not self.bzemail:
            raise error.Abort(_("configuration 'bzemail' missing"))
        mail.validateconfig(self.ui)

    def makecommandline(self, fieldname, value):
        # render one in-email Bugzilla command for the server's version
        if self.bzvermajor >= 4:
            return "@%s %s" % (fieldname, str(value))
        if fieldname == "id":
            fieldname = "bug_id"
        return "@%s = %s" % (fieldname, str(value))

    def send_bug_modify_email(self, bugid, commands, comment, committer):
        '''send modification message to Bugzilla bug via email.

        The message format is documented in the Bugzilla email_in.pl
        specification. commands is a list of command lines, comment is the
        comment text.

        To stop users from crafting commit comments with
        Bugzilla commands, specify the bug ID via the message body, rather
        than the subject line, and leave a blank line after it.
        '''
        sender = self.map_committer(committer)
        matches = self.bzproxy.User.get({'match': [sender],
                                         'token': self.bztoken})
        if not matches['users']:
            # committer unknown to Bugzilla; fall back to the default user
            sender = self.ui.config('bugzilla', 'user', 'bugs')
            matches = self.bzproxy.User.get({'match': [sender],
                                             'token': self.bztoken})
            if not matches['users']:
                raise error.Abort(_("default bugzilla user %s email not found")
                                  % sender)
        sender = matches['users'][0]['email']
        commands.append(self.makecommandline("id", bugid))

        text = "\n".join(commands) + "\n\n" + comment

        _charsets = mail._charsets(self.ui)
        sender = mail.addressencode(self.ui, sender, _charsets)
        bzemail = mail.addressencode(self.ui, self.bzemail, _charsets)
        msg = mail.mimeencode(self.ui, text, _charsets)
        msg['From'] = sender
        msg['To'] = bzemail
        msg['Subject'] = mail.headencode(self.ui, "Bug modification", _charsets)
        sendmail = mail.connect(self.ui)
        sendmail(sender, bzemail, msg.as_string())

    def updatebug(self, bugid, newstate, text, committer):
        cmds = []
        if 'hours' in newstate:
            cmds.append(self.makecommandline("work_time", newstate['hours']))
        if 'fix' in newstate:
            cmds.append(self.makecommandline("bug_status", self.fixstatus))
            cmds.append(self.makecommandline("resolution", self.fixresolution))
        self.send_bug_modify_email(bugid, cmds, text, committer)
775 775
class bugzilla(object):
    # map of supported access styles; each Bugzilla schema/API version
    # has a dedicated driver class
    _versions = {
        '2.16': bzmysql,
        '2.18': bzmysql_2_18,
        '3.0': bzmysql_3_0,
        'xmlrpc': bzxmlrpc,
        'xmlrpc+email': bzxmlrpcemail
    }

    # default regexps locating bug references (and optional hours spent)
    # in commit messages; users may override via bugzilla.regexp and
    # bugzilla.fixregexp
    _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
                       r'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
                       r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')

    _default_fix_re = (r'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
                       r'(?:nos?\.?|num(?:ber)?s?)?\s*'
                       r'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
                       r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')

    def __init__(self, ui, repo):
        self.ui = ui
        self.repo = repo

        vername = self.ui.config('bugzilla', 'version')
        try:
            driverclass = bugzilla._versions[vername]
        except KeyError:
            raise error.Abort(_('bugzilla version %s not supported') %
                              vername)
        self.bzdriver = driverclass(self.ui)

        self.bug_re = re.compile(
            self.ui.config('bugzilla', 'regexp',
                           bugzilla._default_bug_re), re.IGNORECASE)
        self.fix_re = re.compile(
            self.ui.config('bugzilla', 'fixregexp',
                           bugzilla._default_fix_re), re.IGNORECASE)
        # splits the matched <ids> group into individual bug numbers
        self.split_re = re.compile(r'\D+')

    def find_bugs(self, ctx):
        '''return bugs dictionary created from commit comment.

        Extract bug info from changeset comments. Filter out any that are
        not known to Bugzilla, and any that already have a reference to
        the given changeset in their comments.
        '''
        start = 0
        hours = 0.0
        bugs = {}
        bmatch = self.bug_re.search(ctx.description(), start)
        fmatch = self.fix_re.search(ctx.description(), start)
        while True:
            bugattribs = {}
            if not bmatch and not fmatch:
                break
            # take whichever match occurs earliest in the description
            if not bmatch:
                cur = fmatch
            elif not fmatch:
                cur = bmatch
            elif bmatch.start() < fmatch.start():
                cur = bmatch
            else:
                cur = fmatch
            start = cur.end()
            if cur is bmatch:
                bmatch = self.bug_re.search(ctx.description(), start)
                if 'fix' in bugattribs:
                    del bugattribs['fix']
            else:
                fmatch = self.fix_re.search(ctx.description(), start)
                bugattribs['fix'] = None

            try:
                ids = cur.group('ids')
            except IndexError:
                ids = cur.group(1)
            try:
                hours = float(cur.group('hours'))
                bugattribs['hours'] = hours
            except IndexError:
                pass
            except TypeError:
                # no hours group matched
                pass
            except ValueError:
                self.ui.status(_("%s: invalid hours\n") % cur.group('hours'))

            for bugid in self.split_re.split(ids):
                if not bugid:
                    continue
                bugs[int(bugid)] = bugattribs
        if bugs:
            self.bzdriver.filter_real_bug_ids(bugs)
        if bugs:
            self.bzdriver.filter_cset_known_bug_ids(ctx.node(), bugs)
        return bugs

    def update(self, bugid, newstate, ctx):
        '''update bugzilla bug with reference to changeset.'''

        def webroot(root):
            '''strip leading prefix of repo root and turn into
            url-safe path.'''
            strip = int(self.ui.config('bugzilla', 'strip', 0))
            root = util.pconvert(root)
            while strip > 0:
                sep = root.find('/')
                if sep == -1:
                    break
                root = root[sep + 1:]
                strip -= 1
            return root

        mapfile = None
        tmpl = self.ui.config('bugzilla', 'template')
        if not tmpl:
            mapfile = self.ui.config('bugzilla', 'style')
        if not mapfile and not tmpl:
            tmpl = _('changeset {node|short} in repo {root} refers '
                     'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
        t = cmdutil.changeset_templater(self.ui, self.repo,
                                        False, None, tmpl, mapfile, False)
        self.ui.pushbuffer()
        t.show(ctx, changes=ctx.changeset(),
               bug=str(bugid),
               hgweb=self.ui.config('web', 'baseurl'),
               root=self.repo.root,
               webroot=webroot(self.repo.root))
        data = self.ui.popbuffer()
        self.bzdriver.updatebug(bugid, newstate, data, util.email(ctx.user()))

    def notify(self, bugs, committer):
        '''ensure Bugzilla users are notified of bug change.'''
        self.bzdriver.notify(bugs, committer)
911 911
def hook(ui, repo, hooktype, node=None, **kwargs):
    '''add comment to bugzilla for each changeset that refers to a
    bugzilla bug id. only add a comment once per bug, so same change
    seen multiple times does not fill bug with duplicate data.'''
    if node is None:
        raise error.Abort(_('hook type %s does not pass a changeset id') %
                          hooktype)
    try:
        bz = bugzilla(ui, repo)
        ctx = repo[node]
        found = bz.find_bugs(ctx)
        if found:
            for bugid, state in found.items():
                bz.update(bugid, state, ctx)
            bz.notify(found, util.email(ctx.user()))
    except Exception as e:
        # surface any driver/network failure as a plain Abort
        raise error.Abort(_('Bugzilla error: %s') % e)
@@ -1,190 +1,190 b''
1 1 # Copyright (C) 2015 - Mike Edgar <adgar@google.com>
2 2 #
3 3 # This extension enables removal of file content at a given revision,
4 4 # rewriting the data/metadata of successive revisions to preserve revision log
5 5 # integrity.
6 6
7 7 """erase file content at a given revision
8 8
9 9 The censor command instructs Mercurial to erase all content of a file at a given
10 10 revision *without updating the changeset hash.* This allows existing history to
11 11 remain valid while preventing future clones/pulls from receiving the erased
12 12 data.
13 13
14 14 Typical uses for censor are due to security or legal requirements, including::
15 15
16 16 * Passwords, private keys, cryptographic material
17 17 * Licensed data/code/libraries for which the license has expired
18 18 * Personally Identifiable Information or other private data
19 19
20 20 Censored nodes can interrupt mercurial's typical operation whenever the excised
21 21 data needs to be materialized. Some commands, like ``hg cat``/``hg revert``,
22 22 simply fail when asked to produce censored data. Others, like ``hg verify`` and
23 23 ``hg update``, must be capable of tolerating censored data to continue to
24 24 function in a meaningful way. Such commands only tolerate censored file
25 25 revisions if they are allowed by the "censor.policy=ignore" config option.
26 26 """
27 27
28 28 from __future__ import absolute_import
29 29
30 30 from mercurial.i18n import _
31 31 from mercurial.node import short
32 32
33 33 from mercurial import (
34 34 cmdutil,
35 35 error,
36 36 filelog,
37 37 lock as lockmod,
38 38 revlog,
39 39 scmutil,
40 40 util,
41 41 )
42 42
43 43 cmdtable = {}
44 44 command = cmdutil.command(cmdtable)
45 # Note for extension authors: ONLY specify testedwith = 'internal' for
45 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
46 46 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
47 47 # be specifying the version(s) of Mercurial they are tested with, or
48 48 # leave the attribute unspecified.
49 testedwith = 'internal'
49 testedwith = 'ships-with-hg-core'
50 50
@command('censor',
         [('r', 'rev', '', _('censor file from specified revision'), _('REV')),
          ('t', 'tombstone', '', _('replacement tombstone data'), _('TEXT'))],
         _('-r REV [-t TEXT] [FILE]'))
def censor(ui, repo, path, rev='', tombstone='', **opts):
    # the rewrite touches store files, so take both repo locks for the
    # whole operation, then delegate the real work to _docensor
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        return _docensor(ui, repo, path, rev, tombstone, **opts)
    finally:
        lockmod.release(lock, wlock)
63 63
def _docensor(ui, repo, path, rev='', tombstone='', **opts):
    """Rewrite the filelog for *path*, replacing revision *rev*'s data
    with a (padded) tombstone and fixing up all later entries.

    Caller must hold both the repo wlock and lock.
    """
    if not path:
        raise error.Abort(_('must specify file path to censor'))
    if not rev:
        raise error.Abort(_('must specify revision to censor'))

    wctx = repo[None]

    matcher = scmutil.match(wctx, (path,))
    if matcher.anypats() or len(matcher.files()) != 1:
        raise error.Abort(_('can only specify an explicit filename'))
    path = matcher.files()[0]
    flog = repo.file(path)
    if not len(flog):
        raise error.Abort(_('cannot censor file with no history'))

    rev = scmutil.revsingle(repo, rev, rev).rev()
    try:
        ctx = repo[rev]
    except KeyError:
        raise error.Abort(_('invalid revision identifier %s') % rev)

    try:
        fctx = ctx.filectx(path)
    except error.LookupError:
        raise error.Abort(_('file does not exist at revision %s') % rev)

    # refuse if any head still carries this exact file revision: a pull
    # would need the censored data to materialize the working copy
    fnode = fctx.filenode()
    headctxs = [repo[c] for c in repo.heads()]
    heads = [c for c in headctxs if path in c and c.filenode(path) == fnode]
    if heads:
        headlist = ', '.join([short(c.node()) for c in heads])
        raise error.Abort(_('cannot censor file in heads (%s)') % headlist,
                          hint=_('clean/delete and commit first'))

    wdirparents = wctx.parents()
    if ctx.node() in [p.node() for p in wdirparents]:
        raise error.Abort(_('cannot censor working directory'),
                          hint=_('clean/delete/update first'))

    flversion = flog.version & 0xFFFF
    if flversion != revlog.REVLOGNG:
        raise error.Abort(
            _('censor does not support revlog version %d') % (flversion,))

    tombstone = filelog.packmeta({"censored": tombstone}, "")

    crev = fctx.filerev()

    # the tombstone is padded to the censored data's size, so it must fit
    if len(tombstone) > flog.rawsize(crev):
        raise error.Abort(_(
            'censor tombstone must be no longer than censored data'))

    # Using two files instead of one makes it easy to rewrite entry-by-entry
    idxread = repo.svfs(flog.indexfile, 'r')
    idxwrite = repo.svfs(flog.indexfile, 'wb', atomictemp=True)
    if flog.version & revlog.REVLOGNGINLINEDATA:
        # inline revlogs keep data in the index file itself
        dataread, datawrite = idxread, idxwrite
    else:
        dataread = repo.svfs(flog.datafile, 'r')
        datawrite = repo.svfs(flog.datafile, 'wb', atomictemp=True)

    # Copy all revlog data up to the entry to be censored.
    rio = revlog.revlogio()
    offset = flog.start(crev)

    for chunk in util.filechunkiter(idxread, limit=crev * rio.size):
        idxwrite.write(chunk)
    for chunk in util.filechunkiter(dataread, limit=offset):
        datawrite.write(chunk)

    def rewriteindex(r, newoffs, newdata=None):
        """Rewrite index entry *r* with data offset *newoffs*.

        newdata, if given, is a tuple of three positive integers:
        (new compressed, new uncompressed, added flag bits).
        """
        offlags, comp, uncomp, base, link, p1, p2, nodeid = flog.index[r]
        flags = revlog.gettype(offlags)
        if newdata:
            comp, uncomp, nflags = newdata
            flags |= nflags
        offlags = revlog.offset_type(newoffs, flags)
        entry = (offlags, comp, uncomp, r, link, p1, p2, nodeid)
        idxwrite.write(rio.packentry(entry, None, flog.version, r))
        # keep the read cursor in step with what we just consumed
        idxread.seek(rio.size, 1)

    def rewrite(r, offs, data, nflags=revlog.REVIDX_DEFAULT_FLAGS):
        """Store *data* as revision *r*'s full text at data offset *offs*.

        Returns the number of data bytes written, for offset tracking.
        """
        flag, compdata = flog.compress(data)
        newcomp = len(flag) + len(compdata)
        rewriteindex(r, offs, (newcomp, len(data), nflags))
        datawrite.write(flag)
        datawrite.write(compdata)
        dataread.seek(flog.length(r), 1)
        return newcomp

    # Rewrite censored revlog entry with (padded) tombstone data.
    padding = ' ' * (flog.rawsize(crev) - len(tombstone))
    offset += rewrite(crev, offset, tombstone + padding,
                      revlog.REVIDX_ISCENSORED)

    # Rewrite all following filelog revisions fixing up offsets and deltas.
    for srev in xrange(crev + 1, len(flog)):
        if crev in flog.parentrevs(srev):
            # Immediate children of censored node must be re-added as
            # fulltext, since their delta base no longer exists.
            try:
                revdata = flog.revision(srev)
            except error.CensoredNodeError as e:
                revdata = e.tombstone
            dlen = rewrite(srev, offset, revdata)
        else:
            # Copy any other revision data verbatim after fixing up the offset.
            rewriteindex(srev, offset)
            dlen = flog.length(srev)
            for chunk in util.filechunkiter(dataread, limit=dlen):
                datawrite.write(chunk)
        offset += dlen

    idxread.close()
    idxwrite.close()
    if dataread is not idxread:
        dataread.close()
    datawrite.close()
@@ -1,643 +1,643 b''
1 1 # chgserver.py - command server extension for cHg
2 2 #
3 3 # Copyright 2011 Yuya Nishihara <yuya@tcha.org>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """command server extension for cHg (EXPERIMENTAL)
9 9
10 10 'S' channel (read/write)
11 11 propagate ui.system() request to client
12 12
13 13 'attachio' command
14 14 attach client's stdio passed by sendmsg()
15 15
16 16 'chdir' command
17 17 change current directory
18 18
19 19 'getpager' command
20 20 checks if pager is enabled and which pager should be executed
21 21
22 22 'setenv' command
23 23 replace os.environ completely
24 24
25 25 'setumask' command
26 26 set umask
27 27
28 28 'validate' command
29 29 reload the config and check if the server is up to date
30 30
31 31 Config
32 32 ------
33 33
34 34 ::
35 35
36 36 [chgserver]
37 37 idletimeout = 3600 # seconds, after which an idle server will exit
38 38 skiphash = False # whether to skip config or env change checks
39 39 """
40 40
41 41 from __future__ import absolute_import
42 42
43 43 import errno
44 44 import hashlib
45 45 import inspect
46 46 import os
47 47 import re
48 48 import signal
49 49 import struct
50 50 import sys
51 51 import time
52 52
53 53 from mercurial.i18n import _
54 54
55 55 from mercurial import (
56 56 cmdutil,
57 57 commands,
58 58 commandserver,
59 59 dispatch,
60 60 error,
61 61 extensions,
62 62 osutil,
63 63 util,
64 64 )
65 65
66 # Note for extension authors: ONLY specify testedwith = 'internal' for
66 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
67 67 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
68 68 # be specifying the version(s) of Mercurial they are tested with, or
69 69 # leave the attribute unspecified.
70 testedwith = 'internal'
70 testedwith = 'ships-with-hg-core'
71 71
72 72 _log = commandserver.log
73 73
74 74 def _hashlist(items):
75 75 """return sha1 hexdigest for a list"""
76 76 return hashlib.sha1(str(items)).hexdigest()
77 77
78 78 # sensitive config sections affecting confighash
79 79 _configsections = [
80 80 'alias', # affects global state commands.table
81 81 'extdiff', # uisetup will register new commands
82 82 'extensions',
83 83 ]
84 84
85 85 # sensitive environment variables affecting confighash
86 86 _envre = re.compile(r'''\A(?:
87 87 CHGHG
88 88 |HG.*
89 89 |LANG(?:UAGE)?
90 90 |LC_.*
91 91 |LD_.*
92 92 |PATH
93 93 |PYTHON.*
94 94 |TERM(?:INFO)?
95 95 |TZ
96 96 )\Z''', re.X)
97 97
def _confighash(ui):
    """return a quick hash for detecting config/env changes

    confighash is the hash of sensitive config items and environment variables.

    for chgserver, it is designed that once confighash changes, the server is
    not qualified to serve its client and should redirect the client to a new
    server. different from mtimehash, confighash change will not mark the
    server outdated and exit since the user can have different configs at the
    same time.
    """
    sectionitems = [ui.configitems(section) for section in _configsections]
    sectionhash = _hashlist(sectionitems)
    envitems = [(k, v) for k, v in os.environ.iteritems() if _envre.match(k)]
    envhash = _hashlist(sorted(envitems))
    return sectionhash[:6] + envhash[:6]
116 116
def _getmtimepaths(ui):
    """get a list of paths that should be checked to detect change

    The list will include:
    - extensions (will not cover all files for complex extensions)
    - mercurial/__version__.py
    - python binary
    """
    modules = [mod for name, mod in extensions.extensions(ui)]
    try:
        from mercurial import __version__
        modules.append(__version__)
    except ImportError:
        pass
    paths = [sys.executable]
    for mod in modules:
        try:
            paths.append(inspect.getabsfile(mod))
        except TypeError:
            # built-in modules have no source file
            pass
    return sorted(set(paths))
138 138
def _mtimehash(paths):
    """return a quick hash for detecting file changes

    mtimehash calls stat on given paths and calculate a hash based on size and
    mtime of each file. mtimehash does not read file content because reading is
    expensive. therefore it's not 100% reliable for detecting content changes.
    it's possible to return different hashes for same file contents.
    it's also possible to return a same hash for different file contents for
    some carefully crafted situation.

    for chgserver, it is designed that once mtimehash changes, the server is
    considered outdated immediately and should no longer provide service.

    mtimehash is not included in confighash because we only know the paths of
    extensions after importing them (there is imp.find_module but that faces
    race conditions). We need to calculate confighash without importing.
    """
    def trystat(path):
        try:
            st = os.stat(path)
            return (st.st_mtime, st.st_size)
        except OSError:
            # could be ENOENT, EPERM etc. not fatal in any case
            pass
    return _hashlist([trystat(path) for path in paths])[:12]
164 164
class hashstate(object):
    """a structure storing confighash, mtimehash, paths used for mtimehash"""

    def __init__(self, confighash, mtimehash, mtimepaths):
        self.confighash = confighash
        self.mtimehash = mtimehash
        self.mtimepaths = mtimepaths

    @staticmethod
    def fromui(ui, mtimepaths=None):
        """Build a hashstate by sampling *ui*'s current config and the
        mtimes of the relevant files."""
        if mtimepaths is None:
            mtimepaths = _getmtimepaths(ui)
        chash = _confighash(ui)
        mhash = _mtimehash(mtimepaths)
        _log('confighash = %s mtimehash = %s\n' % (chash, mhash))
        return hashstate(chash, mhash, mtimepaths)
180 180
# copied from hgext/pager.py:uisetup()
def _setuppagercmd(ui, options, cmd):
    """Return the pager command to run for *cmd*, or None.

    Decides via pager.pager / $PAGER and the --pager option; when a
    pager will be used, pins ui.formatted and disables interactivity.
    """
    if not ui.formatted():
        return

    pagercmd = ui.config("pager", "pager", os.environ.get("PAGER"))
    always = util.parsebool(options['pager'])
    auto = options['pager'] == 'auto'
    usepager = False

    if pagercmd and always:
        usepager = True
    elif pagercmd and auto:
        # consult the attend/ignore lists to decide per command
        attended = ['annotate', 'cat', 'diff', 'export', 'glog', 'log', 'qdiff']
        attend = ui.configlist('pager', 'attend', attended)
        ignore = ui.configlist('pager', 'ignore')
        cmds, __ = cmdutil.findcmd(cmd, commands.table)

        for name in cmds:
            var = 'attend-%s' % name
            if ui.config('pager', var):
                # per-command override wins outright
                usepager = ui.configbool('pager', var)
                break
            if (name in attend or
                (name not in ignore and not attend)):
                usepager = True
                break

    if usepager:
        ui.setconfig('ui', 'formatted', ui.formatted(), 'pager')
        ui.setconfig('ui', 'interactive', False, 'pager')
        return pagercmd
217 217
218 218 def _newchgui(srcui, csystem):
219 219 class chgui(srcui.__class__):
220 220 def __init__(self, src=None):
221 221 super(chgui, self).__init__(src)
222 222 if src:
223 223 self._csystem = getattr(src, '_csystem', csystem)
224 224 else:
225 225 self._csystem = csystem
226 226
227 227 def system(self, cmd, environ=None, cwd=None, onerr=None,
228 228 errprefix=None):
229 229 # fallback to the original system method if the output needs to be
230 230 # captured (to self._buffers), or the output stream is not stdout
231 231 # (e.g. stderr, cStringIO), because the chg client is not aware of
232 232 # these situations and will behave differently (write to stdout).
233 233 if (any(s[1] for s in self._bufferstates)
234 234 or not util.safehasattr(self.fout, 'fileno')
235 235 or self.fout.fileno() != sys.stdout.fileno()):
236 236 return super(chgui, self).system(cmd, environ, cwd, onerr,
237 237 errprefix)
238 238 # copied from mercurial/util.py:system()
239 239 self.flush()
240 240 def py2shell(val):
241 241 if val is None or val is False:
242 242 return '0'
243 243 if val is True:
244 244 return '1'
245 245 return str(val)
246 246 env = os.environ.copy()
247 247 if environ:
248 248 env.update((k, py2shell(v)) for k, v in environ.iteritems())
249 249 env['HG'] = util.hgexecutable()
250 250 rc = self._csystem(cmd, env, cwd)
251 251 if rc and onerr:
252 252 errmsg = '%s %s' % (os.path.basename(cmd.split(None, 1)[0]),
253 253 util.explainexit(rc)[0])
254 254 if errprefix:
255 255 errmsg = '%s: %s' % (errprefix, errmsg)
256 256 raise onerr(errmsg)
257 257 return rc
258 258
259 259 return chgui(srcui)
260 260
def _loadnewui(srcui, args):
    """Build a fresh (newui, newlui) pair configured as a new hg run would be.

    Copies stdio streams and file-backed config from *srcui*, applies
    --config found in *args*, then loads the cwd/repo-local config.
    """
    newui = srcui.__class__()
    for attr in ['fin', 'fout', 'ferr', 'environ']:
        setattr(newui, attr, getattr(srcui, attr))
    if util.safehasattr(srcui, '_csystem'):
        newui._csystem = srcui._csystem

    # internal config: extensions.chgserver
    newui.setconfig('extensions', 'chgserver',
                    srcui.config('extensions', 'chgserver'), '--config')

    # command line args
    args = args[:]
    dispatch._parseconfig(newui, dispatch._earlygetopt(['--config'], args))

    # stolen from tortoisehg.util.copydynamicconfig()
    for section, name, value in srcui.walkconfig():
        source = srcui.configsource(section, name)
        if ':' in source or source == '--config':
            # path:line or command line; those are reloaded, not copied
            continue
        if source == 'none':
            # ui.configsource returns 'none' by default
            source = ''
        newui.setconfig(section, name, value, source)

    # load wd and repo config, copied from dispatch.py
    cwds = dispatch._earlygetopt(['--cwd'], args)
    cwd = cwds and os.path.realpath(cwds[-1]) or None
    rpath = dispatch._earlygetopt(["-R", "--repository", "--repo"], args)
    path, newlui = dispatch._getlocal(newui, rpath, wd=cwd)

    return (newui, newlui)
294 294
class channeledsystem(object):
    """Propagate ui.system() request in the following format:

    payload length (unsigned int),
    cmd, '\0',
    cwd, '\0',
    envkey, '=', val, '\0',
    ...
    envkey, '=', val

    and waits:

    exitcode length (unsigned int),
    exitcode (int)
    """
    def __init__(self, in_, out, channel):
        self.in_ = in_
        self.out = out
        self.channel = channel

    def __call__(self, cmd, environ, cwd):
        # serialize command, cwd and environment as one NUL-joined payload
        pieces = [util.quotecommand(cmd), os.path.abspath(cwd or '.')]
        pieces.extend('%s=%s' % (k, v) for k, v in environ.iteritems())
        payload = '\0'.join(pieces)
        self.out.write(struct.pack('>cI', self.channel, len(payload)))
        self.out.write(payload)
        self.out.flush()

        # the reply is a length-prefixed big-endian exit code
        length = self.in_.read(4)
        length, = struct.unpack('>I', length)
        if length != 4:
            raise error.Abort(_('invalid response'))
        rc, = struct.unpack('>i', self.in_.read(4))
        return rc
329 329
330 330 _iochannels = [
331 331 # server.ch, ui.fp, mode
332 332 ('cin', 'fin', 'rb'),
333 333 ('cout', 'fout', 'wb'),
334 334 ('cerr', 'ferr', 'wb'),
335 335 ]
336 336
337 337 class chgcmdserver(commandserver.server):
338 338 def __init__(self, ui, repo, fin, fout, sock, hashstate, baseaddress):
339 339 super(chgcmdserver, self).__init__(
340 340 _newchgui(ui, channeledsystem(fin, fout, 'S')), repo, fin, fout)
341 341 self.clientsock = sock
342 342 self._oldios = [] # original (self.ch, ui.fp, fd) before "attachio"
343 343 self.hashstate = hashstate
344 344 self.baseaddress = baseaddress
345 345 if hashstate is not None:
346 346 self.capabilities = self.capabilities.copy()
347 347 self.capabilities['validate'] = chgcmdserver.validate
348 348
349 349 def cleanup(self):
350 350 super(chgcmdserver, self).cleanup()
351 351 # dispatch._runcatch() does not flush outputs if exception is not
352 352 # handled by dispatch._dispatch()
353 353 self.ui.flush()
354 354 self._restoreio()
355 355
356 356 def attachio(self):
357 357 """Attach to client's stdio passed via unix domain socket; all
358 358 channels except cresult will no longer be used
359 359 """
360 360 # tell client to sendmsg() with 1-byte payload, which makes it
361 361 # distinctive from "attachio\n" command consumed by client.read()
362 362 self.clientsock.sendall(struct.pack('>cI', 'I', 1))
363 363 clientfds = osutil.recvfds(self.clientsock.fileno())
364 364 _log('received fds: %r\n' % clientfds)
365 365
366 366 ui = self.ui
367 367 ui.flush()
368 368 first = self._saveio()
369 369 for fd, (cn, fn, mode) in zip(clientfds, _iochannels):
370 370 assert fd > 0
371 371 fp = getattr(ui, fn)
372 372 os.dup2(fd, fp.fileno())
373 373 os.close(fd)
374 374 if not first:
375 375 continue
376 376 # reset buffering mode when client is first attached. as we want
377 377 # to see output immediately on pager, the mode stays unchanged
378 378 # when client re-attached. ferr is unchanged because it should
379 379 # be unbuffered no matter if it is a tty or not.
380 380 if fn == 'ferr':
381 381 newfp = fp
382 382 else:
383 383 # make it line buffered explicitly because the default is
384 384 # decided on first write(), where fout could be a pager.
385 385 if fp.isatty():
386 386 bufsize = 1 # line buffered
387 387 else:
388 388 bufsize = -1 # system default
389 389 newfp = os.fdopen(fp.fileno(), mode, bufsize)
390 390 setattr(ui, fn, newfp)
391 391 setattr(self, cn, newfp)
392 392
393 393 self.cresult.write(struct.pack('>i', len(clientfds)))
394 394
395 395 def _saveio(self):
396 396 if self._oldios:
397 397 return False
398 398 ui = self.ui
399 399 for cn, fn, _mode in _iochannels:
400 400 ch = getattr(self, cn)
401 401 fp = getattr(ui, fn)
402 402 fd = os.dup(fp.fileno())
403 403 self._oldios.append((ch, fp, fd))
404 404 return True
405 405
406 406 def _restoreio(self):
407 407 ui = self.ui
408 408 for (ch, fp, fd), (cn, fn, _mode) in zip(self._oldios, _iochannels):
409 409 newfp = getattr(ui, fn)
410 410 # close newfp while it's associated with client; otherwise it
411 411 # would be closed when newfp is deleted
412 412 if newfp is not fp:
413 413 newfp.close()
414 414 # restore original fd: fp is open again
415 415 os.dup2(fd, fp.fileno())
416 416 os.close(fd)
417 417 setattr(self, cn, ch)
418 418 setattr(ui, fn, fp)
419 419 del self._oldios[:]
420 420
421 421 def validate(self):
422 422 """Reload the config and check if the server is up to date
423 423
424 424 Read a list of '\0' separated arguments.
425 425 Write a non-empty list of '\0' separated instruction strings or '\0'
426 426 if the list is empty.
427 427 An instruction string could be either:
428 428 - "unlink $path", the client should unlink the path to stop the
429 429 outdated server.
430 430 - "redirect $path", the client should attempt to connect to $path
431 431 first. If it does not work, start a new server. It implies
432 432 "reconnect".
433 433 - "exit $n", the client should exit directly with code n.
434 434 This may happen if we cannot parse the config.
435 435 - "reconnect", the client should close the connection and
436 436 reconnect.
437 437 If neither "reconnect" nor "redirect" is included in the instruction
438 438 list, the client can continue with this server after completing all
439 439 the instructions.
440 440 """
441 441 args = self._readlist()
442 442 try:
443 443 self.ui, lui = _loadnewui(self.ui, args)
444 444 except error.ParseError as inst:
445 445 dispatch._formatparse(self.ui.warn, inst)
446 446 self.ui.flush()
447 447 self.cresult.write('exit 255')
448 448 return
449 449 newhash = hashstate.fromui(lui, self.hashstate.mtimepaths)
450 450 insts = []
451 451 if newhash.mtimehash != self.hashstate.mtimehash:
452 452 addr = _hashaddress(self.baseaddress, self.hashstate.confighash)
453 453 insts.append('unlink %s' % addr)
454 454 # mtimehash is empty if one or more extensions fail to load.
455 455 # to be compatible with hg, still serve the client this time.
456 456 if self.hashstate.mtimehash:
457 457 insts.append('reconnect')
458 458 if newhash.confighash != self.hashstate.confighash:
459 459 addr = _hashaddress(self.baseaddress, newhash.confighash)
460 460 insts.append('redirect %s' % addr)
461 461 _log('validate: %s\n' % insts)
462 462 self.cresult.write('\0'.join(insts) or '\0')
463 463
464 464 def chdir(self):
465 465 """Change current directory
466 466
467 467 Note that the behavior of --cwd option is a bit different from this.
468 468 It does not affect --config parameter.
469 469 """
470 470 path = self._readstr()
471 471 if not path:
472 472 return
473 473 _log('chdir to %r\n' % path)
474 474 os.chdir(path)
475 475
476 476 def setumask(self):
477 477 """Change umask"""
478 478 mask = struct.unpack('>I', self._read(4))[0]
479 479 _log('setumask %r\n' % mask)
480 480 os.umask(mask)
481 481
482 482 def getpager(self):
483 483 """Read cmdargs and write pager command to r-channel if enabled
484 484
485 485 If pager isn't enabled, this writes '\0' because channeledoutput
486 486 does not allow to write empty data.
487 487 """
488 488 args = self._readlist()
489 489 try:
490 490 cmd, _func, args, options, _cmdoptions = dispatch._parse(self.ui,
491 491 args)
492 492 except (error.Abort, error.AmbiguousCommand, error.CommandError,
493 493 error.UnknownCommand):
494 494 cmd = None
495 495 options = {}
496 496 if not cmd or 'pager' not in options:
497 497 self.cresult.write('\0')
498 498 return
499 499
500 500 pagercmd = _setuppagercmd(self.ui, options, cmd)
501 501 if pagercmd:
502 502 # Python's SIGPIPE is SIG_IGN by default. change to SIG_DFL so
503 503 # we can exit if the pipe to the pager is closed
504 504 if util.safehasattr(signal, 'SIGPIPE') and \
505 505 signal.getsignal(signal.SIGPIPE) == signal.SIG_IGN:
506 506 signal.signal(signal.SIGPIPE, signal.SIG_DFL)
507 507 self.cresult.write(pagercmd)
508 508 else:
509 509 self.cresult.write('\0')
510 510
511 511 def setenv(self):
512 512 """Clear and update os.environ
513 513
514 514 Note that not all variables can make an effect on the running process.
515 515 """
516 516 l = self._readlist()
517 517 try:
518 518 newenv = dict(s.split('=', 1) for s in l)
519 519 except ValueError:
520 520 raise ValueError('unexpected value in setenv request')
521 521 _log('setenv: %r\n' % sorted(newenv.keys()))
522 522 os.environ.clear()
523 523 os.environ.update(newenv)
524 524
525 525 capabilities = commandserver.server.capabilities.copy()
526 526 capabilities.update({'attachio': attachio,
527 527 'chdir': chdir,
528 528 'getpager': getpager,
529 529 'setenv': setenv,
530 530 'setumask': setumask})
531 531
532 532 def _tempaddress(address):
533 533 return '%s.%d.tmp' % (address, os.getpid())
534 534
535 535 def _hashaddress(address, hashstr):
536 536 return '%s-%s' % (address, hashstr)
537 537
538 538 class chgunixservicehandler(object):
539 539 """Set of operations for chg services"""
540 540
541 541 pollinterval = 1 # [sec]
542 542
543 543 def __init__(self, ui):
544 544 self.ui = ui
545 545 self._idletimeout = ui.configint('chgserver', 'idletimeout', 3600)
546 546 self._lastactive = time.time()
547 547
548 548 def bindsocket(self, sock, address):
549 549 self._inithashstate(address)
550 550 self._checkextensions()
551 551 self._bind(sock)
552 552 self._createsymlink()
553 553
554 554 def _inithashstate(self, address):
555 555 self._baseaddress = address
556 556 if self.ui.configbool('chgserver', 'skiphash', False):
557 557 self._hashstate = None
558 558 self._realaddress = address
559 559 return
560 560 self._hashstate = hashstate.fromui(self.ui)
561 561 self._realaddress = _hashaddress(address, self._hashstate.confighash)
562 562
563 563 def _checkextensions(self):
564 564 if not self._hashstate:
565 565 return
566 566 if extensions.notloaded():
567 567 # one or more extensions failed to load. mtimehash becomes
568 568 # meaningless because we do not know the paths of those extensions.
569 569 # set mtimehash to an illegal hash value to invalidate the server.
570 570 self._hashstate.mtimehash = ''
571 571
572 572 def _bind(self, sock):
573 573 # use a unique temp address so we can stat the file and do ownership
574 574 # check later
575 575 tempaddress = _tempaddress(self._realaddress)
576 576 util.bindunixsocket(sock, tempaddress)
577 577 self._socketstat = os.stat(tempaddress)
578 578 # rename will replace the old socket file if exists atomically. the
579 579 # old server will detect ownership change and exit.
580 580 util.rename(tempaddress, self._realaddress)
581 581
582 582 def _createsymlink(self):
583 583 if self._baseaddress == self._realaddress:
584 584 return
585 585 tempaddress = _tempaddress(self._baseaddress)
586 586 os.symlink(os.path.basename(self._realaddress), tempaddress)
587 587 util.rename(tempaddress, self._baseaddress)
588 588
589 589 def _issocketowner(self):
590 590 try:
591 591 stat = os.stat(self._realaddress)
592 592 return (stat.st_ino == self._socketstat.st_ino and
593 593 stat.st_mtime == self._socketstat.st_mtime)
594 594 except OSError:
595 595 return False
596 596
597 597 def unlinksocket(self, address):
598 598 if not self._issocketowner():
599 599 return
600 600 # it is possible to have a race condition here that we may
601 601 # remove another server's socket file. but that's okay
602 602 # since that server will detect and exit automatically and
603 603 # the client will start a new server on demand.
604 604 try:
605 605 os.unlink(self._realaddress)
606 606 except OSError as exc:
607 607 if exc.errno != errno.ENOENT:
608 608 raise
609 609
610 610 def printbanner(self, address):
611 611 # no "listening at" message should be printed to simulate hg behavior
612 612 pass
613 613
614 614 def shouldexit(self):
615 615 if not self._issocketowner():
616 616 self.ui.debug('%s is not owned, exiting.\n' % self._realaddress)
617 617 return True
618 618 if time.time() - self._lastactive > self._idletimeout:
619 619 self.ui.debug('being idle too long. exiting.\n')
620 620 return True
621 621 return False
622 622
623 623 def newconnection(self):
624 624 self._lastactive = time.time()
625 625
626 626 def createcmdserver(self, repo, conn, fin, fout):
627 627 return chgcmdserver(self.ui, repo, fin, fout, conn,
628 628 self._hashstate, self._baseaddress)
629 629
630 630 def chgunixservice(ui, repo, opts):
631 631 if repo:
632 632 # one chgserver can serve multiple repos. drop repo information
633 633 ui.setconfig('bundle', 'mainreporoot', '', 'repo')
634 634 h = chgunixservicehandler(ui)
635 635 return commandserver.unixforkingservice(ui, repo=None, opts=opts, handler=h)
636 636
637 637 def uisetup(ui):
638 638 commandserver._servicemap['chgunix'] = chgunixservice
639 639
640 640 # CHGINTERNALMARK is temporarily set by chg client to detect if chg will
641 641 # start another chg. drop it to avoid possible side effects.
642 642 if 'CHGINTERNALMARK' in os.environ:
643 643 del os.environ['CHGINTERNALMARK']
@@ -1,69 +1,69 b''
1 1 # Mercurial extension to provide the 'hg children' command
2 2 #
3 3 # Copyright 2007 by Intevation GmbH <intevation@intevation.de>
4 4 #
5 5 # Author(s):
6 6 # Thomas Arendsen Hein <thomas@intevation.de>
7 7 #
8 8 # This software may be used and distributed according to the terms of the
9 9 # GNU General Public License version 2 or any later version.
10 10
11 11 '''command to display child changesets (DEPRECATED)
12 12
13 13 This extension is deprecated. You should use :hg:`log -r
14 14 "children(REV)"` instead.
15 15 '''
16 16
17 17 from __future__ import absolute_import
18 18
19 19 from mercurial.i18n import _
20 20 from mercurial import (
21 21 cmdutil,
22 22 commands,
23 23 )
24 24
25 25 templateopts = commands.templateopts
26 26
27 27 cmdtable = {}
28 28 command = cmdutil.command(cmdtable)
29 # Note for extension authors: ONLY specify testedwith = 'internal' for
29 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
30 30 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
31 31 # be specifying the version(s) of Mercurial they are tested with, or
32 32 # leave the attribute unspecified.
33 testedwith = 'internal'
33 testedwith = 'ships-with-hg-core'
34 34
35 35 @command('children',
36 36 [('r', 'rev', '',
37 37 _('show children of the specified revision'), _('REV')),
38 38 ] + templateopts,
39 39 _('hg children [-r REV] [FILE]'),
40 40 inferrepo=True)
41 41 def children(ui, repo, file_=None, **opts):
42 42 """show the children of the given or working directory revision
43 43
44 44 Print the children of the working directory's revisions. If a
45 45 revision is given via -r/--rev, the children of that revision will
46 46 be printed. If a file argument is given, revision in which the
47 47 file was last changed (after the working directory revision or the
48 48 argument to --rev if given) is printed.
49 49
50 50 Please use :hg:`log` instead::
51 51
52 52 hg children => hg log -r "children()"
53 53 hg children -r REV => hg log -r "children(REV)"
54 54
55 55 See :hg:`help log` and :hg:`help revsets.children`.
56 56
57 57 """
58 58 rev = opts.get('rev')
59 59 if file_:
60 60 fctx = repo.filectx(file_, changeid=rev)
61 61 childctxs = [fcctx.changectx() for fcctx in fctx.children()]
62 62 else:
63 63 ctx = repo[rev]
64 64 childctxs = ctx.children()
65 65
66 66 displayer = cmdutil.show_changeset(ui, repo, opts)
67 67 for cctx in childctxs:
68 68 displayer.show(cctx)
69 69 displayer.close()
@@ -1,211 +1,211 b''
1 1 # churn.py - create a graph of revisions count grouped by template
2 2 #
3 3 # Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
4 4 # Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''command to display statistics about repository history'''
10 10
11 11 from __future__ import absolute_import
12 12
13 13 import datetime
14 14 import os
15 15 import time
16 16
17 17 from mercurial.i18n import _
18 18 from mercurial import (
19 19 cmdutil,
20 20 commands,
21 21 encoding,
22 22 patch,
23 23 scmutil,
24 24 util,
25 25 )
26 26
27 27 cmdtable = {}
28 28 command = cmdutil.command(cmdtable)
29 # Note for extension authors: ONLY specify testedwith = 'internal' for
29 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
30 30 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
31 31 # be specifying the version(s) of Mercurial they are tested with, or
32 32 # leave the attribute unspecified.
33 testedwith = 'internal'
33 testedwith = 'ships-with-hg-core'
34 34
35 35 def maketemplater(ui, repo, tmpl):
36 36 return cmdutil.changeset_templater(ui, repo, False, None, tmpl, None, False)
37 37
38 38 def changedlines(ui, repo, ctx1, ctx2, fns):
39 39 added, removed = 0, 0
40 40 fmatch = scmutil.matchfiles(repo, fns)
41 41 diff = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
42 42 for l in diff.split('\n'):
43 43 if l.startswith("+") and not l.startswith("+++ "):
44 44 added += 1
45 45 elif l.startswith("-") and not l.startswith("--- "):
46 46 removed += 1
47 47 return (added, removed)
48 48
49 49 def countrate(ui, repo, amap, *pats, **opts):
50 50 """Calculate stats"""
51 51 if opts.get('dateformat'):
52 52 def getkey(ctx):
53 53 t, tz = ctx.date()
54 54 date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
55 55 return date.strftime(opts['dateformat'])
56 56 else:
57 57 tmpl = opts.get('oldtemplate') or opts.get('template')
58 58 tmpl = maketemplater(ui, repo, tmpl)
59 59 def getkey(ctx):
60 60 ui.pushbuffer()
61 61 tmpl.show(ctx)
62 62 return ui.popbuffer()
63 63
64 64 state = {'count': 0}
65 65 rate = {}
66 66 df = False
67 67 if opts.get('date'):
68 68 df = util.matchdate(opts['date'])
69 69
70 70 m = scmutil.match(repo[None], pats, opts)
71 71 def prep(ctx, fns):
72 72 rev = ctx.rev()
73 73 if df and not df(ctx.date()[0]): # doesn't match date format
74 74 return
75 75
76 76 key = getkey(ctx).strip()
77 77 key = amap.get(key, key) # alias remap
78 78 if opts.get('changesets'):
79 79 rate[key] = (rate.get(key, (0,))[0] + 1, 0)
80 80 else:
81 81 parents = ctx.parents()
82 82 if len(parents) > 1:
83 83 ui.note(_('revision %d is a merge, ignoring...\n') % (rev,))
84 84 return
85 85
86 86 ctx1 = parents[0]
87 87 lines = changedlines(ui, repo, ctx1, ctx, fns)
88 88 rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]
89 89
90 90 state['count'] += 1
91 91 ui.progress(_('analyzing'), state['count'], total=len(repo),
92 92 unit=_('revisions'))
93 93
94 94 for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
95 95 continue
96 96
97 97 ui.progress(_('analyzing'), None)
98 98
99 99 return rate
100 100
101 101
102 102 @command('churn',
103 103 [('r', 'rev', [],
104 104 _('count rate for the specified revision or revset'), _('REV')),
105 105 ('d', 'date', '',
106 106 _('count rate for revisions matching date spec'), _('DATE')),
107 107 ('t', 'oldtemplate', '',
108 108 _('template to group changesets (DEPRECATED)'), _('TEMPLATE')),
109 109 ('T', 'template', '{author|email}',
110 110 _('template to group changesets'), _('TEMPLATE')),
111 111 ('f', 'dateformat', '',
112 112 _('strftime-compatible format for grouping by date'), _('FORMAT')),
113 113 ('c', 'changesets', False, _('count rate by number of changesets')),
114 114 ('s', 'sort', False, _('sort by key (default: sort by count)')),
115 115 ('', 'diffstat', False, _('display added/removed lines separately')),
116 116 ('', 'aliases', '', _('file with email aliases'), _('FILE')),
117 117 ] + commands.walkopts,
118 118 _("hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]"),
119 119 inferrepo=True)
120 120 def churn(ui, repo, *pats, **opts):
121 121 '''histogram of changes to the repository
122 122
123 123 This command will display a histogram representing the number
124 124 of changed lines or revisions, grouped according to the given
125 125 template. The default template will group changes by author.
126 126 The --dateformat option may be used to group the results by
127 127 date instead.
128 128
129 129 Statistics are based on the number of changed lines, or
130 130 alternatively the number of matching revisions if the
131 131 --changesets option is specified.
132 132
133 133 Examples::
134 134
135 135 # display count of changed lines for every committer
136 136 hg churn -t "{author|email}"
137 137
138 138 # display daily activity graph
139 139 hg churn -f "%H" -s -c
140 140
141 141 # display activity of developers by month
142 142 hg churn -f "%Y-%m" -s -c
143 143
144 144 # display count of lines changed in every year
145 145 hg churn -f "%Y" -s
146 146
147 147 It is possible to map alternate email addresses to a main address
148 148 by providing a file using the following format::
149 149
150 150 <alias email> = <actual email>
151 151
152 152 Such a file may be specified with the --aliases option, otherwise
153 153 a .hgchurn file will be looked for in the working directory root.
154 154 Aliases will be split from the rightmost "=".
155 155 '''
156 156 def pad(s, l):
157 157 return s + " " * (l - encoding.colwidth(s))
158 158
159 159 amap = {}
160 160 aliases = opts.get('aliases')
161 161 if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
162 162 aliases = repo.wjoin('.hgchurn')
163 163 if aliases:
164 164 for l in open(aliases, "r"):
165 165 try:
166 166 alias, actual = l.rsplit('=' in l and '=' or None, 1)
167 167 amap[alias.strip()] = actual.strip()
168 168 except ValueError:
169 169 l = l.strip()
170 170 if l:
171 171 ui.warn(_("skipping malformed alias: %s\n") % l)
172 172 continue
173 173
174 174 rate = countrate(ui, repo, amap, *pats, **opts).items()
175 175 if not rate:
176 176 return
177 177
178 178 if opts.get('sort'):
179 179 rate.sort()
180 180 else:
181 181 rate.sort(key=lambda x: (-sum(x[1]), x))
182 182
183 183 # Be careful not to have a zero maxcount (issue833)
184 184 maxcount = float(max(sum(v) for k, v in rate)) or 1.0
185 185 maxname = max(len(k) for k, v in rate)
186 186
187 187 ttywidth = ui.termwidth()
188 188 ui.debug("assuming %i character terminal\n" % ttywidth)
189 189 width = ttywidth - maxname - 2 - 2 - 2
190 190
191 191 if opts.get('diffstat'):
192 192 width -= 15
193 193 def format(name, diffstat):
194 194 added, removed = diffstat
195 195 return "%s %15s %s%s\n" % (pad(name, maxname),
196 196 '+%d/-%d' % (added, removed),
197 197 ui.label('+' * charnum(added),
198 198 'diffstat.inserted'),
199 199 ui.label('-' * charnum(removed),
200 200 'diffstat.deleted'))
201 201 else:
202 202 width -= 6
203 203 def format(name, count):
204 204 return "%s %6d %s\n" % (pad(name, maxname), sum(count),
205 205 '*' * charnum(sum(count)))
206 206
207 207 def charnum(count):
208 208 return int(round(count * width / maxcount))
209 209
210 210 for name, count in rate:
211 211 ui.write(format(name, count))
@@ -1,186 +1,186 b''
1 1 # This software may be used and distributed according to the terms of the
2 2 # GNU General Public License version 2 or any later version.
3 3
4 4 """advertise pre-generated bundles to seed clones
5 5
6 6 "clonebundles" is a server-side extension used to advertise the existence
7 7 of pre-generated, externally hosted bundle files to clients that are
8 8 cloning so that cloning can be faster, more reliable, and require less
9 9 resources on the server.
10 10
11 11 Cloning can be a CPU and I/O intensive operation on servers. Traditionally,
12 12 the server, in response to a client's request to clone, dynamically generates
13 13 a bundle containing the entire repository content and sends it to the client.
14 14 There is no caching on the server and the server will have to redundantly
15 15 generate the same outgoing bundle in response to each clone request. For
16 16 servers with large repositories or with high clone volume, the load from
17 17 clones can make scaling the server challenging and costly.
18 18
19 19 This extension provides server operators the ability to offload potentially
20 20 expensive clone load to an external service. Here's how it works.
21 21
22 22 1. A server operator establishes a mechanism for making bundle files available
23 23 on a hosting service where Mercurial clients can fetch them.
24 24 2. A manifest file listing available bundle URLs and some optional metadata
25 25 is added to the Mercurial repository on the server.
26 26 3. A client initiates a clone against a clone bundles aware server.
27 27 4. The client sees the server is advertising clone bundles and fetches the
28 28 manifest listing available bundles.
29 29 5. The client filters and sorts the available bundles based on what it
30 30 supports and prefers.
31 31 6. The client downloads and applies an available bundle from the
32 32 server-specified URL.
33 33 7. The client reconnects to the original server and performs the equivalent
34 34 of :hg:`pull` to retrieve all repository data not in the bundle. (The
35 35 repository could have been updated between when the bundle was created
36 36 and when the client started the clone.)
37 37
38 38 Instead of the server generating full repository bundles for every clone
39 39 request, it generates full bundles once and they are subsequently reused to
40 40 bootstrap new clones. The server may still transfer data at clone time.
41 41 However, this is only data that has been added/changed since the bundle was
42 42 created. For large, established repositories, this can reduce server load for
43 43 clones to less than 1% of original.
44 44
45 45 To work, this extension requires the following of server operators:
46 46
47 47 * Generating bundle files of repository content (typically periodically,
48 48 such as once per day).
49 49 * A file server that clients have network access to and that Python knows
50 50 how to talk to through its normal URL handling facility (typically an
51 51 HTTP server).
52 52 * A process for keeping the bundles manifest in sync with available bundle
53 53 files.
54 54
55 55 Strictly speaking, using a static file hosting server isn't required: a server
56 56 operator could use a dynamic service for retrieving bundle data. However,
57 57 static file hosting services are simple and scalable and should be sufficient
58 58 for most needs.
59 59
60 60 Bundle files can be generated with the :hg:`bundle` command. Typically
61 61 :hg:`bundle --all` is used to produce a bundle of the entire repository.
62 62
63 63 :hg:`debugcreatestreamclonebundle` can be used to produce a special
64 64 *streaming clone bundle*. These are bundle files that are extremely efficient
65 65 to produce and consume (read: fast). However, they are larger than
66 66 traditional bundle formats and require that clients support the exact set
67 67 of repository data store formats in use by the repository that created them.
68 68 Typically, a newer server can serve data that is compatible with older clients.
69 69 However, *streaming clone bundles* don't have this guarantee. **Server
70 70 operators need to be aware that newer versions of Mercurial may produce
71 71 streaming clone bundles incompatible with older Mercurial versions.**
72 72
73 73 A server operator is responsible for creating a ``.hg/clonebundles.manifest``
74 74 file containing the list of available bundle files suitable for seeding
75 75 clones. If this file does not exist, the repository will not advertise the
76 76 existence of clone bundles when clients connect.
77 77
78 78 The manifest file contains a newline (\n) delimited list of entries.
79 79
80 80 Each line in this file defines an available bundle. Lines have the format:
81 81
82 82 <URL> [<key>=<value>[ <key>=<value>]]
83 83
84 84 That is, a URL followed by an optional, space-delimited list of key=value
85 85 pairs describing additional properties of this bundle. Both keys and values
86 86 are URI encoded.
87 87
88 88 Keys in UPPERCASE are reserved for use by Mercurial and are defined below.
89 89 All non-uppercase keys can be used by site installations. An example use
90 90 for custom properties is to use the *datacenter* attribute to define which
91 91 data center a file is hosted in. Clients could then prefer a server in the
92 92 data center closest to them.
93 93
94 94 The following reserved keys are currently defined:
95 95
96 96 BUNDLESPEC
97 97 A "bundle specification" string that describes the type of the bundle.
98 98
99 99 These are string values that are accepted by the "--type" argument of
100 100 :hg:`bundle`.
101 101
102 102 The values are parsed in strict mode, which means they must be of the
103 103 "<compression>-<type>" form. See
104 104 mercurial.exchange.parsebundlespec() for more details.
105 105
106 106 :hg:`debugbundle --spec` can be used to print the bundle specification
107 107 string for a bundle file. The output of this command can be used verbatim
108 108 for the value of ``BUNDLESPEC`` (it is already escaped).
109 109
110 110 Clients will automatically filter out specifications that are unknown or
111 111 unsupported so they won't attempt to download something that likely won't
112 112 apply.
113 113
114 114 The actual value doesn't impact client behavior beyond filtering:
115 115 clients will still sniff the bundle type from the header of downloaded
116 116 files.
117 117
118 118 **Use of this key is highly recommended**, as it allows clients to
119 119 easily skip unsupported bundles. If this key is not defined, an old
120 120 client may attempt to apply a bundle that it is incapable of reading.
121 121
122 122 REQUIRESNI
123 123 Whether Server Name Indication (SNI) is required to connect to the URL.
124 124 SNI allows servers to use multiple certificates on the same IP. It is
125 125 somewhat common in CDNs and other hosting providers. Older Python
126 126 versions do not support SNI. Defining this attribute enables clients
127 127 with older Python versions to filter this entry without experiencing
128 128 an opaque SSL failure at connection time.
129 129
130 130 If this is defined, it is important to advertise a non-SNI fallback
131 131 URL or clients running old Python releases may not be able to clone
132 132 with the clonebundles facility.
133 133
134 134 Value should be "true".
135 135
136 136 Manifests can contain multiple entries. Assuming metadata is defined, clients
137 137 will filter entries from the manifest that they don't support. The remaining
138 138 entries are optionally sorted by client preferences
139 139 (``experimental.clonebundleprefers`` config option). The client then attempts
140 140 to fetch the bundle at the first URL in the remaining list.
141 141
142 142 **Errors when downloading a bundle will fail the entire clone operation:
143 143 clients do not automatically fall back to a traditional clone.** The reason
144 144 for this is that if a server is using clone bundles, it is probably doing so
145 145 because the feature is necessary to help it scale. In other words, there
146 146 is an assumption that clone load will be offloaded to another service and
147 147 that the Mercurial server isn't responsible for serving this clone load.
148 148 If that other service experiences issues and clients start mass falling back to
149 149 the original Mercurial server, the added clone load could overwhelm the server
150 150 due to unexpected load and effectively take it offline. Not having clients
151 151 automatically fall back to cloning from the original server mitigates this
152 152 scenario.
153 153
154 154 Because there is no automatic Mercurial server fallback on failure of the
155 155 bundle hosting service, it is important for server operators to view the bundle
156 156 hosting service as an extension of the Mercurial server in terms of
157 157 availability and service level agreements: if the bundle hosting service goes
158 158 down, so does the ability for clients to clone. Note: clients will see a
159 159 message informing them how to bypass the clone bundles facility when a failure
160 160 occurs. So server operators should prepare for some people to follow these
161 161 instructions when a failure occurs, thus driving more load to the original
162 162 Mercurial server when the bundle hosting service fails.
163 163 """
164 164
165 165 from __future__ import absolute_import
166 166
167 167 from mercurial import (
168 168 extensions,
169 169 wireproto,
170 170 )
171 171
172 testedwith = 'internal'
172 testedwith = 'ships-with-hg-core'
173 173
174 174 def capabilities(orig, repo, proto):
175 175 caps = orig(repo, proto)
176 176
177 177 # Only advertise if a manifest exists. This does add some I/O to requests.
178 178 # But this should be cheaper than a wasted network round trip due to
179 179 # missing file.
180 180 if repo.opener.exists('clonebundles.manifest'):
181 181 caps.append('clonebundles')
182 182
183 183 return caps
184 184
185 185 def extsetup(ui):
186 186 extensions.wrapfunction(wireproto, '_capabilities', capabilities)
@@ -1,665 +1,665 b''
1 1 # color.py color output for Mercurial commands
2 2 #
3 3 # Copyright (C) 2007 Kevin Christen <kevin.christen@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''colorize output from some commands
9 9
10 10 The color extension colorizes output from several Mercurial commands.
11 11 For example, the diff command shows additions in green and deletions
12 12 in red, while the status command shows modified files in magenta. Many
13 13 other commands have analogous colors. It is possible to customize
14 14 these colors.
15 15
16 16 Effects
17 17 -------
18 18
19 19 Other effects in addition to color, like bold and underlined text, are
20 20 also available. By default, the terminfo database is used to find the
21 21 terminal codes used to change color and effect. If terminfo is not
22 22 available, then effects are rendered with the ECMA-48 SGR control
23 23 function (aka ANSI escape codes).
24 24
25 25 The available effects in terminfo mode are 'blink', 'bold', 'dim',
26 26 'inverse', 'invisible', 'italic', 'standout', and 'underline'; in
27 27 ECMA-48 mode, the options are 'bold', 'inverse', 'italic', and
28 28 'underline'. How each is rendered depends on the terminal emulator.
29 29 Some may not be available for a given terminal type, and will be
30 30 silently ignored.
31 31
32 32 Labels
33 33 ------
34 34
35 35 Text receives color effects depending on the labels that it has. Many
36 36 default Mercurial commands emit labelled text. You can also define
37 37 your own labels in templates using the label function, see :hg:`help
38 38 templates`. A single portion of text may have more than one label. In
39 39 that case, effects given to the last label will override any other
40 40 effects. This includes the special "none" effect, which nullifies
41 41 other effects.
42 42
43 43 Labels are normally invisible. In order to see these labels and their
44 44 position in the text, use the global --color=debug option. The same
45 45 anchor text may be associated to multiple labels, e.g.
46 46
47 47 [log.changeset changeset.secret|changeset: 22611:6f0a53c8f587]
48 48
49 49 The following are the default effects for some default labels. Default
50 50 effects may be overridden from your configuration file::
51 51
52 52 [color]
53 53 status.modified = blue bold underline red_background
54 54 status.added = green bold
55 55 status.removed = red bold blue_background
56 56 status.deleted = cyan bold underline
57 57 status.unknown = magenta bold underline
58 58 status.ignored = black bold
59 59
60 60 # 'none' turns off all effects
61 61 status.clean = none
62 62 status.copied = none
63 63
64 64 qseries.applied = blue bold underline
65 65 qseries.unapplied = black bold
66 66 qseries.missing = red bold
67 67
68 68 diff.diffline = bold
69 69 diff.extended = cyan bold
70 70 diff.file_a = red bold
71 71 diff.file_b = green bold
72 72 diff.hunk = magenta
73 73 diff.deleted = red
74 74 diff.inserted = green
75 75 diff.changed = white
76 76 diff.tab =
77 77 diff.trailingwhitespace = bold red_background
78 78
79 79 # Blank so it inherits the style of the surrounding label
80 80 changeset.public =
81 81 changeset.draft =
82 82 changeset.secret =
83 83
84 84 resolve.unresolved = red bold
85 85 resolve.resolved = green bold
86 86
87 87 bookmarks.active = green
88 88
89 89 branches.active = none
90 90 branches.closed = black bold
91 91 branches.current = green
92 92 branches.inactive = none
93 93
94 94 tags.normal = green
95 95 tags.local = black bold
96 96
97 97 rebase.rebased = blue
98 98 rebase.remaining = red bold
99 99
100 100 shelve.age = cyan
101 101 shelve.newest = green bold
102 102 shelve.name = blue bold
103 103
104 104 histedit.remaining = red bold
105 105
106 106 Custom colors
107 107 -------------
108 108
109 109 Because there are only eight standard colors, this module allows you
110 110 to define color names for other color slots which might be available
111 111 for your terminal type, assuming terminfo mode. For instance::
112 112
113 113 color.brightblue = 12
114 114 color.pink = 207
115 115 color.orange = 202
116 116
117 117 to set 'brightblue' to color slot 12 (useful for 16 color terminals
118 118 that have brighter colors defined in the upper eight) and, 'pink' and
119 119 'orange' to colors in 256-color xterm's default color cube. These
120 120 defined colors may then be used as any of the pre-defined eight,
121 121 including appending '_background' to set the background to that color.
122 122
123 123 Modes
124 124 -----
125 125
126 126 By default, the color extension will use ANSI mode (or win32 mode on
127 127 Windows) if it detects a terminal. To override auto mode (to enable
128 128 terminfo mode, for example), set the following configuration option::
129 129
130 130 [color]
131 131 mode = terminfo
132 132
133 133 Any value other than 'ansi', 'win32', 'terminfo', or 'auto' will
134 134 disable color.
135 135
136 136 Note that on some systems, terminfo mode may cause problems when using
137 137 color with the pager extension and less -R. less with the -R option
138 138 will only display ECMA-48 color codes, and terminfo mode may sometimes
139 139 emit codes that less doesn't understand. You can work around this by
140 140 either using ansi mode (or auto mode), or by using less -r (which will
141 141 pass through all terminal control codes, not just color control
142 142 codes).
143 143
144 144 On some systems (such as MSYS in Windows), the terminal may support
145 145 a different color mode than the pager (activated via the "pager"
146 146 extension). It is possible to define separate modes depending on whether
147 147 the pager is active::
148 148
149 149 [color]
150 150 mode = auto
151 151 pagermode = ansi
152 152
153 153 If ``pagermode`` is not defined, the ``mode`` will be used.
154 154 '''
155 155
156 156 from __future__ import absolute_import
157 157
158 158 import os
159 159
160 160 from mercurial.i18n import _
161 161 from mercurial import (
162 162 cmdutil,
163 163 commands,
164 164 dispatch,
165 165 extensions,
166 166 subrepo,
167 167 ui as uimod,
168 168 util,
169 169 )
170 170
171 171 cmdtable = {}
172 172 command = cmdutil.command(cmdtable)
173 # Note for extension authors: ONLY specify testedwith = 'internal' for
173 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
174 174 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
175 175 # be specifying the version(s) of Mercurial they are tested with, or
176 176 # leave the attribute unspecified.
177 testedwith = 'internal'
177 testedwith = 'ships-with-hg-core'
178 178
179 179 # start and stop parameters for effects
180 180 _effects = {'none': 0, 'black': 30, 'red': 31, 'green': 32, 'yellow': 33,
181 181 'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37, 'bold': 1,
182 182 'italic': 3, 'underline': 4, 'inverse': 7, 'dim': 2,
183 183 'black_background': 40, 'red_background': 41,
184 184 'green_background': 42, 'yellow_background': 43,
185 185 'blue_background': 44, 'purple_background': 45,
186 186 'cyan_background': 46, 'white_background': 47}
187 187
188 188 def _terminfosetup(ui, mode):
189 189 '''Initialize terminfo data and the terminal if we're in terminfo mode.'''
190 190
191 191 global _terminfo_params
192 192 # If we failed to load curses, we go ahead and return.
193 193 if not _terminfo_params:
194 194 return
195 195 # Otherwise, see what the config file says.
196 196 if mode not in ('auto', 'terminfo'):
197 197 return
198 198
199 199 _terminfo_params.update((key[6:], (False, int(val)))
200 200 for key, val in ui.configitems('color')
201 201 if key.startswith('color.'))
202 202
203 203 try:
204 204 curses.setupterm()
205 205 except curses.error as e:
206 206 _terminfo_params = {}
207 207 return
208 208
209 209 for key, (b, e) in _terminfo_params.items():
210 210 if not b:
211 211 continue
212 212 if not curses.tigetstr(e):
213 213 # Most terminals don't support dim, invis, etc, so don't be
214 214 # noisy and use ui.debug().
215 215 ui.debug("no terminfo entry for %s\n" % e)
216 216 del _terminfo_params[key]
217 217 if not curses.tigetstr('setaf') or not curses.tigetstr('setab'):
218 218 # Only warn about missing terminfo entries if we explicitly asked for
219 219 # terminfo mode.
220 220 if mode == "terminfo":
221 221 ui.warn(_("no terminfo entry for setab/setaf: reverting to "
222 222 "ECMA-48 color\n"))
223 223 _terminfo_params = {}
224 224
225 225 def _modesetup(ui, coloropt):
226 226 global _terminfo_params
227 227
228 228 if coloropt == 'debug':
229 229 return 'debug'
230 230
231 231 auto = (coloropt == 'auto')
232 232 always = not auto and util.parsebool(coloropt)
233 233 if not always and not auto:
234 234 return None
235 235
236 236 formatted = always or (os.environ.get('TERM') != 'dumb' and ui.formatted())
237 237
238 238 mode = ui.config('color', 'mode', 'auto')
239 239
240 240 # If pager is active, color.pagermode overrides color.mode.
241 241 if getattr(ui, 'pageractive', False):
242 242 mode = ui.config('color', 'pagermode', mode)
243 243
244 244 realmode = mode
245 245 if mode == 'auto':
246 246 if os.name == 'nt':
247 247 term = os.environ.get('TERM')
248 248 # TERM won't be defined in a vanilla cmd.exe environment.
249 249
250 250 # UNIX-like environments on Windows such as Cygwin and MSYS will
251 251 # set TERM. They appear to make a best effort attempt at setting it
252 252 # to something appropriate. However, not all environments with TERM
253 253 # defined support ANSI. Since "ansi" could result in terminal
254 254 # gibberish, we error on the side of selecting "win32". However, if
255 255 # w32effects is not defined, we almost certainly don't support
256 256 # "win32", so don't even try.
257 257 if (term and 'xterm' in term) or not w32effects:
258 258 realmode = 'ansi'
259 259 else:
260 260 realmode = 'win32'
261 261 else:
262 262 realmode = 'ansi'
263 263
264 264 def modewarn():
265 265 # only warn if color.mode was explicitly set and we're in
266 266 # an interactive terminal
267 267 if mode == realmode and ui.interactive():
268 268 ui.warn(_('warning: failed to set color mode to %s\n') % mode)
269 269
270 270 if realmode == 'win32':
271 271 _terminfo_params = {}
272 272 if not w32effects:
273 273 modewarn()
274 274 return None
275 275 _effects.update(w32effects)
276 276 elif realmode == 'ansi':
277 277 _terminfo_params = {}
278 278 elif realmode == 'terminfo':
279 279 _terminfosetup(ui, mode)
280 280 if not _terminfo_params:
281 281 ## FIXME Shouldn't we return None in this case too?
282 282 modewarn()
283 283 realmode = 'ansi'
284 284 else:
285 285 return None
286 286
287 287 if always or (auto and formatted):
288 288 return realmode
289 289 return None
290 290
291 291 try:
292 292 import curses
293 293 # Mapping from effect name to terminfo attribute name or color number.
294 294 # This will also force-load the curses module.
295 295 _terminfo_params = {'none': (True, 'sgr0'),
296 296 'standout': (True, 'smso'),
297 297 'underline': (True, 'smul'),
298 298 'reverse': (True, 'rev'),
299 299 'inverse': (True, 'rev'),
300 300 'blink': (True, 'blink'),
301 301 'dim': (True, 'dim'),
302 302 'bold': (True, 'bold'),
303 303 'invisible': (True, 'invis'),
304 304 'italic': (True, 'sitm'),
305 305 'black': (False, curses.COLOR_BLACK),
306 306 'red': (False, curses.COLOR_RED),
307 307 'green': (False, curses.COLOR_GREEN),
308 308 'yellow': (False, curses.COLOR_YELLOW),
309 309 'blue': (False, curses.COLOR_BLUE),
310 310 'magenta': (False, curses.COLOR_MAGENTA),
311 311 'cyan': (False, curses.COLOR_CYAN),
312 312 'white': (False, curses.COLOR_WHITE)}
313 313 except ImportError:
314 314 _terminfo_params = {}
315 315
316 316 _styles = {'grep.match': 'red bold',
317 317 'grep.linenumber': 'green',
318 318 'grep.rev': 'green',
319 319 'grep.change': 'green',
320 320 'grep.sep': 'cyan',
321 321 'grep.filename': 'magenta',
322 322 'grep.user': 'magenta',
323 323 'grep.date': 'magenta',
324 324 'bookmarks.active': 'green',
325 325 'branches.active': 'none',
326 326 'branches.closed': 'black bold',
327 327 'branches.current': 'green',
328 328 'branches.inactive': 'none',
329 329 'diff.changed': 'white',
330 330 'diff.deleted': 'red',
331 331 'diff.diffline': 'bold',
332 332 'diff.extended': 'cyan bold',
333 333 'diff.file_a': 'red bold',
334 334 'diff.file_b': 'green bold',
335 335 'diff.hunk': 'magenta',
336 336 'diff.inserted': 'green',
337 337 'diff.tab': '',
338 338 'diff.trailingwhitespace': 'bold red_background',
339 339 'changeset.public' : '',
340 340 'changeset.draft' : '',
341 341 'changeset.secret' : '',
342 342 'diffstat.deleted': 'red',
343 343 'diffstat.inserted': 'green',
344 344 'histedit.remaining': 'red bold',
345 345 'ui.prompt': 'yellow',
346 346 'log.changeset': 'yellow',
347 347 'patchbomb.finalsummary': '',
348 348 'patchbomb.from': 'magenta',
349 349 'patchbomb.to': 'cyan',
350 350 'patchbomb.subject': 'green',
351 351 'patchbomb.diffstats': '',
352 352 'rebase.rebased': 'blue',
353 353 'rebase.remaining': 'red bold',
354 354 'resolve.resolved': 'green bold',
355 355 'resolve.unresolved': 'red bold',
356 356 'shelve.age': 'cyan',
357 357 'shelve.newest': 'green bold',
358 358 'shelve.name': 'blue bold',
359 359 'status.added': 'green bold',
360 360 'status.clean': 'none',
361 361 'status.copied': 'none',
362 362 'status.deleted': 'cyan bold underline',
363 363 'status.ignored': 'black bold',
364 364 'status.modified': 'blue bold',
365 365 'status.removed': 'red bold',
366 366 'status.unknown': 'magenta bold underline',
367 367 'tags.normal': 'green',
368 368 'tags.local': 'black bold'}
369 369
370 370
371 371 def _effect_str(effect):
372 372 '''Helper function for render_effects().'''
373 373
374 374 bg = False
375 375 if effect.endswith('_background'):
376 376 bg = True
377 377 effect = effect[:-11]
378 378 attr, val = _terminfo_params[effect]
379 379 if attr:
380 380 return curses.tigetstr(val)
381 381 elif bg:
382 382 return curses.tparm(curses.tigetstr('setab'), val)
383 383 else:
384 384 return curses.tparm(curses.tigetstr('setaf'), val)
385 385
386 386 def render_effects(text, effects):
387 387 'Wrap text in commands to turn on each effect.'
388 388 if not text:
389 389 return text
390 390 if not _terminfo_params:
391 391 start = [str(_effects[e]) for e in ['none'] + effects.split()]
392 392 start = '\033[' + ';'.join(start) + 'm'
393 393 stop = '\033[' + str(_effects['none']) + 'm'
394 394 else:
395 395 start = ''.join(_effect_str(effect)
396 396 for effect in ['none'] + effects.split())
397 397 stop = _effect_str('none')
398 398 return ''.join([start, text, stop])
399 399
400 400 def extstyles():
401 401 for name, ext in extensions.extensions():
402 402 _styles.update(getattr(ext, 'colortable', {}))
403 403
404 404 def valideffect(effect):
405 405 'Determine if the effect is valid or not.'
406 406 good = False
407 407 if not _terminfo_params and effect in _effects:
408 408 good = True
409 409 elif effect in _terminfo_params or effect[:-11] in _terminfo_params:
410 410 good = True
411 411 return good
412 412
413 413 def configstyles(ui):
414 414 for status, cfgeffects in ui.configitems('color'):
415 415 if '.' not in status or status.startswith('color.'):
416 416 continue
417 417 cfgeffects = ui.configlist('color', status)
418 418 if cfgeffects:
419 419 good = []
420 420 for e in cfgeffects:
421 421 if valideffect(e):
422 422 good.append(e)
423 423 else:
424 424 ui.warn(_("ignoring unknown color/effect %r "
425 425 "(configured in color.%s)\n")
426 426 % (e, status))
427 427 _styles[status] = ' '.join(good)
428 428
429 429 class colorui(uimod.ui):
430 430 _colormode = 'ansi'
431 431 def write(self, *args, **opts):
432 432 if self._colormode is None:
433 433 return super(colorui, self).write(*args, **opts)
434 434
435 435 label = opts.get('label', '')
436 436 if self._buffers and not opts.get('prompt', False):
437 437 if self._bufferapplylabels:
438 438 self._buffers[-1].extend(self.label(a, label) for a in args)
439 439 else:
440 440 self._buffers[-1].extend(args)
441 441 elif self._colormode == 'win32':
442 442 for a in args:
443 443 win32print(a, super(colorui, self).write, **opts)
444 444 else:
445 445 return super(colorui, self).write(
446 446 *[self.label(a, label) for a in args], **opts)
447 447
448 448 def write_err(self, *args, **opts):
449 449 if self._colormode is None:
450 450 return super(colorui, self).write_err(*args, **opts)
451 451
452 452 label = opts.get('label', '')
453 453 if self._bufferstates and self._bufferstates[-1][0]:
454 454 return self.write(*args, **opts)
455 455 if self._colormode == 'win32':
456 456 for a in args:
457 457 win32print(a, super(colorui, self).write_err, **opts)
458 458 else:
459 459 return super(colorui, self).write_err(
460 460 *[self.label(a, label) for a in args], **opts)
461 461
462 462 def showlabel(self, msg, label):
463 463 if label and msg:
464 464 if msg[-1] == '\n':
465 465 return "[%s|%s]\n" % (label, msg[:-1])
466 466 else:
467 467 return "[%s|%s]" % (label, msg)
468 468 else:
469 469 return msg
470 470
471 471 def label(self, msg, label):
472 472 if self._colormode is None:
473 473 return super(colorui, self).label(msg, label)
474 474
475 475 if self._colormode == 'debug':
476 476 return self.showlabel(msg, label)
477 477
478 478 effects = []
479 479 for l in label.split():
480 480 s = _styles.get(l, '')
481 481 if s:
482 482 effects.append(s)
483 483 elif valideffect(l):
484 484 effects.append(l)
485 485 effects = ' '.join(effects)
486 486 if effects:
487 487 return '\n'.join([render_effects(s, effects)
488 488 for s in msg.split('\n')])
489 489 return msg
490 490
491 491 def uisetup(ui):
492 492 if ui.plain():
493 493 return
494 494 if not isinstance(ui, colorui):
495 495 colorui.__bases__ = (ui.__class__,)
496 496 ui.__class__ = colorui
497 497 def colorcmd(orig, ui_, opts, cmd, cmdfunc):
498 498 mode = _modesetup(ui_, opts['color'])
499 499 colorui._colormode = mode
500 500 if mode and mode != 'debug':
501 501 extstyles()
502 502 configstyles(ui_)
503 503 return orig(ui_, opts, cmd, cmdfunc)
504 504 def colorgit(orig, gitsub, commands, env=None, stream=False, cwd=None):
505 505 if gitsub.ui._colormode and len(commands) and commands[0] == "diff":
506 506 # insert the argument in the front,
507 507 # the end of git diff arguments is used for paths
508 508 commands.insert(1, '--color')
509 509 return orig(gitsub, commands, env, stream, cwd)
510 510 extensions.wrapfunction(dispatch, '_runcommand', colorcmd)
511 511 extensions.wrapfunction(subrepo.gitsubrepo, '_gitnodir', colorgit)
512 512
513 513 def extsetup(ui):
514 514 commands.globalopts.append(
515 515 ('', 'color', 'auto',
516 516 # i18n: 'always', 'auto', 'never', and 'debug' are keywords
517 517 # and should not be translated
518 518 _("when to colorize (boolean, always, auto, never, or debug)"),
519 519 _('TYPE')))
520 520
521 521 @command('debugcolor', [], 'hg debugcolor')
522 522 def debugcolor(ui, repo, **opts):
523 523 global _styles
524 524 _styles = {}
525 525 for effect in _effects.keys():
526 526 _styles[effect] = effect
527 527 ui.write(('color mode: %s\n') % ui._colormode)
528 528 ui.write(_('available colors:\n'))
529 529 for label, colors in _styles.items():
530 530 ui.write(('%s\n') % colors, label=label)
531 531
532 532 if os.name != 'nt':
533 533 w32effects = None
534 534 else:
535 535 import ctypes
536 536 import re
537 537
538 538 _kernel32 = ctypes.windll.kernel32
539 539
540 540 _WORD = ctypes.c_ushort
541 541
542 542 _INVALID_HANDLE_VALUE = -1
543 543
544 544 class _COORD(ctypes.Structure):
545 545 _fields_ = [('X', ctypes.c_short),
546 546 ('Y', ctypes.c_short)]
547 547
548 548 class _SMALL_RECT(ctypes.Structure):
549 549 _fields_ = [('Left', ctypes.c_short),
550 550 ('Top', ctypes.c_short),
551 551 ('Right', ctypes.c_short),
552 552 ('Bottom', ctypes.c_short)]
553 553
554 554 class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure):
555 555 _fields_ = [('dwSize', _COORD),
556 556 ('dwCursorPosition', _COORD),
557 557 ('wAttributes', _WORD),
558 558 ('srWindow', _SMALL_RECT),
559 559 ('dwMaximumWindowSize', _COORD)]
560 560
561 561 _STD_OUTPUT_HANDLE = 0xfffffff5L # (DWORD)-11
562 562 _STD_ERROR_HANDLE = 0xfffffff4L # (DWORD)-12
563 563
564 564 _FOREGROUND_BLUE = 0x0001
565 565 _FOREGROUND_GREEN = 0x0002
566 566 _FOREGROUND_RED = 0x0004
567 567 _FOREGROUND_INTENSITY = 0x0008
568 568
569 569 _BACKGROUND_BLUE = 0x0010
570 570 _BACKGROUND_GREEN = 0x0020
571 571 _BACKGROUND_RED = 0x0040
572 572 _BACKGROUND_INTENSITY = 0x0080
573 573
574 574 _COMMON_LVB_REVERSE_VIDEO = 0x4000
575 575 _COMMON_LVB_UNDERSCORE = 0x8000
576 576
577 577 # http://msdn.microsoft.com/en-us/library/ms682088%28VS.85%29.aspx
578 578 w32effects = {
579 579 'none': -1,
580 580 'black': 0,
581 581 'red': _FOREGROUND_RED,
582 582 'green': _FOREGROUND_GREEN,
583 583 'yellow': _FOREGROUND_RED | _FOREGROUND_GREEN,
584 584 'blue': _FOREGROUND_BLUE,
585 585 'magenta': _FOREGROUND_BLUE | _FOREGROUND_RED,
586 586 'cyan': _FOREGROUND_BLUE | _FOREGROUND_GREEN,
587 587 'white': _FOREGROUND_RED | _FOREGROUND_GREEN | _FOREGROUND_BLUE,
588 588 'bold': _FOREGROUND_INTENSITY,
589 589 'black_background': 0x100, # unused value > 0x0f
590 590 'red_background': _BACKGROUND_RED,
591 591 'green_background': _BACKGROUND_GREEN,
592 592 'yellow_background': _BACKGROUND_RED | _BACKGROUND_GREEN,
593 593 'blue_background': _BACKGROUND_BLUE,
594 594 'purple_background': _BACKGROUND_BLUE | _BACKGROUND_RED,
595 595 'cyan_background': _BACKGROUND_BLUE | _BACKGROUND_GREEN,
596 596 'white_background': (_BACKGROUND_RED | _BACKGROUND_GREEN |
597 597 _BACKGROUND_BLUE),
598 598 'bold_background': _BACKGROUND_INTENSITY,
599 599 'underline': _COMMON_LVB_UNDERSCORE, # double-byte charsets only
600 600 'inverse': _COMMON_LVB_REVERSE_VIDEO, # double-byte charsets only
601 601 }
602 602
603 603 passthrough = set([_FOREGROUND_INTENSITY,
604 604 _BACKGROUND_INTENSITY,
605 605 _COMMON_LVB_UNDERSCORE,
606 606 _COMMON_LVB_REVERSE_VIDEO])
607 607
608 608 stdout = _kernel32.GetStdHandle(
609 609 _STD_OUTPUT_HANDLE) # don't close the handle returned
610 610 if stdout is None or stdout == _INVALID_HANDLE_VALUE:
611 611 w32effects = None
612 612 else:
613 613 csbi = _CONSOLE_SCREEN_BUFFER_INFO()
614 614 if not _kernel32.GetConsoleScreenBufferInfo(
615 615 stdout, ctypes.byref(csbi)):
616 616 # stdout may not support GetConsoleScreenBufferInfo()
617 617 # when called from subprocess or redirected
618 618 w32effects = None
619 619 else:
620 620 origattr = csbi.wAttributes
621 621 ansire = re.compile('\033\[([^m]*)m([^\033]*)(.*)',
622 622 re.MULTILINE | re.DOTALL)
623 623
624 624 def win32print(text, orig, **opts):
625 625 label = opts.get('label', '')
626 626 attr = origattr
627 627
628 628 def mapcolor(val, attr):
629 629 if val == -1:
630 630 return origattr
631 631 elif val in passthrough:
632 632 return attr | val
633 633 elif val > 0x0f:
634 634 return (val & 0x70) | (attr & 0x8f)
635 635 else:
636 636 return (val & 0x07) | (attr & 0xf8)
637 637
638 638 # determine console attributes based on labels
639 639 for l in label.split():
640 640 style = _styles.get(l, '')
641 641 for effect in style.split():
642 642 try:
643 643 attr = mapcolor(w32effects[effect], attr)
644 644 except KeyError:
645 645 # w32effects could not have certain attributes so we skip
646 646 # them if not found
647 647 pass
648 648 # hack to ensure regexp finds data
649 649 if not text.startswith('\033['):
650 650 text = '\033[m' + text
651 651
652 652 # Look for ANSI-like codes embedded in text
653 653 m = re.match(ansire, text)
654 654
655 655 try:
656 656 while m:
657 657 for sattr in m.group(1).split(';'):
658 658 if sattr:
659 659 attr = mapcolor(int(sattr), attr)
660 660 _kernel32.SetConsoleTextAttribute(stdout, attr)
661 661 orig(m.group(2), **opts)
662 662 m = re.match(ansire, m.group(3))
663 663 finally:
664 664 # Explicitly reset original attributes
665 665 _kernel32.SetConsoleTextAttribute(stdout, origattr)
@@ -1,458 +1,458 b''
1 1 # convert.py Foreign SCM converter
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''import revisions from foreign VCS repositories into Mercurial'''
9 9
10 10 from __future__ import absolute_import
11 11
12 12 from mercurial.i18n import _
13 13 from mercurial import (
14 14 cmdutil,
15 15 registrar,
16 16 )
17 17
18 18 from . import (
19 19 convcmd,
20 20 cvsps,
21 21 subversion,
22 22 )
23 23
24 24 cmdtable = {}
25 25 command = cmdutil.command(cmdtable)
26 # Note for extension authors: ONLY specify testedwith = 'internal' for
26 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
27 27 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
28 28 # be specifying the version(s) of Mercurial they are tested with, or
29 29 # leave the attribute unspecified.
30 testedwith = 'internal'
30 testedwith = 'ships-with-hg-core'
31 31
32 32 # Commands definition was moved elsewhere to ease demandload job.
33 33
34 34 @command('convert',
35 35 [('', 'authors', '',
36 36 _('username mapping filename (DEPRECATED) (use --authormap instead)'),
37 37 _('FILE')),
38 38 ('s', 'source-type', '', _('source repository type'), _('TYPE')),
39 39 ('d', 'dest-type', '', _('destination repository type'), _('TYPE')),
40 40 ('r', 'rev', [], _('import up to source revision REV'), _('REV')),
41 41 ('A', 'authormap', '', _('remap usernames using this file'), _('FILE')),
42 42 ('', 'filemap', '', _('remap file names using contents of file'),
43 43 _('FILE')),
44 44 ('', 'full', None,
45 45 _('apply filemap changes by converting all files again')),
46 46 ('', 'splicemap', '', _('splice synthesized history into place'),
47 47 _('FILE')),
48 48 ('', 'branchmap', '', _('change branch names while converting'),
49 49 _('FILE')),
50 50 ('', 'branchsort', None, _('try to sort changesets by branches')),
51 51 ('', 'datesort', None, _('try to sort changesets by date')),
52 52 ('', 'sourcesort', None, _('preserve source changesets order')),
53 53 ('', 'closesort', None, _('try to reorder closed revisions'))],
54 54 _('hg convert [OPTION]... SOURCE [DEST [REVMAP]]'),
55 55 norepo=True)
56 56 def convert(ui, src, dest=None, revmapfile=None, **opts):
57 57 """convert a foreign SCM repository to a Mercurial one.
58 58
59 59 Accepted source formats [identifiers]:
60 60
61 61 - Mercurial [hg]
62 62 - CVS [cvs]
63 63 - Darcs [darcs]
64 64 - git [git]
65 65 - Subversion [svn]
66 66 - Monotone [mtn]
67 67 - GNU Arch [gnuarch]
68 68 - Bazaar [bzr]
69 69 - Perforce [p4]
70 70
71 71 Accepted destination formats [identifiers]:
72 72
73 73 - Mercurial [hg]
74 74 - Subversion [svn] (history on branches is not preserved)
75 75
76 76 If no revision is given, all revisions will be converted.
77 77 Otherwise, convert will only import up to the named revision
78 78 (given in a format understood by the source).
79 79
80 80 If no destination directory name is specified, it defaults to the
81 81 basename of the source with ``-hg`` appended. If the destination
82 82 repository doesn't exist, it will be created.
83 83
84 84 By default, all sources except Mercurial will use --branchsort.
85 85 Mercurial uses --sourcesort to preserve original revision numbers
86 86 order. Sort modes have the following effects:
87 87
88 88 --branchsort convert from parent to child revision when possible,
89 89 which means branches are usually converted one after
90 90 the other. It generates more compact repositories.
91 91
92 92 --datesort sort revisions by date. Converted repositories have
93 93 good-looking changelogs but are often an order of
94 94 magnitude larger than the same ones generated by
95 95 --branchsort.
96 96
97 97 --sourcesort try to preserve source revisions order, only
98 98 supported by Mercurial sources.
99 99
100 100 --closesort try to move closed revisions as close as possible
101 101 to parent branches, only supported by Mercurial
102 102 sources.
103 103
104 104 If ``REVMAP`` isn't given, it will be put in a default location
105 105 (``<dest>/.hg/shamap`` by default). The ``REVMAP`` is a simple
106 106 text file that maps each source commit ID to the destination ID
107 107 for that revision, like so::
108 108
109 109 <source ID> <destination ID>
110 110
111 111 If the file doesn't exist, it's automatically created. It's
112 112 updated on each commit copied, so :hg:`convert` can be interrupted
113 113 and can be run repeatedly to copy new commits.
114 114
115 115 The authormap is a simple text file that maps each source commit
116 116 author to a destination commit author. It is handy for source SCMs
117 117 that use unix logins to identify authors (e.g.: CVS). One line per
118 118 author mapping and the line format is::
119 119
120 120 source author = destination author
121 121
122 122 Empty lines and lines starting with a ``#`` are ignored.
123 123
124 124 The filemap is a file that allows filtering and remapping of files
125 125 and directories. Each line can contain one of the following
126 126 directives::
127 127
128 128 include path/to/file-or-dir
129 129
130 130 exclude path/to/file-or-dir
131 131
132 132 rename path/to/source path/to/destination
133 133
134 134 Comment lines start with ``#``. A specified path matches if it
135 135 equals the full relative name of a file or one of its parent
136 136 directories. The ``include`` or ``exclude`` directive with the
137 137 longest matching path applies, so line order does not matter.
138 138
139 139 The ``include`` directive causes a file, or all files under a
140 140 directory, to be included in the destination repository. The default
141 141 if there are no ``include`` statements is to include everything.
142 142 If there are any ``include`` statements, nothing else is included.
143 143 The ``exclude`` directive causes files or directories to
144 144 be omitted. The ``rename`` directive renames a file or directory if
145 145 it is converted. To rename from a subdirectory into the root of
146 146 the repository, use ``.`` as the path to rename to.
147 147
148 148 ``--full`` will make sure the converted changesets contain exactly
149 149 the right files with the right content. It will make a full
150 150 conversion of all files, not just the ones that have
151 151 changed. Files that already are correct will not be changed. This
152 152 can be used to apply filemap changes when converting
153 153 incrementally. This is currently only supported for Mercurial and
154 154 Subversion.
155 155
156 156 The splicemap is a file that allows insertion of synthetic
157 157 history, letting you specify the parents of a revision. This is
158 158 useful if you want to e.g. give a Subversion merge two parents, or
159 159 graft two disconnected series of history together. Each entry
160 160 contains a key, followed by a space, followed by one or two
161 161 comma-separated values::
162 162
163 163 key parent1, parent2
164 164
165 165 The key is the revision ID in the source
166 166 revision control system whose parents should be modified (same
167 167 format as a key in .hg/shamap). The values are the revision IDs
168 168 (in either the source or destination revision control system) that
169 169 should be used as the new parents for that node. For example, if
170 170 you have merged "release-1.0" into "trunk", then you should
171 171 specify the revision on "trunk" as the first parent and the one on
172 172 the "release-1.0" branch as the second.
173 173
174 174 The branchmap is a file that allows you to rename a branch when it is
175 175 being brought in from whatever external repository. When used in
176 176 conjunction with a splicemap, it allows for a powerful combination
177 177 to help fix even the most badly mismanaged repositories and turn them
178 178 into nicely structured Mercurial repositories. The branchmap contains
179 179 lines of the form::
180 180
181 181 original_branch_name new_branch_name
182 182
183 183 where "original_branch_name" is the name of the branch in the
184 184 source repository, and "new_branch_name" is the name of the branch
185 185 is the destination repository. No whitespace is allowed in the
186 186 branch names. This can be used to (for instance) move code in one
187 187 repository from "default" to a named branch.
188 188
189 189 Mercurial Source
190 190 ################
191 191
192 192 The Mercurial source recognizes the following configuration
193 193 options, which you can set on the command line with ``--config``:
194 194
195 195 :convert.hg.ignoreerrors: ignore integrity errors when reading.
196 196 Use it to fix Mercurial repositories with missing revlogs, by
197 197 converting from and to Mercurial. Default is False.
198 198
199 199 :convert.hg.saverev: store original revision ID in changeset
200 200 (forces target IDs to change). It takes a boolean argument and
201 201 defaults to False.
202 202
203 203 :convert.hg.startrev: specify the initial Mercurial revision.
204 204 The default is 0.
205 205
206 206 :convert.hg.revs: revset specifying the source revisions to convert.
207 207
208 208 CVS Source
209 209 ##########
210 210
211 211 CVS source will use a sandbox (i.e. a checked-out copy) from CVS
212 212 to indicate the starting point of what will be converted. Direct
213 213 access to the repository files is not needed, unless of course the
214 214 repository is ``:local:``. The conversion uses the top level
215 215 directory in the sandbox to find the CVS repository, and then uses
216 216 CVS rlog commands to find files to convert. This means that unless
217 217 a filemap is given, all files under the starting directory will be
218 218 converted, and that any directory reorganization in the CVS
219 219 sandbox is ignored.
220 220
221 221 The following options can be used with ``--config``:
222 222
223 223 :convert.cvsps.cache: Set to False to disable remote log caching,
224 224 for testing and debugging purposes. Default is True.
225 225
226 226 :convert.cvsps.fuzz: Specify the maximum time (in seconds) that is
227 227 allowed between commits with identical user and log message in
228 228 a single changeset. When very large files were checked in as
229 229 part of a changeset then the default may not be long enough.
230 230 The default is 60.
231 231
232 232 :convert.cvsps.mergeto: Specify a regular expression to which
233 233 commit log messages are matched. If a match occurs, then the
234 234 conversion process will insert a dummy revision merging the
235 235 branch on which this log message occurs to the branch
236 236 indicated in the regex. Default is ``{{mergetobranch
237 237 ([-\\w]+)}}``
238 238
239 239 :convert.cvsps.mergefrom: Specify a regular expression to which
240 240 commit log messages are matched. If a match occurs, then the
241 241 conversion process will add the most recent revision on the
242 242 branch indicated in the regex as the second parent of the
243 243 changeset. Default is ``{{mergefrombranch ([-\\w]+)}}``
244 244
245 245 :convert.localtimezone: use local time (as determined by the TZ
246 246 environment variable) for changeset date/times. The default
247 247 is False (use UTC).
248 248
249 249 :hooks.cvslog: Specify a Python function to be called at the end of
250 250 gathering the CVS log. The function is passed a list with the
251 251 log entries, and can modify the entries in-place, or add or
252 252 delete them.
253 253
254 254 :hooks.cvschangesets: Specify a Python function to be called after
255 255 the changesets are calculated from the CVS log. The
256 256 function is passed a list with the changeset entries, and can
257 257 modify the changesets in-place, or add or delete them.
258 258
259 259 An additional "debugcvsps" Mercurial command allows the builtin
260 260 changeset merging code to be run without doing a conversion. Its
261 261 parameters and output are similar to that of cvsps 2.1. Please see
262 262 the command help for more details.
263 263
264 264 Subversion Source
265 265 #################
266 266
267 267 Subversion source detects classical trunk/branches/tags layouts.
268 268 By default, the supplied ``svn://repo/path/`` source URL is
269 269 converted as a single branch. If ``svn://repo/path/trunk`` exists
270 270 it replaces the default branch. If ``svn://repo/path/branches``
271 271 exists, its subdirectories are listed as possible branches. If
272 272 ``svn://repo/path/tags`` exists, it is looked for tags referencing
273 273 converted branches. Default ``trunk``, ``branches`` and ``tags``
274 274 values can be overridden with following options. Set them to paths
275 275 relative to the source URL, or leave them blank to disable auto
276 276 detection.
277 277
278 278 The following options can be set with ``--config``:
279 279
280 280 :convert.svn.branches: specify the directory containing branches.
281 281 The default is ``branches``.
282 282
283 283 :convert.svn.tags: specify the directory containing tags. The
284 284 default is ``tags``.
285 285
286 286 :convert.svn.trunk: specify the name of the trunk branch. The
287 287 default is ``trunk``.
288 288
289 289 :convert.localtimezone: use local time (as determined by the TZ
290 290 environment variable) for changeset date/times. The default
291 291 is False (use UTC).
292 292
293 293 Source history can be retrieved starting at a specific revision,
294 294 instead of being integrally converted. Only single branch
295 295 conversions are supported.
296 296
297 297 :convert.svn.startrev: specify start Subversion revision number.
298 298 The default is 0.
299 299
300 300 Git Source
301 301 ##########
302 302
303 303 The Git importer converts commits from all reachable branches (refs
304 304 in refs/heads) and remotes (refs in refs/remotes) to Mercurial.
305 305 Branches are converted to bookmarks with the same name, with the
306 306 leading 'refs/heads' stripped. Git submodules are converted to Git
307 307 subrepos in Mercurial.
308 308
309 309 The following options can be set with ``--config``:
310 310
311 311 :convert.git.similarity: specify how similar files modified in a
312 312 commit must be to be imported as renames or copies, as a
313 313 percentage between ``0`` (disabled) and ``100`` (files must be
314 314 identical). For example, ``90`` means that a delete/add pair will
315 315 be imported as a rename if more than 90% of the file hasn't
316 316 changed. The default is ``50``.
317 317
318 318 :convert.git.findcopiesharder: while detecting copies, look at all
319 319 files in the working copy instead of just changed ones. This
320 320 is very expensive for large projects, and is only effective when
321 321 ``convert.git.similarity`` is greater than 0. The default is False.
322 322
323 323 :convert.git.remoteprefix: remote refs are converted as bookmarks with
324 324 ``convert.git.remoteprefix`` as a prefix followed by a /. The default
325 325 is 'remote'.
326 326
327 327 :convert.git.skipsubmodules: does not convert root level .gitmodules files
328 328 or files with 160000 mode indicating a submodule. Default is False.
329 329
330 330 Perforce Source
331 331 ###############
332 332
333 333 The Perforce (P4) importer can be given a p4 depot path or a
334 334 client specification as source. It will convert all files in the
335 335 source to a flat Mercurial repository, ignoring labels, branches
336 336 and integrations. Note that when a depot path is given you then
337 337 usually should specify a target directory, because otherwise the
338 338 target may be named ``...-hg``.
339 339
340 340 The following options can be set with ``--config``:
341 341
342 342 :convert.p4.encoding: specify the encoding to use when decoding standard
343 343 output of the Perforce command line tool. The default is default system
344 344 encoding.
345 345
346 346 :convert.p4.startrev: specify initial Perforce revision (a
347 347 Perforce changelist number).
348 348
349 349 Mercurial Destination
350 350 #####################
351 351
352 352 The Mercurial destination will recognize Mercurial subrepositories in the
353 353 destination directory, and update the .hgsubstate file automatically if the
354 354 destination subrepositories contain the <dest>/<sub>/.hg/shamap file.
355 355 Converting a repository with subrepositories requires converting a single
356 356 repository at a time, from the bottom up.
357 357
358 358 .. container:: verbose
359 359
360 360 An example showing how to convert a repository with subrepositories::
361 361
362 362 # so convert knows the type when it sees a non empty destination
363 363 $ hg init converted
364 364
365 365 $ hg convert orig/sub1 converted/sub1
366 366 $ hg convert orig/sub2 converted/sub2
367 367 $ hg convert orig converted
368 368
369 369 The following options are supported:
370 370
371 371 :convert.hg.clonebranches: dispatch source branches in separate
372 372 clones. The default is False.
373 373
374 374 :convert.hg.tagsbranch: branch name for tag revisions, defaults to
375 375 ``default``.
376 376
377 377 :convert.hg.usebranchnames: preserve branch names. The default is
378 378 True.
379 379
380 380 :convert.hg.sourcename: records the given string as a 'convert_source' extra
381 381 value on each commit made in the target repository. The default is None.
382 382
383 383 All Destinations
384 384 ################
385 385
386 386 All destination types accept the following options:
387 387
388 388 :convert.skiptags: does not convert tags from the source repo to the target
389 389 repo. The default is False.
390 390 """
391 391 return convcmd.convert(ui, src, dest, revmapfile, **opts)
392 392
393 393 @command('debugsvnlog', [], 'hg debugsvnlog', norepo=True)
394 394 def debugsvnlog(ui, **opts):
395 395 return subversion.debugsvnlog(ui, **opts)
396 396
397 397 @command('debugcvsps',
398 398 [
399 399 # Main options shared with cvsps-2.1
400 400 ('b', 'branches', [], _('only return changes on specified branches')),
401 401 ('p', 'prefix', '', _('prefix to remove from file names')),
402 402 ('r', 'revisions', [],
403 403 _('only return changes after or between specified tags')),
404 404 ('u', 'update-cache', None, _("update cvs log cache")),
405 405 ('x', 'new-cache', None, _("create new cvs log cache")),
406 406 ('z', 'fuzz', 60, _('set commit time fuzz in seconds')),
407 407 ('', 'root', '', _('specify cvsroot')),
408 408 # Options specific to builtin cvsps
409 409 ('', 'parents', '', _('show parent changesets')),
410 410 ('', 'ancestors', '', _('show current changeset in ancestor branches')),
411 411 # Options that are ignored for compatibility with cvsps-2.1
412 412 ('A', 'cvs-direct', None, _('ignored for compatibility')),
413 413 ],
414 414 _('hg debugcvsps [OPTION]... [PATH]...'),
415 415 norepo=True)
416 416 def debugcvsps(ui, *args, **opts):
417 417 '''create changeset information from CVS
418 418
419 419 This command is intended as a debugging tool for the CVS to
420 420 Mercurial converter, and can be used as a direct replacement for
421 421 cvsps.
422 422
423 423 Hg debugcvsps reads the CVS rlog for current directory (or any
424 424 named directory) in the CVS repository, and converts the log to a
425 425 series of changesets based on matching commit log entries and
426 426 dates.'''
427 427 return cvsps.debugcvsps(ui, *args, **opts)
428 428
429 429 def kwconverted(ctx, name):
430 430 rev = ctx.extra().get('convert_revision', '')
431 431 if rev.startswith('svn:'):
432 432 if name == 'svnrev':
433 433 return str(subversion.revsplit(rev)[2])
434 434 elif name == 'svnpath':
435 435 return subversion.revsplit(rev)[1]
436 436 elif name == 'svnuuid':
437 437 return subversion.revsplit(rev)[0]
438 438 return rev
439 439
440 440 templatekeyword = registrar.templatekeyword()
441 441
442 442 @templatekeyword('svnrev')
443 443 def kwsvnrev(repo, ctx, **args):
444 444 """String. Converted subversion revision number."""
445 445 return kwconverted(ctx, 'svnrev')
446 446
447 447 @templatekeyword('svnpath')
448 448 def kwsvnpath(repo, ctx, **args):
449 449 """String. Converted subversion revision project path."""
450 450 return kwconverted(ctx, 'svnpath')
451 451
452 452 @templatekeyword('svnuuid')
453 453 def kwsvnuuid(repo, ctx, **args):
454 454 """String. Converted subversion revision repository identifier."""
455 455 return kwconverted(ctx, 'svnuuid')
456 456
457 457 # tell hggettext to extract docstrings from these functions:
458 458 i18nfunctions = [kwsvnrev, kwsvnpath, kwsvnuuid]
@@ -1,363 +1,363 b''
1 1 """automatically manage newlines in repository files
2 2
3 3 This extension allows you to manage the type of line endings (CRLF or
4 4 LF) that are used in the repository and in the local working
5 5 directory. That way you can get CRLF line endings on Windows and LF on
6 6 Unix/Mac, thereby letting everybody use their OS native line endings.
7 7
8 8 The extension reads its configuration from a versioned ``.hgeol``
9 9 configuration file found in the root of the working directory. The
10 10 ``.hgeol`` file use the same syntax as all other Mercurial
11 11 configuration files. It uses two sections, ``[patterns]`` and
12 12 ``[repository]``.
13 13
14 14 The ``[patterns]`` section specifies how line endings should be
15 15 converted between the working directory and the repository. The format is
16 16 specified by a file pattern. The first match is used, so put more
17 17 specific patterns first. The available line endings are ``LF``,
18 18 ``CRLF``, and ``BIN``.
19 19
20 20 Files with the declared format of ``CRLF`` or ``LF`` are always
21 21 checked out and stored in the repository in that format and files
22 22 declared to be binary (``BIN``) are left unchanged. Additionally,
23 23 ``native`` is an alias for checking out in the platform's default line
24 24 ending: ``LF`` on Unix (including Mac OS X) and ``CRLF`` on
25 25 Windows. Note that ``BIN`` (do nothing to line endings) is Mercurial's
26 26 default behavior; it is only needed if you need to override a later,
27 27 more general pattern.
28 28
29 29 The optional ``[repository]`` section specifies the line endings to
30 30 use for files stored in the repository. It has a single setting,
31 31 ``native``, which determines the storage line endings for files
32 32 declared as ``native`` in the ``[patterns]`` section. It can be set to
33 33 ``LF`` or ``CRLF``. The default is ``LF``. For example, this means
34 34 that on Windows, files configured as ``native`` (``CRLF`` by default)
35 35 will be converted to ``LF`` when stored in the repository. Files
36 36 declared as ``LF``, ``CRLF``, or ``BIN`` in the ``[patterns]`` section
37 37 are always stored as-is in the repository.
38 38
39 39 Example versioned ``.hgeol`` file::
40 40
41 41 [patterns]
42 42 **.py = native
43 43 **.vcproj = CRLF
44 44 **.txt = native
45 45 Makefile = LF
46 46 **.jpg = BIN
47 47
48 48 [repository]
49 49 native = LF
50 50
51 51 .. note::
52 52
53 53 The rules will first apply when files are touched in the working
54 54 directory, e.g. by updating to null and back to tip to touch all files.
55 55
56 56 The extension uses an optional ``[eol]`` section read from both the
57 57 normal Mercurial configuration files and the ``.hgeol`` file, with the
58 58 latter overriding the former. You can use that section to control the
59 59 overall behavior. There are three settings:
60 60
61 61 - ``eol.native`` (default ``os.linesep``) can be set to ``LF`` or
62 62 ``CRLF`` to override the default interpretation of ``native`` for
63 63 checkout. This can be used with :hg:`archive` on Unix, say, to
64 64 generate an archive where files have line endings for Windows.
65 65
66 66 - ``eol.only-consistent`` (default True) can be set to False to make
67 67 the extension convert files with inconsistent EOLs. Inconsistent
68 68 means that there is both ``CRLF`` and ``LF`` present in the file.
69 69 Such files are normally not touched under the assumption that they
70 70 have mixed EOLs on purpose.
71 71
72 72 - ``eol.fix-trailing-newline`` (default False) can be set to True to
73 73 ensure that converted files end with a EOL character (either ``\\n``
74 74 or ``\\r\\n`` as per the configured patterns).
75 75
76 76 The extension provides ``cleverencode:`` and ``cleverdecode:`` filters
77 77 like the deprecated win32text extension does. This means that you can
78 78 disable win32text and enable eol and your filters will still work. You
79 79 only need to these filters until you have prepared a ``.hgeol`` file.
80 80
81 81 The ``win32text.forbid*`` hooks provided by the win32text extension
82 82 have been unified into a single hook named ``eol.checkheadshook``. The
83 83 hook will lookup the expected line endings from the ``.hgeol`` file,
84 84 which means you must migrate to a ``.hgeol`` file first before using
85 85 the hook. ``eol.checkheadshook`` only checks heads, intermediate
86 86 invalid revisions will be pushed. To forbid them completely, use the
87 87 ``eol.checkallhook`` hook. These hooks are best used as
88 88 ``pretxnchangegroup`` hooks.
89 89
90 90 See :hg:`help patterns` for more information about the glob patterns
91 91 used.
92 92 """
93 93
94 94 from __future__ import absolute_import
95 95
96 96 import os
97 97 import re
98 98 from mercurial.i18n import _
99 99 from mercurial import (
100 100 config,
101 101 error,
102 102 extensions,
103 103 match,
104 104 util,
105 105 )
106 106
107 # Note for extension authors: ONLY specify testedwith = 'internal' for
107 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
108 108 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
109 109 # be specifying the version(s) of Mercurial they are tested with, or
110 110 # leave the attribute unspecified.
111 testedwith = 'internal'
111 testedwith = 'ships-with-hg-core'
112 112
113 113 # Matches a lone LF, i.e., one that is not part of CRLF.
114 114 singlelf = re.compile('(^|[^\r])\n')
115 115 # Matches a single EOL which can either be a CRLF where repeated CR
116 116 # are removed or a LF. We do not care about old Macintosh files, so a
117 117 # stray CR is an error.
118 118 eolre = re.compile('\r*\n')
119 119
120 120
121 121 def inconsistenteol(data):
122 122 return '\r\n' in data and singlelf.search(data)
123 123
124 124 def tolf(s, params, ui, **kwargs):
125 125 """Filter to convert to LF EOLs."""
126 126 if util.binary(s):
127 127 return s
128 128 if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
129 129 return s
130 130 if (ui.configbool('eol', 'fix-trailing-newline', False)
131 131 and s and s[-1] != '\n'):
132 132 s = s + '\n'
133 133 return eolre.sub('\n', s)
134 134
135 135 def tocrlf(s, params, ui, **kwargs):
136 136 """Filter to convert to CRLF EOLs."""
137 137 if util.binary(s):
138 138 return s
139 139 if ui.configbool('eol', 'only-consistent', True) and inconsistenteol(s):
140 140 return s
141 141 if (ui.configbool('eol', 'fix-trailing-newline', False)
142 142 and s and s[-1] != '\n'):
143 143 s = s + '\n'
144 144 return eolre.sub('\r\n', s)
145 145
146 146 def isbinary(s, params):
147 147 """Filter to do nothing with the file."""
148 148 return s
149 149
150 150 filters = {
151 151 'to-lf': tolf,
152 152 'to-crlf': tocrlf,
153 153 'is-binary': isbinary,
154 154 # The following provide backwards compatibility with win32text
155 155 'cleverencode:': tolf,
156 156 'cleverdecode:': tocrlf
157 157 }
158 158
159 159 class eolfile(object):
160 160 def __init__(self, ui, root, data):
161 161 self._decode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
162 162 self._encode = {'LF': 'to-lf', 'CRLF': 'to-crlf', 'BIN': 'is-binary'}
163 163
164 164 self.cfg = config.config()
165 165 # Our files should not be touched. The pattern must be
166 166 # inserted first override a '** = native' pattern.
167 167 self.cfg.set('patterns', '.hg*', 'BIN', 'eol')
168 168 # We can then parse the user's patterns.
169 169 self.cfg.parse('.hgeol', data)
170 170
171 171 isrepolf = self.cfg.get('repository', 'native') != 'CRLF'
172 172 self._encode['NATIVE'] = isrepolf and 'to-lf' or 'to-crlf'
173 173 iswdlf = ui.config('eol', 'native', os.linesep) in ('LF', '\n')
174 174 self._decode['NATIVE'] = iswdlf and 'to-lf' or 'to-crlf'
175 175
176 176 include = []
177 177 exclude = []
178 178 for pattern, style in self.cfg.items('patterns'):
179 179 key = style.upper()
180 180 if key == 'BIN':
181 181 exclude.append(pattern)
182 182 else:
183 183 include.append(pattern)
184 184 # This will match the files for which we need to care
185 185 # about inconsistent newlines.
186 186 self.match = match.match(root, '', [], include, exclude)
187 187
188 188 def copytoui(self, ui):
189 189 for pattern, style in self.cfg.items('patterns'):
190 190 key = style.upper()
191 191 try:
192 192 ui.setconfig('decode', pattern, self._decode[key], 'eol')
193 193 ui.setconfig('encode', pattern, self._encode[key], 'eol')
194 194 except KeyError:
195 195 ui.warn(_("ignoring unknown EOL style '%s' from %s\n")
196 196 % (style, self.cfg.source('patterns', pattern)))
197 197 # eol.only-consistent can be specified in ~/.hgrc or .hgeol
198 198 for k, v in self.cfg.items('eol'):
199 199 ui.setconfig('eol', k, v, 'eol')
200 200
201 201 def checkrev(self, repo, ctx, files):
202 202 failed = []
203 203 for f in (files or ctx.files()):
204 204 if f not in ctx:
205 205 continue
206 206 for pattern, style in self.cfg.items('patterns'):
207 207 if not match.match(repo.root, '', [pattern])(f):
208 208 continue
209 209 target = self._encode[style.upper()]
210 210 data = ctx[f].data()
211 211 if (target == "to-lf" and "\r\n" in data
212 212 or target == "to-crlf" and singlelf.search(data)):
213 213 failed.append((f, target, str(ctx)))
214 214 break
215 215 return failed
216 216
217 217 def parseeol(ui, repo, nodes):
218 218 try:
219 219 for node in nodes:
220 220 try:
221 221 if node is None:
222 222 # Cannot use workingctx.data() since it would load
223 223 # and cache the filters before we configure them.
224 224 data = repo.wfile('.hgeol').read()
225 225 else:
226 226 data = repo[node]['.hgeol'].data()
227 227 return eolfile(ui, repo.root, data)
228 228 except (IOError, LookupError):
229 229 pass
230 230 except error.ParseError as inst:
231 231 ui.warn(_("warning: ignoring .hgeol file due to parse error "
232 232 "at %s: %s\n") % (inst.args[1], inst.args[0]))
233 233 return None
234 234
235 235 def _checkhook(ui, repo, node, headsonly):
236 236 # Get revisions to check and touched files at the same time
237 237 files = set()
238 238 revs = set()
239 239 for rev in xrange(repo[node].rev(), len(repo)):
240 240 revs.add(rev)
241 241 if headsonly:
242 242 ctx = repo[rev]
243 243 files.update(ctx.files())
244 244 for pctx in ctx.parents():
245 245 revs.discard(pctx.rev())
246 246 failed = []
247 247 for rev in revs:
248 248 ctx = repo[rev]
249 249 eol = parseeol(ui, repo, [ctx.node()])
250 250 if eol:
251 251 failed.extend(eol.checkrev(repo, ctx, files))
252 252
253 253 if failed:
254 254 eols = {'to-lf': 'CRLF', 'to-crlf': 'LF'}
255 255 msgs = []
256 256 for f, target, node in sorted(failed):
257 257 msgs.append(_(" %s in %s should not have %s line endings") %
258 258 (f, node, eols[target]))
259 259 raise error.Abort(_("end-of-line check failed:\n") + "\n".join(msgs))
260 260
261 261 def checkallhook(ui, repo, node, hooktype, **kwargs):
262 262 """verify that files have expected EOLs"""
263 263 _checkhook(ui, repo, node, False)
264 264
265 265 def checkheadshook(ui, repo, node, hooktype, **kwargs):
266 266 """verify that files have expected EOLs"""
267 267 _checkhook(ui, repo, node, True)
268 268
269 269 # "checkheadshook" used to be called "hook"
270 270 hook = checkheadshook
271 271
272 272 def preupdate(ui, repo, hooktype, parent1, parent2):
273 273 repo.loadeol([parent1])
274 274 return False
275 275
276 276 def uisetup(ui):
277 277 ui.setconfig('hooks', 'preupdate.eol', preupdate, 'eol')
278 278
279 279 def extsetup(ui):
280 280 try:
281 281 extensions.find('win32text')
282 282 ui.warn(_("the eol extension is incompatible with the "
283 283 "win32text extension\n"))
284 284 except KeyError:
285 285 pass
286 286
287 287
288 288 def reposetup(ui, repo):
289 289 uisetup(repo.ui)
290 290
291 291 if not repo.local():
292 292 return
293 293 for name, fn in filters.iteritems():
294 294 repo.adddatafilter(name, fn)
295 295
296 296 ui.setconfig('patch', 'eol', 'auto', 'eol')
297 297
298 298 class eolrepo(repo.__class__):
299 299
300 300 def loadeol(self, nodes):
301 301 eol = parseeol(self.ui, self, nodes)
302 302 if eol is None:
303 303 return None
304 304 eol.copytoui(self.ui)
305 305 return eol.match
306 306
307 307 def _hgcleardirstate(self):
308 308 self._eolfile = self.loadeol([None, 'tip'])
309 309 if not self._eolfile:
310 310 self._eolfile = util.never
311 311 return
312 312
313 313 try:
314 314 cachemtime = os.path.getmtime(self.join("eol.cache"))
315 315 except OSError:
316 316 cachemtime = 0
317 317
318 318 try:
319 319 eolmtime = os.path.getmtime(self.wjoin(".hgeol"))
320 320 except OSError:
321 321 eolmtime = 0
322 322
323 323 if eolmtime > cachemtime:
324 324 self.ui.debug("eol: detected change in .hgeol\n")
325 325 wlock = None
326 326 try:
327 327 wlock = self.wlock()
328 328 for f in self.dirstate:
329 329 if self.dirstate[f] == 'n':
330 330 # all normal files need to be looked at
331 331 # again since the new .hgeol file might no
332 332 # longer match a file it matched before
333 333 self.dirstate.normallookup(f)
334 334 # Create or touch the cache to update mtime
335 335 self.vfs("eol.cache", "w").close()
336 336 wlock.release()
337 337 except error.LockUnavailable:
338 338 # If we cannot lock the repository and clear the
339 339 # dirstate, then a commit might not see all files
340 340 # as modified. But if we cannot lock the
341 341 # repository, then we can also not make a commit,
342 342 # so ignore the error.
343 343 pass
344 344
345 345 def commitctx(self, ctx, haserror=False):
346 346 for f in sorted(ctx.added() + ctx.modified()):
347 347 if not self._eolfile(f):
348 348 continue
349 349 fctx = ctx[f]
350 350 if fctx is None:
351 351 continue
352 352 data = fctx.data()
353 353 if util.binary(data):
354 354 # We should not abort here, since the user should
355 355 # be able to say "** = native" to automatically
356 356 # have all non-binary files taken care of.
357 357 continue
358 358 if inconsistenteol(data):
359 359 raise error.Abort(_("inconsistent newline style "
360 360 "in %s\n") % f)
361 361 return super(eolrepo, self).commitctx(ctx, haserror)
362 362 repo.__class__ = eolrepo
363 363 repo._hgcleardirstate()
@@ -1,392 +1,392 b''
1 1 # extdiff.py - external diff program support for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to allow external programs to compare revisions
9 9
10 10 The extdiff Mercurial extension allows you to use external programs
11 11 to compare revisions, or revision with working directory. The external
12 12 diff programs are called with a configurable set of options and two
13 13 non-option arguments: paths to directories containing snapshots of
14 14 files to compare.
15 15
16 16 The extdiff extension also allows you to configure new diff commands, so
17 17 you do not need to type :hg:`extdiff -p kdiff3` always. ::
18 18
19 19 [extdiff]
20 20 # add new command that runs GNU diff(1) in 'context diff' mode
21 21 cdiff = gdiff -Nprc5
22 22 ## or the old way:
23 23 #cmd.cdiff = gdiff
24 24 #opts.cdiff = -Nprc5
25 25
26 26 # add new command called meld, runs meld (no need to name twice). If
27 27 # the meld executable is not available, the meld tool in [merge-tools]
28 28 # will be used, if available
29 29 meld =
30 30
31 31 # add new command called vimdiff, runs gvimdiff with DirDiff plugin
32 32 # (see http://www.vim.org/scripts/script.php?script_id=102) Non
33 33 # English user, be sure to put "let g:DirDiffDynamicDiffText = 1" in
34 34 # your .vimrc
35 35 vimdiff = gvim -f "+next" \\
36 36 "+execute 'DirDiff' fnameescape(argv(0)) fnameescape(argv(1))"
37 37
38 38 Tool arguments can include variables that are expanded at runtime::
39 39
40 40 $parent1, $plabel1 - filename, descriptive label of first parent
41 41 $child, $clabel - filename, descriptive label of child revision
42 42 $parent2, $plabel2 - filename, descriptive label of second parent
43 43 $root - repository root
44 44 $parent is an alias for $parent1.
45 45
46 46 The extdiff extension will look in your [diff-tools] and [merge-tools]
47 47 sections for diff tool arguments, when none are specified in [extdiff].
48 48
49 49 ::
50 50
51 51 [extdiff]
52 52 kdiff3 =
53 53
54 54 [diff-tools]
55 55 kdiff3.diffargs=--L1 '$plabel1' --L2 '$clabel' $parent $child
56 56
57 57 You can use -I/-X and list of file or directory names like normal
58 58 :hg:`diff` command. The extdiff extension makes snapshots of only
59 59 needed files, so running the external diff program will actually be
60 60 pretty fast (at least faster than having to compare the entire tree).
61 61 '''
62 62
63 63 from __future__ import absolute_import
64 64
65 65 import os
66 66 import re
67 67 import shlex
68 68 import shutil
69 69 import tempfile
70 70 from mercurial.i18n import _
71 71 from mercurial.node import (
72 72 nullid,
73 73 short,
74 74 )
75 75 from mercurial import (
76 76 archival,
77 77 cmdutil,
78 78 commands,
79 79 error,
80 80 filemerge,
81 81 scmutil,
82 82 util,
83 83 )
84 84
85 85 cmdtable = {}
86 86 command = cmdutil.command(cmdtable)
87 # Note for extension authors: ONLY specify testedwith = 'internal' for
87 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
88 88 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
89 89 # be specifying the version(s) of Mercurial they are tested with, or
90 90 # leave the attribute unspecified.
91 testedwith = 'internal'
91 testedwith = 'ships-with-hg-core'
92 92
def snapshot(ui, repo, files, node, tmproot, listsubrepos):
    '''snapshot files as of some revision
    if not using snapshot, -I/-X does not work and recursive diff
    in tools like kdiff3 and meld displays too many files.'''
    # Name the snapshot directory after the repository, qualified with
    # the short node when snapshotting a committed revision.
    dirname = os.path.basename(repo.root) or "root"
    if node is not None:
        dirname = '%s.%s' % (dirname, short(node))
    base = os.path.join(tmproot, dirname)
    os.mkdir(base)
    fns_and_mtime = []

    if node is None:
        ui.note(_('making snapshot of %d files from working directory\n') %
                (len(files)))
    else:
        ui.note(_('making snapshot of %d files from rev %s\n') %
                (len(files), short(node)))

    if files:
        # Plain file copies only -- suppress the .hg_archival.txt
        # metadata file the archiver would otherwise create.
        repo.ui.setconfig("ui", "archivemeta", False)

        archival.archive(repo, base, node, 'files',
                         matchfn=scmutil.matchfiles(repo, files),
                         subrepos=listsubrepos)

        for fn in sorted(files):
            wfn = util.pconvert(fn)
            ui.note(' %s\n' % wfn)
            if node is not None:
                continue
            # Working-directory snapshot: remember each copy's mtime so
            # edits made inside the external tool can be copied back.
            dest = os.path.join(base, wfn)
            fns_and_mtime.append((dest, repo.wjoin(fn),
                                  os.lstat(dest).st_mtime))
    return dirname, fns_and_mtime
130 130
def dodiff(ui, repo, cmdline, pats, opts):
    '''Do the actual diff:

    - copy to a temp structure if diffing 2 internal revisions
    - copy to a temp structure if diffing working revision with
      another one and more than 1 file is changed
    - just invoke the diff for a single file in the working dir
    '''

    # Resolve which nodes are being compared: --change diffs a revision
    # against its parents, --rev pairs explicit revisions, otherwise the
    # working directory is compared against its parent(s).
    revs = opts.get('rev')
    change = opts.get('change')
    do3way = '$parent2' in cmdline

    if revs and change:
        msg = _('cannot specify --rev and --change at the same time')
        raise error.Abort(msg)
    elif change:
        node2 = scmutil.revsingle(repo, change, None).node()
        node1a, node1b = repo.changelog.parents(node2)
    else:
        node1a, node2 = scmutil.revpair(repo, revs)
        if not revs:
            node1b = repo.dirstate.p2()
        else:
            node1b = nullid

    # Disable 3-way merge if there is only one parent
    if do3way:
        if node1b == nullid:
            do3way = False

    subrepos=opts.get('subrepos')

    matcher = scmutil.match(repo[node2], pats, opts)

    if opts.get('patch'):
        if subrepos:
            raise error.Abort(_('--patch cannot be used with --subrepos'))
        if node2 is None:
            raise error.Abort(_('--patch requires two revisions'))
    else:
        # Collect modified/added/removed sets for each comparison side;
        # bail out early when nothing differs.
        mod_a, add_a, rem_a = map(set, repo.status(node1a, node2, matcher,
                                                   listsubrepos=subrepos)[:3])
        if do3way:
            mod_b, add_b, rem_b = map(set,
                                      repo.status(node1b, node2, matcher,
                                                  listsubrepos=subrepos)[:3])
        else:
            mod_b, add_b, rem_b = set(), set(), set()
        modadd = mod_a | add_a | mod_b | add_b
        common = modadd | rem_a | rem_b
        if not common:
            return 0

    # Every snapshot/patch lives under one temp dir that is removed in
    # the finally block below, even if the external tool fails.
    tmproot = tempfile.mkdtemp(prefix='extdiff.')
    try:
        if not opts.get('patch'):
            # Always make a copy of node1a (and node1b, if applicable)
            dir1a_files = mod_a | rem_a | ((mod_b | add_b) - add_a)
            dir1a = snapshot(ui, repo, dir1a_files, node1a, tmproot,
                             subrepos)[0]
            rev1a = '@%d' % repo[node1a].rev()
            if do3way:
                dir1b_files = mod_b | rem_b | ((mod_a | add_a) - add_b)
                dir1b = snapshot(ui, repo, dir1b_files, node1b, tmproot,
                                 subrepos)[0]
                rev1b = '@%d' % repo[node1b].rev()
            else:
                dir1b = None
                rev1b = ''

            fns_and_mtime = []

            # If node2 in not the wc or there is >1 change, copy it
            dir2root = ''
            rev2 = ''
            if node2:
                dir2 = snapshot(ui, repo, modadd, node2, tmproot, subrepos)[0]
                rev2 = '@%d' % repo[node2].rev()
            elif len(common) > 1:
                #we only actually need to get the files to copy back to
                #the working dir in this case (because the other cases
                #are: diffing 2 revisions or single file -- in which case
                #the file is already directly passed to the diff tool).
                dir2, fns_and_mtime = snapshot(ui, repo, modadd, None, tmproot,
                                               subrepos)
            else:
                # This lets the diff tool open the changed file directly
                dir2 = ''
                dir2root = repo.root

            label1a = rev1a
            label1b = rev1b
            label2 = rev2

            # If only one change, diff the files instead of the directories
            # Handle bogus modifies correctly by checking if the files exist
            if len(common) == 1:
                common_file = util.localpath(common.pop())
                dir1a = os.path.join(tmproot, dir1a, common_file)
                label1a = common_file + rev1a
                if not os.path.isfile(dir1a):
                    dir1a = os.devnull
                if do3way:
                    dir1b = os.path.join(tmproot, dir1b, common_file)
                    label1b = common_file + rev1b
                    if not os.path.isfile(dir1b):
                        dir1b = os.devnull
                dir2 = os.path.join(dir2root, dir2, common_file)
                label2 = common_file + rev2
        else:
            # --patch mode: export both revisions as patch files and let
            # the external tool compare the patches themselves.
            template = 'hg-%h.patch'
            cmdutil.export(repo, [repo[node1a].rev(), repo[node2].rev()],
                           template=repo.vfs.reljoin(tmproot, template),
                           match=matcher)
            label1a = cmdutil.makefilename(repo, template, node1a)
            label2 = cmdutil.makefilename(repo, template, node2)
            dir1a = repo.vfs.reljoin(tmproot, label1a)
            dir2 = repo.vfs.reljoin(tmproot, label2)
            dir1b = None
            label1b = None
            fns_and_mtime = []

        # Function to quote file/dir names in the argument string.
        # When not operating in 3-way mode, an empty string is
        # returned for parent2
        replace = {'parent': dir1a, 'parent1': dir1a, 'parent2': dir1b,
                   'plabel1': label1a, 'plabel2': label1b,
                   'clabel': label2, 'child': dir2,
                   'root': repo.root}
        def quote(match):
            pre = match.group(2)
            key = match.group(3)
            if not do3way and key == 'parent2':
                return pre
            return pre + util.shellquote(replace[key])

        # Match parent2 first, so 'parent1?' will match both parent1 and parent
        regex = (r'''(['"]?)([^\s'"$]*)'''
                 r'\$(parent2|parent1?|child|plabel1|plabel2|clabel|root)\1')
        if not do3way and not re.search(regex, cmdline):
            cmdline += ' $parent1 $child'
        cmdline = re.sub(regex, quote, cmdline)

        ui.debug('running %r in %s\n' % (cmdline, tmproot))
        ui.system(cmdline, cwd=tmproot)

        # Copy back any working-dir snapshot files the tool modified.
        for copy_fn, working_fn, mtime in fns_and_mtime:
            if os.lstat(copy_fn).st_mtime != mtime:
                ui.debug('file changed while diffing. '
                         'Overwriting: %s (src: %s)\n' % (working_fn, copy_fn))
                util.copyfile(copy_fn, working_fn)

        return 1
    finally:
        ui.note(_('cleaning up temp directory\n'))
        shutil.rmtree(tmproot)
288 288
289 289 extdiffopts = [
290 290 ('o', 'option', [],
291 291 _('pass option to comparison program'), _('OPT')),
292 292 ('r', 'rev', [], _('revision'), _('REV')),
293 293 ('c', 'change', '', _('change made by revision'), _('REV')),
294 294 ('', 'patch', None, _('compare patches for two revisions'))
295 295 ] + commands.walkopts + commands.subrepoopts
296 296
@command('extdiff',
    [('p', 'program', '', _('comparison program to run'), _('CMD')),
    ] + extdiffopts,
    _('hg extdiff [OPT]... [FILE]...'),
    inferrepo=True)
def extdiff(ui, repo, *pats, **opts):
    '''use external program to diff repository (or selected files)

    Show differences between revisions for the specified files, using
    an external program. The default program used is diff, with
    default options "-Npru".

    To select a different program, use the -p/--program option. The
    program will be passed the names of two directories to compare. To
    pass additional options to the program, use -o/--option. These
    will be passed before the names of the directories to compare.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.'''
    # Without -p/--program, fall back to plain "diff -Npru".
    program = opts.get('program')
    option = opts.get('option')
    if not program:
        program, option = 'diff', option or ['-Npru']
    cmdline = ' '.join(util.shellquote(arg) for arg in [program] + option)
    return dodiff(ui, repo, cmdline, pats, opts)
326 326
class savedcmd(object):
    """use external program to diff repository (or selected files)

    Show differences between revisions for the specified files, using
    the following program::

        %(path)s

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.
    """

    def __init__(self, path, cmdline):
        # We can't pass non-ASCII through docstrings (and path is
        # in an unknown encoding anyway)
        docpath = path.encode("string-escape")
        # Substitute the tool path into the %(path)s slot of the class
        # docstring so "hg help <tool>" shows the actual program.
        self.__doc__ = self.__doc__ % {'path': util.uirepr(docpath)}
        # Base shell command line assembled by uisetup().
        self._cmdline = cmdline

    def __call__(self, ui, repo, *pats, **opts):
        # Extra -o/--option flags are appended after the saved command
        # line before handing off to the shared diff driver.
        options = ' '.join(map(util.shellquote, opts['option']))
        if options:
            options = ' ' + options
        return dodiff(ui, repo, self._cmdline + options, pats, opts)
354 354
def _findtoolexe(ui, cmd):
    # Resolve `cmd` to an executable: PATH lookup first, then the
    # [merge-tools] registry, finally the bare command name.
    path = util.findexe(cmd)
    if path is None:
        path = filemerge.findexternaltool(ui, cmd) or cmd
    return path

def uisetup(ui):
    """Register one hg command per tool configured in [extdiff].

    Handles both configuration styles: the one-line "tool = [exe opts]"
    form and the legacy split "cmd.tool = exe" / "opts.tool = opts"
    form. When no per-tool options are configured, diff arguments are
    inherited from [diff-tools] or [merge-tools].
    """
    for cmd, path in ui.configitems('extdiff'):
        path = util.expandpath(path)
        if cmd.startswith('cmd.'):
            cmd = cmd[4:]
            if not path:
                path = _findtoolexe(ui, cmd)
            diffopts = ui.config('extdiff', 'opts.' + cmd, '')
            cmdline = util.shellquote(path)
            if diffopts:
                cmdline += ' ' + diffopts
        elif cmd.startswith('opts.'):
            # Consumed together with the matching cmd.* entry above.
            continue
        else:
            if path:
                # case "cmd = path opts"
                cmdline = path
                diffopts = len(shlex.split(cmdline)) > 1
            else:
                # case "cmd ="
                path = _findtoolexe(ui, cmd)
                cmdline = util.shellquote(path)
                diffopts = False
        # look for diff arguments in [diff-tools] then [merge-tools]
        if not diffopts:
            args = ui.config('diff-tools', cmd+'.diffargs') or \
                   ui.config('merge-tools', cmd+'.diffargs')
            if args:
                cmdline += ' ' + args
        command(cmd, extdiffopts[:], _('hg %s [OPTION]... [FILE]...') % cmd,
                inferrepo=True)(savedcmd(path, cmdline))

# tell hggettext to extract docstrings from these functions:
i18nfunctions = [savedcmd]
@@ -1,165 +1,165 b''
1 1 # fetch.py - pull and merge remote changes
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''pull, update and merge in one command (DEPRECATED)'''
9 9
10 10 from __future__ import absolute_import
11 11
12 12 from mercurial.i18n import _
13 13 from mercurial.node import (
14 14 short,
15 15 )
16 16 from mercurial import (
17 17 cmdutil,
18 18 commands,
19 19 error,
20 20 exchange,
21 21 hg,
22 22 lock,
23 23 util,
24 24 )
25 25
26 26 release = lock.release
27 27 cmdtable = {}
28 28 command = cmdutil.command(cmdtable)
29 # Note for extension authors: ONLY specify testedwith = 'internal' for
29 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
30 30 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
31 31 # be specifying the version(s) of Mercurial they are tested with, or
32 32 # leave the attribute unspecified.
33 testedwith = 'internal'
33 testedwith = 'ships-with-hg-core'
34 34
@command('fetch',
    [('r', 'rev', [],
      _('a specific revision you would like to pull'), _('REV')),
     ('e', 'edit', None, _('invoke editor on commit messages')),
     ('', 'force-editor', None, _('edit commit message (DEPRECATED)')),
     ('', 'switch-parent', None, _('switch parents when merging')),
    ] + commands.commitopts + commands.commitopts2 + commands.remoteopts,
    _('hg fetch [SOURCE]'))
def fetch(ui, repo, source='default', **opts):
    '''pull changes from a remote repository, merge new changes if needed.

    This finds all changes from the repository at the specified path
    or URL and adds them to the local repository.

    If the pulled changes add a new branch head, the head is
    automatically merged, and the result of the merge is committed.
    Otherwise, the working directory is updated to include the new
    changes.

    When a merge is needed, the working directory is first updated to
    the newly pulled changes. Local changes are then merged into the
    pulled changes. To switch the merge order, use --switch-parent.

    See :hg:`help dates` for a list of formats valid for -d/--date.

    Returns 0 on success.
    '''

    date = opts.get('date')
    if date:
        opts['date'] = util.parsedate(date)

    # Refuse to run unless the working directory is at the branch tip;
    # otherwise an automatic merge could pick the wrong parent.
    parent, _p2 = repo.dirstate.parents()
    branch = repo.dirstate.branch()
    try:
        branchnode = repo.branchtip(branch)
    except error.RepoLookupError:
        branchnode = None
    if parent != branchnode:
        raise error.Abort(_('working directory not at branch tip'),
                          hint=_("use 'hg update' to check out branch tip"))

    # Hold both the working-dir lock and the store lock for the whole
    # pull/update/merge/commit sequence; released in the finally below.
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        cmdutil.bailifchanged(repo)

        bheads = repo.branchheads(branch)
        bheads = [head for head in bheads if len(repo[head].children()) == 0]
        if len(bheads) > 1:
            raise error.Abort(_('multiple heads in this branch '
                                '(use "hg heads ." and "hg merge" to merge)'))

        other = hg.peer(repo, opts, ui.expandpath(source))
        ui.status(_('pulling from %s\n') %
                  util.hidepassword(ui.expandpath(source)))
        revs = None
        if opts['rev']:
            try:
                revs = [other.lookup(rev) for rev in opts['rev']]
            except error.CapabilityError:
                err = _("other repository doesn't support revision lookup, "
                        "so a rev cannot be specified.")
                raise error.Abort(err)

        # Are there any changes at all?
        modheads = exchange.pull(repo, other, heads=revs).cgresult
        if modheads == 0:
            return 0

        # Is this a simple fast-forward along the current branch?
        newheads = repo.branchheads(branch)
        newchildren = repo.changelog.nodesbetween([parent], newheads)[2]
        if len(newheads) == 1 and len(newchildren):
            if newchildren[0] != parent:
                return hg.update(repo, newchildren[0])
            else:
                return 0

        # Are there more than one additional branch heads?
        newchildren = [n for n in newchildren if n != parent]
        newparent = parent
        if newchildren:
            newparent = newchildren[0]
            hg.clean(repo, newparent)
        newheads = [n for n in newheads if n != newparent]
        if len(newheads) > 1:
            # Too many heads to merge automatically; let the user decide.
            ui.status(_('not merging with %d other new branch heads '
                        '(use "hg heads ." and "hg merge" to merge them)\n') %
                      (len(newheads) - 1))
            return 1

        if not newheads:
            return 0

        # Otherwise, let's merge.
        err = False
        if newheads:
            # By default, we consider the repository we're pulling
            # *from* as authoritative, so we merge our changes into
            # theirs.
            if opts['switch_parent']:
                firstparent, secondparent = newparent, newheads[0]
            else:
                firstparent, secondparent = newheads[0], newparent
            ui.status(_('updating to %d:%s\n') %
                      (repo.changelog.rev(firstparent),
                       short(firstparent)))
            hg.clean(repo, firstparent)
            ui.status(_('merging with %d:%s\n') %
                      (repo.changelog.rev(secondparent), short(secondparent)))
            err = hg.merge(repo, secondparent, remind=False)

        if not err:
            # we don't translate commit messages
            message = (cmdutil.logmessage(ui, opts) or
                       ('Automated merge with %s' %
                        util.removeauth(other.url())))
            editopt = opts.get('edit') or opts.get('force_editor')
            editor = cmdutil.getcommiteditor(edit=editopt, editform='fetch')
            n = repo.commit(message, opts['user'], opts['date'], editor=editor)
            ui.status(_('new changeset %d:%s merges remote changes '
                        'with local\n') % (repo.changelog.rev(n),
                                           short(n)))

        return err

    finally:
        release(lock, wlock)
@@ -1,695 +1,695 b''
1 1 # __init__.py - fsmonitor initialization and overrides
2 2 #
3 3 # Copyright 2013-2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''Faster status operations with the Watchman file monitor (EXPERIMENTAL)
9 9
10 10 Integrates the file-watching program Watchman with Mercurial to produce faster
11 11 status results.
12 12
13 13 On a particular Linux system, for a real-world repository with over 400,000
14 14 files hosted on ext4, vanilla `hg status` takes 1.3 seconds. On the same
15 15 system, with fsmonitor it takes about 0.3 seconds.
16 16
17 17 fsmonitor requires no configuration -- it will tell Watchman about your
18 18 repository as necessary. You'll need to install Watchman from
19 19 https://facebook.github.io/watchman/ and make sure it is in your PATH.
20 20
21 21 The following configuration options exist:
22 22
23 23 ::
24 24
25 25 [fsmonitor]
26 26 mode = {off, on, paranoid}
27 27
28 28 When `mode = off`, fsmonitor will disable itself (similar to not loading the
29 29 extension at all). When `mode = on`, fsmonitor will be enabled (the default).
30 30 When `mode = paranoid`, fsmonitor will query both Watchman and the filesystem,
31 31 and ensure that the results are consistent.
32 32
33 33 ::
34 34
35 35 [fsmonitor]
36 36 timeout = (float)
37 37
38 38 A value, in seconds, that determines how long fsmonitor will wait for Watchman
39 39 to return results. Defaults to `2.0`.
40 40
41 41 ::
42 42
43 43 [fsmonitor]
44 44 blacklistusers = (list of userids)
45 45
46 46 A list of usernames for which fsmonitor will disable itself altogether.
47 47
48 48 ::
49 49
50 50 [fsmonitor]
51 51 walk_on_invalidate = (boolean)
52 52
53 53 Whether or not to walk the whole repo ourselves when our cached state has been
54 54 invalidated, for example when Watchman has been restarted or .hgignore rules
55 55 have been changed. Walking the repo in that case can result in competing for
56 56 I/O with Watchman. For large repos it is recommended to set this value to
57 57 false. You may wish to set this to true if you have a very fast filesystem
58 58 that can outpace the IPC overhead of getting the result data for the full repo
59 59 from Watchman. Defaults to false.
60 60
61 61 fsmonitor is incompatible with the largefiles and eol extensions, and
62 62 will disable itself if any of those are active.
63 63
64 64 '''
65 65
66 66 # Platforms Supported
67 67 # ===================
68 68 #
69 69 # **Linux:** *Stable*. Watchman and fsmonitor are both known to work reliably,
70 70 # even under severe loads.
71 71 #
72 72 # **Mac OS X:** *Stable*. The Mercurial test suite passes with fsmonitor
73 73 # turned on, on case-insensitive HFS+. There has been a reasonable amount of
74 74 # user testing under normal loads.
75 75 #
76 76 # **Solaris, BSD:** *Alpha*. watchman and fsmonitor are believed to work, but
77 77 # very little testing has been done.
78 78 #
79 79 # **Windows:** *Alpha*. Not in a release version of watchman or fsmonitor yet.
80 80 #
81 81 # Known Issues
82 82 # ============
83 83 #
84 84 # * fsmonitor will disable itself if any of the following extensions are
85 85 # enabled: largefiles, inotify, eol; or if the repository has subrepos.
86 86 # * fsmonitor will produce incorrect results if nested repos that are not
87 87 # subrepos exist. *Workaround*: add nested repo paths to your `.hgignore`.
88 88 #
89 89 # The issues related to nested repos and subrepos are probably not fundamental
90 90 # ones. Patches to fix them are welcome.
91 91
92 92 from __future__ import absolute_import
93 93
94 94 import hashlib
95 95 import os
96 96 import stat
97 97 import sys
98 98
99 99 from mercurial.i18n import _
100 100 from mercurial import (
101 101 context,
102 102 extensions,
103 103 localrepo,
104 104 merge,
105 105 pathutil,
106 106 scmutil,
107 107 util,
108 108 )
109 109 from mercurial import match as matchmod
110 110
111 111 from . import (
112 112 state,
113 113 watchmanclient,
114 114 )
115 115
116 # Note for extension authors: ONLY specify testedwith = 'internal' for
116 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
117 117 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
118 118 # be specifying the version(s) of Mercurial they are tested with, or
119 119 # leave the attribute unspecified.
120 testedwith = 'internal'
120 testedwith = 'ships-with-hg-core'
121 121
122 122 # This extension is incompatible with the following blacklisted extensions
123 123 # and will disable itself when encountering one of these:
124 124 _blacklist = ['largefiles', 'eol']
125 125
def _handleunavailable(ui, state, ex):
    """Exception handler for Watchman interaction exceptions"""
    # Anything other than an explicit Unavailable error is only logged.
    if not isinstance(ex, watchmanclient.Unavailable):
        ui.log('fsmonitor', 'Watchman exception: %s\n', ex)
        return
    if ex.warn:
        ui.warn(str(ex) + '\n')
    if ex.invalidate:
        state.invalidate()
    ui.log('fsmonitor', 'Watchman unavailable: %s\n', ex.msg)
136 136
def _hashignore(ignore):
    """Calculate hash for ignore patterns and filenames

    If this information changes between Mercurial invocations, we can't
    rely on Watchman information anymore and have to re-scan the working
    copy.

    """
    digest = hashlib.sha1()
    # Fold each pattern attribute into the digest; a NUL-NUL separator
    # is emitted either way so present/absent attributes hash apart.
    for attr in ('includepat', 'excludepat', 'patternspat'):
        if util.safehasattr(ignore, attr):
            digest.update(getattr(ignore, attr))
        digest.update('\0\0')
    if util.safehasattr(ignore, '_files'):
        for f in ignore._files:
            digest.update(f)
    digest.update('\0')
    return digest.hexdigest()
160 160
161 161 def overridewalk(orig, self, match, subrepos, unknown, ignored, full=True):
162 162 '''Replacement for dirstate.walk, hooking into Watchman.
163 163
164 164 Whenever full is False, ignored is False, and the Watchman client is
165 165 available, use Watchman combined with saved state to possibly return only a
166 166 subset of files.'''
167 167 def bail():
168 168 return orig(match, subrepos, unknown, ignored, full=True)
169 169
170 170 if full or ignored or not self._watchmanclient.available():
171 171 return bail()
172 172 state = self._fsmonitorstate
173 173 clock, ignorehash, notefiles = state.get()
174 174 if not clock:
175 175 if state.walk_on_invalidate:
176 176 return bail()
177 177 # Initial NULL clock value, see
178 178 # https://facebook.github.io/watchman/docs/clockspec.html
179 179 clock = 'c:0:0'
180 180 notefiles = []
181 181
182 182 def fwarn(f, msg):
183 183 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
184 184 return False
185 185
186 186 def badtype(mode):
187 187 kind = _('unknown')
188 188 if stat.S_ISCHR(mode):
189 189 kind = _('character device')
190 190 elif stat.S_ISBLK(mode):
191 191 kind = _('block device')
192 192 elif stat.S_ISFIFO(mode):
193 193 kind = _('fifo')
194 194 elif stat.S_ISSOCK(mode):
195 195 kind = _('socket')
196 196 elif stat.S_ISDIR(mode):
197 197 kind = _('directory')
198 198 return _('unsupported file type (type is %s)') % kind
199 199
200 200 ignore = self._ignore
201 201 dirignore = self._dirignore
202 202 if unknown:
203 203 if _hashignore(ignore) != ignorehash and clock != 'c:0:0':
204 204 # ignore list changed -- can't rely on Watchman state any more
205 205 if state.walk_on_invalidate:
206 206 return bail()
207 207 notefiles = []
208 208 clock = 'c:0:0'
209 209 else:
210 210 # always ignore
211 211 ignore = util.always
212 212 dirignore = util.always
213 213
214 214 matchfn = match.matchfn
215 215 matchalways = match.always()
216 216 dmap = self._map
217 217 nonnormalset = getattr(self, '_nonnormalset', None)
218 218
219 219 copymap = self._copymap
220 220 getkind = stat.S_IFMT
221 221 dirkind = stat.S_IFDIR
222 222 regkind = stat.S_IFREG
223 223 lnkkind = stat.S_IFLNK
224 224 join = self._join
225 225 normcase = util.normcase
226 226 fresh_instance = False
227 227
228 228 exact = skipstep3 = False
229 229 if matchfn == match.exact: # match.exact
230 230 exact = True
231 231 dirignore = util.always # skip step 2
232 232 elif match.files() and not match.anypats(): # match.match, no patterns
233 233 skipstep3 = True
234 234
235 235 if not exact and self._checkcase:
236 236 # note that even though we could receive directory entries, we're only
237 237 # interested in checking if a file with the same name exists. So only
238 238 # normalize files if possible.
239 239 normalize = self._normalizefile
240 240 skipstep3 = False
241 241 else:
242 242 normalize = None
243 243
244 244 # step 1: find all explicit files
245 245 results, work, dirsnotfound = self._walkexplicit(match, subrepos)
246 246
247 247 skipstep3 = skipstep3 and not (work or dirsnotfound)
248 248 work = [d for d in work if not dirignore(d[0])]
249 249
250 250 if not work and (exact or skipstep3):
251 251 for s in subrepos:
252 252 del results[s]
253 253 del results['.hg']
254 254 return results
255 255
256 256 # step 2: query Watchman
257 257 try:
258 258 # Use the user-configured timeout for the query.
259 259 # Add a little slack over the top of the user query to allow for
260 260 # overheads while transferring the data
261 261 self._watchmanclient.settimeout(state.timeout + 0.1)
262 262 result = self._watchmanclient.command('query', {
263 263 'fields': ['mode', 'mtime', 'size', 'exists', 'name'],
264 264 'since': clock,
265 265 'expression': [
266 266 'not', [
267 267 'anyof', ['dirname', '.hg'],
268 268 ['name', '.hg', 'wholename']
269 269 ]
270 270 ],
271 271 'sync_timeout': int(state.timeout * 1000),
272 272 'empty_on_fresh_instance': state.walk_on_invalidate,
273 273 })
274 274 except Exception as ex:
275 275 _handleunavailable(self._ui, state, ex)
276 276 self._watchmanclient.clearconnection()
277 277 return bail()
278 278 else:
279 279 # We need to propagate the last observed clock up so that we
280 280 # can use it for our next query
281 281 state.setlastclock(result['clock'])
282 282 if result['is_fresh_instance']:
283 283 if state.walk_on_invalidate:
284 284 state.invalidate()
285 285 return bail()
286 286 fresh_instance = True
287 287 # Ignore any prior noteable files from the state info
288 288 notefiles = []
289 289
290 290 # for file paths which require normalization and we encounter a case
291 291 # collision, we store our own foldmap
292 292 if normalize:
293 293 foldmap = dict((normcase(k), k) for k in results)
294 294
295 295 switch_slashes = os.sep == '\\'
296 296 # The order of the results is, strictly speaking, undefined.
297 297 # For case changes on a case insensitive filesystem we may receive
298 298 # two entries, one with exists=True and another with exists=False.
299 299 # The exists=True entries in the same response should be interpreted
300 300 # as being happens-after the exists=False entries due to the way that
301 301 # Watchman tracks files. We use this property to reconcile deletes
302 302 # for name case changes.
303 303 for entry in result['files']:
304 304 fname = entry['name']
305 305 if switch_slashes:
306 306 fname = fname.replace('\\', '/')
307 307 if normalize:
308 308 normed = normcase(fname)
309 309 fname = normalize(fname, True, True)
310 310 foldmap[normed] = fname
311 311 fmode = entry['mode']
312 312 fexists = entry['exists']
313 313 kind = getkind(fmode)
314 314
315 315 if not fexists:
316 316 # if marked as deleted and we don't already have a change
317 317 # record, mark it as deleted. If we already have an entry
318 318 # for fname then it was either part of walkexplicit or was
319 319 # an earlier result that was a case change
320 320 if fname not in results and fname in dmap and (
321 321 matchalways or matchfn(fname)):
322 322 results[fname] = None
323 323 elif kind == dirkind:
324 324 if fname in dmap and (matchalways or matchfn(fname)):
325 325 results[fname] = None
326 326 elif kind == regkind or kind == lnkkind:
327 327 if fname in dmap:
328 328 if matchalways or matchfn(fname):
329 329 results[fname] = entry
330 330 elif (matchalways or matchfn(fname)) and not ignore(fname):
331 331 results[fname] = entry
332 332 elif fname in dmap and (matchalways or matchfn(fname)):
333 333 results[fname] = None
334 334
335 335 # step 3: query notable files we don't already know about
336 336 # XXX try not to iterate over the entire dmap
337 337 if normalize:
338 338 # any notable files that have changed case will already be handled
339 339 # above, so just check membership in the foldmap
340 340 notefiles = set((normalize(f, True, True) for f in notefiles
341 341 if normcase(f) not in foldmap))
342 342 visit = set((f for f in notefiles if (f not in results and matchfn(f)
343 343 and (f in dmap or not ignore(f)))))
344 344
345 345 if nonnormalset is not None and not fresh_instance:
346 346 if matchalways:
347 347 visit.update(f for f in nonnormalset if f not in results)
348 348 visit.update(f for f in copymap if f not in results)
349 349 else:
350 350 visit.update(f for f in nonnormalset
351 351 if f not in results and matchfn(f))
352 352 visit.update(f for f in copymap
353 353 if f not in results and matchfn(f))
354 354 else:
355 355 if matchalways:
356 356 visit.update(f for f, st in dmap.iteritems()
357 357 if (f not in results and
358 358 (st[2] < 0 or st[0] != 'n' or fresh_instance)))
359 359 visit.update(f for f in copymap if f not in results)
360 360 else:
361 361 visit.update(f for f, st in dmap.iteritems()
362 362 if (f not in results and
363 363 (st[2] < 0 or st[0] != 'n' or fresh_instance)
364 364 and matchfn(f)))
365 365 visit.update(f for f in copymap
366 366 if f not in results and matchfn(f))
367 367
368 368 audit = pathutil.pathauditor(self._root).check
369 369 auditpass = [f for f in visit if audit(f)]
370 370 auditpass.sort()
371 371 auditfail = visit.difference(auditpass)
372 372 for f in auditfail:
373 373 results[f] = None
374 374
375 375 nf = iter(auditpass).next
376 376 for st in util.statfiles([join(f) for f in auditpass]):
377 377 f = nf()
378 378 if st or f in dmap:
379 379 results[f] = st
380 380
381 381 for s in subrepos:
382 382 del results[s]
383 383 del results['.hg']
384 384 return results
385 385
386 386 def overridestatus(
387 387 orig, self, node1='.', node2=None, match=None, ignored=False,
388 388 clean=False, unknown=False, listsubrepos=False):
389 389 listignored = ignored
390 390 listclean = clean
391 391 listunknown = unknown
392 392
393 393 def _cmpsets(l1, l2):
394 394 try:
395 395 if 'FSMONITOR_LOG_FILE' in os.environ:
396 396 fn = os.environ['FSMONITOR_LOG_FILE']
397 397 f = open(fn, 'wb')
398 398 else:
399 399 fn = 'fsmonitorfail.log'
400 400 f = self.opener(fn, 'wb')
401 401 except (IOError, OSError):
402 402 self.ui.warn(_('warning: unable to write to %s\n') % fn)
403 403 return
404 404
405 405 try:
406 406 for i, (s1, s2) in enumerate(zip(l1, l2)):
407 407 if set(s1) != set(s2):
408 408 f.write('sets at position %d are unequal\n' % i)
409 409 f.write('watchman returned: %s\n' % s1)
410 410 f.write('stat returned: %s\n' % s2)
411 411 finally:
412 412 f.close()
413 413
414 414 if isinstance(node1, context.changectx):
415 415 ctx1 = node1
416 416 else:
417 417 ctx1 = self[node1]
418 418 if isinstance(node2, context.changectx):
419 419 ctx2 = node2
420 420 else:
421 421 ctx2 = self[node2]
422 422
423 423 working = ctx2.rev() is None
424 424 parentworking = working and ctx1 == self['.']
425 425 match = match or matchmod.always(self.root, self.getcwd())
426 426
427 427 # Maybe we can use this opportunity to update Watchman's state.
428 428 # Mercurial uses workingcommitctx and/or memctx to represent the part of
429 429 # the workingctx that is to be committed. So don't update the state in
430 430 # that case.
431 431 # HG_PENDING is set in the environment when the dirstate is being updated
432 432 # in the middle of a transaction; we must not update our state in that
433 433 # case, or we risk forgetting about changes in the working copy.
434 434 updatestate = (parentworking and match.always() and
435 435 not isinstance(ctx2, (context.workingcommitctx,
436 436 context.memctx)) and
437 437 'HG_PENDING' not in os.environ)
438 438
439 439 try:
440 440 if self._fsmonitorstate.walk_on_invalidate:
441 441 # Use a short timeout to query the current clock. If that
442 442 # takes too long then we assume that the service will be slow
443 443 # to answer our query.
444 444 # walk_on_invalidate indicates that we prefer to walk the
445 445 # tree ourselves because we can ignore portions that Watchman
446 446 # cannot and we tend to be faster in the warmer buffer cache
447 447 # cases.
448 448 self._watchmanclient.settimeout(0.1)
449 449 else:
450 450 # Give Watchman more time to potentially complete its walk
451 451 # and return the initial clock. In this mode we assume that
452 452 # the filesystem will be slower than parsing a potentially
453 453 # very large Watchman result set.
454 454 self._watchmanclient.settimeout(
455 455 self._fsmonitorstate.timeout + 0.1)
456 456 startclock = self._watchmanclient.getcurrentclock()
457 457 except Exception as ex:
458 458 self._watchmanclient.clearconnection()
459 459 _handleunavailable(self.ui, self._fsmonitorstate, ex)
460 460 # boo, Watchman failed. bail
461 461 return orig(node1, node2, match, listignored, listclean,
462 462 listunknown, listsubrepos)
463 463
464 464 if updatestate:
465 465 # We need info about unknown files. This may make things slower the
466 466 # first time, but whatever.
467 467 stateunknown = True
468 468 else:
469 469 stateunknown = listunknown
470 470
471 471 r = orig(node1, node2, match, listignored, listclean, stateunknown,
472 472 listsubrepos)
473 473 modified, added, removed, deleted, unknown, ignored, clean = r
474 474
475 475 if updatestate:
476 476 notefiles = modified + added + removed + deleted + unknown
477 477 self._fsmonitorstate.set(
478 478 self._fsmonitorstate.getlastclock() or startclock,
479 479 _hashignore(self.dirstate._ignore),
480 480 notefiles)
481 481
482 482 if not listunknown:
483 483 unknown = []
484 484
485 485 # don't do paranoid checks if we're not going to query Watchman anyway
486 486 full = listclean or match.traversedir is not None
487 487 if self._fsmonitorstate.mode == 'paranoid' and not full:
488 488 # run status again and fall back to the old walk this time
489 489 self.dirstate._fsmonitordisable = True
490 490
491 491 # shut the UI up
492 492 quiet = self.ui.quiet
493 493 self.ui.quiet = True
494 494 fout, ferr = self.ui.fout, self.ui.ferr
495 495 self.ui.fout = self.ui.ferr = open(os.devnull, 'wb')
496 496
497 497 try:
498 498 rv2 = orig(
499 499 node1, node2, match, listignored, listclean, listunknown,
500 500 listsubrepos)
501 501 finally:
502 502 self.dirstate._fsmonitordisable = False
503 503 self.ui.quiet = quiet
504 504 self.ui.fout, self.ui.ferr = fout, ferr
505 505
506 506 # clean isn't tested since it's set to True above
507 507 _cmpsets([modified, added, removed, deleted, unknown, ignored, clean],
508 508 rv2)
509 509 modified, added, removed, deleted, unknown, ignored, clean = rv2
510 510
511 511 return scmutil.status(
512 512 modified, added, removed, deleted, unknown, ignored, clean)
513 513
514 514 def makedirstate(cls):
515 515 class fsmonitordirstate(cls):
516 516 def _fsmonitorinit(self, fsmonitorstate, watchmanclient):
517 517 # _fsmonitordisable is used in paranoid mode
518 518 self._fsmonitordisable = False
519 519 self._fsmonitorstate = fsmonitorstate
520 520 self._watchmanclient = watchmanclient
521 521
522 522 def walk(self, *args, **kwargs):
523 523 orig = super(fsmonitordirstate, self).walk
524 524 if self._fsmonitordisable:
525 525 return orig(*args, **kwargs)
526 526 return overridewalk(orig, self, *args, **kwargs)
527 527
528 528 def rebuild(self, *args, **kwargs):
529 529 self._fsmonitorstate.invalidate()
530 530 return super(fsmonitordirstate, self).rebuild(*args, **kwargs)
531 531
532 532 def invalidate(self, *args, **kwargs):
533 533 self._fsmonitorstate.invalidate()
534 534 return super(fsmonitordirstate, self).invalidate(*args, **kwargs)
535 535
536 536 return fsmonitordirstate
537 537
538 538 def wrapdirstate(orig, self):
539 539 ds = orig(self)
540 540 # only override the dirstate when Watchman is available for the repo
541 541 if util.safehasattr(self, '_fsmonitorstate'):
542 542 ds.__class__ = makedirstate(ds.__class__)
543 543 ds._fsmonitorinit(self._fsmonitorstate, self._watchmanclient)
544 544 return ds
545 545
546 546 def extsetup(ui):
547 547 wrapfilecache(localrepo.localrepository, 'dirstate', wrapdirstate)
548 548 if sys.platform == 'darwin':
549 549 # An assist for avoiding the dangling-symlink fsevents bug
550 550 extensions.wrapfunction(os, 'symlink', wrapsymlink)
551 551
552 552 extensions.wrapfunction(merge, 'update', wrapupdate)
553 553
554 554 def wrapsymlink(orig, source, link_name):
555 555 ''' if we create a dangling symlink, also touch the parent dir
556 556 to encourage fsevents notifications to work more correctly '''
557 557 try:
558 558 return orig(source, link_name)
559 559 finally:
560 560 try:
561 561 os.utime(os.path.dirname(link_name), None)
562 562 except OSError:
563 563 pass
564 564
565 565 class state_update(object):
566 566 ''' This context mananger is responsible for dispatching the state-enter
567 567 and state-leave signals to the watchman service '''
568 568
569 569 def __init__(self, repo, node, distance, partial):
570 570 self.repo = repo
571 571 self.node = node
572 572 self.distance = distance
573 573 self.partial = partial
574 574
575 575 def __enter__(self):
576 576 self._state('state-enter')
577 577 return self
578 578
579 579 def __exit__(self, type_, value, tb):
580 580 status = 'ok' if type_ is None else 'failed'
581 581 self._state('state-leave', status=status)
582 582
583 583 def _state(self, cmd, status='ok'):
584 584 if not util.safehasattr(self.repo, '_watchmanclient'):
585 585 return
586 586 try:
587 587 commithash = self.repo[self.node].hex()
588 588 self.repo._watchmanclient.command(cmd, {
589 589 'name': 'hg.update',
590 590 'metadata': {
591 591 # the target revision
592 592 'rev': commithash,
593 593 # approximate number of commits between current and target
594 594 'distance': self.distance,
595 595 # success/failure (only really meaningful for state-leave)
596 596 'status': status,
597 597 # whether the working copy parent is changing
598 598 'partial': self.partial,
599 599 }})
600 600 except Exception as e:
601 601 # Swallow any errors; fire and forget
602 602 self.repo.ui.log(
603 603 'watchman', 'Exception %s while running %s\n', e, cmd)
604 604
605 605 # Bracket working copy updates with calls to the watchman state-enter
606 606 # and state-leave commands. This allows clients to perform more intelligent
607 607 # settling during bulk file change scenarios
608 608 # https://facebook.github.io/watchman/docs/cmd/subscribe.html#advanced-settling
609 609 def wrapupdate(orig, repo, node, branchmerge, force, ancestor=None,
610 610 mergeancestor=False, labels=None, matcher=None, **kwargs):
611 611
612 612 distance = 0
613 613 partial = True
614 614 if matcher is None or matcher.always():
615 615 partial = False
616 616 wc = repo[None]
617 617 parents = wc.parents()
618 618 if len(parents) == 2:
619 619 anc = repo.changelog.ancestor(parents[0].node(), parents[1].node())
620 620 ancrev = repo[anc].rev()
621 621 distance = abs(repo[node].rev() - ancrev)
622 622 elif len(parents) == 1:
623 623 distance = abs(repo[node].rev() - parents[0].rev())
624 624
625 625 with state_update(repo, node, distance, partial):
626 626 return orig(
627 627 repo, node, branchmerge, force, ancestor, mergeancestor,
628 628 labels, matcher, *kwargs)
629 629
630 630 def reposetup(ui, repo):
631 631 # We don't work with largefiles or inotify
632 632 exts = extensions.enabled()
633 633 for ext in _blacklist:
634 634 if ext in exts:
635 635 ui.warn(_('The fsmonitor extension is incompatible with the %s '
636 636 'extension and has been disabled.\n') % ext)
637 637 return
638 638
639 639 if util.safehasattr(repo, 'dirstate'):
640 640 # We don't work with subrepos either. Note that we can get passed in
641 641 # e.g. a statichttprepo, which throws on trying to access the substate.
642 642 # XXX This sucks.
643 643 try:
644 644 # if repo[None].substate can cause a dirstate parse, which is too
645 645 # slow. Instead, look for a file called hgsubstate,
646 646 if repo.wvfs.exists('.hgsubstate') or repo.wvfs.exists('.hgsub'):
647 647 return
648 648 except AttributeError:
649 649 return
650 650
651 651 fsmonitorstate = state.state(repo)
652 652 if fsmonitorstate.mode == 'off':
653 653 return
654 654
655 655 try:
656 656 client = watchmanclient.client(repo)
657 657 except Exception as ex:
658 658 _handleunavailable(ui, fsmonitorstate, ex)
659 659 return
660 660
661 661 repo._fsmonitorstate = fsmonitorstate
662 662 repo._watchmanclient = client
663 663
664 664 # at this point since fsmonitorstate wasn't present, repo.dirstate is
665 665 # not a fsmonitordirstate
666 666 repo.dirstate.__class__ = makedirstate(repo.dirstate.__class__)
667 667 # nuke the dirstate so that _fsmonitorinit and subsequent configuration
668 668 # changes take effect on it
669 669 del repo._filecache['dirstate']
670 670 delattr(repo.unfiltered(), 'dirstate')
671 671
672 672 class fsmonitorrepo(repo.__class__):
673 673 def status(self, *args, **kwargs):
674 674 orig = super(fsmonitorrepo, self).status
675 675 return overridestatus(orig, self, *args, **kwargs)
676 676
677 677 repo.__class__ = fsmonitorrepo
678 678
679 679 def wrapfilecache(cls, propname, wrapper):
680 680 """Wraps a filecache property. These can't be wrapped using the normal
681 681 wrapfunction. This should eventually go into upstream Mercurial.
682 682 """
683 683 assert callable(wrapper)
684 684 for currcls in cls.__mro__:
685 685 if propname in currcls.__dict__:
686 686 origfn = currcls.__dict__[propname].func
687 687 assert callable(origfn)
688 688 def wrap(*args, **kwargs):
689 689 return wrapper(origfn, *args, **kwargs)
690 690 currcls.__dict__[propname].func = wrap
691 691 break
692 692
693 693 if currcls is object:
694 694 raise AttributeError(
695 695 _("type '%s' has no property '%s'") % (cls, propname))
@@ -1,318 +1,318 b''
1 1 # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 '''commands to sign and verify changesets'''
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import binascii
11 11 import os
12 12 import tempfile
13 13
14 14 from mercurial.i18n import _
15 15 from mercurial import (
16 16 cmdutil,
17 17 commands,
18 18 error,
19 19 match,
20 20 node as hgnode,
21 21 util,
22 22 )
23 23
24 24 cmdtable = {}
25 25 command = cmdutil.command(cmdtable)
26 # Note for extension authors: ONLY specify testedwith = 'internal' for
26 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
27 27 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
28 28 # be specifying the version(s) of Mercurial they are tested with, or
29 29 # leave the attribute unspecified.
30 testedwith = 'internal'
30 testedwith = 'ships-with-hg-core'
31 31
32 32 class gpg(object):
33 33 def __init__(self, path, key=None):
34 34 self.path = path
35 35 self.key = (key and " --local-user \"%s\"" % key) or ""
36 36
37 37 def sign(self, data):
38 38 gpgcmd = "%s --sign --detach-sign%s" % (self.path, self.key)
39 39 return util.filter(data, gpgcmd)
40 40
41 41 def verify(self, data, sig):
42 42 """ returns of the good and bad signatures"""
43 43 sigfile = datafile = None
44 44 try:
45 45 # create temporary files
46 46 fd, sigfile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".sig")
47 47 fp = os.fdopen(fd, 'wb')
48 48 fp.write(sig)
49 49 fp.close()
50 50 fd, datafile = tempfile.mkstemp(prefix="hg-gpg-", suffix=".txt")
51 51 fp = os.fdopen(fd, 'wb')
52 52 fp.write(data)
53 53 fp.close()
54 54 gpgcmd = ("%s --logger-fd 1 --status-fd 1 --verify "
55 55 "\"%s\" \"%s\"" % (self.path, sigfile, datafile))
56 56 ret = util.filter("", gpgcmd)
57 57 finally:
58 58 for f in (sigfile, datafile):
59 59 try:
60 60 if f:
61 61 os.unlink(f)
62 62 except OSError:
63 63 pass
64 64 keys = []
65 65 key, fingerprint = None, None
66 66 for l in ret.splitlines():
67 67 # see DETAILS in the gnupg documentation
68 68 # filter the logger output
69 69 if not l.startswith("[GNUPG:]"):
70 70 continue
71 71 l = l[9:]
72 72 if l.startswith("VALIDSIG"):
73 73 # fingerprint of the primary key
74 74 fingerprint = l.split()[10]
75 75 elif l.startswith("ERRSIG"):
76 76 key = l.split(" ", 3)[:2]
77 77 key.append("")
78 78 fingerprint = None
79 79 elif (l.startswith("GOODSIG") or
80 80 l.startswith("EXPSIG") or
81 81 l.startswith("EXPKEYSIG") or
82 82 l.startswith("BADSIG")):
83 83 if key is not None:
84 84 keys.append(key + [fingerprint])
85 85 key = l.split(" ", 2)
86 86 fingerprint = None
87 87 if key is not None:
88 88 keys.append(key + [fingerprint])
89 89 return keys
90 90
91 91 def newgpg(ui, **opts):
92 92 """create a new gpg instance"""
93 93 gpgpath = ui.config("gpg", "cmd", "gpg")
94 94 gpgkey = opts.get('key')
95 95 if not gpgkey:
96 96 gpgkey = ui.config("gpg", "key", None)
97 97 return gpg(gpgpath, gpgkey)
98 98
99 99 def sigwalk(repo):
100 100 """
101 101 walk over every sigs, yields a couple
102 102 ((node, version, sig), (filename, linenumber))
103 103 """
104 104 def parsefile(fileiter, context):
105 105 ln = 1
106 106 for l in fileiter:
107 107 if not l:
108 108 continue
109 109 yield (l.split(" ", 2), (context, ln))
110 110 ln += 1
111 111
112 112 # read the heads
113 113 fl = repo.file(".hgsigs")
114 114 for r in reversed(fl.heads()):
115 115 fn = ".hgsigs|%s" % hgnode.short(r)
116 116 for item in parsefile(fl.read(r).splitlines(), fn):
117 117 yield item
118 118 try:
119 119 # read local signatures
120 120 fn = "localsigs"
121 121 for item in parsefile(repo.vfs(fn), fn):
122 122 yield item
123 123 except IOError:
124 124 pass
125 125
126 126 def getkeys(ui, repo, mygpg, sigdata, context):
127 127 """get the keys who signed a data"""
128 128 fn, ln = context
129 129 node, version, sig = sigdata
130 130 prefix = "%s:%d" % (fn, ln)
131 131 node = hgnode.bin(node)
132 132
133 133 data = node2txt(repo, node, version)
134 134 sig = binascii.a2b_base64(sig)
135 135 keys = mygpg.verify(data, sig)
136 136
137 137 validkeys = []
138 138 # warn for expired key and/or sigs
139 139 for key in keys:
140 140 if key[0] == "ERRSIG":
141 141 ui.write(_("%s Unknown key ID \"%s\"\n")
142 142 % (prefix, shortkey(ui, key[1][:15])))
143 143 continue
144 144 if key[0] == "BADSIG":
145 145 ui.write(_("%s Bad signature from \"%s\"\n") % (prefix, key[2]))
146 146 continue
147 147 if key[0] == "EXPSIG":
148 148 ui.write(_("%s Note: Signature has expired"
149 149 " (signed by: \"%s\")\n") % (prefix, key[2]))
150 150 elif key[0] == "EXPKEYSIG":
151 151 ui.write(_("%s Note: This key has expired"
152 152 " (signed by: \"%s\")\n") % (prefix, key[2]))
153 153 validkeys.append((key[1], key[2], key[3]))
154 154 return validkeys
155 155
156 156 @command("sigs", [], _('hg sigs'))
157 157 def sigs(ui, repo):
158 158 """list signed changesets"""
159 159 mygpg = newgpg(ui)
160 160 revs = {}
161 161
162 162 for data, context in sigwalk(repo):
163 163 node, version, sig = data
164 164 fn, ln = context
165 165 try:
166 166 n = repo.lookup(node)
167 167 except KeyError:
168 168 ui.warn(_("%s:%d node does not exist\n") % (fn, ln))
169 169 continue
170 170 r = repo.changelog.rev(n)
171 171 keys = getkeys(ui, repo, mygpg, data, context)
172 172 if not keys:
173 173 continue
174 174 revs.setdefault(r, [])
175 175 revs[r].extend(keys)
176 176 for rev in sorted(revs, reverse=True):
177 177 for k in revs[rev]:
178 178 r = "%5d:%s" % (rev, hgnode.hex(repo.changelog.node(rev)))
179 179 ui.write("%-30s %s\n" % (keystr(ui, k), r))
180 180
181 181 @command("sigcheck", [], _('hg sigcheck REV'))
182 182 def sigcheck(ui, repo, rev):
183 183 """verify all the signatures there may be for a particular revision"""
184 184 mygpg = newgpg(ui)
185 185 rev = repo.lookup(rev)
186 186 hexrev = hgnode.hex(rev)
187 187 keys = []
188 188
189 189 for data, context in sigwalk(repo):
190 190 node, version, sig = data
191 191 if node == hexrev:
192 192 k = getkeys(ui, repo, mygpg, data, context)
193 193 if k:
194 194 keys.extend(k)
195 195
196 196 if not keys:
197 197 ui.write(_("no valid signature for %s\n") % hgnode.short(rev))
198 198 return
199 199
200 200 # print summary
201 201 ui.write(_("%s is signed by:\n") % hgnode.short(rev))
202 202 for key in keys:
203 203 ui.write(" %s\n" % keystr(ui, key))
204 204
205 205 def keystr(ui, key):
206 206 """associate a string to a key (username, comment)"""
207 207 keyid, user, fingerprint = key
208 208 comment = ui.config("gpg", fingerprint, None)
209 209 if comment:
210 210 return "%s (%s)" % (user, comment)
211 211 else:
212 212 return user
213 213
214 214 @command("sign",
215 215 [('l', 'local', None, _('make the signature local')),
216 216 ('f', 'force', None, _('sign even if the sigfile is modified')),
217 217 ('', 'no-commit', None, _('do not commit the sigfile after signing')),
218 218 ('k', 'key', '',
219 219 _('the key id to sign with'), _('ID')),
220 220 ('m', 'message', '',
221 221 _('use text as commit message'), _('TEXT')),
222 222 ('e', 'edit', False, _('invoke editor on commit messages')),
223 223 ] + commands.commitopts2,
224 224 _('hg sign [OPTION]... [REV]...'))
225 225 def sign(ui, repo, *revs, **opts):
226 226 """add a signature for the current or given revision
227 227
228 228 If no revision is given, the parent of the working directory is used,
229 229 or tip if no revision is checked out.
230 230
231 231 The ``gpg.cmd`` config setting can be used to specify the command
232 232 to run. A default key can be specified with ``gpg.key``.
233 233
234 234 See :hg:`help dates` for a list of formats valid for -d/--date.
235 235 """
236 236 with repo.wlock():
237 237 return _dosign(ui, repo, *revs, **opts)
238 238
239 239 def _dosign(ui, repo, *revs, **opts):
240 240 mygpg = newgpg(ui, **opts)
241 241 sigver = "0"
242 242 sigmessage = ""
243 243
244 244 date = opts.get('date')
245 245 if date:
246 246 opts['date'] = util.parsedate(date)
247 247
248 248 if revs:
249 249 nodes = [repo.lookup(n) for n in revs]
250 250 else:
251 251 nodes = [node for node in repo.dirstate.parents()
252 252 if node != hgnode.nullid]
253 253 if len(nodes) > 1:
254 254 raise error.Abort(_('uncommitted merge - please provide a '
255 255 'specific revision'))
256 256 if not nodes:
257 257 nodes = [repo.changelog.tip()]
258 258
259 259 for n in nodes:
260 260 hexnode = hgnode.hex(n)
261 261 ui.write(_("signing %d:%s\n") % (repo.changelog.rev(n),
262 262 hgnode.short(n)))
263 263 # build data
264 264 data = node2txt(repo, n, sigver)
265 265 sig = mygpg.sign(data)
266 266 if not sig:
267 267 raise error.Abort(_("error while signing"))
268 268 sig = binascii.b2a_base64(sig)
269 269 sig = sig.replace("\n", "")
270 270 sigmessage += "%s %s %s\n" % (hexnode, sigver, sig)
271 271
272 272 # write it
273 273 if opts['local']:
274 274 repo.vfs.append("localsigs", sigmessage)
275 275 return
276 276
277 277 if not opts["force"]:
278 278 msigs = match.exact(repo.root, '', ['.hgsigs'])
279 279 if any(repo.status(match=msigs, unknown=True, ignored=True)):
280 280 raise error.Abort(_("working copy of .hgsigs is changed "),
281 281 hint=_("please commit .hgsigs manually"))
282 282
283 283 sigsfile = repo.wfile(".hgsigs", "ab")
284 284 sigsfile.write(sigmessage)
285 285 sigsfile.close()
286 286
287 287 if '.hgsigs' not in repo.dirstate:
288 288 repo[None].add([".hgsigs"])
289 289
290 290 if opts["no_commit"]:
291 291 return
292 292
293 293 message = opts['message']
294 294 if not message:
295 295 # we don't translate commit messages
296 296 message = "\n".join(["Added signature for changeset %s"
297 297 % hgnode.short(n)
298 298 for n in nodes])
299 299 try:
300 300 editor = cmdutil.getcommiteditor(editform='gpg.sign', **opts)
301 301 repo.commit(message, opts['user'], opts['date'], match=msigs,
302 302 editor=editor)
303 303 except ValueError as inst:
304 304 raise error.Abort(str(inst))
305 305
306 306 def shortkey(ui, key):
307 307 if len(key) != 16:
308 308 ui.debug("key ID \"%s\" format error\n" % key)
309 309 return key
310 310
311 311 return key[-8:]
312 312
313 313 def node2txt(repo, node, ver):
314 314 """map a manifest into some text"""
315 315 if ver == "0":
316 316 return "%s\n" % hgnode.hex(node)
317 317 else:
318 318 raise error.Abort(_("unknown signature version"))
@@ -1,69 +1,69 b''
1 1 # ASCII graph log extension for Mercurial
2 2 #
3 3 # Copyright 2007 Joel Rosdahl <joel@rosdahl.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''command to view revision graphs from a shell (DEPRECATED)
9 9
10 10 The functionality of this extension has been include in core Mercurial
11 11 since version 2.3. Please use :hg:`log -G ...` instead.
12 12
13 13 This extension adds a --graph option to the incoming, outgoing and log
14 14 commands. When this options is given, an ASCII representation of the
15 15 revision graph is also shown.
16 16 '''
17 17
18 18 from __future__ import absolute_import
19 19
20 20 from mercurial.i18n import _
21 21 from mercurial import (
22 22 cmdutil,
23 23 commands,
24 24 )
25 25
26 26 cmdtable = {}
27 27 command = cmdutil.command(cmdtable)
28 # Note for extension authors: ONLY specify testedwith = 'internal' for
28 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
29 29 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
30 30 # be specifying the version(s) of Mercurial they are tested with, or
31 31 # leave the attribute unspecified.
32 testedwith = 'internal'
32 testedwith = 'ships-with-hg-core'
33 33
34 34 @command('glog',
35 35 [('f', 'follow', None,
36 36 _('follow changeset history, or file history across copies and renames')),
37 37 ('', 'follow-first', None,
38 38 _('only follow the first parent of merge changesets (DEPRECATED)')),
39 39 ('d', 'date', '', _('show revisions matching date spec'), _('DATE')),
40 40 ('C', 'copies', None, _('show copied files')),
41 41 ('k', 'keyword', [],
42 42 _('do case-insensitive search for a given text'), _('TEXT')),
43 43 ('r', 'rev', [], _('show the specified revision or revset'), _('REV')),
44 44 ('', 'removed', None, _('include revisions where files were removed')),
45 45 ('m', 'only-merges', None, _('show only merges (DEPRECATED)')),
46 46 ('u', 'user', [], _('revisions committed by user'), _('USER')),
47 47 ('', 'only-branch', [],
48 48 _('show only changesets within the given named branch (DEPRECATED)'),
49 49 _('BRANCH')),
50 50 ('b', 'branch', [],
51 51 _('show changesets within the given named branch'), _('BRANCH')),
52 52 ('P', 'prune', [],
53 53 _('do not display revision or any of its ancestors'), _('REV')),
54 54 ] + commands.logopts + commands.walkopts,
55 55 _('[OPTION]... [FILE]'),
56 56 inferrepo=True)
57 57 def glog(ui, repo, *pats, **opts):
58 58 """show revision history alongside an ASCII revision graph
59 59
60 60 Print a revision history alongside a revision graph drawn with
61 61 ASCII characters.
62 62
63 63 Nodes printed as an @ character are parents of the working
64 64 directory.
65 65
66 66 This is an alias to :hg:`log -G`.
67 67 """
68 68 opts['graph'] = True
69 69 return commands.log(ui, repo, *pats, **opts)
@@ -1,348 +1,348 b''
1 1 # Minimal support for git commands on an hg repository
2 2 #
3 3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''browse the repository in a graphical way
9 9
10 10 The hgk extension allows browsing the history of a repository in a
11 11 graphical way. It requires Tcl/Tk version 8.4 or later. (Tcl/Tk is not
12 12 distributed with Mercurial.)
13 13
14 14 hgk consists of two parts: a Tcl script that does the displaying and
15 15 querying of information, and an extension to Mercurial named hgk.py,
16 16 which provides hooks for hgk to get information. hgk can be found in
17 17 the contrib directory, and the extension is shipped in the hgext
18 18 repository, and needs to be enabled.
19 19
20 20 The :hg:`view` command will launch the hgk Tcl script. For this command
21 21 to work, hgk must be in your search path. Alternately, you can specify
22 22 the path to hgk in your configuration file::
23 23
24 24 [hgk]
25 25 path = /location/of/hgk
26 26
27 27 hgk can make use of the extdiff extension to visualize revisions.
28 28 Assuming you had already configured extdiff vdiff command, just add::
29 29
30 30 [hgk]
31 31 vdiff=vdiff
32 32
33 33 Revisions context menu will now display additional entries to fire
34 34 vdiff on hovered and selected revisions.
35 35 '''
36 36
37 37 from __future__ import absolute_import
38 38
39 39 import os
40 40
41 41 from mercurial.i18n import _
42 42 from mercurial.node import (
43 43 nullid,
44 44 nullrev,
45 45 short,
46 46 )
47 47 from mercurial import (
48 48 cmdutil,
49 49 commands,
50 50 obsolete,
51 51 patch,
52 52 scmutil,
53 53 )
54 54
55 55 cmdtable = {}
56 56 command = cmdutil.command(cmdtable)
57 # Note for extension authors: ONLY specify testedwith = 'internal' for
57 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
58 58 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
59 59 # be specifying the version(s) of Mercurial they are tested with, or
60 60 # leave the attribute unspecified.
61 testedwith = 'internal'
61 testedwith = 'ships-with-hg-core'
62 62
63 63 @command('debug-diff-tree',
64 64 [('p', 'patch', None, _('generate patch')),
65 65 ('r', 'recursive', None, _('recursive')),
66 66 ('P', 'pretty', None, _('pretty')),
67 67 ('s', 'stdin', None, _('stdin')),
68 68 ('C', 'copy', None, _('detect copies')),
69 69 ('S', 'search', "", _('search'))],
70 70 ('[OPTION]... NODE1 NODE2 [FILE]...'),
71 71 inferrepo=True)
72 72 def difftree(ui, repo, node1=None, node2=None, *files, **opts):
73 73 """diff trees from two commits"""
74 74 def __difftree(repo, node1, node2, files=[]):
75 75 assert node2 is not None
76 76 mmap = repo[node1].manifest()
77 77 mmap2 = repo[node2].manifest()
78 78 m = scmutil.match(repo[node1], files)
79 79 modified, added, removed = repo.status(node1, node2, m)[:3]
80 80 empty = short(nullid)
81 81
82 82 for f in modified:
83 83 # TODO get file permissions
84 84 ui.write((":100664 100664 %s %s M\t%s\t%s\n") %
85 85 (short(mmap[f]), short(mmap2[f]), f, f))
86 86 for f in added:
87 87 ui.write((":000000 100664 %s %s N\t%s\t%s\n") %
88 88 (empty, short(mmap2[f]), f, f))
89 89 for f in removed:
90 90 ui.write((":100664 000000 %s %s D\t%s\t%s\n") %
91 91 (short(mmap[f]), empty, f, f))
92 92 ##
93 93
94 94 while True:
95 95 if opts['stdin']:
96 96 try:
97 97 line = raw_input().split(' ')
98 98 node1 = line[0]
99 99 if len(line) > 1:
100 100 node2 = line[1]
101 101 else:
102 102 node2 = None
103 103 except EOFError:
104 104 break
105 105 node1 = repo.lookup(node1)
106 106 if node2:
107 107 node2 = repo.lookup(node2)
108 108 else:
109 109 node2 = node1
110 110 node1 = repo.changelog.parents(node1)[0]
111 111 if opts['patch']:
112 112 if opts['pretty']:
113 113 catcommit(ui, repo, node2, "")
114 114 m = scmutil.match(repo[node1], files)
115 115 diffopts = patch.difffeatureopts(ui)
116 116 diffopts.git = True
117 117 chunks = patch.diff(repo, node1, node2, match=m,
118 118 opts=diffopts)
119 119 for chunk in chunks:
120 120 ui.write(chunk)
121 121 else:
122 122 __difftree(repo, node1, node2, files=files)
123 123 if not opts['stdin']:
124 124 break
125 125
126 126 def catcommit(ui, repo, n, prefix, ctx=None):
127 127 nlprefix = '\n' + prefix
128 128 if ctx is None:
129 129 ctx = repo[n]
130 130 # use ctx.node() instead ??
131 131 ui.write(("tree %s\n" % short(ctx.changeset()[0])))
132 132 for p in ctx.parents():
133 133 ui.write(("parent %s\n" % p))
134 134
135 135 date = ctx.date()
136 136 description = ctx.description().replace("\0", "")
137 137 ui.write(("author %s %s %s\n" % (ctx.user(), int(date[0]), date[1])))
138 138
139 139 if 'committer' in ctx.extra():
140 140 ui.write(("committer %s\n" % ctx.extra()['committer']))
141 141
142 142 ui.write(("revision %d\n" % ctx.rev()))
143 143 ui.write(("branch %s\n" % ctx.branch()))
144 144 if obsolete.isenabled(repo, obsolete.createmarkersopt):
145 145 if ctx.obsolete():
146 146 ui.write(("obsolete\n"))
147 147 ui.write(("phase %s\n\n" % ctx.phasestr()))
148 148
149 149 if prefix != "":
150 150 ui.write("%s%s\n" % (prefix,
151 151 description.replace('\n', nlprefix).strip()))
152 152 else:
153 153 ui.write(description + "\n")
154 154 if prefix:
155 155 ui.write('\0')
156 156
157 157 @command('debug-merge-base', [], _('REV REV'))
158 158 def base(ui, repo, node1, node2):
159 159 """output common ancestor information"""
160 160 node1 = repo.lookup(node1)
161 161 node2 = repo.lookup(node2)
162 162 n = repo.changelog.ancestor(node1, node2)
163 163 ui.write(short(n) + "\n")
164 164
165 165 @command('debug-cat-file',
166 166 [('s', 'stdin', None, _('stdin'))],
167 167 _('[OPTION]... TYPE FILE'),
168 168 inferrepo=True)
169 169 def catfile(ui, repo, type=None, r=None, **opts):
170 170 """cat a specific revision"""
171 171 # in stdin mode, every line except the commit is prefixed with two
172 172 # spaces. This way the our caller can find the commit without magic
173 173 # strings
174 174 #
175 175 prefix = ""
176 176 if opts['stdin']:
177 177 try:
178 178 (type, r) = raw_input().split(' ')
179 179 prefix = " "
180 180 except EOFError:
181 181 return
182 182
183 183 else:
184 184 if not type or not r:
185 185 ui.warn(_("cat-file: type or revision not supplied\n"))
186 186 commands.help_(ui, 'cat-file')
187 187
188 188 while r:
189 189 if type != "commit":
190 190 ui.warn(_("aborting hg cat-file only understands commits\n"))
191 191 return 1
192 192 n = repo.lookup(r)
193 193 catcommit(ui, repo, n, prefix)
194 194 if opts['stdin']:
195 195 try:
196 196 (type, r) = raw_input().split(' ')
197 197 except EOFError:
198 198 break
199 199 else:
200 200 break
201 201
202 202 # git rev-tree is a confusing thing. You can supply a number of
203 203 # commit sha1s on the command line, and it walks the commit history
204 204 # telling you which commits are reachable from the supplied ones via
205 205 # a bitmask based on arg position.
206 206 # you can specify a commit to stop at by starting the sha1 with ^
207 207 def revtree(ui, args, repo, full="tree", maxnr=0, parents=False):
208 208 def chlogwalk():
209 209 count = len(repo)
210 210 i = count
211 211 l = [0] * 100
212 212 chunk = 100
213 213 while True:
214 214 if chunk > i:
215 215 chunk = i
216 216 i = 0
217 217 else:
218 218 i -= chunk
219 219
220 220 for x in xrange(chunk):
221 221 if i + x >= count:
222 222 l[chunk - x:] = [0] * (chunk - x)
223 223 break
224 224 if full is not None:
225 225 if (i + x) in repo:
226 226 l[x] = repo[i + x]
227 227 l[x].changeset() # force reading
228 228 else:
229 229 if (i + x) in repo:
230 230 l[x] = 1
231 231 for x in xrange(chunk - 1, -1, -1):
232 232 if l[x] != 0:
233 233 yield (i + x, full is not None and l[x] or None)
234 234 if i == 0:
235 235 break
236 236
237 237 # calculate and return the reachability bitmask for sha
238 238 def is_reachable(ar, reachable, sha):
239 239 if len(ar) == 0:
240 240 return 1
241 241 mask = 0
242 242 for i in xrange(len(ar)):
243 243 if sha in reachable[i]:
244 244 mask |= 1 << i
245 245
246 246 return mask
247 247
248 248 reachable = []
249 249 stop_sha1 = []
250 250 want_sha1 = []
251 251 count = 0
252 252
253 253 # figure out which commits they are asking for and which ones they
254 254 # want us to stop on
255 255 for i, arg in enumerate(args):
256 256 if arg.startswith('^'):
257 257 s = repo.lookup(arg[1:])
258 258 stop_sha1.append(s)
259 259 want_sha1.append(s)
260 260 elif arg != 'HEAD':
261 261 want_sha1.append(repo.lookup(arg))
262 262
263 263 # calculate the graph for the supplied commits
264 264 for i, n in enumerate(want_sha1):
265 265 reachable.append(set())
266 266 visit = [n]
267 267 reachable[i].add(n)
268 268 while visit:
269 269 n = visit.pop(0)
270 270 if n in stop_sha1:
271 271 continue
272 272 for p in repo.changelog.parents(n):
273 273 if p not in reachable[i]:
274 274 reachable[i].add(p)
275 275 visit.append(p)
276 276 if p in stop_sha1:
277 277 continue
278 278
279 279 # walk the repository looking for commits that are in our
280 280 # reachability graph
281 281 for i, ctx in chlogwalk():
282 282 if i not in repo:
283 283 continue
284 284 n = repo.changelog.node(i)
285 285 mask = is_reachable(want_sha1, reachable, n)
286 286 if mask:
287 287 parentstr = ""
288 288 if parents:
289 289 pp = repo.changelog.parents(n)
290 290 if pp[0] != nullid:
291 291 parentstr += " " + short(pp[0])
292 292 if pp[1] != nullid:
293 293 parentstr += " " + short(pp[1])
294 294 if not full:
295 295 ui.write("%s%s\n" % (short(n), parentstr))
296 296 elif full == "commit":
297 297 ui.write("%s%s\n" % (short(n), parentstr))
298 298 catcommit(ui, repo, n, ' ', ctx)
299 299 else:
300 300 (p1, p2) = repo.changelog.parents(n)
301 301 (h, h1, h2) = map(short, (n, p1, p2))
302 302 (i1, i2) = map(repo.changelog.rev, (p1, p2))
303 303
304 304 date = ctx.date()[0]
305 305 ui.write("%s %s:%s" % (date, h, mask))
306 306 mask = is_reachable(want_sha1, reachable, p1)
307 307 if i1 != nullrev and mask > 0:
308 308 ui.write("%s:%s " % (h1, mask)),
309 309 mask = is_reachable(want_sha1, reachable, p2)
310 310 if i2 != nullrev and mask > 0:
311 311 ui.write("%s:%s " % (h2, mask))
312 312 ui.write("\n")
313 313 if maxnr and count >= maxnr:
314 314 break
315 315 count += 1
316 316
317 317 # git rev-list tries to order things by date, and has the ability to stop
318 318 # at a given commit without walking the whole repo. TODO add the stop
319 319 # parameter
320 320 @command('debug-rev-list',
321 321 [('H', 'header', None, _('header')),
322 322 ('t', 'topo-order', None, _('topo-order')),
323 323 ('p', 'parents', None, _('parents')),
324 324 ('n', 'max-count', 0, _('max-count'))],
325 325 ('[OPTION]... REV...'))
326 326 def revlist(ui, repo, *revs, **opts):
327 327 """print revisions"""
328 328 if opts['header']:
329 329 full = "commit"
330 330 else:
331 331 full = None
332 332 copy = [x for x in revs]
333 333 revtree(ui, copy, repo, full, opts['max_count'], opts['parents'])
334 334
335 335 @command('view',
336 336 [('l', 'limit', '',
337 337 _('limit number of changes displayed'), _('NUM'))],
338 338 _('[-l LIMIT] [REVRANGE]'))
339 339 def view(ui, repo, *etc, **opts):
340 340 "start interactive history viewer"
341 341 os.chdir(repo.root)
342 342 optstr = ' '.join(['--%s %s' % (k, v) for k, v in opts.iteritems() if v])
343 343 if repo.filtername is None:
344 344 optstr += '--hidden'
345 345
346 346 cmd = ui.config("hgk", "path", "hgk") + " %s %s" % (optstr, " ".join(etc))
347 347 ui.debug("running %s\n" % cmd)
348 348 ui.system(cmd)
@@ -1,97 +1,97 b''
1 1 # highlight - syntax highlighting in hgweb, based on Pygments
2 2 #
3 3 # Copyright 2008, 2009 Patrick Mezard <pmezard@gmail.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # The original module was split in an interface and an implementation
9 9 # file to defer pygments loading and speedup extension setup.
10 10
11 11 """syntax highlighting for hgweb (requires Pygments)
12 12
13 13 It depends on the Pygments syntax highlighting library:
14 14 http://pygments.org/
15 15
16 16 There are the following configuration options::
17 17
18 18 [web]
19 19 pygments_style = <style> (default: colorful)
20 20 highlightfiles = <fileset> (default: size('<5M'))
21 21 highlightonlymatchfilename = <bool> (default False)
22 22
23 23 ``highlightonlymatchfilename`` will only highlight files if their type could
24 24 be identified by their filename. When this is not enabled (the default),
25 25 Pygments will try very hard to identify the file type from content and any
26 26 match (even matches with a low confidence score) will be used.
27 27 """
28 28
29 29 from __future__ import absolute_import
30 30
31 31 from . import highlight
32 32 from mercurial.hgweb import (
33 33 common,
34 34 webcommands,
35 35 webutil,
36 36 )
37 37
38 38 from mercurial import (
39 39 encoding,
40 40 extensions,
41 41 fileset,
42 42 )
43 43
44 # Note for extension authors: ONLY specify testedwith = 'internal' for
44 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
45 45 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
46 46 # be specifying the version(s) of Mercurial they are tested with, or
47 47 # leave the attribute unspecified.
48 testedwith = 'internal'
48 testedwith = 'ships-with-hg-core'
49 49
50 50 def pygmentize(web, field, fctx, tmpl):
51 51 style = web.config('web', 'pygments_style', 'colorful')
52 52 expr = web.config('web', 'highlightfiles', "size('<5M')")
53 53 filenameonly = web.configbool('web', 'highlightonlymatchfilename', False)
54 54
55 55 ctx = fctx.changectx()
56 56 tree = fileset.parse(expr)
57 57 mctx = fileset.matchctx(ctx, subset=[fctx.path()], status=None)
58 58 if fctx.path() in fileset.getset(mctx, tree):
59 59 highlight.pygmentize(field, fctx, style, tmpl,
60 60 guessfilenameonly=filenameonly)
61 61
62 62 def filerevision_highlight(orig, web, req, tmpl, fctx):
63 63 mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
64 64 # only pygmentize for mimetype containing 'html' so we both match
65 65 # 'text/html' and possibly 'application/xhtml+xml' in the future
66 66 # so that we don't have to touch the extension when the mimetype
67 67 # for a template changes; also hgweb optimizes the case that a
68 68 # raw file is sent using rawfile() and doesn't call us, so we
69 69 # can't clash with the file's content-type here in case we
70 70 # pygmentize a html file
71 71 if 'html' in mt:
72 72 pygmentize(web, 'fileline', fctx, tmpl)
73 73
74 74 return orig(web, req, tmpl, fctx)
75 75
76 76 def annotate_highlight(orig, web, req, tmpl):
77 77 mt = ''.join(tmpl('mimetype', encoding=encoding.encoding))
78 78 if 'html' in mt:
79 79 fctx = webutil.filectx(web.repo, req)
80 80 pygmentize(web, 'annotateline', fctx, tmpl)
81 81
82 82 return orig(web, req, tmpl)
83 83
84 84 def generate_css(web, req, tmpl):
85 85 pg_style = web.config('web', 'pygments_style', 'colorful')
86 86 fmter = highlight.HtmlFormatter(style=pg_style)
87 87 req.respond(common.HTTP_OK, 'text/css')
88 88 return ['/* pygments_style = %s */\n\n' % pg_style,
89 89 fmter.get_style_defs('')]
90 90
91 91 def extsetup():
92 92 # monkeypatch in the new version
93 93 extensions.wrapfunction(webcommands, '_filerevision',
94 94 filerevision_highlight)
95 95 extensions.wrapfunction(webcommands, 'annotate', annotate_highlight)
96 96 webcommands.highlightcss = generate_css
97 97 webcommands.__all__.append('highlightcss')
@@ -1,1632 +1,1632 b''
1 1 # histedit.py - interactive history editing for mercurial
2 2 #
3 3 # Copyright 2009 Augie Fackler <raf@durin42.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """interactive history editing
8 8
9 9 With this extension installed, Mercurial gains one new command: histedit. Usage
10 10 is as follows, assuming the following history::
11 11
12 12 @ 3[tip] 7c2fd3b9020c 2009-04-27 18:04 -0500 durin42
13 13 | Add delta
14 14 |
15 15 o 2 030b686bedc4 2009-04-27 18:04 -0500 durin42
16 16 | Add gamma
17 17 |
18 18 o 1 c561b4e977df 2009-04-27 18:04 -0500 durin42
19 19 | Add beta
20 20 |
21 21 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
22 22 Add alpha
23 23
24 24 If you were to run ``hg histedit c561b4e977df``, you would see the following
25 25 file open in your editor::
26 26
27 27 pick c561b4e977df Add beta
28 28 pick 030b686bedc4 Add gamma
29 29 pick 7c2fd3b9020c Add delta
30 30
31 31 # Edit history between c561b4e977df and 7c2fd3b9020c
32 32 #
33 33 # Commits are listed from least to most recent
34 34 #
35 35 # Commands:
36 36 # p, pick = use commit
37 37 # e, edit = use commit, but stop for amending
38 38 # f, fold = use commit, but combine it with the one above
39 39 # r, roll = like fold, but discard this commit's description
40 40 # d, drop = remove commit from history
41 41 # m, mess = edit commit message without changing commit content
42 42 #
43 43
44 44 In this file, lines beginning with ``#`` are ignored. You must specify a rule
45 45 for each revision in your history. For example, if you had meant to add gamma
46 46 before beta, and then wanted to add delta in the same revision as beta, you
47 47 would reorganize the file to look like this::
48 48
49 49 pick 030b686bedc4 Add gamma
50 50 pick c561b4e977df Add beta
51 51 fold 7c2fd3b9020c Add delta
52 52
53 53 # Edit history between c561b4e977df and 7c2fd3b9020c
54 54 #
55 55 # Commits are listed from least to most recent
56 56 #
57 57 # Commands:
58 58 # p, pick = use commit
59 59 # e, edit = use commit, but stop for amending
60 60 # f, fold = use commit, but combine it with the one above
61 61 # r, roll = like fold, but discard this commit's description
62 62 # d, drop = remove commit from history
63 63 # m, mess = edit commit message without changing commit content
64 64 #
65 65
66 66 At which point you close the editor and ``histedit`` starts working. When you
67 67 specify a ``fold`` operation, ``histedit`` will open an editor when it folds
68 68 those revisions together, offering you a chance to clean up the commit message::
69 69
70 70 Add beta
71 71 ***
72 72 Add delta
73 73
74 74 Edit the commit message to your liking, then close the editor. For
75 75 this example, let's assume that the commit message was changed to
76 76 ``Add beta and delta.`` After histedit has run and had a chance to
77 77 remove any old or temporary revisions it needed, the history looks
78 78 like this::
79 79
80 80 @ 2[tip] 989b4d060121 2009-04-27 18:04 -0500 durin42
81 81 | Add beta and delta.
82 82 |
83 83 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
84 84 | Add gamma
85 85 |
86 86 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
87 87 Add alpha
88 88
89 89 Note that ``histedit`` does *not* remove any revisions (even its own temporary
90 90 ones) until after it has completed all the editing operations, so it will
91 91 probably perform several strip operations when it's done. For the above example,
92 92 it had to run strip twice. Strip can be slow depending on a variety of factors,
93 93 so you might need to be a little patient. You can choose to keep the original
94 94 revisions by passing the ``--keep`` flag.
95 95
96 96 The ``edit`` operation will drop you back to a command prompt,
97 97 allowing you to edit files freely, or even use ``hg record`` to commit
98 98 some changes as a separate commit. When you're done, any remaining
99 99 uncommitted changes will be committed as well. When done, run ``hg
100 100 histedit --continue`` to finish this step. You'll be prompted for a
101 101 new commit message, but the default commit message will be the
102 102 original message for the ``edit`` ed revision.
103 103
104 104 The ``message`` operation will give you a chance to revise a commit
105 105 message without changing the contents. It's a shortcut for doing
106 106 ``edit`` immediately followed by `hg histedit --continue``.
107 107
108 108 If ``histedit`` encounters a conflict when moving a revision (while
109 109 handling ``pick`` or ``fold``), it'll stop in a similar manner to
110 110 ``edit`` with the difference that it won't prompt you for a commit
111 111 message when done. If you decide at this point that you don't like how
112 112 much work it will be to rearrange history, or that you made a mistake,
113 113 you can use ``hg histedit --abort`` to abandon the new changes you
114 114 have made and return to the state before you attempted to edit your
115 115 history.
116 116
117 117 If we clone the histedit-ed example repository above and add four more
118 118 changes, such that we have the following history::
119 119
120 120 @ 6[tip] 038383181893 2009-04-27 18:04 -0500 stefan
121 121 | Add theta
122 122 |
123 123 o 5 140988835471 2009-04-27 18:04 -0500 stefan
124 124 | Add eta
125 125 |
126 126 o 4 122930637314 2009-04-27 18:04 -0500 stefan
127 127 | Add zeta
128 128 |
129 129 o 3 836302820282 2009-04-27 18:04 -0500 stefan
130 130 | Add epsilon
131 131 |
132 132 o 2 989b4d060121 2009-04-27 18:04 -0500 durin42
133 133 | Add beta and delta.
134 134 |
135 135 o 1 081603921c3f 2009-04-27 18:04 -0500 durin42
136 136 | Add gamma
137 137 |
138 138 o 0 d8d2fcd0e319 2009-04-27 18:04 -0500 durin42
139 139 Add alpha
140 140
141 141 If you run ``hg histedit --outgoing`` on the clone then it is the same
142 142 as running ``hg histedit 836302820282``. If you need plan to push to a
143 143 repository that Mercurial does not detect to be related to the source
144 144 repo, you can add a ``--force`` option.
145 145
146 146 Config
147 147 ------
148 148
149 149 Histedit rule lines are truncated to 80 characters by default. You
150 150 can customize this behavior by setting a different length in your
151 151 configuration file::
152 152
153 153 [histedit]
154 154 linelen = 120 # truncate rule lines at 120 characters
155 155
156 156 ``hg histedit`` attempts to automatically choose an appropriate base
157 157 revision to use. To change which base revision is used, define a
158 158 revset in your configuration file::
159 159
160 160 [histedit]
161 161 defaultrev = only(.) & draft()
162 162
163 163 By default each edited revision needs to be present in histedit commands.
164 164 To remove revision you need to use ``drop`` operation. You can configure
165 165 the drop to be implicit for missing commits by adding::
166 166
167 167 [histedit]
168 168 dropmissing = True
169 169
170 170 """
171 171
172 172 from __future__ import absolute_import
173 173
174 174 import errno
175 175 import os
176 176 import sys
177 177
178 178 from mercurial.i18n import _
179 179 from mercurial import (
180 180 bundle2,
181 181 cmdutil,
182 182 context,
183 183 copies,
184 184 destutil,
185 185 discovery,
186 186 error,
187 187 exchange,
188 188 extensions,
189 189 hg,
190 190 lock,
191 191 merge as mergemod,
192 192 node,
193 193 obsolete,
194 194 repair,
195 195 scmutil,
196 196 util,
197 197 )
198 198
199 199 pickle = util.pickle
200 200 release = lock.release
201 201 cmdtable = {}
202 202 command = cmdutil.command(cmdtable)
203 203
204 204 class _constraints(object):
205 205 # aborts if there are multiple rules for one node
206 206 noduplicates = 'noduplicates'
207 207 # abort if the node does belong to edited stack
208 208 forceother = 'forceother'
209 209 # abort if the node doesn't belong to edited stack
210 210 noother = 'noother'
211 211
212 212 @classmethod
213 213 def known(cls):
214 214 return set([v for k, v in cls.__dict__.items() if k[0] != '_'])
215 215
216 # Note for extension authors: ONLY specify testedwith = 'internal' for
216 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
217 217 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
218 218 # be specifying the version(s) of Mercurial they are tested with, or
219 219 # leave the attribute unspecified.
220 testedwith = 'internal'
220 testedwith = 'ships-with-hg-core'
221 221
222 222 actiontable = {}
223 223 primaryactions = set()
224 224 secondaryactions = set()
225 225 tertiaryactions = set()
226 226 internalactions = set()
227 227
228 228 def geteditcomment(ui, first, last):
229 229 """ construct the editor comment
230 230 The comment includes::
231 231 - an intro
232 232 - sorted primary commands
233 233 - sorted short commands
234 234 - sorted long commands
235 235 - additional hints
236 236
237 237 Commands are only included once.
238 238 """
239 239 intro = _("""Edit history between %s and %s
240 240
241 241 Commits are listed from least to most recent
242 242
243 243 You can reorder changesets by reordering the lines
244 244
245 245 Commands:
246 246 """)
247 247 actions = []
248 248 def addverb(v):
249 249 a = actiontable[v]
250 250 lines = a.message.split("\n")
251 251 if len(a.verbs):
252 252 v = ', '.join(sorted(a.verbs, key=lambda v: len(v)))
253 253 actions.append(" %s = %s" % (v, lines[0]))
254 254 actions.extend([' %s' for l in lines[1:]])
255 255
256 256 for v in (
257 257 sorted(primaryactions) +
258 258 sorted(secondaryactions) +
259 259 sorted(tertiaryactions)
260 260 ):
261 261 addverb(v)
262 262 actions.append('')
263 263
264 264 hints = []
265 265 if ui.configbool('histedit', 'dropmissing'):
266 266 hints.append("Deleting a changeset from the list "
267 267 "will DISCARD it from the edited history!")
268 268
269 269 lines = (intro % (first, last)).split('\n') + actions + hints
270 270
271 271 return ''.join(['# %s\n' % l if l else '#\n' for l in lines])
272 272
273 273 class histeditstate(object):
274 274 def __init__(self, repo, parentctxnode=None, actions=None, keep=None,
275 275 topmost=None, replacements=None, lock=None, wlock=None):
276 276 self.repo = repo
277 277 self.actions = actions
278 278 self.keep = keep
279 279 self.topmost = topmost
280 280 self.parentctxnode = parentctxnode
281 281 self.lock = lock
282 282 self.wlock = wlock
283 283 self.backupfile = None
284 284 if replacements is None:
285 285 self.replacements = []
286 286 else:
287 287 self.replacements = replacements
288 288
289 289 def read(self):
290 290 """Load histedit state from disk and set fields appropriately."""
291 291 try:
292 292 state = self.repo.vfs.read('histedit-state')
293 293 except IOError as err:
294 294 if err.errno != errno.ENOENT:
295 295 raise
296 296 cmdutil.wrongtooltocontinue(self.repo, _('histedit'))
297 297
298 298 if state.startswith('v1\n'):
299 299 data = self._load()
300 300 parentctxnode, rules, keep, topmost, replacements, backupfile = data
301 301 else:
302 302 data = pickle.loads(state)
303 303 parentctxnode, rules, keep, topmost, replacements = data
304 304 backupfile = None
305 305
306 306 self.parentctxnode = parentctxnode
307 307 rules = "\n".join(["%s %s" % (verb, rest) for [verb, rest] in rules])
308 308 actions = parserules(rules, self)
309 309 self.actions = actions
310 310 self.keep = keep
311 311 self.topmost = topmost
312 312 self.replacements = replacements
313 313 self.backupfile = backupfile
314 314
315 315 def write(self):
316 316 fp = self.repo.vfs('histedit-state', 'w')
317 317 fp.write('v1\n')
318 318 fp.write('%s\n' % node.hex(self.parentctxnode))
319 319 fp.write('%s\n' % node.hex(self.topmost))
320 320 fp.write('%s\n' % self.keep)
321 321 fp.write('%d\n' % len(self.actions))
322 322 for action in self.actions:
323 323 fp.write('%s\n' % action.tostate())
324 324 fp.write('%d\n' % len(self.replacements))
325 325 for replacement in self.replacements:
326 326 fp.write('%s%s\n' % (node.hex(replacement[0]), ''.join(node.hex(r)
327 327 for r in replacement[1])))
328 328 backupfile = self.backupfile
329 329 if not backupfile:
330 330 backupfile = ''
331 331 fp.write('%s\n' % backupfile)
332 332 fp.close()
333 333
334 334 def _load(self):
335 335 fp = self.repo.vfs('histedit-state', 'r')
336 336 lines = [l[:-1] for l in fp.readlines()]
337 337
338 338 index = 0
339 339 lines[index] # version number
340 340 index += 1
341 341
342 342 parentctxnode = node.bin(lines[index])
343 343 index += 1
344 344
345 345 topmost = node.bin(lines[index])
346 346 index += 1
347 347
348 348 keep = lines[index] == 'True'
349 349 index += 1
350 350
351 351 # Rules
352 352 rules = []
353 353 rulelen = int(lines[index])
354 354 index += 1
355 355 for i in xrange(rulelen):
356 356 ruleaction = lines[index]
357 357 index += 1
358 358 rule = lines[index]
359 359 index += 1
360 360 rules.append((ruleaction, rule))
361 361
362 362 # Replacements
363 363 replacements = []
364 364 replacementlen = int(lines[index])
365 365 index += 1
366 366 for i in xrange(replacementlen):
367 367 replacement = lines[index]
368 368 original = node.bin(replacement[:40])
369 369 succ = [node.bin(replacement[i:i + 40]) for i in
370 370 range(40, len(replacement), 40)]
371 371 replacements.append((original, succ))
372 372 index += 1
373 373
374 374 backupfile = lines[index]
375 375 index += 1
376 376
377 377 fp.close()
378 378
379 379 return parentctxnode, rules, keep, topmost, replacements, backupfile
380 380
381 381 def clear(self):
382 382 if self.inprogress():
383 383 self.repo.vfs.unlink('histedit-state')
384 384
385 385 def inprogress(self):
386 386 return self.repo.vfs.exists('histedit-state')
387 387
388 388
389 389 class histeditaction(object):
390 390 def __init__(self, state, node):
391 391 self.state = state
392 392 self.repo = state.repo
393 393 self.node = node
394 394
395 395 @classmethod
396 396 def fromrule(cls, state, rule):
397 397 """Parses the given rule, returning an instance of the histeditaction.
398 398 """
399 399 rulehash = rule.strip().split(' ', 1)[0]
400 400 try:
401 401 rev = node.bin(rulehash)
402 402 except TypeError:
403 403 raise error.ParseError("invalid changeset %s" % rulehash)
404 404 return cls(state, rev)
405 405
406 406 def verify(self, prev):
407 407 """ Verifies semantic correctness of the rule"""
408 408 repo = self.repo
409 409 ha = node.hex(self.node)
410 410 try:
411 411 self.node = repo[ha].node()
412 412 except error.RepoError:
413 413 raise error.ParseError(_('unknown changeset %s listed')
414 414 % ha[:12])
415 415
416 416 def torule(self):
417 417 """build a histedit rule line for an action
418 418
419 419 by default lines are in the form:
420 420 <hash> <rev> <summary>
421 421 """
422 422 ctx = self.repo[self.node]
423 423 summary = _getsummary(ctx)
424 424 line = '%s %s %d %s' % (self.verb, ctx, ctx.rev(), summary)
425 425 # trim to 75 columns by default so it's not stupidly wide in my editor
426 426 # (the 5 more are left for verb)
427 427 maxlen = self.repo.ui.configint('histedit', 'linelen', default=80)
428 428 maxlen = max(maxlen, 22) # avoid truncating hash
429 429 return util.ellipsis(line, maxlen)
430 430
431 431 def tostate(self):
432 432 """Print an action in format used by histedit state files
433 433 (the first line is a verb, the remainder is the second)
434 434 """
435 435 return "%s\n%s" % (self.verb, node.hex(self.node))
436 436
437 437 def constraints(self):
438 438 """Return a set of constrains that this action should be verified for
439 439 """
440 440 return set([_constraints.noduplicates, _constraints.noother])
441 441
442 442 def nodetoverify(self):
443 443 """Returns a node associated with the action that will be used for
444 444 verification purposes.
445 445
446 446 If the action doesn't correspond to node it should return None
447 447 """
448 448 return self.node
449 449
450 450 def run(self):
451 451 """Runs the action. The default behavior is simply apply the action's
452 452 rulectx onto the current parentctx."""
453 453 self.applychange()
454 454 self.continuedirty()
455 455 return self.continueclean()
456 456
    def applychange(self):
        """Applies the changes from this action's rulectx onto the current
        parentctx, but does not commit them."""
        repo = self.repo
        rulectx = repo[self.node]
        # buffer ui output so a clean run stays quiet; on conflict the
        # captured output is replayed for the user before aborting
        repo.ui.pushbuffer(error=True, labeled=True)
        hg.update(repo, self.state.parentctxnode, quietempty=True)
        stats = applychanges(repo.ui, repo, rulectx, {})
        if stats and stats[3] > 0:
            # stats[3] is presumably the unresolved-file count from the
            # graft -- TODO confirm against mergemod.graft's return value
            buf = repo.ui.popbuffer()
            repo.ui.write(*buf)
            raise error.InterventionRequired(
                _('Fix up the change (%s %s)') %
                (self.verb, node.short(self.node)),
                hint=_('hg histedit --continue to resume'))
        else:
            repo.ui.popbuffer()
474 474
475 475 def continuedirty(self):
476 476 """Continues the action when changes have been applied to the working
477 477 copy. The default behavior is to commit the dirty changes."""
478 478 repo = self.repo
479 479 rulectx = repo[self.node]
480 480
481 481 editor = self.commiteditor()
482 482 commit = commitfuncfor(repo, rulectx)
483 483
484 484 commit(text=rulectx.description(), user=rulectx.user(),
485 485 date=rulectx.date(), extra=rulectx.extra(), editor=editor)
486 486
    def commiteditor(self):
        """The editor to be used to edit the commit message."""
        # False means "no editor": the original message is reused as-is;
        # subclasses like 'edit' and 'mess' return a real editor instead
        return False
490 490
    def continueclean(self):
        """Continues the action when the working copy is clean. The default
        behavior is to accept the current commit as the new version of the
        rulectx."""
        ctx = self.repo['.']
        if ctx.node() == self.state.parentctxnode:
            # nothing was committed on top of the parent: the changeset
            # produced no changes, so it is recorded with no successors
            self.repo.ui.warn(_('%s: skipping changeset (no changes)\n') %
                              node.short(self.node))
            return ctx, [(self.node, tuple())]
        if ctx.node() == self.node:
            # Nothing changed
            return ctx, []
        # the rule's changeset was rewritten into the current commit
        return ctx, [(self.node, (ctx.node(),))]
504 504
def commitfuncfor(repo, src):
    """Build a commit function for the replacement of <src>

    This function ensure we apply the same treatment to all changesets.

    - Add a 'histedit_source' entry in extra.

    Note that fold has its own separated logic because its handling is a bit
    different and not easily factored out of the fold method.
    """
    phasemin = src.phase()
    def commitfunc(**kwargs):
        # create the replacement with the same phase as the source
        # changeset by temporarily overriding phases.new-commit
        phasebackup = repo.ui.backupconfig('phases', 'new-commit')
        try:
            repo.ui.setconfig('phases', 'new-commit', phasemin,
                              'histedit')
            extra = kwargs.get('extra', {}).copy()
            extra['histedit_source'] = src.hex()
            kwargs['extra'] = extra
            return repo.commit(**kwargs)
        finally:
            repo.ui.restoreconfig(phasebackup)
    return commitfunc
528 528
def applychanges(ui, repo, ctx, opts):
    """Merge changeset from ctx (only) in the current working directory

    Returns the merge stats from the graft, or None when the change was
    applied in place (ctx's parent is already the working copy parent).
    """
    wcpar = repo.dirstate.parents()[0]
    if ctx.p1().node() == wcpar:
        # edits are "in place" we do not need to make any merge,
        # just applies changes on parent for editing
        cmdutil.revert(ui, repo, ctx, (wcpar, node.nullid), all=True)
        stats = None
    else:
        try:
            # ui.forcemerge is an internal variable, do not document
            repo.ui.setconfig('ui', 'forcemerge', opts.get('tool', ''),
                              'histedit')
            stats = mergemod.graft(repo, ctx, ctx.p1(), ['local', 'histedit'])
        finally:
            repo.ui.setconfig('ui', 'forcemerge', '', 'histedit')
    return stats
546 546
def collapse(repo, first, last, commitopts, skipprompt=False):
    """collapse the set of revisions from first to last as new one.

    Expected commit options are:
        - message
        - date
        - username
    Commit message is edited in all cases.

    This function works in memory."""
    ctxs = list(repo.set('%d::%d', first, last))
    if not ctxs:
        return None
    for c in ctxs:
        if not c.mutable():
            raise error.ParseError(
                _("cannot fold into public change %s") % node.short(c.node()))
    base = first.parents()[0]

    # commit a new version of the old changeset, including the update
    # collect all files which might be affected
    files = set()
    for ctx in ctxs:
        files.update(ctx.files())

    # Recompute copies (avoid recording a -> b -> a)
    copied = copies.pathcopies(base, last)

    # prune files which were reverted by the updates
    files = [f for f in files if not cmdutil.samefile(f, last, base)]
    # commit version of these files as defined by head
    headmf = last.manifest()
    def filectxfn(repo, ctx, path):
        # serve file contents from the last revision's manifest;
        # returning None marks the path as removed in the new commit
        if path in headmf:
            fctx = last[path]
            flags = fctx.flags()
            mctx = context.memfilectx(repo,
                                      fctx.path(), fctx.data(),
                                      islink='l' in flags,
                                      isexec='x' in flags,
                                      copied=copied.get(path))
            return mctx
        return None

    if commitopts.get('message'):
        message = commitopts['message']
    else:
        message = first.description()
    user = commitopts.get('user')
    date = commitopts.get('date')
    extra = commitopts.get('extra')

    # the collapsed commit keeps the parents of the first revision
    parents = (first.p1().node(), first.p2().node())
    editor = None
    if not skipprompt:
        editor = cmdutil.getcommiteditor(edit=True, editform='histedit.fold')
    new = context.memctx(repo,
                         parents=parents,
                         text=message,
                         files=files,
                         filectxfn=filectxfn,
                         user=user,
                         date=date,
                         extra=extra,
                         editor=editor)
    return repo.commitctx(new)
613 613
def _isdirtywc(repo):
    """True if the working copy has uncommitted changes (files missing
    from disk count as dirty too, via missing=True)."""
    return repo[None].dirty(missing=True)
616 616
def abortdirty():
    # single error message shared by every code path that refuses to
    # proceed over a dirty working copy
    raise error.Abort(_('working copy has pending changes'),
                      hint=_('amend, commit, or revert them and run histedit '
                             '--continue, or abort with histedit --abort'))
621 621
def action(verbs, message, priority=False, internal=False):
    """Class decorator registering a histedit action under *verbs*.

    The first verb is the canonical one; it is filed into exactly one of
    the primary/internal/secondary/tertiary verb buckets. Every verb
    (including abbreviations) is then mapped to the class in the global
    actiontable.
    """
    def register(cls):
        # an action cannot be both user-priority and internal-only
        assert not (priority and internal)
        canonical = verbs[0]
        if priority:
            primaryactions.add(canonical)
        elif internal:
            internalactions.add(canonical)
        elif len(verbs) > 1:
            secondaryactions.add(canonical)
        else:
            tertiaryactions.add(canonical)

        cls.verb = canonical
        cls.verbs = verbs
        cls.message = message
        for name in verbs:
            actiontable[name] = cls
        return cls
    return register
642 642
@action(['pick', 'p'],
        _('use commit'),
        priority=True)
class pick(histeditaction):
    def run(self):
        # fast path: if the commit already sits on the current parent
        # there is nothing to rewrite; keep it untouched
        rulectx = self.repo[self.node]
        if rulectx.parents()[0].node() == self.state.parentctxnode:
            self.repo.ui.debug('node %s unchanged\n' % node.short(self.node))
            return rulectx, []

        return super(pick, self).run()
654 654
@action(['edit', 'e'],
        _('use commit, but stop for amending'),
        priority=True)
class edit(histeditaction):
    def run(self):
        """Apply the changeset onto the parent, then stop so the user can
        amend; the session is resumed with 'hg histedit --continue'."""
        repo = self.repo
        rulectx = repo[self.node]
        hg.update(repo, self.state.parentctxnode, quietempty=True)
        applychanges(repo.ui, repo, rulectx, {})
        raise error.InterventionRequired(
            _('Editing (%s), you may commit or record as needed now.')
            % node.short(self.node),
            hint=_('hg histedit --continue to resume'))

    def commiteditor(self):
        # always open an editor when committing the amended change
        return cmdutil.getcommiteditor(edit=True, editform='histedit.edit')
671 671
@action(['fold', 'f'],
        _('use commit, but combine it with the one above'))
class fold(histeditaction):
    """Combine the rule's changeset with the previously kept one."""

    def verify(self, prev):
        """ Verifies semantic correctness of the fold rule"""
        super(fold, self).verify(prev)
        repo = self.repo
        if not prev:
            c = repo[self.node].parents()[0]
        elif prev.verb not in ('pick', 'base'):
            # the previous action does not produce a commit to fold into;
            # nothing more to check here
            return
        else:
            c = repo[prev.node]
        if not c.mutable():
            raise error.ParseError(
                _("cannot fold into public change %s") % node.short(c.node()))

    def continuedirty(self):
        # commit pending changes under a temporary message; the real
        # combined message is assembled later in finishfold()
        repo = self.repo
        rulectx = repo[self.node]

        commit = commitfuncfor(repo, rulectx)
        commit(text='fold-temp-revision %s' % node.short(self.node),
               user=rulectx.user(), date=rulectx.date(),
               extra=rulectx.extra())

    def continueclean(self):
        repo = self.repo
        ctx = repo['.']
        rulectx = repo[self.node]
        parentctxnode = self.state.parentctxnode
        if ctx.node() == parentctxnode:
            # the fold produced no changes at all
            repo.ui.warn(_('%s: empty changeset\n') %
                         node.short(self.node))
            return ctx, [(self.node, (parentctxnode,))]

        parentctx = repo[parentctxnode]
        newcommits = set(c.node() for c in repo.set('(%d::. - %d)', parentctx,
                                                    parentctx))
        if not newcommits:
            repo.ui.warn(_('%s: cannot fold - working copy is not a '
                           'descendant of previous commit %s\n') %
                         (node.short(self.node), node.short(parentctxnode)))
            return ctx, [(self.node, (ctx.node(),))]

        # commits strictly between the parent and '.' get folded away too
        middlecommits = newcommits.copy()
        middlecommits.discard(ctx.node())

        return self.finishfold(repo.ui, repo, parentctx, rulectx, ctx.node(),
                               middlecommits)

    def skipprompt(self):
        """Returns true if the rule should skip the message editor.

        For example, 'fold' wants to show an editor, but 'rollup'
        doesn't want to.
        """
        return False

    def mergedescs(self):
        """Returns true if the rule should merge messages of multiple changes.

        This exists mainly so that 'rollup' rules can be a subclass of
        'fold'.
        """
        return True

    def finishfold(self, ui, repo, ctx, oldctx, newnode, internalchanges):
        """Collapse ctx, newnode and the intermediate commits into one.

        Returns the resulting context and the replacement list that goes
        into the histedit state.
        """
        parent = ctx.parents()[0].node()
        repo.ui.pushbuffer()
        hg.update(repo, parent)
        repo.ui.popbuffer()
        ### prepare new commit data
        commitopts = {}
        commitopts['user'] = ctx.user()
        # commit message
        if not self.mergedescs():
            newmessage = ctx.description()
        else:
            newmessage = '\n***\n'.join(
                [ctx.description()] +
                [repo[r].description() for r in internalchanges] +
                [oldctx.description()]) + '\n'
        commitopts['message'] = newmessage
        # date
        commitopts['date'] = max(ctx.date(), oldctx.date())
        extra = ctx.extra().copy()
        # histedit_source
        # note: ctx is likely a temporary commit but that the best we can do
        # here. This is sufficient to solve issue3681 anyway.
        extra['histedit_source'] = '%s,%s' % (ctx.hex(), oldctx.hex())
        commitopts['extra'] = extra
        phasebackup = repo.ui.backupconfig('phases', 'new-commit')
        try:
            # the folded commit must not be more public than its parts
            phasemin = max(ctx.phase(), oldctx.phase())
            repo.ui.setconfig('phases', 'new-commit', phasemin, 'histedit')
            n = collapse(repo, ctx, repo[newnode], commitopts,
                         skipprompt=self.skipprompt())
        finally:
            repo.ui.restoreconfig(phasebackup)
        if n is None:
            return ctx, []
        repo.ui.pushbuffer()
        hg.update(repo, n)
        repo.ui.popbuffer()
        replacements = [(oldctx.node(), (newnode,)),
                        (ctx.node(), (n,)),
                        (newnode, (n,)),
                        ]
        for ich in internalchanges:
            replacements.append((ich, (n,)))
        return repo[n], replacements
785 785
class base(histeditaction):
    # 'base' resets the edit point to the named changeset instead of
    # applying it on top of the current parent
    def constraints(self):
        # unlike other actions, the base revision must lie outside the
        # edited set
        return set([_constraints.forceother])

    def run(self):
        if self.repo['.'].node() != self.node:
            mergemod.update(self.repo, self.node, False, True)
            #                                     branchmerge, force)
        return self.continueclean()

    def continuedirty(self):
        abortdirty()

    def continueclean(self):
        basectx = self.repo['.']
        return basectx, []
802 802
# NOTE: registered with internal=True -- '_multifold' is substituted in by
# _continuehistedit for consecutive folds; users never type it themselves
@action(['_multifold'],
        _(
    """fold subclass used for when multiple folds happen in a row

    We only want to fire the editor for the folded message once when
    (say) four changes are folded down into a single change. This is
    similar to rollup, but we should preserve both messages so that
    when the last fold operation runs we can show the user all the
    commit messages in their editor.
    """),
        internal=True)
class _multifold(fold):
    def skipprompt(self):
        # suppress the editor for all but the final fold of a run
        return True
817 817
818 818 @action(["roll", "r"],
819 819 _("like fold, but discard this commit's description"))
820 820 class rollup(fold):
821 821 def mergedescs(self):
822 822 return False
823 823
824 824 def skipprompt(self):
825 825 return True
826 826
827 827 @action(["drop", "d"],
828 828 _('remove commit from history'))
829 829 class drop(histeditaction):
830 830 def run(self):
831 831 parentctx = self.repo[self.state.parentctxnode]
832 832 return parentctx, [(self.node, tuple())]
833 833
834 834 @action(["mess", "m"],
835 835 _('edit commit message without changing commit content'),
836 836 priority=True)
837 837 class message(histeditaction):
838 838 def commiteditor(self):
839 839 return cmdutil.getcommiteditor(edit=True, editform='histedit.mess')
840 840
def findoutgoing(ui, repo, remote=None, force=False, opts=None):
    """utility function to find the first outgoing changeset

    Used by initialization code

    Returns the node of the unique root of the outgoing set, aborting if
    there are no outgoing changesets or more than one outgoing root.
    """
    if opts is None:
        opts = {}
    dest = ui.expandpath(remote or 'default-push', remote or 'default')
    dest, revs = hg.parseurl(dest, None)[:2]
    ui.status(_('comparing with %s\n') % util.hidepassword(dest))

    revs, checkout = hg.addbranchrevs(repo, repo, revs, None)
    other = hg.peer(repo, opts, dest)

    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    outgoing = discovery.findcommonoutgoing(repo, other, revs, force=force)
    if not outgoing.missing:
        raise error.Abort(_('no outgoing ancestors'))
    roots = list(repo.revs("roots(%ln)", outgoing.missing))
    if 1 < len(roots):
        # several unrelated outgoing roots: refuse to pick one arbitrarily
        msg = _('there are ambiguous outgoing revisions')
        hint = _('see "hg help histedit" for more detail')
        raise error.Abort(msg, hint=hint)
    return repo.lookup(roots[0])
866 866
867 867
@command('histedit',
    [('', 'commands', '',
      _('read history edits from the specified file'), _('FILE')),
     ('c', 'continue', False, _('continue an edit already in progress')),
     ('', 'edit-plan', False, _('edit remaining actions list')),
     ('k', 'keep', False,
      _("don't strip old nodes after edit is complete")),
     ('', 'abort', False, _('abort an edit in progress')),
     ('o', 'outgoing', False, _('changesets not found in destination')),
     ('f', 'force', False,
      _('force outgoing even for unrelated repositories')),
     ('r', 'rev', [], _('first revision to be edited'), _('REV'))],
    _("[OPTIONS] ([ANCESTOR] | --outgoing [URL])"))
def histedit(ui, repo, *freeargs, **opts):
    """interactively edit changeset history

    This command lets you edit a linear series of changesets (up to
    and including the working directory, which should be clean).
    You can:

    - `pick` to [re]order a changeset

    - `drop` to omit changeset

    - `mess` to reword the changeset commit message

    - `fold` to combine it with the preceding changeset

    - `roll` like fold, but discarding this commit's description

    - `edit` to edit this changeset

    There are a number of ways to select the root changeset:

    - Specify ANCESTOR directly

    - Use --outgoing -- it will be the first linear changeset not
      included in destination. (See :hg:`help config.paths.default-push`)

    - Otherwise, the value from the "histedit.defaultrev" config option
      is used as a revset to select the base revision when ANCESTOR is not
      specified. The first revision returned by the revset is used. By
      default, this selects the editable history that is unique to the
      ancestry of the working directory.

    .. container:: verbose

       If you use --outgoing, this command will abort if there are ambiguous
       outgoing revisions. For example, if there are multiple branches
       containing outgoing revisions.

       Use "min(outgoing() and ::.)" or similar revset specification
       instead of --outgoing to specify edit target revision exactly in
       such ambiguous situation. See :hg:`help revsets` for detail about
       selecting revisions.

    .. container:: verbose

       Examples:

         - A number of changes have been made.
           Revision 3 is no longer needed.

           Start history editing from revision 3::

             hg histedit -r 3

           An editor opens, containing the list of revisions,
           with specific actions specified::

             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 0a9639fcda9d 5 Morgify the cromulancy

           Additional information about the possible actions
           to take appears below the list of revisions.

           To remove revision 3 from the history,
           its action (at the beginning of the relevant line)
           is changed to 'drop'::

             drop 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 0a9639fcda9d 5 Morgify the cromulancy

         - A number of changes have been made.
           Revision 2 and 4 need to be swapped.

           Start history editing from revision 2::

             hg histedit -r 2

           An editor opens, containing the list of revisions,
           with specific actions specified::

             pick 252a1af424ad 2 Blorb a morgwazzle
             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 8ef592ce7cc4 4 Bedazzle the zerlog

           To swap revision 2 and 4, its lines are swapped
           in the editor::

             pick 8ef592ce7cc4 4 Bedazzle the zerlog
             pick 5339bf82f0ca 3 Zworgle the foobar
             pick 252a1af424ad 2 Blorb a morgwazzle

    Returns 0 on success, 1 if user intervention is required (not only
    for intentional "edit" command, but also for resolving unexpected
    conflicts).
    """
    state = histeditstate(repo)
    try:
        # take the working-copy lock before the store lock; both are
        # released in reverse order below
        state.wlock = repo.wlock()
        state.lock = repo.lock()
        _histedit(ui, repo, state, *freeargs, **opts)
    finally:
        release(state.lock, state.wlock)
985 985
# symbolic names for the operation histedit was asked to perform
# (selected from the command line options by _getgoal)
goalcontinue = 'continue'
goalabort = 'abort'
goaleditplan = 'edit-plan'
goalnew = 'new'
990 990
def _getgoal(opts):
    """Map command line options to one of the goal* constants.

    --continue wins over --abort, which wins over --edit-plan; with none
    of them set a fresh session ('new') is requested.
    """
    for optname, goal in (('continue', goalcontinue),
                          ('abort', goalabort),
                          ('edit_plan', goaleditplan)):
        if opts.get(optname):
            return goal
    return goalnew
999 999
1000 1000 def _readfile(path):
1001 1001 if path == '-':
1002 1002 return sys.stdin.read()
1003 1003 else:
1004 1004 with open(path, 'rb') as f:
1005 1005 return f.read()
1006 1006
def _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs):
    """Abort on option combinations that are invalid for *goal*.

    May mutate *revs* (extending it with freeargs or the configured
    default revision) when a new session is being started.
    """
    # TODO only abort if we try to histedit mq patches, not just
    # blanket if mq patches are applied somewhere
    mq = getattr(repo, 'mq', None)
    if mq and mq.applied:
        raise error.Abort(_('source has mq patches applied'))

    # basic argument incompatibility processing
    outg = opts.get('outgoing')
    editplan = opts.get('edit_plan')
    abort = opts.get('abort')
    force = opts.get('force')
    if force and not outg:
        raise error.Abort(_('--force only allowed with --outgoing'))
    if goal == 'continue':
        if any((outg, abort, revs, freeargs, rules, editplan)):
            raise error.Abort(_('no arguments allowed with --continue'))
    elif goal == 'abort':
        if any((outg, revs, freeargs, rules, editplan)):
            raise error.Abort(_('no arguments allowed with --abort'))
    elif goal == 'edit-plan':
        if any((outg, revs, freeargs)):
            raise error.Abort(_('only --commands argument allowed with '
                               '--edit-plan'))
    else:
        # goal is 'new': refuse to start over an unfinished session
        if os.path.exists(os.path.join(repo.path, 'histedit-state')):
            raise error.Abort(_('history edit already in progress, try '
                               '--continue or --abort'))
        if outg:
            if revs:
                raise error.Abort(_('no revisions allowed with --outgoing'))
            if len(freeargs) > 1:
                raise error.Abort(
                    _('only one repo argument allowed with --outgoing'))
        else:
            # positional arguments are additional revisions
            revs.extend(freeargs)
            if len(revs) == 0:
                defaultrev = destutil.desthistedit(ui, repo)
                if defaultrev is not None:
                    revs.append(defaultrev)

            if len(revs) != 1:
                raise error.Abort(
                    _('histedit requires exactly one ancestor revision'))
1051 1051
def _histedit(ui, repo, state, *freeargs, **opts):
    """Validate arguments and dispatch to the requested sub-operation."""
    goal = _getgoal(opts)
    revs = opts.get('rev', [])
    rules = opts.get('commands', '')
    state.keep = opts.get('keep', False)

    _validateargs(ui, repo, state, freeargs, opts, goal, rules, revs)

    # rebuild state
    if goal == goalcontinue:
        state.read()
        state = bootstrapcontinue(ui, state, opts)
    elif goal == goaleditplan:
        # plan editing and aborting are complete operations on their own
        _edithisteditplan(ui, repo, state, rules)
        return
    elif goal == goalabort:
        _aborthistedit(ui, repo, state)
        return
    else:
        # goal == goalnew
        _newhistedit(ui, repo, state, revs, freeargs, opts)

    _continuehistedit(ui, repo, state)
    _finishhistedit(ui, repo, state)
1076 1076
def _continuehistedit(ui, repo, state):
    """Run the remaining actions of the plan, persisting state as we go.

    This function runs after either:
    - bootstrapcontinue (if the goal is 'continue')
    - _newhistedit (if the goal is 'new')
    """
    # preprocess rules so that we can hide inner folds from the user
    # and only show one editor: each fold immediately followed by another
    # fold is downgraded to the internal _multifold action
    actions = state.actions[:]
    for idx, (action, nextact) in enumerate(
            zip(actions, actions[1:] + [None])):
        if action.verb == 'fold' and nextact and nextact.verb == 'fold':
            state.actions[idx].__class__ = _multifold

    total = len(state.actions)
    pos = 0
    while state.actions:
        # persist before running each action so an interrupted session
        # can be resumed from exactly this point
        state.write()
        actobj = state.actions.pop(0)
        pos += 1
        ui.progress(_("editing"), pos, actobj.torule(),
                    _('changes'), total)
        ui.debug('histedit: processing %s %s\n' % (actobj.verb,
                                                   actobj.torule()))
        parentctx, replacement_ = actobj.run()
        state.parentctxnode = parentctx.node()
        state.replacements.extend(replacement_)
        state.write()
    ui.progress(_("editing"), None)
1105 1105
def _finishhistedit(ui, repo, state):
    """This action runs when histedit is finishing its session"""
    # move the working copy onto the final parent
    repo.ui.pushbuffer()
    hg.update(repo, state.parentctxnode, quietempty=True)
    repo.ui.popbuffer()

    mapping, tmpnodes, created, ntm = processreplacement(state)
    if mapping:
        for prec, succs in mapping.iteritems():
            if not succs:
                ui.debug('histedit: %s is dropped\n' % node.short(prec))
            else:
                ui.debug('histedit: %s is replaced by %s\n' % (
                    node.short(prec), node.short(succs[0])))
                if len(succs) > 1:
                    m = 'histedit: %s'
                    for n in succs[1:]:
                        ui.debug(m % node.short(n))

    # with obsolescence markers enabled, hide rewritten nodes instead of
    # stripping them
    supportsmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
    if supportsmarkers:
        # Only create markers if the temp nodes weren't already removed.
        obsolete.createmarkers(repo, ((repo[t],()) for t in sorted(tmpnodes)
                                       if t in repo))
    else:
        cleanupnode(ui, repo, 'temp', tmpnodes)

    if not state.keep:
        if mapping:
            movebookmarks(ui, repo, mapping, state.topmost, ntm)
            # TODO update mq state
        if supportsmarkers:
            markers = []
            # sort by revision number because it sound "right"
            for prec in sorted(mapping, key=repo.changelog.rev):
                succs = mapping[prec]
                markers.append((repo[prec],
                                tuple(repo[s] for s in succs)))
            if markers:
                obsolete.createmarkers(repo, markers)
        else:
            cleanupnode(ui, repo, 'replaced', mapping)

    # drop session files: state, the pre-edit undo data, and the saved plan
    state.clear()
    if os.path.exists(repo.sjoin('undo')):
        os.unlink(repo.sjoin('undo'))
    if repo.vfs.exists('histedit-last-edit.txt'):
        repo.vfs.unlink('histedit-last-edit.txt')
1154 1154
def _aborthistedit(ui, repo, state):
    """Abort an in-progress histedit session.

    Restores the backed-up original commits when they were stripped,
    moves the working copy back to the original topmost changeset, and
    strips the temporary nodes histedit created. The state file is
    always cleared, even on failure.
    """
    try:
        state.read()
        __, leafs, tmpnodes, __ = processreplacement(state)
        ui.debug('restore wc to old parent %s\n'
                 % node.short(state.topmost))

        # Recover our old commits if necessary
        if state.topmost not in repo and state.backupfile:
            backupfile = repo.join(state.backupfile)
            f = hg.openpath(ui, backupfile)
            gen = exchange.readbundle(ui, f, backupfile)
            with repo.transaction('histedit.abort') as tr:
                if not isinstance(gen, bundle2.unbundle20):
                    gen.apply(repo, 'histedit', 'bundle:' + backupfile)
                if isinstance(gen, bundle2.unbundle20):
                    bundle2.applybundle(repo, gen, tr,
                                        source='histedit',
                                        url='bundle:' + backupfile)

            os.remove(backupfile)

        # check whether we should update away
        if repo.unfiltered().revs('parents() and (%n or %ln::)',
                                  state.parentctxnode, leafs | tmpnodes):
            hg.clean(repo, state.topmost, show_stats=True, quietempty=True)
        cleanupnode(ui, repo, 'created', tmpnodes)
        cleanupnode(ui, repo, 'temp', leafs)
    except Exception:
        if state.inprogress():
            ui.warn(_('warning: encountered an exception during histedit '
                      '--abort; the repository may not have been completely '
                      'cleaned up\n'))
        raise
    finally:
        state.clear()
1191 1191
def _edithisteditplan(ui, repo, state, rules):
    """Replace the remaining action list of an in-progress session.

    When no rules file is given, the current plan is re-opened in the
    user's editor.
    """
    state.read()
    if not rules:
        comment = geteditcomment(ui,
                                 node.short(state.parentctxnode),
                                 node.short(state.topmost))
        rules = ruleeditor(repo, ui, state.actions, comment)
    else:
        rules = _readfile(rules)
    actions = parserules(rules, state)
    ctxs = [repo[act.nodetoverify()] \
            for act in state.actions if act.nodetoverify()]
    warnverifyactions(ui, repo, actions, state, ctxs)
    state.actions = actions
    state.write()
1207 1207
def _newhistedit(ui, repo, state, revs, freeargs, opts):
    """Set up a fresh histedit session and populate its state."""
    outg = opts.get('outgoing')
    rules = opts.get('commands', '')
    force = opts.get('force')

    cmdutil.checkunfinished(repo)
    cmdutil.bailifchanged(repo)

    topmost, empty = repo.dirstate.parents()
    if outg:
        # root is the first changeset not present in the destination repo
        if freeargs:
            remote = freeargs[0]
        else:
            remote = None
        root = findoutgoing(ui, repo, remote, force, opts)
    else:
        rr = list(repo.set('roots(%ld)', scmutil.revrange(repo, revs)))
        if len(rr) != 1:
            raise error.Abort(_('The specified revisions must have '
                                'exactly one common root'))
        root = rr[0].node()

    revs = between(repo, root, topmost, state.keep)
    if not revs:
        raise error.Abort(_('%s is not an ancestor of working directory') %
                          node.short(root))

    ctxs = [repo[r] for r in revs]
    if not rules:
        # no --commands file: open the plan (defaulting to all 'pick')
        # in the user's editor
        comment = geteditcomment(ui, node.short(root), node.short(topmost))
        actions = [pick(state, r) for r in revs]
        rules = ruleeditor(repo, ui, actions, comment)
    else:
        rules = _readfile(rules)
    actions = parserules(rules, state)
    warnverifyactions(ui, repo, actions, state, ctxs)

    parentctxnode = repo[root].parents()[0].node()

    state.parentctxnode = parentctxnode
    state.actions = actions
    state.topmost = topmost
    state.replacements = []

    # Create a backup so we can always abort completely.
    backupfile = None
    if not obsolete.isenabled(repo, obsolete.createmarkersopt):
        backupfile = repair._bundle(repo, [parentctxnode], [topmost], root,
                                    'histedit')
    state.backupfile = backupfile
1258 1258
1259 1259 def _getsummary(ctx):
1260 1260 # a common pattern is to extract the summary but default to the empty
1261 1261 # string
1262 1262 summary = ctx.description() or ''
1263 1263 if summary:
1264 1264 summary = summary.splitlines()[0]
1265 1265 return summary
1266 1266
def bootstrapcontinue(ui, state, opts):
    """Rebuild in-memory state when resuming with --continue.

    Runs the pending action's dirty/clean continuation and folds its
    replacements into the state before the main loop takes over.
    """
    repo = state.repo
    if state.actions:
        actobj = state.actions.pop(0)

        if _isdirtywc(repo):
            actobj.continuedirty()
            # the continuation is expected to have committed everything
            if _isdirtywc(repo):
                abortdirty()

        parentctx, replacements = actobj.continueclean()

        state.parentctxnode = parentctx.node()
        state.replacements.extend(replacements)

    return state
1283 1283
def between(repo, old, new, keep):
    """select and validate the set of revision to edit

    When keep is false, the specified set can't have children.

    Returns the list of nodes from old to new, aborting on descendants
    outside the set (unless unstable commits are allowed), merges, or a
    public root.
    """
    ctxs = list(repo.set('%n::%n', old, new))
    if ctxs and not keep:
        if (not obsolete.isenabled(repo, obsolete.allowunstableopt) and
            repo.revs('(%ld::) - (%ld)', ctxs, ctxs)):
            raise error.Abort(_('can only histedit a changeset together '
                                'with all its descendants'))
        if repo.revs('(%ld) and merge()', ctxs):
            raise error.Abort(_('cannot edit history that contains merges'))
        root = ctxs[0] # list is already sorted by repo.set
        if not root.mutable():
            raise error.Abort(_('cannot edit public changeset: %s') % root,
                              hint=_('see "hg help phases" for details'))
    return [c.node() for c in ctxs]
1301 1301
def ruleeditor(repo, ui, actions, editcomment=""):
    """open an editor to edit rules

    rules are in the format [ [act, ctx], ...] like in state.rules

    Returns the edited plan text; a copy is saved to
    .hg/histedit-last-edit.txt for later reference.
    """
    if repo.ui.configbool("experimental", "histedit.autoverb"):
        # experimental: a commit whose summary starts with '<verb>! ' is
        # given that verb automatically and grouped under the earlier
        # commit whose summary matches the remainder
        newact = util.sortdict()
        for act in actions:
            ctx = repo[act.node]
            summary = _getsummary(ctx)
            fword = summary.split(' ', 1)[0].lower()
            added = False

            # if it doesn't end with the special character '!' just skip this
            if fword.endswith('!'):
                fword = fword[:-1]
                if fword in primaryactions | secondaryactions | tertiaryactions:
                    act.verb = fword
                    # get the target summary
                    tsum = summary[len(fword) + 1:].lstrip()
                    # safe but slow: reverse iterate over the actions so we
                    # don't clash on two commits having the same summary
                    for na, l in reversed(list(newact.iteritems())):
                        actx = repo[na.node]
                        asum = _getsummary(actx)
                        if asum == tsum:
                            added = True
                            l.append(act)
                            break

            if not added:
                newact[act] = []

        # copy over and flatten the new list
        actions = []
        for na, l in newact.iteritems():
            actions.append(na)
            actions += l

    rules = '\n'.join([act.torule() for act in actions])
    rules += '\n\n'
    rules += editcomment
    rules = ui.edit(rules, ui.username(), {'prefix': 'histedit'})

    # Save edit rules in .hg/histedit-last-edit.txt in case
    # the user needs to ask for help after something
    # surprising happens.
    with open(repo.join('histedit-last-edit.txt'), 'w') as f:
        f.write(rules)

    return rules
1354 1354
1355 1355 def parserules(rules, state):
1356 1356 """Read the histedit rules string and return list of action objects """
1357 1357 rules = [l for l in (r.strip() for r in rules.splitlines())
1358 1358 if l and not l.startswith('#')]
1359 1359 actions = []
1360 1360 for r in rules:
1361 1361 if ' ' not in r:
1362 1362 raise error.ParseError(_('malformed line "%s"') % r)
1363 1363 verb, rest = r.split(' ', 1)
1364 1364
1365 1365 if verb not in actiontable:
1366 1366 raise error.ParseError(_('unknown action "%s"') % verb)
1367 1367
1368 1368 action = actiontable[verb].fromrule(state, rest)
1369 1369 actions.append(action)
1370 1370 return actions
1371 1371
1372 1372 def warnverifyactions(ui, repo, actions, state, ctxs):
1373 1373 try:
1374 1374 verifyactions(actions, state, ctxs)
1375 1375 except error.ParseError:
1376 1376 if repo.vfs.exists('histedit-last-edit.txt'):
1377 1377 ui.warn(_('warning: histedit rules saved '
1378 1378 'to: .hg/histedit-last-edit.txt\n'))
1379 1379 raise
1380 1380
1381 1381 def verifyactions(actions, state, ctxs):
1382 1382 """Verify that there exists exactly one action per given changeset and
1383 1383 other constraints.
1384 1384
1385 1385 Will abort if there are to many or too few rules, a malformed rule,
1386 1386 or a rule on a changeset outside of the user-given range.
1387 1387 """
1388 1388 expected = set(c.hex() for c in ctxs)
1389 1389 seen = set()
1390 1390 prev = None
1391 1391 for action in actions:
1392 1392 action.verify(prev)
1393 1393 prev = action
1394 1394 constraints = action.constraints()
1395 1395 for constraint in constraints:
1396 1396 if constraint not in _constraints.known():
1397 1397 raise error.ParseError(_('unknown constraint "%s"') %
1398 1398 constraint)
1399 1399
1400 1400 nodetoverify = action.nodetoverify()
1401 1401 if nodetoverify is not None:
1402 1402 ha = node.hex(nodetoverify)
1403 1403 if _constraints.noother in constraints and ha not in expected:
1404 1404 raise error.ParseError(
1405 1405 _('%s "%s" changeset was not a candidate')
1406 1406 % (action.verb, ha[:12]),
1407 1407 hint=_('only use listed changesets'))
1408 1408 if _constraints.forceother in constraints and ha in expected:
1409 1409 raise error.ParseError(
1410 1410 _('%s "%s" changeset was not an edited list candidate')
1411 1411 % (action.verb, ha[:12]),
1412 1412 hint=_('only use listed changesets'))
1413 1413 if _constraints.noduplicates in constraints and ha in seen:
1414 1414 raise error.ParseError(_(
1415 1415 'duplicated command for changeset %s') %
1416 1416 ha[:12])
1417 1417 seen.add(ha)
1418 1418 missing = sorted(expected - seen) # sort to stabilize output
1419 1419
1420 1420 if state.repo.ui.configbool('histedit', 'dropmissing'):
1421 1421 if len(actions) == 0:
1422 1422 raise error.ParseError(_('no rules provided'),
1423 1423 hint=_('use strip extension to remove commits'))
1424 1424
1425 1425 drops = [drop(state, node.bin(n)) for n in missing]
1426 1426 # put the in the beginning so they execute immediately and
1427 1427 # don't show in the edit-plan in the future
1428 1428 actions[:0] = drops
1429 1429 elif missing:
1430 1430 raise error.ParseError(_('missing rules for changeset %s') %
1431 1431 missing[0][:12],
1432 1432 hint=_('use "drop %s" to discard, see also: '
1433 1433 '"hg help -e histedit.config"') % missing[0][:12])
1434 1434
1435 1435 def adjustreplacementsfrommarkers(repo, oldreplacements):
1436 1436 """Adjust replacements from obsolescense markers
1437 1437
1438 1438 Replacements structure is originally generated based on
1439 1439 histedit's state and does not account for changes that are
1440 1440 not recorded there. This function fixes that by adding
1441 1441 data read from obsolescense markers"""
1442 1442 if not obsolete.isenabled(repo, obsolete.createmarkersopt):
1443 1443 return oldreplacements
1444 1444
1445 1445 unfi = repo.unfiltered()
1446 1446 nm = unfi.changelog.nodemap
1447 1447 obsstore = repo.obsstore
1448 1448 newreplacements = list(oldreplacements)
1449 1449 oldsuccs = [r[1] for r in oldreplacements]
1450 1450 # successors that have already been added to succstocheck once
1451 1451 seensuccs = set().union(*oldsuccs) # create a set from an iterable of tuples
1452 1452 succstocheck = list(seensuccs)
1453 1453 while succstocheck:
1454 1454 n = succstocheck.pop()
1455 1455 missing = nm.get(n) is None
1456 1456 markers = obsstore.successors.get(n, ())
1457 1457 if missing and not markers:
1458 1458 # dead end, mark it as such
1459 1459 newreplacements.append((n, ()))
1460 1460 for marker in markers:
1461 1461 nsuccs = marker[1]
1462 1462 newreplacements.append((n, nsuccs))
1463 1463 for nsucc in nsuccs:
1464 1464 if nsucc not in seensuccs:
1465 1465 seensuccs.add(nsucc)
1466 1466 succstocheck.append(nsucc)
1467 1467
1468 1468 return newreplacements
1469 1469
1470 1470 def processreplacement(state):
1471 1471 """process the list of replacements to return
1472 1472
1473 1473 1) the final mapping between original and created nodes
1474 1474 2) the list of temporary node created by histedit
1475 1475 3) the list of new commit created by histedit"""
1476 1476 replacements = adjustreplacementsfrommarkers(state.repo, state.replacements)
1477 1477 allsuccs = set()
1478 1478 replaced = set()
1479 1479 fullmapping = {}
1480 1480 # initialize basic set
1481 1481 # fullmapping records all operations recorded in replacement
1482 1482 for rep in replacements:
1483 1483 allsuccs.update(rep[1])
1484 1484 replaced.add(rep[0])
1485 1485 fullmapping.setdefault(rep[0], set()).update(rep[1])
1486 1486 new = allsuccs - replaced
1487 1487 tmpnodes = allsuccs & replaced
1488 1488 # Reduce content fullmapping into direct relation between original nodes
1489 1489 # and final node created during history edition
1490 1490 # Dropped changeset are replaced by an empty list
1491 1491 toproceed = set(fullmapping)
1492 1492 final = {}
1493 1493 while toproceed:
1494 1494 for x in list(toproceed):
1495 1495 succs = fullmapping[x]
1496 1496 for s in list(succs):
1497 1497 if s in toproceed:
1498 1498 # non final node with unknown closure
1499 1499 # We can't process this now
1500 1500 break
1501 1501 elif s in final:
1502 1502 # non final node, replace with closure
1503 1503 succs.remove(s)
1504 1504 succs.update(final[s])
1505 1505 else:
1506 1506 final[x] = succs
1507 1507 toproceed.remove(x)
1508 1508 # remove tmpnodes from final mapping
1509 1509 for n in tmpnodes:
1510 1510 del final[n]
1511 1511 # we expect all changes involved in final to exist in the repo
1512 1512 # turn `final` into list (topologically sorted)
1513 1513 nm = state.repo.changelog.nodemap
1514 1514 for prec, succs in final.items():
1515 1515 final[prec] = sorted(succs, key=nm.get)
1516 1516
1517 1517 # computed topmost element (necessary for bookmark)
1518 1518 if new:
1519 1519 newtopmost = sorted(new, key=state.repo.changelog.rev)[-1]
1520 1520 elif not final:
1521 1521 # Nothing rewritten at all. we won't need `newtopmost`
1522 1522 # It is the same as `oldtopmost` and `processreplacement` know it
1523 1523 newtopmost = None
1524 1524 else:
1525 1525 # every body died. The newtopmost is the parent of the root.
1526 1526 r = state.repo.changelog.rev
1527 1527 newtopmost = state.repo[sorted(final, key=r)[0]].p1().node()
1528 1528
1529 1529 return final, tmpnodes, new, newtopmost
1530 1530
1531 1531 def movebookmarks(ui, repo, mapping, oldtopmost, newtopmost):
1532 1532 """Move bookmark from old to newly created node"""
1533 1533 if not mapping:
1534 1534 # if nothing got rewritten there is not purpose for this function
1535 1535 return
1536 1536 moves = []
1537 1537 for bk, old in sorted(repo._bookmarks.iteritems()):
1538 1538 if old == oldtopmost:
1539 1539 # special case ensure bookmark stay on tip.
1540 1540 #
1541 1541 # This is arguably a feature and we may only want that for the
1542 1542 # active bookmark. But the behavior is kept compatible with the old
1543 1543 # version for now.
1544 1544 moves.append((bk, newtopmost))
1545 1545 continue
1546 1546 base = old
1547 1547 new = mapping.get(base, None)
1548 1548 if new is None:
1549 1549 continue
1550 1550 while not new:
1551 1551 # base is killed, trying with parent
1552 1552 base = repo[base].p1().node()
1553 1553 new = mapping.get(base, (base,))
1554 1554 # nothing to move
1555 1555 moves.append((bk, new[-1]))
1556 1556 if moves:
1557 1557 lock = tr = None
1558 1558 try:
1559 1559 lock = repo.lock()
1560 1560 tr = repo.transaction('histedit')
1561 1561 marks = repo._bookmarks
1562 1562 for mark, new in moves:
1563 1563 old = marks[mark]
1564 1564 ui.note(_('histedit: moving bookmarks %s from %s to %s\n')
1565 1565 % (mark, node.short(old), node.short(new)))
1566 1566 marks[mark] = new
1567 1567 marks.recordchange(tr)
1568 1568 tr.close()
1569 1569 finally:
1570 1570 release(tr, lock)
1571 1571
1572 1572 def cleanupnode(ui, repo, name, nodes):
1573 1573 """strip a group of nodes from the repository
1574 1574
1575 1575 The set of node to strip may contains unknown nodes."""
1576 1576 ui.debug('should strip %s nodes %s\n' %
1577 1577 (name, ', '.join([node.short(n) for n in nodes])))
1578 1578 with repo.lock():
1579 1579 # do not let filtering get in the way of the cleanse
1580 1580 # we should probably get rid of obsolescence marker created during the
1581 1581 # histedit, but we currently do not have such information.
1582 1582 repo = repo.unfiltered()
1583 1583 # Find all nodes that need to be stripped
1584 1584 # (we use %lr instead of %ln to silently ignore unknown items)
1585 1585 nm = repo.changelog.nodemap
1586 1586 nodes = sorted(n for n in nodes if n in nm)
1587 1587 roots = [c.node() for c in repo.set("roots(%ln)", nodes)]
1588 1588 for c in roots:
1589 1589 # We should process node in reverse order to strip tip most first.
1590 1590 # but this trigger a bug in changegroup hook.
1591 1591 # This would reduce bundle overhead
1592 1592 repair.strip(ui, repo, c)
1593 1593
1594 1594 def stripwrapper(orig, ui, repo, nodelist, *args, **kwargs):
1595 1595 if isinstance(nodelist, str):
1596 1596 nodelist = [nodelist]
1597 1597 if os.path.exists(os.path.join(repo.path, 'histedit-state')):
1598 1598 state = histeditstate(repo)
1599 1599 state.read()
1600 1600 histedit_nodes = set([action.nodetoverify() for action
1601 1601 in state.actions if action.nodetoverify()])
1602 1602 strip_nodes = set([repo[n].node() for n in nodelist])
1603 1603 common_nodes = histedit_nodes & strip_nodes
1604 1604 if common_nodes:
1605 1605 raise error.Abort(_("histedit in progress, can't strip %s")
1606 1606 % ', '.join(node.short(x) for x in common_nodes))
1607 1607 return orig(ui, repo, nodelist, *args, **kwargs)
1608 1608
1609 1609 extensions.wrapfunction(repair, 'strip', stripwrapper)
1610 1610
1611 1611 def summaryhook(ui, repo):
1612 1612 if not os.path.exists(repo.join('histedit-state')):
1613 1613 return
1614 1614 state = histeditstate(repo)
1615 1615 state.read()
1616 1616 if state.actions:
1617 1617 # i18n: column positioning for "hg summary"
1618 1618 ui.write(_('hist: %s (histedit --continue)\n') %
1619 1619 (ui.label(_('%d remaining'), 'histedit.remaining') %
1620 1620 len(state.actions)))
1621 1621
1622 1622 def extsetup(ui):
1623 1623 cmdutil.summaryhooks.add('histedit', summaryhook)
1624 1624 cmdutil.unfinishedstates.append(
1625 1625 ['histedit-state', False, True, _('histedit in progress'),
1626 1626 _("use 'hg histedit --continue' or 'hg histedit --abort'")])
1627 1627 cmdutil.afterresolvedstates.append(
1628 1628 ['histedit-state', _('hg histedit --continue')])
1629 1629 if ui.configbool("experimental", "histeditng"):
1630 1630 globals()['base'] = action(['base', 'b'],
1631 1631 _('checkout changeset and apply further changesets from there')
1632 1632 )(base)
@@ -1,491 +1,491 b''
1 1 # journal.py
2 2 #
3 3 # Copyright 2014-2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """Track previous positions of bookmarks (EXPERIMENTAL)
8 8
9 9 This extension adds a new command: `hg journal`, which shows you where
10 10 bookmarks were previously located.
11 11
12 12 """
13 13
14 14 from __future__ import absolute_import
15 15
16 16 import collections
17 17 import errno
18 18 import os
19 19 import weakref
20 20
21 21 from mercurial.i18n import _
22 22
23 23 from mercurial import (
24 24 bookmarks,
25 25 cmdutil,
26 26 commands,
27 27 dispatch,
28 28 error,
29 29 extensions,
30 30 hg,
31 31 localrepo,
32 32 lock,
33 33 node,
34 34 util,
35 35 )
36 36
37 37 from . import share
38 38
39 39 cmdtable = {}
40 40 command = cmdutil.command(cmdtable)
41 41
42 # Note for extension authors: ONLY specify testedwith = 'internal' for
42 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
43 43 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
44 44 # be specifying the version(s) of Mercurial they are tested with, or
45 45 # leave the attribute unspecified.
46 testedwith = 'internal'
46 testedwith = 'ships-with-hg-core'
47 47
48 48 # storage format version; increment when the format changes
49 49 storageversion = 0
50 50
51 51 # namespaces
52 52 bookmarktype = 'bookmark'
53 53 wdirparenttype = 'wdirparent'
54 54 # In a shared repository, what shared feature name is used
55 55 # to indicate this namespace is shared with the source?
56 56 sharednamespaces = {
57 57 bookmarktype: hg.sharedbookmarks,
58 58 }
59 59
60 60 # Journal recording, register hooks and storage object
61 61 def extsetup(ui):
62 62 extensions.wrapfunction(dispatch, 'runcommand', runcommand)
63 63 extensions.wrapfunction(bookmarks.bmstore, '_write', recordbookmarks)
64 64 extensions.wrapfunction(
65 65 localrepo.localrepository.dirstate, 'func', wrapdirstate)
66 66 extensions.wrapfunction(hg, 'postshare', wrappostshare)
67 67 extensions.wrapfunction(hg, 'copystore', unsharejournal)
68 68
69 69 def reposetup(ui, repo):
70 70 if repo.local():
71 71 repo.journal = journalstorage(repo)
72 72
73 73 def runcommand(orig, lui, repo, cmd, fullargs, *args):
74 74 """Track the command line options for recording in the journal"""
75 75 journalstorage.recordcommand(*fullargs)
76 76 return orig(lui, repo, cmd, fullargs, *args)
77 77
78 78 # hooks to record dirstate changes
79 79 def wrapdirstate(orig, repo):
80 80 """Make journal storage available to the dirstate object"""
81 81 dirstate = orig(repo)
82 82 if util.safehasattr(repo, 'journal'):
83 83 dirstate.journalstorage = repo.journal
84 84 dirstate.addparentchangecallback('journal', recorddirstateparents)
85 85 return dirstate
86 86
87 87 def recorddirstateparents(dirstate, old, new):
88 88 """Records all dirstate parent changes in the journal."""
89 89 old = list(old)
90 90 new = list(new)
91 91 if util.safehasattr(dirstate, 'journalstorage'):
92 92 # only record two hashes if there was a merge
93 93 oldhashes = old[:1] if old[1] == node.nullid else old
94 94 newhashes = new[:1] if new[1] == node.nullid else new
95 95 dirstate.journalstorage.record(
96 96 wdirparenttype, '.', oldhashes, newhashes)
97 97
98 98 # hooks to record bookmark changes (both local and remote)
99 99 def recordbookmarks(orig, store, fp):
100 100 """Records all bookmark changes in the journal."""
101 101 repo = store._repo
102 102 if util.safehasattr(repo, 'journal'):
103 103 oldmarks = bookmarks.bmstore(repo)
104 104 for mark, value in store.iteritems():
105 105 oldvalue = oldmarks.get(mark, node.nullid)
106 106 if value != oldvalue:
107 107 repo.journal.record(bookmarktype, mark, oldvalue, value)
108 108 return orig(store, fp)
109 109
110 110 # shared repository support
111 111 def _readsharedfeatures(repo):
112 112 """A set of shared features for this repository"""
113 113 try:
114 114 return set(repo.vfs.read('shared').splitlines())
115 115 except IOError as inst:
116 116 if inst.errno != errno.ENOENT:
117 117 raise
118 118 return set()
119 119
120 120 def _mergeentriesiter(*iterables, **kwargs):
121 121 """Given a set of sorted iterables, yield the next entry in merged order
122 122
123 123 Note that by default entries go from most recent to oldest.
124 124 """
125 125 order = kwargs.pop('order', max)
126 126 iterables = [iter(it) for it in iterables]
127 127 # this tracks still active iterables; iterables are deleted as they are
128 128 # exhausted, which is why this is a dictionary and why each entry also
129 129 # stores the key. Entries are mutable so we can store the next value each
130 130 # time.
131 131 iterable_map = {}
132 132 for key, it in enumerate(iterables):
133 133 try:
134 134 iterable_map[key] = [next(it), key, it]
135 135 except StopIteration:
136 136 # empty entry, can be ignored
137 137 pass
138 138
139 139 while iterable_map:
140 140 value, key, it = order(iterable_map.itervalues())
141 141 yield value
142 142 try:
143 143 iterable_map[key][0] = next(it)
144 144 except StopIteration:
145 145 # this iterable is empty, remove it from consideration
146 146 del iterable_map[key]
147 147
148 148 def wrappostshare(orig, sourcerepo, destrepo, **kwargs):
149 149 """Mark this shared working copy as sharing journal information"""
150 150 with destrepo.wlock():
151 151 orig(sourcerepo, destrepo, **kwargs)
152 152 with destrepo.vfs('shared', 'a') as fp:
153 153 fp.write('journal\n')
154 154
155 155 def unsharejournal(orig, ui, repo, repopath):
156 156 """Copy shared journal entries into this repo when unsharing"""
157 157 if (repo.path == repopath and repo.shared() and
158 158 util.safehasattr(repo, 'journal')):
159 159 sharedrepo = share._getsrcrepo(repo)
160 160 sharedfeatures = _readsharedfeatures(repo)
161 161 if sharedrepo and sharedfeatures > set(['journal']):
162 162 # there is a shared repository and there are shared journal entries
163 163 # to copy. move shared date over from source to destination but
164 164 # move the local file first
165 165 if repo.vfs.exists('journal'):
166 166 journalpath = repo.join('journal')
167 167 util.rename(journalpath, journalpath + '.bak')
168 168 storage = repo.journal
169 169 local = storage._open(
170 170 repo.vfs, filename='journal.bak', _newestfirst=False)
171 171 shared = (
172 172 e for e in storage._open(sharedrepo.vfs, _newestfirst=False)
173 173 if sharednamespaces.get(e.namespace) in sharedfeatures)
174 174 for entry in _mergeentriesiter(local, shared, order=min):
175 175 storage._write(repo.vfs, entry)
176 176
177 177 return orig(ui, repo, repopath)
178 178
179 179 class journalentry(collections.namedtuple(
180 180 'journalentry',
181 181 'timestamp user command namespace name oldhashes newhashes')):
182 182 """Individual journal entry
183 183
184 184 * timestamp: a mercurial (time, timezone) tuple
185 185 * user: the username that ran the command
186 186 * namespace: the entry namespace, an opaque string
187 187 * name: the name of the changed item, opaque string with meaning in the
188 188 namespace
189 189 * command: the hg command that triggered this record
190 190 * oldhashes: a tuple of one or more binary hashes for the old location
191 191 * newhashes: a tuple of one or more binary hashes for the new location
192 192
193 193 Handles serialisation from and to the storage format. Fields are
194 194 separated by newlines, hashes are written out in hex separated by commas,
195 195 timestamp and timezone are separated by a space.
196 196
197 197 """
198 198 @classmethod
199 199 def fromstorage(cls, line):
200 200 (time, user, command, namespace, name,
201 201 oldhashes, newhashes) = line.split('\n')
202 202 timestamp, tz = time.split()
203 203 timestamp, tz = float(timestamp), int(tz)
204 204 oldhashes = tuple(node.bin(hash) for hash in oldhashes.split(','))
205 205 newhashes = tuple(node.bin(hash) for hash in newhashes.split(','))
206 206 return cls(
207 207 (timestamp, tz), user, command, namespace, name,
208 208 oldhashes, newhashes)
209 209
210 210 def __str__(self):
211 211 """String representation for storage"""
212 212 time = ' '.join(map(str, self.timestamp))
213 213 oldhashes = ','.join([node.hex(hash) for hash in self.oldhashes])
214 214 newhashes = ','.join([node.hex(hash) for hash in self.newhashes])
215 215 return '\n'.join((
216 216 time, self.user, self.command, self.namespace, self.name,
217 217 oldhashes, newhashes))
218 218
219 219 class journalstorage(object):
220 220 """Storage for journal entries
221 221
222 222 Entries are divided over two files; one with entries that pertain to the
223 223 local working copy *only*, and one with entries that are shared across
224 224 multiple working copies when shared using the share extension.
225 225
226 226 Entries are stored with NUL bytes as separators. See the journalentry
227 227 class for the per-entry structure.
228 228
229 229 The file format starts with an integer version, delimited by a NUL.
230 230
231 231 This storage uses a dedicated lock; this makes it easier to avoid issues
232 232 with adding entries that added when the regular wlock is unlocked (e.g.
233 233 the dirstate).
234 234
235 235 """
236 236 _currentcommand = ()
237 237 _lockref = None
238 238
239 239 def __init__(self, repo):
240 240 self.user = util.getuser()
241 241 self.ui = repo.ui
242 242 self.vfs = repo.vfs
243 243
244 244 # is this working copy using a shared storage?
245 245 self.sharedfeatures = self.sharedvfs = None
246 246 if repo.shared():
247 247 features = _readsharedfeatures(repo)
248 248 sharedrepo = share._getsrcrepo(repo)
249 249 if sharedrepo is not None and 'journal' in features:
250 250 self.sharedvfs = sharedrepo.vfs
251 251 self.sharedfeatures = features
252 252
253 253 # track the current command for recording in journal entries
254 254 @property
255 255 def command(self):
256 256 commandstr = ' '.join(
257 257 map(util.shellquote, journalstorage._currentcommand))
258 258 if '\n' in commandstr:
259 259 # truncate multi-line commands
260 260 commandstr = commandstr.partition('\n')[0] + ' ...'
261 261 return commandstr
262 262
263 263 @classmethod
264 264 def recordcommand(cls, *fullargs):
265 265 """Set the current hg arguments, stored with recorded entries"""
266 266 # Set the current command on the class because we may have started
267 267 # with a non-local repo (cloning for example).
268 268 cls._currentcommand = fullargs
269 269
270 270 def jlock(self, vfs):
271 271 """Create a lock for the journal file"""
272 272 if self._lockref and self._lockref():
273 273 raise error.Abort(_('journal lock does not support nesting'))
274 274 desc = _('journal of %s') % vfs.base
275 275 try:
276 276 l = lock.lock(vfs, 'journal.lock', 0, desc=desc)
277 277 except error.LockHeld as inst:
278 278 self.ui.warn(
279 279 _("waiting for lock on %s held by %r\n") % (desc, inst.locker))
280 280 # default to 600 seconds timeout
281 281 l = lock.lock(
282 282 vfs, 'journal.lock',
283 283 int(self.ui.config("ui", "timeout", "600")), desc=desc)
284 284 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
285 285 self._lockref = weakref.ref(l)
286 286 return l
287 287
288 288 def record(self, namespace, name, oldhashes, newhashes):
289 289 """Record a new journal entry
290 290
291 291 * namespace: an opaque string; this can be used to filter on the type
292 292 of recorded entries.
293 293 * name: the name defining this entry; for bookmarks, this is the
294 294 bookmark name. Can be filtered on when retrieving entries.
295 295 * oldhashes and newhashes: each a single binary hash, or a list of
296 296 binary hashes. These represent the old and new position of the named
297 297 item.
298 298
299 299 """
300 300 if not isinstance(oldhashes, list):
301 301 oldhashes = [oldhashes]
302 302 if not isinstance(newhashes, list):
303 303 newhashes = [newhashes]
304 304
305 305 entry = journalentry(
306 306 util.makedate(), self.user, self.command, namespace, name,
307 307 oldhashes, newhashes)
308 308
309 309 vfs = self.vfs
310 310 if self.sharedvfs is not None:
311 311 # write to the shared repository if this feature is being
312 312 # shared between working copies.
313 313 if sharednamespaces.get(namespace) in self.sharedfeatures:
314 314 vfs = self.sharedvfs
315 315
316 316 self._write(vfs, entry)
317 317
318 318 def _write(self, vfs, entry):
319 319 with self.jlock(vfs):
320 320 version = None
321 321 # open file in amend mode to ensure it is created if missing
322 322 with vfs('journal', mode='a+b', atomictemp=True) as f:
323 323 f.seek(0, os.SEEK_SET)
324 324 # Read just enough bytes to get a version number (up to 2
325 325 # digits plus separator)
326 326 version = f.read(3).partition('\0')[0]
327 327 if version and version != str(storageversion):
328 328 # different version of the storage. Exit early (and not
329 329 # write anything) if this is not a version we can handle or
330 330 # the file is corrupt. In future, perhaps rotate the file
331 331 # instead?
332 332 self.ui.warn(
333 333 _("unsupported journal file version '%s'\n") % version)
334 334 return
335 335 if not version:
336 336 # empty file, write version first
337 337 f.write(str(storageversion) + '\0')
338 338 f.seek(0, os.SEEK_END)
339 339 f.write(str(entry) + '\0')
340 340
341 341 def filtered(self, namespace=None, name=None):
342 342 """Yield all journal entries with the given namespace or name
343 343
344 344 Both the namespace and the name are optional; if neither is given all
345 345 entries in the journal are produced.
346 346
347 347 Matching supports regular expressions by using the `re:` prefix
348 348 (use `literal:` to match names or namespaces that start with `re:`)
349 349
350 350 """
351 351 if namespace is not None:
352 352 namespace = util.stringmatcher(namespace)[-1]
353 353 if name is not None:
354 354 name = util.stringmatcher(name)[-1]
355 355 for entry in self:
356 356 if namespace is not None and not namespace(entry.namespace):
357 357 continue
358 358 if name is not None and not name(entry.name):
359 359 continue
360 360 yield entry
361 361
362 362 def __iter__(self):
363 363 """Iterate over the storage
364 364
365 365 Yields journalentry instances for each contained journal record.
366 366
367 367 """
368 368 local = self._open(self.vfs)
369 369
370 370 if self.sharedvfs is None:
371 371 return local
372 372
373 373 # iterate over both local and shared entries, but only those
374 374 # shared entries that are among the currently shared features
375 375 shared = (
376 376 e for e in self._open(self.sharedvfs)
377 377 if sharednamespaces.get(e.namespace) in self.sharedfeatures)
378 378 return _mergeentriesiter(local, shared)
379 379
380 380 def _open(self, vfs, filename='journal', _newestfirst=True):
381 381 if not vfs.exists(filename):
382 382 return
383 383
384 384 with vfs(filename) as f:
385 385 raw = f.read()
386 386
387 387 lines = raw.split('\0')
388 388 version = lines and lines[0]
389 389 if version != str(storageversion):
390 390 version = version or _('not available')
391 391 raise error.Abort(_("unknown journal file version '%s'") % version)
392 392
393 393 # Skip the first line, it's a version number. Normally we iterate over
394 394 # these in reverse order to list newest first; only when copying across
395 395 # a shared storage do we forgo reversing.
396 396 lines = lines[1:]
397 397 if _newestfirst:
398 398 lines = reversed(lines)
399 399 for line in lines:
400 400 if not line:
401 401 continue
402 402 yield journalentry.fromstorage(line)
403 403
404 404 # journal reading
405 405 # log options that don't make sense for journal
406 406 _ignoreopts = ('no-merges', 'graph')
407 407 @command(
408 408 'journal', [
409 409 ('', 'all', None, 'show history for all names'),
410 410 ('c', 'commits', None, 'show commit metadata'),
411 411 ] + [opt for opt in commands.logopts if opt[1] not in _ignoreopts],
412 412 '[OPTION]... [BOOKMARKNAME]')
413 413 def journal(ui, repo, *args, **opts):
414 414 """show the previous position of bookmarks and the working copy
415 415
416 416 The journal is used to see the previous commits that bookmarks and the
417 417 working copy pointed to. By default the previous locations for the working
418 418 copy. Passing a bookmark name will show all the previous positions of
419 419 that bookmark. Use the --all switch to show previous locations for all
420 420 bookmarks and the working copy; each line will then include the bookmark
421 421 name, or '.' for the working copy, as well.
422 422
423 423 If `name` starts with `re:`, the remainder of the name is treated as
424 424 a regular expression. To match a name that actually starts with `re:`,
425 425 use the prefix `literal:`.
426 426
427 427 By default hg journal only shows the commit hash and the command that was
428 428 running at that time. -v/--verbose will show the prior hash, the user, and
429 429 the time at which it happened.
430 430
431 431 Use -c/--commits to output log information on each commit hash; at this
432 432 point you can use the usual `--patch`, `--git`, `--stat` and `--template`
433 433 switches to alter the log output for these.
434 434
435 435 `hg journal -T json` can be used to produce machine readable output.
436 436
437 437 """
438 438 name = '.'
439 439 if opts.get('all'):
440 440 if args:
441 441 raise error.Abort(
442 442 _("You can't combine --all and filtering on a name"))
443 443 name = None
444 444 if args:
445 445 name = args[0]
446 446
447 447 fm = ui.formatter('journal', opts)
448 448
449 449 if opts.get("template") != "json":
450 450 if name is None:
451 451 displayname = _('the working copy and bookmarks')
452 452 else:
453 453 displayname = "'%s'" % name
454 454 ui.status(_("previous locations of %s:\n") % displayname)
455 455
456 456 limit = cmdutil.loglimit(opts)
457 457 entry = None
458 458 for count, entry in enumerate(repo.journal.filtered(name=name)):
459 459 if count == limit:
460 460 break
461 461 newhashesstr = fm.formatlist(map(fm.hexfunc, entry.newhashes),
462 462 name='node', sep=',')
463 463 oldhashesstr = fm.formatlist(map(fm.hexfunc, entry.oldhashes),
464 464 name='node', sep=',')
465 465
466 466 fm.startitem()
467 467 fm.condwrite(ui.verbose, 'oldhashes', '%s -> ', oldhashesstr)
468 468 fm.write('newhashes', '%s', newhashesstr)
469 469 fm.condwrite(ui.verbose, 'user', ' %-8s', entry.user)
470 470 fm.condwrite(
471 471 opts.get('all') or name.startswith('re:'),
472 472 'name', ' %-8s', entry.name)
473 473
474 474 timestring = fm.formatdate(entry.timestamp, '%Y-%m-%d %H:%M %1%2')
475 475 fm.condwrite(ui.verbose, 'date', ' %s', timestring)
476 476 fm.write('command', ' %s\n', entry.command)
477 477
478 478 if opts.get("commits"):
479 479 displayer = cmdutil.show_changeset(ui, repo, opts, buffered=False)
480 480 for hash in entry.newhashes:
481 481 try:
482 482 ctx = repo[hash]
483 483 displayer.show(ctx)
484 484 except error.RepoLookupError as e:
485 485 fm.write('repolookuperror', "%s\n\n", str(e))
486 486 displayer.close()
487 487
488 488 fm.end()
489 489
490 490 if entry is None:
491 491 ui.status(_("no recorded locations\n"))
@@ -1,758 +1,758 b''
1 1 # keyword.py - $Keyword$ expansion for Mercurial
2 2 #
3 3 # Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # $Id$
9 9 #
10 10 # Keyword expansion hack against the grain of a Distributed SCM
11 11 #
12 12 # There are many good reasons why this is not needed in a distributed
13 13 # SCM, still it may be useful in very small projects based on single
14 14 # files (like LaTeX packages), that are mostly addressed to an
15 15 # audience not running a version control system.
16 16 #
17 17 # For in-depth discussion refer to
18 18 # <https://mercurial-scm.org/wiki/KeywordPlan>.
19 19 #
20 20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 21 #
22 22 # Binary files are not touched.
23 23 #
24 24 # Files to act upon/ignore are specified in the [keyword] section.
25 25 # Customized keyword template mappings in the [keywordmaps] section.
26 26 #
27 27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
28 28
29 29 '''expand keywords in tracked files
30 30
31 31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 32 tracked text files selected by your configuration.
33 33
34 34 Keywords are only expanded in local repositories and not stored in the
35 35 change history. The mechanism can be regarded as a convenience for the
36 36 current user or for archive distribution.
37 37
38 38 Keywords expand to the changeset data pertaining to the latest change
39 39 relative to the working directory parent of each file.
40 40
41 41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 42 sections of hgrc files.
43 43
44 44 Example::
45 45
46 46 [keyword]
47 47 # expand keywords in every python file except those matching "x*"
48 48 **.py =
49 49 x* = ignore
50 50
51 51 [keywordset]
52 52 # prefer svn- over cvs-like default keywordmaps
53 53 svn = True
54 54
55 55 .. note::
56 56
57 57 The more specific you are in your filename patterns the less you
58 58 lose speed in huge repositories.
59 59
60 60 For [keywordmaps] template mapping and expansion demonstration and
61 61 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
62 62 available templates and filters.
63 63
64 64 Three additional date template filters are provided:
65 65
66 66 :``utcdate``: "2006/09/18 15:13:13"
67 67 :``svnutcdate``: "2006-09-18 15:13:13Z"
68 68 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
69 69
70 70 The default template mappings (view with :hg:`kwdemo -d`) can be
71 71 replaced with customized keywords and templates. Again, run
72 72 :hg:`kwdemo` to control the results of your configuration changes.
73 73
74 74 Before changing/disabling active keywords, you must run :hg:`kwshrink`
75 75 to avoid storing expanded keywords in the change history.
76 76
77 77 To force expansion after enabling it, or a configuration change, run
78 78 :hg:`kwexpand`.
79 79
80 80 Expansions spanning more than one line and incremental expansions,
81 81 like CVS' $Log$, are not supported. A keyword template map "Log =
82 82 {desc}" expands to the first line of the changeset description.
83 83 '''
84 84
85 85
86 86 from __future__ import absolute_import
87 87
88 88 import os
89 89 import re
90 90 import tempfile
91 91
92 92 from mercurial.i18n import _
93 93 from mercurial.hgweb import webcommands
94 94
95 95 from mercurial import (
96 96 cmdutil,
97 97 commands,
98 98 context,
99 99 dispatch,
100 100 error,
101 101 extensions,
102 102 filelog,
103 103 localrepo,
104 104 match,
105 105 patch,
106 106 pathutil,
107 107 registrar,
108 108 scmutil,
109 109 templatefilters,
110 110 util,
111 111 )
112 112
113 113 cmdtable = {}
114 114 command = cmdutil.command(cmdtable)
115 # Note for extension authors: ONLY specify testedwith = 'internal' for
115 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
116 116 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
117 117 # be specifying the version(s) of Mercurial they are tested with, or
118 118 # leave the attribute unspecified.
119 testedwith = 'internal'
119 testedwith = 'ships-with-hg-core'
120 120
121 121 # hg commands that do not act on keywords
122 122 nokwcommands = ('add addremove annotate bundle export grep incoming init log'
123 123 ' outgoing push tip verify convert email glog')
124 124
125 125 # hg commands that trigger expansion only when writing to working dir,
126 126 # not when reading filelog, and unexpand when reading from working dir
127 127 restricted = ('merge kwexpand kwshrink record qrecord resolve transplant'
128 128 ' unshelve rebase graft backout histedit fetch')
129 129
130 130 # names of extensions using dorecord
131 131 recordextensions = 'record'
132 132
133 133 colortable = {
134 134 'kwfiles.enabled': 'green bold',
135 135 'kwfiles.deleted': 'cyan bold underline',
136 136 'kwfiles.enabledunknown': 'green',
137 137 'kwfiles.ignored': 'bold',
138 138 'kwfiles.ignoredunknown': 'none'
139 139 }
140 140
141 141 templatefilter = registrar.templatefilter()
142 142
143 143 # date like in cvs' $Date
144 144 @templatefilter('utcdate')
145 145 def utcdate(text):
146 146 '''Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
147 147 '''
148 148 return util.datestr((util.parsedate(text)[0], 0), '%Y/%m/%d %H:%M:%S')
149 149 # date like in svn's $Date
150 150 @templatefilter('svnisodate')
151 151 def svnisodate(text):
152 152 '''Date. Returns a date in this format: "2009-08-18 13:00:13
153 153 +0200 (Tue, 18 Aug 2009)".
154 154 '''
155 155 return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
156 156 # date like in svn's $Id
157 157 @templatefilter('svnutcdate')
158 158 def svnutcdate(text):
159 159 '''Date. Returns a UTC-date in this format: "2009-08-18
160 160 11:00:13Z".
161 161 '''
162 162 return util.datestr((util.parsedate(text)[0], 0), '%Y-%m-%d %H:%M:%SZ')
163 163
164 164 # make keyword tools accessible
165 165 kwtools = {'templater': None, 'hgcmd': ''}
166 166
167 167 def _defaultkwmaps(ui):
168 168 '''Returns default keywordmaps according to keywordset configuration.'''
169 169 templates = {
170 170 'Revision': '{node|short}',
171 171 'Author': '{author|user}',
172 172 }
173 173 kwsets = ({
174 174 'Date': '{date|utcdate}',
175 175 'RCSfile': '{file|basename},v',
176 176 'RCSFile': '{file|basename},v', # kept for backwards compatibility
177 177 # with hg-keyword
178 178 'Source': '{root}/{file},v',
179 179 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
180 180 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
181 181 }, {
182 182 'Date': '{date|svnisodate}',
183 183 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
184 184 'LastChangedRevision': '{node|short}',
185 185 'LastChangedBy': '{author|user}',
186 186 'LastChangedDate': '{date|svnisodate}',
187 187 })
188 188 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
189 189 return templates
190 190
191 191 def _shrinktext(text, subfunc):
192 192 '''Helper for keyword expansion removal in text.
193 193 Depending on subfunc also returns number of substitutions.'''
194 194 return subfunc(r'$\1$', text)
195 195
196 196 def _preselect(wstatus, changed):
197 197 '''Retrieves modified and added files from a working directory state
198 198 and returns the subset of each contained in given changed files
199 199 retrieved from a change context.'''
200 200 modified = [f for f in wstatus.modified if f in changed]
201 201 added = [f for f in wstatus.added if f in changed]
202 202 return modified, added
203 203
204 204
205 205 class kwtemplater(object):
206 206 '''
207 207 Sets up keyword templates, corresponding keyword regex, and
208 208 provides keyword substitution functions.
209 209 '''
210 210
211 211 def __init__(self, ui, repo, inc, exc):
212 212 self.ui = ui
213 213 self.repo = repo
214 214 self.match = match.match(repo.root, '', [], inc, exc)
215 215 self.restrict = kwtools['hgcmd'] in restricted.split()
216 216 self.postcommit = False
217 217
218 218 kwmaps = self.ui.configitems('keywordmaps')
219 219 if kwmaps: # override default templates
220 220 self.templates = dict(kwmaps)
221 221 else:
222 222 self.templates = _defaultkwmaps(self.ui)
223 223
224 224 @util.propertycache
225 225 def escape(self):
226 226 '''Returns bar-separated and escaped keywords.'''
227 227 return '|'.join(map(re.escape, self.templates.keys()))
228 228
229 229 @util.propertycache
230 230 def rekw(self):
231 231 '''Returns regex for unexpanded keywords.'''
232 232 return re.compile(r'\$(%s)\$' % self.escape)
233 233
234 234 @util.propertycache
235 235 def rekwexp(self):
236 236 '''Returns regex for expanded keywords.'''
237 237 return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)
238 238
239 239 def substitute(self, data, path, ctx, subfunc):
240 240 '''Replaces keywords in data with expanded template.'''
241 241 def kwsub(mobj):
242 242 kw = mobj.group(1)
243 243 ct = cmdutil.changeset_templater(self.ui, self.repo, False, None,
244 244 self.templates[kw], '', False)
245 245 self.ui.pushbuffer()
246 246 ct.show(ctx, root=self.repo.root, file=path)
247 247 ekw = templatefilters.firstline(self.ui.popbuffer())
248 248 return '$%s: %s $' % (kw, ekw)
249 249 return subfunc(kwsub, data)
250 250
251 251 def linkctx(self, path, fileid):
252 252 '''Similar to filelog.linkrev, but returns a changectx.'''
253 253 return self.repo.filectx(path, fileid=fileid).changectx()
254 254
255 255 def expand(self, path, node, data):
256 256 '''Returns data with keywords expanded.'''
257 257 if not self.restrict and self.match(path) and not util.binary(data):
258 258 ctx = self.linkctx(path, node)
259 259 return self.substitute(data, path, ctx, self.rekw.sub)
260 260 return data
261 261
262 262 def iskwfile(self, cand, ctx):
263 263 '''Returns subset of candidates which are configured for keyword
264 264 expansion but are not symbolic links.'''
265 265 return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]
266 266
267 267 def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
268 268 '''Overwrites selected files expanding/shrinking keywords.'''
269 269 if self.restrict or lookup or self.postcommit: # exclude kw_copy
270 270 candidates = self.iskwfile(candidates, ctx)
271 271 if not candidates:
272 272 return
273 273 kwcmd = self.restrict and lookup # kwexpand/kwshrink
274 274 if self.restrict or expand and lookup:
275 275 mf = ctx.manifest()
276 276 if self.restrict or rekw:
277 277 re_kw = self.rekw
278 278 else:
279 279 re_kw = self.rekwexp
280 280 if expand:
281 281 msg = _('overwriting %s expanding keywords\n')
282 282 else:
283 283 msg = _('overwriting %s shrinking keywords\n')
284 284 for f in candidates:
285 285 if self.restrict:
286 286 data = self.repo.file(f).read(mf[f])
287 287 else:
288 288 data = self.repo.wread(f)
289 289 if util.binary(data):
290 290 continue
291 291 if expand:
292 292 parents = ctx.parents()
293 293 if lookup:
294 294 ctx = self.linkctx(f, mf[f])
295 295 elif self.restrict and len(parents) > 1:
296 296 # merge commit
297 297 # in case of conflict f is in modified state during
298 298 # merge, even if f does not differ from f in parent
299 299 for p in parents:
300 300 if f in p and not p[f].cmp(ctx[f]):
301 301 ctx = p[f].changectx()
302 302 break
303 303 data, found = self.substitute(data, f, ctx, re_kw.subn)
304 304 elif self.restrict:
305 305 found = re_kw.search(data)
306 306 else:
307 307 data, found = _shrinktext(data, re_kw.subn)
308 308 if found:
309 309 self.ui.note(msg % f)
310 310 fp = self.repo.wvfs(f, "wb", atomictemp=True)
311 311 fp.write(data)
312 312 fp.close()
313 313 if kwcmd:
314 314 self.repo.dirstate.normal(f)
315 315 elif self.postcommit:
316 316 self.repo.dirstate.normallookup(f)
317 317
318 318 def shrink(self, fname, text):
319 319 '''Returns text with all keyword substitutions removed.'''
320 320 if self.match(fname) and not util.binary(text):
321 321 return _shrinktext(text, self.rekwexp.sub)
322 322 return text
323 323
324 324 def shrinklines(self, fname, lines):
325 325 '''Returns lines with keyword substitutions removed.'''
326 326 if self.match(fname):
327 327 text = ''.join(lines)
328 328 if not util.binary(text):
329 329 return _shrinktext(text, self.rekwexp.sub).splitlines(True)
330 330 return lines
331 331
332 332 def wread(self, fname, data):
333 333 '''If in restricted mode returns data read from wdir with
334 334 keyword substitutions removed.'''
335 335 if self.restrict:
336 336 return self.shrink(fname, data)
337 337 return data
338 338
339 339 class kwfilelog(filelog.filelog):
340 340 '''
341 341 Subclass of filelog to hook into its read, add, cmp methods.
342 342 Keywords are "stored" unexpanded, and processed on reading.
343 343 '''
344 344 def __init__(self, opener, kwt, path):
345 345 super(kwfilelog, self).__init__(opener, path)
346 346 self.kwt = kwt
347 347 self.path = path
348 348
349 349 def read(self, node):
350 350 '''Expands keywords when reading filelog.'''
351 351 data = super(kwfilelog, self).read(node)
352 352 if self.renamed(node):
353 353 return data
354 354 return self.kwt.expand(self.path, node, data)
355 355
356 356 def add(self, text, meta, tr, link, p1=None, p2=None):
357 357 '''Removes keyword substitutions when adding to filelog.'''
358 358 text = self.kwt.shrink(self.path, text)
359 359 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
360 360
361 361 def cmp(self, node, text):
362 362 '''Removes keyword substitutions for comparison.'''
363 363 text = self.kwt.shrink(self.path, text)
364 364 return super(kwfilelog, self).cmp(node, text)
365 365
366 366 def _status(ui, repo, wctx, kwt, *pats, **opts):
367 367 '''Bails out if [keyword] configuration is not active.
368 368 Returns status of working directory.'''
369 369 if kwt:
370 370 return repo.status(match=scmutil.match(wctx, pats, opts), clean=True,
371 371 unknown=opts.get('unknown') or opts.get('all'))
372 372 if ui.configitems('keyword'):
373 373 raise error.Abort(_('[keyword] patterns cannot match'))
374 374 raise error.Abort(_('no [keyword] patterns configured'))
375 375
376 376 def _kwfwrite(ui, repo, expand, *pats, **opts):
377 377 '''Selects files and passes them to kwtemplater.overwrite.'''
378 378 wctx = repo[None]
379 379 if len(wctx.parents()) > 1:
380 380 raise error.Abort(_('outstanding uncommitted merge'))
381 381 kwt = kwtools['templater']
382 382 with repo.wlock():
383 383 status = _status(ui, repo, wctx, kwt, *pats, **opts)
384 384 if status.modified or status.added or status.removed or status.deleted:
385 385 raise error.Abort(_('outstanding uncommitted changes'))
386 386 kwt.overwrite(wctx, status.clean, True, expand)
387 387
388 388 @command('kwdemo',
389 389 [('d', 'default', None, _('show default keyword template maps')),
390 390 ('f', 'rcfile', '',
391 391 _('read maps from rcfile'), _('FILE'))],
392 392 _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
393 393 optionalrepo=True)
394 394 def demo(ui, repo, *args, **opts):
395 395 '''print [keywordmaps] configuration and an expansion example
396 396
397 397 Show current, custom, or default keyword template maps and their
398 398 expansions.
399 399
400 400 Extend the current configuration by specifying maps as arguments
401 401 and using -f/--rcfile to source an external hgrc file.
402 402
403 403 Use -d/--default to disable current configuration.
404 404
405 405 See :hg:`help templates` for information on templates and filters.
406 406 '''
407 407 def demoitems(section, items):
408 408 ui.write('[%s]\n' % section)
409 409 for k, v in sorted(items):
410 410 ui.write('%s = %s\n' % (k, v))
411 411
412 412 fn = 'demo.txt'
413 413 tmpdir = tempfile.mkdtemp('', 'kwdemo.')
414 414 ui.note(_('creating temporary repository at %s\n') % tmpdir)
415 415 if repo is None:
416 416 baseui = ui
417 417 else:
418 418 baseui = repo.baseui
419 419 repo = localrepo.localrepository(baseui, tmpdir, True)
420 420 ui.setconfig('keyword', fn, '', 'keyword')
421 421 svn = ui.configbool('keywordset', 'svn')
422 422 # explicitly set keywordset for demo output
423 423 ui.setconfig('keywordset', 'svn', svn, 'keyword')
424 424
425 425 uikwmaps = ui.configitems('keywordmaps')
426 426 if args or opts.get('rcfile'):
427 427 ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
428 428 if uikwmaps:
429 429 ui.status(_('\textending current template maps\n'))
430 430 if opts.get('default') or not uikwmaps:
431 431 if svn:
432 432 ui.status(_('\toverriding default svn keywordset\n'))
433 433 else:
434 434 ui.status(_('\toverriding default cvs keywordset\n'))
435 435 if opts.get('rcfile'):
436 436 ui.readconfig(opts.get('rcfile'))
437 437 if args:
438 438 # simulate hgrc parsing
439 439 rcmaps = '[keywordmaps]\n%s\n' % '\n'.join(args)
440 440 repo.vfs.write('hgrc', rcmaps)
441 441 ui.readconfig(repo.join('hgrc'))
442 442 kwmaps = dict(ui.configitems('keywordmaps'))
443 443 elif opts.get('default'):
444 444 if svn:
445 445 ui.status(_('\n\tconfiguration using default svn keywordset\n'))
446 446 else:
447 447 ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
448 448 kwmaps = _defaultkwmaps(ui)
449 449 if uikwmaps:
450 450 ui.status(_('\tdisabling current template maps\n'))
451 451 for k, v in kwmaps.iteritems():
452 452 ui.setconfig('keywordmaps', k, v, 'keyword')
453 453 else:
454 454 ui.status(_('\n\tconfiguration using current keyword template maps\n'))
455 455 if uikwmaps:
456 456 kwmaps = dict(uikwmaps)
457 457 else:
458 458 kwmaps = _defaultkwmaps(ui)
459 459
460 460 uisetup(ui)
461 461 reposetup(ui, repo)
462 462 ui.write(('[extensions]\nkeyword =\n'))
463 463 demoitems('keyword', ui.configitems('keyword'))
464 464 demoitems('keywordset', ui.configitems('keywordset'))
465 465 demoitems('keywordmaps', kwmaps.iteritems())
466 466 keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
467 467 repo.wvfs.write(fn, keywords)
468 468 repo[None].add([fn])
469 469 ui.note(_('\nkeywords written to %s:\n') % fn)
470 470 ui.note(keywords)
471 471 with repo.wlock():
472 472 repo.dirstate.setbranch('demobranch')
473 473 for name, cmd in ui.configitems('hooks'):
474 474 if name.split('.', 1)[0].find('commit') > -1:
475 475 repo.ui.setconfig('hooks', name, '', 'keyword')
476 476 msg = _('hg keyword configuration and expansion example')
477 477 ui.note(("hg ci -m '%s'\n" % msg))
478 478 repo.commit(text=msg)
479 479 ui.status(_('\n\tkeywords expanded\n'))
480 480 ui.write(repo.wread(fn))
481 481 repo.wvfs.rmtree(repo.root)
482 482
483 483 @command('kwexpand',
484 484 commands.walkopts,
485 485 _('hg kwexpand [OPTION]... [FILE]...'),
486 486 inferrepo=True)
487 487 def expand(ui, repo, *pats, **opts):
488 488 '''expand keywords in the working directory
489 489
490 490 Run after (re)enabling keyword expansion.
491 491
492 492 kwexpand refuses to run if given files contain local changes.
493 493 '''
494 494 # 3rd argument sets expansion to True
495 495 _kwfwrite(ui, repo, True, *pats, **opts)
496 496
497 497 @command('kwfiles',
498 498 [('A', 'all', None, _('show keyword status flags of all files')),
499 499 ('i', 'ignore', None, _('show files excluded from expansion')),
500 500 ('u', 'unknown', None, _('only show unknown (not tracked) files')),
501 501 ] + commands.walkopts,
502 502 _('hg kwfiles [OPTION]... [FILE]...'),
503 503 inferrepo=True)
504 504 def files(ui, repo, *pats, **opts):
505 505 '''show files configured for keyword expansion
506 506
507 507 List which files in the working directory are matched by the
508 508 [keyword] configuration patterns.
509 509
510 510 Useful to prevent inadvertent keyword expansion and to speed up
511 511 execution by including only files that are actual candidates for
512 512 expansion.
513 513
514 514 See :hg:`help keyword` on how to construct patterns both for
515 515 inclusion and exclusion of files.
516 516
517 517 With -A/--all and -v/--verbose the codes used to show the status
518 518 of files are::
519 519
520 520 K = keyword expansion candidate
521 521 k = keyword expansion candidate (not tracked)
522 522 I = ignored
523 523 i = ignored (not tracked)
524 524 '''
525 525 kwt = kwtools['templater']
526 526 wctx = repo[None]
527 527 status = _status(ui, repo, wctx, kwt, *pats, **opts)
528 528 if pats:
529 529 cwd = repo.getcwd()
530 530 else:
531 531 cwd = ''
532 532 files = []
533 533 if not opts.get('unknown') or opts.get('all'):
534 534 files = sorted(status.modified + status.added + status.clean)
535 535 kwfiles = kwt.iskwfile(files, wctx)
536 536 kwdeleted = kwt.iskwfile(status.deleted, wctx)
537 537 kwunknown = kwt.iskwfile(status.unknown, wctx)
538 538 if not opts.get('ignore') or opts.get('all'):
539 539 showfiles = kwfiles, kwdeleted, kwunknown
540 540 else:
541 541 showfiles = [], [], []
542 542 if opts.get('all') or opts.get('ignore'):
543 543 showfiles += ([f for f in files if f not in kwfiles],
544 544 [f for f in status.unknown if f not in kwunknown])
545 545 kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
546 546 kwstates = zip(kwlabels, 'K!kIi', showfiles)
547 547 fm = ui.formatter('kwfiles', opts)
548 548 fmt = '%.0s%s\n'
549 549 if opts.get('all') or ui.verbose:
550 550 fmt = '%s %s\n'
551 551 for kwstate, char, filenames in kwstates:
552 552 label = 'kwfiles.' + kwstate
553 553 for f in filenames:
554 554 fm.startitem()
555 555 fm.write('kwstatus path', fmt, char,
556 556 repo.pathto(f, cwd), label=label)
557 557 fm.end()
558 558
559 559 @command('kwshrink',
560 560 commands.walkopts,
561 561 _('hg kwshrink [OPTION]... [FILE]...'),
562 562 inferrepo=True)
563 563 def shrink(ui, repo, *pats, **opts):
564 564 '''revert expanded keywords in the working directory
565 565
566 566 Must be run before changing/disabling active keywords.
567 567
568 568 kwshrink refuses to run if given files contain local changes.
569 569 '''
570 570 # 3rd argument sets expansion to False
571 571 _kwfwrite(ui, repo, False, *pats, **opts)
572 572
573 573
574 574 def uisetup(ui):
575 575 ''' Monkeypatches dispatch._parse to retrieve user command.'''
576 576
577 577 def kwdispatch_parse(orig, ui, args):
578 578 '''Monkeypatch dispatch._parse to obtain running hg command.'''
579 579 cmd, func, args, options, cmdoptions = orig(ui, args)
580 580 kwtools['hgcmd'] = cmd
581 581 return cmd, func, args, options, cmdoptions
582 582
583 583 extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
584 584
585 585 def reposetup(ui, repo):
586 586 '''Sets up repo as kwrepo for keyword substitution.
587 587 Overrides file method to return kwfilelog instead of filelog
588 588 if file matches user configuration.
589 589 Wraps commit to overwrite configured files with updated
590 590 keyword substitutions.
591 591 Monkeypatches patch and webcommands.'''
592 592
593 593 try:
594 594 if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
595 595 or '.hg' in util.splitpath(repo.root)
596 596 or repo._url.startswith('bundle:')):
597 597 return
598 598 except AttributeError:
599 599 pass
600 600
601 601 inc, exc = [], ['.hg*']
602 602 for pat, opt in ui.configitems('keyword'):
603 603 if opt != 'ignore':
604 604 inc.append(pat)
605 605 else:
606 606 exc.append(pat)
607 607 if not inc:
608 608 return
609 609
610 610 kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)
611 611
612 612 class kwrepo(repo.__class__):
613 613 def file(self, f):
614 614 if f[0] == '/':
615 615 f = f[1:]
616 616 return kwfilelog(self.svfs, kwt, f)
617 617
618 618 def wread(self, filename):
619 619 data = super(kwrepo, self).wread(filename)
620 620 return kwt.wread(filename, data)
621 621
622 622 def commit(self, *args, **opts):
623 623 # use custom commitctx for user commands
624 624 # other extensions can still wrap repo.commitctx directly
625 625 self.commitctx = self.kwcommitctx
626 626 try:
627 627 return super(kwrepo, self).commit(*args, **opts)
628 628 finally:
629 629 del self.commitctx
630 630
631 631 def kwcommitctx(self, ctx, error=False):
632 632 n = super(kwrepo, self).commitctx(ctx, error)
633 633 # no lock needed, only called from repo.commit() which already locks
634 634 if not kwt.postcommit:
635 635 restrict = kwt.restrict
636 636 kwt.restrict = True
637 637 kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
638 638 False, True)
639 639 kwt.restrict = restrict
640 640 return n
641 641
642 642 def rollback(self, dryrun=False, force=False):
643 643 wlock = self.wlock()
644 644 origrestrict = kwt.restrict
645 645 try:
646 646 if not dryrun:
647 647 changed = self['.'].files()
648 648 ret = super(kwrepo, self).rollback(dryrun, force)
649 649 if not dryrun:
650 650 ctx = self['.']
651 651 modified, added = _preselect(ctx.status(), changed)
652 652 kwt.restrict = False
653 653 kwt.overwrite(ctx, modified, True, True)
654 654 kwt.overwrite(ctx, added, True, False)
655 655 return ret
656 656 finally:
657 657 kwt.restrict = origrestrict
658 658 wlock.release()
659 659
660 660 # monkeypatches
661 661 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
662 662 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
663 663 rejects or conflicts due to expanded keywords in working dir.'''
664 664 orig(self, ui, gp, backend, store, eolmode)
665 665 # shrink keywords read from working dir
666 666 self.lines = kwt.shrinklines(self.fname, self.lines)
667 667
668 668 def kwdiff(orig, *args, **kwargs):
669 669 '''Monkeypatch patch.diff to avoid expansion.'''
670 670 kwt.restrict = True
671 671 return orig(*args, **kwargs)
672 672
673 673 def kwweb_skip(orig, web, req, tmpl):
674 674 '''Wraps webcommands.x turning off keyword expansion.'''
675 675 kwt.match = util.never
676 676 return orig(web, req, tmpl)
677 677
678 678 def kw_amend(orig, ui, repo, commitfunc, old, extra, pats, opts):
679 679 '''Wraps cmdutil.amend expanding keywords after amend.'''
680 680 with repo.wlock():
681 681 kwt.postcommit = True
682 682 newid = orig(ui, repo, commitfunc, old, extra, pats, opts)
683 683 if newid != old.node():
684 684 ctx = repo[newid]
685 685 kwt.restrict = True
686 686 kwt.overwrite(ctx, ctx.files(), False, True)
687 687 kwt.restrict = False
688 688 return newid
689 689
690 690 def kw_copy(orig, ui, repo, pats, opts, rename=False):
691 691 '''Wraps cmdutil.copy so that copy/rename destinations do not
692 692 contain expanded keywords.
693 693 Note that the source of a regular file destination may also be a
694 694 symlink:
695 695 hg cp sym x -> x is symlink
696 696 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
697 697 For the latter we have to follow the symlink to find out whether its
698 698 target is configured for expansion and we therefore must unexpand the
699 699 keywords in the destination.'''
700 700 with repo.wlock():
701 701 orig(ui, repo, pats, opts, rename)
702 702 if opts.get('dry_run'):
703 703 return
704 704 wctx = repo[None]
705 705 cwd = repo.getcwd()
706 706
707 707 def haskwsource(dest):
708 708 '''Returns true if dest is a regular file and configured for
709 709 expansion or a symlink which points to a file configured for
710 710 expansion. '''
711 711 source = repo.dirstate.copied(dest)
712 712 if 'l' in wctx.flags(source):
713 713 source = pathutil.canonpath(repo.root, cwd,
714 714 os.path.realpath(source))
715 715 return kwt.match(source)
716 716
717 717 candidates = [f for f in repo.dirstate.copies() if
718 718 'l' not in wctx.flags(f) and haskwsource(f)]
719 719 kwt.overwrite(wctx, candidates, False, False)
720 720
721 721 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
722 722 '''Wraps record.dorecord expanding keywords after recording.'''
723 723 with repo.wlock():
724 724 # record returns 0 even when nothing has changed
725 725 # therefore compare nodes before and after
726 726 kwt.postcommit = True
727 727 ctx = repo['.']
728 728 wstatus = ctx.status()
729 729 ret = orig(ui, repo, commitfunc, *pats, **opts)
730 730 recctx = repo['.']
731 731 if ctx != recctx:
732 732 modified, added = _preselect(wstatus, recctx.files())
733 733 kwt.restrict = False
734 734 kwt.overwrite(recctx, modified, False, True)
735 735 kwt.overwrite(recctx, added, False, True, True)
736 736 kwt.restrict = True
737 737 return ret
738 738
739 739 def kwfilectx_cmp(orig, self, fctx):
740 740 # keyword affects data size, comparing wdir and filelog size does
741 741 # not make sense
742 742 if (fctx._filenode is None and
743 743 (self._repo._encodefilterpats or
744 744 kwt.match(fctx.path()) and 'l' not in fctx.flags() or
745 745 self.size() - 4 == fctx.size()) or
746 746 self.size() == fctx.size()):
747 747 return self._filelog.cmp(self._filenode, fctx.data())
748 748 return True
749 749
750 750 extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
751 751 extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
752 752 extensions.wrapfunction(patch, 'diff', kwdiff)
753 753 extensions.wrapfunction(cmdutil, 'amend', kw_amend)
754 754 extensions.wrapfunction(cmdutil, 'copy', kw_copy)
755 755 extensions.wrapfunction(cmdutil, 'dorecord', kw_dorecord)
756 756 for c in 'annotate changeset rev filediff diff'.split():
757 757 extensions.wrapfunction(webcommands, c, kwweb_skip)
758 758 repo.__class__ = kwrepo
@@ -1,140 +1,140 b''
1 1 # Copyright 2009-2010 Gregory P. Ward
2 2 # Copyright 2009-2010 Intelerad Medical Systems Incorporated
3 3 # Copyright 2010-2011 Fog Creek Software
4 4 # Copyright 2010-2011 Unity Technologies
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''track large binary files
10 10
11 11 Large binary files tend to be not very compressible, not very
12 12 diffable, and not at all mergeable. Such files are not handled
13 13 efficiently by Mercurial's storage format (revlog), which is based on
14 14 compressed binary deltas; storing large binary files as regular
15 15 Mercurial files wastes bandwidth and disk space and increases
16 16 Mercurial's memory usage. The largefiles extension addresses these
17 17 problems by adding a centralized client-server layer on top of
18 18 Mercurial: largefiles live in a *central store* out on the network
19 19 somewhere, and you only fetch the revisions that you need when you
20 20 need them.
21 21
22 22 largefiles works by maintaining a "standin file" in .hglf/ for each
23 23 largefile. The standins are small (41 bytes: an SHA-1 hash plus
24 24 newline) and are tracked by Mercurial. Largefile revisions are
25 25 identified by the SHA-1 hash of their contents, which is written to
26 26 the standin. largefiles uses that revision ID to get/put largefile
27 27 revisions from/to the central store. This saves both disk space and
28 28 bandwidth, since you don't need to retrieve all historical revisions
29 29 of large files when you clone or pull.
30 30
31 31 To start a new repository or add new large binary files, just add
32 32 --large to your :hg:`add` command. For example::
33 33
34 34 $ dd if=/dev/urandom of=randomdata count=2000
35 35 $ hg add --large randomdata
36 36 $ hg commit -m "add randomdata as a largefile"
37 37
38 38 When you push a changeset that adds/modifies largefiles to a remote
39 39 repository, its largefile revisions will be uploaded along with it.
40 40 Note that the remote Mercurial must also have the largefiles extension
41 41 enabled for this to work.
42 42
43 43 When you pull a changeset that affects largefiles from a remote
44 44 repository, the largefiles for the changeset will by default not be
45 45 pulled down. However, when you update to such a revision, any
46 46 largefiles needed by that revision are downloaded and cached (if
47 47 they have never been downloaded before). One way to pull largefiles
48 48 when pulling is thus to use --update, which will update your working
49 49 copy to the latest pulled revision (and thereby downloading any new
50 50 largefiles).
51 51
52 52 If you want to pull largefiles you don't need for update yet, then
53 53 you can use pull with the `--lfrev` option or the :hg:`lfpull` command.
54 54
55 55 If you know you are pulling from a non-default location and want to
56 56 download all the largefiles that correspond to the new changesets at
57 57 the same time, then you can pull with `--lfrev "pulled()"`.
58 58
59 59 If you just want to ensure that you will have the largefiles needed to
60 60 merge or rebase with new heads that you are pulling, then you can pull
61 61 with `--lfrev "head(pulled())"` flag to pre-emptively download any largefiles
62 62 that are new in the heads you are pulling.
63 63
64 64 Keep in mind that network access may now be required to update to
65 65 changesets that you have not previously updated to. The nature of the
66 66 largefiles extension means that updating is no longer guaranteed to
67 67 be a local-only operation.
68 68
69 69 If you already have large files tracked by Mercurial without the
70 70 largefiles extension, you will need to convert your repository in
71 71 order to benefit from largefiles. This is done with the
72 72 :hg:`lfconvert` command::
73 73
74 74 $ hg lfconvert --size 10 oldrepo newrepo
75 75
76 76 In repositories that already have largefiles in them, any new file
77 77 over 10MB will automatically be added as a largefile. To change this
78 78 threshold, set ``largefiles.minsize`` in your Mercurial config file
79 79 to the minimum size in megabytes to track as a largefile, or use the
80 80 --lfsize option to the add command (also in megabytes)::
81 81
82 82 [largefiles]
83 83 minsize = 2
84 84
85 85 $ hg add --lfsize 2
86 86
87 87 The ``largefiles.patterns`` config option allows you to specify a list
88 88 of filename patterns (see :hg:`help patterns`) that should always be
89 89 tracked as largefiles::
90 90
91 91 [largefiles]
92 92 patterns =
93 93 *.jpg
94 94 re:.*\.(png|bmp)$
95 95 library.zip
96 96 content/audio/*
97 97
98 98 Files that match one of these patterns will be added as largefiles
99 99 regardless of their size.
100 100
101 101 The ``largefiles.minsize`` and ``largefiles.patterns`` config options
102 102 will be ignored for any repositories not already containing a
103 103 largefile. To add the first largefile to a repository, you must
104 104 explicitly do so with the --large flag passed to the :hg:`add`
105 105 command.
106 106 '''
107 107 from __future__ import absolute_import
108 108
109 109 from mercurial import (
110 110 hg,
111 111 localrepo,
112 112 )
113 113
114 114 from . import (
115 115 lfcommands,
116 116 overrides,
117 117 proto,
118 118 reposetup,
119 119 uisetup as uisetupmod,
120 120 )
121 121
122 # Note for extension authors: ONLY specify testedwith = 'internal' for
122 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
123 123 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
124 124 # be specifying the version(s) of Mercurial they are tested with, or
125 125 # leave the attribute unspecified.
126 testedwith = 'internal'
126 testedwith = 'ships-with-hg-core'
127 127
128 128 reposetup = reposetup.reposetup
129 129
130 130 def featuresetup(ui, supported):
131 131 # don't die on seeing a repo with the largefiles requirement
132 132 supported |= set(['largefiles'])
133 133
134 134 def uisetup(ui):
135 135 localrepo.localrepository.featuresetupfuncs.add(featuresetup)
136 136 hg.wirepeersetupfuncs.append(proto.wirereposetup)
137 137 uisetupmod.uisetup(ui)
138 138
139 139 cmdtable = lfcommands.cmdtable
140 140 revsetpredicate = overrides.revsetpredicate
@@ -1,129 +1,129 b''
1 1 # logtoprocess.py - send ui.log() data to a subprocess
2 2 #
3 3 # Copyright 2016 Facebook, Inc.
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 """Send ui.log() data to a subprocess (EXPERIMENTAL)
8 8
9 9 This extension lets you specify a shell command per ui.log() event,
10 10 sending all remaining arguments to as environment variables to that command.
11 11
12 12 Each positional argument to the method results in a `MSG[N]` key in the
13 13 environment, starting at 1 (so `MSG1`, `MSG2`, etc.). Each keyword argument
14 14 is set as a `OPT_UPPERCASE_KEY` variable (so the key is uppercased, and
15 15 prefixed with `OPT_`). The original event name is passed in the `EVENT`
16 16 environment variable, and the process ID of mercurial is given in `HGPID`.
17 17
18 18 So given a call `ui.log('foo', 'bar', 'baz', spam='eggs'), a script configured
19 19 for the `foo` event can expect an environment with `MSG1=bar`, `MSG2=baz`, and
20 20 `OPT_SPAM=eggs`.
21 21
22 22 Scripts are configured in the `[logtoprocess]` section, each key an event name.
23 23 For example::
24 24
25 25 [logtoprocess]
26 26 commandexception = echo "$MSG2$MSG3" > /var/log/mercurial_exceptions.log
27 27
28 28 would log the warning message and traceback of any failed command dispatch.
29 29
30 30 Scripts are run asychronously as detached daemon processes; mercurial will
31 31 not ensure that they exit cleanly.
32 32
33 33 """
34 34
35 35 from __future__ import absolute_import
36 36
37 37 import itertools
38 38 import os
39 39 import platform
40 40 import subprocess
41 41 import sys
42 42
43 # Note for extension authors: ONLY specify testedwith = 'internal' for
43 # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for
44 44 # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should
45 45 # be specifying the version(s) of Mercurial they are tested with, or
46 46 # leave the attribute unspecified.
47 testedwith = 'internal'
47 testedwith = 'ships-with-hg-core'
48 48
49 49 def uisetup(ui):
50 50 if platform.system() == 'Windows':
51 51 # no fork on Windows, but we can create a detached process
52 52 # https://msdn.microsoft.com/en-us/library/windows/desktop/ms684863.aspx
53 53 # No stdlib constant exists for this value
54 54 DETACHED_PROCESS = 0x00000008
55 55 _creationflags = DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP
56 56
57 57 def runshellcommand(script, env):
58 58 # we can't use close_fds *and* redirect stdin. I'm not sure that we
59 59 # need to because the detached process has no console connection.
60 60 subprocess.Popen(
61 61 script, shell=True, env=env, close_fds=True,
62 62 creationflags=_creationflags)
63 63 else:
64 64 def runshellcommand(script, env):
65 65 # double-fork to completely detach from the parent process
66 66 # based on http://code.activestate.com/recipes/278731
67 67 pid = os.fork()
68 68 if pid:
69 69 # parent
70 70 return
71 71 # subprocess.Popen() forks again, all we need to add is
72 72 # flag the new process as a new session.
73 73 if sys.version_info < (3, 2):
74 74 newsession = {'preexec_fn': os.setsid}
75 75 else:
76 76 newsession = {'start_new_session': True}
77 77 try:
78 78 # connect stdin to devnull to make sure the subprocess can't
79 79 # muck up that stream for mercurial.
80 80 subprocess.Popen(
81 81 script, shell=True, stdin=open(os.devnull, 'r'), env=env,
82 82 close_fds=True, **newsession)
83 83 finally:
84 84 # mission accomplished, this child needs to exit and not
85 85 # continue the hg process here.
86 86 os._exit(0)
87 87
88 88 class logtoprocessui(ui.__class__):
89 89 def log(self, event, *msg, **opts):
90 90 """Map log events to external commands
91 91
92 92 Arguments are passed on as environment variables.
93 93
94 94 """
95 95 script = self.config('logtoprocess', event)
96 96 if script:
97 97 if msg:
98 98 # try to format the log message given the remaining
99 99 # arguments
100 100 try:
101 101 # Python string formatting with % either uses a
102 102 # dictionary *or* tuple, but not both. If we have
103 103 # keyword options, assume we need a mapping.
104 104 formatted = msg[0] % (opts or msg[1:])
105 105 except (TypeError, KeyError):
106 106 # Failed to apply the arguments, ignore
107 107 formatted = msg[0]
108 108 messages = (formatted,) + msg[1:]
109 109 else:
110 110 messages = msg
111 111 # positional arguments are listed as MSG[N] keys in the
112 112 # environment
113 113 msgpairs = (
114 114 ('MSG{0:d}'.format(i), str(m))
115 115 for i, m in enumerate(messages, 1))
116 116 # keyword arguments get prefixed with OPT_ and uppercased
117 117 optpairs = (
118 118 ('OPT_{0}'.format(key.upper()), str(value))
119 119 for key, value in opts.iteritems())
120 120 env = dict(itertools.chain(os.environ.items(),
121 121 msgpairs, optpairs),
122 122 EVENT=event, HGPID=str(os.getpid()))
123 123 # Connect stdin to /dev/null to prevent child processes messing
124 124 # with mercurial's stdin.
125 125 runshellcommand(script, env)
126 126 return super(logtoprocessui, self).log(event, *msg, **opts)
127 127
128 128 # Replace the class for this instance and all clones created from it:
129 129 ui.__class__ = logtoprocessui
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated. Show full diff
General Comments 0
You need to be logged in to leave comments. Login now