##// END OF EJS Templates
templater: remove noop calls of parsestring(s, quoted=False) (API)...
Yuya Nishihara -
r24987:fd7287f0 default
parent child Browse files
Show More
@@ -1,912 +1,910 b''
1 1 # bugzilla.py - bugzilla integration for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 # Copyright 2011-4 Jim Hague <jim.hague@acm.org>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''hooks for integrating with the Bugzilla bug tracker
10 10
11 11 This hook extension adds comments on bugs in Bugzilla when changesets
12 12 that refer to bugs by Bugzilla ID are seen. The comment is formatted using
13 13 the Mercurial template mechanism.
14 14
15 15 The bug references can optionally include an update for Bugzilla of the
16 16 hours spent working on the bug. Bugs can also be marked fixed.
17 17
18 18 Three basic modes of access to Bugzilla are provided:
19 19
20 20 1. Access via the Bugzilla XMLRPC interface. Requires Bugzilla 3.4 or later.
21 21
22 22 2. Check data via the Bugzilla XMLRPC interface and submit bug change
23 23 via email to Bugzilla email interface. Requires Bugzilla 3.4 or later.
24 24
25 25 3. Writing directly to the Bugzilla database. Only Bugzilla installations
26 26 using MySQL are supported. Requires Python MySQLdb.
27 27
28 28 Writing directly to the database is susceptible to schema changes, and
29 29 relies on a Bugzilla contrib script to send out bug change
30 30 notification emails. This script runs as the user running Mercurial,
31 31 must be run on the host with the Bugzilla install, and requires
32 32 permission to read Bugzilla configuration details and the necessary
33 33 MySQL user and password to have full access rights to the Bugzilla
34 34 database. For these reasons this access mode is now considered
35 35 deprecated, and will not be updated for new Bugzilla versions going
36 36 forward. Only adding comments is supported in this access mode.
37 37
38 38 Access via XMLRPC needs a Bugzilla username and password to be specified
39 39 in the configuration. Comments are added under that username. Since the
40 40 configuration must be readable by all Mercurial users, it is recommended
41 41 that the rights of that user are restricted in Bugzilla to the minimum
42 42 necessary to add comments. Marking bugs fixed requires Bugzilla 4.0 and later.
43 43
44 44 Access via XMLRPC/email uses XMLRPC to query Bugzilla, but sends
45 45 email to the Bugzilla email interface to submit comments to bugs.
46 46 The From: address in the email is set to the email address of the Mercurial
47 47 user, so the comment appears to come from the Mercurial user. In the event
48 48 that the Mercurial user email is not recognized by Bugzilla as a Bugzilla
49 49 user, the email associated with the Bugzilla username used to log into
50 50 Bugzilla is used instead as the source of the comment. Marking bugs fixed
51 51 works on all supported Bugzilla versions.
52 52
53 53 Configuration items common to all access modes:
54 54
55 55 bugzilla.version
56 56 The access type to use. Values recognized are:
57 57
58 58 :``xmlrpc``: Bugzilla XMLRPC interface.
59 59 :``xmlrpc+email``: Bugzilla XMLRPC and email interfaces.
60 60 :``3.0``: MySQL access, Bugzilla 3.0 and later.
61 61 :``2.18``: MySQL access, Bugzilla 2.18 and up to but not
62 62 including 3.0.
63 63 :``2.16``: MySQL access, Bugzilla 2.16 and up to but not
64 64 including 2.18.
65 65
66 66 bugzilla.regexp
67 67 Regular expression to match bug IDs for update in changeset commit message.
68 68 It must contain one "()" named group ``<ids>`` containing the bug
69 69 IDs separated by non-digit characters. It may also contain
70 70 a named group ``<hours>`` with a floating-point number giving the
71 71 hours worked on the bug. If no named groups are present, the first
72 72 "()" group is assumed to contain the bug IDs, and work time is not
73 73 updated. The default expression matches ``Bug 1234``, ``Bug no. 1234``,
74 74 ``Bug number 1234``, ``Bugs 1234,5678``, ``Bug 1234 and 5678`` and
75 75 variations thereof, followed by an hours number prefixed by ``h`` or
76 76 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
77 77
78 78 bugzilla.fixregexp
79 79 Regular expression to match bug IDs for marking fixed in changeset
80 80 commit message. This must contain a "()" named group ``<ids>`` containing
81 81 the bug IDs separated by non-digit characters. It may also contain
82 82 a named group ``<hours>`` with a floating-point number giving the
83 83 hours worked on the bug. If no named groups are present, the first
84 84 "()" group is assumed to contain the bug IDs, and work time is not
85 85 updated. The default expression matches ``Fixes 1234``, ``Fixes bug 1234``,
86 86 ``Fixes bugs 1234,5678``, ``Fixes 1234 and 5678`` and
87 87 variations thereof, followed by an hours number prefixed by ``h`` or
88 88 ``hours``, e.g. ``hours 1.5``. Matching is case insensitive.
89 89
90 90 bugzilla.fixstatus
91 91 The status to set a bug to when marking fixed. Default ``RESOLVED``.
92 92
93 93 bugzilla.fixresolution
94 94 The resolution to set a bug to when marking fixed. Default ``FIXED``.
95 95
96 96 bugzilla.style
97 97 The style file to use when formatting comments.
98 98
99 99 bugzilla.template
100 100 Template to use when formatting comments. Overrides style if
101 101 specified. In addition to the usual Mercurial keywords, the
102 102 extension specifies:
103 103
104 104 :``{bug}``: The Bugzilla bug ID.
105 105 :``{root}``: The full pathname of the Mercurial repository.
106 106 :``{webroot}``: Stripped pathname of the Mercurial repository.
107 107 :``{hgweb}``: Base URL for browsing Mercurial repositories.
108 108
109 109 Default ``changeset {node|short} in repo {root} refers to bug
110 110 {bug}.\\ndetails:\\n\\t{desc|tabindent}``
111 111
112 112 bugzilla.strip
113 113 The number of path separator characters to strip from the front of
114 114 the Mercurial repository path (``{root}`` in templates) to produce
115 115 ``{webroot}``. For example, a repository with ``{root}``
116 116 ``/var/local/my-project`` with a strip of 2 gives a value for
117 117 ``{webroot}`` of ``my-project``. Default 0.
118 118
119 119 web.baseurl
120 120 Base URL for browsing Mercurial repositories. Referenced from
121 121 templates as ``{hgweb}``.
122 122
123 123 Configuration items common to XMLRPC+email and MySQL access modes:
124 124
125 125 bugzilla.usermap
126 126 Path of file containing Mercurial committer email to Bugzilla user email
127 127 mappings. If specified, the file should contain one mapping per
128 128 line::
129 129
130 130 committer = Bugzilla user
131 131
132 132 See also the ``[usermap]`` section.
133 133
134 134 The ``[usermap]`` section is used to specify mappings of Mercurial
135 135 committer email to Bugzilla user email. See also ``bugzilla.usermap``.
136 136 Contains entries of the form ``committer = Bugzilla user``.
137 137
138 138 XMLRPC access mode configuration:
139 139
140 140 bugzilla.bzurl
141 141 The base URL for the Bugzilla installation.
142 142 Default ``http://localhost/bugzilla``.
143 143
144 144 bugzilla.user
145 145 The username to use to log into Bugzilla via XMLRPC. Default
146 146 ``bugs``.
147 147
148 148 bugzilla.password
149 149 The password for Bugzilla login.
150 150
151 151 XMLRPC+email access mode uses the XMLRPC access mode configuration items,
152 152 and also:
153 153
154 154 bugzilla.bzemail
155 155 The Bugzilla email address.
156 156
157 157 In addition, the Mercurial email settings must be configured. See the
158 158 documentation in hgrc(5), sections ``[email]`` and ``[smtp]``.
159 159
160 160 MySQL access mode configuration:
161 161
162 162 bugzilla.host
163 163 Hostname of the MySQL server holding the Bugzilla database.
164 164 Default ``localhost``.
165 165
166 166 bugzilla.db
167 167 Name of the Bugzilla database in MySQL. Default ``bugs``.
168 168
169 169 bugzilla.user
170 170 Username to use to access MySQL server. Default ``bugs``.
171 171
172 172 bugzilla.password
173 173 Password to use to access MySQL server.
174 174
175 175 bugzilla.timeout
176 176 Database connection timeout (seconds). Default 5.
177 177
178 178 bugzilla.bzuser
179 179 Fallback Bugzilla user name to record comments with, if changeset
180 180 committer cannot be found as a Bugzilla user.
181 181
182 182 bugzilla.bzdir
183 183 Bugzilla install directory. Used by default notify. Default
184 184 ``/var/www/html/bugzilla``.
185 185
186 186 bugzilla.notify
187 187 The command to run to get Bugzilla to send bug change notification
188 188 emails. Substitutes from a map with 3 keys, ``bzdir``, ``id`` (bug
189 189 id) and ``user`` (committer bugzilla email). Default depends on
190 190 version; from 2.18 it is "cd %(bzdir)s && perl -T
191 191 contrib/sendbugmail.pl %(id)s %(user)s".
192 192
193 193 Activating the extension::
194 194
195 195 [extensions]
196 196 bugzilla =
197 197
198 198 [hooks]
199 199 # run bugzilla hook on every change pulled or pushed in here
200 200 incoming.bugzilla = python:hgext.bugzilla.hook
201 201
202 202 Example configurations:
203 203
204 204 XMLRPC example configuration. This uses the Bugzilla at
205 205 ``http://my-project.org/bugzilla``, logging in as user
206 206 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
207 207 collection of Mercurial repositories in ``/var/local/hg/repos/``,
208 208 with a web interface at ``http://my-project.org/hg``. ::
209 209
210 210 [bugzilla]
211 211 bzurl=http://my-project.org/bugzilla
212 212 user=bugmail@my-project.org
213 213 password=plugh
214 214 version=xmlrpc
215 215 template=Changeset {node|short} in {root|basename}.
216 216 {hgweb}/{webroot}/rev/{node|short}\\n
217 217 {desc}\\n
218 218 strip=5
219 219
220 220 [web]
221 221 baseurl=http://my-project.org/hg
222 222
223 223 XMLRPC+email example configuration. This uses the Bugzilla at
224 224 ``http://my-project.org/bugzilla``, logging in as user
225 225 ``bugmail@my-project.org`` with password ``plugh``. It is used with a
226 226 collection of Mercurial repositories in ``/var/local/hg/repos/``,
227 227 with a web interface at ``http://my-project.org/hg``. Bug comments
228 228 are sent to the Bugzilla email address
229 229 ``bugzilla@my-project.org``. ::
230 230
231 231 [bugzilla]
232 232 bzurl=http://my-project.org/bugzilla
233 233 user=bugmail@my-project.org
234 234 password=plugh
235 235 version=xmlrpc+email
236 236 bzemail=bugzilla@my-project.org
237 237 template=Changeset {node|short} in {root|basename}.
238 238 {hgweb}/{webroot}/rev/{node|short}\\n
239 239 {desc}\\n
240 240 strip=5
241 241
242 242 [web]
243 243 baseurl=http://my-project.org/hg
244 244
245 245 [usermap]
246 246 user@emaildomain.com=user.name@bugzilladomain.com
247 247
248 248 MySQL example configuration. This has a local Bugzilla 3.2 installation
249 249 in ``/opt/bugzilla-3.2``. The MySQL database is on ``localhost``,
250 250 the Bugzilla database name is ``bugs`` and MySQL is
251 251 accessed with MySQL username ``bugs`` password ``XYZZY``. It is used
252 252 with a collection of Mercurial repositories in ``/var/local/hg/repos/``,
253 253 with a web interface at ``http://my-project.org/hg``. ::
254 254
255 255 [bugzilla]
256 256 host=localhost
257 257 password=XYZZY
258 258 version=3.0
259 259 bzuser=unknown@domain.com
260 260 bzdir=/opt/bugzilla-3.2
261 261 template=Changeset {node|short} in {root|basename}.
262 262 {hgweb}/{webroot}/rev/{node|short}\\n
263 263 {desc}\\n
264 264 strip=5
265 265
266 266 [web]
267 267 baseurl=http://my-project.org/hg
268 268
269 269 [usermap]
270 270 user@emaildomain.com=user.name@bugzilladomain.com
271 271
272 272 All the above add a comment to the Bugzilla bug record of the form::
273 273
274 274 Changeset 3b16791d6642 in repository-name.
275 275 http://my-project.org/hg/repository-name/rev/3b16791d6642
276 276
277 277 Changeset commit comment. Bug 1234.
278 278 '''
279 279
280 280 from mercurial.i18n import _
281 281 from mercurial.node import short
282 from mercurial import cmdutil, mail, templater, util
282 from mercurial import cmdutil, mail, util
283 283 import re, time, urlparse, xmlrpclib
284 284
285 285 testedwith = 'internal'
286 286
class bzaccess(object):
    '''Base class for access to Bugzilla.

    Holds the behaviour shared by every access mode: the ui handle and
    the committer-email -> Bugzilla-user mapping loaded from the
    optional ``bugzilla.usermap`` file and the ``[usermap]`` section.
    '''

    def __init__(self, ui):
        self.ui = ui
        # Merge an optional external usermap file into the [usermap]
        # configuration section before any lookups happen.
        usermap = self.ui.config('bugzilla', 'usermap')
        if usermap:
            self.ui.readconfig(usermap, sections=['usermap'])

    def map_committer(self, user):
        '''map name of committer to Bugzilla user name.'''
        wanted = user.lower()
        for committer, bzuser in self.ui.configitems('usermap'):
            # Comparison is case-insensitive on the committer address.
            if committer.lower() == wanted:
                return bzuser
        return user

    # Methods to be implemented by access classes.
    #
    # 'bugs' is a dict keyed on bug id, where values are a dict holding
    # updates to bug state. Recognized dict keys are:
    #
    # 'hours': Value, float containing work hours to be updated.
    # 'fix': If key present, bug is to be marked fixed. Value ignored.

    def filter_real_bug_ids(self, bugs):
        '''remove bug IDs that do not exist in Bugzilla from bugs.'''
        pass

    def filter_cset_known_bug_ids(self, node, bugs):
        '''remove bug IDs where node occurs in comment text from bugs.'''
        pass

    def updatebug(self, bugid, newstate, text, committer):
        '''update the specified bug. Add comment text and set new states.

        If possible add the comment as being from the committer of
        the changeset. Otherwise use the default Bugzilla user.
        '''
        pass

    def notify(self, bugs, committer):
        '''Force sending of Bugzilla notification emails.

        Only required if the access method does not trigger notification
        emails automatically.
        '''
        pass
334 334
335 335 # Bugzilla via direct access to MySQL database.
class bzmysql(bzaccess):
    '''Support for direct MySQL access to Bugzilla.

    The earliest Bugzilla version this is tested with is version 2.16.

    If your Bugzilla is version 3.4 or above, you are strongly
    recommended to use the XMLRPC access method instead.
    '''

    @staticmethod
    def sql_buglist(ids):
        '''return SQL-friendly list of bug ids'''
        return '(' + ','.join(map(str, ids)) + ')'

    # Module object for MySQLdb, filled in lazily by __init__ so the
    # extension can be loaded without the package installed.
    _MySQLdb = None

    def __init__(self, ui):
        try:
            import MySQLdb as mysql
            bzmysql._MySQLdb = mysql
        except ImportError, err:
            raise util.Abort(_('python mysql support not available: %s') % err)

        bzaccess.__init__(self, ui)

        # Connection parameters, all from the [bugzilla] section.
        host = self.ui.config('bugzilla', 'host', 'localhost')
        user = self.ui.config('bugzilla', 'user', 'bugs')
        passwd = self.ui.config('bugzilla', 'password')
        db = self.ui.config('bugzilla', 'db', 'bugs')
        timeout = int(self.ui.config('bugzilla', 'timeout', 5))
        self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
                     (host, db, user, '*' * len(passwd)))
        self.conn = bzmysql._MySQLdb.connect(host=host,
                                             user=user, passwd=passwd,
                                             db=db,
                                             connect_timeout=timeout)
        self.cursor = self.conn.cursor()
        # The fielddefs id of the comment ("longdesc") field; needed
        # for bugs_activity rows written by updatebug().
        self.longdesc_id = self.get_longdesc_id()
        # Cache of login-name/numeric-string -> numeric Bugzilla user id.
        self.user_ids = {}
        self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"

    def run(self, *args, **kwargs):
        '''run a query.'''
        self.ui.note(_('query: %s %s\n') % (args, kwargs))
        try:
            self.cursor.execute(*args, **kwargs)
        except bzmysql._MySQLdb.MySQLError:
            self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
            raise

    def get_longdesc_id(self):
        '''get identity of longdesc field'''
        # Pre-3.0 schema: the column is named fieldid (see bzmysql_3_0
        # for the 3.0 variant).
        self.run('select fieldid from fielddefs where name = "longdesc"')
        ids = self.cursor.fetchall()
        if len(ids) != 1:
            raise util.Abort(_('unknown database schema'))
        return ids[0][0]

    def filter_real_bug_ids(self, bugs):
        '''filter not-existing bugs from set.'''
        self.run('select bug_id from bugs where bug_id in %s' %
                 bzmysql.sql_buglist(bugs.keys()))
        existing = [id for (id,) in self.cursor.fetchall()]
        for id in bugs.keys():
            if id not in existing:
                self.ui.status(_('bug %d does not exist\n') % id)
                del bugs[id]

    def filter_cset_known_bug_ids(self, node, bugs):
        '''filter bug ids that already refer to this changeset from set.'''
        # A bug already knows about the changeset if its short hash
        # appears anywhere in the bug's comment text.
        self.run('''select bug_id from longdescs where
                    bug_id in %s and thetext like "%%%s%%"''' %
                 (bzmysql.sql_buglist(bugs.keys()), short(node)))
        for (id,) in self.cursor.fetchall():
            self.ui.status(_('bug %d already knows about changeset %s\n') %
                           (id, short(node)))
            del bugs[id]

    def notify(self, bugs, committer):
        '''tell bugzilla to send mail.'''
        self.ui.status(_('telling bugzilla to send mail:\n'))
        (user, userid) = self.get_bugzilla_user(committer)
        for id in bugs.keys():
            self.ui.status(_(' bug %s\n') % id)
            cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
            bzdir = self.ui.config('bugzilla', 'bzdir',
                                   '/var/www/html/bugzilla')
            try:
                # Backwards-compatible with old notify string, which
                # took one string. This will throw with a new format
                # string.
                cmd = cmdfmt % id
            except TypeError:
                cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
            self.ui.note(_('running notify command %s\n') % cmd)
            # Capture stderr along with stdout so failures are reported.
            fp = util.popen('(%s) 2>&1' % cmd)
            out = fp.read()
            ret = fp.close()
            if ret:
                self.ui.warn(out)
                raise util.Abort(_('bugzilla notify command %s') %
                                 util.explainexit(ret)[0])
        self.ui.status(_('done\n'))

    def get_user_id(self, user):
        '''look up numeric bugzilla user id.'''
        try:
            return self.user_ids[user]
        except KeyError:
            try:
                # An all-digits "user" is already a numeric id.
                userid = int(user)
            except ValueError:
                self.ui.note(_('looking up user %s\n') % user)
                self.run('''select userid from profiles
                            where login_name like %s''', user)
                all = self.cursor.fetchall()
                if len(all) != 1:
                    raise KeyError(user)
                userid = int(all[0][0])
            self.user_ids[user] = userid
            return userid

    def get_bugzilla_user(self, committer):
        '''See if committer is a registered bugzilla user. Return
        bugzilla username and userid if so. If not, return default
        bugzilla username and userid.'''
        user = self.map_committer(committer)
        try:
            userid = self.get_user_id(user)
        except KeyError:
            try:
                # Fall back to the configured bugzilla.bzuser account.
                defaultuser = self.ui.config('bugzilla', 'bzuser')
                if not defaultuser:
                    raise util.Abort(_('cannot find bugzilla user id for %s') %
                                     user)
                userid = self.get_user_id(defaultuser)
                user = defaultuser
            except KeyError:
                raise util.Abort(_('cannot find bugzilla user id for %s or %s')
                                 % (user, defaultuser))
        return (user, userid)

    def updatebug(self, bugid, newstate, text, committer):
        '''update bug state with comment text.

        Try adding comment as committer of changeset, otherwise as
        default bugzilla user.'''
        # MySQL mode only supports adding comments; state changes
        # (fix/status) cannot be applied and are warned about.
        if len(newstate) > 0:
            self.ui.warn(_("Bugzilla/MySQL cannot update bug state\n"))

        (user, userid) = self.get_bugzilla_user(committer)
        now = time.strftime('%Y-%m-%d %H:%M:%S')
        self.run('''insert into longdescs
                    (bug_id, who, bug_when, thetext)
                    values (%s, %s, %s, %s)''',
                 (bugid, userid, now, text))
        # Record the comment addition in bugs_activity as well.
        self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
                    values (%s, %s, %s, %s)''',
                 (bugid, userid, now, self.longdesc_id))
        self.conn.commit()
496 496
class bzmysql_2_18(bzmysql):
    '''support for bugzilla 2.18 series.'''

    def __init__(self, ui):
        bzmysql.__init__(self, ui)
        # From 2.18 onwards notification mail is sent with the
        # sendbugmail.pl contrib script instead of ./processmail.
        self.default_notify = ("cd %(bzdir)s && "
                               "perl -T contrib/sendbugmail.pl %(id)s %(user)s")
504 504
class bzmysql_3_0(bzmysql_2_18):
    '''support for bugzilla 3.0 series.'''

    def __init__(self, ui):
        bzmysql_2_18.__init__(self, ui)

    def get_longdesc_id(self):
        '''get identity of longdesc field'''
        # 3.0 renamed the fielddefs key column from fieldid to id.
        self.run('select id from fielddefs where name = "longdesc"')
        rows = self.cursor.fetchall()
        if len(rows) != 1:
            raise util.Abort(_('unknown database schema'))
        return rows[0][0]
518 518
519 519 # Bugzilla via XMLRPC interface.
520 520
class cookietransportrequest(object):
    """A Transport request method that retains cookies over its lifetime.

    The regular xmlrpclib transports ignore cookies. Which causes
    a bit of a problem when you need a cookie-based login, as with
    the Bugzilla XMLRPC interface prior to 4.4.3.

    So this is a helper for defining a Transport which looks for
    cookies being set in responses and saves them to add to all future
    requests.
    """

    # Inspiration drawn from
    # http://blog.godson.in/2010/09/how-to-make-python-xmlrpclib-client.html
    # http://www.itkovian.net/base/transport-class-for-pythons-xml-rpc-lib/

    # NOTE(review): class-level attribute, so the cookie jar is shared
    # by all instances of this transport for the process lifetime.
    cookies = []

    def send_cookies(self, connection):
        # Attach every recorded cookie to the outgoing request.
        if self.cookies:
            for cookie in self.cookies:
                connection.putheader("Cookie", cookie)

    def request(self, host, handler, request_body, verbose=0):
        '''Issue an XML-RPC request, recording any Set-Cookie headers
        from the response for use on subsequent requests.'''
        self.verbose = verbose
        self.accept_gzip_encoding = False

        # issue XML-RPC request
        h = self.make_connection(host)
        if verbose:
            h.set_debuglevel(1)

        self.send_request(h, handler, request_body)
        self.send_host(h, host)
        self.send_cookies(h)
        self.send_user_agent(h)
        self.send_content(h, request_body)

        # Deal with differences between Python 2.4-2.6 and 2.7.
        # In the former h is a HTTP(S). In the latter it's a
        # HTTP(S)Connection. Luckily, the 2.4-2.6 implementation of
        # HTTP(S) has an underlying HTTP(S)Connection, so extract
        # that and use it.
        try:
            response = h.getresponse()
        except AttributeError:
            response = h._conn.getresponse()

        # Add any cookie definitions to our list.
        for header in response.msg.getallmatchingheaders("Set-Cookie"):
            # Keep only the "name=value" part, dropping cookie attributes.
            val = header.split(": ", 1)[1]
            cookie = val.split(";", 1)[0]
            self.cookies.append(cookie)

        if response.status != 200:
            raise xmlrpclib.ProtocolError(host + handler, response.status,
                                          response.reason, response.msg.headers)

        # Parse the XML-RPC payload and return the unmarshalled result.
        payload = response.read()
        parser, unmarshaller = self.getparser()
        parser.feed(payload)
        parser.close()

        return unmarshaller.close()
584 584
# The explicit calls to the underlying xmlrpclib __init__() methods are
# necessary. The xmlrpclib.Transport classes are old-style classes, and
# it turns out their __init__() doesn't get called when doing multiple
# inheritance with a new-style class.
class cookietransport(cookietransportrequest, xmlrpclib.Transport):
    def __init__(self, use_datetime=0):
        # Older Pythons ship a Transport without __init__ at all.
        if not util.safehasattr(xmlrpclib.Transport, "__init__"):
            return
        xmlrpclib.Transport.__init__(self, use_datetime)
593 593
class cookiesafetransport(cookietransportrequest, xmlrpclib.SafeTransport):
    def __init__(self, use_datetime=0):
        # Probes Transport (not SafeTransport) for __init__, matching
        # cookietransport above; both classes come from the same module
        # version, so the probe result is the same either way.
        if not util.safehasattr(xmlrpclib.Transport, "__init__"):
            return
        xmlrpclib.SafeTransport.__init__(self, use_datetime)
598 598
class bzxmlrpc(bzaccess):
    """Support for access to Bugzilla via the Bugzilla XMLRPC API.

    Requires a minimum Bugzilla version 3.4.
    """

    def __init__(self, ui):
        bzaccess.__init__(self, ui)

        # Build the XMLRPC endpoint URL from the configured base URL.
        bzweb = self.ui.config('bugzilla', 'bzurl',
                               'http://localhost/bugzilla/')
        bzweb = bzweb.rstrip("/") + "/xmlrpc.cgi"

        user = self.ui.config('bugzilla', 'user', 'bugs')
        passwd = self.ui.config('bugzilla', 'password')

        self.fixstatus = self.ui.config('bugzilla', 'fixstatus', 'RESOLVED')
        self.fixresolution = self.ui.config('bugzilla', 'fixresolution',
                                            'FIXED')

        self.bzproxy = xmlrpclib.ServerProxy(bzweb, self.transport(bzweb))
        # Probe the server version first; updatebug() branches on it.
        ver = self.bzproxy.Bugzilla.version()['version'].split('.')
        self.bzvermajor = int(ver[0])
        self.bzverminor = int(ver[1])
        login = self.bzproxy.User.login({'login': user, 'password': passwd,
                                         'restrict_login': True})
        # Login token ('' when the server issues none, i.e. pre-4.4.3
        # cookie-based login); passed on every later API call.
        self.bztoken = login.get('token', '')

    def transport(self, uri):
        # Pick the TLS or plain transport based on the URL scheme.
        if urlparse.urlparse(uri, "http")[0] == "https":
            return cookiesafetransport()
        else:
            return cookietransport()

    def get_bug_comments(self, id):
        """Return a string with all comment text for a bug."""
        c = self.bzproxy.Bug.comments({'ids': [id],
                                       'include_fields': ['text'],
                                       'token': self.bztoken})
        return ''.join([t['text'] for t in c['bugs'][str(id)]['comments']])

    def filter_real_bug_ids(self, bugs):
        '''remove bug IDs that do not exist in Bugzilla from bugs.'''
        # permissive=True makes unknown ids come back in 'faults'
        # instead of raising.
        probe = self.bzproxy.Bug.get({'ids': sorted(bugs.keys()),
                                      'include_fields': [],
                                      'permissive': True,
                                      'token': self.bztoken,
                                      })
        for badbug in probe['faults']:
            id = badbug['id']
            self.ui.status(_('bug %d does not exist\n') % id)
            del bugs[id]

    def filter_cset_known_bug_ids(self, node, bugs):
        '''remove bug IDs whose comments already mention this changeset.'''
        for id in sorted(bugs.keys()):
            if self.get_bug_comments(id).find(short(node)) != -1:
                self.ui.status(_('bug %d already knows about changeset %s\n') %
                               (id, short(node)))
                del bugs[id]

    def updatebug(self, bugid, newstate, text, committer):
        '''add comment text to bug bugid, applying any newstate updates
        (work hours, fix status) the server version supports.'''
        args = {}
        if 'hours' in newstate:
            args['work_time'] = newstate['hours']

        if self.bzvermajor >= 4:
            # Bug.update (4.0+) can set status/resolution with the comment.
            args['ids'] = [bugid]
            args['comment'] = {'body' : text}
            if 'fix' in newstate:
                args['status'] = self.fixstatus
                args['resolution'] = self.fixresolution
            args['token'] = self.bztoken
            self.bzproxy.Bug.update(args)
        else:
            # Pre-4.0 only supports adding comments via XMLRPC.
            if 'fix' in newstate:
                self.ui.warn(_("Bugzilla/XMLRPC needs Bugzilla 4.0 or later "
                               "to mark bugs fixed\n"))
            args['id'] = bugid
            args['comment'] = text
            self.bzproxy.Bug.add_comment(args)
678 678
class bzxmlrpcemail(bzxmlrpc):
    """Read data from Bugzilla via XMLRPC, send updates via email.

    Advantages of sending updates via email:
    1. Comments can be added as any user, not just logged in user.
    2. Bug statuses or other fields not accessible via XMLRPC can
    potentially be updated.

    There is no XMLRPC function to change bug status before Bugzilla
    4.0, so bugs cannot be marked fixed via XMLRPC before Bugzilla 4.0.
    But bugs can be marked fixed via email from 3.4 onwards.
    """

    # The email interface changes subtly between 3.4 and 3.6. In 3.4,
    # in-email fields are specified as '@<fieldname> = <value>'. In
    # 3.6 this becomes '@<fieldname> <value>'. And fieldname @bug_id
    # in 3.4 becomes @id in 3.6. 3.6 and 4.0 both maintain backwards
    # compatibility, but rather than rely on this use the new format for
    # 4.0 onwards.

    def __init__(self, ui):
        bzxmlrpc.__init__(self, ui)

        # Destination address of the Bugzilla email interface; mandatory.
        self.bzemail = self.ui.config('bugzilla', 'bzemail')
        if not self.bzemail:
            raise util.Abort(_("configuration 'bzemail' missing"))
        mail.validateconfig(self.ui)

    def makecommandline(self, fieldname, value):
        '''format one in-email command line for the server version.'''
        if self.bzvermajor >= 4:
            return "@%s %s" % (fieldname, str(value))
        else:
            # Pre-4.0 syntax: '@field = value', and 'id' is 'bug_id'.
            if fieldname == "id":
                fieldname = "bug_id"
            return "@%s = %s" % (fieldname, str(value))

    def send_bug_modify_email(self, bugid, commands, comment, committer):
        '''send modification message to Bugzilla bug via email.

        The message format is documented in the Bugzilla email_in.pl
        specification. commands is a list of command lines, comment is the
        comment text.

        To stop users from crafting commit comments with
        Bugzilla commands, specify the bug ID via the message body, rather
        than the subject line, and leave a blank line after it.
        '''
        user = self.map_committer(committer)
        # Resolve the committer to a Bugzilla account; fall back to the
        # configured bugzilla.user account if the committer is unknown.
        matches = self.bzproxy.User.get({'match': [user],
                                         'token': self.bztoken})
        if not matches['users']:
            user = self.ui.config('bugzilla', 'user', 'bugs')
            matches = self.bzproxy.User.get({'match': [user],
                                             'token': self.bztoken})
            if not matches['users']:
                raise util.Abort(_("default bugzilla user %s email not found") %
                                 user)
        user = matches['users'][0]['email']
        commands.append(self.makecommandline("id", bugid))

        # Command block, blank line, then the comment body.
        text = "\n".join(commands) + "\n\n" + comment

        _charsets = mail._charsets(self.ui)
        user = mail.addressencode(self.ui, user, _charsets)
        bzemail = mail.addressencode(self.ui, self.bzemail, _charsets)
        msg = mail.mimeencode(self.ui, text, _charsets)
        msg['From'] = user
        msg['To'] = bzemail
        msg['Subject'] = mail.headencode(self.ui, "Bug modification", _charsets)
        sendmail = mail.connect(self.ui)
        sendmail(user, bzemail, msg.as_string())

    def updatebug(self, bugid, newstate, text, committer):
        '''apply newstate (hours/fix) and comment text to bug bugid
        by emailing the Bugzilla email interface.'''
        cmds = []
        if 'hours' in newstate:
            cmds.append(self.makecommandline("work_time", newstate['hours']))
        if 'fix' in newstate:
            cmds.append(self.makecommandline("bug_status", self.fixstatus))
            cmds.append(self.makecommandline("resolution", self.fixresolution))
        self.send_bug_modify_email(bugid, cmds, text, committer)
759 759
class bugzilla(object):
    '''Driver-independent front end for Bugzilla access.

    Chooses a concrete access class from the [bugzilla] version config
    option, extracts bug references from changeset descriptions, and
    delegates the actual bug updates to the chosen driver.
    '''
    # supported versions of bugzilla. different versions have
    # different schemas.
    _versions = {
        '2.16': bzmysql,
        '2.18': bzmysql_2_18,
        '3.0': bzmysql_3_0,
        'xmlrpc': bzxmlrpc,
        'xmlrpc+email': bzxmlrpcemail
        }

    # matches references such as "bug 1234", "bugs 1, 2 and 3",
    # optionally followed by "h 1.5"-style hours worked
    _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
                       r'(?P<ids>(?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
                       r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')

    # like _default_bug_re, but for "fixes bug 1234" forms that also
    # mark the referenced bugs as fixed
    _default_fix_re = (r'fix(?:es)?\s*(?:bugs?\s*)?,?\s*'
                       r'(?:nos?\.?|num(?:ber)?s?)?\s*'
                       r'(?P<ids>(?:#?\d+\s*(?:,?\s*(?:and)?)?\s*)+)'
                       r'\.?\s*(?:h(?:ours?)?\s*(?P<hours>\d*(?:\.\d+)?))?')

    def __init__(self, ui, repo):
        self.ui = ui
        self.repo = repo

        # instantiate the access driver matching the configured version
        bzversion = self.ui.config('bugzilla', 'version')
        try:
            bzclass = bugzilla._versions[bzversion]
        except KeyError:
            raise util.Abort(_('bugzilla version %s not supported') %
                             bzversion)
        self.bzdriver = bzclass(self.ui)

        # both regexps may be overridden in the [bugzilla] section
        self.bug_re = re.compile(
            self.ui.config('bugzilla', 'regexp',
                           bugzilla._default_bug_re), re.IGNORECASE)
        self.fix_re = re.compile(
            self.ui.config('bugzilla', 'fixregexp',
                           bugzilla._default_fix_re), re.IGNORECASE)
        # splits a matched ids group into the individual bug numbers
        self.split_re = re.compile(r'\D+')

    def find_bugs(self, ctx):
        '''return bugs dictionary created from commit comment.

        Extract bug info from changeset comments. Filter out any that are
        not known to Bugzilla, and any that already have a reference to
        the given changeset in their comments.
        '''
        start = 0
        hours = 0.0
        bugs = {}
        bugmatch = self.bug_re.search(ctx.description(), start)
        fixmatch = self.fix_re.search(ctx.description(), start)
        while True:
            bugattribs = {}
            if not bugmatch and not fixmatch:
                break
            if not bugmatch:
                m = fixmatch
            elif not fixmatch:
                m = bugmatch
            else:
                # both matched: handle whichever occurs first in the text
                if bugmatch.start() < fixmatch.start():
                    m = bugmatch
                else:
                    m = fixmatch
            start = m.end()
            if m is bugmatch:
                bugmatch = self.bug_re.search(ctx.description(), start)
                # NOTE(review): bugattribs is freshly created every
                # iteration, so 'fix' can never be present here and this
                # deletion looks unreachable — confirm intent upstream.
                if 'fix' in bugattribs:
                    del bugattribs['fix']
            else:
                fixmatch = self.fix_re.search(ctx.description(), start)
                bugattribs['fix'] = None

            try:
                ids = m.group('ids')
            except IndexError:
                # custom regexp without a named 'ids' group; fall back to
                # the first positional group
                ids = m.group(1)
            try:
                hours = float(m.group('hours'))
                bugattribs['hours'] = hours
            except IndexError:
                # regexp has no 'hours' group
                pass
            except TypeError:
                # 'hours' group exists but did not participate in the match
                pass
            except ValueError:
                self.ui.status(_("%s: invalid hours\n") % m.group('hours'))

            for id in self.split_re.split(ids):
                if not id:
                    continue
                bugs[int(id)] = bugattribs
        if bugs:
            self.bzdriver.filter_real_bug_ids(bugs)
        if bugs:
            self.bzdriver.filter_cset_known_bug_ids(ctx.node(), bugs)
        return bugs

    def update(self, bugid, newstate, ctx):
        '''update bugzilla bug with reference to changeset.'''

        def webroot(root):
            '''strip leading prefix of repo root and turn into
            url-safe path.'''
            count = int(self.ui.config('bugzilla', 'strip', 0))
            root = util.pconvert(root)
            while count > 0:
                c = root.find('/')
                if c == -1:
                    break
                root = root[c + 1:]
                count -= 1
            return root

        mapfile = self.ui.config('bugzilla', 'style')
        tmpl = self.ui.config('bugzilla', 'template')
        if not mapfile and not tmpl:
            tmpl = _('changeset {node|short} in repo {root} refers '
                     'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
        # tmpl is passed as-is; the templater engine parses the
        # template string itself
        t = cmdutil.changeset_templater(self.ui, self.repo,
                                        False, None, tmpl, mapfile, False)
        self.ui.pushbuffer()
        t.show(ctx, changes=ctx.changeset(),
               bug=str(bugid),
               hgweb=self.ui.config('web', 'baseurl'),
               root=self.repo.root,
               webroot=webroot(self.repo.root))
        data = self.ui.popbuffer()
        self.bzdriver.updatebug(bugid, newstate, data, util.email(ctx.user()))

    def notify(self, bugs, committer):
        '''ensure Bugzilla users are notified of bug change.'''
        self.bzdriver.notify(bugs, committer)
895 893
896 894 def hook(ui, repo, hooktype, node=None, **kwargs):
897 895 '''add comment to bugzilla for each changeset that refers to a
898 896 bugzilla bug id. only add a comment once per bug, so same change
899 897 seen multiple times does not fill bug with duplicate data.'''
900 898 if node is None:
901 899 raise util.Abort(_('hook type %s does not pass a changeset id') %
902 900 hooktype)
903 901 try:
904 902 bz = bugzilla(ui, repo)
905 903 ctx = repo[node]
906 904 bugs = bz.find_bugs(ctx)
907 905 if bugs:
908 906 for bug in bugs:
909 907 bz.update(bug, bugs[bug], ctx)
910 908 bz.notify(bugs, util.email(ctx.user()))
911 909 except Exception, e:
912 910 raise util.Abort(_('Bugzilla error: %s') % e)
@@ -1,202 +1,201 b''
1 1 # churn.py - create a graph of revisions count grouped by template
2 2 #
3 3 # Copyright 2006 Josef "Jeff" Sipek <jeffpc@josefsipek.net>
4 4 # Copyright 2008 Alexander Solovyov <piranha@piranha.org.ua>
5 5 #
6 6 # This software may be used and distributed according to the terms of the
7 7 # GNU General Public License version 2 or any later version.
8 8
9 9 '''command to display statistics about repository history'''
10 10
11 11 from mercurial.i18n import _
12 from mercurial import patch, cmdutil, scmutil, util, templater, commands
12 from mercurial import patch, cmdutil, scmutil, util, commands
13 13 from mercurial import encoding
14 14 import os
15 15 import time, datetime
16 16
17 17 cmdtable = {}
18 18 command = cmdutil.command(cmdtable)
19 19 testedwith = 'internal'
20 20
21 21 def maketemplater(ui, repo, tmpl):
22 tmpl = templater.parsestring(tmpl, quoted=False)
23 22 try:
24 23 t = cmdutil.changeset_templater(ui, repo, False, None, tmpl,
25 24 None, False)
26 25 except SyntaxError, inst:
27 26 raise util.Abort(inst.args[0])
28 27 return t
29 28
def changedlines(ui, repo, ctx1, ctx2, fns):
    '''Return (added, removed) line counts between ctx1 and ctx2 for fns.'''
    fmatch = scmutil.matchfiles(repo, fns)
    difftext = ''.join(patch.diff(repo, ctx1.node(), ctx2.node(), fmatch))
    added = 0
    removed = 0
    for line in difftext.split('\n'):
        # skip the file header lines, count only content lines
        if line.startswith("+++ ") or line.startswith("--- "):
            continue
        if line.startswith("+"):
            added += 1
        elif line.startswith("-"):
            removed += 1
    return (added, removed)
40 39
def countrate(ui, repo, amap, *pats, **opts):
    """Calculate stats.

    Walks the revisions selected by pats/opts and returns a dict mapping
    a grouping key (rendered template, or formatted date when
    --dateformat is given) to either (changeset count, 0) or
    [added lines, removed lines], depending on --changesets.
    amap remaps keys (email aliases).
    """
    if opts.get('dateformat'):
        # group by commit date, formatted with the given strftime format
        def getkey(ctx):
            t, tz = ctx.date()
            date = datetime.datetime(*time.gmtime(float(t) - tz)[:6])
            return date.strftime(opts['dateformat'])
    else:
        # group by the rendered changeset template
        tmpl = opts.get('oldtemplate') or opts.get('template')
        tmpl = maketemplater(ui, repo, tmpl)
        def getkey(ctx):
            ui.pushbuffer()
            tmpl.show(ctx)
            return ui.popbuffer()

    state = {'count': 0}  # mutable counter shared with the prep closure
    rate = {}
    df = False
    if opts.get('date'):
        df = util.matchdate(opts['date'])

    m = scmutil.match(repo[None], pats, opts)
    def prep(ctx, fns):
        rev = ctx.rev()
        if df and not df(ctx.date()[0]): # doesn't match date format
            return

        key = getkey(ctx).strip()
        key = amap.get(key, key) # alias remap
        if opts.get('changesets'):
            rate[key] = (rate.get(key, (0,))[0] + 1, 0)
        else:
            parents = ctx.parents()
            if len(parents) > 1:
                ui.note(_('revision %d is a merge, ignoring...\n') % (rev,))
                return

            ctx1 = parents[0]
            lines = changedlines(ui, repo, ctx1, ctx, fns)
            rate[key] = [r + l for r, l in zip(rate.get(key, (0, 0)), lines)]

        state['count'] += 1
        ui.progress(_('analyzing'), state['count'], total=len(repo))

    # walkchangerevs does the work through the prep callback; the loop
    # body is intentionally empty
    for ctx in cmdutil.walkchangerevs(repo, m, opts, prep):
        continue

    ui.progress(_('analyzing'), None)

    return rate
91 90
92 91
@command('churn',
    [('r', 'rev', [],
      _('count rate for the specified revision or revset'), _('REV')),
     ('d', 'date', '',
      _('count rate for revisions matching date spec'), _('DATE')),
     ('t', 'oldtemplate', '',
      _('template to group changesets (DEPRECATED)'), _('TEMPLATE')),
     ('T', 'template', '{author|email}',
      _('template to group changesets'), _('TEMPLATE')),
     ('f', 'dateformat', '',
      _('strftime-compatible format for grouping by date'), _('FORMAT')),
     ('c', 'changesets', False, _('count rate by number of changesets')),
     ('s', 'sort', False, _('sort by key (default: sort by count)')),
     ('', 'diffstat', False, _('display added/removed lines separately')),
     ('', 'aliases', '', _('file with email aliases'), _('FILE')),
     ] + commands.walkopts,
    _("hg churn [-d DATE] [-r REV] [--aliases FILE] [FILE]"),
    inferrepo=True)
def churn(ui, repo, *pats, **opts):
    '''histogram of changes to the repository

    This command will display a histogram representing the number
    of changed lines or revisions, grouped according to the given
    template. The default template will group changes by author.
    The --dateformat option may be used to group the results by
    date instead.

    Statistics are based on the number of changed lines, or
    alternatively the number of matching revisions if the
    --changesets option is specified.

    Examples::

      # display count of changed lines for every committer
      hg churn -t "{author|email}"

      # display daily activity graph
      hg churn -f "%H" -s -c

      # display activity of developers by month
      hg churn -f "%Y-%m" -s -c

      # display count of lines changed in every year
      hg churn -f "%Y" -s

    It is possible to map alternate email addresses to a main address
    by providing a file using the following format::

      <alias email> = <actual email>

    Such a file may be specified with the --aliases option, otherwise
    a .hgchurn file will be looked for in the working directory root.
    Aliases will be split from the rightmost "=".
    '''
    def pad(s, l):
        # pad s with spaces up to display width l
        return s + " " * (l - encoding.colwidth(s))

    amap = {}
    aliases = opts.get('aliases')
    if not aliases and os.path.exists(repo.wjoin('.hgchurn')):
        aliases = repo.wjoin('.hgchurn')
    if aliases:
        # read the alias mapping; close the file afterwards (the
        # previous code leaked the file object)
        fp = open(aliases, "r")
        try:
            for l in fp:
                try:
                    alias, actual = l.rsplit('=' in l and '=' or None, 1)
                    amap[alias.strip()] = actual.strip()
                except ValueError:
                    l = l.strip()
                    if l:
                        ui.warn(_("skipping malformed alias: %s\n") % l)
                    continue
        finally:
            fp.close()

    rate = countrate(ui, repo, amap, *pats, **opts).items()
    if not rate:
        return

    if opts.get('sort'):
        rate.sort()
    else:
        # sort by descending total, then by key for stable output
        rate.sort(key=lambda x: (-sum(x[1]), x))

    # Be careful not to have a zero maxcount (issue833)
    maxcount = float(max(sum(v) for k, v in rate)) or 1.0
    maxname = max(len(k) for k, v in rate)

    ttywidth = ui.termwidth()
    ui.debug("assuming %i character terminal\n" % ttywidth)
    width = ttywidth - maxname - 2 - 2 - 2

    if opts.get('diffstat'):
        width -= 15
        def format(name, diffstat):
            added, removed = diffstat
            return "%s %15s %s%s\n" % (pad(name, maxname),
                                       '+%d/-%d' % (added, removed),
                                       ui.label('+' * charnum(added),
                                                'diffstat.inserted'),
                                       ui.label('-' * charnum(removed),
                                                'diffstat.deleted'))
    else:
        width -= 6
        def format(name, count):
            return "%s %6d %s\n" % (pad(name, maxname), sum(count),
                                    '*' * charnum(sum(count)))

    def charnum(count):
        # scale count into the available terminal width
        return int(round(count * width / maxcount))

    for name, count in rate:
        ui.write(format(name, count))
@@ -1,282 +1,281 b''
1 1 # Copyright (C) 2007-8 Brendan Cully <brendan@kublai.com>
2 2 #
3 3 # This software may be used and distributed according to the terms of the
4 4 # GNU General Public License version 2 or any later version.
5 5
6 6 """hooks for integrating with the CIA.vc notification service
7 7
8 8 This is meant to be run as a changegroup or incoming hook. To
9 9 configure it, set the following options in your hgrc::
10 10
11 11 [cia]
12 12 # your registered CIA user name
13 13 user = foo
14 14 # the name of the project in CIA
15 15 project = foo
16 16 # the module (subproject) (optional)
17 17 #module = foo
18 18 # Append a diffstat to the log message (optional)
19 19 #diffstat = False
20 20 # Template to use for log messages (optional)
21 21 #template = {desc}\\n{baseurl}{webroot}/rev/{node}-- {diffstat}
22 22 # Style to use (optional)
23 23 #style = foo
24 24 # The URL of the CIA notification service (optional)
25 25 # You can use mailto: URLs to send by email, e.g.
26 26 # mailto:cia@cia.vc
27 27 # Make sure to set email.from if you do this.
28 28 #url = http://cia.vc/
29 29 # print message instead of sending it (optional)
30 30 #test = False
31 31 # number of slashes to strip for url paths
32 32 #strip = 0
33 33
34 34 [hooks]
35 35 # one of these:
36 36 changegroup.cia = python:hgcia.hook
37 37 #incoming.cia = python:hgcia.hook
38 38
39 39 [web]
40 40 # If you want hyperlinks (optional)
41 41 baseurl = http://server/path/to/repo
42 42 """
43 43
44 44 from mercurial.i18n import _
45 45 from mercurial.node import bin, short
46 from mercurial import cmdutil, patch, templater, util, mail
46 from mercurial import cmdutil, patch, util, mail
47 47 import email.Parser
48 48
49 49 import socket, xmlrpclib
50 50 from xml.sax import saxutils
51 51 testedwith = 'internal'
52 52
53 53 socket_timeout = 30 # seconds
54 54 if util.safehasattr(socket, 'setdefaulttimeout'):
55 55 # set a timeout for the socket so you don't have to wait so looooong
56 56 # when cia.vc is having problems. requires python >= 2.3:
57 57 socket.setdefaulttimeout(socket_timeout)
58 58
59 59 HGCIA_VERSION = '0.1'
60 60 HGCIA_URL = 'http://hg.kublai.com/mercurial/hgcia'
61 61
62 62
class ciamsg(object):
    """ A CIA message

    Renders one changeset as the XML payload expected by the CIA.vc
    notification service.
    """
    def __init__(self, cia, ctx):
        self.cia = cia
        self.ctx = ctx
        self.url = self.cia.url
        if self.url:
            self.url += self.cia.root

    def fileelem(self, path, uri, action):
        '''Return one <file> element; an empty uri suppresses the
        uri attribute.'''
        if uri:
            uri = ' uri=%s' % saxutils.quoteattr(uri)
        return '<file%s action=%s>%s</file>' % (
            uri, saxutils.quoteattr(action), saxutils.escape(path))

    def fileelems(self):
        '''Return <file> elements for every file touched by the
        changeset, relative to its first parent.'''
        n = self.ctx.node()
        f = self.cia.repo.status(self.ctx.p1().node(), n)
        url = self.url or ''
        if url and url[-1] == '/':
            url = url[:-1]
        elems = []
        for path in f.modified:
            uri = '%s/diff/%s/%s' % (url, short(n), path)
            elems.append(self.fileelem(path, url and uri, 'modify'))
        for path in f.added:
            # TODO: copy/rename ?
            uri = '%s/file/%s/%s' % (url, short(n), path)
            elems.append(self.fileelem(path, url and uri, 'add'))
        for path in f.removed:
            elems.append(self.fileelem(path, '', 'remove'))

        return '\n'.join(elems)

    def sourceelem(self, project, module=None, branch=None):
        '''Return the <source> element identifying project/module/branch.'''
        msg = ['<source>', '<project>%s</project>' % saxutils.escape(project)]
        if module:
            msg.append('<module>%s</module>' % saxutils.escape(module))
        if branch:
            msg.append('<branch>%s</branch>' % saxutils.escape(branch))
        msg.append('</source>')

        return '\n'.join(msg)

    def diffstat(self):
        '''Return a diffstat summary of the changeset, or '' on failure.'''
        # minimal file-like object collecting the exported patch lines
        class patchbuf(object):
            def __init__(self):
                self.lines = []
                # diffstat is stupid
                self.name = 'cia'
            def write(self, data):
                self.lines += data.splitlines(True)
            def close(self):
                pass

        n = self.ctx.node()
        pbuf = patchbuf()
        cmdutil.export(self.cia.repo, [n], fp=pbuf)
        return patch.diffstat(pbuf.lines) or ''

    def logmsg(self):
        '''Render the configured log template for this changeset.'''
        if self.cia.diffstat:
            diffstat = self.diffstat()
        else:
            diffstat = ''
        self.cia.ui.pushbuffer()
        self.cia.templater.show(self.ctx, changes=self.ctx.changeset(),
                                baseurl=self.cia.ui.config('web', 'baseurl'),
                                url=self.url, diffstat=diffstat,
                                webroot=self.cia.root)
        return self.cia.ui.popbuffer()

    def xml(self):
        '''Return the full CIA XML message for this changeset.'''
        n = short(self.ctx.node())
        src = self.sourceelem(self.cia.project, module=self.cia.module,
                              branch=self.ctx.branch())
        # unix timestamp
        dt = self.ctx.date()
        timestamp = dt[0]

        author = saxutils.escape(self.ctx.user())
        rev = '%d:%s' % (self.ctx.rev(), n)
        log = saxutils.escape(self.logmsg())

        url = self.url
        if url and url[-1] == '/':
            url = url[:-1]
        url = url and '<url>%s/rev/%s</url>' % (saxutils.escape(url), n) or ''

        msg = """
<message>
  <generator>
    <name>Mercurial (hgcia)</name>
    <version>%s</version>
    <url>%s</url>
    <user>%s</user>
  </generator>
  %s
  <body>
    <commit>
      <author>%s</author>
      <version>%s</version>
      <log>%s</log>
      %s
      <files>%s</files>
    </commit>
  </body>
  <timestamp>%d</timestamp>
</message>
""" % \
            (HGCIA_VERSION, saxutils.escape(HGCIA_URL),
             saxutils.escape(self.cia.user), src, author, rev, log, url,
             self.fileelems(), timestamp)

        return msg
178 178
179 179
class hgcia(object):
    """ CIA notification class

    Reads the [cia] configuration and knows how to deliver a rendered
    message, either via XML-RPC or by email.
    """

    # default log templates, without and with a diffstat appended
    deftemplate = '{desc}'
    dstemplate = '{desc}\n-- \n{diffstat}'

    def __init__(self, ui, repo):
        self.ui = ui
        self.repo = repo

        self.ciaurl = self.ui.config('cia', 'url', 'http://cia.vc')
        self.user = self.ui.config('cia', 'user')
        self.project = self.ui.config('cia', 'project')
        self.module = self.ui.config('cia', 'module')
        self.diffstat = self.ui.configbool('cia', 'diffstat')
        self.emailfrom = self.ui.config('email', 'from')
        self.dryrun = self.ui.configbool('cia', 'test')
        self.url = self.ui.config('web', 'baseurl')
        # Default to -1 for backward compatibility
        self.stripcount = int(self.ui.config('cia', 'strip', -1))
        self.root = self.strip(self.repo.root)

        style = self.ui.config('cia', 'style')
        template = self.ui.config('cia', 'template')
        if not template:
            if self.diffstat:
                template = self.dstemplate
            else:
                template = self.deftemplate
        # the template string is handed to the templater unparsed
        t = cmdutil.changeset_templater(self.ui, self.repo, False, None,
                                        template, style, False)
        self.templater = t

    def strip(self, path):
        '''strip leading slashes from local path, turn into web-safe path.'''

        path = util.pconvert(path)
        count = self.stripcount
        if count < 0:
            # negative strip count disables the web root entirely
            return ''
        while count > 0:
            c = path.find('/')
            if c == -1:
                break
            path = path[c + 1:]
            count -= 1
        return path

    def sendrpc(self, msg):
        '''Deliver msg to the CIA hub via XML-RPC; abort on an error
        response.'''
        srv = xmlrpclib.Server(self.ciaurl)
        res = srv.hub.deliver(msg)
        if res is not True and res != 'queued.':
            raise util.Abort(_('%s returned an error: %s') %
                             (self.ciaurl, res))

    def sendemail(self, address, data):
        '''Deliver the XML message to address by email.'''
        p = email.Parser.Parser()
        msg = p.parsestr(data)
        msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
        msg['To'] = address
        msg['From'] = self.emailfrom
        msg['Subject'] = 'DeliverXML'
        msg['Content-type'] = 'text/xml'
        msgtext = msg.as_string()

        self.ui.status(_('hgcia: sending update to %s\n') % address)
        mail.sendmail(self.ui, util.email(self.emailfrom),
                      [address], msgtext)
249 248
250 249
def hook(ui, repo, hooktype, node=None, url=None, **kwargs):
    """ send CIA notification """
    def deliver(cia, ctx):
        # render the message and route it according to configuration:
        # dry-run prints it, mailto: URLs go by email, otherwise XML-RPC
        xmlmsg = ciamsg(cia, ctx).xml()
        if cia.dryrun:
            ui.write(xmlmsg)
        elif cia.ciaurl.startswith('mailto:'):
            if not cia.emailfrom:
                raise util.Abort(_('email.from must be defined when '
                                   'sending by email'))
            cia.sendemail(cia.ciaurl[7:], xmlmsg)
        else:
            cia.sendrpc(xmlmsg)

    n = bin(node)
    cia = hgcia(ui, repo)
    if not cia.user:
        ui.debug('cia: no user specified')
        return
    if not cia.project:
        ui.debug('cia: no project specified')
        return
    if hooktype == 'changegroup':
        # notify for every changeset added by the group
        first = repo.changelog.rev(n)
        for rev in xrange(first, len(repo.changelog)):
            deliver(cia, repo.changectx(repo.changelog.node(rev)))
    else:
        deliver(cia, repo.changectx(n))
@@ -1,743 +1,742 b''
1 1 # keyword.py - $Keyword$ expansion for Mercurial
2 2 #
3 3 # Copyright 2007-2015 Christian Ebert <blacktrash@gmx.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 #
8 8 # $Id$
9 9 #
10 10 # Keyword expansion hack against the grain of a Distributed SCM
11 11 #
12 12 # There are many good reasons why this is not needed in a distributed
13 13 # SCM, still it may be useful in very small projects based on single
14 14 # files (like LaTeX packages), that are mostly addressed to an
15 15 # audience not running a version control system.
16 16 #
17 17 # For in-depth discussion refer to
18 18 # <http://mercurial.selenic.com/wiki/KeywordPlan>.
19 19 #
20 20 # Keyword expansion is based on Mercurial's changeset template mappings.
21 21 #
22 22 # Binary files are not touched.
23 23 #
24 24 # Files to act upon/ignore are specified in the [keyword] section.
25 25 # Customized keyword template mappings in the [keywordmaps] section.
26 26 #
27 27 # Run "hg help keyword" and "hg kwdemo" to get info on configuration.
28 28
29 29 '''expand keywords in tracked files
30 30
31 31 This extension expands RCS/CVS-like or self-customized $Keywords$ in
32 32 tracked text files selected by your configuration.
33 33
34 34 Keywords are only expanded in local repositories and not stored in the
35 35 change history. The mechanism can be regarded as a convenience for the
36 36 current user or for archive distribution.
37 37
38 38 Keywords expand to the changeset data pertaining to the latest change
39 39 relative to the working directory parent of each file.
40 40
41 41 Configuration is done in the [keyword], [keywordset] and [keywordmaps]
42 42 sections of hgrc files.
43 43
44 44 Example::
45 45
46 46 [keyword]
47 47 # expand keywords in every python file except those matching "x*"
48 48 **.py =
49 49 x* = ignore
50 50
51 51 [keywordset]
52 52 # prefer svn- over cvs-like default keywordmaps
53 53 svn = True
54 54
55 55 .. note::
56 56
57 57 The more specific you are in your filename patterns the less you
58 58 lose speed in huge repositories.
59 59
60 60 For [keywordmaps] template mapping and expansion demonstration and
61 61 control run :hg:`kwdemo`. See :hg:`help templates` for a list of
62 62 available templates and filters.
63 63
64 64 Three additional date template filters are provided:
65 65
66 66 :``utcdate``: "2006/09/18 15:13:13"
67 67 :``svnutcdate``: "2006-09-18 15:13:13Z"
68 68 :``svnisodate``: "2006-09-18 08:13:13 -700 (Mon, 18 Sep 2006)"
69 69
70 70 The default template mappings (view with :hg:`kwdemo -d`) can be
71 71 replaced with customized keywords and templates. Again, run
72 72 :hg:`kwdemo` to control the results of your configuration changes.
73 73
74 74 Before changing/disabling active keywords, you must run :hg:`kwshrink`
75 75 to avoid storing expanded keywords in the change history.
76 76
77 77 To force expansion after enabling it, or a configuration change, run
78 78 :hg:`kwexpand`.
79 79
80 80 Expansions spanning more than one line and incremental expansions,
81 81 like CVS' $Log$, are not supported. A keyword template map "Log =
82 82 {desc}" expands to the first line of the changeset description.
83 83 '''
84 84
85 85 from mercurial import commands, context, cmdutil, dispatch, filelog, extensions
86 from mercurial import localrepo, match, patch, templatefilters, templater, util
86 from mercurial import localrepo, match, patch, templatefilters, util
87 87 from mercurial import scmutil, pathutil
88 88 from mercurial.hgweb import webcommands
89 89 from mercurial.i18n import _
90 90 import os, re, tempfile
91 91
92 92 cmdtable = {}
93 93 command = cmdutil.command(cmdtable)
94 94 testedwith = 'internal'
95 95
96 96 # hg commands that do not act on keywords
97 97 nokwcommands = ('add addremove annotate bundle export grep incoming init log'
98 98 ' outgoing push tip verify convert email glog')
99 99
100 100 # hg commands that trigger expansion only when writing to working dir,
101 101 # not when reading filelog, and unexpand when reading from working dir
102 102 restricted = ('merge kwexpand kwshrink record qrecord resolve transplant'
103 103 ' unshelve rebase graft backout histedit fetch')
104 104
105 105 # names of extensions using dorecord
106 106 recordextensions = 'record'
107 107
108 108 colortable = {
109 109 'kwfiles.enabled': 'green bold',
110 110 'kwfiles.deleted': 'cyan bold underline',
111 111 'kwfiles.enabledunknown': 'green',
112 112 'kwfiles.ignored': 'bold',
113 113 'kwfiles.ignoredunknown': 'none'
114 114 }
115 115
# date like in cvs' $Date
def utcdate(text):
    ''':utcdate: Date. Returns a UTC-date in this format: "2009/08/18 11:00:13".
    '''
    return util.datestr((util.parsedate(text)[0], 0), '%Y/%m/%d %H:%M:%S')
# date like in svn's $Date
def svnisodate(text):
    ''':svnisodate: Date. Returns a date in this format: "2009-08-18 13:00:13
    +0200 (Tue, 18 Aug 2009)".
    '''
    return util.datestr(text, '%Y-%m-%d %H:%M:%S %1%2 (%a, %d %b %Y)')
# date like in svn's $Id
def svnutcdate(text):
    ''':svnutcdate: Date. Returns a UTC-date in this format: "2009-08-18
    11:00:13Z".
    '''
    return util.datestr((util.parsedate(text)[0], 0), '%Y-%m-%d %H:%M:%SZ')

# register the date filters so keyword templates can use them
templatefilters.filters.update({'utcdate': utcdate,
                                'svnisodate': svnisodate,
                                'svnutcdate': svnutcdate})

# make keyword tools accessible
kwtools = {'templater': None, 'hgcmd': ''}
140 140
141 141 def _defaultkwmaps(ui):
142 142 '''Returns default keywordmaps according to keywordset configuration.'''
143 143 templates = {
144 144 'Revision': '{node|short}',
145 145 'Author': '{author|user}',
146 146 }
147 147 kwsets = ({
148 148 'Date': '{date|utcdate}',
149 149 'RCSfile': '{file|basename},v',
150 150 'RCSFile': '{file|basename},v', # kept for backwards compatibility
151 151 # with hg-keyword
152 152 'Source': '{root}/{file},v',
153 153 'Id': '{file|basename},v {node|short} {date|utcdate} {author|user}',
154 154 'Header': '{root}/{file},v {node|short} {date|utcdate} {author|user}',
155 155 }, {
156 156 'Date': '{date|svnisodate}',
157 157 'Id': '{file|basename},v {node|short} {date|svnutcdate} {author|user}',
158 158 'LastChangedRevision': '{node|short}',
159 159 'LastChangedBy': '{author|user}',
160 160 'LastChangedDate': '{date|svnisodate}',
161 161 })
162 162 templates.update(kwsets[ui.configbool('keywordset', 'svn')])
163 163 return templates
164 164
165 165 def _shrinktext(text, subfunc):
166 166 '''Helper for keyword expansion removal in text.
167 167 Depending on subfunc also returns number of substitutions.'''
168 168 return subfunc(r'$\1$', text)
169 169
170 170 def _preselect(wstatus, changed):
171 171 '''Retrieves modified and added files from a working directory state
172 172 and returns the subset of each contained in given changed files
173 173 retrieved from a change context.'''
174 174 modified = [f for f in wstatus.modified if f in changed]
175 175 added = [f for f in wstatus.added if f in changed]
176 176 return modified, added
177 177
178 178
class kwtemplater(object):
    '''
    Sets up keyword templates, corresponding keyword regex, and
    provides keyword substitution functions.
    '''

    def __init__(self, ui, repo, inc, exc):
        self.ui = ui
        self.repo = repo
        # files eligible for expansion, from [keyword] include/exclude
        self.match = match.match(repo.root, '', [], inc, exc)
        # restricted commands only expand when writing to the wdir
        self.restrict = kwtools['hgcmd'] in restricted.split()
        self.postcommit = False

        kwmaps = self.ui.configitems('keywordmaps')
        if kwmaps: # override default templates
            self.templates = dict(kwmaps)
        else:
            self.templates = _defaultkwmaps(self.ui)

    @util.propertycache
    def escape(self):
        '''Returns bar-separated and escaped keywords.'''
        return '|'.join(map(re.escape, self.templates.keys()))

    @util.propertycache
    def rekw(self):
        '''Returns regex for unexpanded keywords.'''
        return re.compile(r'\$(%s)\$' % self.escape)

    @util.propertycache
    def rekwexp(self):
        '''Returns regex for expanded keywords.'''
        return re.compile(r'\$(%s): [^$\n\r]*? \$' % self.escape)

    def substitute(self, data, path, ctx, subfunc):
        '''Replaces keywords in data with expanded template.'''
        def kwsub(mobj):
            kw = mobj.group(1)
            # render the keyword's template against ctx
            ct = cmdutil.changeset_templater(self.ui, self.repo, False, None,
                                             self.templates[kw], '', False)
            self.ui.pushbuffer()
            ct.show(ctx, root=self.repo.root, file=path)
            ekw = templatefilters.firstline(self.ui.popbuffer())
            return '$%s: %s $' % (kw, ekw)
        return subfunc(kwsub, data)

    def linkctx(self, path, fileid):
        '''Similar to filelog.linkrev, but returns a changectx.'''
        return self.repo.filectx(path, fileid=fileid).changectx()

    def expand(self, path, node, data):
        '''Returns data with keywords expanded.'''
        if not self.restrict and self.match(path) and not util.binary(data):
            ctx = self.linkctx(path, node)
            return self.substitute(data, path, ctx, self.rekw.sub)
        return data

    def iskwfile(self, cand, ctx):
        '''Returns subset of candidates which are configured for keyword
        expansion but are not symbolic links.'''
        return [f for f in cand if self.match(f) and 'l' not in ctx.flags(f)]

    def overwrite(self, ctx, candidates, lookup, expand, rekw=False):
        '''Overwrites selected files expanding/shrinking keywords.'''
        if self.restrict or lookup or self.postcommit: # exclude kw_copy
            candidates = self.iskwfile(candidates, ctx)
        if not candidates:
            return
        kwcmd = self.restrict and lookup # kwexpand/kwshrink
        if self.restrict or expand and lookup:
            mf = ctx.manifest()
        # pick the regex matching the current state of the keywords
        if self.restrict or rekw:
            re_kw = self.rekw
        else:
            re_kw = self.rekwexp
        if expand:
            msg = _('overwriting %s expanding keywords\n')
        else:
            msg = _('overwriting %s shrinking keywords\n')
        for f in candidates:
            if self.restrict:
                data = self.repo.file(f).read(mf[f])
            else:
                data = self.repo.wread(f)
            if util.binary(data):
                continue
            if expand:
                parents = ctx.parents()
                if lookup:
                    ctx = self.linkctx(f, mf[f])
                elif self.restrict and len(parents) > 1:
                    # merge commit
                    # in case of conflict f is in modified state during
                    # merge, even if f does not differ from f in parent
                    for p in parents:
                        if f in p and not p[f].cmp(ctx[f]):
                            ctx = p[f].changectx()
                            break
                data, found = self.substitute(data, f, ctx, re_kw.subn)
            elif self.restrict:
                found = re_kw.search(data)
            else:
                data, found = _shrinktext(data, re_kw.subn)
            if found:
                self.ui.note(msg % f)
                fp = self.repo.wvfs(f, "wb", atomictemp=True)
                fp.write(data)
                fp.close()
                if kwcmd:
                    self.repo.dirstate.normal(f)
                elif self.postcommit:
                    self.repo.dirstate.normallookup(f)

    def shrink(self, fname, text):
        '''Returns text with all keyword substitutions removed.'''
        if self.match(fname) and not util.binary(text):
            return _shrinktext(text, self.rekwexp.sub)
        return text

    def shrinklines(self, fname, lines):
        '''Returns lines with keyword substitutions removed.'''
        if self.match(fname):
            text = ''.join(lines)
            if not util.binary(text):
                return _shrinktext(text, self.rekwexp.sub).splitlines(True)
        return lines

    def wread(self, fname, data):
        '''If in restricted mode returns data read from wdir with
        keyword substitutions removed.'''
        if self.restrict:
            return self.shrink(fname, data)
        return data
313 312
314 313 class kwfilelog(filelog.filelog):
315 314 '''
316 315 Subclass of filelog to hook into its read, add, cmp methods.
317 316 Keywords are "stored" unexpanded, and processed on reading.
318 317 '''
319 318 def __init__(self, opener, kwt, path):
320 319 super(kwfilelog, self).__init__(opener, path)
321 320 self.kwt = kwt
322 321 self.path = path
323 322
324 323 def read(self, node):
325 324 '''Expands keywords when reading filelog.'''
326 325 data = super(kwfilelog, self).read(node)
327 326 if self.renamed(node):
328 327 return data
329 328 return self.kwt.expand(self.path, node, data)
330 329
331 330 def add(self, text, meta, tr, link, p1=None, p2=None):
332 331 '''Removes keyword substitutions when adding to filelog.'''
333 332 text = self.kwt.shrink(self.path, text)
334 333 return super(kwfilelog, self).add(text, meta, tr, link, p1, p2)
335 334
336 335 def cmp(self, node, text):
337 336 '''Removes keyword substitutions for comparison.'''
338 337 text = self.kwt.shrink(self.path, text)
339 338 return super(kwfilelog, self).cmp(node, text)
340 339
341 340 def _status(ui, repo, wctx, kwt, *pats, **opts):
342 341 '''Bails out if [keyword] configuration is not active.
343 342 Returns status of working directory.'''
344 343 if kwt:
345 344 return repo.status(match=scmutil.match(wctx, pats, opts), clean=True,
346 345 unknown=opts.get('unknown') or opts.get('all'))
347 346 if ui.configitems('keyword'):
348 347 raise util.Abort(_('[keyword] patterns cannot match'))
349 348 raise util.Abort(_('no [keyword] patterns configured'))
350 349
351 350 def _kwfwrite(ui, repo, expand, *pats, **opts):
352 351 '''Selects files and passes them to kwtemplater.overwrite.'''
353 352 wctx = repo[None]
354 353 if len(wctx.parents()) > 1:
355 354 raise util.Abort(_('outstanding uncommitted merge'))
356 355 kwt = kwtools['templater']
357 356 wlock = repo.wlock()
358 357 try:
359 358 status = _status(ui, repo, wctx, kwt, *pats, **opts)
360 359 if status.modified or status.added or status.removed or status.deleted:
361 360 raise util.Abort(_('outstanding uncommitted changes'))
362 361 kwt.overwrite(wctx, status.clean, True, expand)
363 362 finally:
364 363 wlock.release()
365 364
366 365 @command('kwdemo',
367 366 [('d', 'default', None, _('show default keyword template maps')),
368 367 ('f', 'rcfile', '',
369 368 _('read maps from rcfile'), _('FILE'))],
370 369 _('hg kwdemo [-d] [-f RCFILE] [TEMPLATEMAP]...'),
371 370 optionalrepo=True)
372 371 def demo(ui, repo, *args, **opts):
373 372 '''print [keywordmaps] configuration and an expansion example
374 373
375 374 Show current, custom, or default keyword template maps and their
376 375 expansions.
377 376
378 377 Extend the current configuration by specifying maps as arguments
379 378 and using -f/--rcfile to source an external hgrc file.
380 379
381 380 Use -d/--default to disable current configuration.
382 381
383 382 See :hg:`help templates` for information on templates and filters.
384 383 '''
385 384 def demoitems(section, items):
386 385 ui.write('[%s]\n' % section)
387 386 for k, v in sorted(items):
388 387 ui.write('%s = %s\n' % (k, v))
389 388
390 389 fn = 'demo.txt'
391 390 tmpdir = tempfile.mkdtemp('', 'kwdemo.')
392 391 ui.note(_('creating temporary repository at %s\n') % tmpdir)
393 392 repo = localrepo.localrepository(repo.baseui, tmpdir, True)
394 393 ui.setconfig('keyword', fn, '', 'keyword')
395 394 svn = ui.configbool('keywordset', 'svn')
396 395 # explicitly set keywordset for demo output
397 396 ui.setconfig('keywordset', 'svn', svn, 'keyword')
398 397
399 398 uikwmaps = ui.configitems('keywordmaps')
400 399 if args or opts.get('rcfile'):
401 400 ui.status(_('\n\tconfiguration using custom keyword template maps\n'))
402 401 if uikwmaps:
403 402 ui.status(_('\textending current template maps\n'))
404 403 if opts.get('default') or not uikwmaps:
405 404 if svn:
406 405 ui.status(_('\toverriding default svn keywordset\n'))
407 406 else:
408 407 ui.status(_('\toverriding default cvs keywordset\n'))
409 408 if opts.get('rcfile'):
410 409 ui.readconfig(opts.get('rcfile'))
411 410 if args:
412 411 # simulate hgrc parsing
413 412 rcmaps = ['[keywordmaps]\n'] + [a + '\n' for a in args]
414 413 fp = repo.vfs('hgrc', 'w')
415 414 fp.writelines(rcmaps)
416 415 fp.close()
417 416 ui.readconfig(repo.join('hgrc'))
418 417 kwmaps = dict(ui.configitems('keywordmaps'))
419 418 elif opts.get('default'):
420 419 if svn:
421 420 ui.status(_('\n\tconfiguration using default svn keywordset\n'))
422 421 else:
423 422 ui.status(_('\n\tconfiguration using default cvs keywordset\n'))
424 423 kwmaps = _defaultkwmaps(ui)
425 424 if uikwmaps:
426 425 ui.status(_('\tdisabling current template maps\n'))
427 426 for k, v in kwmaps.iteritems():
428 427 ui.setconfig('keywordmaps', k, v, 'keyword')
429 428 else:
430 429 ui.status(_('\n\tconfiguration using current keyword template maps\n'))
431 430 if uikwmaps:
432 431 kwmaps = dict(uikwmaps)
433 432 else:
434 433 kwmaps = _defaultkwmaps(ui)
435 434
436 435 uisetup(ui)
437 436 reposetup(ui, repo)
438 437 ui.write('[extensions]\nkeyword =\n')
439 438 demoitems('keyword', ui.configitems('keyword'))
440 439 demoitems('keywordset', ui.configitems('keywordset'))
441 440 demoitems('keywordmaps', kwmaps.iteritems())
442 441 keywords = '$' + '$\n$'.join(sorted(kwmaps.keys())) + '$\n'
443 442 repo.wvfs.write(fn, keywords)
444 443 repo[None].add([fn])
445 444 ui.note(_('\nkeywords written to %s:\n') % fn)
446 445 ui.note(keywords)
447 446 wlock = repo.wlock()
448 447 try:
449 448 repo.dirstate.setbranch('demobranch')
450 449 finally:
451 450 wlock.release()
452 451 for name, cmd in ui.configitems('hooks'):
453 452 if name.split('.', 1)[0].find('commit') > -1:
454 453 repo.ui.setconfig('hooks', name, '', 'keyword')
455 454 msg = _('hg keyword configuration and expansion example')
456 455 ui.note(("hg ci -m '%s'\n" % msg))
457 456 repo.commit(text=msg)
458 457 ui.status(_('\n\tkeywords expanded\n'))
459 458 ui.write(repo.wread(fn))
460 459 repo.wvfs.rmtree(repo.root)
461 460
462 461 @command('kwexpand',
463 462 commands.walkopts,
464 463 _('hg kwexpand [OPTION]... [FILE]...'),
465 464 inferrepo=True)
466 465 def expand(ui, repo, *pats, **opts):
467 466 '''expand keywords in the working directory
468 467
469 468 Run after (re)enabling keyword expansion.
470 469
471 470 kwexpand refuses to run if given files contain local changes.
472 471 '''
473 472 # 3rd argument sets expansion to True
474 473 _kwfwrite(ui, repo, True, *pats, **opts)
475 474
476 475 @command('kwfiles',
477 476 [('A', 'all', None, _('show keyword status flags of all files')),
478 477 ('i', 'ignore', None, _('show files excluded from expansion')),
479 478 ('u', 'unknown', None, _('only show unknown (not tracked) files')),
480 479 ] + commands.walkopts,
481 480 _('hg kwfiles [OPTION]... [FILE]...'),
482 481 inferrepo=True)
483 482 def files(ui, repo, *pats, **opts):
484 483 '''show files configured for keyword expansion
485 484
486 485 List which files in the working directory are matched by the
487 486 [keyword] configuration patterns.
488 487
489 488 Useful to prevent inadvertent keyword expansion and to speed up
490 489 execution by including only files that are actual candidates for
491 490 expansion.
492 491
493 492 See :hg:`help keyword` on how to construct patterns both for
494 493 inclusion and exclusion of files.
495 494
496 495 With -A/--all and -v/--verbose the codes used to show the status
497 496 of files are::
498 497
499 498 K = keyword expansion candidate
500 499 k = keyword expansion candidate (not tracked)
501 500 I = ignored
502 501 i = ignored (not tracked)
503 502 '''
504 503 kwt = kwtools['templater']
505 504 wctx = repo[None]
506 505 status = _status(ui, repo, wctx, kwt, *pats, **opts)
507 506 if pats:
508 507 cwd = repo.getcwd()
509 508 else:
510 509 cwd = ''
511 510 files = []
512 511 if not opts.get('unknown') or opts.get('all'):
513 512 files = sorted(status.modified + status.added + status.clean)
514 513 kwfiles = kwt.iskwfile(files, wctx)
515 514 kwdeleted = kwt.iskwfile(status.deleted, wctx)
516 515 kwunknown = kwt.iskwfile(status.unknown, wctx)
517 516 if not opts.get('ignore') or opts.get('all'):
518 517 showfiles = kwfiles, kwdeleted, kwunknown
519 518 else:
520 519 showfiles = [], [], []
521 520 if opts.get('all') or opts.get('ignore'):
522 521 showfiles += ([f for f in files if f not in kwfiles],
523 522 [f for f in status.unknown if f not in kwunknown])
524 523 kwlabels = 'enabled deleted enabledunknown ignored ignoredunknown'.split()
525 524 kwstates = zip(kwlabels, 'K!kIi', showfiles)
526 525 fm = ui.formatter('kwfiles', opts)
527 526 fmt = '%.0s%s\n'
528 527 if opts.get('all') or ui.verbose:
529 528 fmt = '%s %s\n'
530 529 for kwstate, char, filenames in kwstates:
531 530 label = 'kwfiles.' + kwstate
532 531 for f in filenames:
533 532 fm.startitem()
534 533 fm.write('kwstatus path', fmt, char,
535 534 repo.pathto(f, cwd), label=label)
536 535 fm.end()
537 536
538 537 @command('kwshrink',
539 538 commands.walkopts,
540 539 _('hg kwshrink [OPTION]... [FILE]...'),
541 540 inferrepo=True)
542 541 def shrink(ui, repo, *pats, **opts):
543 542 '''revert expanded keywords in the working directory
544 543
545 544 Must be run before changing/disabling active keywords.
546 545
547 546 kwshrink refuses to run if given files contain local changes.
548 547 '''
549 548 # 3rd argument sets expansion to False
550 549 _kwfwrite(ui, repo, False, *pats, **opts)
551 550
552 551
553 552 def uisetup(ui):
554 553 ''' Monkeypatches dispatch._parse to retrieve user command.'''
555 554
556 555 def kwdispatch_parse(orig, ui, args):
557 556 '''Monkeypatch dispatch._parse to obtain running hg command.'''
558 557 cmd, func, args, options, cmdoptions = orig(ui, args)
559 558 kwtools['hgcmd'] = cmd
560 559 return cmd, func, args, options, cmdoptions
561 560
562 561 extensions.wrapfunction(dispatch, '_parse', kwdispatch_parse)
563 562
564 563 def reposetup(ui, repo):
565 564 '''Sets up repo as kwrepo for keyword substitution.
566 565 Overrides file method to return kwfilelog instead of filelog
567 566 if file matches user configuration.
568 567 Wraps commit to overwrite configured files with updated
569 568 keyword substitutions.
570 569 Monkeypatches patch and webcommands.'''
571 570
572 571 try:
573 572 if (not repo.local() or kwtools['hgcmd'] in nokwcommands.split()
574 573 or '.hg' in util.splitpath(repo.root)
575 574 or repo._url.startswith('bundle:')):
576 575 return
577 576 except AttributeError:
578 577 pass
579 578
580 579 inc, exc = [], ['.hg*']
581 580 for pat, opt in ui.configitems('keyword'):
582 581 if opt != 'ignore':
583 582 inc.append(pat)
584 583 else:
585 584 exc.append(pat)
586 585 if not inc:
587 586 return
588 587
589 588 kwtools['templater'] = kwt = kwtemplater(ui, repo, inc, exc)
590 589
591 590 class kwrepo(repo.__class__):
592 591 def file(self, f):
593 592 if f[0] == '/':
594 593 f = f[1:]
595 594 return kwfilelog(self.svfs, kwt, f)
596 595
597 596 def wread(self, filename):
598 597 data = super(kwrepo, self).wread(filename)
599 598 return kwt.wread(filename, data)
600 599
601 600 def commit(self, *args, **opts):
602 601 # use custom commitctx for user commands
603 602 # other extensions can still wrap repo.commitctx directly
604 603 self.commitctx = self.kwcommitctx
605 604 try:
606 605 return super(kwrepo, self).commit(*args, **opts)
607 606 finally:
608 607 del self.commitctx
609 608
610 609 def kwcommitctx(self, ctx, error=False):
611 610 n = super(kwrepo, self).commitctx(ctx, error)
612 611 # no lock needed, only called from repo.commit() which already locks
613 612 if not kwt.postcommit:
614 613 restrict = kwt.restrict
615 614 kwt.restrict = True
616 615 kwt.overwrite(self[n], sorted(ctx.added() + ctx.modified()),
617 616 False, True)
618 617 kwt.restrict = restrict
619 618 return n
620 619
621 620 def rollback(self, dryrun=False, force=False):
622 621 wlock = self.wlock()
623 622 try:
624 623 if not dryrun:
625 624 changed = self['.'].files()
626 625 ret = super(kwrepo, self).rollback(dryrun, force)
627 626 if not dryrun:
628 627 ctx = self['.']
629 628 modified, added = _preselect(ctx.status(), changed)
630 629 kwt.overwrite(ctx, modified, True, True)
631 630 kwt.overwrite(ctx, added, True, False)
632 631 return ret
633 632 finally:
634 633 wlock.release()
635 634
636 635 # monkeypatches
637 636 def kwpatchfile_init(orig, self, ui, gp, backend, store, eolmode=None):
638 637 '''Monkeypatch/wrap patch.patchfile.__init__ to avoid
639 638 rejects or conflicts due to expanded keywords in working dir.'''
640 639 orig(self, ui, gp, backend, store, eolmode)
641 640 # shrink keywords read from working dir
642 641 self.lines = kwt.shrinklines(self.fname, self.lines)
643 642
644 643 def kwdiff(orig, *args, **kwargs):
645 644 '''Monkeypatch patch.diff to avoid expansion.'''
646 645 kwt.restrict = True
647 646 return orig(*args, **kwargs)
648 647
649 648 def kwweb_skip(orig, web, req, tmpl):
650 649 '''Wraps webcommands.x turning off keyword expansion.'''
651 650 kwt.match = util.never
652 651 return orig(web, req, tmpl)
653 652
654 653 def kw_amend(orig, ui, repo, commitfunc, old, extra, pats, opts):
655 654 '''Wraps cmdutil.amend expanding keywords after amend.'''
656 655 wlock = repo.wlock()
657 656 try:
658 657 kwt.postcommit = True
659 658 newid = orig(ui, repo, commitfunc, old, extra, pats, opts)
660 659 if newid != old.node():
661 660 ctx = repo[newid]
662 661 kwt.restrict = True
663 662 kwt.overwrite(ctx, ctx.files(), False, True)
664 663 kwt.restrict = False
665 664 return newid
666 665 finally:
667 666 wlock.release()
668 667
669 668 def kw_copy(orig, ui, repo, pats, opts, rename=False):
670 669 '''Wraps cmdutil.copy so that copy/rename destinations do not
671 670 contain expanded keywords.
672 671 Note that the source of a regular file destination may also be a
673 672 symlink:
674 673 hg cp sym x -> x is symlink
675 674 cp sym x; hg cp -A sym x -> x is file (maybe expanded keywords)
676 675 For the latter we have to follow the symlink to find out whether its
677 676 target is configured for expansion and we therefore must unexpand the
678 677 keywords in the destination.'''
679 678 wlock = repo.wlock()
680 679 try:
681 680 orig(ui, repo, pats, opts, rename)
682 681 if opts.get('dry_run'):
683 682 return
684 683 wctx = repo[None]
685 684 cwd = repo.getcwd()
686 685
687 686 def haskwsource(dest):
688 687 '''Returns true if dest is a regular file and configured for
689 688 expansion or a symlink which points to a file configured for
690 689 expansion. '''
691 690 source = repo.dirstate.copied(dest)
692 691 if 'l' in wctx.flags(source):
693 692 source = pathutil.canonpath(repo.root, cwd,
694 693 os.path.realpath(source))
695 694 return kwt.match(source)
696 695
697 696 candidates = [f for f in repo.dirstate.copies() if
698 697 'l' not in wctx.flags(f) and haskwsource(f)]
699 698 kwt.overwrite(wctx, candidates, False, False)
700 699 finally:
701 700 wlock.release()
702 701
703 702 def kw_dorecord(orig, ui, repo, commitfunc, *pats, **opts):
704 703 '''Wraps record.dorecord expanding keywords after recording.'''
705 704 wlock = repo.wlock()
706 705 try:
707 706 # record returns 0 even when nothing has changed
708 707 # therefore compare nodes before and after
709 708 kwt.postcommit = True
710 709 ctx = repo['.']
711 710 wstatus = ctx.status()
712 711 ret = orig(ui, repo, commitfunc, *pats, **opts)
713 712 recctx = repo['.']
714 713 if ctx != recctx:
715 714 modified, added = _preselect(wstatus, recctx.files())
716 715 kwt.restrict = False
717 716 kwt.overwrite(recctx, modified, False, True)
718 717 kwt.overwrite(recctx, added, False, True, True)
719 718 kwt.restrict = True
720 719 return ret
721 720 finally:
722 721 wlock.release()
723 722
724 723 def kwfilectx_cmp(orig, self, fctx):
725 724 # keyword affects data size, comparing wdir and filelog size does
726 725 # not make sense
727 726 if (fctx._filerev is None and
728 727 (self._repo._encodefilterpats or
729 728 kwt.match(fctx.path()) and 'l' not in fctx.flags() or
730 729 self.size() - 4 == fctx.size()) or
731 730 self.size() == fctx.size()):
732 731 return self._filelog.cmp(self._filenode, fctx.data())
733 732 return True
734 733
735 734 extensions.wrapfunction(context.filectx, 'cmp', kwfilectx_cmp)
736 735 extensions.wrapfunction(patch.patchfile, '__init__', kwpatchfile_init)
737 736 extensions.wrapfunction(patch, 'diff', kwdiff)
738 737 extensions.wrapfunction(cmdutil, 'amend', kw_amend)
739 738 extensions.wrapfunction(cmdutil, 'copy', kw_copy)
740 739 extensions.wrapfunction(cmdutil, 'dorecord', kw_dorecord)
741 740 for c in 'annotate changeset rev filediff diff'.split():
742 741 extensions.wrapfunction(webcommands, c, kwweb_skip)
743 742 repo.__class__ = kwrepo
@@ -1,417 +1,415 b''
1 1 # notify.py - email notifications for mercurial
2 2 #
3 3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 '''hooks for sending email push notifications
9 9
10 10 This extension implements hooks to send email notifications when
11 11 changesets are sent from or received by the local repository.
12 12
13 13 First, enable the extension as explained in :hg:`help extensions`, and
14 14 register the hook you want to run. ``incoming`` and ``changegroup`` hooks
15 15 are run when changesets are received, while ``outgoing`` hooks are for
16 16 changesets sent to another repository::
17 17
18 18 [hooks]
19 19 # one email for each incoming changeset
20 20 incoming.notify = python:hgext.notify.hook
21 21 # one email for all incoming changesets
22 22 changegroup.notify = python:hgext.notify.hook
23 23
24 24 # one email for all outgoing changesets
25 25 outgoing.notify = python:hgext.notify.hook
26 26
27 27 This registers the hooks. To enable notification, subscribers must
28 28 be assigned to repositories. The ``[usersubs]`` section maps multiple
29 29 repositories to a given recipient. The ``[reposubs]`` section maps
30 30 multiple recipients to a single repository::
31 31
32 32 [usersubs]
33 33 # key is subscriber email, value is a comma-separated list of repo patterns
34 34 user@host = pattern
35 35
36 36 [reposubs]
37 37 # key is repo pattern, value is a comma-separated list of subscriber emails
38 38 pattern = user@host
39 39
40 40 A ``pattern`` is a ``glob`` matching the absolute path to a repository,
41 41 optionally combined with a revset expression. A revset expression, if
42 42 present, is separated from the glob by a hash. Example::
43 43
44 44 [reposubs]
45 45 */widgets#branch(release) = qa-team@example.com
46 46
47 47 This sends to ``qa-team@example.com`` whenever a changeset on the ``release``
48 48 branch triggers a notification in any repository ending in ``widgets``.
49 49
50 50 In order to place them under direct user management, ``[usersubs]`` and
51 51 ``[reposubs]`` sections may be placed in a separate ``hgrc`` file and
52 52 incorporated by reference::
53 53
54 54 [notify]
55 55 config = /path/to/subscriptionsfile
56 56
57 57 Notifications will not be sent until the ``notify.test`` value is set
58 58 to ``False``; see below.
59 59
60 60 Notifications content can be tweaked with the following configuration entries:
61 61
62 62 notify.test
63 63 If ``True``, print messages to stdout instead of sending them. Default: True.
64 64
65 65 notify.sources
66 66 Space-separated list of change sources. Notifications are activated only
67 67 when a changeset's source is in this list. Sources may be:
68 68
69 69 :``serve``: changesets received via http or ssh
70 70 :``pull``: changesets received via ``hg pull``
71 71 :``unbundle``: changesets received via ``hg unbundle``
72 72 :``push``: changesets sent or received via ``hg push``
73 73 :``bundle``: changesets sent via ``hg unbundle``
74 74
75 75 Default: serve.
76 76
77 77 notify.strip
78 78 Number of leading slashes to strip from url paths. By default, notifications
79 79 reference repositories with their absolute path. ``notify.strip`` lets you
80 80 turn them into relative paths. For example, ``notify.strip=3`` will change
81 81 ``/long/path/repository`` into ``repository``. Default: 0.
82 82
83 83 notify.domain
84 84 Default email domain for sender or recipients with no explicit domain.
85 85
86 86 notify.style
87 87 Style file to use when formatting emails.
88 88
89 89 notify.template
90 90 Template to use when formatting emails.
91 91
92 92 notify.incoming
93 93 Template to use when run as an incoming hook, overriding ``notify.template``.
94 94
95 95 notify.outgoing
96 96 Template to use when run as an outgoing hook, overriding ``notify.template``.
97 97
98 98 notify.changegroup
99 99 Template to use when running as a changegroup hook, overriding
100 100 ``notify.template``.
101 101
102 102 notify.maxdiff
103 103 Maximum number of diff lines to include in notification email. Set to 0
104 104 to disable the diff, or -1 to include all of it. Default: 300.
105 105
106 106 notify.maxsubject
107 107 Maximum number of characters in email's subject line. Default: 67.
108 108
109 109 notify.diffstat
110 110 Set to True to include a diffstat before diff content. Default: True.
111 111
112 112 notify.merge
113 113 If True, send notifications for merge changesets. Default: True.
114 114
115 115 notify.mbox
116 116 If set, append mails to this mbox file instead of sending. Default: None.
117 117
118 118 notify.fromauthor
119 119 If set, use the committer of the first changeset in a changegroup for
120 120 the "From" field of the notification mail. If not set, take the user
121 121 from the pushing repo. Default: False.
122 122
123 123 If set, the following entries will also be used to customize the
124 124 notifications:
125 125
126 126 email.from
127 127 Email ``From`` address to use if none can be found in the generated
128 128 email content.
129 129
130 130 web.baseurl
131 131 Root repository URL to combine with repository paths when making
132 132 references. See also ``notify.strip``.
133 133
134 134 '''
135 135
136 136 import email, socket, time
137 137 # On python2.4 you have to import this by name or they fail to
138 138 # load. This was not a problem on Python 2.7.
139 139 import email.Parser
140 140 from mercurial.i18n import _
141 from mercurial import patch, cmdutil, templater, util, mail
141 from mercurial import patch, cmdutil, util, mail
142 142 import fnmatch
143 143
144 144 testedwith = 'internal'
145 145
146 146 # template for single changeset can include email headers.
147 147 single_template = '''
148 148 Subject: changeset in {webroot}: {desc|firstline|strip}
149 149 From: {author}
150 150
151 151 changeset {node|short} in {root}
152 152 details: {baseurl}{webroot}?cmd=changeset;node={node|short}
153 153 description:
154 154 \t{desc|tabindent|strip}
155 155 '''.lstrip()
156 156
157 157 # template for multiple changesets should not contain email headers,
158 158 # because only first set of headers will be used and result will look
159 159 # strange.
160 160 multiple_template = '''
161 161 changeset {node|short} in {root}
162 162 details: {baseurl}{webroot}?cmd=changeset;node={node|short}
163 163 summary: {desc|firstline}
164 164 '''
165 165
166 166 deftemplates = {
167 167 'changegroup': multiple_template,
168 168 }
169 169
170 170 class notifier(object):
171 171 '''email notification class.'''
172 172
173 173 def __init__(self, ui, repo, hooktype):
174 174 self.ui = ui
175 175 cfg = self.ui.config('notify', 'config')
176 176 if cfg:
177 177 self.ui.readconfig(cfg, sections=['usersubs', 'reposubs'])
178 178 self.repo = repo
179 179 self.stripcount = int(self.ui.config('notify', 'strip', 0))
180 180 self.root = self.strip(self.repo.root)
181 181 self.domain = self.ui.config('notify', 'domain')
182 182 self.mbox = self.ui.config('notify', 'mbox')
183 183 self.test = self.ui.configbool('notify', 'test', True)
184 184 self.charsets = mail._charsets(self.ui)
185 185 self.subs = self.subscribers()
186 186 self.merge = self.ui.configbool('notify', 'merge', True)
187 187
188 188 mapfile = self.ui.config('notify', 'style')
189 189 template = (self.ui.config('notify', hooktype) or
190 190 self.ui.config('notify', 'template'))
191 191 if not mapfile and not template:
192 192 template = deftemplates.get(hooktype) or single_template
193 if template:
194 template = templater.parsestring(template, quoted=False)
195 193 self.t = cmdutil.changeset_templater(self.ui, self.repo, False, None,
196 194 template, mapfile, False)
197 195
198 196 def strip(self, path):
199 197 '''strip leading slashes from local path, turn into web-safe path.'''
200 198
201 199 path = util.pconvert(path)
202 200 count = self.stripcount
203 201 while count > 0:
204 202 c = path.find('/')
205 203 if c == -1:
206 204 break
207 205 path = path[c + 1:]
208 206 count -= 1
209 207 return path
210 208
211 209 def fixmail(self, addr):
212 210 '''try to clean up email addresses.'''
213 211
214 212 addr = util.email(addr.strip())
215 213 if self.domain:
216 214 a = addr.find('@localhost')
217 215 if a != -1:
218 216 addr = addr[:a]
219 217 if '@' not in addr:
220 218 return addr + '@' + self.domain
221 219 return addr
222 220
223 221 def subscribers(self):
224 222 '''return list of email addresses of subscribers to this repo.'''
225 223 subs = set()
226 224 for user, pats in self.ui.configitems('usersubs'):
227 225 for pat in pats.split(','):
228 226 if '#' in pat:
229 227 pat, revs = pat.split('#', 1)
230 228 else:
231 229 revs = None
232 230 if fnmatch.fnmatch(self.repo.root, pat.strip()):
233 231 subs.add((self.fixmail(user), revs))
234 232 for pat, users in self.ui.configitems('reposubs'):
235 233 if '#' in pat:
236 234 pat, revs = pat.split('#', 1)
237 235 else:
238 236 revs = None
239 237 if fnmatch.fnmatch(self.repo.root, pat):
240 238 for user in users.split(','):
241 239 subs.add((self.fixmail(user), revs))
242 240 return [(mail.addressencode(self.ui, s, self.charsets, self.test), r)
243 241 for s, r in sorted(subs)]
244 242
245 243 def node(self, ctx, **props):
246 244 '''format one changeset, unless it is a suppressed merge.'''
247 245 if not self.merge and len(ctx.parents()) > 1:
248 246 return False
249 247 self.t.show(ctx, changes=ctx.changeset(),
250 248 baseurl=self.ui.config('web', 'baseurl'),
251 249 root=self.repo.root, webroot=self.root, **props)
252 250 return True
253 251
254 252 def skipsource(self, source):
255 253 '''true if incoming changes from this source should be skipped.'''
256 254 ok_sources = self.ui.config('notify', 'sources', 'serve').split()
257 255 return source not in ok_sources
258 256
259 257 def send(self, ctx, count, data):
260 258 '''send message.'''
261 259
262 260 # Select subscribers by revset
263 261 subs = set()
264 262 for sub, spec in self.subs:
265 263 if spec is None:
266 264 subs.add(sub)
267 265 continue
268 266 revs = self.repo.revs('%r and %d:', spec, ctx.rev())
269 267 if len(revs):
270 268 subs.add(sub)
271 269 continue
272 270 if len(subs) == 0:
273 271 self.ui.debug('notify: no subscribers to selected repo '
274 272 'and revset\n')
275 273 return
276 274
277 275 p = email.Parser.Parser()
278 276 try:
279 277 msg = p.parsestr(data)
280 278 except email.Errors.MessageParseError, inst:
281 279 raise util.Abort(inst)
282 280
283 281 # store sender and subject
284 282 sender, subject = msg['From'], msg['Subject']
285 283 del msg['From'], msg['Subject']
286 284
287 285 if not msg.is_multipart():
288 286 # create fresh mime message from scratch
289 287 # (multipart templates must take care of this themselves)
290 288 headers = msg.items()
291 289 payload = msg.get_payload()
292 290 # for notification prefer readability over data precision
293 291 msg = mail.mimeencode(self.ui, payload, self.charsets, self.test)
294 292 # reinstate custom headers
295 293 for k, v in headers:
296 294 msg[k] = v
297 295
298 296 msg['Date'] = util.datestr(format="%a, %d %b %Y %H:%M:%S %1%2")
299 297
300 298 # try to make subject line exist and be useful
301 299 if not subject:
302 300 if count > 1:
303 301 subject = _('%s: %d new changesets') % (self.root, count)
304 302 else:
305 303 s = ctx.description().lstrip().split('\n', 1)[0].rstrip()
306 304 subject = '%s: %s' % (self.root, s)
307 305 maxsubject = int(self.ui.config('notify', 'maxsubject', 67))
308 306 if maxsubject:
309 307 subject = util.ellipsis(subject, maxsubject)
310 308 msg['Subject'] = mail.headencode(self.ui, subject,
311 309 self.charsets, self.test)
312 310
313 311 # try to make message have proper sender
314 312 if not sender:
315 313 sender = self.ui.config('email', 'from') or self.ui.username()
316 314 if '@' not in sender or '@localhost' in sender:
317 315 sender = self.fixmail(sender)
318 316 msg['From'] = mail.addressencode(self.ui, sender,
319 317 self.charsets, self.test)
320 318
321 319 msg['X-Hg-Notification'] = 'changeset %s' % ctx
322 320 if not msg['Message-Id']:
323 321 msg['Message-Id'] = ('<hg.%s.%s.%s@%s>' %
324 322 (ctx, int(time.time()),
325 323 hash(self.repo.root), socket.getfqdn()))
326 324 msg['To'] = ', '.join(sorted(subs))
327 325
328 326 msgtext = msg.as_string()
329 327 if self.test:
330 328 self.ui.write(msgtext)
331 329 if not msgtext.endswith('\n'):
332 330 self.ui.write('\n')
333 331 else:
334 332 self.ui.status(_('notify: sending %d subscribers %d changes\n') %
335 333 (len(subs), count))
336 334 mail.sendmail(self.ui, util.email(msg['From']),
337 335 subs, msgtext, mbox=self.mbox)
338 336
339 337 def diff(self, ctx, ref=None):
340 338
341 339 maxdiff = int(self.ui.config('notify', 'maxdiff', 300))
342 340 prev = ctx.p1().node()
343 341 if ref:
344 342 ref = ref.node()
345 343 else:
346 344 ref = ctx.node()
347 345 chunks = patch.diff(self.repo, prev, ref,
348 346 opts=patch.diffallopts(self.ui))
349 347 difflines = ''.join(chunks).splitlines()
350 348
351 349 if self.ui.configbool('notify', 'diffstat', True):
352 350 s = patch.diffstat(difflines)
353 351 # s may be nil, don't include the header if it is
354 352 if s:
355 353 self.ui.write('\ndiffstat:\n\n%s' % s)
356 354
357 355 if maxdiff == 0:
358 356 return
359 357 elif maxdiff > 0 and len(difflines) > maxdiff:
360 358 msg = _('\ndiffs (truncated from %d to %d lines):\n\n')
361 359 self.ui.write(msg % (len(difflines), maxdiff))
362 360 difflines = difflines[:maxdiff]
363 361 elif difflines:
364 362 self.ui.write(_('\ndiffs (%d lines):\n\n') % len(difflines))
365 363
366 364 self.ui.write("\n".join(difflines))
367 365
368 366 def hook(ui, repo, hooktype, node=None, source=None, **kwargs):
369 367 '''send email notifications to interested subscribers.
370 368
371 369 if used as changegroup hook, send one email for all changesets in
372 370 changegroup. else send one email per changeset.'''
373 371
374 372 n = notifier(ui, repo, hooktype)
375 373 ctx = repo[node]
376 374
377 375 if not n.subs:
378 376 ui.debug('notify: no subscribers to repository %s\n' % n.root)
379 377 return
380 378 if n.skipsource(source):
381 379 ui.debug('notify: changes have source "%s" - skipping\n' % source)
382 380 return
383 381
384 382 ui.pushbuffer()
385 383 data = ''
386 384 count = 0
387 385 author = ''
388 386 if hooktype == 'changegroup' or hooktype == 'outgoing':
389 387 start, end = ctx.rev(), len(repo)
390 388 for rev in xrange(start, end):
391 389 if n.node(repo[rev]):
392 390 count += 1
393 391 if not author:
394 392 author = repo[rev].user()
395 393 else:
396 394 data += ui.popbuffer()
397 395 ui.note(_('notify: suppressing notification for merge %d:%s\n')
398 396 % (rev, repo[rev].hex()[:12]))
399 397 ui.pushbuffer()
400 398 if count:
401 399 n.diff(ctx, repo['tip'])
402 400 else:
403 401 if not n.node(ctx):
404 402 ui.popbuffer()
405 403 ui.note(_('notify: suppressing notification for merge %d:%s\n') %
406 404 (ctx.rev(), ctx.hex()[:12]))
407 405 return
408 406 count += 1
409 407 n.diff(ctx)
410 408
411 409 data += ui.popbuffer()
412 410 fromauthor = ui.config('notify', 'fromauthor')
413 411 if author and fromauthor:
414 412 data = '\n'.join(['From: %s' % author, data])
415 413
416 414 if count:
417 415 n.send(ctx, count, data)
@@ -1,3261 +1,3261 b''
1 1 # cmdutil.py - help for command processing in mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import os, sys, errno, re, tempfile, cStringIO, shutil
11 11 import util, scmutil, templater, patch, error, templatekw, revlog, copies
12 12 import match as matchmod
13 13 import context, repair, graphmod, revset, phases, obsolete, pathutil
14 14 import changelog
15 15 import bookmarks
16 16 import encoding
17 17 import crecord as crecordmod
18 18 import lock as lockmod
19 19
def parsealiases(cmd):
    """Return the list of alias names encoded in a command table key."""
    # a command table key looks like "^name|alias1|alias2"; the optional
    # leading '^' only marks the command for the short help list
    names = cmd.lstrip("^")
    return names.split("|")
22 22
def setupwrapcolorwrite(ui):
    """Replace ui.write with a wrapper that labels diff output so it can
    be colorized; return the original write method so the caller can
    restore it afterwards."""
    oldwrite = ui.write

    def labeledwrite(*args, **kwargs):
        base = kwargs.pop('label', '')
        # run the written chunks through difflabel so each piece gets
        # the appropriate diff.* label appended to the caller's label
        for chunk, l in patch.difflabel(lambda: args):
            oldwrite(chunk, label=base + l)

    setattr(ui, 'write', labeledwrite)
    return oldwrite
35 35
def filterchunks(ui, originalhunks, usecurses, testfile):
    """Let the user choose hunks interactively, via curses if requested."""
    if not usecurses:
        # plain text-prompt based hunk selection
        return patch.filterpatch(ui, originalhunks)

    if testfile:
        # test mode: drive the curses selector from a script file
        chooser = crecordmod.testdecorator(testfile,
                                           crecordmod.testchunkselector)
    else:
        chooser = crecordmod.chunkselector

    return crecordmod.filterpatch(ui, originalhunks, chooser)
48 48
def recordfilter(ui, originalhunks):
    """Interactively filter hunks, honouring the experimental crecord
    configuration, with diff output labeled for colorization."""
    usecurses = ui.configbool('experimental', 'crecord', False)
    testfile = ui.config('experimental', 'crecordtest', None)
    # temporarily wrap ui.write so diff chunks get color labels
    oldwrite = setupwrapcolorwrite(ui)
    try:
        return filterchunks(ui, originalhunks, usecurses, testfile)
    finally:
        ui.write = oldwrite
58 58
def dorecord(ui, repo, commitfunc, cmdsuggest, backupall,
             filterfn, *pats, **opts):
    """Interactively select changes and commit them via 'commitfunc'.

    'cmdsuggest' is the command name shown in the abort message when the
    ui is not interactive.  With 'backupall', every changed file is
    backed up, not only those touched by the selected hunks.  'filterfn'
    is the hunk-selection function (e.g. recordfilter).
    """
    import merge as mergemod
    hunkclasses = (crecordmod.uihunk, patch.recordhunk)
    ishunk = lambda x: isinstance(x, hunkclasses)

    if not ui.interactive():
        raise util.Abort(_('running non-interactively, use %s instead') %
                         cmdsuggest)

    # make sure username is set before going interactive
    if not opts.get('user'):
        ui.username() # raise exception, username not provided

    def recordfunc(ui, repo, message, match, opts):
        """This is generic record driver.

        Its job is to interactively filter local changes, and
        accordingly prepare working directory into a state in which the
        job can be delegated to a non-interactive commit command such as
        'commit' or 'qrefresh'.

        After the actual job is done by non-interactive command, the
        working directory is restored to its original state.

        In the end we'll record interesting changes, and everything else
        will be left in place, so the user can continue working.
        """

        checkunfinished(repo, commit=True)
        merge = len(repo[None].parents()) > 1
        if merge:
            raise util.Abort(_('cannot partially commit a merge '
                               '(use "hg commit" instead)'))

        # diff the working directory with git-style, date-less hunks so
        # the patch can be re-applied below
        status = repo.status(match=match)
        diffopts = patch.difffeatureopts(ui, opts=opts, whitespace=True)
        diffopts.nodates = True
        diffopts.git = True
        originaldiff = patch.diff(repo, changes=status, opts=diffopts)
        originalchunks = patch.parsepatch(originaldiff)

        # 1. filter patch, so we have intending-to apply subset of it
        try:
            chunks = filterfn(ui, originalchunks)
        except patch.PatchError, err:
            raise util.Abort(_('error parsing patch: %s') % err)

        # We need to keep a backup of files that have been newly added and
        # modified during the recording process because there is a previous
        # version without the edit in the workdir
        newlyaddedandmodifiedfiles = set()
        for chunk in chunks:
            if ishunk(chunk) and chunk.header.isnewfile() and chunk not in \
                originalchunks:
                newlyaddedandmodifiedfiles.add(chunk.header.filename())
        # files touched by at least one selected chunk
        contenders = set()
        for h in chunks:
            try:
                contenders.update(set(h.files()))
            except AttributeError:
                pass

        changed = status.modified + status.added + status.removed
        newfiles = [f for f in changed if f in contenders]
        if not newfiles:
            ui.status(_('no changes to record\n'))
            return 0

        modified = set(status.modified)

        # 2. backup changed files, so we can restore them in the end

        if backupall:
            tobackup = changed
        else:
            tobackup = [f for f in newfiles if f in modified or f in \
                    newlyaddedandmodifiedfiles]
        backups = {}
        if tobackup:
            backupdir = repo.join('record-backups')
            try:
                os.mkdir(backupdir)
            except OSError, err:
                # an existing backup directory is fine to reuse
                if err.errno != errno.EEXIST:
                    raise
        try:
            # backup continues
            for f in tobackup:
                fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
                                               dir=backupdir)
                os.close(fd)
                ui.debug('backup %r as %r\n' % (f, tmpname))
                util.copyfile(repo.wjoin(f), tmpname)
                shutil.copystat(repo.wjoin(f), tmpname)
                backups[f] = tmpname

            # re-serialize only the selected chunks for backed-up files
            fp = cStringIO.StringIO()
            for c in chunks:
                fname = c.filename()
                if fname in backups:
                    c.write(fp)
            dopatch = fp.tell()
            fp.seek(0)

            # files added during the interactive session have no clean
            # base in the workdir: remove them before reverting
            [os.unlink(repo.wjoin(c)) for c in newlyaddedandmodifiedfiles]
            # 3a. apply filtered patch to clean repo  (clean)
            if backups:
                # Equivalent to hg.revert
                choices = lambda key: key in backups
                mergemod.update(repo, repo.dirstate.p1(),
                                False, True, choices)

            # 3b. (apply)
            if dopatch:
                try:
                    ui.debug('applying patch\n')
                    ui.debug(fp.getvalue())
                    patch.internalpatch(ui, repo, fp, 1, eolmode=None)
                except patch.PatchError, err:
                    raise util.Abort(str(err))
            del fp

            # 4. We prepared working directory according to filtered
            #    patch. Now is the time to delegate the job to
            #    commit/qrefresh or the like!

            # Make all of the pathnames absolute.
            newfiles = [repo.wjoin(nf) for nf in newfiles]
            return commitfunc(ui, repo, *newfiles, **opts)
        finally:
            # 5. finally restore backed-up files
            try:
                for realname, tmpname in backups.iteritems():
                    ui.debug('restoring %r to %r\n' % (tmpname, realname))
                    util.copyfile(tmpname, repo.wjoin(realname))
                    # Our calls to copystat() here and above are a
                    # hack to trick any editors that have f open that
                    # we haven't modified them.
                    #
                    # Also note that this racy as an editor could
                    # notice the file's mtime before we've finished
                    # writing it.
                    shutil.copystat(tmpname, repo.wjoin(realname))
                    os.unlink(tmpname)
                if tobackup:
                    os.rmdir(backupdir)
            except OSError:
                # best-effort cleanup: never mask the real result
                pass

    return commit(ui, repo, recordfunc, pats, opts)
210 210
def findpossible(cmd, table, strict=False):
    """Map each command matching 'cmd' to (aliases, command table entry).

    Debug commands (or their aliases) are returned only when no normal
    command matches.  Also returns the flat list of all known aliases.
    """
    choice = {}
    debugchoice = {}

    # short-circuit exact matches: the "log" alias beats a prefix match
    # against "^log|history"
    keys = [cmd] if cmd in table else table.keys()

    allcmds = []
    for entry in keys:
        aliases = parsealiases(entry)
        allcmds.extend(aliases)
        found = None
        if cmd in aliases:
            found = cmd
        elif not strict:
            # accept any alias the given string is a prefix of
            for alias in aliases:
                if alias.startswith(cmd):
                    found = alias
                    break
        if found is not None:
            target = (aliases, table[entry])
            if aliases[0].startswith("debug") or found.startswith("debug"):
                debugchoice[found] = target
            else:
                choice[found] = target

    if not choice and debugchoice:
        choice = debugchoice

    return choice, allcmds
248 248
def findcmd(cmd, table, strict=True):
    """Return (aliases, command table entry) for command string."""
    choice, allcmds = findpossible(cmd, table, strict)

    # exact (or uniquely aliased) match wins outright
    if cmd in choice:
        return choice[cmd]

    if len(choice) > 1:
        # several commands share this prefix
        raise error.AmbiguousCommand(cmd, sorted(choice.keys()))

    if choice:
        # a single unambiguous prefix match
        return next(iter(choice.values()))

    raise error.UnknownCommand(cmd, allcmds)
265 265
def findrepo(p):
    """Walk up from directory 'p' looking for a repository root.

    Return the first ancestor (including 'p' itself) containing a '.hg'
    directory, or None if the filesystem root is reached first.
    """
    while True:
        if os.path.isdir(os.path.join(p, ".hg")):
            return p
        parent = os.path.dirname(p)
        if parent == p:
            # dirname() reached a fixed point: the filesystem root
            return None
        p = parent
273 273
def bailifchanged(repo, merge=True):
    """Abort if the working directory has uncommitted changes.

    With merge=True (the default) an in-progress uncommitted merge also
    aborts.  Subrepositories are checked recursively.
    """
    if merge and repo.dirstate.p2() != nullid:
        raise util.Abort(_('outstanding uncommitted merge'))
    changed = repo.status()[:4]  # modified, added, removed, deleted
    if any(changed):
        raise util.Abort(_('uncommitted changes'))
    wctx = repo[None]
    for subpath in sorted(wctx.substate):
        wctx.sub(subpath).bailifchanged()
283 283
def logmessage(ui, opts):
    """Get the log message according to the -m and -l options."""
    message = opts.get('message')
    logfile = opts.get('logfile')

    if message and logfile:
        raise util.Abort(_('options --message and --logfile are mutually '
                           'exclusive'))
    if logfile and not message:
        # -l: read the message from a file; '-' means stdin
        try:
            if logfile == '-':
                message = ui.fin.read()
            else:
                message = '\n'.join(util.readfile(logfile).splitlines())
        except IOError as inst:
            raise util.Abort(_("can't read commit message '%s': %s") %
                             (logfile, inst.strerror))
    return message
302 302
def mergeeditform(ctxorbool, baseformname):
    """Return the appropriate editform name (referencing a committemplate).

    'ctxorbool' is either a ctx to be committed, or a bool indicating
    whether a merge is being committed.

    '.merge' is appended to baseformname for a merge commit, otherwise
    '.normal' is appended.
    """
    if isinstance(ctxorbool, bool):
        merging = ctxorbool
    else:
        merging = len(ctxorbool.parents()) > 1
    suffix = ".merge" if merging else ".normal"
    return baseformname + suffix
319 319
def getcommiteditor(edit=False, finishdesc=None, extramsg=None,
                    editform='', **opts):
    """Get the appropriate commit message editor according to '--edit'.

    'finishdesc' is a function called with the edited commit message
    (= 'description' of the new changeset) just after editing but
    before the empty-ness check; it returns the text actually stored
    into history, which allows rewriting the description beforehand.

    'extramsg' is an extra message shown in the editor instead of the
    'Leave message empty to abort commit' line ('HG: ' prefix and EOL
    are added automatically).

    'editform' is a dot-separated list of names, to distinguish the
    purpose of the commit text editing.

    'commitforceeditor' is returned regardless of 'edit' when either
    'finishdesc' or 'extramsg' is specified, because they are specific
    for usage in MQ.
    """
    if edit or finishdesc or extramsg:
        def forcingeditor(r, c, s):
            return commitforceeditor(r, c, s,
                                     finishdesc=finishdesc,
                                     extramsg=extramsg,
                                     editform=editform)
        return forcingeditor
    if editform:
        def formeditor(r, c, s):
            return commiteditor(r, c, s, editform=editform)
        return formeditor
    return commiteditor
350 350
def loglimit(opts):
    """Get the log limit according to option -l/--limit."""
    limit = opts.get('limit')
    if not limit:
        # no limit requested (missing, None, or empty string)
        return None
    try:
        limit = int(limit)
    except ValueError:
        raise util.Abort(_('limit must be a positive integer'))
    if limit <= 0:
        raise util.Abort(_('limit must be positive'))
    return limit
364 364
def makefilename(repo, pat, node, desc=None,
                 total=None, seqno=None, revwidth=None, pathname=None):
    """Expand the '%'-specifiers in output filename pattern 'pat'.

    Only the specifiers whose backing data was supplied are available;
    using an unknown or unavailable one raises Abort.
    """
    expander = {
        '%': lambda: '%',
        'b': lambda: os.path.basename(repo.root),
    }
    try:
        if node:
            expander.update({
                'H': lambda: hex(node),
                'R': lambda: str(repo.changelog.rev(node)),
                'h': lambda: short(node),
                'm': lambda: re.sub('[^\w]', '_', str(desc)),
                'r': lambda: str(repo.changelog.rev(node)).zfill(revwidth
                                                                 or 0),
            })
        if total is not None:
            expander['N'] = lambda: str(total)
        if seqno is not None:
            expander['n'] = lambda: str(seqno)
        if total is not None and seqno is not None:
            # zero-pad the sequence number to the width of the total
            expander['n'] = lambda: str(seqno).zfill(len(str(total)))
        if pathname is not None:
            expander['s'] = lambda: os.path.basename(pathname)
            expander['d'] = lambda: os.path.dirname(pathname) or '.'
            expander['p'] = lambda: pathname

        newname = []
        i = 0
        end = len(pat)
        while i < end:
            c = pat[i]
            if c == '%':
                i += 1
                c = expander[pat[i]]()
            newname.append(c)
            i += 1
        return ''.join(newname)
    except KeyError as inst:
        raise util.Abort(_("invalid format spec '%%%s' in output filename") %
                         inst.args[0])
410 410
def makefileobj(repo, pat, node=None, desc=None, total=None,
                seqno=None, revwidth=None, mode='wb', modemap=None,
                pathname=None):
    """Open (or pass through) a file object for patch output/input.

    An empty pattern or '-' means the ui's stdout/stdin; a file-like
    object passed as 'pat' is returned as-is; otherwise 'pat' is
    expanded via makefilename() and opened with 'mode' (possibly
    overridden per-file by 'modemap').
    """
    writable = mode not in ('r', 'rb')

    if not pat or pat == '-':
        fp = repo.ui.fout if writable else repo.ui.fin
        if util.safehasattr(fp, 'fileno'):
            return os.fdopen(os.dup(fp.fileno()), mode)
        # this fp can't be duped properly: hand back a shim whose
        # close() is a no-op so callers may close it safely
        class wrappedfileobj(object):
            noop = lambda x: None
            def __init__(self, f):
                self.f = f
            def __getattr__(self, attr):
                if attr == 'close':
                    return self.noop
                return getattr(self.f, attr)
        return wrappedfileobj(fp)
    if util.safehasattr(pat, 'write') and writable:
        return pat
    if util.safehasattr(pat, 'read') and 'r' in mode:
        return pat
    fn = makefilename(repo, pat, node, desc, total, seqno, revwidth, pathname)
    if modemap is not None:
        mode = modemap.get(fn, mode)
        if mode == 'wb':
            # first write to a given file truncates; later ones append
            modemap[fn] = 'ab'
    return open(fn, mode)
448 448
def openrevlog(repo, cmd, file_, opts):
    """opens the changelog, manifest, a filelog or a given revlog"""
    cl = opts['changelog']
    mf = opts['manifest']
    msg = None
    # --changelog and --manifest are exclusive, take no filename, and
    # require a repository
    if cl and mf:
        msg = _('cannot specify --changelog and --manifest at the same time')
    elif cl or mf:
        if file_:
            msg = _('cannot specify filename with --changelog or --manifest')
        elif not repo:
            msg = _('cannot specify --changelog or --manifest '
                    'without a repository')
    if msg:
        raise util.Abort(msg)

    r = None
    if repo:
        if cl:
            # use the unfiltered view so hidden revisions stay reachable
            r = repo.unfiltered().changelog
        elif mf:
            r = repo.manifest
        elif file_:
            filelog = repo.file(file_)
            if len(filelog):
                r = filelog
    if not r:
        # fall back to opening the named revlog file directly
        if not file_:
            raise error.CommandError(cmd, _('invalid arguments'))
        if not os.path.isfile(file_):
            raise util.Abort(_("revlog '%s' not found") % file_)
        r = revlog.revlog(scmutil.opener(os.getcwd(), audit=False),
                          file_[:-2] + ".i")
    return r
483 483
def copy(ui, repo, pats, opts, rename=False):
    """Copy (or, with rename=True, move) the files matching 'pats'.

    The last element of 'pats' is the destination.  Returns True when
    any copy failed, so callers can turn it into an exit code.
    """
    # called with the repo lock held
    #
    # hgsep => pathname that uses "/" to separate directories
    # ossep => pathname that uses os.sep to separate directories
    cwd = repo.getcwd()
    targets = {}  # abstarget -> abssrc, used to detect collisions
    after = opts.get("after")
    dryrun = opts.get("dry_run")
    wctx = repo[None]

    def walkpat(pat):
        # expand one source pattern into (abs, rel, exact) tuples,
        # warning about unmanaged or removed files matched exactly
        srcs = []
        if after:
            badstates = '?'
        else:
            badstates = '?r'
        m = scmutil.match(repo[None], [pat], opts, globbed=True)
        for abs in repo.walk(m):
            state = repo.dirstate[abs]
            rel = m.rel(abs)
            exact = m.exact(abs)
            if state in badstates:
                if exact and state == '?':
                    ui.warn(_('%s: not copying - file is not managed\n') % rel)
                if exact and state == 'r':
                    ui.warn(_('%s: not copying - file has been marked for'
                              ' remove\n') % rel)
                continue
            # abs: hgsep
            # rel: ossep
            srcs.append((abs, rel, exact))
        return srcs

    # abssrc: hgsep
    # relsrc: ossep
    # otarget: ossep
    def copyfile(abssrc, relsrc, otarget, exact):
        # perform one copy/rename; returns True on failure
        abstarget = pathutil.canonpath(repo.root, cwd, otarget)
        if '/' in abstarget:
            # We cannot normalize abstarget itself, this would prevent
            # case only renames, like a => A.
            abspath, absname = abstarget.rsplit('/', 1)
            abstarget = repo.dirstate.normalize(abspath) + '/' + absname
        reltarget = repo.pathto(abstarget, cwd)
        target = repo.wjoin(abstarget)
        src = repo.wjoin(abssrc)
        state = repo.dirstate[abstarget]

        scmutil.checkportable(ui, abstarget)

        # check for collisions
        prevsrc = targets.get(abstarget)
        if prevsrc is not None:
            ui.warn(_('%s: not overwriting - %s collides with %s\n') %
                    (reltarget, repo.pathto(abssrc, cwd),
                     repo.pathto(prevsrc, cwd)))
            return

        # check for overwrites
        exists = os.path.lexists(target)
        samefile = False
        if exists and abssrc != abstarget:
            if (repo.dirstate.normalize(abssrc) ==
                repo.dirstate.normalize(abstarget)):
                # case-only rename on a case-insensitive filesystem
                if not rename:
                    ui.warn(_("%s: can't copy - same file\n") % reltarget)
                    return
                exists = False
                samefile = True

        if not after and exists or after and state in 'mn':
            if not opts['force']:
                ui.warn(_('%s: not overwriting - file exists\n') %
                        reltarget)
                return

        if after:
            # --after only records the copy/move; the target must
            # already exist on disk
            if not exists:
                if rename:
                    ui.warn(_('%s: not recording move - %s does not exist\n') %
                            (relsrc, reltarget))
                else:
                    ui.warn(_('%s: not recording copy - %s does not exist\n') %
                            (relsrc, reltarget))
                return
        elif not dryrun:
            try:
                if exists:
                    os.unlink(target)
                targetdir = os.path.dirname(target) or '.'
                if not os.path.isdir(targetdir):
                    os.makedirs(targetdir)
                if samefile:
                    # rename through a temporary name so only the case
                    # of the filename changes
                    tmp = target + "~hgrename"
                    os.rename(src, tmp)
                    os.rename(tmp, target)
                else:
                    util.copyfile(src, target)
                srcexists = True
            except IOError, inst:
                if inst.errno == errno.ENOENT:
                    ui.warn(_('%s: deleted in working directory\n') % relsrc)
                    srcexists = False
                else:
                    ui.warn(_('%s: cannot copy - %s\n') %
                            (relsrc, inst.strerror))
                    return True # report a failure

        if ui.verbose or not exact:
            if rename:
                ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
            else:
                ui.status(_('copying %s to %s\n') % (relsrc, reltarget))

        targets[abstarget] = abssrc

        # fix up dirstate
        scmutil.dirstatecopy(ui, repo, wctx, abssrc, abstarget,
                             dryrun=dryrun, cwd=cwd)
        if rename and not dryrun:
            if not after and srcexists and not samefile:
                util.unlinkpath(repo.wjoin(abssrc))
            wctx.forget([abssrc])

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathfn(pat, dest, srcs):
        if os.path.isdir(pat):
            # source is a directory: strip its prefix from each file
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            abspfx = util.localpath(abspfx)
            if destdirexists:
                striplen = len(os.path.split(abspfx)[0])
            else:
                striplen = len(abspfx)
            if striplen:
                striplen += len(os.sep)
            res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
        elif destdirexists:
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            res = lambda p: dest
        return res

    # pat: ossep
    # dest ossep
    # srcs: list of (hgsep, hgsep, ossep, bool)
    # return: function that takes hgsep and returns ossep
    def targetpathafterfn(pat, dest, srcs):
        # like targetpathfn, but the sources may no longer exist on
        # disk (--after), so guess the layout from what does exist
        if matchmod.patkind(pat):
            # a mercurial pattern
            res = lambda p: os.path.join(dest,
                                         os.path.basename(util.localpath(p)))
        else:
            abspfx = pathutil.canonpath(repo.root, cwd, pat)
            if len(abspfx) < len(srcs[0][0]):
                # A directory. Either the target path contains the last
                # component of the source path or it does not.
                def evalpath(striplen):
                    # count how many sources already exist under dest
                    # with this prefix stripped
                    score = 0
                    for s in srcs:
                        t = os.path.join(dest, util.localpath(s[0])[striplen:])
                        if os.path.lexists(t):
                            score += 1
                    return score

                abspfx = util.localpath(abspfx)
                striplen = len(abspfx)
                if striplen:
                    striplen += len(os.sep)
                if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
                    score = evalpath(striplen)
                    striplen1 = len(os.path.split(abspfx)[0])
                    if striplen1:
                        striplen1 += len(os.sep)
                    if evalpath(striplen1) > score:
                        striplen = striplen1
                res = lambda p: os.path.join(dest,
                                             util.localpath(p)[striplen:])
            else:
                # a file
                if destdirexists:
                    res = lambda p: os.path.join(dest,
                                        os.path.basename(util.localpath(p)))
                else:
                    res = lambda p: dest
        return res

    pats = scmutil.expandpats(pats)
    if not pats:
        raise util.Abort(_('no source or destination specified'))
    if len(pats) == 1:
        raise util.Abort(_('no destination specified'))
    dest = pats.pop()
    destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
    if not destdirexists:
        if len(pats) > 1 or matchmod.patkind(pats[0]):
            raise util.Abort(_('with multiple sources, destination must be an '
                               'existing directory'))
        if util.endswithsep(dest):
            raise util.Abort(_('destination %s is not a directory') % dest)

    tfn = targetpathfn
    if after:
        tfn = targetpathafterfn
    copylist = []
    for pat in pats:
        srcs = walkpat(pat)
        if not srcs:
            continue
        copylist.append((tfn(pat, dest, srcs), srcs))
    if not copylist:
        raise util.Abort(_('no files to copy'))

    errors = 0
    for targetpath, srcs in copylist:
        for abssrc, relsrc, exact in srcs:
            if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
                errors += 1

    if errors:
        ui.warn(_('(consider using --after)\n'))

    return errors != 0
711 711
def service(opts, parentfn=None, initfn=None, runfn=None, logfile=None,
            runargs=None, appendpid=False):
    '''Run a command as a service.

    With --daemon the process re-executes itself detached (passing
    --daemon-pipefds to the child); the parent waits for a lock file to
    disappear as the startup signal.  'initfn' runs before serving,
    'runfn' is the service body, and 'parentfn' (if given) is called in
    the parent with the child pid.  'logfile' receives stdout/stderr of
    the daemonized process; 'appendpid' appends to the pid file instead
    of overwriting it.
    '''

    def writepid(pid):
        # record the service pid if --pid-file was requested
        if opts['pid_file']:
            if appendpid:
                mode = 'a'
            else:
                mode = 'w'
            fp = open(opts['pid_file'], mode)
            fp.write(str(pid) + '\n')
            fp.close()

    if opts['daemon'] and not opts['daemon_pipefds']:
        # parent side of daemonization: spawn a detached child
        # Signal child process startup with file removal
        lockfd, lockpath = tempfile.mkstemp(prefix='hg-service-')
        os.close(lockfd)
        try:
            if not runargs:
                runargs = util.hgcmd() + sys.argv[1:]
            runargs.append('--daemon-pipefds=%s' % lockpath)
            # Don't pass --cwd to the child process, because we've already
            # changed directory.
            for i in xrange(1, len(runargs)):
                if runargs[i].startswith('--cwd='):
                    del runargs[i]
                    break
                elif runargs[i].startswith('--cwd'):
                    del runargs[i:i + 2]
                    break
            def condfn():
                return not os.path.exists(lockpath)
            pid = util.rundetached(runargs, condfn)
            if pid < 0:
                raise util.Abort(_('child process failed to start'))
            writepid(pid)
        finally:
            try:
                os.unlink(lockpath)
            except OSError, e:
                # the child may already have removed the lock file
                if e.errno != errno.ENOENT:
                    raise
        if parentfn:
            return parentfn(pid)
        else:
            return

    if initfn:
        initfn()

    if not opts['daemon']:
        writepid(os.getpid())

    if opts['daemon_pipefds']:
        # child side of daemonization: detach from the terminal and
        # signal the waiting parent by removing the lock file
        lockpath = opts['daemon_pipefds']
        try:
            os.setsid()
        except AttributeError:
            # setsid is not available on this platform (e.g. Windows)
            pass
        os.unlink(lockpath)
        util.hidewindow()
        sys.stdout.flush()
        sys.stderr.flush()

        # redirect stdin to /dev/null and stdout/stderr to the logfile
        # (or /dev/null when no logfile was given)
        nullfd = os.open(os.devnull, os.O_RDWR)
        logfilefd = nullfd
        if logfile:
            logfilefd = os.open(logfile, os.O_RDWR | os.O_CREAT | os.O_APPEND)
        os.dup2(nullfd, 0)
        os.dup2(logfilefd, 1)
        os.dup2(logfilefd, 2)
        if nullfd not in (0, 1, 2):
            os.close(nullfd)
        if logfile and logfilefd not in (0, 1, 2):
            os.close(logfilefd)

    if runfn:
        return runfn()
791 791
def tryimportone(ui, repo, hunk, parents, opts, msgs, updatefunc):
    """Utility function used by commands.import to import a single patch

    This function is explicitly defined here to help the evolve extension to
    wrap this part of the import logic.

    The API is currently a bit ugly because it a simple code translation from
    the import command. Feel free to make it better.

    :hunk: a patch (as a binary string)
    :parents: nodes that will be parent of the created commit
    :opts: the full dict of option passed to the import command
    :msgs: list to save commit message to.
          (used in case we need to save it when failing)
    :updatefunc: a function that update a repo to a given node
                 updatefunc(<repo>, <node>)

    Returns a (summary message, committed node, had rejects) tuple.
    """
    tmpname, message, user, date, branch, nodeid, p1, p2 = \
        patch.extract(ui, hunk)

    update = not opts.get('bypass')
    strip = opts["strip"]
    prefix = opts["prefix"]
    sim = float(opts.get('similarity') or 0)
    if not tmpname:
        # patch.extract found nothing importable in this hunk
        return (None, None, False)
    msg = _('applied to working directory')

    rejects = False

    try:
        cmdline_message = logmessage(ui, opts)
        if cmdline_message:
            # pickup the cmdline msg
            message = cmdline_message
        elif message:
            # pickup the patch msg
            message = message.strip()
        else:
            # launch the editor
            message = None
        ui.debug('message:\n%s\n' % message)

        if len(parents) == 1:
            parents.append(repo[nullid])
        if opts.get('exact'):
            # --exact requires the node and first parent recorded in
            # the patch header
            if not nodeid or not p1:
                raise util.Abort(_('not a Mercurial patch'))
            p1 = repo[p1]
            p2 = repo[p2 or nullid]
        elif p2:
            try:
                p1 = repo[p1]
                p2 = repo[p2]
                # Without any options, consider p2 only if the
                # patch is being applied on top of the recorded
                # first parent.
                if p1 != parents[0]:
                    p1 = parents[0]
                    p2 = repo[nullid]
            except error.RepoError:
                # the recorded parents are unknown locally: fall back
                # to the working directory parents
                p1, p2 = parents
                if p2.node() == nullid:
                    ui.warn(_("warning: import the patch as a normal revision\n"
                              "(use --exact to import the patch as a merge)\n"))
        else:
            p1, p2 = parents

        n = None
        if update:
            # apply to the working directory, then commit from it
            repo.dirstate.beginparentchange()
            if p1 != parents[0]:
                updatefunc(repo, p1.node())
            if p2 != parents[1]:
                repo.setparents(p1.node(), p2.node())

            if opts.get('exact') or opts.get('import_branch'):
                repo.dirstate.setbranch(branch or 'default')

            partial = opts.get('partial', False)
            files = set()
            try:
                patch.patch(ui, repo, tmpname, strip=strip, prefix=prefix,
                            files=files, eolmode=None, similarity=sim / 100.0)
            except patch.PatchError, e:
                if not partial:
                    raise util.Abort(str(e))
                if partial:
                    # --partial: keep going and commit what applied
                    rejects = True

            files = list(files)
            if opts.get('no_commit'):
                if message:
                    msgs.append(message)
            else:
                if opts.get('exact') or p2:
                    # If you got here, you either use --force and know what
                    # you are doing or used --exact or a merge patch while
                    # being updated to its first parent.
                    m = None
                else:
                    m = scmutil.matchfiles(repo, files or [])
                editform = mergeeditform(repo[None], 'import.normal')
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform=editform, **opts)
                n = repo.commit(message, opts.get('user') or user,
                                opts.get('date') or date, match=m,
                                editor=editor, force=partial)
            repo.dirstate.endparentchange()
        else:
            # --bypass: build the commit in memory without touching the
            # working directory
            if opts.get('exact') or opts.get('import_branch'):
                branch = branch or 'default'
            else:
                branch = p1.branch()
            store = patch.filestore()
            try:
                files = set()
                try:
                    patch.patchrepo(ui, repo, p1, store, tmpname, strip, prefix,
                                    files, eolmode=None)
                except patch.PatchError, e:
                    raise util.Abort(str(e))
                if opts.get('exact'):
                    editor = None
                else:
                    editor = getcommiteditor(editform='import.bypass')
                memctx = context.makememctx(repo, (p1.node(), p2.node()),
                                            message,
                                            opts.get('user') or user,
                                            opts.get('date') or date,
                                            branch, files, store,
                                            editor=editor)
                n = memctx.commit()
            finally:
                store.close()
        if opts.get('exact') and opts.get('no_commit'):
            # --exact with --no-commit is still useful in that it does merge
            # and branch bits
            ui.warn(_("warning: can't check exact import with --no-commit\n"))
        elif opts.get('exact') and hex(n) != nodeid:
            raise util.Abort(_('patch is damaged or loses information'))
        if n:
            # i18n: refers to a short changeset id
            msg = _('created %s') % short(n)
        return (msg, n, rejects)
    finally:
        os.unlink(tmpname)
941 941
def export(repo, revs, template='hg-%h.patch', fp=None, switch_parent=False,
           opts=None):
    '''export changesets as hg patches.

    'template' is the output filename pattern (see makefilename); an
    explicit file object 'fp' overrides it.  With 'switch_parent', the
    diff is taken against the second parent.  When no file is selected,
    output goes to the ui.
    '''

    total = len(revs)
    # width of the widest revision number, for the %r filename specifier
    revwidth = max([len(str(rev)) for rev in revs])
    filemode = {}

    def single(rev, seqno, fp):
        # write one changeset as a patch to fp (or a file derived from
        # the template, or the ui)
        ctx = repo[rev]
        node = ctx.node()
        parents = [p.node() for p in ctx.parents() if p]
        branch = ctx.branch()
        if switch_parent:
            parents.reverse()

        if parents:
            prev = parents[0]
        else:
            prev = nullid

        shouldclose = False
        if not fp and len(template) > 0:
            desc_lines = ctx.description().rstrip().split('\n')
            desc = desc_lines[0] #Commit always has a first line.
            fp = makefileobj(repo, template, node, desc=desc, total=total,
                             seqno=seqno, revwidth=revwidth, mode='wb',
                             modemap=filemode)
            if fp != template:
                shouldclose = True
        if fp and fp != sys.stdout and util.safehasattr(fp, 'name'):
            repo.ui.note("%s\n" % fp.name)

        if not fp:
            write = repo.ui.write
        else:
            def write(s, **kw):
                # ignore label kwargs when writing to a real file
                fp.write(s)

        # patch header
        write("# HG changeset patch\n")
        write("# User %s\n" % ctx.user())
        write("# Date %d %d\n" % ctx.date())
        write("# %s\n" % util.datestr(ctx.date()))
        if branch and branch != 'default':
            write("# Branch %s\n" % branch)
        write("# Node ID %s\n" % hex(node))
        write("# Parent %s\n" % hex(prev))
        if len(parents) > 1:
            write("# Parent %s\n" % hex(parents[1]))
        write(ctx.description().rstrip())
        write("\n\n")

        for chunk, label in patch.diffui(repo, prev, node, opts=opts):
            write(chunk, label=label)

        if shouldclose:
            fp.close()

    for seqno, rev in enumerate(revs):
        single(rev, seqno + 1, fp)
1002 1002
def diffordiffstat(ui, repo, diffopts, node1, node2, match,
                   changes=None, stat=False, fp=None, prefix='',
                   root='', listsubrepos=False):
    '''show diff or diffstat.

    Renders the diff (or, with stat=True, a diffstat) between node1
    and node2 for files selected by match, writing to fp when given
    and to the ui otherwise.  root restricts output to a subdirectory;
    listsubrepos recurses into subrepositories.
    '''
    # route output through fp when given, otherwise straight to the ui
    if fp is None:
        write = ui.write
    else:
        def write(s, **kw):
            fp.write(s)

    # canonicalize root into a repo-relative path used to filter output
    if root:
        relroot = pathutil.canonpath(repo.root, repo.getcwd(), root)
    else:
        relroot = ''
    if relroot != '':
        # XXX relative roots currently don't work if the root is within a
        # subrepo
        uirelroot = match.uipath(relroot)
        relroot += '/'
        for matchroot in match.files():
            if not matchroot.startswith(relroot):
                ui.warn(_('warning: %s not inside relative root %s\n') % (
                    match.uipath(matchroot), uirelroot))

    if stat:
        # context lines are irrelevant to diffstat counting; drop them
        diffopts = diffopts.copy(context=0)
        width = 80
        if not ui.plain():
            width = ui.termwidth()
        chunks = patch.diff(repo, node1, node2, match, changes, diffopts,
                            prefix=prefix, relroot=relroot)
        for chunk, label in patch.diffstatui(util.iterlines(chunks),
                                             width=width,
                                             git=diffopts.git):
            write(chunk, label=label)
    else:
        for chunk, label in patch.diffui(repo, node1, node2, match,
                                        changes, diffopts, prefix=prefix,
                                        relroot=relroot):
            write(chunk, label=label)

    if listsubrepos:
        ctx1 = repo[node1]
        ctx2 = repo[node2]
        for subpath, sub in scmutil.itersubrepos(ctx1, ctx2):
            tempnode2 = node2
            try:
                if node2 is not None:
                    tempnode2 = ctx2.substate[subpath][1]
            except KeyError:
                # A subrepo that existed in node1 was deleted between node1 and
                # node2 (inclusive). Thus, ctx2's substate won't contain that
                # subpath. The best we can do is to ignore it.
                tempnode2 = None
            submatch = matchmod.narrowmatcher(subpath, match)
            sub.diff(ui, diffopts, tempnode2, submatch, changes=changes,
                     stat=stat, fp=fp, prefix=prefix)
1060 1060
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        self.buffered = buffered
        self.matchfn = matchfn
        self.diffopts = diffopts
        # rev -> rendered header / hunk text, filled when buffering
        self.header = {}
        self.hunk = {}
        self.lastheader = None
        self.footer = None

    def flush(self, rev):
        # emit buffered output for rev; returns 1 if a hunk was written
        if rev in self.header:
            h = self.header[rev]
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def close(self):
        # write any pending footer collected during display
        if self.footer:
            self.ui.write(self.footer)

    def show(self, ctx, copies=None, matchfn=None, **props):
        # render ctx now, or capture it (with labels) for a later flush()
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, matchfn, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer(labeled=True)
        else:
            self._show(ctx, copies, matchfn, props)

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()
        if self.ui.debugflag:
            hexfunc = hex
        else:
            hexfunc = short
        if rev is None:
            # working-directory ctx: display its first parent with a '+'
            pctx = ctx.p1()
            revnode = (pctx.rev(), hexfunc(pctx.node()) + '+')
        else:
            revnode = (rev, hexfunc(changenode))

        if self.ui.quiet:
            self.ui.write("%d:%s\n" % revnode, label='log.node')
            return

        date = util.datestr(ctx.date())

        # i18n: column positioning for "hg log"
        self.ui.write(_("changeset:   %d:%s\n") % revnode,
                      label='log.changeset changeset.%s' % ctx.phasestr())

        # branches are shown first before any other names due to backwards
        # compatibility
        branch = ctx.branch()
        # don't show the default branch name
        if branch != 'default':
            # i18n: column positioning for "hg log"
            self.ui.write(_("branch:      %s\n") % branch,
                          label='log.branch')

        for name, ns in self.repo.names.iteritems():
            # branches has special logic already handled above, so here we just
            # skip it
            if name == 'branches':
                continue
            # we will use the templatename as the color name since those two
            # should be the same
            for name in ns.names(self.repo, changenode):
                self.ui.write(ns.logfmt % name,
                              label='log.%s' % ns.colorname)
        if self.ui.debugflag:
            # i18n: column positioning for "hg log"
            self.ui.write(_("phase:       %s\n") % ctx.phasestr(),
                          label='log.phase')
        for pctx in self._meaningful_parentrevs(ctx):
            label = 'log.parent changeset.%s' % pctx.phasestr()
            # i18n: column positioning for "hg log"
            self.ui.write(_("parent:      %d:%s\n")
                          % (pctx.rev(), hexfunc(pctx.node())),
                          label=label)

        if self.ui.debugflag and rev is not None:
            mnode = ctx.manifestnode()
            # i18n: column positioning for "hg log"
            self.ui.write(_("manifest:    %d:%s\n") %
                          (self.repo.manifest.rev(mnode), hex(mnode)),
                          label='ui.debug log.manifest')
        # i18n: column positioning for "hg log"
        self.ui.write(_("user:        %s\n") % ctx.user(),
                      label='log.user')
        # i18n: column positioning for "hg log"
        self.ui.write(_("date:        %s\n") % date,
                      label='log.date')

        if self.ui.debugflag:
            # (modified, added, removed) relative to the first parent
            files = ctx.p1().status(ctx)[:3]
            for key, value in zip([# i18n: column positioning for "hg log"
                                   _("files:"),
                                   # i18n: column positioning for "hg log"
                                   _("files+:"),
                                   # i18n: column positioning for "hg log"
                                   _("files-:")], files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)),
                                  label='ui.debug log.files')
        elif ctx.files() and self.ui.verbose:
            # i18n: column positioning for "hg log"
            self.ui.write(_("files:       %s\n") % " ".join(ctx.files()),
                          label='ui.note log.files')
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            # i18n: column positioning for "hg log"
            self.ui.write(_("copies:      %s\n") % ' '.join(copies),
                          label='ui.note log.copies')

        extra = ctx.extra()
        if extra and self.ui.debugflag:
            for key, value in sorted(extra.items()):
                # i18n: column positioning for "hg log"
                self.ui.write(_("extra:       %s=%s\n")
                              % (key, value.encode('string_escape')),
                              label='ui.debug log.extra')

        description = ctx.description().strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"),
                              label='ui.note log.description')
                self.ui.write(description,
                              label='ui.note log.description')
                self.ui.write("\n\n")
            else:
                # i18n: column positioning for "hg log"
                self.ui.write(_("summary:     %s\n") %
                              description.splitlines()[0],
                              label='log.summary')
        self.ui.write("\n")

        self.showpatch(changenode, matchfn)

    def showpatch(self, node, matchfn):
        # append diffstat and/or patch for node per --stat/--patch options
        if not matchfn:
            matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.diffallopts(self.ui, self.diffopts)
            prev = self.repo.changelog.parents(node)[0]
            if stat:
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
            if diff:
                if stat:
                    self.ui.write("\n")
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            return parents
        if self.ui.debugflag:
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= scmutil.intrev(self.repo, ctx.rev()) - 1:
            return []
        return parents
1246 1246
class jsonchangeset(changeset_printer):
    '''format changeset information.

    Emits one JSON object per changeset; the whole sequence forms a
    JSON array built by hand (open bracket on first _show, closed in
    close()).
    '''

    def __init__(self, ui, repo, matchfn, diffopts, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        self.cache = {}
        # True until the opening "[" has been written
        self._first = True

    def close(self):
        # terminate the array; "[]" when nothing was shown at all
        if not self._first:
            self.ui.write("\n]\n")
        else:
            self.ui.write("[]\n")

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''
        rev = ctx.rev()
        if rev is None:
            # working-directory ctx has no rev/node: emit JSON null
            jrev = jnode = 'null'
        else:
            jrev = str(rev)
            jnode = '"%s"' % hex(ctx.node())
        j = encoding.jsonescape

        if self._first:
            self.ui.write("[\n {")
            self._first = False
        else:
            self.ui.write(",\n {")

        if self.ui.quiet:
            self.ui.write('\n  "rev": %s' % jrev)
            self.ui.write(',\n  "node": %s' % jnode)
            self.ui.write('\n }')
            return

        self.ui.write('\n  "rev": %s' % jrev)
        self.ui.write(',\n  "node": %s' % jnode)
        self.ui.write(',\n  "branch": "%s"' % j(ctx.branch()))
        self.ui.write(',\n  "phase": "%s"' % ctx.phasestr())
        self.ui.write(',\n  "user": "%s"' % j(ctx.user()))
        self.ui.write(',\n  "date": [%d, %d]' % ctx.date())
        self.ui.write(',\n  "desc": "%s"' % j(ctx.description()))

        self.ui.write(',\n  "bookmarks": [%s]' %
                      ", ".join('"%s"' % j(b) for b in ctx.bookmarks()))
        self.ui.write(',\n  "tags": [%s]' %
                      ", ".join('"%s"' % j(t) for t in ctx.tags()))
        self.ui.write(',\n  "parents": [%s]' %
                      ", ".join('"%s"' % c.hex() for c in ctx.parents()))

        if self.ui.debugflag:
            if rev is None:
                jmanifestnode = 'null'
            else:
                jmanifestnode = '"%s"' % hex(ctx.manifestnode())
            self.ui.write(',\n  "manifest": %s' % jmanifestnode)

            self.ui.write(',\n  "extra": {%s}' %
                          ", ".join('"%s": "%s"' % (j(k), j(v))
                                    for k, v in ctx.extra().items()))

            files = ctx.p1().status(ctx)
            self.ui.write(',\n  "modified": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[0]))
            self.ui.write(',\n  "added": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[1]))
            self.ui.write(',\n  "removed": [%s]' %
                          ", ".join('"%s"' % j(f) for f in files[2]))

        elif self.ui.verbose:
            self.ui.write(',\n  "files": [%s]' %
                          ", ".join('"%s"' % j(f) for f in ctx.files()))

            if copies:
                self.ui.write(',\n  "copies": {%s}' %
                              ", ".join('"%s": "%s"' % (j(k), j(v))
                                        for k, v in copies))

        matchfn = self.matchfn
        if matchfn:
            stat = self.diffopts.get('stat')
            diff = self.diffopts.get('patch')
            diffopts = patch.difffeatureopts(self.ui, self.diffopts, git=True)
            node, prev = ctx.node(), ctx.p1().node()
            if stat:
                # capture the diffstat output so it can be embedded as a string
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=True)
                self.ui.write(',\n  "diffstat": "%s"' % j(self.ui.popbuffer()))
            if diff:
                self.ui.pushbuffer()
                diffordiffstat(self.ui, self.repo, diffopts, prev, node,
                               match=matchfn, stat=False)
                self.ui.write(',\n  "diff": "%s"' % j(self.ui.popbuffer()))

        self.ui.write("\n }")
1344 1344
class changeset_templater(changeset_printer):
    '''format changeset information.

    Renders each changeset through the templater, using tmpl as the
    'changeset' template when given and falling back to the map file
    otherwise.
    '''

    def __init__(self, ui, repo, matchfn, diffopts, tmpl, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, matchfn, diffopts, buffered)
        # full node hashes in debug mode, short (12-char) otherwise
        formatnode = ui.debugflag and (lambda x: x) or (lambda x: x[:12])
        defaulttempl = {
            'parent': '{rev}:{node|formatnode} ',
            'manifest': '{rev}:{node|formatnode}',
            'file_copy': '{name} ({source})',
            'extra': '{key}={value|stringescape}'
            }
        # filecopy is preserved for compatibility reasons
        defaulttempl['filecopy'] = defaulttempl['file_copy']
        self.t = templater.templater(mapfile, {'formatnode': formatnode},
                                     cache=defaulttempl)
        if tmpl:
            # an explicit template overrides the map file's 'changeset'
            self.t.cache['changeset'] = tmpl

        self.cache = {}

    def _show(self, ctx, copies, matchfn, props):
        '''show a single changeset or file revision'''

        showlist = templatekw.showlist

        # showparents() behaviour depends on ui trace level which
        # causes unexpected behaviours at templating level and makes
        # it harder to extract it in a standalone function. Its
        # behaviour cannot be changed so leave it here for now.
        def showparents(**args):
            ctx = args['ctx']
            parents = [[('rev', p.rev()),
                        ('node', p.hex()),
                        ('phase', p.phasestr())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        props = props.copy()
        props.update(templatekw.keywords)
        props['parents'] = showparents
        props['templ'] = self.t
        props['ctx'] = ctx
        props['repo'] = self.repo
        props['revcache'] = {'copies': copies}
        props['cache'] = self.cache

        # find correct templates for current mode

        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
        ]

        # later (more specific) modes win, e.g. 'changeset_debug' over
        # 'changeset' when both exist and --debug is in effect
        types = {'header': '', 'footer':'', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    if self.lastheader != h:
                        self.lastheader = h
                        self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node(), matchfn)

            if types['footer']:
                if not self.footer:
                    self.footer = templater.stringify(self.t(types['footer'],
                                                             **props))

        except KeyError, inst:
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort('%s: %s' % (self.t.mapfile, inst.args[0]))
1435 1435
def gettemplate(ui, tmpl, style):
    """
    Find the template matching the given template spec or style.

    Returns a (template, mapfile) pair; at most one element is non-None.
    """

    # neither spec given on the command line: fall back to ui settings,
    # where a configured logtemplate beats a configured style
    if not tmpl and not style:
        tmpl = ui.config('ui', 'logtemplate')
        if tmpl:
            try:
                tmpl = templater.parsestring(tmpl)
            except SyntaxError:
                pass
            return tmpl, None
        style = util.expandpath(ui.config('ui', 'style', ''))

    # a style (explicit or from config) with no template: resolve it to
    # a map file, preferring stock "map-cmdline.<name>" styles
    if not tmpl and style:
        mapfile = style
        if not os.path.split(mapfile)[0]:
            located = (templater.templatepath('map-cmdline.' + mapfile)
                       or templater.templatepath(mapfile))
            if located:
                mapfile = located
        return None, mapfile

    if not tmpl:
        return None, None

    # a '{' strongly suggests a literal template string
    if '{' in tmpl:
        return tmpl, None

    # a bare name may be a stock style
    if not os.path.split(tmpl)[0]:
        located = (templater.templatepath('map-cmdline.' + tmpl)
                   or templater.templatepath(tmpl))
        if located and os.path.isfile(located):
            return None, located

    # a reference into the [templates] config section?
    configured = ui.config('templates', tmpl)
    if configured:
        try:
            configured = templater.parsestring(configured)
        except SyntaxError:
            pass
        return configured, None

    if tmpl == 'list':
        ui.write(_("available styles: %s\n") % templater.stylelist())
        raise util.Abort(_("specify a template"))

    # a path pointing at a map file or template file on disk?
    if ('/' in tmpl or '\\' in tmpl) and os.path.isfile(tmpl):
        # map files define a style; anything else is template text
        if os.path.basename(tmpl).startswith("map-"):
            return None, os.path.realpath(tmpl)
        return open(tmpl).read(), None

    # otherwise treat it as a constant string
    return tmpl, None
1499 1499
1500 1500 def show_changeset(ui, repo, opts, buffered=False):
1501 1501 """show one changeset using template or regular display.
1502 1502
1503 1503 Display format will be the first non-empty hit of:
1504 1504 1. option 'template'
1505 1505 2. option 'style'
1506 1506 3. [ui] setting 'logtemplate'
1507 1507 4. [ui] setting 'style'
1508 1508 If all of these values are either the unset or the empty string,
1509 1509 regular display via changeset_printer() is done.
1510 1510 """
1511 1511 # options
1512 1512 matchfn = None
1513 1513 if opts.get('patch') or opts.get('stat'):
1514 1514 matchfn = scmutil.matchall(repo)
1515 1515
1516 1516 if opts.get('template') == 'json':
1517 1517 return jsonchangeset(ui, repo, matchfn, opts, buffered)
1518 1518
1519 1519 tmpl, mapfile = gettemplate(ui, opts.get('template'), opts.get('style'))
1520 1520
1521 1521 if not tmpl and not mapfile:
1522 1522 return changeset_printer(ui, repo, matchfn, opts, buffered)
1523 1523
1524 1524 try:
1525 1525 t = changeset_templater(ui, repo, matchfn, opts, tmpl, mapfile,
1526 1526 buffered)
1527 1527 except SyntaxError, inst:
1528 1528 raise util.Abort(inst.args[0])
1529 1529 return t
1530 1530
def showmarker(ui, marker):
    """utility function to display obsolescence marker in a readable way

    To be used by debug function."""
    ui.write(hex(marker.precnode()))
    for successor in marker.succnodes():
        ui.write(' ')
        ui.write(hex(successor))
    ui.write(' %X ' % marker.flags())
    parents = marker.parentnodes()
    if parents is not None:
        ui.write('{%s} ' % ', '.join(hex(p) for p in parents))
    ui.write('(%s) ' % util.datestr(marker.date()))
    # metadata in sorted key order, minus the date shown just above
    metaitems = [t for t in sorted(marker.metadata().items())
                 if t[0] != 'date']
    ui.write('{%s}' % ', '.join('%r: %r' % t for t in metaitems))
    ui.write('\n')
1548 1548
def finddate(ui, repo, date):
    """Find the tipmost changeset that matches the given date spec"""

    dateok = util.matchdate(date)
    matcher = scmutil.matchall(repo)
    matched = {}

    # remember the date of every changeset whose timestamp matches
    def record(ctx, fns):
        when = ctx.date()
        if dateok(when[0]):
            matched[ctx.rev()] = when

    for ctx in walkchangerevs(repo, matcher, {'rev': None}, record):
        rev = ctx.rev()
        if rev in matched:
            ui.status(_("found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))
1569 1569
def increasingwindows(windowsize=8, sizelimit=512):
    """Generate window sizes, doubling each step until sizelimit.

    Yields windowsize, then keeps doubling it; once the yielded value
    reaches or exceeds sizelimit it is repeated forever.
    """
    size = windowsize
    while True:
        yield size
        if size < sizelimit:
            size *= 2
1575 1575
class FileWalkError(Exception):
    """Raised when file history cannot be walked using filelogs alone."""
1578 1578
def walkfilerevs(repo, match, follow, revs, fncache):
    '''Walks the file history for the matched files.

    Returns the changeset revs that are involved in the file history.

    Throws FileWalkError if the file history can't be walked using
    filelogs alone.

    As a side effect, fncache is filled with rev -> [filename] entries
    and copy sources are followed when follow is true.
    '''
    wanted = set()
    copies = []
    minrev, maxrev = min(revs), max(revs)
    def filerevgen(filelog, last):
        """
        Only files, no patterns. Check the history of each file.

        Examines filelog entries within minrev, maxrev linkrev range
        Returns an iterator yielding (linkrev, parentlinkrevs, copied)
        tuples in backwards order
        """
        cl_count = len(repo)
        revs = []
        for j in xrange(0, last + 1):
            linkrev = filelog.linkrev(j)
            if linkrev < minrev:
                continue
            # only yield rev for which we have the changelog, it can
            # happen while doing "hg log" during a pull or commit
            if linkrev >= cl_count:
                break

            parentlinkrevs = []
            for p in filelog.parentrevs(j):
                if p != nullrev:
                    parentlinkrevs.append(filelog.linkrev(p))
            n = filelog.node(j)
            revs.append((linkrev, parentlinkrevs,
                         follow and filelog.renamed(n)))

        return reversed(revs)
    def iterfiles():
        # yield (filename, filenode) pairs, including any copy sources
        # discovered while walking (appended to "copies" below)
        pctx = repo['.']
        for filename in match.files():
            if follow:
                if filename not in pctx:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % filename)
                yield filename, pctx[filename].filenode()
            else:
                yield filename, None
        for filename_node in copies:
            yield filename_node

    for file_, node in iterfiles():
        filelog = repo.file(file_)
        if not len(filelog):
            if node is None:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % file_)
                raise FileWalkError("Cannot walk via filelog")
            else:
                continue

        if node is None:
            last = len(filelog) - 1
        else:
            last = filelog.rev(node)

        # keep track of all ancestors of the file
        ancestors = set([filelog.linkrev(last)])

        # iterate from latest to oldest revision
        for rev, flparentlinkrevs, copied in filerevgen(filelog, last):
            if not follow:
                if rev > maxrev:
                    continue
            else:
                # Note that last might not be the first interesting
                # rev to us:
                # if the file has been changed after maxrev, we'll
                # have linkrev(last) > maxrev, and we still need
                # to explore the file graph
                if rev not in ancestors:
                    continue
            # XXX insert 1327 fix here
            if flparentlinkrevs:
                ancestors.update(flparentlinkrevs)

            fncache.setdefault(rev, []).append(file_)
            wanted.add(rev)
            if copied:
                copies.append(copied)

    return wanted
1675 1675
class _followfilter(object):
    """Incrementally decide whether revisions are related to a start rev.

    The first rev passed to match() becomes the start rev; subsequent
    calls return True for its descendants (when walking forward) or
    ancestors (when walking backward).  onlyfirst restricts the walk to
    first parents.
    """
    def __init__(self, repo, onlyfirst=False):
        self.repo = repo
        # set lazily on the first match() call
        self.startrev = nullrev
        # frontier of revs known to be related to startrev
        self.roots = set()
        self.onlyfirst = onlyfirst

    def match(self, rev):
        def realparents(rev):
            # parents of rev, minus nullrev; only the first if onlyfirst
            if self.onlyfirst:
                return self.repo.changelog.parentrevs(rev)[0:1]
            else:
                return filter(lambda x: x != nullrev,
                              self.repo.changelog.parentrevs(rev))

        if self.startrev == nullrev:
            self.startrev = rev
            return True

        if rev > self.startrev:
            # forward: all descendants
            if not self.roots:
                self.roots.add(self.startrev)
            for parent in realparents(rev):
                if parent in self.roots:
                    self.roots.add(rev)
                    return True
        else:
            # backwards: all parents
            if not self.roots:
                self.roots.update(realparents(self.startrev))
            if rev in self.roots:
                self.roots.remove(rev)
                self.roots.update(realparents(rev))
                return True

        return False
1713 1713
def walkchangerevs(repo, match, opts, prepare):
    '''Iterate over files and the revs in which they changed.

    Callers most commonly need to iterate backwards over the history
    in which they are interested. Doing so has awful (quadratic-looking)
    performance, so we use iterators in a "windowed" way.

    We walk a window of revisions in the desired order. Within the
    window, we first walk forwards to gather data, then in the desired
    order (usually backwards) to display it.

    This function returns an iterator yielding contexts. Before
    yielding each context, the iterator will first call the prepare
    function on each context in the window in forward order.'''

    follow = opts.get('follow') or opts.get('follow_first')
    revs = _logrevs(repo, opts)
    if not revs:
        return []
    wanted = set()
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    fncache = {}
    change = repo.changectx

    # First step is to fill wanted, the set of revisions that we want to yield.
    # When it does not induce extra cost, we also fill fncache for revisions in
    # wanted: a cache of filenames that were changed (ctx.files()) and that
    # match the file filtering conditions.

    if match.always():
        # No files, no patterns. Display all revs.
        wanted = revs

    if not slowpath and match.files():
        # We only have to read through the filelog to find wanted revisions

        try:
            wanted = walkfilerevs(repo, match, follow, revs, fncache)
        except FileWalkError:
            slowpath = True

            # We decided to fall back to the slowpath because at least one
            # of the paths was not a file. Check to see if at least one of them
            # existed in history, otherwise simply return
            for path in match.files():
                if path == '.' or path in repo.store:
                    break
            else:
                return []

    if slowpath:
        # We have to read the changelog to match filenames against
        # changed files

        if follow:
            raise util.Abort(_('can only follow copies/renames for explicit '
                               'filenames'))

        # The slow path checks files modified in every changeset.
        # This is really slow on large repos, so compute the set lazily.
        class lazywantedset(object):
            def __init__(self):
                self.set = set()
                self.revs = set(revs)

            # No need to worry about locality here because it will be accessed
            # in the same order as the increasing window below.
            def __contains__(self, value):
                if value in self.set:
                    return True
                elif not value in self.revs:
                    return False
                else:
                    self.revs.discard(value)
                    ctx = change(value)
                    matches = filter(match, ctx.files())
                    if matches:
                        fncache[value] = matches
                        self.set.add(value)
                        return True
                    return False

            def discard(self, value):
                self.revs.discard(value)
                self.set.discard(value)

        wanted = lazywantedset()

    # it might be worthwhile to do this in the iterator if the rev range
    # is descending and the prune args are all within that range
    for rev in opts.get('prune', ()):
        rev = repo[rev].rev()
        ff = _followfilter(repo)
        stop = min(revs[0], revs[-1])
        for x in xrange(rev, stop - 1, -1):
            if ff.match(x):
                wanted = wanted - [x]

    # Now that wanted is correctly initialized, we can iterate over the
    # revision range, yielding only revisions in wanted.
    def iterate():
        if follow and not match.files():
            ff = _followfilter(repo, onlyfirst=opts.get('follow_first'))
            def want(rev):
                return ff.match(rev) and rev in wanted
        else:
            def want(rev):
                return rev in wanted

        it = iter(revs)
        stopiteration = False
        for windowsize in increasingwindows():
            nrevs = []
            for i in xrange(windowsize):
                try:
                    rev = it.next()
                    if want(rev):
                        nrevs.append(rev)
                except (StopIteration):
                    stopiteration = True
                    break
            # call prepare in forward order before yielding the window
            for rev in sorted(nrevs):
                fns = fncache.get(rev)
                ctx = change(rev)
                if not fns:
                    def fns_generator():
                        for f in ctx.files():
                            if match(f):
                                yield f
                    fns = fns_generator()
                prepare(ctx, fns)
            for rev in nrevs:
                yield change(rev)

            if stopiteration:
                break

    return iterate()
1852 1852
def _makefollowlogfilematcher(repo, files, followfirst):
    # With --patch --follow FILE we must know which files of each
    # displayed revision should be diffed: the ancestors of FILE in
    # that revision. "linkrevcache" maps a linkrev to the set of file
    # names involved there; it is filled by redoing the graph walk the
    # --follow revset already performed and keying filectx paths by
    # linkrev (not strictly "correct", but good enough).
    linkrevcache = {}
    populated = [False]
    wdirparent = repo['.']

    def _fill():
        for name in files:
            fctx = wdirparent[name]
            linkrevcache.setdefault(fctx.linkrev(), set()).add(fctx.path())
            for c in fctx.ancestors(followfirst=followfirst):
                linkrevcache.setdefault(c.linkrev(), set()).add(c.path())

    def filematcher(rev):
        # Populate lazily, on the first lookup only.
        if not populated[0]:
            populated[0] = True
            _fill()
        return scmutil.matchfiles(repo, linkrevcache.get(rev, []))

    return filematcher
1879 1879
1880 1880 def _makenofollowlogfilematcher(repo, pats, opts):
1881 1881 '''hook for extensions to override the filematcher for non-follow cases'''
1882 1882 return None
1883 1883
def _makelogrevset(repo, pats, opts, revs):
    """Return (expr, filematcher) where expr is a revset string built
    from log options and file patterns or None. If --stat or --patch
    are not passed filematcher is None. Otherwise it is a callable
    taking a revision number and returning a match objects filtering
    the files to be detailed when displaying the revision.
    """
    # Map each log option to (revset template, join operator). A None
    # join operator means the option holds a single value; otherwise
    # list values are combined with the given operator.
    opt2revset = {
        'no_merges': ('not merge()', None),
        'only_merges': ('merge()', None),
        '_ancestors': ('ancestors(%(val)s)', None),
        '_fancestors': ('_firstancestors(%(val)s)', None),
        '_descendants': ('descendants(%(val)s)', None),
        '_fdescendants': ('_firstdescendants(%(val)s)', None),
        '_matchfiles': ('_matchfiles(%(val)s)', None),
        'date': ('date(%(val)r)', None),
        'branch': ('branch(%(val)r)', ' or '),
        '_patslog': ('filelog(%(val)r)', ' or '),
        '_patsfollow': ('follow(%(val)r)', ' or '),
        '_patsfollowfirst': ('_followfirst(%(val)r)', ' or '),
        'keyword': ('keyword(%(val)r)', ' or '),
        'prune': ('not (%(val)r or ancestors(%(val)r))', ' and '),
        'user': ('user(%(val)r)', ' or '),
    }

    # Work on a copy: synthetic keys ('_patslog', '_matchfiles', ...)
    # are injected below before the final translation loop.
    opts = dict(opts)
    # follow or not follow?
    follow = opts.get('follow') or opts.get('follow_first')
    if opts.get('follow_first'):
        followfirst = 1
    else:
        followfirst = 0
    # --follow with FILE behaviour depends on revs...
    it = iter(revs)
    startrev = it.next()
    try:
        followdescendants = startrev < it.next()
    except (StopIteration):
        followdescendants = False

    # branch and only_branch are really aliases and must be handled at
    # the same time
    opts['branch'] = opts.get('branch', []) + opts.get('only_branch', [])
    opts['branch'] = [repo.lookupbranch(b) for b in opts['branch']]
    # pats/include/exclude are passed to match.match() directly in
    # _matchfiles() revset but walkchangerevs() builds its matcher with
    # scmutil.match(). The difference is input pats are globbed on
    # platforms without shell expansion (windows).
    wctx = repo[None]
    match, pats = scmutil.matchandpats(wctx, pats, opts)
    slowpath = match.anypats() or (match.files() and opts.get('removed'))
    if not slowpath:
        for f in match.files():
            if follow and f not in wctx:
                # If the file exists, it may be a directory, so let it
                # take the slow path.
                if os.path.exists(repo.wjoin(f)):
                    slowpath = True
                    continue
                else:
                    raise util.Abort(_('cannot follow file not in parent '
                                       'revision: "%s"') % f)
            filelog = repo.file(f)
            if not filelog:
                # A zero count may be a directory or deleted file, so
                # try to find matching entries on the slow path.
                if follow:
                    raise util.Abort(
                        _('cannot follow nonexistent file: "%s"') % f)
                slowpath = True

    # We decided to fall back to the slowpath because at least one
    # of the paths was not a file. Check to see if at least one of them
    # existed in history - in that case, we'll continue down the
    # slowpath; otherwise, we can turn off the slowpath
    if slowpath:
        for path in match.files():
            if path == '.' or path in repo.store:
                break
        else:
            slowpath = False

    # Synthetic option names for the follow cases, indexed by
    # [--follow-first?] and [descendants?][--follow-first?].
    fpats = ('_patsfollow', '_patsfollowfirst')
    fnopats = (('_ancestors', '_fancestors'),
               ('_descendants', '_fdescendants'))
    if slowpath:
        # See walkchangerevs() slow path.
        #
        # pats/include/exclude cannot be represented as separate
        # revset expressions as their filtering logic applies at file
        # level. For instance "-I a -X a" matches a revision touching
        # "a" and "b" while "file(a) and not file(b)" does
        # not. Besides, filesets are evaluated against the working
        # directory.
        matchargs = ['r:', 'd:relpath']
        for p in pats:
            matchargs.append('p:' + p)
        for p in opts.get('include', []):
            matchargs.append('i:' + p)
        for p in opts.get('exclude', []):
            matchargs.append('x:' + p)
        matchargs = ','.join(('%r' % p) for p in matchargs)
        opts['_matchfiles'] = matchargs
        if follow:
            opts[fnopats[0][followfirst]] = '.'
    else:
        if follow:
            if pats:
                # follow() revset interprets its file argument as a
                # manifest entry, so use match.files(), not pats.
                opts[fpats[followfirst]] = list(match.files())
            else:
                op = fnopats[followdescendants][followfirst]
                opts[op] = 'rev(%d)' % startrev
        else:
            opts['_patslog'] = list(pats)

    filematcher = None
    if opts.get('patch') or opts.get('stat'):
        # When following files, track renames via a special matcher.
        # If we're forced to take the slowpath it means we're following
        # at least one pattern/directory, so don't bother with rename tracking.
        if follow and not match.always() and not slowpath:
            # _makefollowlogfilematcher expects its files argument to be
            # relative to the repo root, so use match.files(), not pats.
            filematcher = _makefollowlogfilematcher(repo, match.files(),
                                                    followfirst)
        else:
            filematcher = _makenofollowlogfilematcher(repo, pats, opts)
            if filematcher is None:
                filematcher = lambda rev: match

    # Translate the accumulated options into one revset expression,
    # joined with 'and'. Options without a %(val) placeholder are used
    # verbatim; list-valued options are joined with their operator.
    expr = []
    for op, val in sorted(opts.iteritems()):
        if not val:
            continue
        if op not in opt2revset:
            continue
        revop, andor = opt2revset[op]
        if '%(val)' not in revop:
            expr.append(revop)
        else:
            if not isinstance(val, list):
                e = revop % {'val': val}
            else:
                e = '(' + andor.join((revop % {'val': v}) for v in val) + ')'
            expr.append(e)

    if expr:
        expr = '(' + ' and '.join(expr) + ')'
    else:
        expr = None
    return expr, filematcher
2037 2037
def _logrevs(repo, opts):
    """Return the default set of revisions for log, newest first."""
    # The default --rev value depends on --follow, while --follow
    # behaviour depends on the revisions resolved from --rev...
    following = opts.get('follow') or opts.get('follow_first')
    if opts.get('rev'):
        return scmutil.revrange(repo, opts['rev'])
    if following:
        if repo.dirstate.p1() == nullid:
            # Unborn working directory parent: nothing to follow.
            return revset.baseset()
        return repo.revs('reverse(:.)')
    allrevs = revset.spanset(repo)
    allrevs.reverse()
    return allrevs
2052 2052
def getgraphlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset(), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if opts.get('rev'):
        # User-specified revs might be unsorted, but don't sort before
        # _makelogrevset because it might depend on the order of revs
        revs.sort(reverse=True)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically
        # returns the revision matching A then the revision matching B.
        # Sort again to fix that.
        revs = matcher(repo, revs)
        revs.sort(reverse=True)
    if limit is not None:
        # Honour --limit by keeping only the first 'limit' revisions.
        limitedrevs = []
        for idx, rev in enumerate(revs):
            if idx >= limit:
                break
            limitedrevs.append(rev)
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2089 2089
def getlogrevs(repo, pats, opts):
    """Return (revs, expr, filematcher) where revs is an iterable of
    revision numbers, expr is a revset string built from log options
    and file patterns or None, and used to filter 'revs'. If --stat or
    --patch are not passed filematcher is None. Otherwise it is a
    callable taking a revision number and returning a match objects
    filtering the files to be detailed when displaying the revision.
    """
    limit = loglimit(opts)
    revs = _logrevs(repo, opts)
    if not revs:
        return revset.baseset([]), None, None
    expr, filematcher = _makelogrevset(repo, pats, opts, revs)
    if expr:
        # Revset matchers often operate faster on revisions in changelog
        # order, because most filters deal with the changelog.
        # User-specified revs (--rev) keep their original order.
        if not opts.get('rev'):
            revs.reverse()
        matcher = revset.match(repo.ui, expr)
        # Revset matches can reorder revisions. "A or B" typically
        # returns the revision matching A then the revision matching B.
        # Sort again to fix that.
        revs = matcher(repo, revs)
        if not opts.get('rev'):
            revs.sort(reverse=True)
    if limit is not None:
        # Honour --limit by consuming at most 'limit' revisions lazily.
        count = 0
        limitedrevs = []
        it = iter(revs)
        while count < limit:
            try:
                limitedrevs.append(it.next())
            except (StopIteration):
                break
            count += 1
        revs = revset.baseset(limitedrevs)

    return revs, expr, filematcher
2128 2128
def displaygraph(ui, dag, displayer, showparents, edgefn, getrenamed=None,
                 filematcher=None):
    """Render the revisions of 'dag' as an ASCII graph via 'displayer'.

    showparents - nodes to mark with '@' (working directory parents)
    edgefn - callable producing the graph edges for one revision
    getrenamed - optional callable to look up a file's rename source
    filematcher - optional callable giving the match object for a rev
    """
    seen, state = [], graphmod.asciistate()
    for rev, type, ctx, parents in dag:
        # Pick the node glyph: '@' working parent, 'x' obsolete,
        # '_' closes its branch, 'o' otherwise.
        char = 'o'
        if ctx.node() in showparents:
            char = '@'
        elif ctx.obsolete():
            char = 'x'
        elif ctx.closesbranch():
            char = '_'
        copies = None
        if getrenamed and ctx.rev():
            copies = []
            for fn in ctx.files():
                rename = getrenamed(fn, ctx.rev())
                if rename:
                    copies.append((fn, rename[0]))
        revmatchfn = None
        if filematcher is not None:
            revmatchfn = filematcher(ctx.rev())
        # The displayer is buffered: pop the rendered hunk for this rev
        # and feed its lines to the graph drawer.
        displayer.show(ctx, copies=copies, matchfn=revmatchfn)
        lines = displayer.hunk.pop(rev).split('\n')
        if not lines[-1]:
            del lines[-1]
        displayer.flush(rev)
        edges = edgefn(type, char, lines, seen, rev, parents)
        for type, char, lines, coldata in edges:
            graphmod.ascii(ui, state, type, char, lines, coldata)
    displayer.close()
2159 2159
def graphlog(ui, repo, *pats, **opts):
    """Run log with an ASCII revision graph; parameters match 'hg log'."""
    revs, expr, filematcher = getgraphlogrevs(repo, pats, opts)
    dag = graphmod.dagwalker(repo, revs)

    renamefn = None
    if opts.get('copies'):
        # With --rev, bound rename detection to the displayed range.
        if opts.get('rev'):
            stoprev = scmutil.revrange(repo, opts.get('rev')).max() + 1
        else:
            stoprev = None
        renamefn = templatekw.getrenamedfn(repo, endrev=stoprev)
    displayer = show_changeset(ui, repo, opts, buffered=True)
    parentnodes = [ctx.node() for ctx in repo[None].parents()]
    displaygraph(ui, dag, displayer, parentnodes,
                 graphmod.asciiedges, renamefn, filematcher)
2175 2175
def checkunsupportedgraphflags(pats, opts):
    """Abort if a log option incompatible with -G/--graph was given."""
    unsupported = ["newest_first"]
    for name in unsupported:
        if opts.get(name):
            raise util.Abort(_("-G/--graph option is incompatible with --%s")
                             % name.replace("_", "-"))
2181 2181
def graphrevs(repo, nodes, opts):
    """Reverse 'nodes' in place, apply --limit, and walk them as a DAG."""
    maxcount = loglimit(opts)
    nodes.reverse()
    if maxcount is not None:
        # Keep only the first 'maxcount' entries after reversal.
        nodes = nodes[:maxcount]
    return graphmod.nodes(repo, nodes)
2188 2188
def add(ui, repo, match, prefix, explicitonly, **opts):
    """Schedule files matched by 'match' for addition, recursing into
    subrepositories.

    prefix - path prefix used when reporting subrepo files
    explicitonly - only add files explicitly named by the matcher
    Returns the list of files that could not be added.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # Intercept the matcher's bad-file callback to record failures
    # while still delegating to the original handler.
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    names = []
    wctx = repo[None]
    cca = None
    # Audit case collisions only when the user asked to be warned or
    # aborted on portability problems.
    abort, warn = scmutil.checkportabilityalert(ui)
    if abort or warn:
        cca = scmutil.casecollisionauditor(ui, abort, repo.dirstate)
    for f in wctx.walk(match):
        exact = match.exact(f)
        if exact or not explicitonly and f not in wctx and repo.wvfs.lexists(f):
            if cca:
                cca(f)
            names.append(f)
            if ui.verbose or not exact:
                ui.status(_('adding %s\n') % match.rel(f))

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            # Without --subrepos, recurse in explicit-only mode so only
            # files named on the command line are added in subrepos.
            if opts.get('subrepos'):
                bad.extend(sub.add(ui, submatch, prefix, False, **opts))
            else:
                bad.extend(sub.add(ui, submatch, prefix, True, **opts))
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not opts.get('dry_run'):
        rejected = wctx.add(names, prefix)
        bad.extend(f for f in rejected if f in match.files())
    return bad
2225 2225
def forget(ui, repo, match, prefix, explicitonly):
    """Stop tracking files matched by 'match' without deleting them,
    recursing into subrepositories.

    Returns (bad, forgot): files that could not be forgotten and files
    that were actually forgotten.
    """
    join = lambda f: os.path.join(prefix, f)
    bad = []
    # Intercept the matcher's bad-file callback to record failures
    # while still delegating to the original handler.
    oldbad = match.bad
    match.bad = lambda x, y: bad.append(x) or oldbad(x, y)
    wctx = repo[None]
    forgot = []
    s = repo.status(match=match, clean=True)
    # modified + added + deleted + clean, i.e. everything tracked.
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    if explicitonly:
        forget = [f for f in forget if match.exact(f)]

    for subpath in sorted(wctx.substate):
        sub = wctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, match)
            subbad, subforgot = sub.forget(submatch, prefix)
            bad.extend([subpath + '/' + f for f in subbad])
            forgot.extend([subpath + '/' + f for f in subforgot])
        except error.LookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % join(subpath))

    if not explicitonly:
        # Warn about explicitly named files that are not tracked.
        for f in match.files():
            if f not in repo.dirstate and not repo.wvfs.isdir(f):
                if f not in forgot:
                    if repo.wvfs.exists(f):
                        # Don't complain if the exact case match wasn't given.
                        # But don't do this until after checking 'forgot', so
                        # that subrepo files aren't normalized, and this op is
                        # purely from data cached by the status walk above.
                        if repo.dirstate.normalize(f) in repo.dirstate:
                            continue
                        ui.warn(_('not removing %s: '
                                  'file is already untracked\n')
                                % match.rel(f))
                    bad.append(f)

    for f in forget:
        if ui.verbose or not match.exact(f):
            ui.status(_('removing %s\n') % match.rel(f))

    rejected = wctx.forget(forget, prefix)
    bad.extend(f for f in rejected if f in match.files())
    forgot.extend(f for f in forget if f not in rejected)
    return bad, forgot
2273 2273
def files(ui, ctx, m, fm, fmt, subrepos):
    """Write the paths of ctx matched by m to formatter fm.

    fmt is the format string applied to each relative path. Returns 0
    if at least one file (here or in a subrepo) was listed, 1 otherwise.
    """
    rev = ctx.rev()
    ret = 1
    ds = ctx.repo().dirstate

    for f in ctx.matches(m):
        # In the working directory (rev is None), skip files the
        # dirstate marks as removed.
        if rev is None and ds[f] == 'r':
            continue
        fm.startitem()
        if ui.verbose:
            fc = ctx[f]
            fm.write('size flags', '% 10d % 1s ', fc.size(), fc.flags())
        fm.data(abspath=f)
        fm.write('path', fmt, m.rel(f))
        ret = 0

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.printfiles(ui, submatch, fm, fmt) == 0:
                    ret = 0
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % m.abs(subpath))

    return ret
2302 2302
def remove(ui, repo, m, prefix, after, force, subrepos):
    """Schedule matched files for removal, optionally deleting them.

    after - record removal of already-deleted files only
    force - remove even modified/added/clean files
    subrepos - recurse into all subrepositories
    Returns 0 on success, 1 if any file was skipped with a warning.
    """
    join = lambda f: os.path.join(prefix, f)
    ret = 0
    s = repo.status(match=m, clean=True)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    wctx = repo[None]

    for subpath in sorted(wctx.substate):
        def matchessubrepo(matcher, subpath):
            # The matcher targets this subrepo if it names it exactly
            # or any of its files lives under it.
            if matcher.exact(subpath):
                return True
            for f in matcher.files():
                if f.startswith(subpath):
                    return True
            return False

        if subrepos or matchessubrepo(m, subpath):
            sub = wctx.sub(subpath)
            try:
                submatch = matchmod.narrowmatcher(subpath, m)
                if sub.removefiles(submatch, prefix, after, force, subrepos):
                    ret = 1
            except error.LookupError:
                ui.status(_("skipping missing subrepository: %s\n")
                          % join(subpath))

    # warn about failure to delete explicit files/dirs
    deleteddirs = util.dirs(deleted)
    for f in m.files():
        def insubrepo():
            for subpath in wctx.substate:
                if f.startswith(subpath):
                    return True
            return False

        isdir = f in deleteddirs or wctx.hasdir(f)
        if f in repo.dirstate or isdir or f == '.' or insubrepo():
            continue

        if repo.wvfs.exists(f):
            if repo.wvfs.isdir(f):
                ui.warn(_('not removing %s: no tracked files\n')
                        % m.rel(f))
            else:
                ui.warn(_('not removing %s: file is untracked\n')
                        % m.rel(f))
        # missing files will generate a warning elsewhere
        ret = 1

    # Decide which files actually get removed, warning about the rest.
    if force:
        list = modified + deleted + clean + added
    elif after:
        list = deleted
        for f in modified + added + clean:
            ui.warn(_('not removing %s: file still exists\n') % m.rel(f))
            ret = 1
    else:
        list = deleted + clean
        for f in modified:
            ui.warn(_('not removing %s: file is modified (use -f'
                      ' to force removal)\n') % m.rel(f))
            ret = 1
        for f in added:
            ui.warn(_('not removing %s: file has been marked for add'
                      ' (use forget to undo)\n') % m.rel(f))
            ret = 1

    for f in sorted(list):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    wlock = repo.wlock()
    try:
        if not after:
            for f in list:
                if f in added:
                    continue # we never unlink added files on remove
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(list)
    finally:
        wlock.release()

    return ret
2387 2387
def cat(ui, repo, ctx, matcher, prefix, **opts):
    """Write the contents of matched files in ctx to the output file
    object(s), recursing into subrepositories.

    Returns 0 if at least one file was written, 1 otherwise.
    """
    err = 1

    def write(path):
        # Resolve the destination (stdout or --output template) and
        # dump the file's data, applying --decode filters if asked.
        fp = makefileobj(repo, opts.get('output'), ctx.node(),
                         pathname=os.path.join(prefix, path))
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        fp.write(data)
        fp.close()

    # Automation often uses hg cat on single files, so special case it
    # for performance to avoid the cost of parsing the manifest.
    if len(matcher.files()) == 1 and not matcher.anypats():
        file = matcher.files()[0]
        mf = repo.manifest
        mfnode = ctx.manifestnode()
        if mfnode and mf.find(mfnode, file)[0]:
            write(file)
            return 0

    # Don't warn about "missing" files that are really in subrepos
    bad = matcher.bad

    def badfn(path, msg):
        for subpath in ctx.substate:
            if path.startswith(subpath):
                return
        bad(path, msg)

    matcher.bad = badfn

    for abs in ctx.walk(matcher):
        write(abs)
        err = 0

    # Restore the original bad-file callback before recursing.
    matcher.bad = bad

    for subpath in sorted(ctx.substate):
        sub = ctx.sub(subpath)
        try:
            submatch = matchmod.narrowmatcher(subpath, matcher)

            if not sub.cat(submatch, os.path.join(prefix, sub._path),
                           **opts):
                err = 0
        except error.RepoLookupError:
            ui.status(_("skipping missing subrepository: %s\n")
                      % os.path.join(prefix, subpath))

    return err
2440 2440
def commit(ui, repo, commitfunc, pats, opts):
    '''commit the specified files or all outstanding changes'''
    rawdate = opts.get('date')
    if rawdate:
        opts['date'] = util.parsedate(rawdate)
    message = logmessage(ui, opts)
    matcher = scmutil.match(repo[None], pats, opts)

    # extract addremove carefully -- this function can be called from a command
    # that doesn't support addremove
    if opts.get('addremove') and scmutil.addremove(repo, matcher, "", opts) != 0:
        raise util.Abort(
            _("failed to mark all new/missing files as added/removed"))

    return commitfunc(ui, repo, message, matcher, opts)
2457 2457
def amend(ui, repo, commitfunc, old, extra, pats, opts):
    """Amend changeset 'old' with the working directory changes and/or
    a new commit message.

    commitfunc - callable used to create the intermediate commit
    extra - extra dict for the new changeset (updated in place)
    Returns the node of the amended changeset (or old.node() if
    nothing changed).
    """
    # amend will reuse the existing user if not specified, but the obsolete
    # marker creation requires that the current user's name is specified.
    if obsolete.isenabled(repo, obsolete.createmarkersopt):
        ui.username() # raise exception if username not set

    ui.note(_('amending changeset %s\n') % old)
    base = old.p1()

    wlock = lock = newid = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()
        tr = repo.transaction('amend')
        try:
            # See if we got a message from -m or -l, if not, open the editor
            # with the message of the changeset to amend
            message = logmessage(ui, opts)
            # ensure logfile does not conflict with later enforcement of the
            # message. potential logfile content has been processed by
            # `logmessage` anyway.
            opts.pop('logfile')
            # First, do a regular commit to record all changes in the working
            # directory (if there are any)
            ui.callhooks = False
            currentbookmark = repo._activebookmark
            try:
                # Deactivate the bookmark so the temporary commit does
                # not move it; restore it whatever happens.
                repo._activebookmark = None
                opts['message'] = 'temporary amend commit for %s' % old
                node = commit(ui, repo, commitfunc, pats, opts)
            finally:
                repo._activebookmark = currentbookmark
                ui.callhooks = True
            ctx = repo[node]

            # Participating changesets:
            #
            # node/ctx o - new (intermediate) commit that contains changes
            #          |   from working dir to go into amending commit
            #          |   (or a workingctx if there were no changes)
            #          |
            # old      o - changeset to amend
            #          |
            # base     o - parent of amending changeset

            # Update extra dict from amended commit (e.g. to preserve graft
            # source)
            extra.update(old.extra())

            # Also update it from the intermediate commit or from the wctx
            extra.update(ctx.extra())

            if len(old.parents()) > 1:
                # ctx.files() isn't reliable for merges, so fall back to the
                # slower repo.status() method
                files = set([fn for st in repo.status(base, old)[:3]
                             for fn in st])
            else:
                files = set(old.files())

            # Second, we use either the commit we just did, or if there were no
            # changes the parent of the working directory as the version of the
            # files in the final amend commit
            if node:
                ui.note(_('copying changeset %s to %s\n') % (ctx, base))

                user = ctx.user()
                date = ctx.date()
                # Recompute copies (avoid recording a -> b -> a)
                copied = copies.pathcopies(base, ctx)
                # Call p2() to get the second-parent context: the bound
                # method object itself is always truthy, so the old
                # "if old.p2:" test fired even for non-merge changesets.
                if old.p2():
                    copied.update(copies.pathcopies(old.p2(), ctx))

                # Prune files which were reverted by the updates: if old
                # introduced file X and our intermediate commit, node,
                # renamed that file, then those two files are the same and
                # we can discard X from our list of files. Likewise if X
                # was deleted, it's no longer relevant
                files.update(ctx.files())

                def samefile(f):
                    if f in ctx.manifest():
                        a = ctx.filectx(f)
                        if f in base.manifest():
                            b = base.filectx(f)
                            return (not a.cmp(b)
                                    and a.flags() == b.flags())
                        else:
                            return False
                    else:
                        return f not in base.manifest()
                files = [f for f in files if not samefile(f)]

                def filectxfn(repo, ctx_, path):
                    try:
                        fctx = ctx[path]
                        flags = fctx.flags()
                        mctx = context.memfilectx(repo,
                                                  fctx.path(), fctx.data(),
                                                  islink='l' in flags,
                                                  isexec='x' in flags,
                                                  copied=copied.get(path))
                        return mctx
                    except KeyError:
                        return None
            else:
                ui.note(_('copying changeset %s to %s\n') % (old, base))

                # Use version of files as in the old cset
                def filectxfn(repo, ctx_, path):
                    try:
                        return old.filectx(path)
                    except KeyError:
                        return None

                user = opts.get('user') or old.user()
                date = opts.get('date') or old.date()
            editform = mergeeditform(old, 'commit.amend')
            editor = getcommiteditor(editform=editform, **opts)
            if not message:
                editor = getcommiteditor(edit=True, editform=editform)
                message = old.description()

            pureextra = extra.copy()
            extra['amend_source'] = old.hex()

            new = context.memctx(repo,
                                 parents=[base.node(), old.p2().node()],
                                 text=message,
                                 files=files,
                                 filectxfn=filectxfn,
                                 user=user,
                                 date=date,
                                 extra=extra,
                                 editor=editor)

            newdesc = changelog.stripdesc(new.description())
            if ((not node)
                and newdesc == old.description()
                and user == old.user()
                and date == old.date()
                and pureextra == old.extra()):
                # nothing changed. continuing here would create a new node
                # anyway because of the amend_source noise.
                #
                # This is not what we expect from amend.
                return old.node()

            ph = repo.ui.config('phases', 'new-commit', phases.draft)
            try:
                if opts.get('secret'):
                    commitphase = 'secret'
                else:
                    commitphase = old.phase()
                repo.ui.setconfig('phases', 'new-commit', commitphase, 'amend')
                newid = repo.commitctx(new)
            finally:
                repo.ui.setconfig('phases', 'new-commit', ph, 'amend')
            if newid != old.node():
                # Reroute the working copy parent to the new changeset
                repo.setparents(newid, nullid)

                # Move bookmarks from old parent to amend commit
                bms = repo.nodebookmarks(old.node())
                if bms:
                    marks = repo._bookmarks
                    for bm in bms:
                        marks[bm] = newid
                    marks.write()
            #commit the whole amend process
            createmarkers = obsolete.isenabled(repo, obsolete.createmarkersopt)
            if createmarkers and newid != old.node():
                # mark the new changeset as successor of the rewritten one
                new = repo[newid]
                obs = [(old, (new,))]
                if node:
                    obs.append((ctx, ()))

                obsolete.createmarkers(repo, obs)
            tr.close()
        finally:
            tr.release()
        if not createmarkers and newid != old.node():
            # Strip the intermediate commit (if there was one) and the amended
            # commit
            if node:
                ui.note(_('stripping intermediate changeset %s\n') % ctx)
            ui.note(_('stripping amended changeset %s\n') % old)
            repair.strip(ui, repo, old.node(), topic='amend-backup')
    finally:
        if newid is None:
            repo.dirstate.invalidate()
        lockmod.release(lock, wlock)
    return newid
2652 2652
2653 2653 def commiteditor(repo, ctx, subs, editform=''):
2654 2654 if ctx.description():
2655 2655 return ctx.description()
2656 2656 return commitforceeditor(repo, ctx, subs, editform=editform)
2657 2657
def commitforceeditor(repo, ctx, subs, finishdesc=None, extramsg=None,
                      editform=''):
    """Run the commit message editor and return the edited text.

    The editor template is looked up in [committemplate], trying the
    most specific 'changeset.<editform...>' key first and falling back
    to the plain text layout. 'HG:' comment lines are stripped from the
    result. Raises util.Abort if the final message is empty.
    """
    if not extramsg:
        extramsg = _("Leave message empty to abort commit.")

    # try progressively less specific [committemplate] keys, e.g.
    # changeset.commit.amend -> changeset.commit -> changeset
    forms = [e for e in editform.split('.') if e]
    forms.insert(0, 'changeset')
    while forms:
        tmpl = repo.ui.config('committemplate', '.'.join(forms))
        if tmpl:
            committext = buildcommittemplate(repo, ctx, subs, extramsg, tmpl)
            break
        forms.pop()
    else:
        committext = buildcommittext(repo, ctx, subs, extramsg)

    # run editor in the repository root
    olddir = os.getcwd()
    os.chdir(repo.root)
    try:
        text = repo.ui.edit(committext, ctx.user(), ctx.extra(),
                            editform=editform)
    finally:
        # restore cwd even if the editor aborts (e.g. user left the
        # message empty and ui.edit raised); previously an exception
        # here left the process stranded in repo.root
        os.chdir(olddir)
    text = re.sub("(?m)^HG:.*(\n|$)", "", text)

    if finishdesc:
        text = finishdesc(text)
    if not text.strip():
        raise util.Abort(_("empty commit message"))

    return text
2687 2687
def buildcommittemplate(repo, ctx, subs, extramsg, tmpl):
    """Render the commit editor text for ctx through the templater.

    tmpl is the [committemplate] template string (or style name) chosen
    by the caller; extramsg is the 'HG: ...' hint line made available to
    the template. Returns the rendered text captured from the ui buffer.
    """
    ui = repo.ui
    # resolve tmpl into an inline template and/or a map file
    tmpl, mapfile = gettemplate(ui, tmpl, None)

    try:
        t = changeset_templater(ui, repo, None, {}, tmpl, mapfile, False)
    except SyntaxError, inst:
        raise util.Abort(inst.args[0])

    # expose every other [committemplate] key (except the entry template
    # itself) as a named sub-template
    for k, v in repo.ui.configitems('committemplate'):
        if k != 'changeset':
            t.t.cache[k] = v

    if not extramsg:
        extramsg = '' # ensure that extramsg is string

    # capture template output instead of writing it to the terminal
    ui.pushbuffer()
    t.show(ctx, extramsg=extramsg)
    return ui.popbuffer()
2707 2707
def buildcommittext(repo, ctx, subs, extramsg):
    """Assemble the default plain-text commit editor contents.

    Produces the existing description (if any) followed by the 'HG:'
    comment block summarizing user, branch, bookmark, subrepos and the
    per-file change list.
    """
    modified, added, removed = ctx.modified(), ctx.added(), ctx.removed()
    lines = []
    add = lines.append
    if ctx.description():
        add(ctx.description())
    add("")
    add("") # Empty line between message and comments.
    add(_("HG: Enter commit message."
          " Lines beginning with 'HG:' are removed."))
    add("HG: %s" % extramsg)
    add("HG: --")
    add(_("HG: user: %s") % ctx.user())
    if ctx.p2():
        add(_("HG: branch merge"))
    if ctx.branch():
        add(_("HG: branch '%s'") % ctx.branch())
    if bookmarks.isactivewdirparent(repo):
        add(_("HG: bookmark '%s'") % repo._activebookmark)
    for s in subs:
        add(_("HG: subrepo %s") % s)
    for f in added:
        add(_("HG: added %s") % f)
    for f in modified:
        add(_("HG: changed %s") % f)
    for f in removed:
        add(_("HG: removed %s") % f)
    if not (added or modified or removed):
        add(_("HG: no files changed"))
    add("")

    return "\n".join(lines)
2735 2735
def commitstatus(repo, node, branch, bheads=None, opts={}):
    """Print status messages after a commit of node has been created.

    bheads is the list of branch head nodes as they were BEFORE the
    commit; opts is only read (never mutated), so the mutable default
    is harmless here. Emits 'created new head', 'reopening closed
    branch head' and, in verbose/debug mode, the committed changeset id.
    """
    ctx = repo[node]
    parents = ctx.parents()

    # a new head was created iff this is not an amend, the branch had
    # heads already, the new node is not one of them, and no parent of
    # the new node was a head of the same branch
    if (not opts.get('amend') and bheads and node not in bheads and not
        [x for x in parents if x.node() in bheads and x.branch() == branch]):
        repo.ui.status(_('created new head\n'))
        # The message is not printed for initial roots. For the other
        # changesets, it is printed in the following situations:
        #
        # Par column: for the 2 parents with ...
        #   N: null or no parent
        #   B: parent is on another named branch
        #   C: parent is a regular non head changeset
        #   H: parent was a branch head of the current branch
        # Msg column: whether we print "created new head" message
        # In the following, it is assumed that there already exists some
        # initial branch heads of the current branch, otherwise nothing is
        # printed anyway.
        #
        # Par Msg Comment
        # N N  y  additional topo root
        #
        # B N  y  additional branch root
        # C N  y  additional topo head
        # H N  n  usual case
        #
        # B B  y  weird additional branch root
        # C B  y  branch merge
        # H B  n  merge with named branch
        #
        # C C  y  additional head from merge
        # C H  n  merge with a head
        #
        # H H  n  head merge: head count decreases

    if not opts.get('close_branch'):
        for r in parents:
            if r.closesbranch() and r.branch() == branch:
                # %d formats the parent changectx as its revision number
                repo.ui.status(_('reopening closed branch head %d\n') % r)

    if repo.ui.debugflag:
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx.hex()))
    elif repo.ui.verbose:
        # non-debug verbose output uses the short hash (str(ctx))
        repo.ui.write(_('committed changeset %d:%s\n') % (int(ctx), ctx))
2781 2781
def revert(ui, repo, ctx, parents, *pats, **opts):
    """Restore matched files in the working directory to their state in ctx.

    parents is the (parent, p2) pair of the working directory. The
    function classifies every matched file by how it differs from ctx
    and from the dirstate, selects an action (revert/add/remove/forget/
    undelete/drop/noop) from a dispatch table, optionally backs up
    files as '<name>.orig', and delegates the actual work to
    _performrevert. Subrepositories selected by the match are reverted
    recursively at the end.
    """
    parent, p2 = parents
    node = ctx.node()

    mf = ctx.manifest()
    if node == p2:
        parent = p2
    if node == parent:
        pmf = mf
    else:
        # parent manifest is loaded lazily below, only for the merge case
        pmf = None

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other. in both cases, filesets should be evaluated against
    # workingctx to get consistent result (issue4497). this means 'set:**'
    # cannot be used to select missing files from target rev.

    # `names` is a mapping for all elements in working copy and target revision
    # The mapping is in the form:
    #   <abs path in repo> -> (<path from CWD>, <exactly specified by matcher?>)
    names = {}

    wlock = repo.wlock()
    try:
        ## filling of the `names` mapping
        # walk dirstate to fill `names`

        interactive = opts.get('interactive', False)
        wctx = repo[None]
        m = scmutil.match(wctx, pats, opts)

        # we'll need this later
        targetsubs = sorted(s for s in wctx.substate if m(s))

        if not m.always():
            m.bad = lambda x, y: False
            for abs in repo.walk(m):
                names[abs] = m.rel(abs), m.exact(abs)

            # walk target manifest to fill `names`

            def badfn(path, msg):
                # suppress errors for paths already collected, subrepos,
                # and directories that contain collected files
                if path in names:
                    return
                if path in ctx.substate:
                    return
                path_ = path + '/'
                for f in names:
                    if f.startswith(path_):
                        return
                ui.warn("%s: %s\n" % (m.rel(path), msg))

            m.bad = badfn
            for abs in ctx.walk(m):
                if abs not in names:
                    names[abs] = m.rel(abs), m.exact(abs)

            # Find status of all file in `names`.
            m = scmutil.matchfiles(repo, names)

            changes = repo.status(node1=node, match=m,
                                  unknown=True, ignored=True, clean=True)
        else:
            changes = repo.status(node1=node, match=m)
            for kind in changes:
                for abs in kind:
                    names[abs] = m.rel(abs), m.exact(abs)

            m = scmutil.matchfiles(repo, names)

        modified = set(changes.modified)
        added = set(changes.added)
        removed = set(changes.removed)
        _deleted = set(changes.deleted)
        unknown = set(changes.unknown)
        unknown.update(changes.ignored)
        clean = set(changes.clean)
        modadded = set()

        # split between files known in target manifest and the others
        smf = set(mf)

        # determine the exact nature of the deleted changesets
        deladded = _deleted - smf
        deleted = _deleted - deladded

        # We need to account for the state of the file in the dirstate,
        # even when we revert against something else than parent. This will
        # slightly alter the behavior of revert (doing back up or not, delete
        # or just forget etc).
        if parent == node:
            dsmodified = modified
            dsadded = added
            dsremoved = removed
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded
            modified, added, removed = set(), set(), set()
        else:
            changes = repo.status(node1=parent, match=m)
            dsmodified = set(changes.modified)
            dsadded = set(changes.added)
            dsremoved = set(changes.removed)
            # store all local modifications, useful later for rename detection
            localchanges = dsmodified | dsadded

            # only take into account for removes between wc and target
            clean |= dsremoved - removed
            dsremoved &= removed
            # distinct between dirstate remove and other
            removed -= dsremoved

            modadded = added & dsmodified
            added -= modadded

            # tell newly modified apart.
            dsmodified &= modified
            dsmodified |= modified & dsadded # dirstate added may needs backup
            modified -= dsmodified

            # We need to wait for some post-processing to update this set
            # before making the distinction. The dirstate will be used for
            # that purpose.
            dsadded = added

        # in case of merge, files that are actually added can be reported as
        # modified, we need to post process the result
        if p2 != nullid:
            if pmf is None:
                # only need parent manifest in the merge case,
                # so do not read by default
                pmf = repo[parent].manifest()
            mergeadd = dsmodified - set(pmf)
            dsadded |= mergeadd
            dsmodified -= mergeadd

        # if f is a rename, update `names` to also revert the source
        cwd = repo.getcwd()
        for f in localchanges:
            src = repo.dirstate.copied(f)
            # XXX should we check for rename down to target node?
            if src and src not in names and repo.dirstate[src] == 'r':
                dsremoved.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        # distinguish between file to forget and the other
        added = set()
        for abs in dsadded:
            if repo.dirstate[abs] != 'a':
                added.add(abs)
        dsadded -= added

        for abs in deladded:
            if repo.dirstate[abs] == 'a':
                dsadded.add(abs)
        deladded -= dsadded

        # For files marked as removed, we check if an unknown file is present
        # at the same path. If such a file exists it may need to be backed up.
        # Making the distinction at this stage helps have simpler backup
        # logic.
        removunk = set()
        for abs in removed:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                removunk.add(abs)
        removed -= removunk

        dsremovunk = set()
        for abs in dsremoved:
            target = repo.wjoin(abs)
            if os.path.lexists(target):
                dsremovunk.add(abs)
        dsremoved -= dsremovunk

        # action to be actually performed by revert
        # (<list of file>, message>) tuple
        actions = {'revert': ([], _('reverting %s\n')),
                   'add': ([], _('adding %s\n')),
                   'remove': ([], _('removing %s\n')),
                   'drop': ([], _('removing %s\n')),
                   'forget': ([], _('forgetting %s\n')),
                   'undelete': ([], _('undeleting %s\n')),
                   'noop': (None, _('no changes needed to %s\n')),
                   'unknown': (None, _('file not managed: %s\n')),
                  }

        # "constant" that convey the backup strategy.
        # All set to `discard` if `no-backup` is set do avoid checking
        # no_backup lower in the code.
        # These values are ordered for comparison purposes
        backup = 2  # unconditionally do backup
        check = 1   # check if the existing file differs from target
        discard = 0 # never do backup
        if opts.get('no_backup'):
            backup = check = discard

        backupanddel = actions['remove']
        if not opts.get('no_backup'):
            backupanddel = actions['drop']

        disptable = (
            # dispatch table:
            #   file state
            #   action
            #   make backup

            ## Sets that results that will change file on disk
            # Modified compared to target, no local change
            (modified,      actions['revert'],   discard),
            # Modified compared to target, but local file is deleted
            (deleted,       actions['revert'],   discard),
            # Modified compared to target, local change
            (dsmodified,    actions['revert'],   backup),
            # Added since target
            (added,         actions['remove'],   discard),
            # Added in working directory
            (dsadded,       actions['forget'],   discard),
            # Added since target, have local modification
            (modadded,      backupanddel,        backup),
            # Added since target but file is missing in working directory
            (deladded,      actions['drop'],     discard),
            # Removed since target, before working copy parent
            (removed,       actions['add'],      discard),
            # Same as `removed` but an unknown file exists at the same path
            (removunk,      actions['add'],      check),
            # Removed since target, marked as such in working copy parent
            (dsremoved,     actions['undelete'], discard),
            # Same as `dsremoved` but an unknown file exists at the same path
            (dsremovunk,    actions['undelete'], check),
            ## the following sets does not result in any file changes
            # File with no modification
            (clean,         actions['noop'],     discard),
            # Existing file, not tracked anywhere
            (unknown,       actions['unknown'],  discard),
            )

        for abs, (rel, exact) in sorted(names.items()):
            # target file to be touch on disk (relative to cwd)
            target = repo.wjoin(abs)
            # search the entry in the dispatch table.
            # if the file is in any of these sets, it was touched in the working
            # directory parent and we are sure it needs to be reverted.
            for table, (xlist, msg), dobackup in disptable:
                if abs not in table:
                    continue
                if xlist is not None:
                    xlist.append(abs)
                    if dobackup and (backup <= dobackup
                                     or wctx[abs].cmp(ctx[abs])):
                        bakname = "%s.orig" % rel
                        ui.note(_('saving current version of %s as %s\n') %
                                (rel, bakname))
                        if not opts.get('dry_run'):
                            if interactive:
                                util.copyfile(target, bakname)
                            else:
                                util.rename(target, bakname)
                if ui.verbose or not exact:
                    if not isinstance(msg, basestring):
                        msg = msg(abs)
                    ui.status(msg % rel)
                elif exact:
                    ui.warn(msg % rel)
                break

        if not opts.get('dry_run'):
            needdata = ('revert', 'add', 'undelete')
            _revertprefetch(repo, ctx, *[actions[name][0] for name in needdata])
            _performrevert(repo, parents, ctx, actions, interactive)

        if targetsubs:
            # Revert the subrepos on the revert list
            for sub in targetsubs:
                try:
                    wctx.sub(sub).revert(ctx.substate[sub], *pats, **opts)
                except KeyError:
                    raise util.Abort("subrepository '%s' does not exist in %s!"
                                     % (sub, short(ctx.node())))
    finally:
        wlock.release()
3063 3063
def _revertprefetch(repo, ctx, *files):
    """Let extension changing the storage layer prefetch content"""
    # intentionally a no-op: extensions that store file contents remotely
    # override this hook to bulk-fetch data before revert touches files
    pass
3067 3067
def _performrevert(repo, parents, ctx, actions, interactive=False):
    """function that actually perform all the actions computed for revert

    This is an independent function to let extension to plug in and react to
    the imminent revert.

    Make sure you have the working directory locked when calling this function.

    actions maps action names ('forget', 'remove', 'drop', 'revert',
    'add', 'undelete', ...) to (filelist, message) pairs as built by
    revert(). When interactive is True, the 'revert' list is turned into
    a patch and the user picks hunks to apply.
    """
    parent, p2 = parents
    node = ctx.node()
    def checkout(f):
        # write f's content/flags from ctx into the working directory;
        # returns the number of bytes written (wwrite's return value)
        fc = ctx[f]
        return repo.wwrite(f, fc.data(), fc.flags())

    audit_path = pathutil.pathauditor(repo.root)
    for f in actions['forget'][0]:
        repo.dirstate.drop(f)
    for f in actions['remove'][0]:
        audit_path(f)
        try:
            util.unlinkpath(repo.wjoin(f))
        except OSError:
            # the file may already be gone; removal is best-effort
            pass
        repo.dirstate.remove(f)
    for f in actions['drop'][0]:
        # 'drop': stop tracking, but leave any on-disk file alone
        audit_path(f)
        repo.dirstate.remove(f)

    normal = None
    if node == parent:
        # We're reverting to our parent. If possible, we'd like status
        # to report the file as clean. We have to use normallookup for
        # merges to avoid losing information about merged/dirty files.
        if p2 != nullid:
            normal = repo.dirstate.normallookup
        else:
            normal = repo.dirstate.normal

    if interactive:
        # Prompt the user for changes to revert
        torevert = [repo.wjoin(f) for f in actions['revert'][0]]
        m = scmutil.match(ctx, torevert, {})
        diff = patch.diff(repo, None, ctx.node(), m)
        originalchunks = patch.parsepatch(diff)
        try:
            chunks = recordfilter(repo.ui, originalchunks)
        except patch.PatchError, err:
            raise util.Abort(_('error parsing patch: %s') % err)

        # Apply changes
        fp = cStringIO.StringIO()
        for c in chunks:
            c.write(fp)
        dopatch = fp.tell()
        fp.seek(0)
        if dopatch:
            try:
                patch.internalpatch(repo.ui, repo, fp, 1, eolmode=None)
            except patch.PatchError, err:
                raise util.Abort(str(err))
        del fp
    else:
        for f in actions['revert'][0]:
            wsize = checkout(f)
            if normal:
                normal(f)
            elif wsize == repo.dirstate._map[f][2]:
                # changes may be overlooked without normallookup,
                # if size isn't changed at reverting
                repo.dirstate.normallookup(f)

    for f in actions['add'][0]:
        checkout(f)
        repo.dirstate.add(f)

    # undeleted files end up clean only when reverting to the sole parent
    normal = repo.dirstate.normallookup
    if node == parent and p2 == nullid:
        normal = repo.dirstate.normal
    for f in actions['undelete'][0]:
        checkout(f)
        normal(f)

    # restore copy/rename metadata for files we (re)created
    copied = copies.pathcopies(repo[parent], ctx)

    for f in actions['add'][0] + actions['undelete'][0] + actions['revert'][0]:
        if f in copied:
            repo.dirstate.copy(copied[f], f)
3155 3155
def command(table):
    """Returns a function object to be used as a decorator for making commands.

    This function receives a command table as its argument. The table should
    be a dict.

    The returned function can be used as a decorator for adding commands
    to that command table. This function accepts multiple arguments to define
    a command.

    The first argument is the command name.

    The options argument is an iterable of tuples defining command arguments.
    See ``mercurial.fancyopts.fancyopts()`` for the format of each tuple.

    The synopsis argument defines a short, one line summary of how to use the
    command. This shows up in the help output.

    The norepo argument defines whether the command does not require a
    local repository. Most commands operate against a repository, thus the
    default is False.

    The optionalrepo argument defines whether the command optionally requires
    a local repository.

    The inferrepo argument defines whether to try to find a repository from the
    command line arguments. If True, arguments will be examined for potential
    repository locations. See ``findrepo()``. If a repository is found, it
    will be used.
    """
    def cmd(name, options=(), synopsis=None, norepo=False, optionalrepo=False,
            inferrepo=False):
        def decorator(func):
            # table entries are (func, options[, synopsis])
            if not synopsis:
                table[name] = func, list(options)
            else:
                table[name] = func, list(options), synopsis

            if norepo or optionalrepo or inferrepo:
                # deferred import to avoid an import cycle with commands
                import commands
                aliases = ' %s' % ' '.join(parsealiases(name))
                if norepo:
                    commands.norepo += aliases
                if optionalrepo:
                    commands.optionalrepo += aliases
                if inferrepo:
                    commands.inferrepo += aliases

            return func
        return decorator

    return cmd
3211 3211
# a list of (ui, repo, otherpeer, opts, missing) functions called by
# commands.outgoing. "missing" is "missing" of the result of
# "findcommonoutgoing()"
outgoinghooks = util.hooks()

# a list of (ui, repo) functions called by commands.summary
summaryhooks = util.hooks()

# a list of (ui, repo, opts, changes) functions called by commands.summary.
#
# functions should return tuple of booleans below, if 'changes' is None:
#  (whether-incomings-are-needed, whether-outgoings-are-needed)
#
# otherwise, 'changes' is a tuple of tuples below:
#  - (sourceurl, sourcebranch, sourcepeer, incoming)
#  - (desturl,   destbranch,   destpeer,   outgoing)
summaryremotehooks = util.hooks()

# A list of state files kept by multistep operations like graft.
# Since graft cannot be aborted, it is considered 'clearable' by update.
# note: bisect is intentionally excluded
# Each entry is a 5-tuple:
#   (state file, clearable, allowcommit, error, hint)
# consumed by checkunfinished()/clearunfinished() below
unfinishedstates = [
    ('graftstate', True, False, _('graft in progress'),
     _("use 'hg graft --continue' or 'hg update' to abort")),
    ('updatestate', True, False, _('last update was interrupted'),
     _("use 'hg update' to get a consistent checkout"))
    ]
3240 3240
def checkunfinished(repo, commit=False):
    '''Look for an unfinished multistep operation, like graft, and abort
    if found. It's probably good to check this right before
    bailifchanged().
    '''
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        # states that explicitly allow committing are skipped when
        # the caller is about to commit
        skip = commit and allowcommit
        if not skip and repo.vfs.exists(statefile):
            raise util.Abort(msg, hint=hint)
3251 3251
def clearunfinished(repo):
    '''Check for unfinished operations (as above), and clear the ones
    that are clearable.
    '''
    # first pass: any non-clearable state present means we must abort
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if not clearable and repo.vfs.exists(statefile):
            raise util.Abort(msg, hint=hint)
    # second pass: remove every clearable state file that exists
    for statefile, clearable, allowcommit, msg, hint in unfinishedstates:
        if clearable and repo.vfs.exists(statefile):
            util.unlink(repo.join(statefile))
@@ -1,479 +1,478 b''
1 1 # filemerge.py - file-level merge handling for Mercurial
2 2 #
3 3 # Copyright 2006, 2007, 2008 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import short
9 9 from i18n import _
10 10 import util, simplemerge, match, error, templater, templatekw
11 11 import os, tempfile, re, filecmp
12 12 import tagmerge
13 13
14 14 def _toolstr(ui, tool, part, default=""):
15 15 return ui.config("merge-tools", tool + "." + part, default)
16 16
17 17 def _toolbool(ui, tool, part, default=False):
18 18 return ui.configbool("merge-tools", tool + "." + part, default)
19 19
20 20 def _toollist(ui, tool, part, default=[]):
21 21 return ui.configlist("merge-tools", tool + "." + part, default)
22 22
# registry of internal merge tools; internaltool() registers each tool
# under both ':name' and the legacy 'internal:name' spelling
internals = {}
# Merge tools to document.
internalsdoc = {}
26 26
def internaltool(name, trymerge, onfailure=None):
    '''return a decorator for populating internal merge tool table'''
    fullname = ':' + name
    def register(func):
        # prefix the canonical tool name onto the docstring used for help
        func.__doc__ = "``%s``\n" % fullname + func.__doc__.strip()
        # register under both the ':name' and legacy 'internal:name' keys
        for key in (fullname, 'internal:' + name):
            internals[key] = func
        internalsdoc[fullname] = func
        func.trymerge = trymerge
        func.onfailure = onfailure
        return func
    return register
39 39
def _findtool(ui, tool):
    """Resolve a tool name: internal tools resolve to themselves,
    anything else is looked up as an external executable."""
    return tool if tool in internals else findexternaltool(ui, tool)
44 44
def findexternaltool(ui, tool):
    """Locate tool's executable on disk, trying registry keys first.

    Returns the executable path or None if not found.
    """
    # Windows registry lookup via merge-tools.<tool>.regkey/regkeyalt
    for regname in ("regkey", "regkeyalt"):
        regkey = _toolstr(ui, tool, regname)
        if not regkey:
            continue
        candidate = util.lookupreg(regkey, _toolstr(ui, tool, "regname"))
        if candidate:
            candidate = util.findexe(candidate + _toolstr(ui, tool, "regappend"))
            if candidate:
                return candidate
    # fall back to the configured executable name (default: the tool name)
    exe = _toolstr(ui, tool, "executable", tool)
    return util.findexe(util.expandpath(exe))
57 57
def _picktool(repo, ui, path, binary, symlink):
    """Pick a merge tool for path, returning (toolname, quoted-path-or-None).

    Candidates are tried in priority order: --config ui.forcemerge,
    $HGMERGE, [merge-patterns] matches, configured [merge-tools] by
    priority, the legacy 'hgmerge' executable, and finally the internal
    :prompt/:merge tools.
    """
    def check(tool, pat, symlink, binary):
        # a tool is usable if it exists and can handle the file's
        # symlink/binary/GUI constraints; warn louder for explicit picks
        tmsg = tool
        if pat:
            tmsg += " specified for " + pat
        if not _findtool(ui, tool):
            if pat: # explicitly requested tool deserves a warning
                ui.warn(_("couldn't find merge tool %s\n") % tmsg)
            else: # configured but non-existing tools are more silent
                ui.note(_("couldn't find merge tool %s\n") % tmsg)
        elif symlink and not _toolbool(ui, tool, "symlink"):
            ui.warn(_("tool %s can't handle symlinks\n") % tmsg)
        elif binary and not _toolbool(ui, tool, "binary"):
            ui.warn(_("tool %s can't handle binary\n") % tmsg)
        elif not util.gui() and _toolbool(ui, tool, "gui"):
            ui.warn(_("tool %s requires a GUI\n") % tmsg)
        else:
            return True
        return False

    # forcemerge comes from command line arguments, highest priority
    force = ui.config('ui', 'forcemerge')
    if force:
        toolpath = _findtool(ui, force)
        if toolpath:
            return (force, util.shellquote(toolpath))
        else:
            # mimic HGMERGE if given tool not found
            return (force, force)

    # HGMERGE takes next precedence
    hgmerge = os.environ.get("HGMERGE")
    if hgmerge:
        return (hgmerge, hgmerge)

    # then patterns
    for pat, tool in ui.configitems("merge-patterns"):
        mf = match.match(repo.root, '', [pat])
        if mf(path) and check(tool, pat, symlink, False):
            toolpath = _findtool(ui, tool)
            return (tool, util.shellquote(toolpath))

    # then merge tools
    tools = {}
    for k, v in ui.configitems("merge-tools"):
        t = k.split('.')[0]
        if t not in tools:
            tools[t] = int(_toolstr(ui, t, "priority", "0"))
    names = tools.keys()
    # sort by descending priority (negated for ascending sort)
    tools = sorted([(-p, t) for t, p in tools.items()])
    uimerge = ui.config("ui", "merge")
    if uimerge:
        if uimerge not in names:
            return (uimerge, uimerge)
        tools.insert(0, (None, uimerge)) # highest priority
    tools.append((None, "hgmerge")) # the old default, if found
    for p, t in tools:
        if check(t, None, symlink, binary):
            toolpath = _findtool(ui, t)
            return (t, util.shellquote(toolpath))

    # internal merge or prompt as last resort
    if symlink or binary:
        return ":prompt", None
    return ":merge", None
123 123
124 124 def _eoltype(data):
125 125 "Guess the EOL type of a file"
126 126 if '\0' in data: # binary
127 127 return None
128 128 if '\r\n' in data: # Windows
129 129 return '\r\n'
130 130 if '\r' in data: # Old Mac
131 131 return '\r'
132 132 if '\n' in data: # UNIX
133 133 return '\n'
134 134 return None # unknown
135 135
def _matcheol(file, origfile):
    """Rewrite file in place so its EOL markers match origfile's.

    No-op when either file is binary or has no detectable EOL style.
    """
    tostyle = _eoltype(util.readfile(origfile))
    if not tostyle:
        return
    data = util.readfile(file)
    fromstyle = _eoltype(data)
    if not fromstyle:
        return
    newdata = data.replace(fromstyle, tostyle)
    # only touch the file when the conversion actually changed something
    if newdata != data:
        util.writefile(file, newdata)
146 146
@internaltool('prompt', False)
def _iprompt(repo, mynode, orig, fcd, fco, fca, toolconf):
    """Asks the user which of the local or the other version to keep as
    the merged version."""
    ui = repo.ui
    path = fcd.path()

    prompt = _(" no tool found to merge %s\n"
               "keep (l)ocal or take (o)ther?"
               "$$ &Local $$ &Other") % path
    # choice 0 is "Local" (the default), anything else means "Other"
    if ui.promptchoice(prompt, 0):
        return _iother(repo, mynode, orig, fcd, fco, fca, toolconf)
    return _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf)
160 160
@internaltool('local', False)
def _ilocal(repo, mynode, orig, fcd, fco, fca, toolconf):
    """Uses the local version of files as the merged version."""
    # the local content is already in the working copy; 0 reports success
    return 0
165 165
@internaltool('other', False)
def _iother(repo, mynode, orig, fcd, fco, fca, toolconf):
    """Uses the other version of files as the merged version."""
    # overwrite the working copy file with the other side's data and flags
    repo.wwrite(fcd.path(), fco.data(), fco.flags())
    return 0
171 171
@internaltool('fail', False)
def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf):
    """
    Rather than attempting to merge files that were modified on both
    branches, it marks them as unresolved. The resolve command must be
    used to resolve these conflicts."""
    # nonzero return value flags the file as unresolved
    return 1
179 179
def _premerge(repo, toolconf, files, labels=None):
    """Attempt a non-interactive simplemerge before invoking a real tool.

    files is the (a, b, c, back) quadruple of local/base/other/backup
    paths. Returns 0 when the premerge fully resolved the file, 1 when
    the real merge tool still needs to run. Controlled by the
    merge-tools.<tool>.premerge config knob, which may be a boolean or
    one of 'keep'/'keep-merge3' (leave conflict markers in place).
    """
    tool, toolpath, binary, symlink = toolconf
    if symlink:
        # symlinks cannot be premerged textually
        return 1
    a, b, c, back = files

    ui = repo.ui

    validkeep = ['keep', 'keep-merge3']

    # do we attempt to simplemerge first?
    try:
        premerge = _toolbool(ui, tool, "premerge", not binary)
    except error.ConfigError:
        premerge = _toolstr(ui, tool, "premerge").lower()
        if premerge not in validkeep:
            _valid = ', '.join(["'" + v + "'" for v in validkeep])
            raise error.ConfigError(_("%s.premerge not valid "
                                      "('%s' is neither boolean nor %s)") %
                                    (tool, premerge, _valid))

    if premerge:
        if premerge == 'keep-merge3':
            if not labels:
                labels = _defaultconflictlabels
            if len(labels) < 3:
                # extend a copy: the old in-place append mutated the
                # caller's list (or the shared _defaultconflictlabels
                # default), polluting later two-label merges
                labels = labels + ['base']
        r = simplemerge.simplemerge(ui, a, b, c, quiet=True, label=labels)
        if not r:
            ui.debug(" premerge successful\n")
            return 0
        if premerge not in validkeep:
            util.copyfile(back, a) # restore from backup and try again
    return 1 # continue merging
214 214
@internaltool('merge', True,
              _("merging %s incomplete! "
                "(edit conflicts, then use 'hg resolve --mark')\n"))
def _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Markers will have two sections, one for each side
    of merge."""
    # returns (merge attempted?, return code); rc 0 means success
    tool, toolpath, binary, symlink = toolconf
    if symlink:
        repo.ui.warn(_('warning: internal :merge cannot merge symlinks '
                       'for %s\n') % fcd.path())
        return False, 1
    # premerge may already resolve the file (r == 0)
    r = _premerge(repo, toolconf, files, labels=labels)
    if r:
        a, b, c, back = files

        ui = repo.ui

        r = simplemerge.simplemerge(ui, a, b, c, label=labels)
        return True, r
    return False, 0
238 238
@internaltool('merge3', True,
              _("merging %s incomplete! "
                "(edit conflicts, then use 'hg resolve --mark')\n"))
def _imerge3(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Uses the internal non-interactive simple merge algorithm for merging
    files. It will fail if there are any conflicts and leave markers in
    the partially merged file. Marker will have three sections, one from each
    side of the merge and one for the base content."""
    if not labels:
        labels = _defaultconflictlabels
    if len(labels) < 3:
        # NOTE: this appends to the caller's list when one was passed in
        labels.append('base')
    return _imerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels)
253 253
@internaltool('tagmerge', True,
              _("automatic tag merging of %s failed! "
                "(use 'hg resolve --tool :merge' or another merge "
                "tool of your choice)\n"))
def _itagmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Uses the internal tag merge algorithm (experimental).
    """
    # delegate entirely to the dedicated .hgtags merge implementation
    return tagmerge.merge(repo, fcd, fco, fca)
263 263
@internaltool('dump', True)
def _idump(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """
    Creates three versions of the files to merge, containing the
    contents of local, other and base. These files can then be used to
    perform a merge manually. If the file to be merged is named
    ``a.txt``, these files will accordingly be named ``a.txt.local``,
    ``a.txt.other`` and ``a.txt.base`` and they will be placed in the
    same directory as ``a.txt``."""
    r = _premerge(repo, toolconf, files, labels=labels)
    if r:
        a, b, c, back = files

        fd = fcd.path()

        # .local is a copy of the working-copy file; .other/.base are
        # written fresh from the respective file contexts
        util.copyfile(a, a + ".local")
        repo.wwrite(fd + ".other", fco.data(), fco.flags())
        repo.wwrite(fd + ".base", fca.data(), fca.flags())
    return False, r
283 283
def _xmerge(repo, mynode, orig, fcd, fco, fca, toolconf, files, labels=None):
    """Run an external merge tool with HG_* environment and interpolated args.

    Returns (needcheck, returncode); needcheck tells the caller the result
    must still be validated (conflict markers, unchanged output, ...).
    """
    r = _premerge(repo, toolconf, files, labels=labels)
    if r:
        tool, toolpath, binary, symlink = toolconf
        a, b, c, back = files
        out = ""
        env = {'HG_FILE': fcd.path(),
               'HG_MY_NODE': short(mynode),
               'HG_OTHER_NODE': str(fco.changectx()),
               'HG_BASE_NODE': str(fca.changectx()),
               'HG_MY_ISLINK': 'l' in fcd.flags(),
               'HG_OTHER_ISLINK': 'l' in fco.flags(),
               'HG_BASE_ISLINK': 'l' in fca.flags(),
               }

        ui = repo.ui

        args = _toolstr(ui, tool, "args", '$local $base $other')
        if "$output" in args:
            out, a = a, back # read input from backup, write to original
        replace = {'local': a, 'base': b, 'other': c, 'output': out}
        # expand $local/$base/$other/$output with shell-quoted paths
        args = util.interpolate(r'\$', replace, args,
                                lambda s: util.shellquote(util.localpath(s)))
        cmd = toolpath + ' ' + args
        repo.ui.debug('launching merge tool: %s\n' % cmd)
        r = ui.system(cmd, cwd=repo.root, environ=env)
        repo.ui.debug('merge tool returned: %s\n' % r)
        return True, r
    return False, 0
313 313
def _formatconflictmarker(repo, ctx, template, label, pad):
    """Applies the given template to the ctx, prefixed by the label.

    Pad is the minimum width of the label prefix, so that multiple markers
    can have aligned templated parts.
    """
    if ctx.node() is None:
        # the working directory has no node of its own; describe its parent
        ctx = ctx.p1()

    props = templatekw.keywords.copy()
    props['templ'] = template
    props['ctx'] = ctx
    props['repo'] = repo
    templateresult = template('conflictmarker', **props)

    label = ('%s:' % label).ljust(pad + 1)
    mark = '%s %s' % (label, templater.stringify(templateresult))

    if mark:
        mark = mark.splitlines()[0] # split for safety

    # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
    return util.ellipsis(mark, 80 - 8)
337 337
# default marker template: short node, then tags/bookmarks/branch (omitting
# the noise values "tip" and "default"), then "- user: summary"
_defaultconflictmarker = ('{node|short} ' +
                          '{ifeq(tags, "tip", "", "{tags} ")}' +
                          '{if(bookmarks, "{bookmarks} ")}' +
                          '{ifeq(branch, "default", "", "{branch} ")}' +
                          '- {author|user}: {desc|firstline}')

# marker labels used when the caller does not supply any
_defaultconflictlabels = ['local', 'other']
345 345
def _formatlabels(repo, fcd, fco, fca, labels):
    """Formats the given labels using the conflict marker template.

    Returns a list of formatted labels.
    """
    cd = fcd.changectx()
    co = fco.changectx()
    ca = fca.changectx()

    ui = repo.ui
    template = ui.config('ui', 'mergemarkertemplate', _defaultconflictmarker)
    tmpl = templater.templater(None, cache={'conflictmarker': template})

    # pad to the longest label so the templated parts line up
    pad = max(len(l) for l in labels)

    newlabels = [_formatconflictmarker(repo, cd, tmpl, labels[0], pad),
                 _formatconflictmarker(repo, co, tmpl, labels[1], pad)]
    if len(labels) > 2:
        # third label (base), used by e.g. :merge3
        newlabels.append(_formatconflictmarker(repo, ca, tmpl, labels[2], pad))
    return newlabels
367 366
def filemerge(repo, mynode, orig, fcd, fco, fca, labels=None):
    """perform a 3-way merge in the working directory

    mynode = parent node before merge
    orig = original local filename before merge
    fco = other file context
    fca = ancestor file context
    fcd = local file context for current/destination file

    Returns None if the files were identical, otherwise the merge tool's
    return code (0 = success).
    """

    def temp(prefix, ctx):
        # dump ctx's content into a uniquely named temporary file
        pre = "%s~%s." % (os.path.basename(ctx.path()), prefix)
        (fd, name) = tempfile.mkstemp(prefix=pre)
        data = repo.wwritedata(ctx.path(), ctx.data())
        f = os.fdopen(fd, "wb")
        f.write(data)
        f.close()
        return name

    if not fco.cmp(fcd): # files identical?
        return None

    ui = repo.ui
    fd = fcd.path()
    binary = fcd.isbinary() or fco.isbinary() or fca.isbinary()
    symlink = 'l' in fcd.flags() + fco.flags()
    tool, toolpath = _picktool(repo, ui, fd, binary, symlink)
    ui.debug("picked tool '%s' for %s (binary %s symlink %s)\n" %
               (tool, fd, binary, symlink))

    if tool in internals:
        func = internals[tool]
        trymerge = func.trymerge
        onfailure = func.onfailure
    else:
        # external tool: always attempt a real merge
        func = _xmerge
        trymerge = True
        onfailure = _("merging %s failed!\n")

    toolconf = tool, toolpath, binary, symlink

    if not trymerge:
        # tools registered with trymerge=False (e.g. :fail) decide the
        # outcome without performing a 3-way merge
        return func(repo, mynode, orig, fcd, fco, fca, toolconf)

    # local working file, plus temp copies of base/other and a backup
    a = repo.wjoin(fd)
    b = temp("base", fca)
    c = temp("other", fco)
    back = a + ".orig"
    util.copyfile(a, back)

    if orig != fco.path():
        ui.status(_("merging %s and %s to %s\n") % (orig, fco.path(), fd))
    else:
        ui.status(_("merging %s\n") % fd)

    ui.debug("my %s other %s ancestor %s\n" % (fcd, fco, fca))

    markerstyle = ui.config('ui', 'mergemarkers', 'basic')
    if not labels:
        labels = _defaultconflictlabels
    if markerstyle != 'basic':
        labels = _formatlabels(repo, fcd, fco, fca, labels)

    needcheck, r = func(repo, mynode, orig, fcd, fco, fca, toolconf,
                        (a, b, c, back), labels=labels)
    if not needcheck:
        if r:
            if onfailure:
                ui.warn(onfailure % fd)
        else:
            util.unlink(back)

        util.unlink(b)
        util.unlink(c)
        return r

    # post-merge validation, driven by the tool's check/check* settings
    if not r and (_toolbool(ui, tool, "checkconflicts") or
                  'conflicts' in _toollist(ui, tool, "check")):
        if re.search("^(<<<<<<< .*|=======|>>>>>>> .*)$", fcd.data(),
                     re.MULTILINE):
            r = 1

    checked = False
    if 'prompt' in _toollist(ui, tool, "check"):
        checked = True
        if ui.promptchoice(_("was merge of '%s' successful (yn)?"
                             "$$ &Yes $$ &No") % fd, 1):
            r = 1

    if not r and not checked and (_toolbool(ui, tool, "checkchanged") or
                                  'changed' in _toollist(ui, tool, "check")):
        # output identical to the backup usually means the tool did nothing
        if filecmp.cmp(a, back):
            if ui.promptchoice(_(" output file %s appears unchanged\n"
                                 "was merge successful (yn)?"
                                 "$$ &Yes $$ &No") % fd, 1):
                r = 1

    if _toolbool(ui, tool, "fixeol"):
        _matcheol(a, back)

    if r:
        if onfailure:
            ui.warn(onfailure % fd)
    else:
        # success: the backup is no longer needed
        util.unlink(back)

    util.unlink(b)
    util.unlink(c)
    return r
477 476
# tell hggettext to extract docstrings from these functions:
# (internals: tool name -> function, populated by the @internaltool decorator)
i18nfunctions = internals.values()
@@ -1,824 +1,821 b''
1 1 # templater.py - template expansion for output
2 2 #
3 3 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import os, re
10 10 import util, config, templatefilters, templatekw, parser, error
11 11 import revset as revsetmod
12 12 import types
13 13 import minirst
14 14
15 15 # template parsing
16 16
# token name -> (binding strength, prefix spec, infix/suffix spec), fed to
# the generic precedence parser (see parser.py for the tuple semantics)
elements = {
    "(": (20, ("group", 1, ")"), ("func", 1, ")")),
    ",": (2, None, ("list", 2)),
    "|": (5, None, ("|", 5)),
    "%": (6, None, ("%", 6)),
    ")": (0, None, None),
    "symbol": (0, ("symbol",), None),
    "string": (0, ("string",), None),
    "rawstring": (0, ("rawstring",), None),
    "end": (0, None, None),
}
28 28
def tokenizer(data):
    """Tokenize data = (program, start, end); yields (type, value, pos).

    Token types are the literal operators in "(,)%|", 'string' (quoted,
    escapes processed later), 'rawstring' (r-prefixed, no escape handling),
    'symbol', and a final 'end' marker.  Tokenizing stops early at '}',
    which closes a template expression.
    """
    program, start, end = data
    pos = start
    while pos < end:
        c = program[pos]
        if c.isspace(): # skip inter-token whitespace
            pass
        elif c in "(,)%|": # handle simple operators
            yield (c, None, pos)
        elif (c in '"\'' or c == 'r' and
              program[pos:pos + 2] in ("r'", 'r"')): # handle quoted strings
            if c == 'r':
                # raw string: record the quote char, no escape decoding
                pos += 1
                c = program[pos]
                decode = False
            else:
                decode = True
            pos += 1
            s = pos
            while pos < end: # find closing quote
                d = program[pos]
                if decode and d == '\\': # skip over escaped characters
                    pos += 2
                    continue
                if d == c:
                    if not decode:
                        yield ('rawstring', program[s:pos], s)
                        break
                    yield ('string', program[s:pos], s)
                    break
                pos += 1
            else:
                raise error.ParseError(_("unterminated string"), s)
        elif c.isalnum() or c in '_':
            s = pos
            pos += 1
            while pos < end: # find end of symbol
                d = program[pos]
                if not (d.isalnum() or d == "_"):
                    break
                pos += 1
            sym = program[s:pos]
            yield ('symbol', sym, s)
            # step back so the outer loop's pos += 1 lands on the delimiter
            pos -= 1
        elif c == '}':
            pos += 1
            break
        else:
            raise error.ParseError(_("syntax error"), pos)
        pos += 1
    yield ('end', None, pos)
80 80
def compiletemplate(tmpl, context, strtoken="string"):
    """Parse a template string into a list of compiled (func, data) pairs.

    Literal text between {...} expressions becomes *strtoken* nodes; each
    {...} expression is parsed with the template grammar.
    """
    parsed = []
    pos, stop = 0, len(tmpl)
    p = parser.parser(tokenizer, elements)
    while pos < stop:
        n = tmpl.find('{', pos)
        if n < 0:
            # no more expressions; the rest is literal text
            parsed.append((strtoken, tmpl[pos:]))
            break
        # bs = number of backslashes immediately preceding the '{'
        bs = (n - pos) - len(tmpl[pos:n].rstrip('\\'))
        if strtoken == 'string' and bs % 2 == 1:
            # escaped (e.g. '\{', '\\\{', but not '\\{' nor r'\{')
            parsed.append((strtoken, (tmpl[pos:n - 1] + "{")))
            pos = n + 1
            continue
        if n > pos:
            parsed.append((strtoken, tmpl[pos:n]))

        # hand the text after '{' to the expression parser; it returns the
        # parse tree and the position just past the closing '}'
        pd = [tmpl, n + 1, stop]
        parseres, pos = p.parse(pd)
        parsed.append(parseres)

    return [compileexp(e, context) for e in parsed]
104 104
def compileexp(exp, context):
    """Compile one parse-tree node into a (runner, data) pair."""
    kind = exp[0]
    try:
        build = methods[kind]
    except KeyError:
        raise error.ParseError(_("unknown method '%s'") % kind)
    return build(exp, context)
110 110
111 111 # template evaluation
112 112
def getsymbol(exp):
    """Extract the name from a 'symbol' parse node, or raise ParseError."""
    if exp[0] != 'symbol':
        raise error.ParseError(_("expected a symbol, got '%s'") % exp[0])
    return exp[1]
117 117
def getlist(x):
    """Flatten a nested 'list' parse node into a Python list of nodes."""
    if not x:
        return []
    # walk down the left-nested ('list', rest, item) spine iteratively
    items = []
    while x[0] == 'list':
        items.append(x[2])
        x = x[1]
    items.append(x)
    items.reverse()
    return items
124 124
def getfilter(exp, context):
    """Resolve a symbol node to a filter function registered on *context*."""
    f = getsymbol(exp)
    if f not in context._filters:
        raise error.ParseError(_("unknown function '%s'") % f)
    return context._filters[f]

def gettemplate(exp, context):
    """Compile a literal template, or load a named one from the context."""
    if exp[0] == 'string' or exp[0] == 'rawstring':
        return compiletemplate(exp[1], context, strtoken=exp[0])
    if exp[0] == 'symbol':
        return context._load(exp[1])
    raise error.ParseError(_("expected template specifier"))
137 137
def runstring(context, mapping, data):
    """Evaluate a quoted string literal: process backslash escapes."""
    # "string-escape" is the Python 2 codec for '\n', '\t', ... sequences
    return data.decode("string-escape")
140 140
def runrawstring(context, mapping, data):
    """Evaluate a raw (r'...') string literal: return it untouched."""
    return data
143 143
def runsymbol(context, mapping, key):
    """Look up *key* in the mapping, then in the context defaults, then as
    a named template; yields '' if nothing is found."""
    v = mapping.get(key)
    if v is None:
        v = context._defaults.get(key)
    if v is None:
        try:
            # last resort: treat the symbol as a template name
            v = context.process(key, mapping)
        except TemplateNotFound:
            v = ''
    if callable(v):
        # keyword functions receive the whole mapping as keyword arguments
        return v(**mapping)
    if isinstance(v, types.GeneratorType):
        v = list(v)
    return v
158 158
def buildfilter(exp, context):
    """Compile '<expr>|<filter>' into a runfilter closure."""
    func, data = compileexp(exp[1], context)
    filt = getfilter(exp[2], context)
    return (runfilter, (func, data, filt))

def runfilter(context, mapping, data):
    """Evaluate the inner expression and pass its value through the filter."""
    func, data, filt = data
    # func() may return string, generator of strings or arbitrary object such
    # as date tuple, but filter does not want generator.
    thing = func(context, mapping, data)
    if isinstance(thing, types.GeneratorType):
        thing = stringify(thing)
    try:
        return filt(thing)
    except (ValueError, AttributeError, TypeError):
        # incompatible filter/keyword combination: report both names
        if isinstance(data, tuple):
            dt = data[1]
        else:
            dt = data
        raise util.Abort(_("template filter '%s' is not compatible with "
                           "keyword '%s'") % (filt.func_name, dt))

def buildmap(exp, context):
    """Compile '<expr>%<template>' into a runmap closure."""
    func, data = compileexp(exp[1], context)
    ctmpl = gettemplate(exp[2], context)
    return (runmap, (func, data, ctmpl))
185 185
def runtemplate(context, mapping, template):
    """Lazily evaluate each compiled (func, data) pair against *mapping*."""
    return (func(context, mapping, data) for func, data in template)
189 189
def runmap(context, mapping, data):
    """Expand a '%' mapping: apply the compiled template to each item."""
    func, data, ctmpl = data
    d = func(context, mapping, data)
    if callable(d):
        d = d()

    # expand against a copy so per-item keys don't leak into the caller
    lm = mapping.copy()

    for i in d:
        if isinstance(i, dict):
            lm.update(i)
            lm['originalnode'] = mapping.get('node')
            yield runtemplate(context, lm, ctmpl)
        else:
            # v is not an iterable of dicts, this happens when 'key'
            # has been fully expanded already and format is useless.
            # If so, return the expanded value.
            yield i
208 208
def buildfunc(exp, context):
    """Compile 'name(args...)': either a registered function or a filter
    applied in call syntax (which takes exactly one argument)."""
    n = getsymbol(exp[1])
    args = [compileexp(x, context) for x in getlist(exp[2])]
    if n in funcs:
        f = funcs[n]
        return (f, args)
    if n in context._filters:
        if len(args) != 1:
            raise error.ParseError(_("filter %s expects one argument") % n)
        f = context._filters[n]
        return (runfilter, (args[0][0], args[0][1], f))
    raise error.ParseError(_("unknown function '%s'") % n)
221 221
def date(context, mapping, args):
    """:date(date[, fmt]): Format a date. See :hg:`help dates` for formatting
    strings."""
    if not (1 <= len(args) <= 2):
        # i18n: "date" is a keyword
        raise error.ParseError(_("date expects one or two arguments"))

    date = args[0][0](context, mapping, args[0][1])
    fmt = None
    if len(args) == 2:
        fmt = stringify(args[1][0](context, mapping, args[1][1]))
    try:
        if fmt is None:
            return util.datestr(date)
        else:
            return util.datestr(date, fmt)
    except (TypeError, ValueError):
        # i18n: "date" is a keyword
        raise error.ParseError(_("date expects a date information"))

def diff(context, mapping, args):
    """:diff([includepattern [, excludepattern]]): Show a diff, optionally
    specifying files to include or exclude."""
    if len(args) > 2:
        # i18n: "diff" is a keyword
        raise error.ParseError(_("diff expects one, two or no arguments"))

    def getpatterns(i):
        # the i-th argument as a (possibly empty) pattern list
        if i < len(args):
            s = args[i][1].strip()
            if s:
                return [s]
        return []

    ctx = mapping['ctx']
    chunks = ctx.diff(match=ctx.match([], getpatterns(0), getpatterns(1)))

    return ''.join(chunks)

def fill(context, mapping, args):
    """:fill(text[, width[, initialindent[, hangindent]]]): Fill many
    paragraphs with optional indentation. See the "fill" filter."""
    if not (1 <= len(args) <= 4):
        # i18n: "fill" is a keyword
        raise error.ParseError(_("fill expects one to four arguments"))

    text = stringify(args[0][0](context, mapping, args[0][1]))
    width = 76
    initindent = ''
    hangindent = ''
    if 2 <= len(args) <= 4:
        try:
            width = int(stringify(args[1][0](context, mapping, args[1][1])))
        except ValueError:
            # i18n: "fill" is a keyword
            raise error.ParseError(_("fill expects an integer width"))
        try:
            initindent = stringify(_evalifliteral(args[2], context, mapping))
            hangindent = stringify(_evalifliteral(args[3], context, mapping))
        except IndexError:
            # the two indent arguments are optional
            pass

    return templatefilters.fill(text, width, initindent, hangindent)
285 285
def pad(context, mapping, args):
    """:pad(text, width[, fillchar=' '[, right=False]]): Pad text with a
    fill character."""
    if not (2 <= len(args) <= 4):
        # i18n: "pad" is a keyword
        raise error.ParseError(_("pad() expects two to four arguments"))

    width = int(args[1][1])

    text = stringify(args[0][0](context, mapping, args[0][1]))
    if args[0][0] == runstring:
        # a quoted string may itself contain template syntax; expand it
        text = stringify(runtemplate(context, mapping,
                                     compiletemplate(text, context)))

    right = False
    fillchar = ' '
    if len(args) > 2:
        fillchar = stringify(args[2][0](context, mapping, args[2][1]))
    if len(args) > 3:
        right = util.parsebool(args[3][1])

    if right:
        return text.rjust(width, fillchar)
    else:
        return text.ljust(width, fillchar)

def get(context, mapping, args):
    """:get(dict, key): Get an attribute/key from an object. Some keywords
    are complex types. This function allows you to obtain the value of an
    attribute on these types."""
    if len(args) != 2:
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects two arguments"))

    dictarg = args[0][0](context, mapping, args[0][1])
    if not util.safehasattr(dictarg, 'get'):
        # i18n: "get" is a keyword
        raise error.ParseError(_("get() expects a dict as first argument"))

    key = args[1][0](context, mapping, args[1][1])
    yield dictarg.get(key)

def _evalifliteral(arg, context, mapping):
    """Evaluate *arg*; if it was a string literal, expand it as a template.

    This lets then/else-style arguments contain nested {...} expressions.
    """
    t = stringify(arg[0](context, mapping, arg[1]))
    if arg[0] == runstring or arg[0] == runrawstring:
        yield runtemplate(context, mapping,
                          compiletemplate(t, context, strtoken='rawstring'))
    else:
        yield t
335 335
def if_(context, mapping, args):
    """:if(expr, then[, else]): Conditionally execute based on the result of
    an expression."""
    if not (2 <= len(args) <= 3):
        # i18n: "if" is a keyword
        raise error.ParseError(_("if expects two or three arguments"))

    # any non-empty string counts as true
    test = stringify(args[0][0](context, mapping, args[0][1]))
    if test:
        yield _evalifliteral(args[1], context, mapping)
    elif len(args) == 3:
        yield _evalifliteral(args[2], context, mapping)

def ifcontains(context, mapping, args):
    """:ifcontains(search, thing, then[, else]): Conditionally execute based
    on whether the item "search" is in "thing"."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifcontains" is a keyword
        raise error.ParseError(_("ifcontains expects three or four arguments"))

    item = stringify(args[0][0](context, mapping, args[0][1]))
    items = args[1][0](context, mapping, args[1][1])

    if item in items:
        yield _evalifliteral(args[2], context, mapping)
    elif len(args) == 4:
        yield _evalifliteral(args[3], context, mapping)

def ifeq(context, mapping, args):
    """:ifeq(expr1, expr2, then[, else]): Conditionally execute based on
    whether 2 items are equivalent."""
    if not (3 <= len(args) <= 4):
        # i18n: "ifeq" is a keyword
        raise error.ParseError(_("ifeq expects three or four arguments"))

    # both operands are compared as strings
    test = stringify(args[0][0](context, mapping, args[0][1]))
    match = stringify(args[1][0](context, mapping, args[1][1]))
    if test == match:
        yield _evalifliteral(args[2], context, mapping)
    elif len(args) == 4:
        yield _evalifliteral(args[3], context, mapping)
377 377
def join(context, mapping, args):
    """:join(list, sep): Join items in a list with a delimiter."""
    if not (1 <= len(args) <= 2):
        # i18n: "join" is a keyword
        raise error.ParseError(_("join expects one or two arguments"))

    joinset = args[0][0](context, mapping, args[0][1])
    if callable(joinset):
        # callable keywords expose a joinfmt to render each element
        fmt = joinset.joinfmt
        joinset = [fmt(item) for item in joinset()]

    joiner = " "
    if len(args) > 1:
        joiner = stringify(args[1][0](context, mapping, args[1][1]))

    # emit the separator before every element except the first
    for idx, item in enumerate(joinset):
        if idx:
            yield joiner
        yield item
400 400
def label(context, mapping, args):
    """:label(label, expr): Apply a label to generated content. Content with
    a label applied can result in additional post-processing, such as
    automatic colorization."""
    if len(args) != 2:
        # i18n: "label" is a keyword
        raise error.ParseError(_("label expects two arguments"))

    # ignore args[0] (the label string) since this is supposed to be a no-op
    yield _evalifliteral(args[1], context, mapping)
411 411
def revset(context, mapping, args):
    """:revset(query[, formatargs...]): Execute a revision set query. See
    :hg:`help revset`."""
    if not len(args) > 0:
        # i18n: "revset" is a keyword
        raise error.ParseError(_("revset expects one or more arguments"))

    raw = args[0][1]
    ctx = mapping['ctx']
    repo = ctx.repo()

    def query(expr):
        # run a revset query against the repository
        m = revsetmod.match(repo.ui, expr)
        return m(repo)

    if len(args) > 1:
        # extra arguments are interpolated into the query; results are not
        # cached since they depend on the current mapping
        formatargs = list([a[0](context, mapping, a[1]) for a in args[1:]])
        revs = query(revsetmod.formatspec(raw, *formatargs))
        revs = list([str(r) for r in revs])
    else:
        # plain queries are cached for the duration of the template run
        revsetcache = mapping['cache'].setdefault("revsetcache", {})
        if raw in revsetcache:
            revs = revsetcache[raw]
        else:
            revs = query(raw)
            revs = list([str(r) for r in revs])
            revsetcache[raw] = revs

    return templatekw.showlist("revision", revs, **mapping)

def rstdoc(context, mapping, args):
    """:rstdoc(text, style): Format ReStructuredText."""
    if len(args) != 2:
        # i18n: "rstdoc" is a keyword
        raise error.ParseError(_("rstdoc expects two arguments"))

    text = stringify(args[0][0](context, mapping, args[0][1]))
    style = stringify(args[1][0](context, mapping, args[1][1]))

    return minirst.format(text, style=style, keep=['verbose'])
452 452
def shortest(context, mapping, args):
    """:shortest(node, minlength=4): Obtain the shortest representation of
    a node."""
    if not (1 <= len(args) <= 2):
        # i18n: "shortest" is a keyword
        raise error.ParseError(_("shortest() expects one or two arguments"))

    node = stringify(args[0][0](context, mapping, args[0][1]))

    minlength = 4
    if len(args) > 1:
        minlength = int(args[1][1])

    cl = mapping['ctx']._repo.changelog
    def isvalid(test):
        # a prefix is valid if it matches a node unambiguously and cannot
        # be confused with a revision number
        try:
            try:
                cl.index.partialmatch(test)
            except AttributeError:
                # Pure mercurial doesn't support partialmatch on the index.
                # Fallback to the slow way.
                if cl._partialmatch(test) is None:
                    return False

            try:
                i = int(test)
                # if we are a pure int, then starting with zero will not be
                # confused as a rev; or, obviously, if the int is larger than
                # the value of the tip rev
                if test[0] == '0' or i > len(cl):
                    return True
                return False
            except ValueError:
                return True
        except error.RevlogError:
            return False

    # search downward from max(6, minlength); once a prefix fails, grow
    # again until a valid one is found
    shortest = node
    startlength = max(6, minlength)
    length = startlength
    while True:
        test = node[:length]
        if isvalid(test):
            shortest = test
            if length == minlength or length > startlength:
                return shortest
            length -= 1
        else:
            length += 1
            if len(shortest) <= length:
                return shortest
504 504
def strip(context, mapping, args):
    """:strip(text[, chars]): Strip characters from a string."""
    if not (1 <= len(args) <= 2):
        # i18n: "strip" is a keyword
        raise error.ParseError(_("strip expects one or two arguments"))

    text = stringify(args[0][0](context, mapping, args[0][1]))
    if len(args) == 2:
        chars = stringify(args[1][0](context, mapping, args[1][1]))
        return text.strip(chars)
    return text.strip()

def sub(context, mapping, args):
    """:sub(pattern, replacement, expression): Perform text substitution
    using regular expressions."""
    if len(args) != 3:
        # i18n: "sub" is a keyword
        raise error.ParseError(_("sub expects three arguments"))

    pat = stringify(args[0][0](context, mapping, args[0][1]))
    rpl = stringify(args[1][0](context, mapping, args[1][1]))
    # the subject may itself be a literal template; expand it first
    src = stringify(_evalifliteral(args[2], context, mapping))
    yield re.sub(pat, rpl, src)

def startswith(context, mapping, args):
    """:startswith(pattern, text): Returns the value from the "text" argument
    if it begins with the content from the "pattern" argument."""
    if len(args) != 2:
        # i18n: "startswith" is a keyword
        raise error.ParseError(_("startswith expects two arguments"))

    patn = stringify(args[0][0](context, mapping, args[0][1]))
    text = stringify(args[1][0](context, mapping, args[1][1]))
    if text.startswith(patn):
        return text
    return ''
541 541
542 542
def word(context, mapping, args):
    """:word(number, text[, separator]): Return the nth word from a string."""
    if not (2 <= len(args) <= 3):
        # i18n: "word" is a keyword
        raise error.ParseError(_("word expects two or three arguments, got %d")
                               % len(args))

    try:
        num = int(stringify(args[0][0](context, mapping, args[0][1])))
    except ValueError:
        # i18n: "word" is a keyword
        raise error.ParseError(
            _("Use strings like '3' for numbers passed to word function"))
    text = stringify(args[1][0](context, mapping, args[1][1]))
    if len(args) == 3:
        splitter = stringify(args[2][0](context, mapping, args[2][1]))
    else:
        # None makes str.split() break on runs of whitespace
        splitter = None

    tokens = text.split(splitter)
    if num >= len(tokens):
        # out-of-range index yields an empty string rather than an error
        return ''
    else:
        return tokens[num]
567 567
# parse-tree node type -> compiler; each compiler returns a (runner, data)
# pair to be executed by runtemplate
methods = {
    "string": lambda e, c: (runstring, e[1]),
    "rawstring": lambda e, c: (runrawstring, e[1]),
    "symbol": lambda e, c: (runsymbol, e[1]),
    "group": lambda e, c: compileexp(e[1], c),
    # ".": buildmember,
    "|": buildfilter,
    "%": buildmap,
    "func": buildfunc,
}

# template function name -> implementation (used by buildfunc)
funcs = {
    "date": date,
    "diff": diff,
    "fill": fill,
    "get": get,
    "if": if_,
    "ifcontains": ifcontains,
    "ifeq": ifeq,
    "join": join,
    "label": label,
    "pad": pad,
    "revset": revset,
    "rstdoc": rstdoc,
    "shortest": shortest,
    "startswith": startswith,
    "strip": strip,
    "sub": sub,
    "word": word,
}
598 598
# template engine

# module-level alias; the template functions above resolve it at call time
stringify = templatefilters.stringify
602 602
def _flatten(thing):
    '''yield a single stream from a possibly nested set of iterators'''
    if isinstance(thing, str):
        yield thing
    elif not util.safehasattr(thing, '__iter__'):
        # scalar: stringify it, dropping None silently
        if thing is not None:
            yield str(thing)
    else:
        for i in thing:
            if isinstance(i, str):
                yield i
            elif not util.safehasattr(i, '__iter__'):
                if i is not None:
                    yield str(i)
            elif i is not None:
                # recurse into nested iterables
                for j in _flatten(i):
                    yield j
620 620
def parsestring(s):
    '''unwrap quotes'''
    if len(s) < 2 or s[0] != s[-1]:
        raise SyntaxError(_('unmatched quotes'))
    # de-backslash-ify only <\">. it is invalid syntax in non-string part of
    # template, but we are likely to escape <"> in quoted string and it was
    # accepted before, thanks to issue4290. <\\"> is unmodified because it
    # is ambiguous and it was processed as such before 2.8.1.
    #
    # template      result
    # ---------     ------------------------
    # {\"\"}        parse error
    # "{""}"        {""} -> <>
    # "{\"\"}"      {""} -> <>
    # {"\""}        {"\""} -> <">
    # '{"\""}'      {"\""} -> <">
    # "{"\""}"      parse error (don't care)
    quote = s[0]
    body = s[1:-1]
    # protect <\\q> first so the second pass only rewrites a lone <\q>
    body = body.replace('\\\\' + quote, '\\\\\\' + quote)
    return body.replace('\\' + quote, quote)
643 640
class engine(object):
    '''template expansion engine.

    template expansion works like this. a map file contains key=value
    pairs. if value is quoted, it is treated as string. otherwise, it
    is treated as name of template file.

    templater is asked to expand a key in map. it looks up key, and
    looks for strings like this: {foo}. it expands {foo} by looking up
    foo in map, and substituting it. expansion is recursive: it stops
    when there is no more {foo} to replace.

    expansion also allows formatting and filtering.

    format uses key to expand each item in list. syntax is
    {key%format}.

    filter uses function to transform value. syntax is
    {key|filter1|filter2|...}.'''

    def __init__(self, loader, filters=None, defaults=None):
        '''loader is a callable returning the unparsed text of a named
        template. filters maps filter names to functions; defaults
        supplies fallback symbol values for expansion.'''
        self._loader = loader
        # use None sentinels rather than mutable {} literals as defaults:
        # a {} default is a single shared dict, so a mutation through one
        # instance would leak into every other default-constructed instance
        self._filters = filters if filters is not None else {}
        self._defaults = defaults if defaults is not None else {}
        self._cache = {}  # template name -> compiled template

    def _load(self, t):
        '''load, parse, and cache a template'''
        if t not in self._cache:
            self._cache[t] = compiletemplate(self._loader(t), self)
        return self._cache[t]

    def process(self, t, mapping):
        '''Perform expansion. t is name of map element to expand.
        mapping contains added elements for use during expansion. Is a
        generator.'''
        return _flatten(runtemplate(self, mapping, self._load(t)))
681 678
# engine type name -> engine class; a map file may select a non-default
# engine with the "enginename:path" value syntax handled in templater.__init__
engines = {'default': engine}
683 680
def stylelist():
    '''return a comma-separated list of available command-line styles,
    or a help message when no template directory can be found'''
    paths = templatepaths()
    if not paths:
        return _('no templates found, try `hg debuginstall` for more info')
    names = []
    # command-line styles live in files named "map-cmdline.<style>"
    for entry in os.listdir(paths[0]):
        parts = entry.split(".")
        if parts[0] == "map-cmdline":
            names.append(parts[1])
    return ", ".join(sorted(names))
695 692
class TemplateNotFound(util.Abort):
    """Raised (by templater.load) when a template name is not in the map."""
    pass
698 695
class templater(object):
    """Frontend template engine: reads a style map file and expands
    templates through a backend engine from the ``engines`` table."""

    def __init__(self, mapfile, filters={}, defaults={}, cache={},
                 minchunk=1024, maxchunk=65536):
        '''set up template engine.
        mapfile is name of file to read map definitions from.
        filters is dict of functions. each transforms a value into another.
        defaults is dict of default map definitions.'''
        # NOTE(review): the {} defaults are shared mutable objects; this is
        # only safe while neither the class nor callers mutate them
        self.mapfile = mapfile or 'template'
        self.cache = cache.copy()  # template name -> raw template string
        self.map = {}  # template name -> (engine type, template file path)
        if mapfile:
            self.base = os.path.dirname(mapfile)
        else:
            self.base = ''
        self.filters = templatefilters.filters.copy()
        self.filters.update(filters)
        self.defaults = defaults
        # chunk bounds handed to util.increasingchunks() in __call__
        self.minchunk, self.maxchunk = minchunk, maxchunk
        self.ecache = {}  # engine type -> instantiated engine

        if not mapfile:
            return
        if not os.path.exists(mapfile):
            raise util.Abort(_("style '%s' not found") % mapfile,
                             hint=_("available styles: %s") % stylelist())

        conf = config.config()
        conf.read(mapfile)

        for key, val in conf[''].items():
            if not val:
                raise SyntaxError(_('%s: missing value') % conf.source('', key))
            if val[0] in "'\"":
                # quoted value: an inline template string, cached directly
                try:
                    self.cache[key] = parsestring(val)
                except SyntaxError, inst:
                    raise SyntaxError('%s: %s' %
                                      (conf.source('', key), inst.args[0]))
            else:
                # unquoted value: a template file reference, optionally
                # prefixed with an engine type as "enginename:path"
                val = 'default', val
                if ':' in val[1]:
                    val = val[1].split(':', 1)
                self.map[key] = val[0], os.path.join(self.base, val[1])

    def __contains__(self, key):
        # known either as a cached/inline template or as a mapped file
        return key in self.cache or key in self.map

    def load(self, t):
        '''Get the template for the given template name. Use a local cache.'''
        if t not in self.cache:
            try:
                self.cache[t] = util.readfile(self.map[t][1])
            except KeyError, inst:
                raise TemplateNotFound(_('"%s" not in template map') %
                                       inst.args[0])
            except IOError, inst:
                raise IOError(inst.args[0], _('template file %s: %s') %
                              (self.map[t][1], inst.args[1]))
        return self.cache[t]

    def __call__(self, t, **mapping):
        '''Expand template t with mapping; returns a generator of chunks.'''
        ttype = t in self.map and self.map[t][0] or 'default'
        if ttype not in self.ecache:
            # engines are instantiated lazily, one per engine type
            self.ecache[ttype] = engines[ttype](self.load,
                                                self.filters, self.defaults)
        proc = self.ecache[ttype]

        stream = proc.process(t, mapping)
        if self.minchunk:
            # re-batch output into increasingly large chunks
            stream = util.increasingchunks(stream, min=self.minchunk,
                                           max=self.maxchunk)
        return stream
772 769
def templatepaths():
    '''return locations used for template files.'''
    relnames = ['templates']
    candidates = [os.path.normpath(os.path.join(util.datapath, name))
                  for name in relnames]
    # keep only the locations that actually exist as directories
    return [path for path in candidates if os.path.isdir(path)]
779 776
def templatepath(name):
    '''return location of template file. returns None if not found.'''
    for directory in templatepaths():
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate):
            return candidate
    return None
787 784
def stylemap(styles, paths=None):
    """Return path to mapfile for a given style.

    Searches mapfile in the following locations:
    1. templatepath/style/map
    2. templatepath/map-style
    3. templatepath/map
    """

    if paths is None:
        paths = templatepaths()
    elif isinstance(paths, str):
        paths = [paths]

    if isinstance(styles, str):
        styles = [styles]

    for style in styles:
        # only plain names are honored: reject empty names and anything
        # that looks like a path component, to stay within template paths
        unsafe = (not style
                  or style in (os.curdir, os.pardir)
                  or os.sep in style
                  or (os.altsep and os.altsep in style))
        if unsafe:
            continue
        candidates = [os.path.join(style, 'map'), 'map-' + style, 'map']

        for directory in paths:
            for candidate in candidates:
                mapfile = os.path.join(directory, candidate)
                if os.path.isfile(mapfile):
                    return style, mapfile

    raise RuntimeError("No hgweb templates found in %r" % paths)
822 819
# tell hggettext to extract docstrings from these functions:
# (funcs is presumably the template-function table defined earlier in this
# module — TODO confirm against the full file)
i18nfunctions = funcs.values()
General Comments 0
You need to be logged in to leave comments. Login now