##// END OF EJS Templates
replace set-like dictionaries with real sets...
Martin Geisler -
r8152:08e1baf9 default
parent child Browse files
Show More
@@ -1,417 +1,417 b''
1 # bugzilla.py - bugzilla integration for mercurial
1 # bugzilla.py - bugzilla integration for mercurial
2 #
2 #
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
3 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 '''Bugzilla integration
8 '''Bugzilla integration
9
9
10 This hook extension adds comments on bugs in Bugzilla when changesets
10 This hook extension adds comments on bugs in Bugzilla when changesets
11 that refer to bugs by Bugzilla ID are seen. The hook does not change
11 that refer to bugs by Bugzilla ID are seen. The hook does not change
12 bug status.
12 bug status.
13
13
14 The hook updates the Bugzilla database directly. Only Bugzilla
14 The hook updates the Bugzilla database directly. Only Bugzilla
15 installations using MySQL are supported.
15 installations using MySQL are supported.
16
16
17 The hook relies on a Bugzilla script to send bug change notification
17 The hook relies on a Bugzilla script to send bug change notification
18 emails. That script changes between Bugzilla versions; the
18 emails. That script changes between Bugzilla versions; the
19 'processmail' script used prior to 2.18 is replaced in 2.18 and
19 'processmail' script used prior to 2.18 is replaced in 2.18 and
20 subsequent versions by 'contrib/sendbugmail.pl'. Note that these will
20 subsequent versions by 'contrib/sendbugmail.pl'. Note that these will
21 be run by Mercurial as the user pushing the change; you will need to
21 be run by Mercurial as the user pushing the change; you will need to
22 ensure the Bugzilla install file permissions are set appropriately.
22 ensure the Bugzilla install file permissions are set appropriately.
23
23
24 Configuring the extension:
24 Configuring the extension:
25
25
26 [bugzilla]
26 [bugzilla]
27
27
28 host Hostname of the MySQL server holding the Bugzilla
28 host Hostname of the MySQL server holding the Bugzilla
29 database.
29 database.
30 db Name of the Bugzilla database in MySQL. Default 'bugs'.
30 db Name of the Bugzilla database in MySQL. Default 'bugs'.
31 user Username to use to access MySQL server. Default 'bugs'.
31 user Username to use to access MySQL server. Default 'bugs'.
32 password Password to use to access MySQL server.
32 password Password to use to access MySQL server.
33 timeout Database connection timeout (seconds). Default 5.
33 timeout Database connection timeout (seconds). Default 5.
34 version Bugzilla version. Specify '3.0' for Bugzilla versions
34 version Bugzilla version. Specify '3.0' for Bugzilla versions
35 3.0 and later, '2.18' for Bugzilla versions from 2.18
35 3.0 and later, '2.18' for Bugzilla versions from 2.18
36 and '2.16' for versions prior to 2.18.
36 and '2.16' for versions prior to 2.18.
37 bzuser Fallback Bugzilla user name to record comments with, if
37 bzuser Fallback Bugzilla user name to record comments with, if
38 changeset committer cannot be found as a Bugzilla user.
38 changeset committer cannot be found as a Bugzilla user.
39 bzdir Bugzilla install directory. Used by default notify.
39 bzdir Bugzilla install directory. Used by default notify.
40 Default '/var/www/html/bugzilla'.
40 Default '/var/www/html/bugzilla'.
41 notify The command to run to get Bugzilla to send bug change
41 notify The command to run to get Bugzilla to send bug change
42 notification emails. Substitutes from a map with 3
42 notification emails. Substitutes from a map with 3
43 keys, 'bzdir', 'id' (bug id) and 'user' (committer
43 keys, 'bzdir', 'id' (bug id) and 'user' (committer
44 bugzilla email). Default depends on version; from 2.18
44 bugzilla email). Default depends on version; from 2.18
45 it is "cd %(bzdir)s && perl -T contrib/sendbugmail.pl
45 it is "cd %(bzdir)s && perl -T contrib/sendbugmail.pl
46 %(id)s %(user)s".
46 %(id)s %(user)s".
47 regexp Regular expression to match bug IDs in changeset commit
47 regexp Regular expression to match bug IDs in changeset commit
48 message. Must contain one "()" group. The default
48 message. Must contain one "()" group. The default
49 expression matches 'Bug 1234', 'Bug no. 1234', 'Bug
49 expression matches 'Bug 1234', 'Bug no. 1234', 'Bug
50 number 1234', 'Bugs 1234,5678', 'Bug 1234 and 5678' and
50 number 1234', 'Bugs 1234,5678', 'Bug 1234 and 5678' and
51 variations thereof. Matching is case insensitive.
51 variations thereof. Matching is case insensitive.
52 style The style file to use when formatting comments.
52 style The style file to use when formatting comments.
53 template Template to use when formatting comments. Overrides
53 template Template to use when formatting comments. Overrides
54 style if specified. In addition to the usual Mercurial
54 style if specified. In addition to the usual Mercurial
55 keywords, the extension specifies:
55 keywords, the extension specifies:
56 {bug} The Bugzilla bug ID.
56 {bug} The Bugzilla bug ID.
57 {root} The full pathname of the Mercurial
57 {root} The full pathname of the Mercurial
58 repository.
58 repository.
59 {webroot} Stripped pathname of the Mercurial
59 {webroot} Stripped pathname of the Mercurial
60 repository.
60 repository.
61 {hgweb} Base URL for browsing Mercurial
61 {hgweb} Base URL for browsing Mercurial
62 repositories.
62 repositories.
63 Default 'changeset {node|short} in repo {root} refers '
63 Default 'changeset {node|short} in repo {root} refers '
64 'to bug {bug}.\\ndetails:\\n\\t{desc|tabindent}'
64 'to bug {bug}.\\ndetails:\\n\\t{desc|tabindent}'
65 strip The number of slashes to strip from the front of {root}
65 strip The number of slashes to strip from the front of {root}
66 to produce {webroot}. Default 0.
66 to produce {webroot}. Default 0.
67 usermap Path of file containing Mercurial committer ID to
67 usermap Path of file containing Mercurial committer ID to
68 Bugzilla user ID mappings. If specified, the file
68 Bugzilla user ID mappings. If specified, the file
69 should contain one mapping per line,
69 should contain one mapping per line,
70 "committer"="Bugzilla user". See also the [usermap]
70 "committer"="Bugzilla user". See also the [usermap]
71 section.
71 section.
72
72
73 [usermap]
73 [usermap]
74 Any entries in this section specify mappings of Mercurial
74 Any entries in this section specify mappings of Mercurial
75 committer ID to Bugzilla user ID. See also [bugzilla].usermap.
75 committer ID to Bugzilla user ID. See also [bugzilla].usermap.
76 "committer"="Bugzilla user"
76 "committer"="Bugzilla user"
77
77
78 [web]
78 [web]
79 baseurl Base URL for browsing Mercurial repositories. Reference
79 baseurl Base URL for browsing Mercurial repositories. Reference
80 from templates as {hgweb}.
80 from templates as {hgweb}.
81
81
82 Activating the extension:
82 Activating the extension:
83
83
84 [extensions]
84 [extensions]
85 hgext.bugzilla =
85 hgext.bugzilla =
86
86
87 [hooks]
87 [hooks]
88 # run bugzilla hook on every change pulled or pushed in here
88 # run bugzilla hook on every change pulled or pushed in here
89 incoming.bugzilla = python:hgext.bugzilla.hook
89 incoming.bugzilla = python:hgext.bugzilla.hook
90
90
91 Example configuration:
91 Example configuration:
92
92
93 This example configuration is for a collection of Mercurial
93 This example configuration is for a collection of Mercurial
94 repositories in /var/local/hg/repos/ used with a local Bugzilla 3.2
94 repositories in /var/local/hg/repos/ used with a local Bugzilla 3.2
95 installation in /opt/bugzilla-3.2.
95 installation in /opt/bugzilla-3.2.
96
96
97 [bugzilla]
97 [bugzilla]
98 host=localhost
98 host=localhost
99 password=XYZZY
99 password=XYZZY
100 version=3.0
100 version=3.0
101 bzuser=unknown@domain.com
101 bzuser=unknown@domain.com
102 bzdir=/opt/bugzilla-3.2
102 bzdir=/opt/bugzilla-3.2
103 template=Changeset {node|short} in {root|basename}.\\n{hgweb}/{webroot}/rev/{node|short}\\n\\n{desc}\\n
103 template=Changeset {node|short} in {root|basename}.\\n{hgweb}/{webroot}/rev/{node|short}\\n\\n{desc}\\n
104 strip=5
104 strip=5
105
105
106 [web]
106 [web]
107 baseurl=http://dev.domain.com/hg
107 baseurl=http://dev.domain.com/hg
108
108
109 [usermap]
109 [usermap]
110 user@emaildomain.com=user.name@bugzilladomain.com
110 user@emaildomain.com=user.name@bugzilladomain.com
111
111
112 Commits add a comment to the Bugzilla bug record of the form:
112 Commits add a comment to the Bugzilla bug record of the form:
113
113
114 Changeset 3b16791d6642 in repository-name.
114 Changeset 3b16791d6642 in repository-name.
115 http://dev.domain.com/hg/repository-name/rev/3b16791d6642
115 http://dev.domain.com/hg/repository-name/rev/3b16791d6642
116
116
117 Changeset commit comment. Bug 1234.
117 Changeset commit comment. Bug 1234.
118 '''
118 '''
119
119
120 from mercurial.i18n import _
120 from mercurial.i18n import _
121 from mercurial.node import short
121 from mercurial.node import short
122 from mercurial import cmdutil, templater, util
122 from mercurial import cmdutil, templater, util
123 import re, time
123 import re, time
124
124
125 MySQLdb = None
125 MySQLdb = None
126
126
127 def buglist(ids):
127 def buglist(ids):
128 return '(' + ','.join(map(str, ids)) + ')'
128 return '(' + ','.join(map(str, ids)) + ')'
129
129
130 class bugzilla_2_16(object):
130 class bugzilla_2_16(object):
131 '''support for bugzilla version 2.16.'''
131 '''support for bugzilla version 2.16.'''
132
132
133 def __init__(self, ui):
133 def __init__(self, ui):
134 self.ui = ui
134 self.ui = ui
135 host = self.ui.config('bugzilla', 'host', 'localhost')
135 host = self.ui.config('bugzilla', 'host', 'localhost')
136 user = self.ui.config('bugzilla', 'user', 'bugs')
136 user = self.ui.config('bugzilla', 'user', 'bugs')
137 passwd = self.ui.config('bugzilla', 'password')
137 passwd = self.ui.config('bugzilla', 'password')
138 db = self.ui.config('bugzilla', 'db', 'bugs')
138 db = self.ui.config('bugzilla', 'db', 'bugs')
139 timeout = int(self.ui.config('bugzilla', 'timeout', 5))
139 timeout = int(self.ui.config('bugzilla', 'timeout', 5))
140 usermap = self.ui.config('bugzilla', 'usermap')
140 usermap = self.ui.config('bugzilla', 'usermap')
141 if usermap:
141 if usermap:
142 self.ui.readconfig(usermap, 'usermap')
142 self.ui.readconfig(usermap, 'usermap')
143 self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
143 self.ui.note(_('connecting to %s:%s as %s, password %s\n') %
144 (host, db, user, '*' * len(passwd)))
144 (host, db, user, '*' * len(passwd)))
145 self.conn = MySQLdb.connect(host=host, user=user, passwd=passwd,
145 self.conn = MySQLdb.connect(host=host, user=user, passwd=passwd,
146 db=db, connect_timeout=timeout)
146 db=db, connect_timeout=timeout)
147 self.cursor = self.conn.cursor()
147 self.cursor = self.conn.cursor()
148 self.longdesc_id = self.get_longdesc_id()
148 self.longdesc_id = self.get_longdesc_id()
149 self.user_ids = {}
149 self.user_ids = {}
150 self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
150 self.default_notify = "cd %(bzdir)s && ./processmail %(id)s %(user)s"
151
151
152 def run(self, *args, **kwargs):
152 def run(self, *args, **kwargs):
153 '''run a query.'''
153 '''run a query.'''
154 self.ui.note(_('query: %s %s\n') % (args, kwargs))
154 self.ui.note(_('query: %s %s\n') % (args, kwargs))
155 try:
155 try:
156 self.cursor.execute(*args, **kwargs)
156 self.cursor.execute(*args, **kwargs)
157 except MySQLdb.MySQLError:
157 except MySQLdb.MySQLError:
158 self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
158 self.ui.note(_('failed query: %s %s\n') % (args, kwargs))
159 raise
159 raise
160
160
161 def get_longdesc_id(self):
161 def get_longdesc_id(self):
162 '''get identity of longdesc field'''
162 '''get identity of longdesc field'''
163 self.run('select fieldid from fielddefs where name = "longdesc"')
163 self.run('select fieldid from fielddefs where name = "longdesc"')
164 ids = self.cursor.fetchall()
164 ids = self.cursor.fetchall()
165 if len(ids) != 1:
165 if len(ids) != 1:
166 raise util.Abort(_('unknown database schema'))
166 raise util.Abort(_('unknown database schema'))
167 return ids[0][0]
167 return ids[0][0]
168
168
169 def filter_real_bug_ids(self, ids):
169 def filter_real_bug_ids(self, ids):
170 '''filter non-existent bug ids from the list.'''
170 '''filter non-existent bug ids from the list.'''
171 self.run('select bug_id from bugs where bug_id in %s' % buglist(ids))
171 self.run('select bug_id from bugs where bug_id in %s' % buglist(ids))
172 return util.sort([c[0] for c in self.cursor.fetchall()])
172 return util.sort([c[0] for c in self.cursor.fetchall()])
173
173
174 def filter_unknown_bug_ids(self, node, ids):
174 def filter_unknown_bug_ids(self, node, ids):
175 '''filter bug ids from list that already refer to this changeset.'''
175 '''filter bug ids from list that already refer to this changeset.'''
176
176
177 self.run('''select bug_id from longdescs where
177 self.run('''select bug_id from longdescs where
178 bug_id in %s and thetext like "%%%s%%"''' %
178 bug_id in %s and thetext like "%%%s%%"''' %
179 (buglist(ids), short(node)))
179 (buglist(ids), short(node)))
180 unknown = dict.fromkeys(ids)
180 unknown = set(ids)
181 for (id,) in self.cursor.fetchall():
181 for (id,) in self.cursor.fetchall():
182 self.ui.status(_('bug %d already knows about changeset %s\n') %
182 self.ui.status(_('bug %d already knows about changeset %s\n') %
183 (id, short(node)))
183 (id, short(node)))
184 unknown.pop(id, None)
184 unknown.discard(id)
185 return util.sort(unknown.keys())
185 return util.sort(unknown)
186
186
187 def notify(self, ids, committer):
187 def notify(self, ids, committer):
188 '''tell bugzilla to send mail.'''
188 '''tell bugzilla to send mail.'''
189
189
190 self.ui.status(_('telling bugzilla to send mail:\n'))
190 self.ui.status(_('telling bugzilla to send mail:\n'))
191 (user, userid) = self.get_bugzilla_user(committer)
191 (user, userid) = self.get_bugzilla_user(committer)
192 for id in ids:
192 for id in ids:
193 self.ui.status(_(' bug %s\n') % id)
193 self.ui.status(_(' bug %s\n') % id)
194 cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
194 cmdfmt = self.ui.config('bugzilla', 'notify', self.default_notify)
195 bzdir = self.ui.config('bugzilla', 'bzdir', '/var/www/html/bugzilla')
195 bzdir = self.ui.config('bugzilla', 'bzdir', '/var/www/html/bugzilla')
196 try:
196 try:
197 # Backwards-compatible with old notify string, which
197 # Backwards-compatible with old notify string, which
198 # took one string. This will throw with a new format
198 # took one string. This will throw with a new format
199 # string.
199 # string.
200 cmd = cmdfmt % id
200 cmd = cmdfmt % id
201 except TypeError:
201 except TypeError:
202 cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
202 cmd = cmdfmt % {'bzdir': bzdir, 'id': id, 'user': user}
203 self.ui.note(_('running notify command %s\n') % cmd)
203 self.ui.note(_('running notify command %s\n') % cmd)
204 fp = util.popen('(%s) 2>&1' % cmd)
204 fp = util.popen('(%s) 2>&1' % cmd)
205 out = fp.read()
205 out = fp.read()
206 ret = fp.close()
206 ret = fp.close()
207 if ret:
207 if ret:
208 self.ui.warn(out)
208 self.ui.warn(out)
209 raise util.Abort(_('bugzilla notify command %s') %
209 raise util.Abort(_('bugzilla notify command %s') %
210 util.explain_exit(ret)[0])
210 util.explain_exit(ret)[0])
211 self.ui.status(_('done\n'))
211 self.ui.status(_('done\n'))
212
212
213 def get_user_id(self, user):
213 def get_user_id(self, user):
214 '''look up numeric bugzilla user id.'''
214 '''look up numeric bugzilla user id.'''
215 try:
215 try:
216 return self.user_ids[user]
216 return self.user_ids[user]
217 except KeyError:
217 except KeyError:
218 try:
218 try:
219 userid = int(user)
219 userid = int(user)
220 except ValueError:
220 except ValueError:
221 self.ui.note(_('looking up user %s\n') % user)
221 self.ui.note(_('looking up user %s\n') % user)
222 self.run('''select userid from profiles
222 self.run('''select userid from profiles
223 where login_name like %s''', user)
223 where login_name like %s''', user)
224 all = self.cursor.fetchall()
224 all = self.cursor.fetchall()
225 if len(all) != 1:
225 if len(all) != 1:
226 raise KeyError(user)
226 raise KeyError(user)
227 userid = int(all[0][0])
227 userid = int(all[0][0])
228 self.user_ids[user] = userid
228 self.user_ids[user] = userid
229 return userid
229 return userid
230
230
231 def map_committer(self, user):
231 def map_committer(self, user):
232 '''map name of committer to bugzilla user name.'''
232 '''map name of committer to bugzilla user name.'''
233 for committer, bzuser in self.ui.configitems('usermap'):
233 for committer, bzuser in self.ui.configitems('usermap'):
234 if committer.lower() == user.lower():
234 if committer.lower() == user.lower():
235 return bzuser
235 return bzuser
236 return user
236 return user
237
237
238 def get_bugzilla_user(self, committer):
238 def get_bugzilla_user(self, committer):
239 '''see if committer is a registered bugzilla user. Return
239 '''see if committer is a registered bugzilla user. Return
240 bugzilla username and userid if so. If not, return default
240 bugzilla username and userid if so. If not, return default
241 bugzilla username and userid.'''
241 bugzilla username and userid.'''
242 user = self.map_committer(committer)
242 user = self.map_committer(committer)
243 try:
243 try:
244 userid = self.get_user_id(user)
244 userid = self.get_user_id(user)
245 except KeyError:
245 except KeyError:
246 try:
246 try:
247 defaultuser = self.ui.config('bugzilla', 'bzuser')
247 defaultuser = self.ui.config('bugzilla', 'bzuser')
248 if not defaultuser:
248 if not defaultuser:
249 raise util.Abort(_('cannot find bugzilla user id for %s') %
249 raise util.Abort(_('cannot find bugzilla user id for %s') %
250 user)
250 user)
251 userid = self.get_user_id(defaultuser)
251 userid = self.get_user_id(defaultuser)
252 user = defaultuser
252 user = defaultuser
253 except KeyError:
253 except KeyError:
254 raise util.Abort(_('cannot find bugzilla user id for %s or %s') %
254 raise util.Abort(_('cannot find bugzilla user id for %s or %s') %
255 (user, defaultuser))
255 (user, defaultuser))
256 return (user, userid)
256 return (user, userid)
257
257
258 def add_comment(self, bugid, text, committer):
258 def add_comment(self, bugid, text, committer):
259 '''add comment to bug. try adding comment as committer of
259 '''add comment to bug. try adding comment as committer of
260 changeset, otherwise as default bugzilla user.'''
260 changeset, otherwise as default bugzilla user.'''
261 (user, userid) = self.get_bugzilla_user(committer)
261 (user, userid) = self.get_bugzilla_user(committer)
262 now = time.strftime('%Y-%m-%d %H:%M:%S')
262 now = time.strftime('%Y-%m-%d %H:%M:%S')
263 self.run('''insert into longdescs
263 self.run('''insert into longdescs
264 (bug_id, who, bug_when, thetext)
264 (bug_id, who, bug_when, thetext)
265 values (%s, %s, %s, %s)''',
265 values (%s, %s, %s, %s)''',
266 (bugid, userid, now, text))
266 (bugid, userid, now, text))
267 self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
267 self.run('''insert into bugs_activity (bug_id, who, bug_when, fieldid)
268 values (%s, %s, %s, %s)''',
268 values (%s, %s, %s, %s)''',
269 (bugid, userid, now, self.longdesc_id))
269 (bugid, userid, now, self.longdesc_id))
270 self.conn.commit()
270 self.conn.commit()
271
271
272 class bugzilla_2_18(bugzilla_2_16):
272 class bugzilla_2_18(bugzilla_2_16):
273 '''support for bugzilla 2.18 series.'''
273 '''support for bugzilla 2.18 series.'''
274
274
275 def __init__(self, ui):
275 def __init__(self, ui):
276 bugzilla_2_16.__init__(self, ui)
276 bugzilla_2_16.__init__(self, ui)
277 self.default_notify = "cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s"
277 self.default_notify = "cd %(bzdir)s && perl -T contrib/sendbugmail.pl %(id)s %(user)s"
278
278
279 class bugzilla_3_0(bugzilla_2_18):
279 class bugzilla_3_0(bugzilla_2_18):
280 '''support for bugzilla 3.0 series.'''
280 '''support for bugzilla 3.0 series.'''
281
281
282 def __init__(self, ui):
282 def __init__(self, ui):
283 bugzilla_2_18.__init__(self, ui)
283 bugzilla_2_18.__init__(self, ui)
284
284
285 def get_longdesc_id(self):
285 def get_longdesc_id(self):
286 '''get identity of longdesc field'''
286 '''get identity of longdesc field'''
287 self.run('select id from fielddefs where name = "longdesc"')
287 self.run('select id from fielddefs where name = "longdesc"')
288 ids = self.cursor.fetchall()
288 ids = self.cursor.fetchall()
289 if len(ids) != 1:
289 if len(ids) != 1:
290 raise util.Abort(_('unknown database schema'))
290 raise util.Abort(_('unknown database schema'))
291 return ids[0][0]
291 return ids[0][0]
292
292
293 class bugzilla(object):
293 class bugzilla(object):
294 # supported versions of bugzilla. different versions have
294 # supported versions of bugzilla. different versions have
295 # different schemas.
295 # different schemas.
296 _versions = {
296 _versions = {
297 '2.16': bugzilla_2_16,
297 '2.16': bugzilla_2_16,
298 '2.18': bugzilla_2_18,
298 '2.18': bugzilla_2_18,
299 '3.0': bugzilla_3_0
299 '3.0': bugzilla_3_0
300 }
300 }
301
301
302 _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
302 _default_bug_re = (r'bugs?\s*,?\s*(?:#|nos?\.?|num(?:ber)?s?)?\s*'
303 r'((?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)')
303 r'((?:\d+\s*(?:,?\s*(?:and)?)?\s*)+)')
304
304
305 _bz = None
305 _bz = None
306
306
307 def __init__(self, ui, repo):
307 def __init__(self, ui, repo):
308 self.ui = ui
308 self.ui = ui
309 self.repo = repo
309 self.repo = repo
310
310
311 def bz(self):
311 def bz(self):
312 '''return object that knows how to talk to bugzilla version in
312 '''return object that knows how to talk to bugzilla version in
313 use.'''
313 use.'''
314
314
315 if bugzilla._bz is None:
315 if bugzilla._bz is None:
316 bzversion = self.ui.config('bugzilla', 'version')
316 bzversion = self.ui.config('bugzilla', 'version')
317 try:
317 try:
318 bzclass = bugzilla._versions[bzversion]
318 bzclass = bugzilla._versions[bzversion]
319 except KeyError:
319 except KeyError:
320 raise util.Abort(_('bugzilla version %s not supported') %
320 raise util.Abort(_('bugzilla version %s not supported') %
321 bzversion)
321 bzversion)
322 bugzilla._bz = bzclass(self.ui)
322 bugzilla._bz = bzclass(self.ui)
323 return bugzilla._bz
323 return bugzilla._bz
324
324
325 def __getattr__(self, key):
325 def __getattr__(self, key):
326 return getattr(self.bz(), key)
326 return getattr(self.bz(), key)
327
327
328 _bug_re = None
328 _bug_re = None
329 _split_re = None
329 _split_re = None
330
330
331 def find_bug_ids(self, ctx):
331 def find_bug_ids(self, ctx):
332 '''find valid bug ids that are referred to in changeset
332 '''find valid bug ids that are referred to in changeset
333 comments and that do not already have references to this
333 comments and that do not already have references to this
334 changeset.'''
334 changeset.'''
335
335
336 if bugzilla._bug_re is None:
336 if bugzilla._bug_re is None:
337 bugzilla._bug_re = re.compile(
337 bugzilla._bug_re = re.compile(
338 self.ui.config('bugzilla', 'regexp', bugzilla._default_bug_re),
338 self.ui.config('bugzilla', 'regexp', bugzilla._default_bug_re),
339 re.IGNORECASE)
339 re.IGNORECASE)
340 bugzilla._split_re = re.compile(r'\D+')
340 bugzilla._split_re = re.compile(r'\D+')
341 start = 0
341 start = 0
342 ids = {}
342 ids = {}
343 while True:
343 while True:
344 m = bugzilla._bug_re.search(ctx.description(), start)
344 m = bugzilla._bug_re.search(ctx.description(), start)
345 if not m:
345 if not m:
346 break
346 break
347 start = m.end()
347 start = m.end()
348 for id in bugzilla._split_re.split(m.group(1)):
348 for id in bugzilla._split_re.split(m.group(1)):
349 if not id: continue
349 if not id: continue
350 ids[int(id)] = 1
350 ids[int(id)] = 1
351 ids = ids.keys()
351 ids = ids.keys()
352 if ids:
352 if ids:
353 ids = self.filter_real_bug_ids(ids)
353 ids = self.filter_real_bug_ids(ids)
354 if ids:
354 if ids:
355 ids = self.filter_unknown_bug_ids(ctx.node(), ids)
355 ids = self.filter_unknown_bug_ids(ctx.node(), ids)
356 return ids
356 return ids
357
357
358 def update(self, bugid, ctx):
358 def update(self, bugid, ctx):
359 '''update bugzilla bug with reference to changeset.'''
359 '''update bugzilla bug with reference to changeset.'''
360
360
361 def webroot(root):
361 def webroot(root):
362 '''strip leading prefix of repo root and turn into
362 '''strip leading prefix of repo root and turn into
363 url-safe path.'''
363 url-safe path.'''
364 count = int(self.ui.config('bugzilla', 'strip', 0))
364 count = int(self.ui.config('bugzilla', 'strip', 0))
365 root = util.pconvert(root)
365 root = util.pconvert(root)
366 while count > 0:
366 while count > 0:
367 c = root.find('/')
367 c = root.find('/')
368 if c == -1:
368 if c == -1:
369 break
369 break
370 root = root[c+1:]
370 root = root[c+1:]
371 count -= 1
371 count -= 1
372 return root
372 return root
373
373
374 mapfile = self.ui.config('bugzilla', 'style')
374 mapfile = self.ui.config('bugzilla', 'style')
375 tmpl = self.ui.config('bugzilla', 'template')
375 tmpl = self.ui.config('bugzilla', 'template')
376 t = cmdutil.changeset_templater(self.ui, self.repo,
376 t = cmdutil.changeset_templater(self.ui, self.repo,
377 False, None, mapfile, False)
377 False, None, mapfile, False)
378 if not mapfile and not tmpl:
378 if not mapfile and not tmpl:
379 tmpl = _('changeset {node|short} in repo {root} refers '
379 tmpl = _('changeset {node|short} in repo {root} refers '
380 'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
380 'to bug {bug}.\ndetails:\n\t{desc|tabindent}')
381 if tmpl:
381 if tmpl:
382 tmpl = templater.parsestring(tmpl, quoted=False)
382 tmpl = templater.parsestring(tmpl, quoted=False)
383 t.use_template(tmpl)
383 t.use_template(tmpl)
384 self.ui.pushbuffer()
384 self.ui.pushbuffer()
385 t.show(ctx, changes=ctx.changeset(),
385 t.show(ctx, changes=ctx.changeset(),
386 bug=str(bugid),
386 bug=str(bugid),
387 hgweb=self.ui.config('web', 'baseurl'),
387 hgweb=self.ui.config('web', 'baseurl'),
388 root=self.repo.root,
388 root=self.repo.root,
389 webroot=webroot(self.repo.root))
389 webroot=webroot(self.repo.root))
390 data = self.ui.popbuffer()
390 data = self.ui.popbuffer()
391 self.add_comment(bugid, data, util.email(ctx.user()))
391 self.add_comment(bugid, data, util.email(ctx.user()))
392
392
393 def hook(ui, repo, hooktype, node=None, **kwargs):
393 def hook(ui, repo, hooktype, node=None, **kwargs):
394 '''add comment to bugzilla for each changeset that refers to a
394 '''add comment to bugzilla for each changeset that refers to a
395 bugzilla bug id. only add a comment once per bug, so same change
395 bugzilla bug id. only add a comment once per bug, so same change
396 seen multiple times does not fill bug with duplicate data.'''
396 seen multiple times does not fill bug with duplicate data.'''
397 try:
397 try:
398 import MySQLdb as mysql
398 import MySQLdb as mysql
399 global MySQLdb
399 global MySQLdb
400 MySQLdb = mysql
400 MySQLdb = mysql
401 except ImportError, err:
401 except ImportError, err:
402 raise util.Abort(_('python mysql support not available: %s') % err)
402 raise util.Abort(_('python mysql support not available: %s') % err)
403
403
404 if node is None:
404 if node is None:
405 raise util.Abort(_('hook type %s does not pass a changeset id') %
405 raise util.Abort(_('hook type %s does not pass a changeset id') %
406 hooktype)
406 hooktype)
407 try:
407 try:
408 bz = bugzilla(ui, repo)
408 bz = bugzilla(ui, repo)
409 ctx = repo[node]
409 ctx = repo[node]
410 ids = bz.find_bug_ids(ctx)
410 ids = bz.find_bug_ids(ctx)
411 if ids:
411 if ids:
412 for id in ids:
412 for id in ids:
413 bz.update(id, ctx)
413 bz.update(id, ctx)
414 bz.notify(ids, util.email(ctx.user()))
414 bz.notify(ids, util.email(ctx.user()))
415 except MySQLdb.MySQLError, err:
415 except MySQLdb.MySQLError, err:
416 raise util.Abort(_('database error: %s') % err[1])
416 raise util.Abort(_('database error: %s') % err[1])
417
417
@@ -1,2613 +1,2611 b''
1 # mq.py - patch queues for mercurial
1 # mq.py - patch queues for mercurial
2 #
2 #
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
3 # Copyright 2005, 2006 Chris Mason <mason@suse.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 '''patch management and development
8 '''patch management and development
9
9
10 This extension lets you work with a stack of patches in a Mercurial
10 This extension lets you work with a stack of patches in a Mercurial
11 repository. It manages two stacks of patches - all known patches, and
11 repository. It manages two stacks of patches - all known patches, and
12 applied patches (subset of known patches).
12 applied patches (subset of known patches).
13
13
14 Known patches are represented as patch files in the .hg/patches
14 Known patches are represented as patch files in the .hg/patches
15 directory. Applied patches are both patch files and changesets.
15 directory. Applied patches are both patch files and changesets.
16
16
17 Common tasks (use "hg help command" for more details):
17 Common tasks (use "hg help command" for more details):
18
18
19 prepare repository to work with patches qinit
19 prepare repository to work with patches qinit
20 create new patch qnew
20 create new patch qnew
21 import existing patch qimport
21 import existing patch qimport
22
22
23 print patch series qseries
23 print patch series qseries
24 print applied patches qapplied
24 print applied patches qapplied
25 print name of top applied patch qtop
25 print name of top applied patch qtop
26
26
27 add known patch to applied stack qpush
27 add known patch to applied stack qpush
28 remove patch from applied stack qpop
28 remove patch from applied stack qpop
29 refresh contents of top applied patch qrefresh
29 refresh contents of top applied patch qrefresh
30 '''
30 '''
31
31
32 from mercurial.i18n import _
32 from mercurial.i18n import _
33 from mercurial.node import bin, hex, short, nullid, nullrev
33 from mercurial.node import bin, hex, short, nullid, nullrev
34 from mercurial.lock import release
34 from mercurial.lock import release
35 from mercurial import commands, cmdutil, hg, patch, util
35 from mercurial import commands, cmdutil, hg, patch, util
36 from mercurial import repair, extensions, url, error
36 from mercurial import repair, extensions, url, error
37 import os, sys, re, errno
37 import os, sys, re, errno
38
38
39 commands.norepo += " qclone"
39 commands.norepo += " qclone"
40
40
41 # Patch names looks like unix-file names.
41 # Patch names looks like unix-file names.
42 # They must be joinable with queue directory and result in the patch path.
42 # They must be joinable with queue directory and result in the patch path.
43 normname = util.normpath
43 normname = util.normpath
44
44
45 class statusentry:
45 class statusentry:
46 def __init__(self, rev, name=None):
46 def __init__(self, rev, name=None):
47 if not name:
47 if not name:
48 fields = rev.split(':', 1)
48 fields = rev.split(':', 1)
49 if len(fields) == 2:
49 if len(fields) == 2:
50 self.rev, self.name = fields
50 self.rev, self.name = fields
51 else:
51 else:
52 self.rev, self.name = None, None
52 self.rev, self.name = None, None
53 else:
53 else:
54 self.rev, self.name = rev, name
54 self.rev, self.name = rev, name
55
55
56 def __str__(self):
56 def __str__(self):
57 return self.rev + ':' + self.name
57 return self.rev + ':' + self.name
58
58
59 class patchheader(object):
59 class patchheader(object):
60 def __init__(self, message, comments, user, date, haspatch):
60 def __init__(self, message, comments, user, date, haspatch):
61 self.message = message
61 self.message = message
62 self.comments = comments
62 self.comments = comments
63 self.user = user
63 self.user = user
64 self.date = date
64 self.date = date
65 self.haspatch = haspatch
65 self.haspatch = haspatch
66
66
67 def setuser(self, user):
67 def setuser(self, user):
68 if not self.setheader(['From: ', '# User '], user):
68 if not self.setheader(['From: ', '# User '], user):
69 try:
69 try:
70 patchheaderat = self.comments.index('# HG changeset patch')
70 patchheaderat = self.comments.index('# HG changeset patch')
71 self.comments.insert(patchheaderat + 1,'# User ' + user)
71 self.comments.insert(patchheaderat + 1,'# User ' + user)
72 except ValueError:
72 except ValueError:
73 self.comments = ['From: ' + user, ''] + self.comments
73 self.comments = ['From: ' + user, ''] + self.comments
74 self.user = user
74 self.user = user
75
75
76 def setdate(self, date):
76 def setdate(self, date):
77 if self.setheader(['# Date '], date):
77 if self.setheader(['# Date '], date):
78 self.date = date
78 self.date = date
79
79
80 def setmessage(self, message):
80 def setmessage(self, message):
81 if self.comments:
81 if self.comments:
82 self._delmsg()
82 self._delmsg()
83 self.message = [message]
83 self.message = [message]
84 self.comments += self.message
84 self.comments += self.message
85
85
86 def setheader(self, prefixes, new):
86 def setheader(self, prefixes, new):
87 '''Update all references to a field in the patch header.
87 '''Update all references to a field in the patch header.
88 If none found, add it email style.'''
88 If none found, add it email style.'''
89 res = False
89 res = False
90 for prefix in prefixes:
90 for prefix in prefixes:
91 for i in xrange(len(self.comments)):
91 for i in xrange(len(self.comments)):
92 if self.comments[i].startswith(prefix):
92 if self.comments[i].startswith(prefix):
93 self.comments[i] = prefix + new
93 self.comments[i] = prefix + new
94 res = True
94 res = True
95 break
95 break
96 return res
96 return res
97
97
98 def __str__(self):
98 def __str__(self):
99 if not self.comments:
99 if not self.comments:
100 return ''
100 return ''
101 return '\n'.join(self.comments) + '\n\n'
101 return '\n'.join(self.comments) + '\n\n'
102
102
103 def _delmsg(self):
103 def _delmsg(self):
104 '''Remove existing message, keeping the rest of the comments fields.
104 '''Remove existing message, keeping the rest of the comments fields.
105 If comments contains 'subject: ', message will prepend
105 If comments contains 'subject: ', message will prepend
106 the field and a blank line.'''
106 the field and a blank line.'''
107 if self.message:
107 if self.message:
108 subj = 'subject: ' + self.message[0].lower()
108 subj = 'subject: ' + self.message[0].lower()
109 for i in xrange(len(self.comments)):
109 for i in xrange(len(self.comments)):
110 if subj == self.comments[i].lower():
110 if subj == self.comments[i].lower():
111 del self.comments[i]
111 del self.comments[i]
112 self.message = self.message[2:]
112 self.message = self.message[2:]
113 break
113 break
114 ci = 0
114 ci = 0
115 for mi in xrange(len(self.message)):
115 for mi in xrange(len(self.message)):
116 while self.message[mi] != self.comments[ci]:
116 while self.message[mi] != self.comments[ci]:
117 ci += 1
117 ci += 1
118 del self.comments[ci]
118 del self.comments[ci]
119
119
120 class queue:
120 class queue:
121 def __init__(self, ui, path, patchdir=None):
121 def __init__(self, ui, path, patchdir=None):
122 self.basepath = path
122 self.basepath = path
123 self.path = patchdir or os.path.join(path, "patches")
123 self.path = patchdir or os.path.join(path, "patches")
124 self.opener = util.opener(self.path)
124 self.opener = util.opener(self.path)
125 self.ui = ui
125 self.ui = ui
126 self.applied = []
126 self.applied = []
127 self.full_series = []
127 self.full_series = []
128 self.applied_dirty = 0
128 self.applied_dirty = 0
129 self.series_dirty = 0
129 self.series_dirty = 0
130 self.series_path = "series"
130 self.series_path = "series"
131 self.status_path = "status"
131 self.status_path = "status"
132 self.guards_path = "guards"
132 self.guards_path = "guards"
133 self.active_guards = None
133 self.active_guards = None
134 self.guards_dirty = False
134 self.guards_dirty = False
135 self._diffopts = None
135 self._diffopts = None
136
136
137 if os.path.exists(self.join(self.series_path)):
137 if os.path.exists(self.join(self.series_path)):
138 self.full_series = self.opener(self.series_path).read().splitlines()
138 self.full_series = self.opener(self.series_path).read().splitlines()
139 self.parse_series()
139 self.parse_series()
140
140
141 if os.path.exists(self.join(self.status_path)):
141 if os.path.exists(self.join(self.status_path)):
142 lines = self.opener(self.status_path).read().splitlines()
142 lines = self.opener(self.status_path).read().splitlines()
143 self.applied = [statusentry(l) for l in lines]
143 self.applied = [statusentry(l) for l in lines]
144
144
145 def diffopts(self):
145 def diffopts(self):
146 if self._diffopts is None:
146 if self._diffopts is None:
147 self._diffopts = patch.diffopts(self.ui)
147 self._diffopts = patch.diffopts(self.ui)
148 return self._diffopts
148 return self._diffopts
149
149
150 def join(self, *p):
150 def join(self, *p):
151 return os.path.join(self.path, *p)
151 return os.path.join(self.path, *p)
152
152
153 def find_series(self, patch):
153 def find_series(self, patch):
154 pre = re.compile("(\s*)([^#]+)")
154 pre = re.compile("(\s*)([^#]+)")
155 index = 0
155 index = 0
156 for l in self.full_series:
156 for l in self.full_series:
157 m = pre.match(l)
157 m = pre.match(l)
158 if m:
158 if m:
159 s = m.group(2)
159 s = m.group(2)
160 s = s.rstrip()
160 s = s.rstrip()
161 if s == patch:
161 if s == patch:
162 return index
162 return index
163 index += 1
163 index += 1
164 return None
164 return None
165
165
166 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
166 guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
167
167
168 def parse_series(self):
168 def parse_series(self):
169 self.series = []
169 self.series = []
170 self.series_guards = []
170 self.series_guards = []
171 for l in self.full_series:
171 for l in self.full_series:
172 h = l.find('#')
172 h = l.find('#')
173 if h == -1:
173 if h == -1:
174 patch = l
174 patch = l
175 comment = ''
175 comment = ''
176 elif h == 0:
176 elif h == 0:
177 continue
177 continue
178 else:
178 else:
179 patch = l[:h]
179 patch = l[:h]
180 comment = l[h:]
180 comment = l[h:]
181 patch = patch.strip()
181 patch = patch.strip()
182 if patch:
182 if patch:
183 if patch in self.series:
183 if patch in self.series:
184 raise util.Abort(_('%s appears more than once in %s') %
184 raise util.Abort(_('%s appears more than once in %s') %
185 (patch, self.join(self.series_path)))
185 (patch, self.join(self.series_path)))
186 self.series.append(patch)
186 self.series.append(patch)
187 self.series_guards.append(self.guard_re.findall(comment))
187 self.series_guards.append(self.guard_re.findall(comment))
188
188
189 def check_guard(self, guard):
189 def check_guard(self, guard):
190 if not guard:
190 if not guard:
191 return _('guard cannot be an empty string')
191 return _('guard cannot be an empty string')
192 bad_chars = '# \t\r\n\f'
192 bad_chars = '# \t\r\n\f'
193 first = guard[0]
193 first = guard[0]
194 for c in '-+':
194 for c in '-+':
195 if first == c:
195 if first == c:
196 return (_('guard %r starts with invalid character: %r') %
196 return (_('guard %r starts with invalid character: %r') %
197 (guard, c))
197 (guard, c))
198 for c in bad_chars:
198 for c in bad_chars:
199 if c in guard:
199 if c in guard:
200 return _('invalid character in guard %r: %r') % (guard, c)
200 return _('invalid character in guard %r: %r') % (guard, c)
201
201
202 def set_active(self, guards):
202 def set_active(self, guards):
203 for guard in guards:
203 for guard in guards:
204 bad = self.check_guard(guard)
204 bad = self.check_guard(guard)
205 if bad:
205 if bad:
206 raise util.Abort(bad)
206 raise util.Abort(bad)
207 guards = util.sort(set(guards))
207 guards = util.sort(set(guards))
208 self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
208 self.ui.debug(_('active guards: %s\n') % ' '.join(guards))
209 self.active_guards = guards
209 self.active_guards = guards
210 self.guards_dirty = True
210 self.guards_dirty = True
211
211
212 def active(self):
212 def active(self):
213 if self.active_guards is None:
213 if self.active_guards is None:
214 self.active_guards = []
214 self.active_guards = []
215 try:
215 try:
216 guards = self.opener(self.guards_path).read().split()
216 guards = self.opener(self.guards_path).read().split()
217 except IOError, err:
217 except IOError, err:
218 if err.errno != errno.ENOENT: raise
218 if err.errno != errno.ENOENT: raise
219 guards = []
219 guards = []
220 for i, guard in enumerate(guards):
220 for i, guard in enumerate(guards):
221 bad = self.check_guard(guard)
221 bad = self.check_guard(guard)
222 if bad:
222 if bad:
223 self.ui.warn('%s:%d: %s\n' %
223 self.ui.warn('%s:%d: %s\n' %
224 (self.join(self.guards_path), i + 1, bad))
224 (self.join(self.guards_path), i + 1, bad))
225 else:
225 else:
226 self.active_guards.append(guard)
226 self.active_guards.append(guard)
227 return self.active_guards
227 return self.active_guards
228
228
229 def set_guards(self, idx, guards):
229 def set_guards(self, idx, guards):
230 for g in guards:
230 for g in guards:
231 if len(g) < 2:
231 if len(g) < 2:
232 raise util.Abort(_('guard %r too short') % g)
232 raise util.Abort(_('guard %r too short') % g)
233 if g[0] not in '-+':
233 if g[0] not in '-+':
234 raise util.Abort(_('guard %r starts with invalid char') % g)
234 raise util.Abort(_('guard %r starts with invalid char') % g)
235 bad = self.check_guard(g[1:])
235 bad = self.check_guard(g[1:])
236 if bad:
236 if bad:
237 raise util.Abort(bad)
237 raise util.Abort(bad)
238 drop = self.guard_re.sub('', self.full_series[idx])
238 drop = self.guard_re.sub('', self.full_series[idx])
239 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
239 self.full_series[idx] = drop + ''.join([' #' + g for g in guards])
240 self.parse_series()
240 self.parse_series()
241 self.series_dirty = True
241 self.series_dirty = True
242
242
243 def pushable(self, idx):
243 def pushable(self, idx):
244 if isinstance(idx, str):
244 if isinstance(idx, str):
245 idx = self.series.index(idx)
245 idx = self.series.index(idx)
246 patchguards = self.series_guards[idx]
246 patchguards = self.series_guards[idx]
247 if not patchguards:
247 if not patchguards:
248 return True, None
248 return True, None
249 guards = self.active()
249 guards = self.active()
250 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
250 exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
251 if exactneg:
251 if exactneg:
252 return False, exactneg[0]
252 return False, exactneg[0]
253 pos = [g for g in patchguards if g[0] == '+']
253 pos = [g for g in patchguards if g[0] == '+']
254 exactpos = [g for g in pos if g[1:] in guards]
254 exactpos = [g for g in pos if g[1:] in guards]
255 if pos:
255 if pos:
256 if exactpos:
256 if exactpos:
257 return True, exactpos[0]
257 return True, exactpos[0]
258 return False, pos
258 return False, pos
259 return True, ''
259 return True, ''
260
260
261 def explain_pushable(self, idx, all_patches=False):
261 def explain_pushable(self, idx, all_patches=False):
262 write = all_patches and self.ui.write or self.ui.warn
262 write = all_patches and self.ui.write or self.ui.warn
263 if all_patches or self.ui.verbose:
263 if all_patches or self.ui.verbose:
264 if isinstance(idx, str):
264 if isinstance(idx, str):
265 idx = self.series.index(idx)
265 idx = self.series.index(idx)
266 pushable, why = self.pushable(idx)
266 pushable, why = self.pushable(idx)
267 if all_patches and pushable:
267 if all_patches and pushable:
268 if why is None:
268 if why is None:
269 write(_('allowing %s - no guards in effect\n') %
269 write(_('allowing %s - no guards in effect\n') %
270 self.series[idx])
270 self.series[idx])
271 else:
271 else:
272 if not why:
272 if not why:
273 write(_('allowing %s - no matching negative guards\n') %
273 write(_('allowing %s - no matching negative guards\n') %
274 self.series[idx])
274 self.series[idx])
275 else:
275 else:
276 write(_('allowing %s - guarded by %r\n') %
276 write(_('allowing %s - guarded by %r\n') %
277 (self.series[idx], why))
277 (self.series[idx], why))
278 if not pushable:
278 if not pushable:
279 if why:
279 if why:
280 write(_('skipping %s - guarded by %r\n') %
280 write(_('skipping %s - guarded by %r\n') %
281 (self.series[idx], why))
281 (self.series[idx], why))
282 else:
282 else:
283 write(_('skipping %s - no matching guards\n') %
283 write(_('skipping %s - no matching guards\n') %
284 self.series[idx])
284 self.series[idx])
285
285
286 def save_dirty(self):
286 def save_dirty(self):
287 def write_list(items, path):
287 def write_list(items, path):
288 fp = self.opener(path, 'w')
288 fp = self.opener(path, 'w')
289 for i in items:
289 for i in items:
290 fp.write("%s\n" % i)
290 fp.write("%s\n" % i)
291 fp.close()
291 fp.close()
292 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
292 if self.applied_dirty: write_list(map(str, self.applied), self.status_path)
293 if self.series_dirty: write_list(self.full_series, self.series_path)
293 if self.series_dirty: write_list(self.full_series, self.series_path)
294 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
294 if self.guards_dirty: write_list(self.active_guards, self.guards_path)
295
295
296 def readheaders(self, patch):
296 def readheaders(self, patch):
297 def eatdiff(lines):
297 def eatdiff(lines):
298 while lines:
298 while lines:
299 l = lines[-1]
299 l = lines[-1]
300 if (l.startswith("diff -") or
300 if (l.startswith("diff -") or
301 l.startswith("Index:") or
301 l.startswith("Index:") or
302 l.startswith("===========")):
302 l.startswith("===========")):
303 del lines[-1]
303 del lines[-1]
304 else:
304 else:
305 break
305 break
306 def eatempty(lines):
306 def eatempty(lines):
307 while lines:
307 while lines:
308 l = lines[-1]
308 l = lines[-1]
309 if re.match('\s*$', l):
309 if re.match('\s*$', l):
310 del lines[-1]
310 del lines[-1]
311 else:
311 else:
312 break
312 break
313
313
314 pf = self.join(patch)
314 pf = self.join(patch)
315 message = []
315 message = []
316 comments = []
316 comments = []
317 user = None
317 user = None
318 date = None
318 date = None
319 format = None
319 format = None
320 subject = None
320 subject = None
321 diffstart = 0
321 diffstart = 0
322
322
323 for line in file(pf):
323 for line in file(pf):
324 line = line.rstrip()
324 line = line.rstrip()
325 if line.startswith('diff --git'):
325 if line.startswith('diff --git'):
326 diffstart = 2
326 diffstart = 2
327 break
327 break
328 if diffstart:
328 if diffstart:
329 if line.startswith('+++ '):
329 if line.startswith('+++ '):
330 diffstart = 2
330 diffstart = 2
331 break
331 break
332 if line.startswith("--- "):
332 if line.startswith("--- "):
333 diffstart = 1
333 diffstart = 1
334 continue
334 continue
335 elif format == "hgpatch":
335 elif format == "hgpatch":
336 # parse values when importing the result of an hg export
336 # parse values when importing the result of an hg export
337 if line.startswith("# User "):
337 if line.startswith("# User "):
338 user = line[7:]
338 user = line[7:]
339 elif line.startswith("# Date "):
339 elif line.startswith("# Date "):
340 date = line[7:]
340 date = line[7:]
341 elif not line.startswith("# ") and line:
341 elif not line.startswith("# ") and line:
342 message.append(line)
342 message.append(line)
343 format = None
343 format = None
344 elif line == '# HG changeset patch':
344 elif line == '# HG changeset patch':
345 format = "hgpatch"
345 format = "hgpatch"
346 elif (format != "tagdone" and (line.startswith("Subject: ") or
346 elif (format != "tagdone" and (line.startswith("Subject: ") or
347 line.startswith("subject: "))):
347 line.startswith("subject: "))):
348 subject = line[9:]
348 subject = line[9:]
349 format = "tag"
349 format = "tag"
350 elif (format != "tagdone" and (line.startswith("From: ") or
350 elif (format != "tagdone" and (line.startswith("From: ") or
351 line.startswith("from: "))):
351 line.startswith("from: "))):
352 user = line[6:]
352 user = line[6:]
353 format = "tag"
353 format = "tag"
354 elif format == "tag" and line == "":
354 elif format == "tag" and line == "":
355 # when looking for tags (subject: from: etc) they
355 # when looking for tags (subject: from: etc) they
356 # end once you find a blank line in the source
356 # end once you find a blank line in the source
357 format = "tagdone"
357 format = "tagdone"
358 elif message or line:
358 elif message or line:
359 message.append(line)
359 message.append(line)
360 comments.append(line)
360 comments.append(line)
361
361
362 eatdiff(message)
362 eatdiff(message)
363 eatdiff(comments)
363 eatdiff(comments)
364 eatempty(message)
364 eatempty(message)
365 eatempty(comments)
365 eatempty(comments)
366
366
367 # make sure message isn't empty
367 # make sure message isn't empty
368 if format and format.startswith("tag") and subject:
368 if format and format.startswith("tag") and subject:
369 message.insert(0, "")
369 message.insert(0, "")
370 message.insert(0, subject)
370 message.insert(0, subject)
371 return patchheader(message, comments, user, date, diffstart > 1)
371 return patchheader(message, comments, user, date, diffstart > 1)
372
372
373 def removeundo(self, repo):
373 def removeundo(self, repo):
374 undo = repo.sjoin('undo')
374 undo = repo.sjoin('undo')
375 if not os.path.exists(undo):
375 if not os.path.exists(undo):
376 return
376 return
377 try:
377 try:
378 os.unlink(undo)
378 os.unlink(undo)
379 except OSError, inst:
379 except OSError, inst:
380 self.ui.warn(_('error removing undo: %s\n') % str(inst))
380 self.ui.warn(_('error removing undo: %s\n') % str(inst))
381
381
382 def printdiff(self, repo, node1, node2=None, files=None,
382 def printdiff(self, repo, node1, node2=None, files=None,
383 fp=None, changes=None, opts={}):
383 fp=None, changes=None, opts={}):
384 m = cmdutil.match(repo, files, opts)
384 m = cmdutil.match(repo, files, opts)
385 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
385 chunks = patch.diff(repo, node1, node2, m, changes, self.diffopts())
386 write = fp is None and repo.ui.write or fp.write
386 write = fp is None and repo.ui.write or fp.write
387 for chunk in chunks:
387 for chunk in chunks:
388 write(chunk)
388 write(chunk)
389
389
390 def mergeone(self, repo, mergeq, head, patch, rev):
390 def mergeone(self, repo, mergeq, head, patch, rev):
391 # first try just applying the patch
391 # first try just applying the patch
392 (err, n) = self.apply(repo, [ patch ], update_status=False,
392 (err, n) = self.apply(repo, [ patch ], update_status=False,
393 strict=True, merge=rev)
393 strict=True, merge=rev)
394
394
395 if err == 0:
395 if err == 0:
396 return (err, n)
396 return (err, n)
397
397
398 if n is None:
398 if n is None:
399 raise util.Abort(_("apply failed for patch %s") % patch)
399 raise util.Abort(_("apply failed for patch %s") % patch)
400
400
401 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
401 self.ui.warn(_("patch didn't work out, merging %s\n") % patch)
402
402
403 # apply failed, strip away that rev and merge.
403 # apply failed, strip away that rev and merge.
404 hg.clean(repo, head)
404 hg.clean(repo, head)
405 self.strip(repo, n, update=False, backup='strip')
405 self.strip(repo, n, update=False, backup='strip')
406
406
407 ctx = repo[rev]
407 ctx = repo[rev]
408 ret = hg.merge(repo, rev)
408 ret = hg.merge(repo, rev)
409 if ret:
409 if ret:
410 raise util.Abort(_("update returned %d") % ret)
410 raise util.Abort(_("update returned %d") % ret)
411 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
411 n = repo.commit(None, ctx.description(), ctx.user(), force=1)
412 if n == None:
412 if n == None:
413 raise util.Abort(_("repo commit failed"))
413 raise util.Abort(_("repo commit failed"))
414 try:
414 try:
415 ph = mergeq.readheaders(patch)
415 ph = mergeq.readheaders(patch)
416 except:
416 except:
417 raise util.Abort(_("unable to read %s") % patch)
417 raise util.Abort(_("unable to read %s") % patch)
418
418
419 patchf = self.opener(patch, "w")
419 patchf = self.opener(patch, "w")
420 comments = str(ph)
420 comments = str(ph)
421 if comments:
421 if comments:
422 patchf.write(comments)
422 patchf.write(comments)
423 self.printdiff(repo, head, n, fp=patchf)
423 self.printdiff(repo, head, n, fp=patchf)
424 patchf.close()
424 patchf.close()
425 self.removeundo(repo)
425 self.removeundo(repo)
426 return (0, n)
426 return (0, n)
427
427
428 def qparents(self, repo, rev=None):
428 def qparents(self, repo, rev=None):
429 if rev is None:
429 if rev is None:
430 (p1, p2) = repo.dirstate.parents()
430 (p1, p2) = repo.dirstate.parents()
431 if p2 == nullid:
431 if p2 == nullid:
432 return p1
432 return p1
433 if len(self.applied) == 0:
433 if len(self.applied) == 0:
434 return None
434 return None
435 return bin(self.applied[-1].rev)
435 return bin(self.applied[-1].rev)
436 pp = repo.changelog.parents(rev)
436 pp = repo.changelog.parents(rev)
437 if pp[1] != nullid:
437 if pp[1] != nullid:
438 arevs = [ x.rev for x in self.applied ]
438 arevs = [ x.rev for x in self.applied ]
439 p0 = hex(pp[0])
439 p0 = hex(pp[0])
440 p1 = hex(pp[1])
440 p1 = hex(pp[1])
441 if p0 in arevs:
441 if p0 in arevs:
442 return pp[0]
442 return pp[0]
443 if p1 in arevs:
443 if p1 in arevs:
444 return pp[1]
444 return pp[1]
445 return pp[0]
445 return pp[0]
446
446
447 def mergepatch(self, repo, mergeq, series):
447 def mergepatch(self, repo, mergeq, series):
448 if len(self.applied) == 0:
448 if len(self.applied) == 0:
449 # each of the patches merged in will have two parents. This
449 # each of the patches merged in will have two parents. This
450 # can confuse the qrefresh, qdiff, and strip code because it
450 # can confuse the qrefresh, qdiff, and strip code because it
451 # needs to know which parent is actually in the patch queue.
451 # needs to know which parent is actually in the patch queue.
452 # so, we insert a merge marker with only one parent. This way
452 # so, we insert a merge marker with only one parent. This way
453 # the first patch in the queue is never a merge patch
453 # the first patch in the queue is never a merge patch
454 #
454 #
455 pname = ".hg.patches.merge.marker"
455 pname = ".hg.patches.merge.marker"
456 n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
456 n = repo.commit(None, '[mq]: merge marker', user=None, force=1)
457 self.removeundo(repo)
457 self.removeundo(repo)
458 self.applied.append(statusentry(hex(n), pname))
458 self.applied.append(statusentry(hex(n), pname))
459 self.applied_dirty = 1
459 self.applied_dirty = 1
460
460
461 head = self.qparents(repo)
461 head = self.qparents(repo)
462
462
463 for patch in series:
463 for patch in series:
464 patch = mergeq.lookup(patch, strict=True)
464 patch = mergeq.lookup(patch, strict=True)
465 if not patch:
465 if not patch:
466 self.ui.warn(_("patch %s does not exist\n") % patch)
466 self.ui.warn(_("patch %s does not exist\n") % patch)
467 return (1, None)
467 return (1, None)
468 pushable, reason = self.pushable(patch)
468 pushable, reason = self.pushable(patch)
469 if not pushable:
469 if not pushable:
470 self.explain_pushable(patch, all_patches=True)
470 self.explain_pushable(patch, all_patches=True)
471 continue
471 continue
472 info = mergeq.isapplied(patch)
472 info = mergeq.isapplied(patch)
473 if not info:
473 if not info:
474 self.ui.warn(_("patch %s is not applied\n") % patch)
474 self.ui.warn(_("patch %s is not applied\n") % patch)
475 return (1, None)
475 return (1, None)
476 rev = bin(info[1])
476 rev = bin(info[1])
477 (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
477 (err, head) = self.mergeone(repo, mergeq, head, patch, rev)
478 if head:
478 if head:
479 self.applied.append(statusentry(hex(head), patch))
479 self.applied.append(statusentry(hex(head), patch))
480 self.applied_dirty = 1
480 self.applied_dirty = 1
481 if err:
481 if err:
482 return (err, head)
482 return (err, head)
483 self.save_dirty()
483 self.save_dirty()
484 return (0, head)
484 return (0, head)
485
485
486 def patch(self, repo, patchfile):
486 def patch(self, repo, patchfile):
487 '''Apply patchfile to the working directory.
487 '''Apply patchfile to the working directory.
488 patchfile: file name of patch'''
488 patchfile: file name of patch'''
489 files = {}
489 files = {}
490 try:
490 try:
491 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
491 fuzz = patch.patch(patchfile, self.ui, strip=1, cwd=repo.root,
492 files=files)
492 files=files)
493 except Exception, inst:
493 except Exception, inst:
494 self.ui.note(str(inst) + '\n')
494 self.ui.note(str(inst) + '\n')
495 if not self.ui.verbose:
495 if not self.ui.verbose:
496 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
496 self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
497 return (False, files, False)
497 return (False, files, False)
498
498
499 return (True, files, fuzz)
499 return (True, files, fuzz)
500
500
501 def apply(self, repo, series, list=False, update_status=True,
501 def apply(self, repo, series, list=False, update_status=True,
502 strict=False, patchdir=None, merge=None, all_files={}):
502 strict=False, patchdir=None, merge=None, all_files={}):
503 wlock = lock = tr = None
503 wlock = lock = tr = None
504 try:
504 try:
505 wlock = repo.wlock()
505 wlock = repo.wlock()
506 lock = repo.lock()
506 lock = repo.lock()
507 tr = repo.transaction()
507 tr = repo.transaction()
508 try:
508 try:
509 ret = self._apply(repo, series, list, update_status,
509 ret = self._apply(repo, series, list, update_status,
510 strict, patchdir, merge, all_files=all_files)
510 strict, patchdir, merge, all_files=all_files)
511 tr.close()
511 tr.close()
512 self.save_dirty()
512 self.save_dirty()
513 return ret
513 return ret
514 except:
514 except:
515 try:
515 try:
516 tr.abort()
516 tr.abort()
517 finally:
517 finally:
518 repo.invalidate()
518 repo.invalidate()
519 repo.dirstate.invalidate()
519 repo.dirstate.invalidate()
520 raise
520 raise
521 finally:
521 finally:
522 del tr
522 del tr
523 release(lock, wlock)
523 release(lock, wlock)
524 self.removeundo(repo)
524 self.removeundo(repo)
525
525
def _apply(self, repo, series, list=False, update_status=True,
           strict=False, patchdir=None, merge=None, all_files=None):
    """Apply the patches named in *series* to the working directory.

    Each pushable patch is read, applied with self.patch(), committed,
    and (when *update_status* is true) recorded on the applied stack.
    Non-pushable (guarded) patches are skipped with an explanation.

    *all_files*, when given, is a mutable mapping that collects every
    file touched while patching so callers can clean up after a failure.

    Returns a tuple (err, n): err is non-zero when applying stopped
    early (unreadable patch, rejects, or fuzz under *strict*); n is the
    node of the last commit made, or None when nothing was committed.
    """
    # TODO unify with commands.py
    # Use a fresh container per call: the previous default of {} was a
    # shared mutable default argument, evaluated once at def time.
    if all_files is None:
        all_files = {}
    if not patchdir:
        patchdir = self.path
    err = 0
    n = None
    for patchname in series:
        pushable, reason = self.pushable(patchname)
        if not pushable:
            self.explain_pushable(patchname, all_patches=True)
            continue
        self.ui.warn(_("applying %s\n") % patchname)
        pf = os.path.join(patchdir, patchname)

        try:
            ph = self.readheaders(patchname)
        except:
            # NOTE(review): bare except kept deliberately -- any failure
            # to parse the patch headers aborts the whole push.
            self.ui.warn(_("Unable to read %s\n") % patchname)
            err = 1
            break

        message = ph.message
        if not message:
            message = _("imported patch %s\n") % patchname
        else:
            # ph.message is a list of lines at this point
            if list:
                message.append(_("\nimported patch %s") % patchname)
            message = '\n'.join(message)

        if ph.haspatch:
            (patcherr, files, fuzz) = self.patch(repo, pf)
            all_files.update(files)
            # self.patch reports success as truthy; invert to an error flag
            patcherr = not patcherr
        else:
            self.ui.warn(_("patch %s is empty\n") % patchname)
            patcherr, files, fuzz = 0, [], 0

        if merge and files:
            # Mark as removed/merged and update dirstate parent info
            removed = []
            merged = []
            for f in files:
                if os.path.exists(repo.wjoin(f)):
                    merged.append(f)
                else:
                    removed.append(f)
            for f in removed:
                repo.dirstate.remove(f)
            for f in merged:
                repo.dirstate.merge(f)
            p1, p2 = repo.dirstate.parents()
            repo.dirstate.setparents(p1, merge)

        files = patch.updatedir(self.ui, repo, files)
        match = cmdutil.matchfiles(repo, files or [])
        n = repo.commit(files, message, ph.user, ph.date, match=match,
                        force=True)

        if n is None:
            raise util.Abort(_("repo commit failed"))

        if update_status:
            self.applied.append(statusentry(hex(n), patchname))

        if patcherr:
            self.ui.warn(_("patch failed, rejects left in working dir\n"))
            err = 1
            break

        if fuzz and strict:
            self.ui.warn(_("fuzz found when applying patch, stopping\n"))
            err = 1
            break
    return (err, n)
601
601
def _clean_series(self, patches):
    """Drop every patch in *patches* from the in-memory series file."""
    positions = util.sort([self.find_series(p) for p in patches])
    # delete from the highest index downward so earlier deletions do
    # not shift the positions of the remaining ones
    for pos in reversed(positions):
        del self.full_series[pos]
    self.parse_series()
    self.series_dirty = 1
608
608
def finish(self, repo, revs):
    """Move the applied patches in *revs* out of mq control (qfinish).

    *revs* must line up, bottom-up, with the start of the applied
    stack; each matching patch is removed from the applied stack and
    the series file, leaving its changeset as permanent history.

    Raises util.Abort when a revision is below the first applied patch
    or does not match the applied stack entry at its position.
    """
    # NOTE(review): revs is sorted in place here (mutating the caller's
    # list) and again below via util.sort; the second sort looks
    # redundant but is kept for identical behavior.
    revs.sort()
    firstrev = repo[self.applied[0].rev].rev()
    appliedbase = 0
    patches = []
    for rev in util.sort(revs):
        if rev < firstrev:
            raise util.Abort(_('revision %d is not managed') % rev)
        # each rev must equal the applied-stack node at the same
        # position, walking up from the bottom
        base = bin(self.applied[appliedbase].rev)
        node = repo.changelog.node(rev)
        if node != base:
            raise util.Abort(_('cannot delete revision %d above '
                               'applied patches') % rev)
        patches.append(self.applied[appliedbase].name)
        appliedbase += 1

    # remove the patch files: through the versioned patch repo when
    # one exists, otherwise directly from disk
    r = self.qrepo()
    if r:
        r.remove(patches, True)
    else:
        for p in patches:
            os.unlink(self.join(p))

    del self.applied[:appliedbase]
    self.applied_dirty = 1
    self._clean_series(patches)
635
635
def delete(self, repo, patches, opts):
    """qdelete: remove patches from mq control.

    *patches* are names of unapplied patches; opts['rev'] may
    additionally name applied revisions to forget, which must match
    the applied stack bottom-up.  Patch files are deleted from disk
    unless opts['keep'] is set.

    Raises util.Abort for an applied patch name, a patch not in the
    series, or a revision that does not line up with the stack.
    """
    if not patches and not opts.get('rev'):
        raise util.Abort(_('qdelete requires at least one revision or '
                           'patch name'))

    realpatches = []
    for patch in patches:
        patch = self.lookup(patch, strict=True)
        info = self.isapplied(patch)
        if info:
            raise util.Abort(_("cannot delete applied patch %s") % patch)
        if patch not in self.series:
            raise util.Abort(_("patch %s not in series file") % patch)
        realpatches.append(patch)

    appliedbase = 0
    if opts.get('rev'):
        if not self.applied:
            raise util.Abort(_('no patches applied'))
        revs = cmdutil.revrange(repo, opts['rev'])
        if len(revs) > 1 and revs[0] > revs[1]:
            revs.reverse()
        # each rev must match the applied stack entry at the same
        # position, counting up from the bottom
        for rev in revs:
            if appliedbase >= len(self.applied):
                raise util.Abort(_("revision %d is not managed") % rev)

            base = bin(self.applied[appliedbase].rev)
            node = repo.changelog.node(rev)
            if node != base:
                raise util.Abort(_("cannot delete revision %d above "
                                   "applied patches") % rev)
            realpatches.append(self.applied[appliedbase].name)
            appliedbase += 1

    if not opts.get('keep'):
        # remove patch files via the versioned patch repo when present
        r = self.qrepo()
        if r:
            r.remove(realpatches, True)
        else:
            for p in realpatches:
                os.unlink(self.join(p))

    if appliedbase:
        del self.applied[:appliedbase]
        self.applied_dirty = 1
    self._clean_series(realpatches)
682
682
def check_toppatch(self, repo):
    """Return the node of the topmost applied patch, or None when the
    stack is empty.

    Aborts when the working directory is not based on that patch
    (i.e. is not at qtip).
    """
    if not self.applied:
        return None
    top = bin(self.applied[-1].rev)
    if top not in repo.dirstate.parents():
        raise util.Abort(_("working directory revision is not qtip"))
    return top
def check_localchanges(self, repo, force=False, refresh=True):
    """Return the (modified, added, removed, deleted) status lists.

    Unless *force* is true, abort when the working directory has any
    such changes; *refresh* only selects which abort message is shown.
    """
    modified, added, removed, deleted = repo.status()[:4]
    dirty = modified or added or removed or deleted
    if dirty and not force:
        if refresh:
            raise util.Abort(_("local changes found, refresh first"))
        raise util.Abort(_("local changes found"))
    return modified, added, removed, deleted
700
700
# file names mq uses internally; a patch may not shadow them
_reserved = ('series', 'status', 'guards')
def check_reserved_name(self, name):
    """Abort when *name* collides with mq control files or with the
    reserved '.hg'/'.mq' prefixes."""
    reserved = name in self._reserved or name.startswith(('.hg', '.mq'))
    if reserved:
        raise util.Abort(_('"%s" cannot be used as the name of a patch')
                         % name)
707
707
def new(self, repo, patchfn, *pats, **opts):
    """Create a new patch *patchfn* from current local changes (qnew).

    options:
    msg: a string or a no-argument function returning a string
    force: allow creation despite outstanding local changes
    user, date: recorded in the patch header
    include/exclude (with *pats): restrict which files are captured
    git: write the diff in git extended format

    The captured changes are committed and the commit recorded on the
    applied stack; on any failure the commit is rolled back and the
    half-written patch file is unlinked.  Raises util.Abort on a
    reserved/existing patch name or commit failure.
    """
    msg = opts.get('msg')
    force = opts.get('force')
    user = opts.get('user')
    date = opts.get('date')
    if date:
        date = util.parsedate(date)
    self.check_reserved_name(patchfn)
    if os.path.exists(self.join(patchfn)):
        raise util.Abort(_('patch "%s" already exists') % patchfn)
    if opts.get('include') or opts.get('exclude') or pats:
        match = cmdutil.match(repo, pats, opts)
        # detect missing files in pats
        def badfn(f, msg):
            raise util.Abort('%s: %s' % (f, msg))
        match.bad = badfn
        m, a, r, d = repo.status(match=match)[:4]
    else:
        # no explicit file selection: take every local change, aborting
        # (unless force) when there are none to guard against typos
        m, a, r, d = self.check_localchanges(repo, force)
        match = cmdutil.matchfiles(repo, m + a + r)
    commitfiles = m + a + r
    self.check_toppatch(repo)
    insert = self.full_series_end()
    wlock = repo.wlock()
    try:
        # if patch file write fails, abort early
        p = self.opener(patchfn, "w")
        try:
            if date:
                p.write("# HG changeset patch\n")
                if user:
                    p.write("# User " + user + "\n")
                p.write("# Date %d %d\n\n" % date)
            elif user:
                p.write("From: " + user + "\n\n")

            if callable(msg):
                msg = msg()
            commitmsg = msg and msg or ("[mq]: %s" % patchfn)
            n = repo.commit(commitfiles, commitmsg, user, date, match=match, force=True)
            if n == None:
                raise util.Abort(_("repo commit failed"))
            try:
                # commit succeeded: record the patch in the series and
                # on the applied stack, then write its contents
                self.full_series[insert:insert] = [patchfn]
                self.applied.append(statusentry(hex(n), patchfn))
                self.parse_series()
                self.series_dirty = 1
                self.applied_dirty = 1
                if msg:
                    msg = msg + "\n\n"
                    p.write(msg)
                if commitfiles:
                    diffopts = self.diffopts()
                    if opts.get('git'): diffopts.git = True
                    parent = self.qparents(repo, n)
                    chunks = patch.diff(repo, node1=parent, node2=n,
                                        match=match, opts=diffopts)
                    for chunk in chunks:
                        p.write(chunk)
                p.close()
                wlock.release()
                wlock = None
                r = self.qrepo()
                if r: r.add([patchfn])
            except:
                # anything after the commit failed: undo the commit
                repo.rollback()
                raise
        except Exception:
            # remove the partially written patch file before re-raising
            patchpath = self.join(patchfn)
            try:
                os.unlink(patchpath)
            except:
                self.ui.warn(_('error unlinking %s\n') % patchpath)
            raise
        self.removeundo(repo)
    finally:
        release(wlock)
788
788
def strip(self, repo, rev, update=True, backup="all", force=None):
    """Remove revision *rev* and its descendants from the repository.

    When *update* is true, first refuse to run over uncommitted
    changes (unless *force*) and move the working directory to the
    revision that will remain.  *backup* is passed through to
    repair.strip.
    """
    wlock = lock = None
    try:
        wlock = repo.wlock()
        lock = repo.lock()

        if update:
            self.check_localchanges(repo, force=force, refresh=False)
            urev = self.qparents(repo, rev)
            hg.clean(repo, urev)
            repo.dirstate.write()

        self.removeundo(repo)
        repair.strip(self.ui, repo, rev, backup)
        # strip may have unbundled a set of backed up revisions after
        # the actual strip
        self.removeundo(repo)
    finally:
        release(lock, wlock)
808
808
def isapplied(self, patch):
    """Return (index, rev, name) for *patch* on the applied stack,
    or None when it is not applied."""
    for index, entry in enumerate(self.applied):
        if entry.name == patch:
            return (index, entry.rev, entry.name)
    return None
816
816
817 # if the exact patch name does not exist, we try a few
817 # if the exact patch name does not exist, we try a few
818 # variations. If strict is passed, we try only #1
818 # variations. If strict is passed, we try only #1
819 #
819 #
820 # 1) a number to indicate an offset in the series file
820 # 1) a number to indicate an offset in the series file
821 # 2) a unique substring of the patch name was given
821 # 2) a unique substring of the patch name was given
822 # 3) patchname[-+]num to indicate an offset in the series file
822 # 3) patchname[-+]num to indicate an offset in the series file
def lookup(self, patch, strict=False):
    """Resolve *patch* to a patch name from the series file.

    Resolution order:
      1) exact name in the series
      2) an integer index into the series file (may be negative)
    and, unless *strict* is set:
      3) a unique substring of a patch name, or the special names
         'qtip' / 'qbase' when patches are applied
      4) name-N / name+N: an offset relative to another patch

    Returns None for a None input; raises util.Abort when nothing
    matches.
    """
    patch = patch and str(patch)

    def partial_name(s):
        # unique-substring match; warns and returns None on ambiguity
        if s in self.series:
            return s
        matches = [x for x in self.series if s in x]
        if len(matches) > 1:
            self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
            for m in matches:
                self.ui.warn(' %s\n' % m)
            return None
        if matches:
            return matches[0]
        if len(self.series) > 0 and len(self.applied) > 0:
            if s == 'qtip':
                return self.series[self.series_end(True)-1]
            if s == 'qbase':
                return self.series[0]
        return None

    if patch == None:
        return None
    if patch in self.series:
        return patch

    if not os.path.isfile(self.join(patch)):
        # no such patch file: maybe a numeric index into the series
        try:
            sno = int(patch)
        except(ValueError, OverflowError):
            pass
        else:
            if -len(self.series) <= sno < len(self.series):
                return self.series[sno]

    if not strict:
        res = partial_name(patch)
        if res:
            return res
        # 'name-N': the patch N positions before 'name' in the series
        minus = patch.rfind('-')
        if minus >= 0:
            res = partial_name(patch[:minus])
            if res:
                i = self.series.index(res)
                try:
                    off = int(patch[minus+1:] or 1)
                except(ValueError, OverflowError):
                    pass
                else:
                    if i - off >= 0:
                        return self.series[i - off]
        # 'name+N': the patch N positions after 'name' in the series
        plus = patch.rfind('+')
        if plus >= 0:
            res = partial_name(patch[:plus])
            if res:
                i = self.series.index(res)
                try:
                    off = int(patch[plus+1:] or 1)
                except(ValueError, OverflowError):
                    pass
                else:
                    if i + off < len(self.series):
                        return self.series[i + off]
    raise util.Abort(_("patch %s not in series") % patch)
887
887
def push(self, repo, patch=None, force=False, list=False,
         mergeq=None, all=False):
    """qpush: apply unapplied patches up to (and including) *patch*.

    With no *patch*, pushes the next patch; with *all*, pushes the
    whole series.  *force* skips the local-changes check; *mergeq*
    merges the patches from another queue instead of applying them.

    Returns 0 on success, 1 when nothing could be pushed or applying
    reported errors; on an exception while patching, the working
    directory is cleaned up before re-raising.
    """
    wlock = repo.wlock()
    # Fix: run everything under try/finally immediately after taking
    # the lock, so the early returns below (empty series, NOP push)
    # no longer leak the wlock.
    try:
        if repo.dirstate.parents()[0] != repo.changelog.tip():
            self.ui.status(_("(working directory not at tip)\n"))

        if not self.series:
            self.ui.warn(_('no patches in series\n'))
            return 0

        patch = self.lookup(patch)
        # Suppose our series file is: A B C and the current 'top'
        # patch is B. qpush C should be performed (moving forward)
        # qpush B is a NOP (no change) qpush A is an error (can't
        # go backwards with qpush)
        if patch:
            info = self.isapplied(patch)
            if info:
                if info[0] < len(self.applied) - 1:
                    raise util.Abort(
                        _("cannot push to a previous patch: %s") % patch)
                self.ui.warn(
                    _('qpush: %s is already at the top\n') % patch)
                return
            pushable, reason = self.pushable(patch)
            if not pushable:
                if reason:
                    reason = _('guarded by %r') % reason
                else:
                    reason = _('no matching guards')
                self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                return 1
        elif all:
            patch = self.series[-1]
            if self.isapplied(patch):
                self.ui.warn(_('all patches are currently applied\n'))
                return 0

        # Following the above example, starting at 'top' of B:
        # qpush should be performed (pushes C), but a subsequent
        # qpush without an argument is an error (nothing to
        # apply). This allows a loop of "...while hg qpush..." to
        # work as it detects an error when done
        start = self.series_end()
        if start == len(self.series):
            self.ui.warn(_('patch series already fully applied\n'))
            return 1
        if not force:
            self.check_localchanges(repo)

        self.applied_dirty = 1
        if start > 0:
            self.check_toppatch(repo)
        if not patch:
            patch = self.series[start]
            end = start + 1
        else:
            end = self.series.index(patch, start) + 1
        s = self.series[start:end]
        # collects every file touched while patching, for cleanup below
        all_files = {}
        try:
            if mergeq:
                ret = self.mergepatch(repo, mergeq, s)
            else:
                ret = self.apply(repo, s, list, all_files=all_files)
        except:
            self.ui.warn(_('cleaning up working directory...'))
            node = repo.dirstate.parents()[0]
            hg.revert(repo, node, None)
            unknown = repo.status(unknown=True)[4]
            # only remove unknown files that we know we touched or
            # created while patching
            for f in unknown:
                if f in all_files:
                    util.unlink(repo.wjoin(f))
            self.ui.warn(_('done\n'))
            raise
        top = self.applied[-1].name
        if ret[0]:
            self.ui.write(_("errors during apply, please fix and "
                            "refresh %s\n") % top)
        else:
            self.ui.write(_("now at: %s\n") % top)
        return ret[0]
    finally:
        wlock.release()
975
975
def pop(self, repo, patch=None, force=False, update=True, all=False):
    """qpop: unapply patches down to (but keeping) *patch*.

    With no *patch*, pops the topmost applied patch; with *all*, pops
    everything.  When *update* is true (or the dirstate turns out to
    sit on a popped revision) the working directory is restored to the
    new qtip's parent with a simplified in-place update, then the
    popped revisions are stripped.
    """
    # restore one file's content and flags from revision *rev*
    def getfile(f, rev, flags):
        t = repo.file(f).read(rev)
        repo.wwrite(f, t, flags)

    wlock = repo.wlock()
    try:
        if patch:
            # index, rev, patch
            info = self.isapplied(patch)
            if not info:
                patch = self.lookup(patch)
            info = self.isapplied(patch)
            if not info:
                raise util.Abort(_("patch %s is not applied") % patch)

        if len(self.applied) == 0:
            # Allow qpop -a to work repeatedly,
            # but not qpop without an argument
            self.ui.warn(_("no patches applied\n"))
            return not all

        if all:
            start = 0
        elif patch:
            start = info[0] + 1
        else:
            start = len(self.applied) - 1

        if start >= len(self.applied):
            self.ui.warn(_("qpop: %s is already at the top\n") % patch)
            return

        if not update:
            # even with --no-update, force an update when a dirstate
            # parent is about to be popped out from under us
            parents = repo.dirstate.parents()
            rr = [ bin(x.rev) for x in self.applied ]
            for p in parents:
                if p in rr:
                    self.ui.warn(_("qpop: forcing dirstate update\n"))
                    update = True
        else:
            # skip the working-directory update when no popped patch is
            # a parent of the working directory anyway
            parents = [p.hex() for p in repo[None].parents()]
            needupdate = False
            for entry in self.applied[start:]:
                if entry.rev in parents:
                    needupdate = True
                    break
            update = needupdate

        if not force and update:
            self.check_localchanges(repo)

        self.applied_dirty = 1
        end = len(self.applied)
        rev = bin(self.applied[start].rev)
        if update:
            top = self.check_toppatch(repo)

        try:
            heads = repo.changelog.heads(rev)
        except error.LookupError:
            node = short(rev)
            raise util.Abort(_('trying to pop unknown node %s') % node)

        # refuse to strip if descendants exist that mq does not manage
        if heads != [bin(self.applied[-1].rev)]:
            raise util.Abort(_("popping would remove a revision not "
                               "managed by this patch queue"))

        # we know there are no local changes, so we can make a simplified
        # form of hg.update.
        if update:
            qp = self.qparents(repo, rev)
            changes = repo.changelog.read(qp)
            mmap = repo.manifest.read(changes[0])
            m, a, r, d = repo.status(qp, top)[:4]
            if d:
                raise util.Abort(_("deletions found between repo revs"))
            # restore modified and removed files from the target rev
            for f in m:
                getfile(f, mmap[f], mmap.flags(f))
            for f in r:
                getfile(f, mmap[f], mmap.flags(f))
            for f in m + r:
                repo.dirstate.normal(f)
            # files added by the popped patches disappear entirely
            for f in a:
                try:
                    os.unlink(repo.wjoin(f))
                except OSError, e:
                    if e.errno != errno.ENOENT:
                        raise
                try: os.removedirs(os.path.dirname(repo.wjoin(f)))
                except: pass
                repo.dirstate.forget(f)
            repo.dirstate.setparents(qp, nullid)
        del self.applied[start:end]
        self.strip(repo, rev, update=False, backup='strip')
        if len(self.applied):
            self.ui.write(_("now at: %s\n") % self.applied[-1].name)
        else:
            self.ui.write(_("patch queue now empty\n"))
    finally:
        wlock.release()
1077
1077
def diff(self, repo, pats, opts):
    """qdiff: write the diff of the topmost applied patch to the ui.

    Prints a notice and returns early when no patches are applied.
    """
    toprev = self.check_toppatch(repo)
    if not toprev:
        self.ui.write(_("no patches applied\n"))
        return
    baserev = self.qparents(repo, toprev)
    self._diffopts = patch.diffopts(self.ui, opts)
    self.printdiff(repo, baserev, files=pats, opts=opts)
1086
1086
1087 def refresh(self, repo, pats=None, **opts):
1087 def refresh(self, repo, pats=None, **opts):
1088 if len(self.applied) == 0:
1088 if len(self.applied) == 0:
1089 self.ui.write(_("no patches applied\n"))
1089 self.ui.write(_("no patches applied\n"))
1090 return 1
1090 return 1
1091 msg = opts.get('msg', '').rstrip()
1091 msg = opts.get('msg', '').rstrip()
1092 newuser = opts.get('user')
1092 newuser = opts.get('user')
1093 newdate = opts.get('date')
1093 newdate = opts.get('date')
1094 if newdate:
1094 if newdate:
1095 newdate = '%d %d' % util.parsedate(newdate)
1095 newdate = '%d %d' % util.parsedate(newdate)
1096 wlock = repo.wlock()
1096 wlock = repo.wlock()
1097 try:
1097 try:
1098 self.check_toppatch(repo)
1098 self.check_toppatch(repo)
1099 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
1099 (top, patchfn) = (self.applied[-1].rev, self.applied[-1].name)
1100 top = bin(top)
1100 top = bin(top)
1101 if repo.changelog.heads(top) != [top]:
1101 if repo.changelog.heads(top) != [top]:
1102 raise util.Abort(_("cannot refresh a revision with children"))
1102 raise util.Abort(_("cannot refresh a revision with children"))
1103 cparents = repo.changelog.parents(top)
1103 cparents = repo.changelog.parents(top)
1104 patchparent = self.qparents(repo, top)
1104 patchparent = self.qparents(repo, top)
1105 ph = self.readheaders(patchfn)
1105 ph = self.readheaders(patchfn)
1106
1106
1107 patchf = self.opener(patchfn, 'r')
1107 patchf = self.opener(patchfn, 'r')
1108
1108
1109 # if the patch was a git patch, refresh it as a git patch
1109 # if the patch was a git patch, refresh it as a git patch
1110 for line in patchf:
1110 for line in patchf:
1111 if line.startswith('diff --git'):
1111 if line.startswith('diff --git'):
1112 self.diffopts().git = True
1112 self.diffopts().git = True
1113 break
1113 break
1114
1114
1115 if msg:
1115 if msg:
1116 ph.setmessage(msg)
1116 ph.setmessage(msg)
1117 if newuser:
1117 if newuser:
1118 ph.setuser(newuser)
1118 ph.setuser(newuser)
1119 if newdate:
1119 if newdate:
1120 ph.setdate(newdate)
1120 ph.setdate(newdate)
1121
1121
1122 # only commit new patch when write is complete
1122 # only commit new patch when write is complete
1123 patchf = self.opener(patchfn, 'w', atomictemp=True)
1123 patchf = self.opener(patchfn, 'w', atomictemp=True)
1124
1124
1125 patchf.seek(0)
1125 patchf.seek(0)
1126 patchf.truncate()
1126 patchf.truncate()
1127
1127
1128 comments = str(ph)
1128 comments = str(ph)
1129 if comments:
1129 if comments:
1130 patchf.write(comments)
1130 patchf.write(comments)
1131
1131
1132 if opts.get('git'):
1132 if opts.get('git'):
1133 self.diffopts().git = True
1133 self.diffopts().git = True
1134 tip = repo.changelog.tip()
1134 tip = repo.changelog.tip()
1135 if top == tip:
1135 if top == tip:
1136 # if the top of our patch queue is also the tip, there is an
1136 # if the top of our patch queue is also the tip, there is an
1137 # optimization here. We update the dirstate in place and strip
1137 # optimization here. We update the dirstate in place and strip
1138 # off the tip commit. Then just commit the current directory
1138 # off the tip commit. Then just commit the current directory
1139 # tree. We can also send repo.commit the list of files
1139 # tree. We can also send repo.commit the list of files
1140 # changed to speed up the diff
1140 # changed to speed up the diff
1141 #
1141 #
1142 # in short mode, we only diff the files included in the
1142 # in short mode, we only diff the files included in the
1143 # patch already plus specified files
1143 # patch already plus specified files
1144 #
1144 #
1145 # this should really read:
1145 # this should really read:
1146 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1146 # mm, dd, aa, aa2 = repo.status(tip, patchparent)[:4]
1147 # but we do it backwards to take advantage of manifest/chlog
1147 # but we do it backwards to take advantage of manifest/chlog
1148 # caching against the next repo.status call
1148 # caching against the next repo.status call
1149 #
1149 #
1150 mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
1150 mm, aa, dd, aa2 = repo.status(patchparent, tip)[:4]
1151 changes = repo.changelog.read(tip)
1151 changes = repo.changelog.read(tip)
1152 man = repo.manifest.read(changes[0])
1152 man = repo.manifest.read(changes[0])
1153 aaa = aa[:]
1153 aaa = aa[:]
1154 matchfn = cmdutil.match(repo, pats, opts)
1154 matchfn = cmdutil.match(repo, pats, opts)
1155 if opts.get('short'):
1155 if opts.get('short'):
1156 # if amending a patch, we start with existing
1156 # if amending a patch, we start with existing
1157 # files plus specified files - unfiltered
1157 # files plus specified files - unfiltered
1158 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1158 match = cmdutil.matchfiles(repo, mm + aa + dd + matchfn.files())
1159 # filter with inc/exl options
1159 # filter with inc/exl options
1160 matchfn = cmdutil.match(repo, opts=opts)
1160 matchfn = cmdutil.match(repo, opts=opts)
1161 else:
1161 else:
1162 match = cmdutil.matchall(repo)
1162 match = cmdutil.matchall(repo)
1163 m, a, r, d = repo.status(match=match)[:4]
1163 m, a, r, d = repo.status(match=match)[:4]
1164
1164
1165 # we might end up with files that were added between
1165 # we might end up with files that were added between
1166 # tip and the dirstate parent, but then changed in the
1166 # tip and the dirstate parent, but then changed in the
1167 # local dirstate. in this case, we want them to only
1167 # local dirstate. in this case, we want them to only
1168 # show up in the added section
1168 # show up in the added section
1169 for x in m:
1169 for x in m:
1170 if x not in aa:
1170 if x not in aa:
1171 mm.append(x)
1171 mm.append(x)
1172 # we might end up with files added by the local dirstate that
1172 # we might end up with files added by the local dirstate that
1173 # were deleted by the patch. In this case, they should only
1173 # were deleted by the patch. In this case, they should only
1174 # show up in the changed section.
1174 # show up in the changed section.
1175 for x in a:
1175 for x in a:
1176 if x in dd:
1176 if x in dd:
1177 del dd[dd.index(x)]
1177 del dd[dd.index(x)]
1178 mm.append(x)
1178 mm.append(x)
1179 else:
1179 else:
1180 aa.append(x)
1180 aa.append(x)
1181 # make sure any files deleted in the local dirstate
1181 # make sure any files deleted in the local dirstate
1182 # are not in the add or change column of the patch
1182 # are not in the add or change column of the patch
1183 forget = []
1183 forget = []
1184 for x in d + r:
1184 for x in d + r:
1185 if x in aa:
1185 if x in aa:
1186 del aa[aa.index(x)]
1186 del aa[aa.index(x)]
1187 forget.append(x)
1187 forget.append(x)
1188 continue
1188 continue
1189 elif x in mm:
1189 elif x in mm:
1190 del mm[mm.index(x)]
1190 del mm[mm.index(x)]
1191 dd.append(x)
1191 dd.append(x)
1192
1192
1193 m = list(set(mm))
1193 m = list(set(mm))
1194 r = list(set(dd))
1194 r = list(set(dd))
1195 a = list(set(aa))
1195 a = list(set(aa))
1196 c = [filter(matchfn, l) for l in (m, a, r)]
1196 c = [filter(matchfn, l) for l in (m, a, r)]
1197 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1197 match = cmdutil.matchfiles(repo, set(c[0] + c[1] + c[2]))
1198 chunks = patch.diff(repo, patchparent, match=match,
1198 chunks = patch.diff(repo, patchparent, match=match,
1199 changes=c, opts=self.diffopts())
1199 changes=c, opts=self.diffopts())
1200 for chunk in chunks:
1200 for chunk in chunks:
1201 patchf.write(chunk)
1201 patchf.write(chunk)
1202
1202
1203 try:
1203 try:
1204 if self.diffopts().git:
1204 if self.diffopts().git:
1205 copies = {}
1205 copies = {}
1206 for dst in a:
1206 for dst in a:
1207 src = repo.dirstate.copied(dst)
1207 src = repo.dirstate.copied(dst)
1208 # during qfold, the source file for copies may
1208 # during qfold, the source file for copies may
1209 # be removed. Treat this as a simple add.
1209 # be removed. Treat this as a simple add.
1210 if src is not None and src in repo.dirstate:
1210 if src is not None and src in repo.dirstate:
1211 copies.setdefault(src, []).append(dst)
1211 copies.setdefault(src, []).append(dst)
1212 repo.dirstate.add(dst)
1212 repo.dirstate.add(dst)
1213 # remember the copies between patchparent and tip
1213 # remember the copies between patchparent and tip
1214 for dst in aaa:
1214 for dst in aaa:
1215 f = repo.file(dst)
1215 f = repo.file(dst)
1216 src = f.renamed(man[dst])
1216 src = f.renamed(man[dst])
1217 if src:
1217 if src:
1218 copies.setdefault(src[0], []).extend(copies.get(dst, []))
1218 copies.setdefault(src[0], []).extend(copies.get(dst, []))
1219 if dst in a:
1219 if dst in a:
1220 copies[src[0]].append(dst)
1220 copies[src[0]].append(dst)
1221 # we can't copy a file created by the patch itself
1221 # we can't copy a file created by the patch itself
1222 if dst in copies:
1222 if dst in copies:
1223 del copies[dst]
1223 del copies[dst]
1224 for src, dsts in copies.iteritems():
1224 for src, dsts in copies.iteritems():
1225 for dst in dsts:
1225 for dst in dsts:
1226 repo.dirstate.copy(src, dst)
1226 repo.dirstate.copy(src, dst)
1227 else:
1227 else:
1228 for dst in a:
1228 for dst in a:
1229 repo.dirstate.add(dst)
1229 repo.dirstate.add(dst)
1230 # Drop useless copy information
1230 # Drop useless copy information
1231 for f in list(repo.dirstate.copies()):
1231 for f in list(repo.dirstate.copies()):
1232 repo.dirstate.copy(None, f)
1232 repo.dirstate.copy(None, f)
1233 for f in r:
1233 for f in r:
1234 repo.dirstate.remove(f)
1234 repo.dirstate.remove(f)
1235 # if the patch excludes a modified file, mark that
1235 # if the patch excludes a modified file, mark that
1236 # file with mtime=0 so status can see it.
1236 # file with mtime=0 so status can see it.
1237 mm = []
1237 mm = []
1238 for i in xrange(len(m)-1, -1, -1):
1238 for i in xrange(len(m)-1, -1, -1):
1239 if not matchfn(m[i]):
1239 if not matchfn(m[i]):
1240 mm.append(m[i])
1240 mm.append(m[i])
1241 del m[i]
1241 del m[i]
1242 for f in m:
1242 for f in m:
1243 repo.dirstate.normal(f)
1243 repo.dirstate.normal(f)
1244 for f in mm:
1244 for f in mm:
1245 repo.dirstate.normallookup(f)
1245 repo.dirstate.normallookup(f)
1246 for f in forget:
1246 for f in forget:
1247 repo.dirstate.forget(f)
1247 repo.dirstate.forget(f)
1248
1248
1249 if not msg:
1249 if not msg:
1250 if not ph.message:
1250 if not ph.message:
1251 message = "[mq]: %s\n" % patchfn
1251 message = "[mq]: %s\n" % patchfn
1252 else:
1252 else:
1253 message = "\n".join(ph.message)
1253 message = "\n".join(ph.message)
1254 else:
1254 else:
1255 message = msg
1255 message = msg
1256
1256
1257 user = ph.user or changes[1]
1257 user = ph.user or changes[1]
1258
1258
1259 # assumes strip can roll itself back if interrupted
1259 # assumes strip can roll itself back if interrupted
1260 repo.dirstate.setparents(*cparents)
1260 repo.dirstate.setparents(*cparents)
1261 self.applied.pop()
1261 self.applied.pop()
1262 self.applied_dirty = 1
1262 self.applied_dirty = 1
1263 self.strip(repo, top, update=False,
1263 self.strip(repo, top, update=False,
1264 backup='strip')
1264 backup='strip')
1265 except:
1265 except:
1266 repo.dirstate.invalidate()
1266 repo.dirstate.invalidate()
1267 raise
1267 raise
1268
1268
1269 try:
1269 try:
1270 # might be nice to attempt to roll back strip after this
1270 # might be nice to attempt to roll back strip after this
1271 patchf.rename()
1271 patchf.rename()
1272 n = repo.commit(match.files(), message, user, ph.date,
1272 n = repo.commit(match.files(), message, user, ph.date,
1273 match=match, force=1)
1273 match=match, force=1)
1274 self.applied.append(statusentry(hex(n), patchfn))
1274 self.applied.append(statusentry(hex(n), patchfn))
1275 except:
1275 except:
1276 ctx = repo[cparents[0]]
1276 ctx = repo[cparents[0]]
1277 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1277 repo.dirstate.rebuild(ctx.node(), ctx.manifest())
1278 self.save_dirty()
1278 self.save_dirty()
1279 self.ui.warn(_('refresh interrupted while patch was popped! '
1279 self.ui.warn(_('refresh interrupted while patch was popped! '
1280 '(revert --all, qpush to recover)\n'))
1280 '(revert --all, qpush to recover)\n'))
1281 raise
1281 raise
1282 else:
1282 else:
1283 self.printdiff(repo, patchparent, fp=patchf)
1283 self.printdiff(repo, patchparent, fp=patchf)
1284 patchf.rename()
1284 patchf.rename()
1285 added = repo.status()[1]
1285 added = repo.status()[1]
1286 for a in added:
1286 for a in added:
1287 f = repo.wjoin(a)
1287 f = repo.wjoin(a)
1288 try:
1288 try:
1289 os.unlink(f)
1289 os.unlink(f)
1290 except OSError, e:
1290 except OSError, e:
1291 if e.errno != errno.ENOENT:
1291 if e.errno != errno.ENOENT:
1292 raise
1292 raise
1293 try: os.removedirs(os.path.dirname(f))
1293 try: os.removedirs(os.path.dirname(f))
1294 except: pass
1294 except: pass
1295 # forget the file copies in the dirstate
1295 # forget the file copies in the dirstate
1296 # push should readd the files later on
1296 # push should readd the files later on
1297 repo.dirstate.forget(a)
1297 repo.dirstate.forget(a)
1298 self.pop(repo, force=True)
1298 self.pop(repo, force=True)
1299 self.push(repo, force=True)
1299 self.push(repo, force=True)
1300 finally:
1300 finally:
1301 wlock.release()
1301 wlock.release()
1302 self.removeundo(repo)
1302 self.removeundo(repo)
1303
1303
1304 def init(self, repo, create=False):
1304 def init(self, repo, create=False):
1305 if not create and os.path.isdir(self.path):
1305 if not create and os.path.isdir(self.path):
1306 raise util.Abort(_("patch queue directory already exists"))
1306 raise util.Abort(_("patch queue directory already exists"))
1307 try:
1307 try:
1308 os.mkdir(self.path)
1308 os.mkdir(self.path)
1309 except OSError, inst:
1309 except OSError, inst:
1310 if inst.errno != errno.EEXIST or not create:
1310 if inst.errno != errno.EEXIST or not create:
1311 raise
1311 raise
1312 if create:
1312 if create:
1313 return self.qrepo(create=True)
1313 return self.qrepo(create=True)
1314
1314
1315 def unapplied(self, repo, patch=None):
1315 def unapplied(self, repo, patch=None):
1316 if patch and patch not in self.series:
1316 if patch and patch not in self.series:
1317 raise util.Abort(_("patch %s is not in series file") % patch)
1317 raise util.Abort(_("patch %s is not in series file") % patch)
1318 if not patch:
1318 if not patch:
1319 start = self.series_end()
1319 start = self.series_end()
1320 else:
1320 else:
1321 start = self.series.index(patch) + 1
1321 start = self.series.index(patch) + 1
1322 unapplied = []
1322 unapplied = []
1323 for i in xrange(start, len(self.series)):
1323 for i in xrange(start, len(self.series)):
1324 pushable, reason = self.pushable(i)
1324 pushable, reason = self.pushable(i)
1325 if pushable:
1325 if pushable:
1326 unapplied.append((i, self.series[i]))
1326 unapplied.append((i, self.series[i]))
1327 self.explain_pushable(i)
1327 self.explain_pushable(i)
1328 return unapplied
1328 return unapplied
1329
1329
1330 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1330 def qseries(self, repo, missing=None, start=0, length=None, status=None,
1331 summary=False):
1331 summary=False):
1332 def displayname(patchname):
1332 def displayname(patchname):
1333 if summary:
1333 if summary:
1334 ph = self.readheaders(patchname)
1334 ph = self.readheaders(patchname)
1335 msg = ph.message
1335 msg = ph.message
1336 msg = msg and ': ' + msg[0] or ': '
1336 msg = msg and ': ' + msg[0] or ': '
1337 else:
1337 else:
1338 msg = ''
1338 msg = ''
1339 return '%s%s' % (patchname, msg)
1339 return '%s%s' % (patchname, msg)
1340
1340
1341 applied = dict.fromkeys([p.name for p in self.applied])
1341 applied = set([p.name for p in self.applied])
1342 if length is None:
1342 if length is None:
1343 length = len(self.series) - start
1343 length = len(self.series) - start
1344 if not missing:
1344 if not missing:
1345 for i in xrange(start, start+length):
1345 for i in xrange(start, start+length):
1346 patch = self.series[i]
1346 patch = self.series[i]
1347 if patch in applied:
1347 if patch in applied:
1348 stat = 'A'
1348 stat = 'A'
1349 elif self.pushable(i)[0]:
1349 elif self.pushable(i)[0]:
1350 stat = 'U'
1350 stat = 'U'
1351 else:
1351 else:
1352 stat = 'G'
1352 stat = 'G'
1353 pfx = ''
1353 pfx = ''
1354 if self.ui.verbose:
1354 if self.ui.verbose:
1355 pfx = '%d %s ' % (i, stat)
1355 pfx = '%d %s ' % (i, stat)
1356 elif status and status != stat:
1356 elif status and status != stat:
1357 continue
1357 continue
1358 self.ui.write('%s%s\n' % (pfx, displayname(patch)))
1358 self.ui.write('%s%s\n' % (pfx, displayname(patch)))
1359 else:
1359 else:
1360 msng_list = []
1360 msng_list = []
1361 for root, dirs, files in os.walk(self.path):
1361 for root, dirs, files in os.walk(self.path):
1362 d = root[len(self.path) + 1:]
1362 d = root[len(self.path) + 1:]
1363 for f in files:
1363 for f in files:
1364 fl = os.path.join(d, f)
1364 fl = os.path.join(d, f)
1365 if (fl not in self.series and
1365 if (fl not in self.series and
1366 fl not in (self.status_path, self.series_path,
1366 fl not in (self.status_path, self.series_path,
1367 self.guards_path)
1367 self.guards_path)
1368 and not fl.startswith('.')):
1368 and not fl.startswith('.')):
1369 msng_list.append(fl)
1369 msng_list.append(fl)
1370 for x in util.sort(msng_list):
1370 for x in util.sort(msng_list):
1371 pfx = self.ui.verbose and ('D ') or ''
1371 pfx = self.ui.verbose and ('D ') or ''
1372 self.ui.write("%s%s\n" % (pfx, displayname(x)))
1372 self.ui.write("%s%s\n" % (pfx, displayname(x)))
1373
1373
1374 def issaveline(self, l):
1374 def issaveline(self, l):
1375 if l.name == '.hg.patches.save.line':
1375 if l.name == '.hg.patches.save.line':
1376 return True
1376 return True
1377
1377
1378 def qrepo(self, create=False):
1378 def qrepo(self, create=False):
1379 if create or os.path.isdir(self.join(".hg")):
1379 if create or os.path.isdir(self.join(".hg")):
1380 return hg.repository(self.ui, path=self.path, create=create)
1380 return hg.repository(self.ui, path=self.path, create=create)
1381
1381
1382 def restore(self, repo, rev, delete=None, qupdate=None):
1382 def restore(self, repo, rev, delete=None, qupdate=None):
1383 c = repo.changelog.read(rev)
1383 c = repo.changelog.read(rev)
1384 desc = c[4].strip()
1384 desc = c[4].strip()
1385 lines = desc.splitlines()
1385 lines = desc.splitlines()
1386 i = 0
1386 i = 0
1387 datastart = None
1387 datastart = None
1388 series = []
1388 series = []
1389 applied = []
1389 applied = []
1390 qpp = None
1390 qpp = None
1391 for i in xrange(0, len(lines)):
1391 for i in xrange(0, len(lines)):
1392 if lines[i] == 'Patch Data:':
1392 if lines[i] == 'Patch Data:':
1393 datastart = i + 1
1393 datastart = i + 1
1394 elif lines[i].startswith('Dirstate:'):
1394 elif lines[i].startswith('Dirstate:'):
1395 l = lines[i].rstrip()
1395 l = lines[i].rstrip()
1396 l = l[10:].split(' ')
1396 l = l[10:].split(' ')
1397 qpp = [ bin(x) for x in l ]
1397 qpp = [ bin(x) for x in l ]
1398 elif datastart != None:
1398 elif datastart != None:
1399 l = lines[i].rstrip()
1399 l = lines[i].rstrip()
1400 se = statusentry(l)
1400 se = statusentry(l)
1401 file_ = se.name
1401 file_ = se.name
1402 if se.rev:
1402 if se.rev:
1403 applied.append(se)
1403 applied.append(se)
1404 else:
1404 else:
1405 series.append(file_)
1405 series.append(file_)
1406 if datastart == None:
1406 if datastart == None:
1407 self.ui.warn(_("No saved patch data found\n"))
1407 self.ui.warn(_("No saved patch data found\n"))
1408 return 1
1408 return 1
1409 self.ui.warn(_("restoring status: %s\n") % lines[0])
1409 self.ui.warn(_("restoring status: %s\n") % lines[0])
1410 self.full_series = series
1410 self.full_series = series
1411 self.applied = applied
1411 self.applied = applied
1412 self.parse_series()
1412 self.parse_series()
1413 self.series_dirty = 1
1413 self.series_dirty = 1
1414 self.applied_dirty = 1
1414 self.applied_dirty = 1
1415 heads = repo.changelog.heads()
1415 heads = repo.changelog.heads()
1416 if delete:
1416 if delete:
1417 if rev not in heads:
1417 if rev not in heads:
1418 self.ui.warn(_("save entry has children, leaving it alone\n"))
1418 self.ui.warn(_("save entry has children, leaving it alone\n"))
1419 else:
1419 else:
1420 self.ui.warn(_("removing save entry %s\n") % short(rev))
1420 self.ui.warn(_("removing save entry %s\n") % short(rev))
1421 pp = repo.dirstate.parents()
1421 pp = repo.dirstate.parents()
1422 if rev in pp:
1422 if rev in pp:
1423 update = True
1423 update = True
1424 else:
1424 else:
1425 update = False
1425 update = False
1426 self.strip(repo, rev, update=update, backup='strip')
1426 self.strip(repo, rev, update=update, backup='strip')
1427 if qpp:
1427 if qpp:
1428 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1428 self.ui.warn(_("saved queue repository parents: %s %s\n") %
1429 (short(qpp[0]), short(qpp[1])))
1429 (short(qpp[0]), short(qpp[1])))
1430 if qupdate:
1430 if qupdate:
1431 self.ui.status(_("queue directory updating\n"))
1431 self.ui.status(_("queue directory updating\n"))
1432 r = self.qrepo()
1432 r = self.qrepo()
1433 if not r:
1433 if not r:
1434 self.ui.warn(_("Unable to load queue repository\n"))
1434 self.ui.warn(_("Unable to load queue repository\n"))
1435 return 1
1435 return 1
1436 hg.clean(r, qpp[0])
1436 hg.clean(r, qpp[0])
1437
1437
1438 def save(self, repo, msg=None):
1438 def save(self, repo, msg=None):
1439 if len(self.applied) == 0:
1439 if len(self.applied) == 0:
1440 self.ui.warn(_("save: no patches applied, exiting\n"))
1440 self.ui.warn(_("save: no patches applied, exiting\n"))
1441 return 1
1441 return 1
1442 if self.issaveline(self.applied[-1]):
1442 if self.issaveline(self.applied[-1]):
1443 self.ui.warn(_("status is already saved\n"))
1443 self.ui.warn(_("status is already saved\n"))
1444 return 1
1444 return 1
1445
1445
1446 ar = [ ':' + x for x in self.full_series ]
1446 ar = [ ':' + x for x in self.full_series ]
1447 if not msg:
1447 if not msg:
1448 msg = _("hg patches saved state")
1448 msg = _("hg patches saved state")
1449 else:
1449 else:
1450 msg = "hg patches: " + msg.rstrip('\r\n')
1450 msg = "hg patches: " + msg.rstrip('\r\n')
1451 r = self.qrepo()
1451 r = self.qrepo()
1452 if r:
1452 if r:
1453 pp = r.dirstate.parents()
1453 pp = r.dirstate.parents()
1454 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1454 msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
1455 msg += "\n\nPatch Data:\n"
1455 msg += "\n\nPatch Data:\n"
1456 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1456 text = msg + "\n".join([str(x) for x in self.applied]) + '\n' + (ar and
1457 "\n".join(ar) + '\n' or "")
1457 "\n".join(ar) + '\n' or "")
1458 n = repo.commit(None, text, user=None, force=1)
1458 n = repo.commit(None, text, user=None, force=1)
1459 if not n:
1459 if not n:
1460 self.ui.warn(_("repo commit failed\n"))
1460 self.ui.warn(_("repo commit failed\n"))
1461 return 1
1461 return 1
1462 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1462 self.applied.append(statusentry(hex(n),'.hg.patches.save.line'))
1463 self.applied_dirty = 1
1463 self.applied_dirty = 1
1464 self.removeundo(repo)
1464 self.removeundo(repo)
1465
1465
1466 def full_series_end(self):
1466 def full_series_end(self):
1467 if len(self.applied) > 0:
1467 if len(self.applied) > 0:
1468 p = self.applied[-1].name
1468 p = self.applied[-1].name
1469 end = self.find_series(p)
1469 end = self.find_series(p)
1470 if end == None:
1470 if end == None:
1471 return len(self.full_series)
1471 return len(self.full_series)
1472 return end + 1
1472 return end + 1
1473 return 0
1473 return 0
1474
1474
1475 def series_end(self, all_patches=False):
1475 def series_end(self, all_patches=False):
1476 """If all_patches is False, return the index of the next pushable patch
1476 """If all_patches is False, return the index of the next pushable patch
1477 in the series, or the series length. If all_patches is True, return the
1477 in the series, or the series length. If all_patches is True, return the
1478 index of the first patch past the last applied one.
1478 index of the first patch past the last applied one.
1479 """
1479 """
1480 end = 0
1480 end = 0
1481 def next(start):
1481 def next(start):
1482 if all_patches:
1482 if all_patches:
1483 return start
1483 return start
1484 i = start
1484 i = start
1485 while i < len(self.series):
1485 while i < len(self.series):
1486 p, reason = self.pushable(i)
1486 p, reason = self.pushable(i)
1487 if p:
1487 if p:
1488 break
1488 break
1489 self.explain_pushable(i)
1489 self.explain_pushable(i)
1490 i += 1
1490 i += 1
1491 return i
1491 return i
1492 if len(self.applied) > 0:
1492 if len(self.applied) > 0:
1493 p = self.applied[-1].name
1493 p = self.applied[-1].name
1494 try:
1494 try:
1495 end = self.series.index(p)
1495 end = self.series.index(p)
1496 except ValueError:
1496 except ValueError:
1497 return 0
1497 return 0
1498 return next(end + 1)
1498 return next(end + 1)
1499 return next(end)
1499 return next(end)
1500
1500
1501 def appliedname(self, index):
1501 def appliedname(self, index):
1502 pname = self.applied[index].name
1502 pname = self.applied[index].name
1503 if not self.ui.verbose:
1503 if not self.ui.verbose:
1504 p = pname
1504 p = pname
1505 else:
1505 else:
1506 p = str(self.series.index(pname)) + " " + pname
1506 p = str(self.series.index(pname)) + " " + pname
1507 return p
1507 return p
1508
1508
1509 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1509 def qimport(self, repo, files, patchname=None, rev=None, existing=None,
1510 force=None, git=False):
1510 force=None, git=False):
1511 def checkseries(patchname):
1511 def checkseries(patchname):
1512 if patchname in self.series:
1512 if patchname in self.series:
1513 raise util.Abort(_('patch %s is already in the series file')
1513 raise util.Abort(_('patch %s is already in the series file')
1514 % patchname)
1514 % patchname)
1515 def checkfile(patchname):
1515 def checkfile(patchname):
1516 if not force and os.path.exists(self.join(patchname)):
1516 if not force and os.path.exists(self.join(patchname)):
1517 raise util.Abort(_('patch "%s" already exists')
1517 raise util.Abort(_('patch "%s" already exists')
1518 % patchname)
1518 % patchname)
1519
1519
1520 if rev:
1520 if rev:
1521 if files:
1521 if files:
1522 raise util.Abort(_('option "-r" not valid when importing '
1522 raise util.Abort(_('option "-r" not valid when importing '
1523 'files'))
1523 'files'))
1524 rev = cmdutil.revrange(repo, rev)
1524 rev = cmdutil.revrange(repo, rev)
1525 rev.sort(lambda x, y: cmp(y, x))
1525 rev.sort(lambda x, y: cmp(y, x))
1526 if (len(files) > 1 or len(rev) > 1) and patchname:
1526 if (len(files) > 1 or len(rev) > 1) and patchname:
1527 raise util.Abort(_('option "-n" not valid when importing multiple '
1527 raise util.Abort(_('option "-n" not valid when importing multiple '
1528 'patches'))
1528 'patches'))
1529 i = 0
1529 i = 0
1530 added = []
1530 added = []
1531 if rev:
1531 if rev:
1532 # If mq patches are applied, we can only import revisions
1532 # If mq patches are applied, we can only import revisions
1533 # that form a linear path to qbase.
1533 # that form a linear path to qbase.
1534 # Otherwise, they should form a linear path to a head.
1534 # Otherwise, they should form a linear path to a head.
1535 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1535 heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
1536 if len(heads) > 1:
1536 if len(heads) > 1:
1537 raise util.Abort(_('revision %d is the root of more than one '
1537 raise util.Abort(_('revision %d is the root of more than one '
1538 'branch') % rev[-1])
1538 'branch') % rev[-1])
1539 if self.applied:
1539 if self.applied:
1540 base = hex(repo.changelog.node(rev[0]))
1540 base = hex(repo.changelog.node(rev[0]))
1541 if base in [n.rev for n in self.applied]:
1541 if base in [n.rev for n in self.applied]:
1542 raise util.Abort(_('revision %d is already managed')
1542 raise util.Abort(_('revision %d is already managed')
1543 % rev[0])
1543 % rev[0])
1544 if heads != [bin(self.applied[-1].rev)]:
1544 if heads != [bin(self.applied[-1].rev)]:
1545 raise util.Abort(_('revision %d is not the parent of '
1545 raise util.Abort(_('revision %d is not the parent of '
1546 'the queue') % rev[0])
1546 'the queue') % rev[0])
1547 base = repo.changelog.rev(bin(self.applied[0].rev))
1547 base = repo.changelog.rev(bin(self.applied[0].rev))
1548 lastparent = repo.changelog.parentrevs(base)[0]
1548 lastparent = repo.changelog.parentrevs(base)[0]
1549 else:
1549 else:
1550 if heads != [repo.changelog.node(rev[0])]:
1550 if heads != [repo.changelog.node(rev[0])]:
1551 raise util.Abort(_('revision %d has unmanaged children')
1551 raise util.Abort(_('revision %d has unmanaged children')
1552 % rev[0])
1552 % rev[0])
1553 lastparent = None
1553 lastparent = None
1554
1554
1555 if git:
1555 if git:
1556 self.diffopts().git = True
1556 self.diffopts().git = True
1557
1557
1558 for r in rev:
1558 for r in rev:
1559 p1, p2 = repo.changelog.parentrevs(r)
1559 p1, p2 = repo.changelog.parentrevs(r)
1560 n = repo.changelog.node(r)
1560 n = repo.changelog.node(r)
1561 if p2 != nullrev:
1561 if p2 != nullrev:
1562 raise util.Abort(_('cannot import merge revision %d') % r)
1562 raise util.Abort(_('cannot import merge revision %d') % r)
1563 if lastparent and lastparent != r:
1563 if lastparent and lastparent != r:
1564 raise util.Abort(_('revision %d is not the parent of %d')
1564 raise util.Abort(_('revision %d is not the parent of %d')
1565 % (r, lastparent))
1565 % (r, lastparent))
1566 lastparent = p1
1566 lastparent = p1
1567
1567
1568 if not patchname:
1568 if not patchname:
1569 patchname = normname('%d.diff' % r)
1569 patchname = normname('%d.diff' % r)
1570 self.check_reserved_name(patchname)
1570 self.check_reserved_name(patchname)
1571 checkseries(patchname)
1571 checkseries(patchname)
1572 checkfile(patchname)
1572 checkfile(patchname)
1573 self.full_series.insert(0, patchname)
1573 self.full_series.insert(0, patchname)
1574
1574
1575 patchf = self.opener(patchname, "w")
1575 patchf = self.opener(patchname, "w")
1576 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1576 patch.export(repo, [n], fp=patchf, opts=self.diffopts())
1577 patchf.close()
1577 patchf.close()
1578
1578
1579 se = statusentry(hex(n), patchname)
1579 se = statusentry(hex(n), patchname)
1580 self.applied.insert(0, se)
1580 self.applied.insert(0, se)
1581
1581
1582 added.append(patchname)
1582 added.append(patchname)
1583 patchname = None
1583 patchname = None
1584 self.parse_series()
1584 self.parse_series()
1585 self.applied_dirty = 1
1585 self.applied_dirty = 1
1586
1586
1587 for filename in files:
1587 for filename in files:
1588 if existing:
1588 if existing:
1589 if filename == '-':
1589 if filename == '-':
1590 raise util.Abort(_('-e is incompatible with import from -'))
1590 raise util.Abort(_('-e is incompatible with import from -'))
1591 if not patchname:
1591 if not patchname:
1592 patchname = normname(filename)
1592 patchname = normname(filename)
1593 self.check_reserved_name(patchname)
1593 self.check_reserved_name(patchname)
1594 if not os.path.isfile(self.join(patchname)):
1594 if not os.path.isfile(self.join(patchname)):
1595 raise util.Abort(_("patch %s does not exist") % patchname)
1595 raise util.Abort(_("patch %s does not exist") % patchname)
1596 else:
1596 else:
1597 try:
1597 try:
1598 if filename == '-':
1598 if filename == '-':
1599 if not patchname:
1599 if not patchname:
1600 raise util.Abort(_('need --name to import a patch from -'))
1600 raise util.Abort(_('need --name to import a patch from -'))
1601 text = sys.stdin.read()
1601 text = sys.stdin.read()
1602 else:
1602 else:
1603 text = url.open(self.ui, filename).read()
1603 text = url.open(self.ui, filename).read()
1604 except (OSError, IOError):
1604 except (OSError, IOError):
1605 raise util.Abort(_("unable to read %s") % filename)
1605 raise util.Abort(_("unable to read %s") % filename)
1606 if not patchname:
1606 if not patchname:
1607 patchname = normname(os.path.basename(filename))
1607 patchname = normname(os.path.basename(filename))
1608 self.check_reserved_name(patchname)
1608 self.check_reserved_name(patchname)
1609 checkfile(patchname)
1609 checkfile(patchname)
1610 patchf = self.opener(patchname, "w")
1610 patchf = self.opener(patchname, "w")
1611 patchf.write(text)
1611 patchf.write(text)
1612 if not force:
1612 if not force:
1613 checkseries(patchname)
1613 checkseries(patchname)
1614 if patchname not in self.series:
1614 if patchname not in self.series:
1615 index = self.full_series_end() + i
1615 index = self.full_series_end() + i
1616 self.full_series[index:index] = [patchname]
1616 self.full_series[index:index] = [patchname]
1617 self.parse_series()
1617 self.parse_series()
1618 self.ui.warn(_("adding %s to series file\n") % patchname)
1618 self.ui.warn(_("adding %s to series file\n") % patchname)
1619 i += 1
1619 i += 1
1620 added.append(patchname)
1620 added.append(patchname)
1621 patchname = None
1621 patchname = None
1622 self.series_dirty = 1
1622 self.series_dirty = 1
1623 qrepo = self.qrepo()
1623 qrepo = self.qrepo()
1624 if qrepo:
1624 if qrepo:
1625 qrepo.add(added)
1625 qrepo.add(added)
1626
1626
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, unless they are arguments to the
    -r/--rev parameter. At least one patch or revision is required.

    With --rev, mq will stop managing the named revisions (converting
    them to regular mercurial changesets). The qfinish command should
    be used as an alternative for qdelete -r, as the latter option is
    deprecated.

    With -k/--keep, the patch files are preserved in the patch
    directory."""
    # delegate to the queue object, then persist its state
    mq = repo.mq
    mq.delete(repo, patches, opts)
    mq.save_dirty()
    return 0
1644
1644
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied"""
    q = repo.mq
    if not patch:
        # no argument: show everything up to the current series end
        end = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    return q.qseries(repo, length=end, status='A', summary=opts.get('summary'))
1655
1655
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied"""
    q = repo.mq
    if not patch:
        # no argument: start right after the last applied patch
        start = q.series_end(True)
    else:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    q.qseries(repo, start=start, status='U', summary=opts.get('summary'))
1666
1666
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.
    """
    # all real work happens on the queue object; persist its state after
    mq = repo.mq
    mq.qimport(repo, filename,
               patchname=opts['name'],
               existing=opts['existing'],
               force=opts['force'],
               rev=opts['rev'],
               git=opts['git'])
    mq.save_dirty()
    return 0
1700
1700
def init(ui, repo, **opts):
    """init a new queue repository

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository."""
    q = repo.mq
    r = q.init(repo, create=opts['create_repo'])
    q.save_dirty()
    if not r:
        # unversioned queue: nothing else to set up
        return 0
    # seed the nested patch repository with an .hgignore and an empty
    # series file, and schedule both for addition
    if not os.path.exists(r.wjoin('.hgignore')):
        fp = r.wopener('.hgignore', 'w')
        fp.write('^\\.hg\n'
                 '^\\.mq\n'
                 'syntax: glob\n'
                 'status\n'
                 'guards\n')
        fp.close()
    if not os.path.exists(r.wjoin('series')):
        r.wopener('series', 'w').close()
    r.add(['.hgignore', 'series'])
    commands.add(ui, r)
    return 0
1726
1726
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command can not check if patches are
    applied in source, so cannot guarantee that patches are not
    applied in destination. If you clone remote repository, be sure
    before that it has no patches applied.

    Source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change.

    The patch directory must be a nested mercurial repository, as
    would be created by qinit -c.
    '''
    def patchdir(repo):
        # default location of the nested patch repository
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    cmdutil.setremoteconfig(ui, opts)
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(ui, ui.expandpath(source))
    if opts['patches']:
        patchespath = ui.expandpath(opts['patches'])
    else:
        patchespath = patchdir(sr)
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see qinit -c)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            qbase = bin(sr.mq.applied[0].rev)
            if not hg.islocal(dest):
                # clone only the revisions that are not part of the
                # applied patches: heads not reachable without qbase,
                # plus qbase's parent (use a real set for the heads)
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            # remote has no qbase: nothing to strip later
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, sr.url(), dest,
                      pull=opts['pull'],
                      rev=destrev,
                      update=False,
                      stream=opts['uncompressed'])
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts['patches'] or patchdir(sr), patchdir(dr),
             pull=opts['pull'], update=not opts['noupdate'],
             stream=opts['uncompressed'])
    if dr.local():
        if qbase:
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, qbase, update=False, backup=None)
        if not opts['noupdate']:
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())
1793
1791
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository"""
    q = repo.mq
    r = q.qrepo()
    if not r:
        # wrap in _() for translation, consistent with every other
        # abort message in this file
        raise util.Abort(_('no queue repository'))
    commands.commit(r.ui, r, *pats, **opts)
1800
1798
def series(ui, repo, **opts):
    """print the entire series file"""
    # qseries does all the printing; -m/-s just tweak its output
    repo.mq.qseries(repo, summary=opts['summary'], missing=opts['missing'])
    return 0
1805
1803
def top(ui, repo, **opts):
    """print the name of the current patch"""
    q = repo.mq
    # 0 when nothing is applied, otherwise index just past the top patch
    t = q.applied and q.series_end(True) or 0
    if not t:
        ui.write(_("no patches applied\n"))
        return 1
    return q.qseries(repo, start=t - 1, length=1, status='A',
                     summary=opts.get('summary'))
1816
1814
def next(ui, repo, **opts):
    """print the name of the next patch"""
    q = repo.mq
    end = q.series_end()
    if end != len(q.series):
        # there is still an unapplied patch: show it
        return q.qseries(repo, start=end, length=1,
                         summary=opts.get('summary'))
    ui.write(_("all patches applied\n"))
    return 1
1825
1823
def prev(ui, repo, **opts):
    """print the name of the previous patch"""
    q = repo.mq
    count = len(q.applied)
    if not count:
        ui.write(_("no patches applied\n"))
        return 1
    if count == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    # second-to-last applied patch
    return q.qseries(repo, start=count - 2, length=1, status='A',
                     summary=opts.get('summary'))
1838
1836
def setupheaderopts(ui, opts):
    # fill in user/date from the environment when -U/-D were given
    # but no explicit -u/-d value was supplied
    def fill(key, value):
        if not opts[key] and opts['current' + key]:
            opts[key] = value
    fill('user', ui.username())
    fill('date', "%d %d" % util.makedate())
1845
1843
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). It will refuse to run if there are any outstanding changes
    unless -f/--force is specified, in which case the patch will be
    initialized with them. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.
    """
    msg = cmdutil.logmessage(opts)
    def getmsg():
        return ui.edit(msg, ui.username())
    q = repo.mq
    # with -e, pass the editor callback so the editor is only invoked
    # once the patch is actually being created (removed the redundant
    # unconditional opts['msg'] = msg that was immediately overwritten)
    if opts.get('edit'):
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.save_dirty()
    return 0
1882
1880
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.
    """
    q = repo.mq
    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        # edit the existing header of the topmost patch
        hdr = q.readheaders(q.applied[-1].name)
        message = ui.edit('\n'.join(hdr.message), hdr.user or ui.username())
    setupheaderopts(ui, opts)
    result = q.refresh(repo, pats, msg=message, **opts)
    q.save_dirty()
    return result
1913
1911
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use 'hg diff' if you only want to see the changes made since the
    last qrefresh, or 'hg export qtip' if you want to see changes made
    by the current patch without including changes made since the
    qrefresh.
    """
    # the queue object knows how to render the combined diff
    repo.mq.diff(repo, pats, opts)
    return 0
1929
1927
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of '* * *'."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.check_toppatch(repo):
        raise util.Abort(_('No patches applied'))

    message = cmdutil.logmessage(opts)
    if opts['edit']:
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_('Skipping already folded patch %s') % p)
            # actually skip the duplicate: previously the warning was
            # cosmetic and the patch was still appended, so it would
            # have been folded a second time
            continue
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s') % p)
        patches.append(p)

    for p in patches:
        if not message:
            # collect each folded patch's header for concatenation
            ph = q.readheaders(p)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('Error folding patch %s') % p)
        patch.updatedir(ui, repo, files)

    if not message:
        # build the combined header: parent's message, then each folded
        # message separated by '* * *'
        ph = q.readheaders(parent)
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts['edit']:
        message = ui.edit(message, user or ui.username())

    q.refresh(repo, msg=message)
    q.delete(repo, patches, opts)
    q.save_dirty()
1991
1989
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack'''
    q = repo.mq
    patch = q.lookup(patch)
    # pop back to the patch if it is already applied, push up otherwise
    if q.isapplied(patch):
        rc = q.pop(repo, patch, force=opts['force'])
    else:
        rc = q.push(repo, patch, force=opts['force'])
    q.save_dirty()
    return rc
2002
2000
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the qselect command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the qselect command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.
    NOTE: Specifying negative guards now requires '--'.

    To set guards on another patch:
      hg qguard -- other.patch +2.6.17 -stable
    '''
    q = repo.mq
    def status(idx):
        # print "name: guard guard ..." for series entry idx
        active = q.series_guards[idx] or ['unguarded']
        ui.write('%s: %s\n' % (q.series[idx], ' '.join(active)))
    patch = None
    args = list(args)
    if opts['list']:
        if args or opts['none']:
            raise util.Abort(_('cannot mix -l/--list with options or arguments'))
        for pos in xrange(len(q.series)):
            status(pos)
        return
    # no patch named, or first arg already looks like a guard:
    # default to the topmost applied patch
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts['none']:
        # setting guards (possibly clearing them with --none)
        pos = q.find_series(patch)
        if pos is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.set_guards(pos, args)
        q.save_dirty()
    else:
        # just printing the guards of one patch
        status(q.series.index(q.lookup(patch)))
2047
2045
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch"""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            # wrap in _() for translation, consistent with the same
            # message elsewhere in this file
            ui.write(_('no patches applied\n'))
            return 1
        patch = q.lookup('qtip')
    ph = repo.mq.readheaders(patch)

    ui.write('\n'.join(ph.message) + '\n')
2062
2060
def lastsavename(path):
    """Return (fullpath, index) of the highest-numbered save of path.

    Scans path's directory for siblings named "<base>.N" (N decimal)
    and returns the one with the largest N; (None, None) when there is
    no such file.
    """
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    # escape the base so regex metacharacters in the name are matched
    # literally, and escape the separating dot (previously a bare '.'
    # that matched any character)
    namere = re.compile(r"%s\.([0-9]+)" % re.escape(base))
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            # 'is None' instead of '== None' (identity test for None)
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)
2079
2077
def savename(path):
    """Return the next unused save name for path ("path.N+1")."""
    last, index = lastsavename(path)
    if last is None:
        # no previous saves: start numbering at 1
        index = 0
    return path + ".%d" % (index + 1)
2086
2084
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    When -f/--force is applied, all local changes in patched files
    will be lost.
    """
    q = repo.mq
    mergeq = None

    if opts['merge']:
        # locate the saved queue to merge with: explicit -n name, or
        # the most recent save of this queue
        if opts['name']:
            newpath = repo.join(opts['name'])
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    return q.push(repo, patch, force=opts['force'], list=opts['list'],
                  mergeq=mergeq, all=opts.get('all'))
2109
2107
2110 def pop(ui, repo, patch=None, **opts):
2108 def pop(ui, repo, patch=None, **opts):
2111 """pop the current patch off the stack
2109 """pop the current patch off the stack
2112
2110
2113 By default, pops off the top of the patch stack. If given a patch
2111 By default, pops off the top of the patch stack. If given a patch
2114 name, keeps popping off patches until the named patch is at the
2112 name, keeps popping off patches until the named patch is at the
2115 top of the stack.
2113 top of the stack.
2116 """
2114 """
2117 localupdate = True
2115 localupdate = True
2118 if opts['name']:
2116 if opts['name']:
2119 q = queue(ui, repo.join(""), repo.join(opts['name']))
2117 q = queue(ui, repo.join(""), repo.join(opts['name']))
2120 ui.warn(_('using patch queue: %s\n') % q.path)
2118 ui.warn(_('using patch queue: %s\n') % q.path)
2121 localupdate = False
2119 localupdate = False
2122 else:
2120 else:
2123 q = repo.mq
2121 q = repo.mq
2124 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2122 ret = q.pop(repo, patch, force=opts['force'], update=localupdate,
2125 all=opts['all'])
2123 all=opts['all'])
2126 q.save_dirty()
2124 q.save_dirty()
2127 return ret
2125 return ret
2128
2126
2129 def rename(ui, repo, patch, name=None, **opts):
2127 def rename(ui, repo, patch, name=None, **opts):
2130 """rename a patch
2128 """rename a patch
2131
2129
2132 With one argument, renames the current patch to PATCH1.
2130 With one argument, renames the current patch to PATCH1.
2133 With two arguments, renames PATCH1 to PATCH2."""
2131 With two arguments, renames PATCH1 to PATCH2."""
2134
2132
2135 q = repo.mq
2133 q = repo.mq
2136
2134
2137 if not name:
2135 if not name:
2138 name = patch
2136 name = patch
2139 patch = None
2137 patch = None
2140
2138
2141 if patch:
2139 if patch:
2142 patch = q.lookup(patch)
2140 patch = q.lookup(patch)
2143 else:
2141 else:
2144 if not q.applied:
2142 if not q.applied:
2145 ui.write(_('no patches applied\n'))
2143 ui.write(_('no patches applied\n'))
2146 return
2144 return
2147 patch = q.lookup('qtip')
2145 patch = q.lookup('qtip')
2148 absdest = q.join(name)
2146 absdest = q.join(name)
2149 if os.path.isdir(absdest):
2147 if os.path.isdir(absdest):
2150 name = normname(os.path.join(name, os.path.basename(patch)))
2148 name = normname(os.path.join(name, os.path.basename(patch)))
2151 absdest = q.join(name)
2149 absdest = q.join(name)
2152 if os.path.exists(absdest):
2150 if os.path.exists(absdest):
2153 raise util.Abort(_('%s already exists') % absdest)
2151 raise util.Abort(_('%s already exists') % absdest)
2154
2152
2155 if name in q.series:
2153 if name in q.series:
2156 raise util.Abort(_('A patch named %s already exists in the series file') % name)
2154 raise util.Abort(_('A patch named %s already exists in the series file') % name)
2157
2155
2158 if ui.verbose:
2156 if ui.verbose:
2159 ui.write('renaming %s to %s\n' % (patch, name))
2157 ui.write('renaming %s to %s\n' % (patch, name))
2160 i = q.find_series(patch)
2158 i = q.find_series(patch)
2161 guards = q.guard_re.findall(q.full_series[i])
2159 guards = q.guard_re.findall(q.full_series[i])
2162 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2160 q.full_series[i] = name + ''.join([' #' + g for g in guards])
2163 q.parse_series()
2161 q.parse_series()
2164 q.series_dirty = 1
2162 q.series_dirty = 1
2165
2163
2166 info = q.isapplied(patch)
2164 info = q.isapplied(patch)
2167 if info:
2165 if info:
2168 q.applied[info[0]] = statusentry(info[1], name)
2166 q.applied[info[0]] = statusentry(info[1], name)
2169 q.applied_dirty = 1
2167 q.applied_dirty = 1
2170
2168
2171 util.rename(q.join(patch), absdest)
2169 util.rename(q.join(patch), absdest)
2172 r = q.qrepo()
2170 r = q.qrepo()
2173 if r:
2171 if r:
2174 wlock = r.wlock()
2172 wlock = r.wlock()
2175 try:
2173 try:
2176 if r.dirstate[patch] == 'a':
2174 if r.dirstate[patch] == 'a':
2177 r.dirstate.forget(patch)
2175 r.dirstate.forget(patch)
2178 r.dirstate.add(name)
2176 r.dirstate.add(name)
2179 else:
2177 else:
2180 if r.dirstate[name] == 'r':
2178 if r.dirstate[name] == 'r':
2181 r.undelete([name])
2179 r.undelete([name])
2182 r.copy(patch, name)
2180 r.copy(patch, name)
2183 r.remove([patch], False)
2181 r.remove([patch], False)
2184 finally:
2182 finally:
2185 wlock.release()
2183 wlock.release()
2186
2184
2187 q.save_dirty()
2185 q.save_dirty()
2188
2186
2189 def restore(ui, repo, rev, **opts):
2187 def restore(ui, repo, rev, **opts):
2190 """restore the queue state saved by a revision"""
2188 """restore the queue state saved by a revision"""
2191 rev = repo.lookup(rev)
2189 rev = repo.lookup(rev)
2192 q = repo.mq
2190 q = repo.mq
2193 q.restore(repo, rev, delete=opts['delete'],
2191 q.restore(repo, rev, delete=opts['delete'],
2194 qupdate=opts['update'])
2192 qupdate=opts['update'])
2195 q.save_dirty()
2193 q.save_dirty()
2196 return 0
2194 return 0
2197
2195
2198 def save(ui, repo, **opts):
2196 def save(ui, repo, **opts):
2199 """save current queue state"""
2197 """save current queue state"""
2200 q = repo.mq
2198 q = repo.mq
2201 message = cmdutil.logmessage(opts)
2199 message = cmdutil.logmessage(opts)
2202 ret = q.save(repo, msg=message)
2200 ret = q.save(repo, msg=message)
2203 if ret:
2201 if ret:
2204 return ret
2202 return ret
2205 q.save_dirty()
2203 q.save_dirty()
2206 if opts['copy']:
2204 if opts['copy']:
2207 path = q.path
2205 path = q.path
2208 if opts['name']:
2206 if opts['name']:
2209 newpath = os.path.join(q.basepath, opts['name'])
2207 newpath = os.path.join(q.basepath, opts['name'])
2210 if os.path.exists(newpath):
2208 if os.path.exists(newpath):
2211 if not os.path.isdir(newpath):
2209 if not os.path.isdir(newpath):
2212 raise util.Abort(_('destination %s exists and is not '
2210 raise util.Abort(_('destination %s exists and is not '
2213 'a directory') % newpath)
2211 'a directory') % newpath)
2214 if not opts['force']:
2212 if not opts['force']:
2215 raise util.Abort(_('destination %s exists, '
2213 raise util.Abort(_('destination %s exists, '
2216 'use -f to force') % newpath)
2214 'use -f to force') % newpath)
2217 else:
2215 else:
2218 newpath = savename(path)
2216 newpath = savename(path)
2219 ui.warn(_("copy %s to %s\n") % (path, newpath))
2217 ui.warn(_("copy %s to %s\n") % (path, newpath))
2220 util.copyfiles(path, newpath)
2218 util.copyfiles(path, newpath)
2221 if opts['empty']:
2219 if opts['empty']:
2222 try:
2220 try:
2223 os.unlink(q.join(q.status_path))
2221 os.unlink(q.join(q.status_path))
2224 except:
2222 except:
2225 pass
2223 pass
2226 return 0
2224 return 0
2227
2225
2228 def strip(ui, repo, rev, **opts):
2226 def strip(ui, repo, rev, **opts):
2229 """strip a revision and all its descendants from the repository
2227 """strip a revision and all its descendants from the repository
2230
2228
2231 If one of the working directory's parent revisions is stripped, the
2229 If one of the working directory's parent revisions is stripped, the
2232 working directory will be updated to the parent of the stripped
2230 working directory will be updated to the parent of the stripped
2233 revision.
2231 revision.
2234 """
2232 """
2235 backup = 'all'
2233 backup = 'all'
2236 if opts['backup']:
2234 if opts['backup']:
2237 backup = 'strip'
2235 backup = 'strip'
2238 elif opts['nobackup']:
2236 elif opts['nobackup']:
2239 backup = 'none'
2237 backup = 'none'
2240
2238
2241 rev = repo.lookup(rev)
2239 rev = repo.lookup(rev)
2242 p = repo.dirstate.parents()
2240 p = repo.dirstate.parents()
2243 cl = repo.changelog
2241 cl = repo.changelog
2244 update = True
2242 update = True
2245 if p[0] == nullid:
2243 if p[0] == nullid:
2246 update = False
2244 update = False
2247 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2245 elif p[1] == nullid and rev != cl.ancestor(p[0], rev):
2248 update = False
2246 update = False
2249 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2247 elif rev not in (cl.ancestor(p[0], rev), cl.ancestor(p[1], rev)):
2250 update = False
2248 update = False
2251
2249
2252 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2250 repo.mq.strip(repo, rev, backup=backup, update=update, force=opts['force'])
2253 return 0
2251 return 0
2254
2252
2255 def select(ui, repo, *args, **opts):
2253 def select(ui, repo, *args, **opts):
2256 '''set or print guarded patches to push
2254 '''set or print guarded patches to push
2257
2255
2258 Use the qguard command to set or print guards on patch, then use
2256 Use the qguard command to set or print guards on patch, then use
2259 qselect to tell mq which guards to use. A patch will be pushed if
2257 qselect to tell mq which guards to use. A patch will be pushed if
2260 it has no guards or any positive guards match the currently
2258 it has no guards or any positive guards match the currently
2261 selected guard, but will not be pushed if any negative guards
2259 selected guard, but will not be pushed if any negative guards
2262 match the current guard. For example:
2260 match the current guard. For example:
2263
2261
2264 qguard foo.patch -stable (negative guard)
2262 qguard foo.patch -stable (negative guard)
2265 qguard bar.patch +stable (positive guard)
2263 qguard bar.patch +stable (positive guard)
2266 qselect stable
2264 qselect stable
2267
2265
2268 This activates the "stable" guard. mq will skip foo.patch (because
2266 This activates the "stable" guard. mq will skip foo.patch (because
2269 it has a negative match) but push bar.patch (because it has a
2267 it has a negative match) but push bar.patch (because it has a
2270 positive match).
2268 positive match).
2271
2269
2272 With no arguments, prints the currently active guards.
2270 With no arguments, prints the currently active guards.
2273 With one argument, sets the active guard.
2271 With one argument, sets the active guard.
2274
2272
2275 Use -n/--none to deactivate guards (no other arguments needed).
2273 Use -n/--none to deactivate guards (no other arguments needed).
2276 When no guards are active, patches with positive guards are
2274 When no guards are active, patches with positive guards are
2277 skipped and patches with negative guards are pushed.
2275 skipped and patches with negative guards are pushed.
2278
2276
2279 qselect can change the guards on applied patches. It does not pop
2277 qselect can change the guards on applied patches. It does not pop
2280 guarded patches by default. Use --pop to pop back to the last
2278 guarded patches by default. Use --pop to pop back to the last
2281 applied patch that is not guarded. Use --reapply (which implies
2279 applied patch that is not guarded. Use --reapply (which implies
2282 --pop) to push back to the current patch afterwards, but skip
2280 --pop) to push back to the current patch afterwards, but skip
2283 guarded patches.
2281 guarded patches.
2284
2282
2285 Use -s/--series to print a list of all guards in the series file
2283 Use -s/--series to print a list of all guards in the series file
2286 (no other arguments needed). Use -v for more information.'''
2284 (no other arguments needed). Use -v for more information.'''
2287
2285
2288 q = repo.mq
2286 q = repo.mq
2289 guards = q.active()
2287 guards = q.active()
2290 if args or opts['none']:
2288 if args or opts['none']:
2291 old_unapplied = q.unapplied(repo)
2289 old_unapplied = q.unapplied(repo)
2292 old_guarded = [i for i in xrange(len(q.applied)) if
2290 old_guarded = [i for i in xrange(len(q.applied)) if
2293 not q.pushable(i)[0]]
2291 not q.pushable(i)[0]]
2294 q.set_active(args)
2292 q.set_active(args)
2295 q.save_dirty()
2293 q.save_dirty()
2296 if not args:
2294 if not args:
2297 ui.status(_('guards deactivated\n'))
2295 ui.status(_('guards deactivated\n'))
2298 if not opts['pop'] and not opts['reapply']:
2296 if not opts['pop'] and not opts['reapply']:
2299 unapplied = q.unapplied(repo)
2297 unapplied = q.unapplied(repo)
2300 guarded = [i for i in xrange(len(q.applied))
2298 guarded = [i for i in xrange(len(q.applied))
2301 if not q.pushable(i)[0]]
2299 if not q.pushable(i)[0]]
2302 if len(unapplied) != len(old_unapplied):
2300 if len(unapplied) != len(old_unapplied):
2303 ui.status(_('number of unguarded, unapplied patches has '
2301 ui.status(_('number of unguarded, unapplied patches has '
2304 'changed from %d to %d\n') %
2302 'changed from %d to %d\n') %
2305 (len(old_unapplied), len(unapplied)))
2303 (len(old_unapplied), len(unapplied)))
2306 if len(guarded) != len(old_guarded):
2304 if len(guarded) != len(old_guarded):
2307 ui.status(_('number of guarded, applied patches has changed '
2305 ui.status(_('number of guarded, applied patches has changed '
2308 'from %d to %d\n') %
2306 'from %d to %d\n') %
2309 (len(old_guarded), len(guarded)))
2307 (len(old_guarded), len(guarded)))
2310 elif opts['series']:
2308 elif opts['series']:
2311 guards = {}
2309 guards = {}
2312 noguards = 0
2310 noguards = 0
2313 for gs in q.series_guards:
2311 for gs in q.series_guards:
2314 if not gs:
2312 if not gs:
2315 noguards += 1
2313 noguards += 1
2316 for g in gs:
2314 for g in gs:
2317 guards.setdefault(g, 0)
2315 guards.setdefault(g, 0)
2318 guards[g] += 1
2316 guards[g] += 1
2319 if ui.verbose:
2317 if ui.verbose:
2320 guards['NONE'] = noguards
2318 guards['NONE'] = noguards
2321 guards = guards.items()
2319 guards = guards.items()
2322 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2320 guards.sort(lambda a, b: cmp(a[0][1:], b[0][1:]))
2323 if guards:
2321 if guards:
2324 ui.note(_('guards in series file:\n'))
2322 ui.note(_('guards in series file:\n'))
2325 for guard, count in guards:
2323 for guard, count in guards:
2326 ui.note('%2d ' % count)
2324 ui.note('%2d ' % count)
2327 ui.write(guard, '\n')
2325 ui.write(guard, '\n')
2328 else:
2326 else:
2329 ui.note(_('no guards in series file\n'))
2327 ui.note(_('no guards in series file\n'))
2330 else:
2328 else:
2331 if guards:
2329 if guards:
2332 ui.note(_('active guards:\n'))
2330 ui.note(_('active guards:\n'))
2333 for g in guards:
2331 for g in guards:
2334 ui.write(g, '\n')
2332 ui.write(g, '\n')
2335 else:
2333 else:
2336 ui.write(_('no active guards\n'))
2334 ui.write(_('no active guards\n'))
2337 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2335 reapply = opts['reapply'] and q.applied and q.appliedname(-1)
2338 popped = False
2336 popped = False
2339 if opts['pop'] or opts['reapply']:
2337 if opts['pop'] or opts['reapply']:
2340 for i in xrange(len(q.applied)):
2338 for i in xrange(len(q.applied)):
2341 pushable, reason = q.pushable(i)
2339 pushable, reason = q.pushable(i)
2342 if not pushable:
2340 if not pushable:
2343 ui.status(_('popping guarded patches\n'))
2341 ui.status(_('popping guarded patches\n'))
2344 popped = True
2342 popped = True
2345 if i == 0:
2343 if i == 0:
2346 q.pop(repo, all=True)
2344 q.pop(repo, all=True)
2347 else:
2345 else:
2348 q.pop(repo, i-1)
2346 q.pop(repo, i-1)
2349 break
2347 break
2350 if popped:
2348 if popped:
2351 try:
2349 try:
2352 if reapply:
2350 if reapply:
2353 ui.status(_('reapplying unguarded patches\n'))
2351 ui.status(_('reapplying unguarded patches\n'))
2354 q.push(repo, reapply)
2352 q.push(repo, reapply)
2355 finally:
2353 finally:
2356 q.save_dirty()
2354 q.save_dirty()
2357
2355
2358 def finish(ui, repo, *revrange, **opts):
2356 def finish(ui, repo, *revrange, **opts):
2359 """move applied patches into repository history
2357 """move applied patches into repository history
2360
2358
2361 Finishes the specified revisions (corresponding to applied
2359 Finishes the specified revisions (corresponding to applied
2362 patches) by moving them out of mq control into regular repository
2360 patches) by moving them out of mq control into regular repository
2363 history.
2361 history.
2364
2362
2365 Accepts a revision range or the -a/--applied option. If --applied
2363 Accepts a revision range or the -a/--applied option. If --applied
2366 is specified, all applied mq revisions are removed from mq
2364 is specified, all applied mq revisions are removed from mq
2367 control. Otherwise, the given revisions must be at the base of the
2365 control. Otherwise, the given revisions must be at the base of the
2368 stack of applied patches.
2366 stack of applied patches.
2369
2367
2370 This can be especially useful if your changes have been applied to
2368 This can be especially useful if your changes have been applied to
2371 an upstream repository, or if you are about to push your changes
2369 an upstream repository, or if you are about to push your changes
2372 to upstream.
2370 to upstream.
2373 """
2371 """
2374 if not opts['applied'] and not revrange:
2372 if not opts['applied'] and not revrange:
2375 raise util.Abort(_('no revisions specified'))
2373 raise util.Abort(_('no revisions specified'))
2376 elif opts['applied']:
2374 elif opts['applied']:
2377 revrange = ('qbase:qtip',) + revrange
2375 revrange = ('qbase:qtip',) + revrange
2378
2376
2379 q = repo.mq
2377 q = repo.mq
2380 if not q.applied:
2378 if not q.applied:
2381 ui.status(_('no patches applied\n'))
2379 ui.status(_('no patches applied\n'))
2382 return 0
2380 return 0
2383
2381
2384 revs = cmdutil.revrange(repo, revrange)
2382 revs = cmdutil.revrange(repo, revrange)
2385 q.finish(repo, revs)
2383 q.finish(repo, revs)
2386 q.save_dirty()
2384 q.save_dirty()
2387 return 0
2385 return 0
2388
2386
2389 def reposetup(ui, repo):
2387 def reposetup(ui, repo):
2390 class mqrepo(repo.__class__):
2388 class mqrepo(repo.__class__):
2391 def abort_if_wdir_patched(self, errmsg, force=False):
2389 def abort_if_wdir_patched(self, errmsg, force=False):
2392 if self.mq.applied and not force:
2390 if self.mq.applied and not force:
2393 parent = hex(self.dirstate.parents()[0])
2391 parent = hex(self.dirstate.parents()[0])
2394 if parent in [s.rev for s in self.mq.applied]:
2392 if parent in [s.rev for s in self.mq.applied]:
2395 raise util.Abort(errmsg)
2393 raise util.Abort(errmsg)
2396
2394
2397 def commit(self, *args, **opts):
2395 def commit(self, *args, **opts):
2398 if len(args) >= 6:
2396 if len(args) >= 6:
2399 force = args[5]
2397 force = args[5]
2400 else:
2398 else:
2401 force = opts.get('force')
2399 force = opts.get('force')
2402 self.abort_if_wdir_patched(
2400 self.abort_if_wdir_patched(
2403 _('cannot commit over an applied mq patch'),
2401 _('cannot commit over an applied mq patch'),
2404 force)
2402 force)
2405
2403
2406 return super(mqrepo, self).commit(*args, **opts)
2404 return super(mqrepo, self).commit(*args, **opts)
2407
2405
2408 def push(self, remote, force=False, revs=None):
2406 def push(self, remote, force=False, revs=None):
2409 if self.mq.applied and not force and not revs:
2407 if self.mq.applied and not force and not revs:
2410 raise util.Abort(_('source has mq patches applied'))
2408 raise util.Abort(_('source has mq patches applied'))
2411 return super(mqrepo, self).push(remote, force, revs)
2409 return super(mqrepo, self).push(remote, force, revs)
2412
2410
2413 def tags(self):
2411 def tags(self):
2414 if self.tagscache:
2412 if self.tagscache:
2415 return self.tagscache
2413 return self.tagscache
2416
2414
2417 tagscache = super(mqrepo, self).tags()
2415 tagscache = super(mqrepo, self).tags()
2418
2416
2419 q = self.mq
2417 q = self.mq
2420 if not q.applied:
2418 if not q.applied:
2421 return tagscache
2419 return tagscache
2422
2420
2423 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2421 mqtags = [(bin(patch.rev), patch.name) for patch in q.applied]
2424
2422
2425 if mqtags[-1][0] not in self.changelog.nodemap:
2423 if mqtags[-1][0] not in self.changelog.nodemap:
2426 self.ui.warn(_('mq status file refers to unknown node %s\n')
2424 self.ui.warn(_('mq status file refers to unknown node %s\n')
2427 % short(mqtags[-1][0]))
2425 % short(mqtags[-1][0]))
2428 return tagscache
2426 return tagscache
2429
2427
2430 mqtags.append((mqtags[-1][0], 'qtip'))
2428 mqtags.append((mqtags[-1][0], 'qtip'))
2431 mqtags.append((mqtags[0][0], 'qbase'))
2429 mqtags.append((mqtags[0][0], 'qbase'))
2432 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2430 mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
2433 for patch in mqtags:
2431 for patch in mqtags:
2434 if patch[1] in tagscache:
2432 if patch[1] in tagscache:
2435 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2433 self.ui.warn(_('Tag %s overrides mq patch of the same name\n')
2436 % patch[1])
2434 % patch[1])
2437 else:
2435 else:
2438 tagscache[patch[1]] = patch[0]
2436 tagscache[patch[1]] = patch[0]
2439
2437
2440 return tagscache
2438 return tagscache
2441
2439
2442 def _branchtags(self, partial, lrev):
2440 def _branchtags(self, partial, lrev):
2443 q = self.mq
2441 q = self.mq
2444 if not q.applied:
2442 if not q.applied:
2445 return super(mqrepo, self)._branchtags(partial, lrev)
2443 return super(mqrepo, self)._branchtags(partial, lrev)
2446
2444
2447 cl = self.changelog
2445 cl = self.changelog
2448 qbasenode = bin(q.applied[0].rev)
2446 qbasenode = bin(q.applied[0].rev)
2449 if qbasenode not in cl.nodemap:
2447 if qbasenode not in cl.nodemap:
2450 self.ui.warn(_('mq status file refers to unknown node %s\n')
2448 self.ui.warn(_('mq status file refers to unknown node %s\n')
2451 % short(qbasenode))
2449 % short(qbasenode))
2452 return super(mqrepo, self)._branchtags(partial, lrev)
2450 return super(mqrepo, self)._branchtags(partial, lrev)
2453
2451
2454 qbase = cl.rev(qbasenode)
2452 qbase = cl.rev(qbasenode)
2455 start = lrev + 1
2453 start = lrev + 1
2456 if start < qbase:
2454 if start < qbase:
2457 # update the cache (excluding the patches) and save it
2455 # update the cache (excluding the patches) and save it
2458 self._updatebranchcache(partial, lrev+1, qbase)
2456 self._updatebranchcache(partial, lrev+1, qbase)
2459 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2457 self._writebranchcache(partial, cl.node(qbase-1), qbase-1)
2460 start = qbase
2458 start = qbase
2461 # if start = qbase, the cache is as updated as it should be.
2459 # if start = qbase, the cache is as updated as it should be.
2462 # if start > qbase, the cache includes (part of) the patches.
2460 # if start > qbase, the cache includes (part of) the patches.
2463 # we might as well use it, but we won't save it.
2461 # we might as well use it, but we won't save it.
2464
2462
2465 # update the cache up to the tip
2463 # update the cache up to the tip
2466 self._updatebranchcache(partial, start, len(cl))
2464 self._updatebranchcache(partial, start, len(cl))
2467
2465
2468 return partial
2466 return partial
2469
2467
2470 if repo.local():
2468 if repo.local():
2471 repo.__class__ = mqrepo
2469 repo.__class__ = mqrepo
2472 repo.mq = queue(ui, repo.join(""))
2470 repo.mq = queue(ui, repo.join(""))
2473
2471
2474 def mqimport(orig, ui, repo, *args, **kwargs):
2472 def mqimport(orig, ui, repo, *args, **kwargs):
2475 if hasattr(repo, 'abort_if_wdir_patched'):
2473 if hasattr(repo, 'abort_if_wdir_patched'):
2476 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2474 repo.abort_if_wdir_patched(_('cannot import over an applied patch'),
2477 kwargs.get('force'))
2475 kwargs.get('force'))
2478 return orig(ui, repo, *args, **kwargs)
2476 return orig(ui, repo, *args, **kwargs)
2479
2477
2480 def uisetup(ui):
2478 def uisetup(ui):
2481 extensions.wrapcommand(commands.table, 'import', mqimport)
2479 extensions.wrapcommand(commands.table, 'import', mqimport)
2482
2480
2483 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2481 seriesopts = [('s', 'summary', None, _('print first line of patch header'))]
2484
2482
2485 cmdtable = {
2483 cmdtable = {
2486 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2484 "qapplied": (applied, [] + seriesopts, _('hg qapplied [-s] [PATCH]')),
2487 "qclone":
2485 "qclone":
2488 (clone,
2486 (clone,
2489 [('', 'pull', None, _('use pull protocol to copy metadata')),
2487 [('', 'pull', None, _('use pull protocol to copy metadata')),
2490 ('U', 'noupdate', None, _('do not update the new working directories')),
2488 ('U', 'noupdate', None, _('do not update the new working directories')),
2491 ('', 'uncompressed', None,
2489 ('', 'uncompressed', None,
2492 _('use uncompressed transfer (fast over LAN)')),
2490 _('use uncompressed transfer (fast over LAN)')),
2493 ('p', 'patches', '', _('location of source patch repository')),
2491 ('p', 'patches', '', _('location of source patch repository')),
2494 ] + commands.remoteopts,
2492 ] + commands.remoteopts,
2495 _('hg qclone [OPTION]... SOURCE [DEST]')),
2493 _('hg qclone [OPTION]... SOURCE [DEST]')),
2496 "qcommit|qci":
2494 "qcommit|qci":
2497 (commit,
2495 (commit,
2498 commands.table["^commit|ci"][1],
2496 commands.table["^commit|ci"][1],
2499 _('hg qcommit [OPTION]... [FILE]...')),
2497 _('hg qcommit [OPTION]... [FILE]...')),
2500 "^qdiff":
2498 "^qdiff":
2501 (diff,
2499 (diff,
2502 commands.diffopts + commands.diffopts2 + commands.walkopts,
2500 commands.diffopts + commands.diffopts2 + commands.walkopts,
2503 _('hg qdiff [OPTION]... [FILE]...')),
2501 _('hg qdiff [OPTION]... [FILE]...')),
2504 "qdelete|qremove|qrm":
2502 "qdelete|qremove|qrm":
2505 (delete,
2503 (delete,
2506 [('k', 'keep', None, _('keep patch file')),
2504 [('k', 'keep', None, _('keep patch file')),
2507 ('r', 'rev', [], _('stop managing a revision'))],
2505 ('r', 'rev', [], _('stop managing a revision'))],
2508 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2506 _('hg qdelete [-k] [-r REV]... [PATCH]...')),
2509 'qfold':
2507 'qfold':
2510 (fold,
2508 (fold,
2511 [('e', 'edit', None, _('edit patch header')),
2509 [('e', 'edit', None, _('edit patch header')),
2512 ('k', 'keep', None, _('keep folded patch files')),
2510 ('k', 'keep', None, _('keep folded patch files')),
2513 ] + commands.commitopts,
2511 ] + commands.commitopts,
2514 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2512 _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...')),
2515 'qgoto':
2513 'qgoto':
2516 (goto,
2514 (goto,
2517 [('f', 'force', None, _('overwrite any local changes'))],
2515 [('f', 'force', None, _('overwrite any local changes'))],
2518 _('hg qgoto [OPTION]... PATCH')),
2516 _('hg qgoto [OPTION]... PATCH')),
2519 'qguard':
2517 'qguard':
2520 (guard,
2518 (guard,
2521 [('l', 'list', None, _('list all patches and guards')),
2519 [('l', 'list', None, _('list all patches and guards')),
2522 ('n', 'none', None, _('drop all guards'))],
2520 ('n', 'none', None, _('drop all guards'))],
2523 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2521 _('hg qguard [-l] [-n] -- [PATCH] [+GUARD]... [-GUARD]...')),
2524 'qheader': (header, [], _('hg qheader [PATCH]')),
2522 'qheader': (header, [], _('hg qheader [PATCH]')),
2525 "^qimport":
2523 "^qimport":
2526 (qimport,
2524 (qimport,
2527 [('e', 'existing', None, _('import file in patch directory')),
2525 [('e', 'existing', None, _('import file in patch directory')),
2528 ('n', 'name', '', _('patch file name')),
2526 ('n', 'name', '', _('patch file name')),
2529 ('f', 'force', None, _('overwrite existing files')),
2527 ('f', 'force', None, _('overwrite existing files')),
2530 ('r', 'rev', [], _('place existing revisions under mq control')),
2528 ('r', 'rev', [], _('place existing revisions under mq control')),
2531 ('g', 'git', None, _('use git extended diff format'))],
2529 ('g', 'git', None, _('use git extended diff format'))],
2532 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2530 _('hg qimport [-e] [-n NAME] [-f] [-g] [-r REV]... FILE...')),
2533 "^qinit":
2531 "^qinit":
2534 (init,
2532 (init,
2535 [('c', 'create-repo', None, _('create queue repository'))],
2533 [('c', 'create-repo', None, _('create queue repository'))],
2536 _('hg qinit [-c]')),
2534 _('hg qinit [-c]')),
2537 "qnew":
2535 "qnew":
2538 (new,
2536 (new,
2539 [('e', 'edit', None, _('edit commit message')),
2537 [('e', 'edit', None, _('edit commit message')),
2540 ('f', 'force', None, _('import uncommitted changes into patch')),
2538 ('f', 'force', None, _('import uncommitted changes into patch')),
2541 ('g', 'git', None, _('use git extended diff format')),
2539 ('g', 'git', None, _('use git extended diff format')),
2542 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2540 ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
2543 ('u', 'user', '', _('add "From: <given user>" to patch')),
2541 ('u', 'user', '', _('add "From: <given user>" to patch')),
2544 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2542 ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
2545 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2543 ('d', 'date', '', _('add "Date: <given date>" to patch'))
2546 ] + commands.walkopts + commands.commitopts,
2544 ] + commands.walkopts + commands.commitopts,
2547 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2545 _('hg qnew [-e] [-m TEXT] [-l FILE] [-f] PATCH [FILE]...')),
2548 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2546 "qnext": (next, [] + seriesopts, _('hg qnext [-s]')),
2549 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2547 "qprev": (prev, [] + seriesopts, _('hg qprev [-s]')),
2550 "^qpop":
2548 "^qpop":
2551 (pop,
2549 (pop,
2552 [('a', 'all', None, _('pop all patches')),
2550 [('a', 'all', None, _('pop all patches')),
2553 ('n', 'name', '', _('queue name to pop')),
2551 ('n', 'name', '', _('queue name to pop')),
2554 ('f', 'force', None, _('forget any local changes'))],
2552 ('f', 'force', None, _('forget any local changes'))],
2555 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2553 _('hg qpop [-a] [-n NAME] [-f] [PATCH | INDEX]')),
2556 "^qpush":
2554 "^qpush":
2557 (push,
2555 (push,
2558 [('f', 'force', None, _('apply if the patch has rejects')),
2556 [('f', 'force', None, _('apply if the patch has rejects')),
2559 ('l', 'list', None, _('list patch name in commit text')),
2557 ('l', 'list', None, _('list patch name in commit text')),
2560 ('a', 'all', None, _('apply all patches')),
2558 ('a', 'all', None, _('apply all patches')),
2561 ('m', 'merge', None, _('merge from another queue')),
2559 ('m', 'merge', None, _('merge from another queue')),
2562 ('n', 'name', '', _('merge queue name'))],
2560 ('n', 'name', '', _('merge queue name'))],
2563 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2561 _('hg qpush [-f] [-l] [-a] [-m] [-n NAME] [PATCH | INDEX]')),
2564 "^qrefresh":
2562 "^qrefresh":
2565 (refresh,
2563 (refresh,
2566 [('e', 'edit', None, _('edit commit message')),
2564 [('e', 'edit', None, _('edit commit message')),
2567 ('g', 'git', None, _('use git extended diff format')),
2565 ('g', 'git', None, _('use git extended diff format')),
2568 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2566 ('s', 'short', None, _('refresh only files already in the patch and specified files')),
2569 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2567 ('U', 'currentuser', None, _('add/update "From: <current user>" in patch')),
2570 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2568 ('u', 'user', '', _('add/update "From: <given user>" in patch')),
2571 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2569 ('D', 'currentdate', None, _('update "Date: <current date>" in patch (if present)')),
2572 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2570 ('d', 'date', '', _('update "Date: <given date>" in patch (if present)'))
2573 ] + commands.walkopts + commands.commitopts,
2571 ] + commands.walkopts + commands.commitopts,
2574 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2572 _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...')),
2575 'qrename|qmv':
2573 'qrename|qmv':
2576 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2574 (rename, [], _('hg qrename PATCH1 [PATCH2]')),
2577 "qrestore":
2575 "qrestore":
2578 (restore,
2576 (restore,
2579 [('d', 'delete', None, _('delete save entry')),
2577 [('d', 'delete', None, _('delete save entry')),
2580 ('u', 'update', None, _('update queue working directory'))],
2578 ('u', 'update', None, _('update queue working directory'))],
2581 _('hg qrestore [-d] [-u] REV')),
2579 _('hg qrestore [-d] [-u] REV')),
2582 "qsave":
2580 "qsave":
2583 (save,
2581 (save,
2584 [('c', 'copy', None, _('copy patch directory')),
2582 [('c', 'copy', None, _('copy patch directory')),
2585 ('n', 'name', '', _('copy directory name')),
2583 ('n', 'name', '', _('copy directory name')),
2586 ('e', 'empty', None, _('clear queue status file')),
2584 ('e', 'empty', None, _('clear queue status file')),
2587 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2585 ('f', 'force', None, _('force copy'))] + commands.commitopts,
2588 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2586 _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]')),
2589 "qselect":
2587 "qselect":
2590 (select,
2588 (select,
2591 [('n', 'none', None, _('disable all guards')),
2589 [('n', 'none', None, _('disable all guards')),
2592 ('s', 'series', None, _('list all guards in series file')),
2590 ('s', 'series', None, _('list all guards in series file')),
2593 ('', 'pop', None, _('pop to before first guarded applied patch')),
2591 ('', 'pop', None, _('pop to before first guarded applied patch')),
2594 ('', 'reapply', None, _('pop, then reapply patches'))],
2592 ('', 'reapply', None, _('pop, then reapply patches'))],
2595 _('hg qselect [OPTION]... [GUARD]...')),
2593 _('hg qselect [OPTION]... [GUARD]...')),
2596 "qseries":
2594 "qseries":
2597 (series,
2595 (series,
2598 [('m', 'missing', None, _('print patches not in series')),
2596 [('m', 'missing', None, _('print patches not in series')),
2599 ] + seriesopts,
2597 ] + seriesopts,
2600 _('hg qseries [-ms]')),
2598 _('hg qseries [-ms]')),
2601 "^strip":
2599 "^strip":
2602 (strip,
2600 (strip,
2603 [('f', 'force', None, _('force removal with local changes')),
2601 [('f', 'force', None, _('force removal with local changes')),
2604 ('b', 'backup', None, _('bundle unrelated changesets')),
2602 ('b', 'backup', None, _('bundle unrelated changesets')),
2605 ('n', 'nobackup', None, _('no backups'))],
2603 ('n', 'nobackup', None, _('no backups'))],
2606 _('hg strip [-f] [-b] [-n] REV')),
2604 _('hg strip [-f] [-b] [-n] REV')),
2607 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2605 "qtop": (top, [] + seriesopts, _('hg qtop [-s]')),
2608 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2606 "qunapplied": (unapplied, [] + seriesopts, _('hg qunapplied [-s] [PATCH]')),
2609 "qfinish":
2607 "qfinish":
2610 (finish,
2608 (finish,
2611 [('a', 'applied', None, _('finish all applied changesets'))],
2609 [('a', 'applied', None, _('finish all applied changesets'))],
2612 _('hg qfinish [-a] [REV...]')),
2610 _('hg qfinish [-a] [REV...]')),
2613 }
2611 }
@@ -1,538 +1,538 b''
1 # record.py
1 # record.py
2 #
2 #
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
3 # Copyright 2007 Bryan O'Sullivan <bos@serpentine.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of
5 # This software may be used and distributed according to the terms of
6 # the GNU General Public License, incorporated herein by reference.
6 # the GNU General Public License, incorporated herein by reference.
7
7
8 '''interactive change selection during commit or qrefresh'''
8 '''interactive change selection during commit or qrefresh'''
9
9
10 from mercurial.i18n import gettext, _
10 from mercurial.i18n import gettext, _
11 from mercurial import cmdutil, commands, extensions, hg, mdiff, patch
11 from mercurial import cmdutil, commands, extensions, hg, mdiff, patch
12 from mercurial import util
12 from mercurial import util
13 import copy, cStringIO, errno, operator, os, re, tempfile
13 import copy, cStringIO, errno, operator, os, re, tempfile
14
14
15 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
15 lines_re = re.compile(r'@@ -(\d+),(\d+) \+(\d+),(\d+) @@\s*(.*)')
16
16
17 def scanpatch(fp):
17 def scanpatch(fp):
18 """like patch.iterhunks, but yield different events
18 """like patch.iterhunks, but yield different events
19
19
20 - ('file', [header_lines + fromfile + tofile])
20 - ('file', [header_lines + fromfile + tofile])
21 - ('context', [context_lines])
21 - ('context', [context_lines])
22 - ('hunk', [hunk_lines])
22 - ('hunk', [hunk_lines])
23 - ('range', (-start,len, +start,len, diffp))
23 - ('range', (-start,len, +start,len, diffp))
24 """
24 """
25 lr = patch.linereader(fp)
25 lr = patch.linereader(fp)
26
26
27 def scanwhile(first, p):
27 def scanwhile(first, p):
28 """scan lr while predicate holds"""
28 """scan lr while predicate holds"""
29 lines = [first]
29 lines = [first]
30 while True:
30 while True:
31 line = lr.readline()
31 line = lr.readline()
32 if not line:
32 if not line:
33 break
33 break
34 if p(line):
34 if p(line):
35 lines.append(line)
35 lines.append(line)
36 else:
36 else:
37 lr.push(line)
37 lr.push(line)
38 break
38 break
39 return lines
39 return lines
40
40
41 while True:
41 while True:
42 line = lr.readline()
42 line = lr.readline()
43 if not line:
43 if not line:
44 break
44 break
45 if line.startswith('diff --git a/'):
45 if line.startswith('diff --git a/'):
46 def notheader(line):
46 def notheader(line):
47 s = line.split(None, 1)
47 s = line.split(None, 1)
48 return not s or s[0] not in ('---', 'diff')
48 return not s or s[0] not in ('---', 'diff')
49 header = scanwhile(line, notheader)
49 header = scanwhile(line, notheader)
50 fromfile = lr.readline()
50 fromfile = lr.readline()
51 if fromfile.startswith('---'):
51 if fromfile.startswith('---'):
52 tofile = lr.readline()
52 tofile = lr.readline()
53 header += [fromfile, tofile]
53 header += [fromfile, tofile]
54 else:
54 else:
55 lr.push(fromfile)
55 lr.push(fromfile)
56 yield 'file', header
56 yield 'file', header
57 elif line[0] == ' ':
57 elif line[0] == ' ':
58 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
58 yield 'context', scanwhile(line, lambda l: l[0] in ' \\')
59 elif line[0] in '-+':
59 elif line[0] in '-+':
60 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
60 yield 'hunk', scanwhile(line, lambda l: l[0] in '-+\\')
61 else:
61 else:
62 m = lines_re.match(line)
62 m = lines_re.match(line)
63 if m:
63 if m:
64 yield 'range', m.groups()
64 yield 'range', m.groups()
65 else:
65 else:
66 raise patch.PatchError('unknown patch content: %r' % line)
66 raise patch.PatchError('unknown patch content: %r' % line)
67
67
68 class header(object):
68 class header(object):
69 """patch header
69 """patch header
70
70
71 XXX shoudn't we move this to mercurial/patch.py ?
71 XXX shoudn't we move this to mercurial/patch.py ?
72 """
72 """
73 diff_re = re.compile('diff --git a/(.*) b/(.*)$')
73 diff_re = re.compile('diff --git a/(.*) b/(.*)$')
74 allhunks_re = re.compile('(?:index|new file|deleted file) ')
74 allhunks_re = re.compile('(?:index|new file|deleted file) ')
75 pretty_re = re.compile('(?:new file|deleted file) ')
75 pretty_re = re.compile('(?:new file|deleted file) ')
76 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
76 special_re = re.compile('(?:index|new|deleted|copy|rename) ')
77
77
78 def __init__(self, header):
78 def __init__(self, header):
79 self.header = header
79 self.header = header
80 self.hunks = []
80 self.hunks = []
81
81
82 def binary(self):
82 def binary(self):
83 for h in self.header:
83 for h in self.header:
84 if h.startswith('index '):
84 if h.startswith('index '):
85 return True
85 return True
86
86
87 def pretty(self, fp):
87 def pretty(self, fp):
88 for h in self.header:
88 for h in self.header:
89 if h.startswith('index '):
89 if h.startswith('index '):
90 fp.write(_('this modifies a binary file (all or nothing)\n'))
90 fp.write(_('this modifies a binary file (all or nothing)\n'))
91 break
91 break
92 if self.pretty_re.match(h):
92 if self.pretty_re.match(h):
93 fp.write(h)
93 fp.write(h)
94 if self.binary():
94 if self.binary():
95 fp.write(_('this is a binary file\n'))
95 fp.write(_('this is a binary file\n'))
96 break
96 break
97 if h.startswith('---'):
97 if h.startswith('---'):
98 fp.write(_('%d hunks, %d lines changed\n') %
98 fp.write(_('%d hunks, %d lines changed\n') %
99 (len(self.hunks),
99 (len(self.hunks),
100 sum([h.added + h.removed for h in self.hunks])))
100 sum([h.added + h.removed for h in self.hunks])))
101 break
101 break
102 fp.write(h)
102 fp.write(h)
103
103
104 def write(self, fp):
104 def write(self, fp):
105 fp.write(''.join(self.header))
105 fp.write(''.join(self.header))
106
106
107 def allhunks(self):
107 def allhunks(self):
108 for h in self.header:
108 for h in self.header:
109 if self.allhunks_re.match(h):
109 if self.allhunks_re.match(h):
110 return True
110 return True
111
111
112 def files(self):
112 def files(self):
113 fromfile, tofile = self.diff_re.match(self.header[0]).groups()
113 fromfile, tofile = self.diff_re.match(self.header[0]).groups()
114 if fromfile == tofile:
114 if fromfile == tofile:
115 return [fromfile]
115 return [fromfile]
116 return [fromfile, tofile]
116 return [fromfile, tofile]
117
117
118 def filename(self):
118 def filename(self):
119 return self.files()[-1]
119 return self.files()[-1]
120
120
121 def __repr__(self):
121 def __repr__(self):
122 return '<header %s>' % (' '.join(map(repr, self.files())))
122 return '<header %s>' % (' '.join(map(repr, self.files())))
123
123
124 def special(self):
124 def special(self):
125 for h in self.header:
125 for h in self.header:
126 if self.special_re.match(h):
126 if self.special_re.match(h):
127 return True
127 return True
128
128
129 def countchanges(hunk):
129 def countchanges(hunk):
130 """hunk -> (n+,n-)"""
130 """hunk -> (n+,n-)"""
131 add = len([h for h in hunk if h[0] == '+'])
131 add = len([h for h in hunk if h[0] == '+'])
132 rem = len([h for h in hunk if h[0] == '-'])
132 rem = len([h for h in hunk if h[0] == '-'])
133 return add, rem
133 return add, rem
134
134
135 class hunk(object):
135 class hunk(object):
136 """patch hunk
136 """patch hunk
137
137
138 XXX shouldn't we merge this with patch.hunk ?
138 XXX shouldn't we merge this with patch.hunk ?
139 """
139 """
140 maxcontext = 3
140 maxcontext = 3
141
141
142 def __init__(self, header, fromline, toline, proc, before, hunk, after):
142 def __init__(self, header, fromline, toline, proc, before, hunk, after):
143 def trimcontext(number, lines):
143 def trimcontext(number, lines):
144 delta = len(lines) - self.maxcontext
144 delta = len(lines) - self.maxcontext
145 if False and delta > 0:
145 if False and delta > 0:
146 return number + delta, lines[:self.maxcontext]
146 return number + delta, lines[:self.maxcontext]
147 return number, lines
147 return number, lines
148
148
149 self.header = header
149 self.header = header
150 self.fromline, self.before = trimcontext(fromline, before)
150 self.fromline, self.before = trimcontext(fromline, before)
151 self.toline, self.after = trimcontext(toline, after)
151 self.toline, self.after = trimcontext(toline, after)
152 self.proc = proc
152 self.proc = proc
153 self.hunk = hunk
153 self.hunk = hunk
154 self.added, self.removed = countchanges(self.hunk)
154 self.added, self.removed = countchanges(self.hunk)
155
155
156 def write(self, fp):
156 def write(self, fp):
157 delta = len(self.before) + len(self.after)
157 delta = len(self.before) + len(self.after)
158 if self.after and self.after[-1] == '\\ No newline at end of file\n':
158 if self.after and self.after[-1] == '\\ No newline at end of file\n':
159 delta -= 1
159 delta -= 1
160 fromlen = delta + self.removed
160 fromlen = delta + self.removed
161 tolen = delta + self.added
161 tolen = delta + self.added
162 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
162 fp.write('@@ -%d,%d +%d,%d @@%s\n' %
163 (self.fromline, fromlen, self.toline, tolen,
163 (self.fromline, fromlen, self.toline, tolen,
164 self.proc and (' ' + self.proc)))
164 self.proc and (' ' + self.proc)))
165 fp.write(''.join(self.before + self.hunk + self.after))
165 fp.write(''.join(self.before + self.hunk + self.after))
166
166
167 pretty = write
167 pretty = write
168
168
169 def filename(self):
169 def filename(self):
170 return self.header.filename()
170 return self.header.filename()
171
171
172 def __repr__(self):
172 def __repr__(self):
173 return '<hunk %r@%d>' % (self.filename(), self.fromline)
173 return '<hunk %r@%d>' % (self.filename(), self.fromline)
174
174
175 def parsepatch(fp):
175 def parsepatch(fp):
176 """patch -> [] of hunks """
176 """patch -> [] of hunks """
177 class parser(object):
177 class parser(object):
178 """patch parsing state machine"""
178 """patch parsing state machine"""
179 def __init__(self):
179 def __init__(self):
180 self.fromline = 0
180 self.fromline = 0
181 self.toline = 0
181 self.toline = 0
182 self.proc = ''
182 self.proc = ''
183 self.header = None
183 self.header = None
184 self.context = []
184 self.context = []
185 self.before = []
185 self.before = []
186 self.hunk = []
186 self.hunk = []
187 self.stream = []
187 self.stream = []
188
188
189 def addrange(self, (fromstart, fromend, tostart, toend, proc)):
189 def addrange(self, (fromstart, fromend, tostart, toend, proc)):
190 self.fromline = int(fromstart)
190 self.fromline = int(fromstart)
191 self.toline = int(tostart)
191 self.toline = int(tostart)
192 self.proc = proc
192 self.proc = proc
193
193
194 def addcontext(self, context):
194 def addcontext(self, context):
195 if self.hunk:
195 if self.hunk:
196 h = hunk(self.header, self.fromline, self.toline, self.proc,
196 h = hunk(self.header, self.fromline, self.toline, self.proc,
197 self.before, self.hunk, context)
197 self.before, self.hunk, context)
198 self.header.hunks.append(h)
198 self.header.hunks.append(h)
199 self.stream.append(h)
199 self.stream.append(h)
200 self.fromline += len(self.before) + h.removed
200 self.fromline += len(self.before) + h.removed
201 self.toline += len(self.before) + h.added
201 self.toline += len(self.before) + h.added
202 self.before = []
202 self.before = []
203 self.hunk = []
203 self.hunk = []
204 self.proc = ''
204 self.proc = ''
205 self.context = context
205 self.context = context
206
206
207 def addhunk(self, hunk):
207 def addhunk(self, hunk):
208 if self.context:
208 if self.context:
209 self.before = self.context
209 self.before = self.context
210 self.context = []
210 self.context = []
211 self.hunk = hunk
211 self.hunk = hunk
212
212
213 def newfile(self, hdr):
213 def newfile(self, hdr):
214 self.addcontext([])
214 self.addcontext([])
215 h = header(hdr)
215 h = header(hdr)
216 self.stream.append(h)
216 self.stream.append(h)
217 self.header = h
217 self.header = h
218
218
219 def finished(self):
219 def finished(self):
220 self.addcontext([])
220 self.addcontext([])
221 return self.stream
221 return self.stream
222
222
223 transitions = {
223 transitions = {
224 'file': {'context': addcontext,
224 'file': {'context': addcontext,
225 'file': newfile,
225 'file': newfile,
226 'hunk': addhunk,
226 'hunk': addhunk,
227 'range': addrange},
227 'range': addrange},
228 'context': {'file': newfile,
228 'context': {'file': newfile,
229 'hunk': addhunk,
229 'hunk': addhunk,
230 'range': addrange},
230 'range': addrange},
231 'hunk': {'context': addcontext,
231 'hunk': {'context': addcontext,
232 'file': newfile,
232 'file': newfile,
233 'range': addrange},
233 'range': addrange},
234 'range': {'context': addcontext,
234 'range': {'context': addcontext,
235 'hunk': addhunk},
235 'hunk': addhunk},
236 }
236 }
237
237
238 p = parser()
238 p = parser()
239
239
240 state = 'context'
240 state = 'context'
241 for newstate, data in scanpatch(fp):
241 for newstate, data in scanpatch(fp):
242 try:
242 try:
243 p.transitions[state][newstate](p, data)
243 p.transitions[state][newstate](p, data)
244 except KeyError:
244 except KeyError:
245 raise patch.PatchError('unhandled transition: %s -> %s' %
245 raise patch.PatchError('unhandled transition: %s -> %s' %
246 (state, newstate))
246 (state, newstate))
247 state = newstate
247 state = newstate
248 return p.finished()
248 return p.finished()
249
249
250 def filterpatch(ui, chunks):
250 def filterpatch(ui, chunks):
251 """Interactively filter patch chunks into applied-only chunks"""
251 """Interactively filter patch chunks into applied-only chunks"""
252 chunks = list(chunks)
252 chunks = list(chunks)
253 chunks.reverse()
253 chunks.reverse()
254 seen = {}
254 seen = {}
255 def consumefile():
255 def consumefile():
256 """fetch next portion from chunks until a 'header' is seen
256 """fetch next portion from chunks until a 'header' is seen
257 NB: header == new-file mark
257 NB: header == new-file mark
258 """
258 """
259 consumed = []
259 consumed = []
260 while chunks:
260 while chunks:
261 if isinstance(chunks[-1], header):
261 if isinstance(chunks[-1], header):
262 break
262 break
263 else:
263 else:
264 consumed.append(chunks.pop())
264 consumed.append(chunks.pop())
265 return consumed
265 return consumed
266
266
267 resp_all = [None] # this two are changed from inside prompt,
267 resp_all = [None] # this two are changed from inside prompt,
268 resp_file = [None] # so can't be usual variables
268 resp_file = [None] # so can't be usual variables
269 applied = {} # 'filename' -> [] of chunks
269 applied = {} # 'filename' -> [] of chunks
270 def prompt(query):
270 def prompt(query):
271 """prompt query, and process base inputs
271 """prompt query, and process base inputs
272
272
273 - y/n for the rest of file
273 - y/n for the rest of file
274 - y/n for the rest
274 - y/n for the rest
275 - ? (help)
275 - ? (help)
276 - q (quit)
276 - q (quit)
277
277
278 else, input is returned to the caller.
278 else, input is returned to the caller.
279 """
279 """
280 if resp_all[0] is not None:
280 if resp_all[0] is not None:
281 return resp_all[0]
281 return resp_all[0]
282 if resp_file[0] is not None:
282 if resp_file[0] is not None:
283 return resp_file[0]
283 return resp_file[0]
284 while True:
284 while True:
285 choices = _('[Ynsfdaq?]')
285 choices = _('[Ynsfdaq?]')
286 r = (ui.prompt("%s %s " % (query, choices), '(?i)%s?$' % choices)
286 r = (ui.prompt("%s %s " % (query, choices), '(?i)%s?$' % choices)
287 or _('y')).lower()
287 or _('y')).lower()
288 if r == _('?'):
288 if r == _('?'):
289 doc = gettext(record.__doc__)
289 doc = gettext(record.__doc__)
290 c = doc.find(_('y - record this change'))
290 c = doc.find(_('y - record this change'))
291 for l in doc[c:].splitlines():
291 for l in doc[c:].splitlines():
292 if l: ui.write(l.strip(), '\n')
292 if l: ui.write(l.strip(), '\n')
293 continue
293 continue
294 elif r == _('s'):
294 elif r == _('s'):
295 r = resp_file[0] = 'n'
295 r = resp_file[0] = 'n'
296 elif r == _('f'):
296 elif r == _('f'):
297 r = resp_file[0] = 'y'
297 r = resp_file[0] = 'y'
298 elif r == _('d'):
298 elif r == _('d'):
299 r = resp_all[0] = 'n'
299 r = resp_all[0] = 'n'
300 elif r == _('a'):
300 elif r == _('a'):
301 r = resp_all[0] = 'y'
301 r = resp_all[0] = 'y'
302 elif r == _('q'):
302 elif r == _('q'):
303 raise util.Abort(_('user quit'))
303 raise util.Abort(_('user quit'))
304 return r
304 return r
305 pos, total = 0, len(chunks) - 1
305 pos, total = 0, len(chunks) - 1
306 while chunks:
306 while chunks:
307 chunk = chunks.pop()
307 chunk = chunks.pop()
308 if isinstance(chunk, header):
308 if isinstance(chunk, header):
309 # new-file mark
309 # new-file mark
310 resp_file = [None]
310 resp_file = [None]
311 fixoffset = 0
311 fixoffset = 0
312 hdr = ''.join(chunk.header)
312 hdr = ''.join(chunk.header)
313 if hdr in seen:
313 if hdr in seen:
314 consumefile()
314 consumefile()
315 continue
315 continue
316 seen[hdr] = True
316 seen[hdr] = True
317 if resp_all[0] is None:
317 if resp_all[0] is None:
318 chunk.pretty(ui)
318 chunk.pretty(ui)
319 r = prompt(_('examine changes to %s?') %
319 r = prompt(_('examine changes to %s?') %
320 _(' and ').join(map(repr, chunk.files())))
320 _(' and ').join(map(repr, chunk.files())))
321 if r == _('y'):
321 if r == _('y'):
322 applied[chunk.filename()] = [chunk]
322 applied[chunk.filename()] = [chunk]
323 if chunk.allhunks():
323 if chunk.allhunks():
324 applied[chunk.filename()] += consumefile()
324 applied[chunk.filename()] += consumefile()
325 else:
325 else:
326 consumefile()
326 consumefile()
327 else:
327 else:
328 # new hunk
328 # new hunk
329 if resp_file[0] is None and resp_all[0] is None:
329 if resp_file[0] is None and resp_all[0] is None:
330 chunk.pretty(ui)
330 chunk.pretty(ui)
331 r = total == 1 and prompt(_('record this change to %r?') %
331 r = total == 1 and prompt(_('record this change to %r?') %
332 chunk.filename()) \
332 chunk.filename()) \
333 or prompt(_('record change %d/%d to %r?') %
333 or prompt(_('record change %d/%d to %r?') %
334 (pos, total, chunk.filename()))
334 (pos, total, chunk.filename()))
335 if r == _('y'):
335 if r == _('y'):
336 if fixoffset:
336 if fixoffset:
337 chunk = copy.copy(chunk)
337 chunk = copy.copy(chunk)
338 chunk.toline += fixoffset
338 chunk.toline += fixoffset
339 applied[chunk.filename()].append(chunk)
339 applied[chunk.filename()].append(chunk)
340 else:
340 else:
341 fixoffset += chunk.removed - chunk.added
341 fixoffset += chunk.removed - chunk.added
342 pos = pos + 1
342 pos = pos + 1
343 return reduce(operator.add, [h for h in applied.itervalues()
343 return reduce(operator.add, [h for h in applied.itervalues()
344 if h[0].special() or len(h) > 1], [])
344 if h[0].special() or len(h) > 1], [])
345
345
346 def record(ui, repo, *pats, **opts):
346 def record(ui, repo, *pats, **opts):
347 '''interactively select changes to commit
347 '''interactively select changes to commit
348
348
349 If a list of files is omitted, all changes reported by "hg status"
349 If a list of files is omitted, all changes reported by "hg status"
350 will be candidates for recording.
350 will be candidates for recording.
351
351
352 See 'hg help dates' for a list of formats valid for -d/--date.
352 See 'hg help dates' for a list of formats valid for -d/--date.
353
353
354 You will be prompted for whether to record changes to each
354 You will be prompted for whether to record changes to each
355 modified file, and for files with multiple changes, for each
355 modified file, and for files with multiple changes, for each
356 change to use. For each query, the following responses are
356 change to use. For each query, the following responses are
357 possible:
357 possible:
358
358
359 y - record this change
359 y - record this change
360 n - skip this change
360 n - skip this change
361
361
362 s - skip remaining changes to this file
362 s - skip remaining changes to this file
363 f - record remaining changes to this file
363 f - record remaining changes to this file
364
364
365 d - done, skip remaining changes and files
365 d - done, skip remaining changes and files
366 a - record all changes to all remaining files
366 a - record all changes to all remaining files
367 q - quit, recording no changes
367 q - quit, recording no changes
368
368
369 ? - display help'''
369 ? - display help'''
370
370
371 def record_committer(ui, repo, pats, opts):
371 def record_committer(ui, repo, pats, opts):
372 commands.commit(ui, repo, *pats, **opts)
372 commands.commit(ui, repo, *pats, **opts)
373
373
374 dorecord(ui, repo, record_committer, *pats, **opts)
374 dorecord(ui, repo, record_committer, *pats, **opts)
375
375
376
376
377 def qrecord(ui, repo, patch, *pats, **opts):
377 def qrecord(ui, repo, patch, *pats, **opts):
378 '''interactively record a new patch
378 '''interactively record a new patch
379
379
380 see 'hg help qnew' & 'hg help record' for more information and usage
380 see 'hg help qnew' & 'hg help record' for more information and usage
381 '''
381 '''
382
382
383 try:
383 try:
384 mq = extensions.find('mq')
384 mq = extensions.find('mq')
385 except KeyError:
385 except KeyError:
386 raise util.Abort(_("'mq' extension not loaded"))
386 raise util.Abort(_("'mq' extension not loaded"))
387
387
388 def qrecord_committer(ui, repo, pats, opts):
388 def qrecord_committer(ui, repo, pats, opts):
389 mq.new(ui, repo, patch, *pats, **opts)
389 mq.new(ui, repo, patch, *pats, **opts)
390
390
391 opts = opts.copy()
391 opts = opts.copy()
392 opts['force'] = True # always 'qnew -f'
392 opts['force'] = True # always 'qnew -f'
393 dorecord(ui, repo, qrecord_committer, *pats, **opts)
393 dorecord(ui, repo, qrecord_committer, *pats, **opts)
394
394
395
395
396 def dorecord(ui, repo, committer, *pats, **opts):
396 def dorecord(ui, repo, committer, *pats, **opts):
397 if not ui.interactive:
397 if not ui.interactive:
398 raise util.Abort(_('running non-interactively, use commit instead'))
398 raise util.Abort(_('running non-interactively, use commit instead'))
399
399
400 def recordfunc(ui, repo, message, match, opts):
400 def recordfunc(ui, repo, message, match, opts):
401 """This is generic record driver.
401 """This is generic record driver.
402
402
403 It's job is to interactively filter local changes, and accordingly
403 It's job is to interactively filter local changes, and accordingly
404 prepare working dir into a state, where the job can be delegated to
404 prepare working dir into a state, where the job can be delegated to
405 non-interactive commit command such as 'commit' or 'qrefresh'.
405 non-interactive commit command such as 'commit' or 'qrefresh'.
406
406
407 After the actual job is done by non-interactive command, working dir
407 After the actual job is done by non-interactive command, working dir
408 state is restored to original.
408 state is restored to original.
409
409
410 In the end we'll record intresting changes, and everything else will be
410 In the end we'll record intresting changes, and everything else will be
411 left in place, so the user can continue his work.
411 left in place, so the user can continue his work.
412 """
412 """
413
413
414 changes = repo.status(match=match)[:3]
414 changes = repo.status(match=match)[:3]
415 diffopts = mdiff.diffopts(git=True, nodates=True)
415 diffopts = mdiff.diffopts(git=True, nodates=True)
416 chunks = patch.diff(repo, changes=changes, opts=diffopts)
416 chunks = patch.diff(repo, changes=changes, opts=diffopts)
417 fp = cStringIO.StringIO()
417 fp = cStringIO.StringIO()
418 fp.write(''.join(chunks))
418 fp.write(''.join(chunks))
419 fp.seek(0)
419 fp.seek(0)
420
420
421 # 1. filter patch, so we have intending-to apply subset of it
421 # 1. filter patch, so we have intending-to apply subset of it
422 chunks = filterpatch(ui, parsepatch(fp))
422 chunks = filterpatch(ui, parsepatch(fp))
423 del fp
423 del fp
424
424
425 contenders = {}
425 contenders = set()
426 for h in chunks:
426 for h in chunks:
427 try: contenders.update(dict.fromkeys(h.files()))
427 try: contenders.update(set(h.files()))
428 except AttributeError: pass
428 except AttributeError: pass
429
429
430 changed = changes[0] + changes[1] + changes[2]
430 changed = changes[0] + changes[1] + changes[2]
431 newfiles = [f for f in changed if f in contenders]
431 newfiles = [f for f in changed if f in contenders]
432 if not newfiles:
432 if not newfiles:
433 ui.status(_('no changes to record\n'))
433 ui.status(_('no changes to record\n'))
434 return 0
434 return 0
435
435
436 modified = dict.fromkeys(changes[0])
436 modified = set(changes[0])
437
437
438 # 2. backup changed files, so we can restore them in the end
438 # 2. backup changed files, so we can restore them in the end
439 backups = {}
439 backups = {}
440 backupdir = repo.join('record-backups')
440 backupdir = repo.join('record-backups')
441 try:
441 try:
442 os.mkdir(backupdir)
442 os.mkdir(backupdir)
443 except OSError, err:
443 except OSError, err:
444 if err.errno != errno.EEXIST:
444 if err.errno != errno.EEXIST:
445 raise
445 raise
446 try:
446 try:
447 # backup continues
447 # backup continues
448 for f in newfiles:
448 for f in newfiles:
449 if f not in modified:
449 if f not in modified:
450 continue
450 continue
451 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
451 fd, tmpname = tempfile.mkstemp(prefix=f.replace('/', '_')+'.',
452 dir=backupdir)
452 dir=backupdir)
453 os.close(fd)
453 os.close(fd)
454 ui.debug(_('backup %r as %r\n') % (f, tmpname))
454 ui.debug(_('backup %r as %r\n') % (f, tmpname))
455 util.copyfile(repo.wjoin(f), tmpname)
455 util.copyfile(repo.wjoin(f), tmpname)
456 backups[f] = tmpname
456 backups[f] = tmpname
457
457
458 fp = cStringIO.StringIO()
458 fp = cStringIO.StringIO()
459 for c in chunks:
459 for c in chunks:
460 if c.filename() in backups:
460 if c.filename() in backups:
461 c.write(fp)
461 c.write(fp)
462 dopatch = fp.tell()
462 dopatch = fp.tell()
463 fp.seek(0)
463 fp.seek(0)
464
464
465 # 3a. apply filtered patch to clean repo (clean)
465 # 3a. apply filtered patch to clean repo (clean)
466 if backups:
466 if backups:
467 hg.revert(repo, repo.dirstate.parents()[0], backups.has_key)
467 hg.revert(repo, repo.dirstate.parents()[0], backups.has_key)
468
468
469 # 3b. (apply)
469 # 3b. (apply)
470 if dopatch:
470 if dopatch:
471 try:
471 try:
472 ui.debug(_('applying patch\n'))
472 ui.debug(_('applying patch\n'))
473 ui.debug(fp.getvalue())
473 ui.debug(fp.getvalue())
474 pfiles = {}
474 pfiles = {}
475 patch.internalpatch(fp, ui, 1, repo.root, files=pfiles)
475 patch.internalpatch(fp, ui, 1, repo.root, files=pfiles)
476 patch.updatedir(ui, repo, pfiles)
476 patch.updatedir(ui, repo, pfiles)
477 except patch.PatchError, err:
477 except patch.PatchError, err:
478 s = str(err)
478 s = str(err)
479 if s:
479 if s:
480 raise util.Abort(s)
480 raise util.Abort(s)
481 else:
481 else:
482 raise util.Abort(_('patch failed to apply'))
482 raise util.Abort(_('patch failed to apply'))
483 del fp
483 del fp
484
484
485 # 4. We prepared working directory according to filtered patch.
485 # 4. We prepared working directory according to filtered patch.
486 # Now is the time to delegate the job to commit/qrefresh or the like!
486 # Now is the time to delegate the job to commit/qrefresh or the like!
487
487
488 # it is important to first chdir to repo root -- we'll call a
488 # it is important to first chdir to repo root -- we'll call a
489 # highlevel command with list of pathnames relative to repo root
489 # highlevel command with list of pathnames relative to repo root
490 cwd = os.getcwd()
490 cwd = os.getcwd()
491 os.chdir(repo.root)
491 os.chdir(repo.root)
492 try:
492 try:
493 committer(ui, repo, newfiles, opts)
493 committer(ui, repo, newfiles, opts)
494 finally:
494 finally:
495 os.chdir(cwd)
495 os.chdir(cwd)
496
496
497 return 0
497 return 0
498 finally:
498 finally:
499 # 5. finally restore backed-up files
499 # 5. finally restore backed-up files
500 try:
500 try:
501 for realname, tmpname in backups.iteritems():
501 for realname, tmpname in backups.iteritems():
502 ui.debug(_('restoring %r to %r\n') % (tmpname, realname))
502 ui.debug(_('restoring %r to %r\n') % (tmpname, realname))
503 util.copyfile(tmpname, repo.wjoin(realname))
503 util.copyfile(tmpname, repo.wjoin(realname))
504 os.unlink(tmpname)
504 os.unlink(tmpname)
505 os.rmdir(backupdir)
505 os.rmdir(backupdir)
506 except OSError:
506 except OSError:
507 pass
507 pass
508 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
508 return cmdutil.commit(ui, repo, recordfunc, pats, opts)
509
509
510 cmdtable = {
510 cmdtable = {
511 "record":
511 "record":
512 (record,
512 (record,
513
513
514 # add commit options
514 # add commit options
515 commands.table['^commit|ci'][1],
515 commands.table['^commit|ci'][1],
516
516
517 _('hg record [OPTION]... [FILE]...')),
517 _('hg record [OPTION]... [FILE]...')),
518 }
518 }
519
519
520
520
521 def extsetup():
521 def extsetup():
522 try:
522 try:
523 mq = extensions.find('mq')
523 mq = extensions.find('mq')
524 except KeyError:
524 except KeyError:
525 return
525 return
526
526
527 qcmdtable = {
527 qcmdtable = {
528 "qrecord":
528 "qrecord":
529 (qrecord,
529 (qrecord,
530
530
531 # add qnew options, except '--force'
531 # add qnew options, except '--force'
532 [opt for opt in mq.cmdtable['qnew'][1] if opt[1] != 'force'],
532 [opt for opt in mq.cmdtable['qnew'][1] if opt[1] != 'force'],
533
533
534 _('hg qrecord [OPTION]... PATCH [FILE]...')),
534 _('hg qrecord [OPTION]... PATCH [FILE]...')),
535 }
535 }
536
536
537 cmdtable.update(qcmdtable)
537 cmdtable.update(qcmdtable)
538
538
@@ -1,1213 +1,1213 b''
1 # cmdutil.py - help for command processing in mercurial
1 # cmdutil.py - help for command processing in mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import os, sys, bisect, stat, encoding
10 import os, sys, bisect, stat, encoding
11 import mdiff, bdiff, util, templater, templatefilters, patch, errno, error
11 import mdiff, bdiff, util, templater, templatefilters, patch, errno, error
12 import match as _match
12 import match as _match
13
13
14 revrangesep = ':'
14 revrangesep = ':'
15
15
16 def findpossible(cmd, table, strict=False):
16 def findpossible(cmd, table, strict=False):
17 """
17 """
18 Return cmd -> (aliases, command table entry)
18 Return cmd -> (aliases, command table entry)
19 for each matching command.
19 for each matching command.
20 Return debug commands (or their aliases) only if no normal command matches.
20 Return debug commands (or their aliases) only if no normal command matches.
21 """
21 """
22 choice = {}
22 choice = {}
23 debugchoice = {}
23 debugchoice = {}
24 for e in table.keys():
24 for e in table.keys():
25 aliases = e.lstrip("^").split("|")
25 aliases = e.lstrip("^").split("|")
26 found = None
26 found = None
27 if cmd in aliases:
27 if cmd in aliases:
28 found = cmd
28 found = cmd
29 elif not strict:
29 elif not strict:
30 for a in aliases:
30 for a in aliases:
31 if a.startswith(cmd):
31 if a.startswith(cmd):
32 found = a
32 found = a
33 break
33 break
34 if found is not None:
34 if found is not None:
35 if aliases[0].startswith("debug") or found.startswith("debug"):
35 if aliases[0].startswith("debug") or found.startswith("debug"):
36 debugchoice[found] = (aliases, table[e])
36 debugchoice[found] = (aliases, table[e])
37 else:
37 else:
38 choice[found] = (aliases, table[e])
38 choice[found] = (aliases, table[e])
39
39
40 if not choice and debugchoice:
40 if not choice and debugchoice:
41 choice = debugchoice
41 choice = debugchoice
42
42
43 return choice
43 return choice
44
44
45 def findcmd(cmd, table, strict=True):
45 def findcmd(cmd, table, strict=True):
46 """Return (aliases, command table entry) for command string."""
46 """Return (aliases, command table entry) for command string."""
47 choice = findpossible(cmd, table, strict)
47 choice = findpossible(cmd, table, strict)
48
48
49 if cmd in choice:
49 if cmd in choice:
50 return choice[cmd]
50 return choice[cmd]
51
51
52 if len(choice) > 1:
52 if len(choice) > 1:
53 clist = choice.keys()
53 clist = choice.keys()
54 clist.sort()
54 clist.sort()
55 raise error.AmbiguousCommand(cmd, clist)
55 raise error.AmbiguousCommand(cmd, clist)
56
56
57 if choice:
57 if choice:
58 return choice.values()[0]
58 return choice.values()[0]
59
59
60 raise error.UnknownCommand(cmd)
60 raise error.UnknownCommand(cmd)
61
61
62 def bail_if_changed(repo):
62 def bail_if_changed(repo):
63 if repo.dirstate.parents()[1] != nullid:
63 if repo.dirstate.parents()[1] != nullid:
64 raise util.Abort(_('outstanding uncommitted merge'))
64 raise util.Abort(_('outstanding uncommitted merge'))
65 modified, added, removed, deleted = repo.status()[:4]
65 modified, added, removed, deleted = repo.status()[:4]
66 if modified or added or removed or deleted:
66 if modified or added or removed or deleted:
67 raise util.Abort(_("outstanding uncommitted changes"))
67 raise util.Abort(_("outstanding uncommitted changes"))
68
68
69 def logmessage(opts):
69 def logmessage(opts):
70 """ get the log message according to -m and -l option """
70 """ get the log message according to -m and -l option """
71 message = opts.get('message')
71 message = opts.get('message')
72 logfile = opts.get('logfile')
72 logfile = opts.get('logfile')
73
73
74 if message and logfile:
74 if message and logfile:
75 raise util.Abort(_('options --message and --logfile are mutually '
75 raise util.Abort(_('options --message and --logfile are mutually '
76 'exclusive'))
76 'exclusive'))
77 if not message and logfile:
77 if not message and logfile:
78 try:
78 try:
79 if logfile == '-':
79 if logfile == '-':
80 message = sys.stdin.read()
80 message = sys.stdin.read()
81 else:
81 else:
82 message = open(logfile).read()
82 message = open(logfile).read()
83 except IOError, inst:
83 except IOError, inst:
84 raise util.Abort(_("can't read commit message '%s': %s") %
84 raise util.Abort(_("can't read commit message '%s': %s") %
85 (logfile, inst.strerror))
85 (logfile, inst.strerror))
86 return message
86 return message
87
87
88 def loglimit(opts):
88 def loglimit(opts):
89 """get the log limit according to option -l/--limit"""
89 """get the log limit according to option -l/--limit"""
90 limit = opts.get('limit')
90 limit = opts.get('limit')
91 if limit:
91 if limit:
92 try:
92 try:
93 limit = int(limit)
93 limit = int(limit)
94 except ValueError:
94 except ValueError:
95 raise util.Abort(_('limit must be a positive integer'))
95 raise util.Abort(_('limit must be a positive integer'))
96 if limit <= 0: raise util.Abort(_('limit must be positive'))
96 if limit <= 0: raise util.Abort(_('limit must be positive'))
97 else:
97 else:
98 limit = sys.maxint
98 limit = sys.maxint
99 return limit
99 return limit
100
100
101 def setremoteconfig(ui, opts):
101 def setremoteconfig(ui, opts):
102 "copy remote options to ui tree"
102 "copy remote options to ui tree"
103 if opts.get('ssh'):
103 if opts.get('ssh'):
104 ui.setconfig("ui", "ssh", opts['ssh'])
104 ui.setconfig("ui", "ssh", opts['ssh'])
105 if opts.get('remotecmd'):
105 if opts.get('remotecmd'):
106 ui.setconfig("ui", "remotecmd", opts['remotecmd'])
106 ui.setconfig("ui", "remotecmd", opts['remotecmd'])
107
107
108 def revpair(repo, revs):
108 def revpair(repo, revs):
109 '''return pair of nodes, given list of revisions. second item can
109 '''return pair of nodes, given list of revisions. second item can
110 be None, meaning use working dir.'''
110 be None, meaning use working dir.'''
111
111
112 def revfix(repo, val, defval):
112 def revfix(repo, val, defval):
113 if not val and val != 0 and defval is not None:
113 if not val and val != 0 and defval is not None:
114 val = defval
114 val = defval
115 return repo.lookup(val)
115 return repo.lookup(val)
116
116
117 if not revs:
117 if not revs:
118 return repo.dirstate.parents()[0], None
118 return repo.dirstate.parents()[0], None
119 end = None
119 end = None
120 if len(revs) == 1:
120 if len(revs) == 1:
121 if revrangesep in revs[0]:
121 if revrangesep in revs[0]:
122 start, end = revs[0].split(revrangesep, 1)
122 start, end = revs[0].split(revrangesep, 1)
123 start = revfix(repo, start, 0)
123 start = revfix(repo, start, 0)
124 end = revfix(repo, end, len(repo) - 1)
124 end = revfix(repo, end, len(repo) - 1)
125 else:
125 else:
126 start = revfix(repo, revs[0], None)
126 start = revfix(repo, revs[0], None)
127 elif len(revs) == 2:
127 elif len(revs) == 2:
128 if revrangesep in revs[0] or revrangesep in revs[1]:
128 if revrangesep in revs[0] or revrangesep in revs[1]:
129 raise util.Abort(_('too many revisions specified'))
129 raise util.Abort(_('too many revisions specified'))
130 start = revfix(repo, revs[0], None)
130 start = revfix(repo, revs[0], None)
131 end = revfix(repo, revs[1], None)
131 end = revfix(repo, revs[1], None)
132 else:
132 else:
133 raise util.Abort(_('too many revisions specified'))
133 raise util.Abort(_('too many revisions specified'))
134 return start, end
134 return start, end
135
135
136 def revrange(repo, revs):
136 def revrange(repo, revs):
137 """Yield revision as strings from a list of revision specifications."""
137 """Yield revision as strings from a list of revision specifications."""
138
138
139 def revfix(repo, val, defval):
139 def revfix(repo, val, defval):
140 if not val and val != 0 and defval is not None:
140 if not val and val != 0 and defval is not None:
141 return defval
141 return defval
142 return repo.changelog.rev(repo.lookup(val))
142 return repo.changelog.rev(repo.lookup(val))
143
143
144 seen, l = {}, []
144 seen, l = {}, []
145 for spec in revs:
145 for spec in revs:
146 if revrangesep in spec:
146 if revrangesep in spec:
147 start, end = spec.split(revrangesep, 1)
147 start, end = spec.split(revrangesep, 1)
148 start = revfix(repo, start, 0)
148 start = revfix(repo, start, 0)
149 end = revfix(repo, end, len(repo) - 1)
149 end = revfix(repo, end, len(repo) - 1)
150 step = start > end and -1 or 1
150 step = start > end and -1 or 1
151 for rev in xrange(start, end+step, step):
151 for rev in xrange(start, end+step, step):
152 if rev in seen:
152 if rev in seen:
153 continue
153 continue
154 seen[rev] = 1
154 seen[rev] = 1
155 l.append(rev)
155 l.append(rev)
156 else:
156 else:
157 rev = revfix(repo, spec, None)
157 rev = revfix(repo, spec, None)
158 if rev in seen:
158 if rev in seen:
159 continue
159 continue
160 seen[rev] = 1
160 seen[rev] = 1
161 l.append(rev)
161 l.append(rev)
162
162
163 return l
163 return l
164
164
165 def make_filename(repo, pat, node,
165 def make_filename(repo, pat, node,
166 total=None, seqno=None, revwidth=None, pathname=None):
166 total=None, seqno=None, revwidth=None, pathname=None):
167 node_expander = {
167 node_expander = {
168 'H': lambda: hex(node),
168 'H': lambda: hex(node),
169 'R': lambda: str(repo.changelog.rev(node)),
169 'R': lambda: str(repo.changelog.rev(node)),
170 'h': lambda: short(node),
170 'h': lambda: short(node),
171 }
171 }
172 expander = {
172 expander = {
173 '%': lambda: '%',
173 '%': lambda: '%',
174 'b': lambda: os.path.basename(repo.root),
174 'b': lambda: os.path.basename(repo.root),
175 }
175 }
176
176
177 try:
177 try:
178 if node:
178 if node:
179 expander.update(node_expander)
179 expander.update(node_expander)
180 if node:
180 if node:
181 expander['r'] = (lambda:
181 expander['r'] = (lambda:
182 str(repo.changelog.rev(node)).zfill(revwidth or 0))
182 str(repo.changelog.rev(node)).zfill(revwidth or 0))
183 if total is not None:
183 if total is not None:
184 expander['N'] = lambda: str(total)
184 expander['N'] = lambda: str(total)
185 if seqno is not None:
185 if seqno is not None:
186 expander['n'] = lambda: str(seqno)
186 expander['n'] = lambda: str(seqno)
187 if total is not None and seqno is not None:
187 if total is not None and seqno is not None:
188 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
188 expander['n'] = lambda: str(seqno).zfill(len(str(total)))
189 if pathname is not None:
189 if pathname is not None:
190 expander['s'] = lambda: os.path.basename(pathname)
190 expander['s'] = lambda: os.path.basename(pathname)
191 expander['d'] = lambda: os.path.dirname(pathname) or '.'
191 expander['d'] = lambda: os.path.dirname(pathname) or '.'
192 expander['p'] = lambda: pathname
192 expander['p'] = lambda: pathname
193
193
194 newname = []
194 newname = []
195 patlen = len(pat)
195 patlen = len(pat)
196 i = 0
196 i = 0
197 while i < patlen:
197 while i < patlen:
198 c = pat[i]
198 c = pat[i]
199 if c == '%':
199 if c == '%':
200 i += 1
200 i += 1
201 c = pat[i]
201 c = pat[i]
202 c = expander[c]()
202 c = expander[c]()
203 newname.append(c)
203 newname.append(c)
204 i += 1
204 i += 1
205 return ''.join(newname)
205 return ''.join(newname)
206 except KeyError, inst:
206 except KeyError, inst:
207 raise util.Abort(_("invalid format spec '%%%s' in output file name") %
207 raise util.Abort(_("invalid format spec '%%%s' in output file name") %
208 inst.args[0])
208 inst.args[0])
209
209
210 def make_file(repo, pat, node=None,
210 def make_file(repo, pat, node=None,
211 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
211 total=None, seqno=None, revwidth=None, mode='wb', pathname=None):
212
212
213 writable = 'w' in mode or 'a' in mode
213 writable = 'w' in mode or 'a' in mode
214
214
215 if not pat or pat == '-':
215 if not pat or pat == '-':
216 return writable and sys.stdout or sys.stdin
216 return writable and sys.stdout or sys.stdin
217 if hasattr(pat, 'write') and writable:
217 if hasattr(pat, 'write') and writable:
218 return pat
218 return pat
219 if hasattr(pat, 'read') and 'r' in mode:
219 if hasattr(pat, 'read') and 'r' in mode:
220 return pat
220 return pat
221 return open(make_filename(repo, pat, node, total, seqno, revwidth,
221 return open(make_filename(repo, pat, node, total, seqno, revwidth,
222 pathname),
222 pathname),
223 mode)
223 mode)
224
224
225 def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
225 def match(repo, pats=[], opts={}, globbed=False, default='relpath'):
226 if not globbed and default == 'relpath':
226 if not globbed and default == 'relpath':
227 pats = util.expand_glob(pats or [])
227 pats = util.expand_glob(pats or [])
228 m = _match.match(repo.root, repo.getcwd(), pats,
228 m = _match.match(repo.root, repo.getcwd(), pats,
229 opts.get('include'), opts.get('exclude'), default)
229 opts.get('include'), opts.get('exclude'), default)
230 def badfn(f, msg):
230 def badfn(f, msg):
231 repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
231 repo.ui.warn("%s: %s\n" % (m.rel(f), msg))
232 return False
232 return False
233 m.bad = badfn
233 m.bad = badfn
234 return m
234 return m
235
235
236 def matchall(repo):
236 def matchall(repo):
237 return _match.always(repo.root, repo.getcwd())
237 return _match.always(repo.root, repo.getcwd())
238
238
239 def matchfiles(repo, files):
239 def matchfiles(repo, files):
240 return _match.exact(repo.root, repo.getcwd(), files)
240 return _match.exact(repo.root, repo.getcwd(), files)
241
241
242 def findrenames(repo, added=None, removed=None, threshold=0.5):
242 def findrenames(repo, added=None, removed=None, threshold=0.5):
243 '''find renamed files -- yields (before, after, score) tuples'''
243 '''find renamed files -- yields (before, after, score) tuples'''
244 if added is None or removed is None:
244 if added is None or removed is None:
245 added, removed = repo.status()[1:3]
245 added, removed = repo.status()[1:3]
246 ctx = repo['.']
246 ctx = repo['.']
247 for a in added:
247 for a in added:
248 aa = repo.wread(a)
248 aa = repo.wread(a)
249 bestname, bestscore = None, threshold
249 bestname, bestscore = None, threshold
250 for r in removed:
250 for r in removed:
251 rr = ctx.filectx(r).data()
251 rr = ctx.filectx(r).data()
252
252
253 # bdiff.blocks() returns blocks of matching lines
253 # bdiff.blocks() returns blocks of matching lines
254 # count the number of bytes in each
254 # count the number of bytes in each
255 equal = 0
255 equal = 0
256 alines = mdiff.splitnewlines(aa)
256 alines = mdiff.splitnewlines(aa)
257 matches = bdiff.blocks(aa, rr)
257 matches = bdiff.blocks(aa, rr)
258 for x1,x2,y1,y2 in matches:
258 for x1,x2,y1,y2 in matches:
259 for line in alines[x1:x2]:
259 for line in alines[x1:x2]:
260 equal += len(line)
260 equal += len(line)
261
261
262 lengths = len(aa) + len(rr)
262 lengths = len(aa) + len(rr)
263 if lengths:
263 if lengths:
264 myscore = equal*2.0 / lengths
264 myscore = equal*2.0 / lengths
265 if myscore >= bestscore:
265 if myscore >= bestscore:
266 bestname, bestscore = r, myscore
266 bestname, bestscore = r, myscore
267 if bestname:
267 if bestname:
268 yield bestname, a, bestscore
268 yield bestname, a, bestscore
269
269
270 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
270 def addremove(repo, pats=[], opts={}, dry_run=None, similarity=None):
271 if dry_run is None:
271 if dry_run is None:
272 dry_run = opts.get('dry_run')
272 dry_run = opts.get('dry_run')
273 if similarity is None:
273 if similarity is None:
274 similarity = float(opts.get('similarity') or 0)
274 similarity = float(opts.get('similarity') or 0)
275 add, remove = [], []
275 add, remove = [], []
276 mapping = {}
276 mapping = {}
277 audit_path = util.path_auditor(repo.root)
277 audit_path = util.path_auditor(repo.root)
278 m = match(repo, pats, opts)
278 m = match(repo, pats, opts)
279 for abs in repo.walk(m):
279 for abs in repo.walk(m):
280 target = repo.wjoin(abs)
280 target = repo.wjoin(abs)
281 good = True
281 good = True
282 try:
282 try:
283 audit_path(abs)
283 audit_path(abs)
284 except:
284 except:
285 good = False
285 good = False
286 rel = m.rel(abs)
286 rel = m.rel(abs)
287 exact = m.exact(abs)
287 exact = m.exact(abs)
288 if good and abs not in repo.dirstate:
288 if good and abs not in repo.dirstate:
289 add.append(abs)
289 add.append(abs)
290 mapping[abs] = rel, m.exact(abs)
290 mapping[abs] = rel, m.exact(abs)
291 if repo.ui.verbose or not exact:
291 if repo.ui.verbose or not exact:
292 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
292 repo.ui.status(_('adding %s\n') % ((pats and rel) or abs))
293 if repo.dirstate[abs] != 'r' and (not good or not util.lexists(target)
293 if repo.dirstate[abs] != 'r' and (not good or not util.lexists(target)
294 or (os.path.isdir(target) and not os.path.islink(target))):
294 or (os.path.isdir(target) and not os.path.islink(target))):
295 remove.append(abs)
295 remove.append(abs)
296 mapping[abs] = rel, exact
296 mapping[abs] = rel, exact
297 if repo.ui.verbose or not exact:
297 if repo.ui.verbose or not exact:
298 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
298 repo.ui.status(_('removing %s\n') % ((pats and rel) or abs))
299 if not dry_run:
299 if not dry_run:
300 repo.remove(remove)
300 repo.remove(remove)
301 repo.add(add)
301 repo.add(add)
302 if similarity > 0:
302 if similarity > 0:
303 for old, new, score in findrenames(repo, add, remove, similarity):
303 for old, new, score in findrenames(repo, add, remove, similarity):
304 oldrel, oldexact = mapping[old]
304 oldrel, oldexact = mapping[old]
305 newrel, newexact = mapping[new]
305 newrel, newexact = mapping[new]
306 if repo.ui.verbose or not oldexact or not newexact:
306 if repo.ui.verbose or not oldexact or not newexact:
307 repo.ui.status(_('recording removal of %s as rename to %s '
307 repo.ui.status(_('recording removal of %s as rename to %s '
308 '(%d%% similar)\n') %
308 '(%d%% similar)\n') %
309 (oldrel, newrel, score * 100))
309 (oldrel, newrel, score * 100))
310 if not dry_run:
310 if not dry_run:
311 repo.copy(old, new)
311 repo.copy(old, new)
312
312
313 def copy(ui, repo, pats, opts, rename=False):
313 def copy(ui, repo, pats, opts, rename=False):
314 # called with the repo lock held
314 # called with the repo lock held
315 #
315 #
316 # hgsep => pathname that uses "/" to separate directories
316 # hgsep => pathname that uses "/" to separate directories
317 # ossep => pathname that uses os.sep to separate directories
317 # ossep => pathname that uses os.sep to separate directories
318 cwd = repo.getcwd()
318 cwd = repo.getcwd()
319 targets = {}
319 targets = {}
320 after = opts.get("after")
320 after = opts.get("after")
321 dryrun = opts.get("dry_run")
321 dryrun = opts.get("dry_run")
322
322
323 def walkpat(pat):
323 def walkpat(pat):
324 srcs = []
324 srcs = []
325 m = match(repo, [pat], opts, globbed=True)
325 m = match(repo, [pat], opts, globbed=True)
326 for abs in repo.walk(m):
326 for abs in repo.walk(m):
327 state = repo.dirstate[abs]
327 state = repo.dirstate[abs]
328 rel = m.rel(abs)
328 rel = m.rel(abs)
329 exact = m.exact(abs)
329 exact = m.exact(abs)
330 if state in '?r':
330 if state in '?r':
331 if exact and state == '?':
331 if exact and state == '?':
332 ui.warn(_('%s: not copying - file is not managed\n') % rel)
332 ui.warn(_('%s: not copying - file is not managed\n') % rel)
333 if exact and state == 'r':
333 if exact and state == 'r':
334 ui.warn(_('%s: not copying - file has been marked for'
334 ui.warn(_('%s: not copying - file has been marked for'
335 ' remove\n') % rel)
335 ' remove\n') % rel)
336 continue
336 continue
337 # abs: hgsep
337 # abs: hgsep
338 # rel: ossep
338 # rel: ossep
339 srcs.append((abs, rel, exact))
339 srcs.append((abs, rel, exact))
340 return srcs
340 return srcs
341
341
342 # abssrc: hgsep
342 # abssrc: hgsep
343 # relsrc: ossep
343 # relsrc: ossep
344 # otarget: ossep
344 # otarget: ossep
345 def copyfile(abssrc, relsrc, otarget, exact):
345 def copyfile(abssrc, relsrc, otarget, exact):
346 abstarget = util.canonpath(repo.root, cwd, otarget)
346 abstarget = util.canonpath(repo.root, cwd, otarget)
347 reltarget = repo.pathto(abstarget, cwd)
347 reltarget = repo.pathto(abstarget, cwd)
348 target = repo.wjoin(abstarget)
348 target = repo.wjoin(abstarget)
349 src = repo.wjoin(abssrc)
349 src = repo.wjoin(abssrc)
350 state = repo.dirstate[abstarget]
350 state = repo.dirstate[abstarget]
351
351
352 # check for collisions
352 # check for collisions
353 prevsrc = targets.get(abstarget)
353 prevsrc = targets.get(abstarget)
354 if prevsrc is not None:
354 if prevsrc is not None:
355 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
355 ui.warn(_('%s: not overwriting - %s collides with %s\n') %
356 (reltarget, repo.pathto(abssrc, cwd),
356 (reltarget, repo.pathto(abssrc, cwd),
357 repo.pathto(prevsrc, cwd)))
357 repo.pathto(prevsrc, cwd)))
358 return
358 return
359
359
360 # check for overwrites
360 # check for overwrites
361 exists = os.path.exists(target)
361 exists = os.path.exists(target)
362 if not after and exists or after and state in 'mn':
362 if not after and exists or after and state in 'mn':
363 if not opts['force']:
363 if not opts['force']:
364 ui.warn(_('%s: not overwriting - file exists\n') %
364 ui.warn(_('%s: not overwriting - file exists\n') %
365 reltarget)
365 reltarget)
366 return
366 return
367
367
368 if after:
368 if after:
369 if not exists:
369 if not exists:
370 return
370 return
371 elif not dryrun:
371 elif not dryrun:
372 try:
372 try:
373 if exists:
373 if exists:
374 os.unlink(target)
374 os.unlink(target)
375 targetdir = os.path.dirname(target) or '.'
375 targetdir = os.path.dirname(target) or '.'
376 if not os.path.isdir(targetdir):
376 if not os.path.isdir(targetdir):
377 os.makedirs(targetdir)
377 os.makedirs(targetdir)
378 util.copyfile(src, target)
378 util.copyfile(src, target)
379 except IOError, inst:
379 except IOError, inst:
380 if inst.errno == errno.ENOENT:
380 if inst.errno == errno.ENOENT:
381 ui.warn(_('%s: deleted in working copy\n') % relsrc)
381 ui.warn(_('%s: deleted in working copy\n') % relsrc)
382 else:
382 else:
383 ui.warn(_('%s: cannot copy - %s\n') %
383 ui.warn(_('%s: cannot copy - %s\n') %
384 (relsrc, inst.strerror))
384 (relsrc, inst.strerror))
385 return True # report a failure
385 return True # report a failure
386
386
387 if ui.verbose or not exact:
387 if ui.verbose or not exact:
388 if rename:
388 if rename:
389 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
389 ui.status(_('moving %s to %s\n') % (relsrc, reltarget))
390 else:
390 else:
391 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
391 ui.status(_('copying %s to %s\n') % (relsrc, reltarget))
392
392
393 targets[abstarget] = abssrc
393 targets[abstarget] = abssrc
394
394
395 # fix up dirstate
395 # fix up dirstate
396 origsrc = repo.dirstate.copied(abssrc) or abssrc
396 origsrc = repo.dirstate.copied(abssrc) or abssrc
397 if abstarget == origsrc: # copying back a copy?
397 if abstarget == origsrc: # copying back a copy?
398 if state not in 'mn' and not dryrun:
398 if state not in 'mn' and not dryrun:
399 repo.dirstate.normallookup(abstarget)
399 repo.dirstate.normallookup(abstarget)
400 else:
400 else:
401 if repo.dirstate[origsrc] == 'a' and origsrc == abssrc:
401 if repo.dirstate[origsrc] == 'a' and origsrc == abssrc:
402 if not ui.quiet:
402 if not ui.quiet:
403 ui.warn(_("%s has not been committed yet, so no copy "
403 ui.warn(_("%s has not been committed yet, so no copy "
404 "data will be stored for %s.\n")
404 "data will be stored for %s.\n")
405 % (repo.pathto(origsrc, cwd), reltarget))
405 % (repo.pathto(origsrc, cwd), reltarget))
406 if repo.dirstate[abstarget] in '?r' and not dryrun:
406 if repo.dirstate[abstarget] in '?r' and not dryrun:
407 repo.add([abstarget])
407 repo.add([abstarget])
408 elif not dryrun:
408 elif not dryrun:
409 repo.copy(origsrc, abstarget)
409 repo.copy(origsrc, abstarget)
410
410
411 if rename and not dryrun:
411 if rename and not dryrun:
412 repo.remove([abssrc], not after)
412 repo.remove([abssrc], not after)
413
413
414 # pat: ossep
414 # pat: ossep
415 # dest ossep
415 # dest ossep
416 # srcs: list of (hgsep, hgsep, ossep, bool)
416 # srcs: list of (hgsep, hgsep, ossep, bool)
417 # return: function that takes hgsep and returns ossep
417 # return: function that takes hgsep and returns ossep
418 def targetpathfn(pat, dest, srcs):
418 def targetpathfn(pat, dest, srcs):
419 if os.path.isdir(pat):
419 if os.path.isdir(pat):
420 abspfx = util.canonpath(repo.root, cwd, pat)
420 abspfx = util.canonpath(repo.root, cwd, pat)
421 abspfx = util.localpath(abspfx)
421 abspfx = util.localpath(abspfx)
422 if destdirexists:
422 if destdirexists:
423 striplen = len(os.path.split(abspfx)[0])
423 striplen = len(os.path.split(abspfx)[0])
424 else:
424 else:
425 striplen = len(abspfx)
425 striplen = len(abspfx)
426 if striplen:
426 if striplen:
427 striplen += len(os.sep)
427 striplen += len(os.sep)
428 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
428 res = lambda p: os.path.join(dest, util.localpath(p)[striplen:])
429 elif destdirexists:
429 elif destdirexists:
430 res = lambda p: os.path.join(dest,
430 res = lambda p: os.path.join(dest,
431 os.path.basename(util.localpath(p)))
431 os.path.basename(util.localpath(p)))
432 else:
432 else:
433 res = lambda p: dest
433 res = lambda p: dest
434 return res
434 return res
435
435
436 # pat: ossep
436 # pat: ossep
437 # dest ossep
437 # dest ossep
438 # srcs: list of (hgsep, hgsep, ossep, bool)
438 # srcs: list of (hgsep, hgsep, ossep, bool)
439 # return: function that takes hgsep and returns ossep
439 # return: function that takes hgsep and returns ossep
440 def targetpathafterfn(pat, dest, srcs):
440 def targetpathafterfn(pat, dest, srcs):
441 if util.patkind(pat, None)[0]:
441 if util.patkind(pat, None)[0]:
442 # a mercurial pattern
442 # a mercurial pattern
443 res = lambda p: os.path.join(dest,
443 res = lambda p: os.path.join(dest,
444 os.path.basename(util.localpath(p)))
444 os.path.basename(util.localpath(p)))
445 else:
445 else:
446 abspfx = util.canonpath(repo.root, cwd, pat)
446 abspfx = util.canonpath(repo.root, cwd, pat)
447 if len(abspfx) < len(srcs[0][0]):
447 if len(abspfx) < len(srcs[0][0]):
448 # A directory. Either the target path contains the last
448 # A directory. Either the target path contains the last
449 # component of the source path or it does not.
449 # component of the source path or it does not.
450 def evalpath(striplen):
450 def evalpath(striplen):
451 score = 0
451 score = 0
452 for s in srcs:
452 for s in srcs:
453 t = os.path.join(dest, util.localpath(s[0])[striplen:])
453 t = os.path.join(dest, util.localpath(s[0])[striplen:])
454 if os.path.exists(t):
454 if os.path.exists(t):
455 score += 1
455 score += 1
456 return score
456 return score
457
457
458 abspfx = util.localpath(abspfx)
458 abspfx = util.localpath(abspfx)
459 striplen = len(abspfx)
459 striplen = len(abspfx)
460 if striplen:
460 if striplen:
461 striplen += len(os.sep)
461 striplen += len(os.sep)
462 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
462 if os.path.isdir(os.path.join(dest, os.path.split(abspfx)[1])):
463 score = evalpath(striplen)
463 score = evalpath(striplen)
464 striplen1 = len(os.path.split(abspfx)[0])
464 striplen1 = len(os.path.split(abspfx)[0])
465 if striplen1:
465 if striplen1:
466 striplen1 += len(os.sep)
466 striplen1 += len(os.sep)
467 if evalpath(striplen1) > score:
467 if evalpath(striplen1) > score:
468 striplen = striplen1
468 striplen = striplen1
469 res = lambda p: os.path.join(dest,
469 res = lambda p: os.path.join(dest,
470 util.localpath(p)[striplen:])
470 util.localpath(p)[striplen:])
471 else:
471 else:
472 # a file
472 # a file
473 if destdirexists:
473 if destdirexists:
474 res = lambda p: os.path.join(dest,
474 res = lambda p: os.path.join(dest,
475 os.path.basename(util.localpath(p)))
475 os.path.basename(util.localpath(p)))
476 else:
476 else:
477 res = lambda p: dest
477 res = lambda p: dest
478 return res
478 return res
479
479
480
480
481 pats = util.expand_glob(pats)
481 pats = util.expand_glob(pats)
482 if not pats:
482 if not pats:
483 raise util.Abort(_('no source or destination specified'))
483 raise util.Abort(_('no source or destination specified'))
484 if len(pats) == 1:
484 if len(pats) == 1:
485 raise util.Abort(_('no destination specified'))
485 raise util.Abort(_('no destination specified'))
486 dest = pats.pop()
486 dest = pats.pop()
487 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
487 destdirexists = os.path.isdir(dest) and not os.path.islink(dest)
488 if not destdirexists:
488 if not destdirexists:
489 if len(pats) > 1 or util.patkind(pats[0], None)[0]:
489 if len(pats) > 1 or util.patkind(pats[0], None)[0]:
490 raise util.Abort(_('with multiple sources, destination must be an '
490 raise util.Abort(_('with multiple sources, destination must be an '
491 'existing directory'))
491 'existing directory'))
492 if util.endswithsep(dest):
492 if util.endswithsep(dest):
493 raise util.Abort(_('destination %s is not a directory') % dest)
493 raise util.Abort(_('destination %s is not a directory') % dest)
494
494
495 tfn = targetpathfn
495 tfn = targetpathfn
496 if after:
496 if after:
497 tfn = targetpathafterfn
497 tfn = targetpathafterfn
498 copylist = []
498 copylist = []
499 for pat in pats:
499 for pat in pats:
500 srcs = walkpat(pat)
500 srcs = walkpat(pat)
501 if not srcs:
501 if not srcs:
502 continue
502 continue
503 copylist.append((tfn(pat, dest, srcs), srcs))
503 copylist.append((tfn(pat, dest, srcs), srcs))
504 if not copylist:
504 if not copylist:
505 raise util.Abort(_('no files to copy'))
505 raise util.Abort(_('no files to copy'))
506
506
507 errors = 0
507 errors = 0
508 for targetpath, srcs in copylist:
508 for targetpath, srcs in copylist:
509 for abssrc, relsrc, exact in srcs:
509 for abssrc, relsrc, exact in srcs:
510 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
510 if copyfile(abssrc, relsrc, targetpath(abssrc), exact):
511 errors += 1
511 errors += 1
512
512
513 if errors:
513 if errors:
514 ui.warn(_('(consider using --after)\n'))
514 ui.warn(_('(consider using --after)\n'))
515
515
516 return errors
516 return errors
517
517
def service(opts, parentfn=None, initfn=None, runfn=None):
    '''Run a command as a service.'''

    # First pass of daemonization: re-spawn ourselves with
    # --daemon-pipefds so the detached child can signal readiness back
    # through the pipe before the parent returns/exits.
    if opts['daemon'] and not opts['daemon_pipefds']:
        rfd, wfd = os.pipe()
        args = sys.argv[:]
        args.append('--daemon-pipefds=%d,%d' % (rfd, wfd))
        # Don't pass --cwd to the child process, because we've already
        # changed directory.
        for i in xrange(1,len(args)):
            if args[i].startswith('--cwd='):
                # --cwd=DIR form: one argument to drop
                del args[i]
                break
            elif args[i].startswith('--cwd'):
                # --cwd DIR form: drop the flag and its value
                del args[i:i+2]
                break
        pid = os.spawnvp(os.P_NOWAIT | getattr(os, 'P_DETACH', 0),
                         args[0], args)
        os.close(wfd)
        # block until the child writes a byte, i.e. it has initialized
        os.read(rfd, 1)
        if parentfn:
            return parentfn(pid)
        else:
            # _exit: skip atexit handlers, the child owns cleanup now
            os._exit(0)

    if initfn:
        initfn()

    if opts['pid_file']:
        fp = open(opts['pid_file'], 'w')
        fp.write(str(os.getpid()) + '\n')
        fp.close()

    # Second pass (running as the spawned child): detach from the
    # session, tell the parent we are ready, then point stdio at the
    # null device.
    if opts['daemon_pipefds']:
        rfd, wfd = [int(x) for x in opts['daemon_pipefds'].split(',')]
        os.close(rfd)
        try:
            os.setsid()
        except AttributeError:
            # setsid is not available on this platform (e.g. Windows)
            pass
        # 'y' is the readiness byte the parent is blocked reading
        os.write(wfd, 'y')
        os.close(wfd)
        sys.stdout.flush()
        sys.stderr.flush()
        fd = os.open(util.nulldev, os.O_RDWR)
        if fd != 0: os.dup2(fd, 0)
        if fd != 1: os.dup2(fd, 1)
        if fd != 2: os.dup2(fd, 2)
        if fd not in (0, 1, 2): os.close(fd)

    if runfn:
        return runfn()
570
570
class changeset_printer(object):
    '''show changeset information when templating not requested.'''

    def __init__(self, ui, repo, patch, diffopts, buffered):
        self.ui = ui
        self.repo = repo
        # when buffered, show() collects per-revision output in
        # self.hunk and flush() writes it out later
        self.buffered = buffered
        # match object selecting files to diff, or False for no patch
        self.patch = patch
        self.diffopts = diffopts
        self.header = {}        # rev -> rendered header (filled by subclass)
        self.hunk = {}          # rev -> buffered changeset text
        self.lastheader = None  # last header written, to suppress repeats

    def flush(self, rev):
        """Write any buffered output for rev; return 1 if a hunk was shown."""
        if rev in self.header:
            h = self.header[rev]
            # only emit the header when it differs from the previous one
            if h != self.lastheader:
                self.lastheader = h
                self.ui.write(h)
            del self.header[rev]
        if rev in self.hunk:
            self.ui.write(self.hunk[rev])
            del self.hunk[rev]
            return 1
        return 0

    def show(self, ctx, copies=(), **props):
        # in buffered mode, capture the rendered changeset for flush()
        if self.buffered:
            self.ui.pushbuffer()
            self._show(ctx, copies, props)
            self.hunk[ctx.rev()] = self.ui.popbuffer()
        else:
            self._show(ctx, copies, props)

    def _show(self, ctx, copies, props):
        '''show a single changeset or file revision'''
        changenode = ctx.node()
        rev = ctx.rev()

        # quiet mode: just "rev:shortnode"
        if self.ui.quiet:
            self.ui.write("%d:%s\n" % (rev, short(changenode)))
            return

        log = self.repo.changelog
        changes = log.read(changenode)
        date = util.datestr(changes[2])
        extra = changes[5]
        branch = extra.get("branch")

        # full hashes in debug mode, short form otherwise
        hexfunc = self.ui.debugflag and hex or short

        parents = [(p, hexfunc(log.node(p)))
                   for p in self._meaningful_parentrevs(log, rev)]

        self.ui.write(_("changeset: %d:%s\n") % (rev, hexfunc(changenode)))

        # don't show the default branch name
        if branch != 'default':
            branch = encoding.tolocal(branch)
            self.ui.write(_("branch: %s\n") % branch)
        for tag in self.repo.nodetags(changenode):
            self.ui.write(_("tag: %s\n") % tag)
        for parent in parents:
            self.ui.write(_("parent: %d:%s\n") % parent)

        if self.ui.debugflag:
            self.ui.write(_("manifest: %d:%s\n") %
                          (self.repo.manifest.rev(changes[0]), hex(changes[0])))
        self.ui.write(_("user: %s\n") % changes[1])
        self.ui.write(_("date: %s\n") % date)

        if self.ui.debugflag:
            # debug: list modified/added/removed files on separate lines
            files = self.repo.status(log.parents(changenode)[0], changenode)[:3]
            for key, value in zip([_("files:"), _("files+:"), _("files-:")],
                                  files):
                if value:
                    self.ui.write("%-12s %s\n" % (key, " ".join(value)))
        elif changes[3] and self.ui.verbose:
            self.ui.write(_("files: %s\n") % " ".join(changes[3]))
        if copies and self.ui.verbose:
            copies = ['%s (%s)' % c for c in copies]
            self.ui.write(_("copies: %s\n") % ' '.join(copies))

        if extra and self.ui.debugflag:
            for key, value in util.sort(extra.items()):
                self.ui.write(_("extra: %s=%s\n")
                              % (key, value.encode('string_escape')))

        description = changes[4].strip()
        if description:
            if self.ui.verbose:
                self.ui.write(_("description:\n"))
                self.ui.write(description)
                self.ui.write("\n\n")
            else:
                # non-verbose: only the first line of the description
                self.ui.write(_("summary: %s\n") %
                              description.splitlines()[0])
        self.ui.write("\n")

        self.showpatch(changenode)

    def showpatch(self, node):
        # emit a diff against the first parent when patch output is enabled
        if self.patch:
            prev = self.repo.changelog.parents(node)[0]
            chunks = patch.diff(self.repo, prev, node, match=self.patch,
                                opts=patch.diffopts(self.ui, self.diffopts))
            for chunk in chunks:
                self.ui.write(chunk)
            self.ui.write("\n")

    def _meaningful_parentrevs(self, log, rev):
        """Return list of meaningful (or all if debug) parentrevs for rev.

        For merges (two non-nullrev revisions) both parents are meaningful.
        Otherwise the first parent revision is considered meaningful if it
        is not the preceding revision.
        """
        parents = log.parentrevs(rev)
        if not self.ui.debugflag and parents[1] == nullrev:
            if parents[0] >= rev - 1:
                # parent is the immediately preceding rev: implied, omit
                parents = []
            else:
                parents = [parents[0]]
        return parents
695
695
696
696
class changeset_templater(changeset_printer):
    '''format changeset information.'''

    def __init__(self, ui, repo, patch, diffopts, mapfile, buffered):
        changeset_printer.__init__(self, ui, repo, patch, diffopts, buffered)
        filters = templatefilters.filters.copy()
        # full nodes in debug mode, 12-character short form otherwise
        filters['formatnode'] = (ui.debugflag and (lambda x: x)
                                 or (lambda x: x[:12]))
        self.t = templater.templater(mapfile, filters,
                                     cache={
                                         'parent': '{rev}:{node|formatnode} ',
                                         'manifest': '{rev}:{node|formatnode}',
                                         'filecopy': '{name} ({source})'})

    def use_template(self, t):
        '''set template string to use'''
        self.t.cache['changeset'] = t

    def _meaningful_parentrevs(self, ctx):
        """Return list of meaningful (or all if debug) parentrevs for rev.
        """
        parents = ctx.parents()
        if len(parents) > 1:
            return parents
        if self.ui.debugflag:
            # debug: always show two parents, padding with the null rev
            return [parents[0], self.repo['null']]
        if parents[0].rev() >= ctx.rev() - 1:
            # parent is the immediately preceding rev: implied, omit
            return []
        return parents

    def _show(self, ctx, copies, props):
        '''show a single changeset or file revision'''

        def showlist(name, values, plural=None, **args):
            '''expand set of values.
            name is name of key in template map.
            values is list of strings or dicts.
            plural is plural of name, if not simply name + 's'.

            expansion works like this, given name 'foo'.

            if values is empty, expand 'no_foos'.

            if 'foo' not in template map, return values as a string,
            joined by space.

            expand 'start_foos'.

            for each value, expand 'foo'. if 'last_foo' in template
            map, expand it instead of 'foo' for last key.

            expand 'end_foos'.
            '''
            if plural: names = plural
            else: names = name + 's'
            if not values:
                noname = 'no_' + names
                if noname in self.t:
                    yield self.t(noname, **args)
                return
            if name not in self.t:
                # no template for this key: emit the raw values
                if isinstance(values[0], str):
                    yield ' '.join(values)
                else:
                    for v in values:
                        yield dict(v, **args)
                return
            startname = 'start_' + names
            if startname in self.t:
                yield self.t(startname, **args)
            vargs = args.copy()
            def one(v, tag=name):
                # merge v into the template arguments; v may be a
                # mapping, an iterable of pairs, or a plain value
                try:
                    vargs.update(v)
                except (AttributeError, ValueError):
                    try:
                        for a, b in v:
                            vargs[a] = b
                    except ValueError:
                        vargs[name] = v
                return self.t(tag, **vargs)
            lastname = 'last_' + name
            if lastname in self.t:
                # hold the final value back so it expands 'last_foo'
                last = values.pop()
            else:
                last = None
            for v in values:
                yield one(v)
            if last is not None:
                yield one(last, tag=lastname)
            endname = 'end_' + names
            if endname in self.t:
                yield self.t(endname, **args)

        def showbranches(**args):
            # the default branch name is not shown
            branch = ctx.branch()
            if branch != 'default':
                branch = encoding.tolocal(branch)
                return showlist('branch', [branch], plural='branches', **args)

        def showparents(**args):
            parents = [[('rev', p.rev()), ('node', p.hex())]
                       for p in self._meaningful_parentrevs(ctx)]
            return showlist('parent', parents, **args)

        def showtags(**args):
            return showlist('tag', ctx.tags(), **args)

        def showextras(**args):
            for key, value in util.sort(ctx.extra().items()):
                args = args.copy()
                args.update(dict(key=key, value=value))
                yield self.t('extra', **args)

        def showcopies(**args):
            c = [{'name': x[0], 'source': x[1]} for x in copies]
            return showlist('file_copy', c, plural='file_copies', **args)

        # lazily computed (modified, added, removed) status vs. first parent
        files = []
        def getfiles():
            if not files:
                files[:] = self.repo.status(ctx.parents()[0].node(),
                                            ctx.node())[:3]
            return files
        def showfiles(**args):
            return showlist('file', ctx.files(), **args)
        def showmods(**args):
            return showlist('file_mod', getfiles()[0], **args)
        def showadds(**args):
            return showlist('file_add', getfiles()[1], **args)
        def showdels(**args):
            return showlist('file_del', getfiles()[2], **args)
        def showmanifest(**args):
            args = args.copy()
            args.update(dict(rev=self.repo.manifest.rev(ctx.changeset()[0]),
                             node=hex(ctx.changeset()[0])))
            return self.t('manifest', **args)

        def showdiffstat(**args):
            # summarize the diff as "files: +added/-removed" line counts
            diff = patch.diff(self.repo, ctx.parents()[0].node(), ctx.node())
            files, adds, removes = 0, 0, 0
            for i in patch.diffstatdata(util.iterlines(diff)):
                files += 1
                adds += i[1]
                removes += i[2]
            return '%s: +%s/-%s' % (files, adds, removes)

        # default template keywords; caller-supplied props take
        # precedence below only for keys not present here
        defprops = {
            'author': ctx.user(),
            'branches': showbranches,
            'date': ctx.date(),
            'desc': ctx.description().strip(),
            'file_adds': showadds,
            'file_dels': showdels,
            'file_mods': showmods,
            'files': showfiles,
            'file_copies': showcopies,
            'manifest': showmanifest,
            'node': ctx.hex(),
            'parents': showparents,
            'rev': ctx.rev(),
            'tags': showtags,
            'extras': showextras,
            'diffstat': showdiffstat,
            }
        props = props.copy()
        props.update(defprops)

        # find correct templates for current mode

        # later entries override earlier ones, so debug beats quiet
        # beats verbose beats the plain template
        tmplmodes = [
            (True, None),
            (self.ui.verbose, 'verbose'),
            (self.ui.quiet, 'quiet'),
            (self.ui.debugflag, 'debug'),
            ]

        types = {'header': '', 'changeset': 'changeset'}
        for mode, postfix in tmplmodes:
            for type in types:
                cur = postfix and ('%s_%s' % (type, postfix)) or type
                if mode and cur in self.t:
                    types[type] = cur

        try:

            # write header
            if types['header']:
                h = templater.stringify(self.t(types['header'], **props))
                if self.buffered:
                    self.header[ctx.rev()] = h
                else:
                    self.ui.write(h)

            # write changeset metadata, then patch if requested
            key = types['changeset']
            self.ui.write(templater.stringify(self.t(key, **props)))
            self.showpatch(ctx.node())

        except KeyError, inst:
            # template referenced an unknown keyword
            msg = _("%s: no key named '%s'")
            raise util.Abort(msg % (self.t.mapfile, inst.args[0]))
        except SyntaxError, inst:
            raise util.Abort(_('%s: %s') % (self.t.mapfile, inst.args[0]))
901
901
902 def show_changeset(ui, repo, opts, buffered=False, matchfn=False):
902 def show_changeset(ui, repo, opts, buffered=False, matchfn=False):
903 """show one changeset using template or regular display.
903 """show one changeset using template or regular display.
904
904
905 Display format will be the first non-empty hit of:
905 Display format will be the first non-empty hit of:
906 1. option 'template'
906 1. option 'template'
907 2. option 'style'
907 2. option 'style'
908 3. [ui] setting 'logtemplate'
908 3. [ui] setting 'logtemplate'
909 4. [ui] setting 'style'
909 4. [ui] setting 'style'
910 If all of these values are either the unset or the empty string,
910 If all of these values are either the unset or the empty string,
911 regular display via changeset_printer() is done.
911 regular display via changeset_printer() is done.
912 """
912 """
913 # options
913 # options
914 patch = False
914 patch = False
915 if opts.get('patch'):
915 if opts.get('patch'):
916 patch = matchfn or matchall(repo)
916 patch = matchfn or matchall(repo)
917
917
918 tmpl = opts.get('template')
918 tmpl = opts.get('template')
919 style = None
919 style = None
920 if tmpl:
920 if tmpl:
921 tmpl = templater.parsestring(tmpl, quoted=False)
921 tmpl = templater.parsestring(tmpl, quoted=False)
922 else:
922 else:
923 style = opts.get('style')
923 style = opts.get('style')
924
924
925 # ui settings
925 # ui settings
926 if not (tmpl or style):
926 if not (tmpl or style):
927 tmpl = ui.config('ui', 'logtemplate')
927 tmpl = ui.config('ui', 'logtemplate')
928 if tmpl:
928 if tmpl:
929 tmpl = templater.parsestring(tmpl)
929 tmpl = templater.parsestring(tmpl)
930 else:
930 else:
931 style = ui.config('ui', 'style')
931 style = ui.config('ui', 'style')
932
932
933 if not (tmpl or style):
933 if not (tmpl or style):
934 return changeset_printer(ui, repo, patch, opts, buffered)
934 return changeset_printer(ui, repo, patch, opts, buffered)
935
935
936 mapfile = None
936 mapfile = None
937 if style and not tmpl:
937 if style and not tmpl:
938 mapfile = style
938 mapfile = style
939 if not os.path.split(mapfile)[0]:
939 if not os.path.split(mapfile)[0]:
940 mapname = (templater.templatepath('map-cmdline.' + mapfile)
940 mapname = (templater.templatepath('map-cmdline.' + mapfile)
941 or templater.templatepath(mapfile))
941 or templater.templatepath(mapfile))
942 if mapname: mapfile = mapname
942 if mapname: mapfile = mapname
943
943
944 try:
944 try:
945 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
945 t = changeset_templater(ui, repo, patch, opts, mapfile, buffered)
946 except SyntaxError, inst:
946 except SyntaxError, inst:
947 raise util.Abort(inst.args[0])
947 raise util.Abort(inst.args[0])
948 if tmpl: t.use_template(tmpl)
948 if tmpl: t.use_template(tmpl)
949 return t
949 return t
950
950
def finddate(ui, repo, date):
    """Return the tipmost changeset that matches the given date spec.

    Walks the history once, remembering every revision whose date
    matches, and returns (as a string) the first match reached during
    the in-order pass.  Aborts when nothing matches.
    """
    datematch = util.matchdate(date)
    changeset = util.cachefunc(lambda r: repo[r].changeset())
    changeiter, matchfn = walkchangerevs(ui, repo, [], changeset,
                                         {'rev': None})
    matched = {}
    for state, rev, fns in changeiter:
        if state == 'add':
            # changeset()[2] is the (timestamp, tzoffset) date field
            entry = changeset(rev)[2]
            if datematch(entry[0]):
                matched[rev] = entry
        elif state == 'iter' and rev in matched:
            ui.status(_("Found revision %s from %s\n") %
                      (rev, util.datestr(matched[rev])))
            return str(rev)

    raise util.Abort(_("revision matching date not found"))
969
969
970 def walkchangerevs(ui, repo, pats, change, opts):
970 def walkchangerevs(ui, repo, pats, change, opts):
971 '''Iterate over files and the revs in which they changed.
971 '''Iterate over files and the revs in which they changed.
972
972
973 Callers most commonly need to iterate backwards over the history
973 Callers most commonly need to iterate backwards over the history
974 in which they are interested. Doing so has awful (quadratic-looking)
974 in which they are interested. Doing so has awful (quadratic-looking)
975 performance, so we use iterators in a "windowed" way.
975 performance, so we use iterators in a "windowed" way.
976
976
977 We walk a window of revisions in the desired order. Within the
977 We walk a window of revisions in the desired order. Within the
978 window, we first walk forwards to gather data, then in the desired
978 window, we first walk forwards to gather data, then in the desired
979 order (usually backwards) to display it.
979 order (usually backwards) to display it.
980
980
981 This function returns an (iterator, matchfn) tuple. The iterator
981 This function returns an (iterator, matchfn) tuple. The iterator
982 yields 3-tuples. They will be of one of the following forms:
982 yields 3-tuples. They will be of one of the following forms:
983
983
984 "window", incrementing, lastrev: stepping through a window,
984 "window", incrementing, lastrev: stepping through a window,
985 positive if walking forwards through revs, last rev in the
985 positive if walking forwards through revs, last rev in the
986 sequence iterated over - use to reset state for the current window
986 sequence iterated over - use to reset state for the current window
987
987
988 "add", rev, fns: out-of-order traversal of the given file names
988 "add", rev, fns: out-of-order traversal of the given file names
989 fns, which changed during revision rev - use to gather data for
989 fns, which changed during revision rev - use to gather data for
990 possible display
990 possible display
991
991
992 "iter", rev, None: in-order traversal of the revs earlier iterated
992 "iter", rev, None: in-order traversal of the revs earlier iterated
993 over with "add" - use to display data'''
993 over with "add" - use to display data'''
994
994
995 def increasing_windows(start, end, windowsize=8, sizelimit=512):
995 def increasing_windows(start, end, windowsize=8, sizelimit=512):
996 if start < end:
996 if start < end:
997 while start < end:
997 while start < end:
998 yield start, min(windowsize, end-start)
998 yield start, min(windowsize, end-start)
999 start += windowsize
999 start += windowsize
1000 if windowsize < sizelimit:
1000 if windowsize < sizelimit:
1001 windowsize *= 2
1001 windowsize *= 2
1002 else:
1002 else:
1003 while start > end:
1003 while start > end:
1004 yield start, min(windowsize, start-end-1)
1004 yield start, min(windowsize, start-end-1)
1005 start -= windowsize
1005 start -= windowsize
1006 if windowsize < sizelimit:
1006 if windowsize < sizelimit:
1007 windowsize *= 2
1007 windowsize *= 2
1008
1008
1009 m = match(repo, pats, opts)
1009 m = match(repo, pats, opts)
1010 follow = opts.get('follow') or opts.get('follow_first')
1010 follow = opts.get('follow') or opts.get('follow_first')
1011
1011
1012 if not len(repo):
1012 if not len(repo):
1013 return [], m
1013 return [], m
1014
1014
1015 if follow:
1015 if follow:
1016 defrange = '%s:0' % repo['.'].rev()
1016 defrange = '%s:0' % repo['.'].rev()
1017 else:
1017 else:
1018 defrange = '-1:0'
1018 defrange = '-1:0'
1019 revs = revrange(repo, opts['rev'] or [defrange])
1019 revs = revrange(repo, opts['rev'] or [defrange])
1020 wanted = {}
1020 wanted = set()
1021 slowpath = m.anypats() or (m.files() and opts.get('removed'))
1021 slowpath = m.anypats() or (m.files() and opts.get('removed'))
1022 fncache = {}
1022 fncache = {}
1023
1023
1024 if not slowpath and not m.files():
1024 if not slowpath and not m.files():
1025 # No files, no patterns. Display all revs.
1025 # No files, no patterns. Display all revs.
1026 wanted = dict.fromkeys(revs)
1026 wanted = set(revs)
1027 copies = []
1027 copies = []
1028 if not slowpath:
1028 if not slowpath:
1029 # Only files, no patterns. Check the history of each file.
1029 # Only files, no patterns. Check the history of each file.
1030 def filerevgen(filelog, node):
1030 def filerevgen(filelog, node):
1031 cl_count = len(repo)
1031 cl_count = len(repo)
1032 if node is None:
1032 if node is None:
1033 last = len(filelog) - 1
1033 last = len(filelog) - 1
1034 else:
1034 else:
1035 last = filelog.rev(node)
1035 last = filelog.rev(node)
1036 for i, window in increasing_windows(last, nullrev):
1036 for i, window in increasing_windows(last, nullrev):
1037 revs = []
1037 revs = []
1038 for j in xrange(i - window, i + 1):
1038 for j in xrange(i - window, i + 1):
1039 n = filelog.node(j)
1039 n = filelog.node(j)
1040 revs.append((filelog.linkrev(j),
1040 revs.append((filelog.linkrev(j),
1041 follow and filelog.renamed(n)))
1041 follow and filelog.renamed(n)))
1042 revs.reverse()
1042 revs.reverse()
1043 for rev in revs:
1043 for rev in revs:
1044 # only yield rev for which we have the changelog, it can
1044 # only yield rev for which we have the changelog, it can
1045 # happen while doing "hg log" during a pull or commit
1045 # happen while doing "hg log" during a pull or commit
1046 if rev[0] < cl_count:
1046 if rev[0] < cl_count:
1047 yield rev
1047 yield rev
1048 def iterfiles():
1048 def iterfiles():
1049 for filename in m.files():
1049 for filename in m.files():
1050 yield filename, None
1050 yield filename, None
1051 for filename_node in copies:
1051 for filename_node in copies:
1052 yield filename_node
1052 yield filename_node
1053 minrev, maxrev = min(revs), max(revs)
1053 minrev, maxrev = min(revs), max(revs)
1054 for file_, node in iterfiles():
1054 for file_, node in iterfiles():
1055 filelog = repo.file(file_)
1055 filelog = repo.file(file_)
1056 if not len(filelog):
1056 if not len(filelog):
1057 if node is None:
1057 if node is None:
1058 # A zero count may be a directory or deleted file, so
1058 # A zero count may be a directory or deleted file, so
1059 # try to find matching entries on the slow path.
1059 # try to find matching entries on the slow path.
1060 if follow:
1060 if follow:
1061 raise util.Abort(_('cannot follow nonexistent file: "%s"') % file_)
1061 raise util.Abort(_('cannot follow nonexistent file: "%s"') % file_)
1062 slowpath = True
1062 slowpath = True
1063 break
1063 break
1064 else:
1064 else:
1065 ui.warn(_('%s:%s copy source revision cannot be found!\n')
1065 ui.warn(_('%s:%s copy source revision cannot be found!\n')
1066 % (file_, short(node)))
1066 % (file_, short(node)))
1067 continue
1067 continue
1068 for rev, copied in filerevgen(filelog, node):
1068 for rev, copied in filerevgen(filelog, node):
1069 if rev <= maxrev:
1069 if rev <= maxrev:
1070 if rev < minrev:
1070 if rev < minrev:
1071 break
1071 break
1072 fncache.setdefault(rev, [])
1072 fncache.setdefault(rev, [])
1073 fncache[rev].append(file_)
1073 fncache[rev].append(file_)
1074 wanted[rev] = 1
1074 wanted.add(rev)
1075 if follow and copied:
1075 if follow and copied:
1076 copies.append(copied)
1076 copies.append(copied)
1077 if slowpath:
1077 if slowpath:
1078 if follow:
1078 if follow:
1079 raise util.Abort(_('can only follow copies/renames for explicit '
1079 raise util.Abort(_('can only follow copies/renames for explicit '
1080 'file names'))
1080 'file names'))
1081
1081
1082 # The slow path checks files modified in every changeset.
1082 # The slow path checks files modified in every changeset.
1083 def changerevgen():
1083 def changerevgen():
1084 for i, window in increasing_windows(len(repo) - 1, nullrev):
1084 for i, window in increasing_windows(len(repo) - 1, nullrev):
1085 for j in xrange(i - window, i + 1):
1085 for j in xrange(i - window, i + 1):
1086 yield j, change(j)[3]
1086 yield j, change(j)[3]
1087
1087
1088 for rev, changefiles in changerevgen():
1088 for rev, changefiles in changerevgen():
1089 matches = filter(m, changefiles)
1089 matches = filter(m, changefiles)
1090 if matches:
1090 if matches:
1091 fncache[rev] = matches
1091 fncache[rev] = matches
1092 wanted[rev] = 1
1092 wanted.add(rev)
1093
1093
1094 class followfilter:
1094 class followfilter:
1095 def __init__(self, onlyfirst=False):
1095 def __init__(self, onlyfirst=False):
1096 self.startrev = nullrev
1096 self.startrev = nullrev
1097 self.roots = []
1097 self.roots = []
1098 self.onlyfirst = onlyfirst
1098 self.onlyfirst = onlyfirst
1099
1099
1100 def match(self, rev):
1100 def match(self, rev):
1101 def realparents(rev):
1101 def realparents(rev):
1102 if self.onlyfirst:
1102 if self.onlyfirst:
1103 return repo.changelog.parentrevs(rev)[0:1]
1103 return repo.changelog.parentrevs(rev)[0:1]
1104 else:
1104 else:
1105 return filter(lambda x: x != nullrev,
1105 return filter(lambda x: x != nullrev,
1106 repo.changelog.parentrevs(rev))
1106 repo.changelog.parentrevs(rev))
1107
1107
1108 if self.startrev == nullrev:
1108 if self.startrev == nullrev:
1109 self.startrev = rev
1109 self.startrev = rev
1110 return True
1110 return True
1111
1111
1112 if rev > self.startrev:
1112 if rev > self.startrev:
1113 # forward: all descendants
1113 # forward: all descendants
1114 if not self.roots:
1114 if not self.roots:
1115 self.roots.append(self.startrev)
1115 self.roots.append(self.startrev)
1116 for parent in realparents(rev):
1116 for parent in realparents(rev):
1117 if parent in self.roots:
1117 if parent in self.roots:
1118 self.roots.append(rev)
1118 self.roots.append(rev)
1119 return True
1119 return True
1120 else:
1120 else:
1121 # backwards: all parents
1121 # backwards: all parents
1122 if not self.roots:
1122 if not self.roots:
1123 self.roots.extend(realparents(self.startrev))
1123 self.roots.extend(realparents(self.startrev))
1124 if rev in self.roots:
1124 if rev in self.roots:
1125 self.roots.remove(rev)
1125 self.roots.remove(rev)
1126 self.roots.extend(realparents(rev))
1126 self.roots.extend(realparents(rev))
1127 return True
1127 return True
1128
1128
1129 return False
1129 return False
1130
1130
1131 # it might be worthwhile to do this in the iterator if the rev range
1131 # it might be worthwhile to do this in the iterator if the rev range
1132 # is descending and the prune args are all within that range
1132 # is descending and the prune args are all within that range
1133 for rev in opts.get('prune', ()):
1133 for rev in opts.get('prune', ()):
1134 rev = repo.changelog.rev(repo.lookup(rev))
1134 rev = repo.changelog.rev(repo.lookup(rev))
1135 ff = followfilter()
1135 ff = followfilter()
1136 stop = min(revs[0], revs[-1])
1136 stop = min(revs[0], revs[-1])
1137 for x in xrange(rev, stop-1, -1):
1137 for x in xrange(rev, stop-1, -1):
1138 if ff.match(x) and x in wanted:
1138 if ff.match(x):
1139 del wanted[x]
1139 wanted.discard(x)
1140
1140
1141 def iterate():
1141 def iterate():
1142 if follow and not m.files():
1142 if follow and not m.files():
1143 ff = followfilter(onlyfirst=opts.get('follow_first'))
1143 ff = followfilter(onlyfirst=opts.get('follow_first'))
1144 def want(rev):
1144 def want(rev):
1145 return ff.match(rev) and rev in wanted
1145 return ff.match(rev) and rev in wanted
1146 else:
1146 else:
1147 def want(rev):
1147 def want(rev):
1148 return rev in wanted
1148 return rev in wanted
1149
1149
1150 for i, window in increasing_windows(0, len(revs)):
1150 for i, window in increasing_windows(0, len(revs)):
1151 yield 'window', revs[0] < revs[-1], revs[-1]
1151 yield 'window', revs[0] < revs[-1], revs[-1]
1152 nrevs = [rev for rev in revs[i:i+window] if want(rev)]
1152 nrevs = [rev for rev in revs[i:i+window] if want(rev)]
1153 for rev in util.sort(list(nrevs)):
1153 for rev in util.sort(list(nrevs)):
1154 fns = fncache.get(rev)
1154 fns = fncache.get(rev)
1155 if not fns:
1155 if not fns:
1156 def fns_generator():
1156 def fns_generator():
1157 for f in change(rev)[3]:
1157 for f in change(rev)[3]:
1158 if m(f):
1158 if m(f):
1159 yield f
1159 yield f
1160 fns = fns_generator()
1160 fns = fns_generator()
1161 yield 'add', rev, fns
1161 yield 'add', rev, fns
1162 for rev in nrevs:
1162 for rev in nrevs:
1163 yield 'iter', rev, None
1163 yield 'iter', rev, None
1164 return iterate(), m
1164 return iterate(), m
1165
1165
1166 def commit(ui, repo, commitfunc, pats, opts):
1166 def commit(ui, repo, commitfunc, pats, opts):
1167 '''commit the specified files or all outstanding changes'''
1167 '''commit the specified files or all outstanding changes'''
1168 date = opts.get('date')
1168 date = opts.get('date')
1169 if date:
1169 if date:
1170 opts['date'] = util.parsedate(date)
1170 opts['date'] = util.parsedate(date)
1171 message = logmessage(opts)
1171 message = logmessage(opts)
1172
1172
1173 # extract addremove carefully -- this function can be called from a command
1173 # extract addremove carefully -- this function can be called from a command
1174 # that doesn't support addremove
1174 # that doesn't support addremove
1175 if opts.get('addremove'):
1175 if opts.get('addremove'):
1176 addremove(repo, pats, opts)
1176 addremove(repo, pats, opts)
1177
1177
1178 m = match(repo, pats, opts)
1178 m = match(repo, pats, opts)
1179 if pats:
1179 if pats:
1180 modified, added, removed = repo.status(match=m)[:3]
1180 modified, added, removed = repo.status(match=m)[:3]
1181 files = util.sort(modified + added + removed)
1181 files = util.sort(modified + added + removed)
1182
1182
1183 def is_dir(f):
1183 def is_dir(f):
1184 name = f + '/'
1184 name = f + '/'
1185 i = bisect.bisect(files, name)
1185 i = bisect.bisect(files, name)
1186 return i < len(files) and files[i].startswith(name)
1186 return i < len(files) and files[i].startswith(name)
1187
1187
1188 for f in m.files():
1188 for f in m.files():
1189 if f == '.':
1189 if f == '.':
1190 continue
1190 continue
1191 if f not in files:
1191 if f not in files:
1192 rf = repo.wjoin(f)
1192 rf = repo.wjoin(f)
1193 rel = repo.pathto(f)
1193 rel = repo.pathto(f)
1194 try:
1194 try:
1195 mode = os.lstat(rf)[stat.ST_MODE]
1195 mode = os.lstat(rf)[stat.ST_MODE]
1196 except OSError:
1196 except OSError:
1197 if is_dir(f): # deleted directory ?
1197 if is_dir(f): # deleted directory ?
1198 continue
1198 continue
1199 raise util.Abort(_("file %s not found!") % rel)
1199 raise util.Abort(_("file %s not found!") % rel)
1200 if stat.S_ISDIR(mode):
1200 if stat.S_ISDIR(mode):
1201 if not is_dir(f):
1201 if not is_dir(f):
1202 raise util.Abort(_("no match under directory %s!")
1202 raise util.Abort(_("no match under directory %s!")
1203 % rel)
1203 % rel)
1204 elif not (stat.S_ISREG(mode) or stat.S_ISLNK(mode)):
1204 elif not (stat.S_ISREG(mode) or stat.S_ISLNK(mode)):
1205 raise util.Abort(_("can't commit %s: "
1205 raise util.Abort(_("can't commit %s: "
1206 "unsupported file type!") % rel)
1206 "unsupported file type!") % rel)
1207 elif f not in repo.dirstate:
1207 elif f not in repo.dirstate:
1208 raise util.Abort(_("file %s not tracked!") % rel)
1208 raise util.Abort(_("file %s not tracked!") % rel)
1209 m = matchfiles(repo, files)
1209 m = matchfiles(repo, files)
1210 try:
1210 try:
1211 return commitfunc(ui, repo, message, m, opts)
1211 return commitfunc(ui, repo, message, m, opts)
1212 except ValueError, inst:
1212 except ValueError, inst:
1213 raise util.Abort(str(inst))
1213 raise util.Abort(str(inst))
@@ -1,3469 +1,3469 b''
1 # commands.py - command processing for mercurial
1 # commands.py - command processing for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import hex, nullid, nullrev, short
8 from node import hex, nullid, nullrev, short
9 from lock import release
9 from lock import release
10 from i18n import _, gettext
10 from i18n import _, gettext
11 import os, re, sys, textwrap
11 import os, re, sys, textwrap
12 import hg, util, revlog, bundlerepo, extensions, copies, context, error
12 import hg, util, revlog, bundlerepo, extensions, copies, context, error
13 import difflib, patch, time, help, mdiff, tempfile, url, encoding
13 import difflib, patch, time, help, mdiff, tempfile, url, encoding
14 import archival, changegroup, cmdutil, hgweb.server, sshserver, hbisect
14 import archival, changegroup, cmdutil, hgweb.server, sshserver, hbisect
15 import merge as merge_
15 import merge as merge_
16
16
17 # Commands start here, listed alphabetically
17 # Commands start here, listed alphabetically
18
18
19 def add(ui, repo, *pats, **opts):
19 def add(ui, repo, *pats, **opts):
20 """add the specified files on the next commit
20 """add the specified files on the next commit
21
21
22 Schedule files to be version controlled and added to the
22 Schedule files to be version controlled and added to the
23 repository.
23 repository.
24
24
25 The files will be added to the repository at the next commit. To
25 The files will be added to the repository at the next commit. To
26 undo an add before that, see hg revert.
26 undo an add before that, see hg revert.
27
27
28 If no names are given, add all files to the repository.
28 If no names are given, add all files to the repository.
29 """
29 """
30
30
31 rejected = None
31 rejected = None
32 exacts = {}
32 exacts = {}
33 names = []
33 names = []
34 m = cmdutil.match(repo, pats, opts)
34 m = cmdutil.match(repo, pats, opts)
35 m.bad = lambda x,y: True
35 m.bad = lambda x,y: True
36 for abs in repo.walk(m):
36 for abs in repo.walk(m):
37 if m.exact(abs):
37 if m.exact(abs):
38 if ui.verbose:
38 if ui.verbose:
39 ui.status(_('adding %s\n') % m.rel(abs))
39 ui.status(_('adding %s\n') % m.rel(abs))
40 names.append(abs)
40 names.append(abs)
41 exacts[abs] = 1
41 exacts[abs] = 1
42 elif abs not in repo.dirstate:
42 elif abs not in repo.dirstate:
43 ui.status(_('adding %s\n') % m.rel(abs))
43 ui.status(_('adding %s\n') % m.rel(abs))
44 names.append(abs)
44 names.append(abs)
45 if not opts.get('dry_run'):
45 if not opts.get('dry_run'):
46 rejected = repo.add(names)
46 rejected = repo.add(names)
47 rejected = [p for p in rejected if p in exacts]
47 rejected = [p for p in rejected if p in exacts]
48 return rejected and 1 or 0
48 return rejected and 1 or 0
49
49
50 def addremove(ui, repo, *pats, **opts):
50 def addremove(ui, repo, *pats, **opts):
51 """add all new files, delete all missing files
51 """add all new files, delete all missing files
52
52
53 Add all new files and remove all missing files from the
53 Add all new files and remove all missing files from the
54 repository.
54 repository.
55
55
56 New files are ignored if they match any of the patterns in
56 New files are ignored if they match any of the patterns in
57 .hgignore. As with add, these changes take effect at the next
57 .hgignore. As with add, these changes take effect at the next
58 commit.
58 commit.
59
59
60 Use the -s/--similarity option to detect renamed files. With a
60 Use the -s/--similarity option to detect renamed files. With a
61 parameter > 0, this compares every removed file with every added
61 parameter > 0, this compares every removed file with every added
62 file and records those similar enough as renames. This option
62 file and records those similar enough as renames. This option
63 takes a percentage between 0 (disabled) and 100 (files must be
63 takes a percentage between 0 (disabled) and 100 (files must be
64 identical) as its parameter. Detecting renamed files this way can
64 identical) as its parameter. Detecting renamed files this way can
65 be expensive.
65 be expensive.
66 """
66 """
67 try:
67 try:
68 sim = float(opts.get('similarity') or 0)
68 sim = float(opts.get('similarity') or 0)
69 except ValueError:
69 except ValueError:
70 raise util.Abort(_('similarity must be a number'))
70 raise util.Abort(_('similarity must be a number'))
71 if sim < 0 or sim > 100:
71 if sim < 0 or sim > 100:
72 raise util.Abort(_('similarity must be between 0 and 100'))
72 raise util.Abort(_('similarity must be between 0 and 100'))
73 return cmdutil.addremove(repo, pats, opts, similarity=sim/100.)
73 return cmdutil.addremove(repo, pats, opts, similarity=sim/100.)
74
74
75 def annotate(ui, repo, *pats, **opts):
75 def annotate(ui, repo, *pats, **opts):
76 """show changeset information per file line
76 """show changeset information per file line
77
77
78 List changes in files, showing the revision id responsible for
78 List changes in files, showing the revision id responsible for
79 each line
79 each line
80
80
81 This command is useful to discover who did a change or when a
81 This command is useful to discover who did a change or when a
82 change took place.
82 change took place.
83
83
84 Without the -a/--text option, annotate will avoid processing files
84 Without the -a/--text option, annotate will avoid processing files
85 it detects as binary. With -a, annotate will generate an
85 it detects as binary. With -a, annotate will generate an
86 annotation anyway, probably with undesirable results.
86 annotation anyway, probably with undesirable results.
87 """
87 """
88 datefunc = ui.quiet and util.shortdate or util.datestr
88 datefunc = ui.quiet and util.shortdate or util.datestr
89 getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
89 getdate = util.cachefunc(lambda x: datefunc(x[0].date()))
90
90
91 if not pats:
91 if not pats:
92 raise util.Abort(_('at least one file name or pattern required'))
92 raise util.Abort(_('at least one file name or pattern required'))
93
93
94 opmap = [('user', lambda x: ui.shortuser(x[0].user())),
94 opmap = [('user', lambda x: ui.shortuser(x[0].user())),
95 ('number', lambda x: str(x[0].rev())),
95 ('number', lambda x: str(x[0].rev())),
96 ('changeset', lambda x: short(x[0].node())),
96 ('changeset', lambda x: short(x[0].node())),
97 ('date', getdate),
97 ('date', getdate),
98 ('follow', lambda x: x[0].path()),
98 ('follow', lambda x: x[0].path()),
99 ]
99 ]
100
100
101 if (not opts.get('user') and not opts.get('changeset') and not opts.get('date')
101 if (not opts.get('user') and not opts.get('changeset') and not opts.get('date')
102 and not opts.get('follow')):
102 and not opts.get('follow')):
103 opts['number'] = 1
103 opts['number'] = 1
104
104
105 linenumber = opts.get('line_number') is not None
105 linenumber = opts.get('line_number') is not None
106 if (linenumber and (not opts.get('changeset')) and (not opts.get('number'))):
106 if (linenumber and (not opts.get('changeset')) and (not opts.get('number'))):
107 raise util.Abort(_('at least one of -n/-c is required for -l'))
107 raise util.Abort(_('at least one of -n/-c is required for -l'))
108
108
109 funcmap = [func for op, func in opmap if opts.get(op)]
109 funcmap = [func for op, func in opmap if opts.get(op)]
110 if linenumber:
110 if linenumber:
111 lastfunc = funcmap[-1]
111 lastfunc = funcmap[-1]
112 funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1])
112 funcmap[-1] = lambda x: "%s:%s" % (lastfunc(x), x[1])
113
113
114 ctx = repo[opts.get('rev')]
114 ctx = repo[opts.get('rev')]
115
115
116 m = cmdutil.match(repo, pats, opts)
116 m = cmdutil.match(repo, pats, opts)
117 for abs in ctx.walk(m):
117 for abs in ctx.walk(m):
118 fctx = ctx[abs]
118 fctx = ctx[abs]
119 if not opts.get('text') and util.binary(fctx.data()):
119 if not opts.get('text') and util.binary(fctx.data()):
120 ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
120 ui.write(_("%s: binary file\n") % ((pats and m.rel(abs)) or abs))
121 continue
121 continue
122
122
123 lines = fctx.annotate(follow=opts.get('follow'),
123 lines = fctx.annotate(follow=opts.get('follow'),
124 linenumber=linenumber)
124 linenumber=linenumber)
125 pieces = []
125 pieces = []
126
126
127 for f in funcmap:
127 for f in funcmap:
128 l = [f(n) for n, dummy in lines]
128 l = [f(n) for n, dummy in lines]
129 if l:
129 if l:
130 ml = max(map(len, l))
130 ml = max(map(len, l))
131 pieces.append(["%*s" % (ml, x) for x in l])
131 pieces.append(["%*s" % (ml, x) for x in l])
132
132
133 if pieces:
133 if pieces:
134 for p, l in zip(zip(*pieces), lines):
134 for p, l in zip(zip(*pieces), lines):
135 ui.write("%s: %s" % (" ".join(p), l[1]))
135 ui.write("%s: %s" % (" ".join(p), l[1]))
136
136
137 def archive(ui, repo, dest, **opts):
137 def archive(ui, repo, dest, **opts):
138 '''create unversioned archive of a repository revision
138 '''create unversioned archive of a repository revision
139
139
140 By default, the revision used is the parent of the working
140 By default, the revision used is the parent of the working
141 directory; use -r/--rev to specify a different revision.
141 directory; use -r/--rev to specify a different revision.
142
142
143 To specify the type of archive to create, use -t/--type. Valid
143 To specify the type of archive to create, use -t/--type. Valid
144 types are:
144 types are:
145
145
146 "files" (default): a directory full of files
146 "files" (default): a directory full of files
147 "tar": tar archive, uncompressed
147 "tar": tar archive, uncompressed
148 "tbz2": tar archive, compressed using bzip2
148 "tbz2": tar archive, compressed using bzip2
149 "tgz": tar archive, compressed using gzip
149 "tgz": tar archive, compressed using gzip
150 "uzip": zip archive, uncompressed
150 "uzip": zip archive, uncompressed
151 "zip": zip archive, compressed using deflate
151 "zip": zip archive, compressed using deflate
152
152
153 The exact name of the destination archive or directory is given
153 The exact name of the destination archive or directory is given
154 using a format string; see 'hg help export' for details.
154 using a format string; see 'hg help export' for details.
155
155
156 Each member added to an archive file has a directory prefix
156 Each member added to an archive file has a directory prefix
157 prepended. Use -p/--prefix to specify a format string for the
157 prepended. Use -p/--prefix to specify a format string for the
158 prefix. The default is the basename of the archive, with suffixes
158 prefix. The default is the basename of the archive, with suffixes
159 removed.
159 removed.
160 '''
160 '''
161
161
162 ctx = repo[opts.get('rev')]
162 ctx = repo[opts.get('rev')]
163 if not ctx:
163 if not ctx:
164 raise util.Abort(_('no working directory: please specify a revision'))
164 raise util.Abort(_('no working directory: please specify a revision'))
165 node = ctx.node()
165 node = ctx.node()
166 dest = cmdutil.make_filename(repo, dest, node)
166 dest = cmdutil.make_filename(repo, dest, node)
167 if os.path.realpath(dest) == repo.root:
167 if os.path.realpath(dest) == repo.root:
168 raise util.Abort(_('repository root cannot be destination'))
168 raise util.Abort(_('repository root cannot be destination'))
169 matchfn = cmdutil.match(repo, [], opts)
169 matchfn = cmdutil.match(repo, [], opts)
170 kind = opts.get('type') or 'files'
170 kind = opts.get('type') or 'files'
171 prefix = opts.get('prefix')
171 prefix = opts.get('prefix')
172 if dest == '-':
172 if dest == '-':
173 if kind == 'files':
173 if kind == 'files':
174 raise util.Abort(_('cannot archive plain files to stdout'))
174 raise util.Abort(_('cannot archive plain files to stdout'))
175 dest = sys.stdout
175 dest = sys.stdout
176 if not prefix: prefix = os.path.basename(repo.root) + '-%h'
176 if not prefix: prefix = os.path.basename(repo.root) + '-%h'
177 prefix = cmdutil.make_filename(repo, prefix, node)
177 prefix = cmdutil.make_filename(repo, prefix, node)
178 archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
178 archival.archive(repo, dest, node, kind, not opts.get('no_decode'),
179 matchfn, prefix)
179 matchfn, prefix)
180
180
181 def backout(ui, repo, node=None, rev=None, **opts):
181 def backout(ui, repo, node=None, rev=None, **opts):
182 '''reverse effect of earlier changeset
182 '''reverse effect of earlier changeset
183
183
184 Commit the backed out changes as a new changeset. The new
184 Commit the backed out changes as a new changeset. The new
185 changeset is a child of the backed out changeset.
185 changeset is a child of the backed out changeset.
186
186
187 If you back out a changeset other than the tip, a new head is
187 If you back out a changeset other than the tip, a new head is
188 created. This head will be the new tip and you should merge this
188 created. This head will be the new tip and you should merge this
189 backout changeset with another head (current one by default).
189 backout changeset with another head (current one by default).
190
190
191 The --merge option remembers the parent of the working directory
191 The --merge option remembers the parent of the working directory
192 before starting the backout, then merges the new head with that
192 before starting the backout, then merges the new head with that
193 changeset afterwards. This saves you from doing the merge by hand.
193 changeset afterwards. This saves you from doing the merge by hand.
194 The result of this merge is not committed, as with a normal merge.
194 The result of this merge is not committed, as with a normal merge.
195
195
196 See \'hg help dates\' for a list of formats valid for -d/--date.
196 See \'hg help dates\' for a list of formats valid for -d/--date.
197 '''
197 '''
198 if rev and node:
198 if rev and node:
199 raise util.Abort(_("please specify just one revision"))
199 raise util.Abort(_("please specify just one revision"))
200
200
201 if not rev:
201 if not rev:
202 rev = node
202 rev = node
203
203
204 if not rev:
204 if not rev:
205 raise util.Abort(_("please specify a revision to backout"))
205 raise util.Abort(_("please specify a revision to backout"))
206
206
207 date = opts.get('date')
207 date = opts.get('date')
208 if date:
208 if date:
209 opts['date'] = util.parsedate(date)
209 opts['date'] = util.parsedate(date)
210
210
211 cmdutil.bail_if_changed(repo)
211 cmdutil.bail_if_changed(repo)
212 node = repo.lookup(rev)
212 node = repo.lookup(rev)
213
213
214 op1, op2 = repo.dirstate.parents()
214 op1, op2 = repo.dirstate.parents()
215 a = repo.changelog.ancestor(op1, node)
215 a = repo.changelog.ancestor(op1, node)
216 if a != node:
216 if a != node:
217 raise util.Abort(_('cannot back out change on a different branch'))
217 raise util.Abort(_('cannot back out change on a different branch'))
218
218
219 p1, p2 = repo.changelog.parents(node)
219 p1, p2 = repo.changelog.parents(node)
220 if p1 == nullid:
220 if p1 == nullid:
221 raise util.Abort(_('cannot back out a change with no parents'))
221 raise util.Abort(_('cannot back out a change with no parents'))
222 if p2 != nullid:
222 if p2 != nullid:
223 if not opts.get('parent'):
223 if not opts.get('parent'):
224 raise util.Abort(_('cannot back out a merge changeset without '
224 raise util.Abort(_('cannot back out a merge changeset without '
225 '--parent'))
225 '--parent'))
226 p = repo.lookup(opts['parent'])
226 p = repo.lookup(opts['parent'])
227 if p not in (p1, p2):
227 if p not in (p1, p2):
228 raise util.Abort(_('%s is not a parent of %s') %
228 raise util.Abort(_('%s is not a parent of %s') %
229 (short(p), short(node)))
229 (short(p), short(node)))
230 parent = p
230 parent = p
231 else:
231 else:
232 if opts.get('parent'):
232 if opts.get('parent'):
233 raise util.Abort(_('cannot use --parent on non-merge changeset'))
233 raise util.Abort(_('cannot use --parent on non-merge changeset'))
234 parent = p1
234 parent = p1
235
235
236 # the backout should appear on the same branch
236 # the backout should appear on the same branch
237 branch = repo.dirstate.branch()
237 branch = repo.dirstate.branch()
238 hg.clean(repo, node, show_stats=False)
238 hg.clean(repo, node, show_stats=False)
239 repo.dirstate.setbranch(branch)
239 repo.dirstate.setbranch(branch)
240 revert_opts = opts.copy()
240 revert_opts = opts.copy()
241 revert_opts['date'] = None
241 revert_opts['date'] = None
242 revert_opts['all'] = True
242 revert_opts['all'] = True
243 revert_opts['rev'] = hex(parent)
243 revert_opts['rev'] = hex(parent)
244 revert_opts['no_backup'] = None
244 revert_opts['no_backup'] = None
245 revert(ui, repo, **revert_opts)
245 revert(ui, repo, **revert_opts)
246 commit_opts = opts.copy()
246 commit_opts = opts.copy()
247 commit_opts['addremove'] = False
247 commit_opts['addremove'] = False
248 if not commit_opts['message'] and not commit_opts['logfile']:
248 if not commit_opts['message'] and not commit_opts['logfile']:
249 commit_opts['message'] = _("Backed out changeset %s") % (short(node))
249 commit_opts['message'] = _("Backed out changeset %s") % (short(node))
250 commit_opts['force_editor'] = True
250 commit_opts['force_editor'] = True
251 commit(ui, repo, **commit_opts)
251 commit(ui, repo, **commit_opts)
252 def nice(node):
252 def nice(node):
253 return '%d:%s' % (repo.changelog.rev(node), short(node))
253 return '%d:%s' % (repo.changelog.rev(node), short(node))
254 ui.status(_('changeset %s backs out changeset %s\n') %
254 ui.status(_('changeset %s backs out changeset %s\n') %
255 (nice(repo.changelog.tip()), nice(node)))
255 (nice(repo.changelog.tip()), nice(node)))
256 if op1 != node:
256 if op1 != node:
257 hg.clean(repo, op1, show_stats=False)
257 hg.clean(repo, op1, show_stats=False)
258 if opts.get('merge'):
258 if opts.get('merge'):
259 ui.status(_('merging with changeset %s\n') % nice(repo.changelog.tip()))
259 ui.status(_('merging with changeset %s\n') % nice(repo.changelog.tip()))
260 hg.merge(repo, hex(repo.changelog.tip()))
260 hg.merge(repo, hex(repo.changelog.tip()))
261 else:
261 else:
262 ui.status(_('the backout changeset is a new head - '
262 ui.status(_('the backout changeset is a new head - '
263 'do not forget to merge\n'))
263 'do not forget to merge\n'))
264 ui.status(_('(use "backout --merge" '
264 ui.status(_('(use "backout --merge" '
265 'if you want to auto-merge)\n'))
265 'if you want to auto-merge)\n'))
266
266
267 def bisect(ui, repo, rev=None, extra=None, command=None,
267 def bisect(ui, repo, rev=None, extra=None, command=None,
268 reset=None, good=None, bad=None, skip=None, noupdate=None):
268 reset=None, good=None, bad=None, skip=None, noupdate=None):
269 """subdivision search of changesets
269 """subdivision search of changesets
270
270
271 This command helps to find changesets which introduce problems. To
271 This command helps to find changesets which introduce problems. To
272 use, mark the earliest changeset you know exhibits the problem as
272 use, mark the earliest changeset you know exhibits the problem as
273 bad, then mark the latest changeset which is free from the problem
273 bad, then mark the latest changeset which is free from the problem
274 as good. Bisect will update your working directory to a revision
274 as good. Bisect will update your working directory to a revision
275 for testing (unless the -U/--noupdate option is specified). Once
275 for testing (unless the -U/--noupdate option is specified). Once
276 you have performed tests, mark the working directory as bad or
276 you have performed tests, mark the working directory as bad or
277 good and bisect will either update to another candidate changeset
277 good and bisect will either update to another candidate changeset
278 or announce that it has found the bad revision.
278 or announce that it has found the bad revision.
279
279
280 As a shortcut, you can also use the revision argument to mark a
280 As a shortcut, you can also use the revision argument to mark a
281 revision as good or bad without checking it out first.
281 revision as good or bad without checking it out first.
282
282
283 If you supply a command it will be used for automatic bisection.
283 If you supply a command it will be used for automatic bisection.
284 Its exit status will be used as flag to mark revision as bad or
284 Its exit status will be used as flag to mark revision as bad or
285 good. In case exit status is 0 the revision is marked as good, 125
285 good. In case exit status is 0 the revision is marked as good, 125
286 - skipped, 127 (command not found) - bisection will be aborted;
286 - skipped, 127 (command not found) - bisection will be aborted;
287 any other status bigger than 0 will mark revision as bad.
287 any other status bigger than 0 will mark revision as bad.
288 """
288 """
289 def print_result(nodes, good):
289 def print_result(nodes, good):
290 displayer = cmdutil.show_changeset(ui, repo, {})
290 displayer = cmdutil.show_changeset(ui, repo, {})
291 if len(nodes) == 1:
291 if len(nodes) == 1:
292 # narrowed it down to a single revision
292 # narrowed it down to a single revision
293 if good:
293 if good:
294 ui.write(_("The first good revision is:\n"))
294 ui.write(_("The first good revision is:\n"))
295 else:
295 else:
296 ui.write(_("The first bad revision is:\n"))
296 ui.write(_("The first bad revision is:\n"))
297 displayer.show(repo[nodes[0]])
297 displayer.show(repo[nodes[0]])
298 else:
298 else:
299 # multiple possible revisions
299 # multiple possible revisions
300 if good:
300 if good:
301 ui.write(_("Due to skipped revisions, the first "
301 ui.write(_("Due to skipped revisions, the first "
302 "good revision could be any of:\n"))
302 "good revision could be any of:\n"))
303 else:
303 else:
304 ui.write(_("Due to skipped revisions, the first "
304 ui.write(_("Due to skipped revisions, the first "
305 "bad revision could be any of:\n"))
305 "bad revision could be any of:\n"))
306 for n in nodes:
306 for n in nodes:
307 displayer.show(repo[n])
307 displayer.show(repo[n])
308
308
309 def check_state(state, interactive=True):
309 def check_state(state, interactive=True):
310 if not state['good'] or not state['bad']:
310 if not state['good'] or not state['bad']:
311 if (good or bad or skip or reset) and interactive:
311 if (good or bad or skip or reset) and interactive:
312 return
312 return
313 if not state['good']:
313 if not state['good']:
314 raise util.Abort(_('cannot bisect (no known good revisions)'))
314 raise util.Abort(_('cannot bisect (no known good revisions)'))
315 else:
315 else:
316 raise util.Abort(_('cannot bisect (no known bad revisions)'))
316 raise util.Abort(_('cannot bisect (no known bad revisions)'))
317 return True
317 return True
318
318
319 # backward compatibility
319 # backward compatibility
320 if rev in "good bad reset init".split():
320 if rev in "good bad reset init".split():
321 ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
321 ui.warn(_("(use of 'hg bisect <cmd>' is deprecated)\n"))
322 cmd, rev, extra = rev, extra, None
322 cmd, rev, extra = rev, extra, None
323 if cmd == "good":
323 if cmd == "good":
324 good = True
324 good = True
325 elif cmd == "bad":
325 elif cmd == "bad":
326 bad = True
326 bad = True
327 else:
327 else:
328 reset = True
328 reset = True
329 elif extra or good + bad + skip + reset + bool(command) > 1:
329 elif extra or good + bad + skip + reset + bool(command) > 1:
330 raise util.Abort(_('incompatible arguments'))
330 raise util.Abort(_('incompatible arguments'))
331
331
332 if reset:
332 if reset:
333 p = repo.join("bisect.state")
333 p = repo.join("bisect.state")
334 if os.path.exists(p):
334 if os.path.exists(p):
335 os.unlink(p)
335 os.unlink(p)
336 return
336 return
337
337
338 state = hbisect.load_state(repo)
338 state = hbisect.load_state(repo)
339
339
340 if command:
340 if command:
341 commandpath = util.find_exe(command)
341 commandpath = util.find_exe(command)
342 changesets = 1
342 changesets = 1
343 try:
343 try:
344 while changesets:
344 while changesets:
345 # update state
345 # update state
346 status = os.spawnl(os.P_WAIT, commandpath, commandpath)
346 status = os.spawnl(os.P_WAIT, commandpath, commandpath)
347 if status == 125:
347 if status == 125:
348 transition = "skip"
348 transition = "skip"
349 elif status == 0:
349 elif status == 0:
350 transition = "good"
350 transition = "good"
351 # status < 0 means process was killed
351 # status < 0 means process was killed
352 elif status == 127:
352 elif status == 127:
353 raise util.Abort(_("failed to execute %s") % command)
353 raise util.Abort(_("failed to execute %s") % command)
354 elif status < 0:
354 elif status < 0:
355 raise util.Abort(_("%s killed") % command)
355 raise util.Abort(_("%s killed") % command)
356 else:
356 else:
357 transition = "bad"
357 transition = "bad"
358 node = repo.lookup(rev or '.')
358 node = repo.lookup(rev or '.')
359 state[transition].append(node)
359 state[transition].append(node)
360 ui.note(_('Changeset %s: %s\n') % (short(node), transition))
360 ui.note(_('Changeset %s: %s\n') % (short(node), transition))
361 check_state(state, interactive=False)
361 check_state(state, interactive=False)
362 # bisect
362 # bisect
363 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
363 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
364 # update to next check
364 # update to next check
365 cmdutil.bail_if_changed(repo)
365 cmdutil.bail_if_changed(repo)
366 hg.clean(repo, nodes[0], show_stats=False)
366 hg.clean(repo, nodes[0], show_stats=False)
367 finally:
367 finally:
368 hbisect.save_state(repo, state)
368 hbisect.save_state(repo, state)
369 return print_result(nodes, not status)
369 return print_result(nodes, not status)
370
370
371 # update state
371 # update state
372 node = repo.lookup(rev or '.')
372 node = repo.lookup(rev or '.')
373 if good:
373 if good:
374 state['good'].append(node)
374 state['good'].append(node)
375 elif bad:
375 elif bad:
376 state['bad'].append(node)
376 state['bad'].append(node)
377 elif skip:
377 elif skip:
378 state['skip'].append(node)
378 state['skip'].append(node)
379
379
380 hbisect.save_state(repo, state)
380 hbisect.save_state(repo, state)
381
381
382 if not check_state(state):
382 if not check_state(state):
383 return
383 return
384
384
385 # actually bisect
385 # actually bisect
386 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
386 nodes, changesets, good = hbisect.bisect(repo.changelog, state)
387 if changesets == 0:
387 if changesets == 0:
388 print_result(nodes, good)
388 print_result(nodes, good)
389 else:
389 else:
390 assert len(nodes) == 1 # only a single node can be tested next
390 assert len(nodes) == 1 # only a single node can be tested next
391 node = nodes[0]
391 node = nodes[0]
392 # compute the approximate number of remaining tests
392 # compute the approximate number of remaining tests
393 tests, size = 0, 2
393 tests, size = 0, 2
394 while size <= changesets:
394 while size <= changesets:
395 tests, size = tests + 1, size * 2
395 tests, size = tests + 1, size * 2
396 rev = repo.changelog.rev(node)
396 rev = repo.changelog.rev(node)
397 ui.write(_("Testing changeset %s:%s "
397 ui.write(_("Testing changeset %s:%s "
398 "(%s changesets remaining, ~%s tests)\n")
398 "(%s changesets remaining, ~%s tests)\n")
399 % (rev, short(node), changesets, tests))
399 % (rev, short(node), changesets, tests))
400 if not noupdate:
400 if not noupdate:
401 cmdutil.bail_if_changed(repo)
401 cmdutil.bail_if_changed(repo)
402 return hg.clean(repo, node)
402 return hg.clean(repo, node)
403
403
def branch(ui, repo, label=None, **opts):
    """set or show the current branch name

    With no argument, show the current branch name. With one argument,
    set the working directory branch name (the branch does not exist
    in the repository until the next commit). It is recommended to use
    the 'default' branch as your primary development branch.

    Unless -f/--force is specified, branch will not let you set a
    branch name that shadows an existing branch.

    Use -C/--clean to reset the working directory branch to that of
    the parent of the working directory, negating a previous branch
    change.

    Use the command 'hg update' to switch to an existing branch.
    """

    if opts.get('clean'):
        # -C/--clean: go back to the branch of the first parent
        label = repo[None].parents()[0].branch()
        repo.dirstate.setbranch(label)
        ui.status(_('reset working directory to branch %s\n') % label)
    elif label:
        # refuse to shadow an existing branch unless --force is given
        # or we are already on a parent of that branch
        shadows = (not opts.get('force')
                   and label in repo.branchtags()
                   and label not in [p.branch() for p in repo.parents()])
        if shadows:
            raise util.Abort(_('a branch of the same name already exists'
                               ' (use --force to override)'))
        repo.dirstate.setbranch(encoding.fromlocal(label))
        ui.status(_('marked working directory as branch %s\n') % label)
    else:
        # no argument: just print the current branch name
        ui.write("%s\n" % encoding.tolocal(repo.dirstate.branch()))
435
435
def branches(ui, repo, active=False):
    """list repository named branches

    List the repository's named branches, indicating which ones are
    inactive. If active is specified, only show active branches.

    A branch is considered active if it contains repository heads.

    Use the command 'hg update' to switch to an existing branch.
    """
    hexfunc = ui.debugflag and hex or short
    # a branch is "active" if one of the repository heads is on it
    live = [encoding.tolocal(repo[h].branch())
            for h in repo.heads(closed=False)]
    entries = util.sort([(name in live, repo.changelog.rev(node), name)
                         for name, node in repo.branchtags().items()])
    entries.reverse()

    for isactive, rev, name in entries:
        if active and not isactive:
            continue
        if ui.quiet:
            ui.write("%s\n" % name)
            continue
        hn = repo.lookup(rev)
        if isactive:
            notice = ''
        elif hn not in repo.branchheads(name, closed=False):
            notice = ' (closed)'
        else:
            notice = ' (inactive)'
        # pad the revision number so the hashes line up in a column
        padded = str(rev).rjust(31 - encoding.colwidth(name))
        ui.write("%s %s:%s%s\n" % (name, padded, hexfunc(hn), notice))
468
468
def bundle(ui, repo, fname, dest=None, **opts):
    """create a changegroup file

    Generate a compressed changegroup file collecting changesets not
    known to be in another repository.

    If no destination repository is specified the destination is
    assumed to have all the nodes specified by one or more --base
    parameters. To create a bundle containing all changesets, use
    -a/--all (or --base null). To change the compression method
    applied, use the -t/--type option (by default, bundles are
    compressed using bz2).

    The bundle file can then be transferred using conventional means
    and applied to another repository with the unbundle or pull
    command. This is useful when direct push and pull are not
    available or when exporting an entire repository is undesirable.

    Applying bundles preserves all changeset contents including
    permissions, copy/rename information, and revision history.
    """
    revs = opts.get('rev') or None
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    if opts.get('all'):
        base = ['null']
    else:
        base = opts.get('base')
    if base:
        if dest:
            raise util.Abort(_("--base is incompatible with specifying "
                               "a destination"))
        base = [repo.lookup(rev) for rev in base]
        # create the right base
        # XXX: nodesbetween / changegroup* should be "fixed" instead
        o = []
        # nodes already present on the (assumed) other side; use a real
        # set instead of a dict with dummy values for membership tests
        has = set([nullid])
        for n in base:
            # reachable() yields a node->flag mapping; update() takes its keys
            has.update(repo.changelog.reachable(n))
        if revs:
            visit = list(revs)
        else:
            visit = repo.changelog.heads()
        # walk ancestors of the requested revs, stopping at anything the
        # base already reaches; roots of the walk become the outgoing set
        seen = set()
        while visit:
            n = visit.pop(0)
            parents = [p for p in repo.changelog.parents(n) if p not in has]
            if len(parents) == 0:
                o.insert(0, n)
            else:
                for p in parents:
                    if p not in seen:
                        seen.add(p)
                        visit.append(p)
    else:
        # no --base: compare against the destination repository
        cmdutil.setremoteconfig(ui, opts)
        dest, revs, checkout = hg.parseurl(
            ui.expandpath(dest or 'default-push', dest or 'default'), revs)
        other = hg.repository(ui, dest)
        o = repo.findoutgoing(other, force=opts.get('force'))

    if revs:
        cg = repo.changegroupsubset(o, revs, 'bundle')
    else:
        cg = repo.changegroup(o, 'bundle')

    bundletype = opts.get('type', 'bzip2').lower()
    btypes = {'none': 'HG10UN', 'bzip2': 'HG10BZ', 'gzip': 'HG10GZ'}
    bundletype = btypes.get(bundletype)
    if bundletype not in changegroup.bundletypes:
        raise util.Abort(_('unknown bundle type specified with --type'))

    changegroup.writebundle(cg, fname, bundletype)
542
542
def cat(ui, repo, file1, *pats, **opts):
    """output the current or given revision of files

    Print the specified files as they were at the given revision. If
    no revision is given, the parent of the working directory is used,
    or tip if no revision is checked out.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are the same as
    for the export command, with the following additions:

    %s basename of file being printed
    %d dirname of file being printed, or '.' if in repository root
    %p root-relative path name of file being printed
    """
    ctx = repo[opts.get('rev')]
    matcher = cmdutil.match(repo, (file1,) + pats, opts)
    # exit with 1 unless at least one file matched and was printed
    exitcode = 1
    for path in ctx.walk(matcher):
        out = cmdutil.make_file(repo, opts.get('output'), ctx.node(),
                                pathname=path)
        data = ctx[path].data()
        if opts.get('decode'):
            data = repo.wwritedata(path, data)
        out.write(data)
        exitcode = 0
    return exitcode
569
569
def clone(ui, source, dest=None, **opts):
    """make a copy of an existing repository

    Create a copy of an existing repository in a new directory.

    If no destination directory name is specified, it defaults to the
    basename of the source.

    The location of the source is added to the new repository's
    .hg/hgrc file, as the default to be used for future pulls.

    If you use the -r/--rev option to clone up to a specific revision,
    no subsequent revisions (including subsequent tags) will be
    present in the cloned repository. This option implies --pull, even
    on local repositories.

    By default, clone will check out the head of the 'default' branch.
    If the -U/--noupdate option is used, the new clone will contain
    only a repository (.hg) and no working copy (the working copy
    parent is the null revision).

    See 'hg help urls' for valid source format details.

    It is possible to specify an ssh:// URL as the destination, but no
    .hg/hgrc and working directory will be created on the remote side.
    Look at the help text for URLs for important details about ssh://
    URLs.

    For efficiency, hardlinks are used for cloning whenever the source
    and destination are on the same filesystem (note this applies only
    to the repository data, not to the checked out files). Some
    filesystems, such as AFS, implement hardlinking incorrectly, but
    do not report errors. In these cases, use the --pull option to
    avoid hardlinking.

    In some cases, you can clone repositories and checked out files
    using full hardlinks with

    $ cp -al REPO REPOCLONE

    This is the fastest way to clone, but it is not always safe. The
    operation is not atomic (making sure REPO is not modified during
    the operation is up to you) and you have to make sure your editor
    breaks hardlinks (Emacs and most Linux Kernel tools do so). Also,
    this is not compatible with certain extensions that place their
    metadata under the .hg directory, such as mq.

    """
    cmdutil.setremoteconfig(ui, opts)
    # translate command-line options into hg.clone keyword arguments
    cloneopts = dict(pull=opts.get('pull'),
                     stream=opts.get('uncompressed'),
                     rev=opts.get('rev'),
                     update=not opts.get('noupdate'))
    hg.clone(ui, source, dest, **cloneopts)
624
624
def commit(ui, repo, *pats, **opts):
    """commit the specified files or all outstanding changes

    Commit changes to the given files into the repository. Unlike a
    centralized RCS, this operation is a local operation. See hg push
    for means to actively distribute your changes.

    If a list of files is omitted, all changes reported by "hg status"
    will be committed.

    If you are committing the result of a merge, do not provide any
    file names or -I/-X filters.

    If no commit message is specified, the configured editor is
    started to prompt you for a message.

    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    extra = {}
    if opts.get('close_branch'):
        extra['close'] = 1

    def commitfunc(ui, repo, message, match, opts):
        return repo.commit(match.files(), message, opts.get('user'),
                           opts.get('date'), match,
                           force_editor=opts.get('force_editor'),
                           extra=extra)

    node = cmdutil.commit(ui, repo, commitfunc, pats, opts)
    if not node:
        return
    cl = repo.changelog
    rev = cl.rev(node)
    parents = cl.parentrevs(rev)
    if rev - 1 not in parents:
        # neither parent was the previous tip, so this commit may have
        # created a new head; warn if every parent already had other heads
        p1, p2 = parents
        if (parents == (nullrev, nullrev) or
            len(cl.heads(cl.node(p1))) > 1 and
            (p2 == nullrev or len(cl.heads(cl.node(p2))) > 1)):
            ui.status(_('created new head\n'))

    if ui.debugflag:
        ui.write(_('committed changeset %d:%s\n') % (rev, hex(node)))
    elif ui.verbose:
        ui.write(_('committed changeset %d:%s\n') % (rev, short(node)))
669
669
def copy(ui, repo, *pats, **opts):
    """mark files as copied for the next commit

    Mark dest as having copies of source files. If dest is a
    directory, copies are put in that directory. If dest is a file,
    the source must be a single file.

    By default, this command copies the contents of files as they
    stand in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.

    This command takes effect with the next commit. To undo a copy
    before that, see hg revert.
    """
    # take the working-directory lock (non-waiting) while recording copies
    lock = repo.wlock(False)
    try:
        return cmdutil.copy(ui, repo, pats, opts)
    finally:
        lock.release()
689
689
def debugancestor(ui, repo, *args):
    """find the ancestor revision of two revisions in a given index"""
    if len(args) == 3:
        # explicit index file: open it directly, no repository needed
        index, rev1, rev2 = args
        rlog = revlog.revlog(util.opener(os.getcwd(), audit=False), index)
        lookup = rlog.lookup
    elif len(args) == 2:
        if not repo:
            raise util.Abort(_("There is no Mercurial repository here "
                               "(.hg not found)"))
        rev1, rev2 = args
        rlog = repo.changelog
        lookup = repo.lookup
    else:
        raise util.Abort(_('either two or three arguments required'))
    anc = rlog.ancestor(lookup(rev1), lookup(rev2))
    ui.write("%d:%s\n" % (rlog.rev(anc), hex(anc)))
707
707
def debugcommands(ui, cmd='', *args):
    # dump every command name with its option long-names, sorted
    for name, entry in util.sort(table.iteritems()):
        # drop aliases ("cmd|alias") and the '^' marker for common commands
        name = name.split('|')[0].strip('^')
        optnames = ', '.join([opt[1] for opt in entry[1]])
        ui.write('%s: %s\n' % (name, optnames))
713
713
def debugcomplete(ui, cmd='', **opts):
    """returns the completion list associated with the given command"""

    if opts.get('options'):
        # complete option names: global options plus the command's own
        tables = [globalopts]
        if cmd:
            aliases, entry = cmdutil.findcmd(cmd, table, False)
            tables.append(entry[1])
        flags = []
        for tab in tables:
            for o in tab:
                if o[0]:
                    flags.append('-%s' % o[0])
                flags.append('--%s' % o[1])
        ui.write("%s\n" % "\n".join(flags))
        return

    # otherwise complete command names matching the given prefix
    names = cmdutil.findpossible(cmd, table)
    if ui.verbose:
        names = [' '.join(c[0]) for c in names.values()]
    ui.write("%s\n" % "\n".join(util.sort(names)))
735
735
def debugfsinfo(ui, path = "."):
    # Probe filesystem capabilities (exec bit, symlinks, case sensitivity)
    # using a scratch file. Close the handle explicitly instead of leaking
    # it until garbage collection -- on some platforms an open handle can
    # interfere with the os.unlink below.
    fh = file('.debugfsinfo', 'w')
    try:
        fh.write('')
    finally:
        fh.close()
    ui.write('exec: %s\n' % (util.checkexec(path) and 'yes' or 'no'))
    ui.write('symlink: %s\n' % (util.checklink(path) and 'yes' or 'no'))
    ui.write('case-sensitive: %s\n' % (util.checkcase('.debugfsinfo')
                                       and 'yes' or 'no'))
    os.unlink('.debugfsinfo')
743
743
def debugrebuildstate(ui, repo, rev="tip"):
    """rebuild the dirstate as it would look like for the given revision"""
    ctx = repo[rev]
    # hold the working-directory lock while rewriting the dirstate
    lock = repo.wlock()
    try:
        repo.dirstate.rebuild(ctx.node(), ctx.manifest())
    finally:
        lock.release()
752
752
def debugcheckstate(ui, repo):
    """validate the correctness of the current dirstate"""
    p1, p2 = repo.dirstate.parents()
    man1 = repo[p1].manifest()
    man2 = repo[p2].manifest()
    dstate = repo.dirstate
    errors = 0
    # cross-check every tracked file against the parent manifests
    for fname in dstate:
        st = dstate[fname]
        problems = []
        if st in "nr" and fname not in man1:
            problems.append(_("%s in state %s, but not in manifest1\n"))
        if st in "a" and fname in man1:
            problems.append(_("%s in state %s, but also in manifest1\n"))
        if st in "m" and fname not in man1 and fname not in man2:
            problems.append(_("%s in state %s, but not in either manifest\n"))
        for msg in problems:
            ui.warn(msg % (fname, st))
            errors += 1
    # and the reverse direction: every manifest entry must be tracked
    for fname in man1:
        st = dstate[fname]
        if st not in "nrm":
            ui.warn(_("%s in manifest1, but listed as state %s") % (fname, st))
            errors += 1
    if errors:
        error = _(".hg/dirstate inconsistent with current parent's manifest")
        raise util.Abort(error)
779
779
def showconfig(ui, repo, *values, **opts):
    """show combined config settings from all hgrc files

    With no args, print names and values of all config items.

    With one arg of the form section.name, print just the value of
    that config item.

    With multiple args, print names and values of all config items
    with matching section names."""

    untrusted = bool(opts.get('untrusted'))
    # at most one fully-qualified "section.name" selector is allowed
    if values and len([v for v in values if '.' in v]) > 1:
        raise util.Abort(_('only one config item permitted'))
    for section, name, value in ui.walkconfig(untrusted=untrusted):
        fullname = section + '.' + name
        if not values:
            ui.write('%s=%s\n' % (fullname, value))
            continue
        for v in values:
            if v == section:
                # section match: print name=value
                ui.write('%s=%s\n' % (fullname, value))
            elif v == fullname:
                # exact item match: print the bare value
                ui.write(value, '\n')
805
805
def debugsetparents(ui, repo, rev1, rev2=None):
    """manually set the parents of the current working directory

    This is useful for writing repository conversion tools, but should
    be used with care.
    """

    # a missing second parent defaults to the null revision
    rev2 = rev2 or hex(nullid)

    wlock = repo.wlock()
    try:
        repo.dirstate.setparents(repo.lookup(rev1), repo.lookup(rev2))
    finally:
        # always drop the working-dir lock, even if lookup fails
        wlock.release()
821
821
def debugstate(ui, repo, nodates=None):
    """show the contents of the current dirstate"""
    # Each dirstate map entry is indexed as (state, mode, size, mtime);
    # one line is printed per tracked file, sorted by filename.
    timestr = ""
    showdate = not nodates
    for file_, ent in util.sort(repo.dirstate._map.iteritems()):
        if showdate:
            if ent[3] == -1:
                # mtime of -1 means "unset"; pad/slice the marker so the
                # column stays as wide as a locale-formatted timestamp
                # Pad or slice to locale representation
                locale_len = len(time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(0)))
                timestr = 'unset'
                timestr = timestr[:locale_len] + ' '*(locale_len - len(timestr))
            else:
                timestr = time.strftime("%Y-%m-%d %H:%M:%S ", time.localtime(ent[3]))
        if ent[1] & 020000:
            # S_IFLNK bit set in the recorded mode: show as a symlink
            mode = 'lnk'
        else:
            # otherwise show the permission bits in octal
            mode = '%3o' % (ent[1] & 0777)
        ui.write("%c %s %10d %s%s\n" % (ent[0], mode, ent[2], timestr, file_))
    # finally list copy/rename records: "copy: source -> destination"
    for f in repo.dirstate.copies():
        ui.write(_("copy: %s -> %s\n") % (repo.dirstate.copied(f), f))
842
842
def debugdata(ui, file_, rev):
    """dump the contents of a data file revision"""
    # open the revlog via its index: strip the two-char ".d" suffix
    # from the data filename and append ".i"
    opener = util.opener(os.getcwd(), audit=False)
    r = revlog.revlog(opener, file_[:-2] + ".i")
    try:
        ui.write(r.revision(r.lookup(rev)))
    except KeyError:
        raise util.Abort(_('invalid revision identifier %s') % rev)
850
850
def debugdate(ui, date, range=None, **opts):
    """parse and display a date"""
    # --extended enables the additional, more permissive date formats
    if opts["extended"]:
        parsed = util.parsedate(date, util.extendeddateformats)
    else:
        parsed = util.parsedate(date)
    # parsed is a (timestamp, tz-offset) pair
    ui.write("internal: %s %s\n" % parsed)
    ui.write("standard: %s\n" % util.datestr(parsed))
    if range:
        # report whether the parsed timestamp falls within the range
        matchfn = util.matchdate(range)
        ui.write("match: %s\n" % matchfn(parsed[0]))
862
862
def debugindex(ui, file_):
    """dump the contents of an index file

    Prints one row per revision: rev number, data offset, length,
    delta base, linkrev, and the short hashes of the node and its two
    parents.
    """
    r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    ui.write("   rev offset length base linkrev"
             " nodeid p1 p2\n")
    for i in r:
        node = r.node(i)
        try:
            pp = r.parents(node)
        except Exception:
            # A corrupt entry should not abort the whole dump; show null
            # parents for this row instead. (Was a bare "except:", which
            # also swallowed KeyboardInterrupt/SystemExit.)
            pp = [nullid, nullid]
        ui.write("% 6d % 9d % 7d % 6d % 7d %s %s %s\n" % (
                i, r.start(i), r.length(i), r.base(i), r.linkrev(i),
                short(node), short(pp[0]), short(pp[1])))
877
877
def debugindexdot(ui, file_):
    """dump an index DAG as a .dot file"""
    r = revlog.revlog(util.opener(os.getcwd(), audit=False), file_)
    # emit a graphviz digraph with one parent->child edge per link
    ui.write("digraph G {\n")
    for rev in r:
        node = r.node(rev)
        pp = r.parents(node)
        ui.write("\t%d -> %d\n" % (r.rev(pp[0]), rev))
        # only emit the second-parent edge for merge revisions
        if pp[1] != nullid:
            ui.write("\t%d -> %d\n" % (r.rev(pp[1]), rev))
    ui.write("}\n")
889
889
def debuginstall(ui):
    '''test Mercurial installation'''
    # Runs a series of self-checks (encoding, compiled extensions,
    # templates, patching, editor, username) and returns the number of
    # problems found (0 means a healthy install).

    def writetemp(contents):
        # write contents to a fresh temp file; caller must unlink it
        (fd, name) = tempfile.mkstemp(prefix="hg-debuginstall-")
        f = os.fdopen(fd, "wb")
        f.write(contents)
        f.close()
        return name

    problems = 0

    # encoding: verify the configured locale can round-trip text
    ui.status(_("Checking encoding (%s)...\n") % encoding.encoding)
    try:
        encoding.fromlocal("test")
    except util.Abort, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (check that your locale is properly set)\n"))
        problems += 1

    # compiled modules: the C extensions must be importable
    ui.status(_("Checking extensions...\n"))
    try:
        import bdiff, mpatch, base85
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" One or more extensions could not be found"))
        ui.write(_(" (check that you compiled the extensions)\n"))
        problems += 1

    # templates: the default command-line template map must load
    ui.status(_("Checking templates...\n"))
    try:
        import templater
        templater.templater(templater.templatepath("map-cmdline.default"))
    except Exception, inst:
        ui.write(" %s\n" % inst)
        ui.write(_(" (templates seem to have been installed incorrectly)\n"))
        problems += 1

    # patch: apply a tiny known diff and verify the result round-trips
    ui.status(_("Checking patch...\n"))
    patchproblems = 0
    a = "1\n2\n3\n4\n"
    b = "1\n2\n3\ninsert\n4\n"
    fa = writetemp(a)
    d = mdiff.unidiff(a, None, b, None, os.path.basename(fa),
                      os.path.basename(fa))
    fd = writetemp(d)

    files = {}
    try:
        patch.patch(fd, ui, cwd=os.path.dirname(fa), files=files)
    except util.Abort, e:
        ui.write(_(" patch call failed:\n"))
        ui.write(" " + str(e) + "\n")
        patchproblems += 1
    else:
        # the patch must have touched exactly the one file we diffed
        if list(files) != [os.path.basename(fa)]:
            ui.write(_(" unexpected patch output!\n"))
            patchproblems += 1
        # and the patched file must now equal the expected content
        a = file(fa).read()
        if a != b:
            ui.write(_(" patch test failed!\n"))
            patchproblems += 1

    if patchproblems:
        # an external patch tool was configured: blame/suggest it first
        if ui.config('ui', 'patch'):
            ui.write(_(" (Current patch tool may be incompatible with patch,"
                       " or misconfigured. Please check your .hgrc file)\n"))
        else:
            ui.write(_(" Internal patcher failure, please report this error"
                       " to http://www.selenic.com/mercurial/bts\n"))
    problems += patchproblems

    os.unlink(fa)
    os.unlink(fd)

    # editor: the configured commit editor must exist on PATH
    ui.status(_("Checking commit editor...\n"))
    editor = ui.geteditor()
    # the editor setting may include arguments; try the full string,
    # then just the command word
    cmdpath = util.find_exe(editor) or util.find_exe(editor.split()[0])
    if not cmdpath:
        if editor == 'vi':
            ui.write(_(" No commit editor set and can't find vi in PATH\n"))
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
        else:
            ui.write(_(" Can't find editor '%s' in PATH\n") % editor)
            ui.write(_(" (specify a commit editor in your .hgrc file)\n"))
        problems += 1

    # check username: resolved from HGUSER, then ui.username, then EMAIL
    ui.status(_("Checking username...\n"))
    user = os.environ.get("HGUSER")
    if user is None:
        user = ui.config("ui", "username")
    if user is None:
        user = os.environ.get("EMAIL")
    if not user:
        ui.warn(" ")
        # ui.username() aborts with a diagnostic when no username is set
        ui.username()
        ui.write(_(" (specify a username in your .hgrc file)\n"))

    if not problems:
        ui.status(_("No problems detected\n"))
    else:
        ui.write(_("%s problems detected,"
                   " please check your install!\n") % problems)

    return problems
1001
1001
def debugrename(ui, repo, file1, *pats, **opts):
    """dump rename information"""

    ctx = repo[opts.get('rev')]
    match = cmdutil.match(repo, (file1,) + pats, opts)
    for fname in ctx.walk(match):
        fctx = ctx[fname]
        # renamed() yields (source-path, source-filenode) or a false value
        renamed = fctx.filelog().renamed(fctx.filenode())
        rel = match.rel(fname)
        if renamed:
            ui.write(_("%s renamed from %s:%s\n")
                     % (rel, renamed[0], hex(renamed[1])))
        else:
            ui.write(_("%s not renamed\n") % rel)
1015
1015
def debugwalk(ui, repo, *pats, **opts):
    """show how files match on given patterns"""
    m = cmdutil.match(repo, pats, opts)
    items = list(repo.walk(m))
    if not items:
        return
    # size the two columns to the widest absolute and relative paths
    abswidth = max([len(f) for f in items])
    relwidth = max([len(m.rel(f)) for f in items])
    fmt = 'f %%-%ds %%-%ds %%s' % (abswidth, relwidth)
    for f in items:
        flag = m.exact(f) and 'exact' or ''
        line = fmt % (f, m.rel(f), flag)
        ui.write("%s\n" % line.rstrip())
1028
1028
def diff(ui, repo, *pats, **opts):
    """diff repository (or selected files)

    Show differences between revisions for the specified files.

    Differences between files are shown using the unified diff format.

    NOTE: diff may generate unexpected results for merges, as it will
    default to comparing against the working directory's first parent
    changeset if no revisions are specified.

    When two revision arguments are given, then changes are shown
    between those revisions. If only one revision is specified then
    that revision is compared to the working directory, and, when no
    revisions are specified, the working directory files are compared
    to its parent.

    Without the -a/--text option, diff will avoid generating diffs of
    files it detects as binary. With -a, diff will generate a diff
    anyway, probably with undesirable results.

    Use the -g/--git option to generate diffs in the git extended diff
    format. For more information, read 'hg help diffs'.
    """

    revs = opts.get('rev')
    change = opts.get('change')

    # --rev and --change are mutually exclusive ways to pick endpoints
    if revs and change:
        raise util.Abort(
            _('cannot specify --rev and --change at the same time'))
    if change:
        # --change: diff one changeset against its first parent
        node2 = repo.lookup(change)
        node1 = repo[node2].parents()[0].node()
    else:
        node1, node2 = cmdutil.revpair(repo, revs)

    m = cmdutil.match(repo, pats, opts)
    diffopts = patch.diffopts(ui, opts)
    for chunk in patch.diff(repo, node1, node2, match=m, opts=diffopts):
        repo.ui.write(chunk)
1070
1070
def export(ui, repo, *changesets, **opts):
    """dump the header and diffs for one or more changesets

    Print the changeset header and diffs for one or more revisions.

    The information shown in the changeset header is: author,
    changeset hash, parent(s) and commit comment.

    NOTE: export may generate unexpected diff output for merge
    changesets, as it will compare the merge changeset against its
    first parent only.

    Output may be to a file, in which case the name of the file is
    given using a format string. The formatting rules are as follows:

    %% literal "%" character
    %H changeset hash (40 bytes of hexadecimal)
    %N number of patches being generated
    %R changeset revision number
    %b basename of the exporting repository
    %h short-form changeset hash (12 bytes of hexadecimal)
    %n zero-padded sequence number, starting at 1
    %r zero-padded changeset revision number

    Without the -a/--text option, export will avoid generating diffs
    of files it detects as binary. With -a, export will generate a
    diff anyway, probably with undesirable results.

    Use the -g/--git option to generate diffs in the git extended diff
    format. Read the diffs help topic for more information.

    With the --switch-parent option, the diff will be against the
    second parent. It can be useful to review a merge.
    """
    if not changesets:
        raise util.Abort(_("export requires at least one changeset"))
    revs = cmdutil.revrange(repo, changesets)
    # pick the singular or plural status message
    if len(revs) > 1:
        msg = _('exporting patches:\n')
    else:
        msg = _('exporting patch:\n')
    ui.note(msg)
    patch.export(repo, revs, template=opts.get('output'),
                 switch_parent=opts.get('switch_parent'),
                 opts=patch.diffopts(ui, opts))
1115
1115
def grep(ui, repo, pattern, *pats, **opts):
    """search for a pattern in specified files and revisions

    Search revisions of files for a regular expression.

    This command behaves differently than Unix grep. It only accepts
    Python/Perl regexps. It searches repository history, not the
    working directory. It always prints the revision number in which a
    match appears.

    By default, grep only prints output for the first revision of a
    file in which it finds a match. To get it to print every revision
    that contains a change in match status ("-" for a match that
    becomes a non-match, or "+" for a non-match that becomes a match),
    use the --all flag.
    """
    reflags = 0
    if opts.get('ignore_case'):
        reflags |= re.I
    try:
        regexp = re.compile(pattern, reflags)
    except Exception, inst:
        ui.warn(_("grep: invalid match pattern: %s\n") % inst)
        return None
    # --print0 switches the field separator and line terminator to NUL
    sep, eol = ':', '\n'
    if opts.get('print0'):
        sep = eol = '\0'

    # cache filelog objects so each file's log is opened at most once
    fcache = {}
    def getfile(fn):
        if fn not in fcache:
            fcache[fn] = repo.file(fn)
        return fcache[fn]

    def matchlines(body):
        # yield (linenum, match-start-col, match-end-col, line-text) for
        # every regexp match in body; at most one match per line is
        # reported because the scan resumes after the matched line
        begin = 0
        linenum = 0
        while True:
            match = regexp.search(body, begin)
            if not match:
                break
            mstart, mend = match.span()
            linenum += body.count('\n', begin, mstart) + 1
            lstart = body.rfind('\n', begin, mstart) + 1 or begin
            begin = body.find('\n', mend) + 1 or len(body)
            lend = begin - 1
            yield linenum, mstart - lstart, mend - lstart, body[lstart:lend]

    class linestate(object):
        # one matched line; hash/eq are defined so lists of linestates
        # can be diffed by SequenceMatcher (equality is on line text only)
        def __init__(self, line, linenum, colstart, colend):
            self.line = line
            self.linenum = linenum
            self.colstart = colstart
            self.colend = colend

        def __hash__(self):
            return hash((self.linenum, self.line))

        def __eq__(self, other):
            return self.line == other.line

    # matches: rev -> {filename -> [linestate, ...]} for the current window
    # copies:  rev -> {filename -> copy-source} (only with --follow)
    matches = {}
    copies = {}
    def grepbody(fn, rev, body):
        # record all matching lines of fn@rev into matches[rev][fn]
        matches[rev].setdefault(fn, [])
        m = matches[rev][fn]
        for lnum, cstart, cend, line in matchlines(body):
            s = linestate(line, lnum, cstart, cend)
            m.append(s)

    def difflinestates(a, b):
        # yield ('+'/'-', linestate) pairs describing how the set of
        # matching lines changed between states a and b
        sm = difflib.SequenceMatcher(None, a, b)
        for tag, alo, ahi, blo, bhi in sm.get_opcodes():
            if tag == 'insert':
                for i in xrange(blo, bhi):
                    yield ('+', b[i])
            elif tag == 'delete':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
            elif tag == 'replace':
                for i in xrange(alo, ahi):
                    yield ('-', a[i])
                for i in xrange(blo, bhi):
                    yield ('+', b[i])

    # prev: filename -> last revision already processed for that file
    prev = {}
    def display(fn, rev, states, prevstates):
        # print the matches for fn at revision prev[fn]; with --all the
        # +/- transitions between prevstates and states are shown.
        # Returns True if anything was printed.
        datefunc = ui.quiet and util.shortdate or util.datestr
        found = False
        filerevmatches = {}
        r = prev.get(fn, -1)
        if opts.get('all'):
            iter = difflinestates(states, prevstates)
        else:
            iter = [('', l) for l in prevstates]
        for change, l in iter:
            cols = [fn, str(r)]
            if opts.get('line_number'):
                cols.append(str(l.linenum))
            if opts.get('all'):
                cols.append(change)
            if opts.get('user'):
                cols.append(ui.shortuser(get(r)[1]))
            if opts.get('date'):
                cols.append(datefunc(get(r)[2]))
            if opts.get('files_with_matches'):
                # print each (file, rev) pair at most once
                c = (fn, r)
                if c in filerevmatches:
                    continue
                filerevmatches[c] = 1
            else:
                cols.append(l.line)
            ui.write(sep.join(cols), eol)
            found = True
        return found

    # fstate: filename -> matching lines seen at the previous revision
    # skip:   filenames (and their copy sources) already fully reported
    fstate = {}
    skip = {}
    get = util.cachefunc(lambda r: repo[r].changeset())
    changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)
    found = False
    follow = opts.get('follow')
    # walkchangerevs yields ('window'|'add'|'iter', rev, files) events:
    # collect matches during 'add', report them during 'iter'
    for st, rev, fns in changeiter:
        if st == 'window':
            matches.clear()
        elif st == 'add':
            ctx = repo[rev]
            matches[rev] = {}
            for fn in fns:
                if fn in skip:
                    continue
                try:
                    grepbody(fn, rev, getfile(fn).read(ctx.filenode(fn)))
                    fstate.setdefault(fn, [])
                    if follow:
                        copied = getfile(fn).renamed(ctx.filenode(fn))
                        if copied:
                            copies.setdefault(rev, {})[fn] = copied[0]
                except error.LookupError:
                    # file not present in this revision: nothing to grep
                    pass
        elif st == 'iter':
            for fn, m in util.sort(matches[rev].items()):
                copy = copies.get(rev, {}).get(fn)
                if fn in skip:
                    if copy:
                        skip[copy] = True
                    continue
                if fn in prev or fstate[fn]:
                    r = display(fn, rev, m, fstate[fn])
                    found = found or r
                    if r and not opts.get('all'):
                        # without --all, one reported revision per file
                        skip[fn] = True
                        if copy:
                            skip[copy] = True
                fstate[fn] = m
                if copy:
                    fstate[copy] = m
                prev[fn] = rev

    # flush files whose last recorded state was never displayed
    for fn, state in util.sort(fstate.items()):
        if fn in skip:
            continue
        if fn not in copies.get(prev[fn], {}):
            found = display(fn, rev, {}, state) or found
    # shell-style exit status: 0 if anything matched, 1 otherwise
    return (not found and 1) or 0
1281
1281
1282 def heads(ui, repo, *branchrevs, **opts):
1282 def heads(ui, repo, *branchrevs, **opts):
1283 """show current repository heads or show branch heads
1283 """show current repository heads or show branch heads
1284
1284
1285 With no arguments, show all repository head changesets.
1285 With no arguments, show all repository head changesets.
1286
1286
1287 If branch or revisions names are given this will show the heads of
1287 If branch or revisions names are given this will show the heads of
1288 the specified branches or the branches those revisions are tagged
1288 the specified branches or the branches those revisions are tagged
1289 with.
1289 with.
1290
1290
1291 Repository "heads" are changesets that don't have child
1291 Repository "heads" are changesets that don't have child
1292 changesets. They are where development generally takes place and
1292 changesets. They are where development generally takes place and
1293 are the usual targets for update and merge operations.
1293 are the usual targets for update and merge operations.
1294
1294
1295 Branch heads are changesets that have a given branch tag, but have
1295 Branch heads are changesets that have a given branch tag, but have
1296 no child changesets with that tag. They are usually where
1296 no child changesets with that tag. They are usually where
1297 development on the given branch takes place.
1297 development on the given branch takes place.
1298 """
1298 """
1299 if opts.get('rev'):
1299 if opts.get('rev'):
1300 start = repo.lookup(opts['rev'])
1300 start = repo.lookup(opts['rev'])
1301 else:
1301 else:
1302 start = None
1302 start = None
1303 closed = not opts.get('active')
1303 closed = not opts.get('active')
1304 if not branchrevs:
1304 if not branchrevs:
1305 # Assume we're looking repo-wide heads if no revs were specified.
1305 # Assume we're looking repo-wide heads if no revs were specified.
1306 heads = repo.heads(start, closed=closed)
1306 heads = repo.heads(start, closed=closed)
1307 else:
1307 else:
1308 heads = []
1308 heads = []
1309 visitedset = set()
1309 visitedset = set()
1310 for branchrev in branchrevs:
1310 for branchrev in branchrevs:
1311 branch = repo[branchrev].branch()
1311 branch = repo[branchrev].branch()
1312 if branch in visitedset:
1312 if branch in visitedset:
1313 continue
1313 continue
1314 visitedset.add(branch)
1314 visitedset.add(branch)
1315 bheads = repo.branchheads(branch, start, closed=closed)
1315 bheads = repo.branchheads(branch, start, closed=closed)
1316 if not bheads:
1316 if not bheads:
1317 if branch != branchrev:
1317 if branch != branchrev:
1318 ui.warn(_("no changes on branch %s containing %s are "
1318 ui.warn(_("no changes on branch %s containing %s are "
1319 "reachable from %s\n")
1319 "reachable from %s\n")
1320 % (branch, branchrev, opts.get('rev')))
1320 % (branch, branchrev, opts.get('rev')))
1321 else:
1321 else:
1322 ui.warn(_("no changes on branch %s are reachable from %s\n")
1322 ui.warn(_("no changes on branch %s are reachable from %s\n")
1323 % (branch, opts.get('rev')))
1323 % (branch, opts.get('rev')))
1324 heads.extend(bheads)
1324 heads.extend(bheads)
1325 if not heads:
1325 if not heads:
1326 return 1
1326 return 1
1327 displayer = cmdutil.show_changeset(ui, repo, opts)
1327 displayer = cmdutil.show_changeset(ui, repo, opts)
1328 for n in heads:
1328 for n in heads:
1329 displayer.show(repo[n])
1329 displayer.show(repo[n])
1330
1330
1331 def help_(ui, name=None, with_version=False):
1331 def help_(ui, name=None, with_version=False):
1332 """show help for a given topic or a help overview
1332 """show help for a given topic or a help overview
1333
1333
1334 With no arguments, print a list of commands and short help.
1334 With no arguments, print a list of commands and short help.
1335
1335
1336 Given a topic, extension, or command name, print help for that
1336 Given a topic, extension, or command name, print help for that
1337 topic."""
1337 topic."""
1338 option_lists = []
1338 option_lists = []
1339
1339
1340 def addglobalopts(aliases):
1340 def addglobalopts(aliases):
1341 if ui.verbose:
1341 if ui.verbose:
1342 option_lists.append((_("global options:"), globalopts))
1342 option_lists.append((_("global options:"), globalopts))
1343 if name == 'shortlist':
1343 if name == 'shortlist':
1344 option_lists.append((_('use "hg help" for the full list '
1344 option_lists.append((_('use "hg help" for the full list '
1345 'of commands'), ()))
1345 'of commands'), ()))
1346 else:
1346 else:
1347 if name == 'shortlist':
1347 if name == 'shortlist':
1348 msg = _('use "hg help" for the full list of commands '
1348 msg = _('use "hg help" for the full list of commands '
1349 'or "hg -v" for details')
1349 'or "hg -v" for details')
1350 elif aliases:
1350 elif aliases:
1351 msg = _('use "hg -v help%s" to show aliases and '
1351 msg = _('use "hg -v help%s" to show aliases and '
1352 'global options') % (name and " " + name or "")
1352 'global options') % (name and " " + name or "")
1353 else:
1353 else:
1354 msg = _('use "hg -v help %s" to show global options') % name
1354 msg = _('use "hg -v help %s" to show global options') % name
1355 option_lists.append((msg, ()))
1355 option_lists.append((msg, ()))
1356
1356
1357 def helpcmd(name):
1357 def helpcmd(name):
1358 if with_version:
1358 if with_version:
1359 version_(ui)
1359 version_(ui)
1360 ui.write('\n')
1360 ui.write('\n')
1361
1361
1362 try:
1362 try:
1363 aliases, i = cmdutil.findcmd(name, table, False)
1363 aliases, i = cmdutil.findcmd(name, table, False)
1364 except error.AmbiguousCommand, inst:
1364 except error.AmbiguousCommand, inst:
1365 select = lambda c: c.lstrip('^').startswith(inst.args[0])
1365 select = lambda c: c.lstrip('^').startswith(inst.args[0])
1366 helplist(_('list of commands:\n\n'), select)
1366 helplist(_('list of commands:\n\n'), select)
1367 return
1367 return
1368
1368
1369 # synopsis
1369 # synopsis
1370 if len(i) > 2:
1370 if len(i) > 2:
1371 if i[2].startswith('hg'):
1371 if i[2].startswith('hg'):
1372 ui.write("%s\n" % i[2])
1372 ui.write("%s\n" % i[2])
1373 else:
1373 else:
1374 ui.write('hg %s %s\n' % (aliases[0], i[2]))
1374 ui.write('hg %s %s\n' % (aliases[0], i[2]))
1375 else:
1375 else:
1376 ui.write('hg %s\n' % aliases[0])
1376 ui.write('hg %s\n' % aliases[0])
1377
1377
1378 # aliases
1378 # aliases
1379 if not ui.quiet and len(aliases) > 1:
1379 if not ui.quiet and len(aliases) > 1:
1380 ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))
1380 ui.write(_("\naliases: %s\n") % ', '.join(aliases[1:]))
1381
1381
1382 # description
1382 # description
1383 doc = gettext(i[0].__doc__)
1383 doc = gettext(i[0].__doc__)
1384 if not doc:
1384 if not doc:
1385 doc = _("(no help text available)")
1385 doc = _("(no help text available)")
1386 if ui.quiet:
1386 if ui.quiet:
1387 doc = doc.splitlines(0)[0]
1387 doc = doc.splitlines(0)[0]
1388 ui.write("\n%s\n" % doc.rstrip())
1388 ui.write("\n%s\n" % doc.rstrip())
1389
1389
1390 if not ui.quiet:
1390 if not ui.quiet:
1391 # options
1391 # options
1392 if i[1]:
1392 if i[1]:
1393 option_lists.append((_("options:\n"), i[1]))
1393 option_lists.append((_("options:\n"), i[1]))
1394
1394
1395 addglobalopts(False)
1395 addglobalopts(False)
1396
1396
1397 def helplist(header, select=None):
1397 def helplist(header, select=None):
1398 h = {}
1398 h = {}
1399 cmds = {}
1399 cmds = {}
1400 for c, e in table.iteritems():
1400 for c, e in table.iteritems():
1401 f = c.split("|", 1)[0]
1401 f = c.split("|", 1)[0]
1402 if select and not select(f):
1402 if select and not select(f):
1403 continue
1403 continue
1404 if (not select and name != 'shortlist' and
1404 if (not select and name != 'shortlist' and
1405 e[0].__module__ != __name__):
1405 e[0].__module__ != __name__):
1406 continue
1406 continue
1407 if name == "shortlist" and not f.startswith("^"):
1407 if name == "shortlist" and not f.startswith("^"):
1408 continue
1408 continue
1409 f = f.lstrip("^")
1409 f = f.lstrip("^")
1410 if not ui.debugflag and f.startswith("debug"):
1410 if not ui.debugflag and f.startswith("debug"):
1411 continue
1411 continue
1412 doc = gettext(e[0].__doc__)
1412 doc = gettext(e[0].__doc__)
1413 if not doc:
1413 if not doc:
1414 doc = _("(no help text available)")
1414 doc = _("(no help text available)")
1415 h[f] = doc.splitlines(0)[0].rstrip()
1415 h[f] = doc.splitlines(0)[0].rstrip()
1416 cmds[f] = c.lstrip("^")
1416 cmds[f] = c.lstrip("^")
1417
1417
1418 if not h:
1418 if not h:
1419 ui.status(_('no commands defined\n'))
1419 ui.status(_('no commands defined\n'))
1420 return
1420 return
1421
1421
1422 ui.status(header)
1422 ui.status(header)
1423 fns = util.sort(h)
1423 fns = util.sort(h)
1424 m = max(map(len, fns))
1424 m = max(map(len, fns))
1425 for f in fns:
1425 for f in fns:
1426 if ui.verbose:
1426 if ui.verbose:
1427 commands = cmds[f].replace("|",", ")
1427 commands = cmds[f].replace("|",", ")
1428 ui.write(" %s:\n %s\n"%(commands, h[f]))
1428 ui.write(" %s:\n %s\n"%(commands, h[f]))
1429 else:
1429 else:
1430 ui.write(' %-*s %s\n' % (m, f, h[f]))
1430 ui.write(' %-*s %s\n' % (m, f, h[f]))
1431
1431
1432 exts = list(extensions.extensions())
1432 exts = list(extensions.extensions())
1433 if exts and name != 'shortlist':
1433 if exts and name != 'shortlist':
1434 ui.write(_('\nenabled extensions:\n\n'))
1434 ui.write(_('\nenabled extensions:\n\n'))
1435 maxlength = 0
1435 maxlength = 0
1436 exthelps = []
1436 exthelps = []
1437 for ename, ext in exts:
1437 for ename, ext in exts:
1438 doc = (gettext(ext.__doc__) or _('(no help text available)'))
1438 doc = (gettext(ext.__doc__) or _('(no help text available)'))
1439 ename = ename.split('.')[-1]
1439 ename = ename.split('.')[-1]
1440 maxlength = max(len(ename), maxlength)
1440 maxlength = max(len(ename), maxlength)
1441 exthelps.append((ename, doc.splitlines(0)[0].strip()))
1441 exthelps.append((ename, doc.splitlines(0)[0].strip()))
1442 for ename, text in exthelps:
1442 for ename, text in exthelps:
1443 ui.write(_(' %s %s\n') % (ename.ljust(maxlength), text))
1443 ui.write(_(' %s %s\n') % (ename.ljust(maxlength), text))
1444
1444
1445 if not ui.quiet:
1445 if not ui.quiet:
1446 addglobalopts(True)
1446 addglobalopts(True)
1447
1447
1448 def helptopic(name):
1448 def helptopic(name):
1449 for names, header, doc in help.helptable:
1449 for names, header, doc in help.helptable:
1450 if name in names:
1450 if name in names:
1451 break
1451 break
1452 else:
1452 else:
1453 raise error.UnknownCommand(name)
1453 raise error.UnknownCommand(name)
1454
1454
1455 # description
1455 # description
1456 if not doc:
1456 if not doc:
1457 doc = _("(no help text available)")
1457 doc = _("(no help text available)")
1458 if callable(doc):
1458 if callable(doc):
1459 doc = doc()
1459 doc = doc()
1460
1460
1461 ui.write("%s\n" % header)
1461 ui.write("%s\n" % header)
1462 ui.write("%s\n" % doc.rstrip())
1462 ui.write("%s\n" % doc.rstrip())
1463
1463
1464 def helpext(name):
1464 def helpext(name):
1465 try:
1465 try:
1466 mod = extensions.find(name)
1466 mod = extensions.find(name)
1467 except KeyError:
1467 except KeyError:
1468 raise error.UnknownCommand(name)
1468 raise error.UnknownCommand(name)
1469
1469
1470 doc = gettext(mod.__doc__) or _('no help text available')
1470 doc = gettext(mod.__doc__) or _('no help text available')
1471 doc = doc.splitlines(0)
1471 doc = doc.splitlines(0)
1472 ui.write(_('%s extension - %s\n') % (name.split('.')[-1], doc[0]))
1472 ui.write(_('%s extension - %s\n') % (name.split('.')[-1], doc[0]))
1473 for d in doc[1:]:
1473 for d in doc[1:]:
1474 ui.write(d, '\n')
1474 ui.write(d, '\n')
1475
1475
1476 ui.status('\n')
1476 ui.status('\n')
1477
1477
1478 try:
1478 try:
1479 ct = mod.cmdtable
1479 ct = mod.cmdtable
1480 except AttributeError:
1480 except AttributeError:
1481 ct = {}
1481 ct = {}
1482
1482
1483 modcmds = dict.fromkeys([c.split('|', 1)[0] for c in ct])
1483 modcmds = set([c.split('|', 1)[0] for c in ct])
1484 helplist(_('list of commands:\n\n'), modcmds.has_key)
1484 helplist(_('list of commands:\n\n'), modcmds.__contains__)
1485
1485
1486 if name and name != 'shortlist':
1486 if name and name != 'shortlist':
1487 i = None
1487 i = None
1488 for f in (helptopic, helpcmd, helpext):
1488 for f in (helptopic, helpcmd, helpext):
1489 try:
1489 try:
1490 f(name)
1490 f(name)
1491 i = None
1491 i = None
1492 break
1492 break
1493 except error.UnknownCommand, inst:
1493 except error.UnknownCommand, inst:
1494 i = inst
1494 i = inst
1495 if i:
1495 if i:
1496 raise i
1496 raise i
1497
1497
1498 else:
1498 else:
1499 # program name
1499 # program name
1500 if ui.verbose or with_version:
1500 if ui.verbose or with_version:
1501 version_(ui)
1501 version_(ui)
1502 else:
1502 else:
1503 ui.status(_("Mercurial Distributed SCM\n"))
1503 ui.status(_("Mercurial Distributed SCM\n"))
1504 ui.status('\n')
1504 ui.status('\n')
1505
1505
1506 # list of commands
1506 # list of commands
1507 if name == "shortlist":
1507 if name == "shortlist":
1508 header = _('basic commands:\n\n')
1508 header = _('basic commands:\n\n')
1509 else:
1509 else:
1510 header = _('list of commands:\n\n')
1510 header = _('list of commands:\n\n')
1511
1511
1512 helplist(header)
1512 helplist(header)
1513
1513
1514 # list all option lists
1514 # list all option lists
1515 opt_output = []
1515 opt_output = []
1516 for title, options in option_lists:
1516 for title, options in option_lists:
1517 opt_output.append(("\n%s" % title, None))
1517 opt_output.append(("\n%s" % title, None))
1518 for shortopt, longopt, default, desc in options:
1518 for shortopt, longopt, default, desc in options:
1519 if "DEPRECATED" in desc and not ui.verbose: continue
1519 if "DEPRECATED" in desc and not ui.verbose: continue
1520 opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
1520 opt_output.append(("%2s%s" % (shortopt and "-%s" % shortopt,
1521 longopt and " --%s" % longopt),
1521 longopt and " --%s" % longopt),
1522 "%s%s" % (desc,
1522 "%s%s" % (desc,
1523 default
1523 default
1524 and _(" (default: %s)") % default
1524 and _(" (default: %s)") % default
1525 or "")))
1525 or "")))
1526
1526
1527 if not name:
1527 if not name:
1528 ui.write(_("\nadditional help topics:\n\n"))
1528 ui.write(_("\nadditional help topics:\n\n"))
1529 topics = []
1529 topics = []
1530 for names, header, doc in help.helptable:
1530 for names, header, doc in help.helptable:
1531 names = [(-len(name), name) for name in names]
1531 names = [(-len(name), name) for name in names]
1532 names.sort()
1532 names.sort()
1533 topics.append((names[0][1], header))
1533 topics.append((names[0][1], header))
1534 topics_len = max([len(s[0]) for s in topics])
1534 topics_len = max([len(s[0]) for s in topics])
1535 for t, desc in topics:
1535 for t, desc in topics:
1536 ui.write(" %-*s %s\n" % (topics_len, t, desc))
1536 ui.write(" %-*s %s\n" % (topics_len, t, desc))
1537
1537
1538 if opt_output:
1538 if opt_output:
1539 opts_len = max([len(line[0]) for line in opt_output if line[1]] or [0])
1539 opts_len = max([len(line[0]) for line in opt_output if line[1]] or [0])
1540 for first, second in opt_output:
1540 for first, second in opt_output:
1541 if second:
1541 if second:
1542 # wrap descriptions at 70 characters, just like the
1542 # wrap descriptions at 70 characters, just like the
1543 # main help texts
1543 # main help texts
1544 second = textwrap.wrap(second, width=70 - opts_len - 3)
1544 second = textwrap.wrap(second, width=70 - opts_len - 3)
1545 pad = '\n' + ' ' * (opts_len + 3)
1545 pad = '\n' + ' ' * (opts_len + 3)
1546 ui.write(" %-*s %s\n" % (opts_len, first, pad.join(second)))
1546 ui.write(" %-*s %s\n" % (opts_len, first, pad.join(second)))
1547 else:
1547 else:
1548 ui.write("%s\n" % first)
1548 ui.write("%s\n" % first)
1549
1549
1550 def identify(ui, repo, source=None,
1550 def identify(ui, repo, source=None,
1551 rev=None, num=None, id=None, branch=None, tags=None):
1551 rev=None, num=None, id=None, branch=None, tags=None):
1552 """identify the working copy or specified revision
1552 """identify the working copy or specified revision
1553
1553
1554 With no revision, print a summary of the current state of the
1554 With no revision, print a summary of the current state of the
1555 repository.
1555 repository.
1556
1556
1557 With a path, do a lookup in another repository.
1557 With a path, do a lookup in another repository.
1558
1558
1559 This summary identifies the repository state using one or two
1559 This summary identifies the repository state using one or two
1560 parent hash identifiers, followed by a "+" if there are
1560 parent hash identifiers, followed by a "+" if there are
1561 uncommitted changes in the working directory, a list of tags for
1561 uncommitted changes in the working directory, a list of tags for
1562 this revision and a branch name for non-default branches.
1562 this revision and a branch name for non-default branches.
1563 """
1563 """
1564
1564
1565 if not repo and not source:
1565 if not repo and not source:
1566 raise util.Abort(_("There is no Mercurial repository here "
1566 raise util.Abort(_("There is no Mercurial repository here "
1567 "(.hg not found)"))
1567 "(.hg not found)"))
1568
1568
1569 hexfunc = ui.debugflag and hex or short
1569 hexfunc = ui.debugflag and hex or short
1570 default = not (num or id or branch or tags)
1570 default = not (num or id or branch or tags)
1571 output = []
1571 output = []
1572
1572
1573 revs = []
1573 revs = []
1574 if source:
1574 if source:
1575 source, revs, checkout = hg.parseurl(ui.expandpath(source), [])
1575 source, revs, checkout = hg.parseurl(ui.expandpath(source), [])
1576 repo = hg.repository(ui, source)
1576 repo = hg.repository(ui, source)
1577
1577
1578 if not repo.local():
1578 if not repo.local():
1579 if not rev and revs:
1579 if not rev and revs:
1580 rev = revs[0]
1580 rev = revs[0]
1581 if not rev:
1581 if not rev:
1582 rev = "tip"
1582 rev = "tip"
1583 if num or branch or tags:
1583 if num or branch or tags:
1584 raise util.Abort(
1584 raise util.Abort(
1585 "can't query remote revision number, branch, or tags")
1585 "can't query remote revision number, branch, or tags")
1586 output = [hexfunc(repo.lookup(rev))]
1586 output = [hexfunc(repo.lookup(rev))]
1587 elif not rev:
1587 elif not rev:
1588 ctx = repo[None]
1588 ctx = repo[None]
1589 parents = ctx.parents()
1589 parents = ctx.parents()
1590 changed = False
1590 changed = False
1591 if default or id or num:
1591 if default or id or num:
1592 changed = ctx.files() + ctx.deleted()
1592 changed = ctx.files() + ctx.deleted()
1593 if default or id:
1593 if default or id:
1594 output = ["%s%s" % ('+'.join([hexfunc(p.node()) for p in parents]),
1594 output = ["%s%s" % ('+'.join([hexfunc(p.node()) for p in parents]),
1595 (changed) and "+" or "")]
1595 (changed) and "+" or "")]
1596 if num:
1596 if num:
1597 output.append("%s%s" % ('+'.join([str(p.rev()) for p in parents]),
1597 output.append("%s%s" % ('+'.join([str(p.rev()) for p in parents]),
1598 (changed) and "+" or ""))
1598 (changed) and "+" or ""))
1599 else:
1599 else:
1600 ctx = repo[rev]
1600 ctx = repo[rev]
1601 if default or id:
1601 if default or id:
1602 output = [hexfunc(ctx.node())]
1602 output = [hexfunc(ctx.node())]
1603 if num:
1603 if num:
1604 output.append(str(ctx.rev()))
1604 output.append(str(ctx.rev()))
1605
1605
1606 if repo.local() and default and not ui.quiet:
1606 if repo.local() and default and not ui.quiet:
1607 b = encoding.tolocal(ctx.branch())
1607 b = encoding.tolocal(ctx.branch())
1608 if b != 'default':
1608 if b != 'default':
1609 output.append("(%s)" % b)
1609 output.append("(%s)" % b)
1610
1610
1611 # multiple tags for a single parent separated by '/'
1611 # multiple tags for a single parent separated by '/'
1612 t = "/".join(ctx.tags())
1612 t = "/".join(ctx.tags())
1613 if t:
1613 if t:
1614 output.append(t)
1614 output.append(t)
1615
1615
1616 if branch:
1616 if branch:
1617 output.append(encoding.tolocal(ctx.branch()))
1617 output.append(encoding.tolocal(ctx.branch()))
1618
1618
1619 if tags:
1619 if tags:
1620 output.extend(ctx.tags())
1620 output.extend(ctx.tags())
1621
1621
1622 ui.write("%s\n" % ' '.join(output))
1622 ui.write("%s\n" % ' '.join(output))
1623
1623
1624 def import_(ui, repo, patch1, *patches, **opts):
1624 def import_(ui, repo, patch1, *patches, **opts):
1625 """import an ordered set of patches
1625 """import an ordered set of patches
1626
1626
1627 Import a list of patches and commit them individually.
1627 Import a list of patches and commit them individually.
1628
1628
1629 If there are outstanding changes in the working directory, import
1629 If there are outstanding changes in the working directory, import
1630 will abort unless given the -f/--force flag.
1630 will abort unless given the -f/--force flag.
1631
1631
1632 You can import a patch straight from a mail message. Even patches
1632 You can import a patch straight from a mail message. Even patches
1633 as attachments work (body part must be type text/plain or
1633 as attachments work (body part must be type text/plain or
1634 text/x-patch to be used). From and Subject headers of email
1634 text/x-patch to be used). From and Subject headers of email
1635 message are used as default committer and commit message. All
1635 message are used as default committer and commit message. All
1636 text/plain body parts before first diff are added to commit
1636 text/plain body parts before first diff are added to commit
1637 message.
1637 message.
1638
1638
1639 If the imported patch was generated by hg export, user and
1639 If the imported patch was generated by hg export, user and
1640 description from patch override values from message headers and
1640 description from patch override values from message headers and
1641 body. Values given on command line with -m/--message and -u/--user
1641 body. Values given on command line with -m/--message and -u/--user
1642 override these.
1642 override these.
1643
1643
1644 If --exact is specified, import will set the working directory to
1644 If --exact is specified, import will set the working directory to
1645 the parent of each patch before applying it, and will abort if the
1645 the parent of each patch before applying it, and will abort if the
1646 resulting changeset has a different ID than the one recorded in
1646 resulting changeset has a different ID than the one recorded in
1647 the patch. This may happen due to character set problems or other
1647 the patch. This may happen due to character set problems or other
1648 deficiencies in the text patch format.
1648 deficiencies in the text patch format.
1649
1649
1650 With -s/--similarity, hg will attempt to discover renames and
1650 With -s/--similarity, hg will attempt to discover renames and
1651 copies in the patch in the same way as 'addremove'.
1651 copies in the patch in the same way as 'addremove'.
1652
1652
1653 To read a patch from standard input, use patch name "-". See 'hg
1653 To read a patch from standard input, use patch name "-". See 'hg
1654 help dates' for a list of formats valid for -d/--date.
1654 help dates' for a list of formats valid for -d/--date.
1655 """
1655 """
1656 patches = (patch1,) + patches
1656 patches = (patch1,) + patches
1657
1657
1658 date = opts.get('date')
1658 date = opts.get('date')
1659 if date:
1659 if date:
1660 opts['date'] = util.parsedate(date)
1660 opts['date'] = util.parsedate(date)
1661
1661
1662 try:
1662 try:
1663 sim = float(opts.get('similarity') or 0)
1663 sim = float(opts.get('similarity') or 0)
1664 except ValueError:
1664 except ValueError:
1665 raise util.Abort(_('similarity must be a number'))
1665 raise util.Abort(_('similarity must be a number'))
1666 if sim < 0 or sim > 100:
1666 if sim < 0 or sim > 100:
1667 raise util.Abort(_('similarity must be between 0 and 100'))
1667 raise util.Abort(_('similarity must be between 0 and 100'))
1668
1668
1669 if opts.get('exact') or not opts.get('force'):
1669 if opts.get('exact') or not opts.get('force'):
1670 cmdutil.bail_if_changed(repo)
1670 cmdutil.bail_if_changed(repo)
1671
1671
1672 d = opts["base"]
1672 d = opts["base"]
1673 strip = opts["strip"]
1673 strip = opts["strip"]
1674 wlock = lock = None
1674 wlock = lock = None
1675 try:
1675 try:
1676 wlock = repo.wlock()
1676 wlock = repo.wlock()
1677 lock = repo.lock()
1677 lock = repo.lock()
1678 for p in patches:
1678 for p in patches:
1679 pf = os.path.join(d, p)
1679 pf = os.path.join(d, p)
1680
1680
1681 if pf == '-':
1681 if pf == '-':
1682 ui.status(_("applying patch from stdin\n"))
1682 ui.status(_("applying patch from stdin\n"))
1683 pf = sys.stdin
1683 pf = sys.stdin
1684 else:
1684 else:
1685 ui.status(_("applying %s\n") % p)
1685 ui.status(_("applying %s\n") % p)
1686 pf = url.open(ui, pf)
1686 pf = url.open(ui, pf)
1687 data = patch.extract(ui, pf)
1687 data = patch.extract(ui, pf)
1688 tmpname, message, user, date, branch, nodeid, p1, p2 = data
1688 tmpname, message, user, date, branch, nodeid, p1, p2 = data
1689
1689
1690 if tmpname is None:
1690 if tmpname is None:
1691 raise util.Abort(_('no diffs found'))
1691 raise util.Abort(_('no diffs found'))
1692
1692
1693 try:
1693 try:
1694 cmdline_message = cmdutil.logmessage(opts)
1694 cmdline_message = cmdutil.logmessage(opts)
1695 if cmdline_message:
1695 if cmdline_message:
1696 # pickup the cmdline msg
1696 # pickup the cmdline msg
1697 message = cmdline_message
1697 message = cmdline_message
1698 elif message:
1698 elif message:
1699 # pickup the patch msg
1699 # pickup the patch msg
1700 message = message.strip()
1700 message = message.strip()
1701 else:
1701 else:
1702 # launch the editor
1702 # launch the editor
1703 message = None
1703 message = None
1704 ui.debug(_('message:\n%s\n') % message)
1704 ui.debug(_('message:\n%s\n') % message)
1705
1705
1706 wp = repo.parents()
1706 wp = repo.parents()
1707 if opts.get('exact'):
1707 if opts.get('exact'):
1708 if not nodeid or not p1:
1708 if not nodeid or not p1:
1709 raise util.Abort(_('not a mercurial patch'))
1709 raise util.Abort(_('not a mercurial patch'))
1710 p1 = repo.lookup(p1)
1710 p1 = repo.lookup(p1)
1711 p2 = repo.lookup(p2 or hex(nullid))
1711 p2 = repo.lookup(p2 or hex(nullid))
1712
1712
1713 if p1 != wp[0].node():
1713 if p1 != wp[0].node():
1714 hg.clean(repo, p1)
1714 hg.clean(repo, p1)
1715 repo.dirstate.setparents(p1, p2)
1715 repo.dirstate.setparents(p1, p2)
1716 elif p2:
1716 elif p2:
1717 try:
1717 try:
1718 p1 = repo.lookup(p1)
1718 p1 = repo.lookup(p1)
1719 p2 = repo.lookup(p2)
1719 p2 = repo.lookup(p2)
1720 if p1 == wp[0].node():
1720 if p1 == wp[0].node():
1721 repo.dirstate.setparents(p1, p2)
1721 repo.dirstate.setparents(p1, p2)
1722 except error.RepoError:
1722 except error.RepoError:
1723 pass
1723 pass
1724 if opts.get('exact') or opts.get('import_branch'):
1724 if opts.get('exact') or opts.get('import_branch'):
1725 repo.dirstate.setbranch(branch or 'default')
1725 repo.dirstate.setbranch(branch or 'default')
1726
1726
1727 files = {}
1727 files = {}
1728 try:
1728 try:
1729 patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
1729 patch.patch(tmpname, ui, strip=strip, cwd=repo.root,
1730 files=files)
1730 files=files)
1731 finally:
1731 finally:
1732 files = patch.updatedir(ui, repo, files, similarity=sim/100.)
1732 files = patch.updatedir(ui, repo, files, similarity=sim/100.)
1733 if not opts.get('no_commit'):
1733 if not opts.get('no_commit'):
1734 n = repo.commit(files, message, opts.get('user') or user,
1734 n = repo.commit(files, message, opts.get('user') or user,
1735 opts.get('date') or date)
1735 opts.get('date') or date)
1736 if opts.get('exact'):
1736 if opts.get('exact'):
1737 if hex(n) != nodeid:
1737 if hex(n) != nodeid:
1738 repo.rollback()
1738 repo.rollback()
1739 raise util.Abort(_('patch is damaged'
1739 raise util.Abort(_('patch is damaged'
1740 ' or loses information'))
1740 ' or loses information'))
1741 # Force a dirstate write so that the next transaction
1741 # Force a dirstate write so that the next transaction
1742 # backups an up-do-date file.
1742 # backups an up-do-date file.
1743 repo.dirstate.write()
1743 repo.dirstate.write()
1744 finally:
1744 finally:
1745 os.unlink(tmpname)
1745 os.unlink(tmpname)
1746 finally:
1746 finally:
1747 release(lock, wlock)
1747 release(lock, wlock)
1748
1748
1749 def incoming(ui, repo, source="default", **opts):
1749 def incoming(ui, repo, source="default", **opts):
1750 """show new changesets found in source
1750 """show new changesets found in source
1751
1751
1752 Show new changesets found in the specified path/URL or the default
1752 Show new changesets found in the specified path/URL or the default
1753 pull location. These are the changesets that would be pulled if a
1753 pull location. These are the changesets that would be pulled if a
1754 pull was requested.
1754 pull was requested.
1755
1755
1756 For remote repository, using --bundle avoids downloading the
1756 For remote repository, using --bundle avoids downloading the
1757 changesets twice if the incoming is followed by a pull.
1757 changesets twice if the incoming is followed by a pull.
1758
1758
1759 See pull for valid source format details.
1759 See pull for valid source format details.
1760 """
1760 """
1761 limit = cmdutil.loglimit(opts)
1761 limit = cmdutil.loglimit(opts)
1762 source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
1762 source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
1763 cmdutil.setremoteconfig(ui, opts)
1763 cmdutil.setremoteconfig(ui, opts)
1764
1764
1765 other = hg.repository(ui, source)
1765 other = hg.repository(ui, source)
1766 ui.status(_('comparing with %s\n') % url.hidepassword(source))
1766 ui.status(_('comparing with %s\n') % url.hidepassword(source))
1767 if revs:
1767 if revs:
1768 revs = [other.lookup(rev) for rev in revs]
1768 revs = [other.lookup(rev) for rev in revs]
1769 common, incoming, rheads = repo.findcommonincoming(other, heads=revs,
1769 common, incoming, rheads = repo.findcommonincoming(other, heads=revs,
1770 force=opts["force"])
1770 force=opts["force"])
1771 if not incoming:
1771 if not incoming:
1772 try:
1772 try:
1773 os.unlink(opts["bundle"])
1773 os.unlink(opts["bundle"])
1774 except:
1774 except:
1775 pass
1775 pass
1776 ui.status(_("no changes found\n"))
1776 ui.status(_("no changes found\n"))
1777 return 1
1777 return 1
1778
1778
1779 cleanup = None
1779 cleanup = None
1780 try:
1780 try:
1781 fname = opts["bundle"]
1781 fname = opts["bundle"]
1782 if fname or not other.local():
1782 if fname or not other.local():
1783 # create a bundle (uncompressed if other repo is not local)
1783 # create a bundle (uncompressed if other repo is not local)
1784
1784
1785 if revs is None and other.capable('changegroupsubset'):
1785 if revs is None and other.capable('changegroupsubset'):
1786 revs = rheads
1786 revs = rheads
1787
1787
1788 if revs is None:
1788 if revs is None:
1789 cg = other.changegroup(incoming, "incoming")
1789 cg = other.changegroup(incoming, "incoming")
1790 else:
1790 else:
1791 cg = other.changegroupsubset(incoming, revs, 'incoming')
1791 cg = other.changegroupsubset(incoming, revs, 'incoming')
1792 bundletype = other.local() and "HG10BZ" or "HG10UN"
1792 bundletype = other.local() and "HG10BZ" or "HG10UN"
1793 fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
1793 fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
1794 # keep written bundle?
1794 # keep written bundle?
1795 if opts["bundle"]:
1795 if opts["bundle"]:
1796 cleanup = None
1796 cleanup = None
1797 if not other.local():
1797 if not other.local():
1798 # use the created uncompressed bundlerepo
1798 # use the created uncompressed bundlerepo
1799 other = bundlerepo.bundlerepository(ui, repo.root, fname)
1799 other = bundlerepo.bundlerepository(ui, repo.root, fname)
1800
1800
1801 o = other.changelog.nodesbetween(incoming, revs)[0]
1801 o = other.changelog.nodesbetween(incoming, revs)[0]
1802 if opts.get('newest_first'):
1802 if opts.get('newest_first'):
1803 o.reverse()
1803 o.reverse()
1804 displayer = cmdutil.show_changeset(ui, other, opts)
1804 displayer = cmdutil.show_changeset(ui, other, opts)
1805 count = 0
1805 count = 0
1806 for n in o:
1806 for n in o:
1807 if count >= limit:
1807 if count >= limit:
1808 break
1808 break
1809 parents = [p for p in other.changelog.parents(n) if p != nullid]
1809 parents = [p for p in other.changelog.parents(n) if p != nullid]
1810 if opts.get('no_merges') and len(parents) == 2:
1810 if opts.get('no_merges') and len(parents) == 2:
1811 continue
1811 continue
1812 count += 1
1812 count += 1
1813 displayer.show(other[n])
1813 displayer.show(other[n])
1814 finally:
1814 finally:
1815 if hasattr(other, 'close'):
1815 if hasattr(other, 'close'):
1816 other.close()
1816 other.close()
1817 if cleanup:
1817 if cleanup:
1818 os.unlink(cleanup)
1818 os.unlink(cleanup)
1819
1819
def init(ui, dest=".", **opts):
    """create a new repository in the given directory

    Initialize a new repository in the given directory. If the given
    directory does not exist, it is created.

    If no directory is given, the current directory is used.

    It is possible to specify an ssh:// URL as the destination.
    See 'hg help urls' for more information.
    """
    # Apply remote-related options (e.g. ssh command) before touching dest,
    # since dest may be a remote ssh:// URL.
    cmdutil.setremoteconfig(ui, opts)
    hg.repository(ui, dest, create=1)
1833
1833
def locate(ui, repo, *pats, **opts):
    """locate files matching specific patterns

    Print all files under Mercurial control whose names match the
    given patterns.

    This command searches the entire repository by default. To search
    just the current directory and its subdirectories, use
    "--include .".

    If no patterns are given to match, this command prints all file
    names.

    If you want to feed the output of this command into the "xargs"
    command, use the -0 option to both this command and "xargs". This
    will avoid the problem of "xargs" treating single filenames that
    contain white space as multiple filenames.
    """
    # --print0 separates entries with NUL for safe xargs consumption.
    end = opts.get('print0') and '\0' or '\n'
    rev = opts.get('rev') or None

    # Exit status: 1 until at least one file is printed.
    ret = 1
    m = cmdutil.match(repo, pats, opts, default='relglob')
    # Suppress warnings about patterns that match nothing.
    m.bad = lambda x,y: False
    for abs in repo[rev].walk(m):
        if not rev and abs not in repo.dirstate:
            # Without -r, only report tracked files.
            continue
        if opts.get('fullpath'):
            ui.write(repo.wjoin(abs), end)
        else:
            # With patterns, print paths relative to the cwd; otherwise
            # repository-relative.
            ui.write(((pats and m.rel(abs)) or abs), end)
        ret = 0

    return ret
1868
1868
def log(ui, repo, *pats, **opts):
    """show revision history of entire repository or files

    Print the revision history of the specified files or the entire
    project.

    File history is shown without following rename or copy history of
    files. Use -f/--follow with a file name to follow history across
    renames and copies. --follow without a file name will only show
    ancestors or descendants of the starting revision. --follow-first
    only follows the first parent of merge revisions.

    If no revision range is specified, the default is tip:0 unless
    --follow is set, in which case the working directory parent is
    used as the starting revision.

    See 'hg help dates' for a list of formats valid for -d/--date.

    By default this command outputs: changeset id and hash, tags,
    non-trivial parents, user, date and time, and a summary for each
    commit. When the -v/--verbose switch is used, the list of changed
    files and full commit message is shown.

    NOTE: log -p/--patch may generate unexpected diff output for merge
    changesets, as it will only compare the merge changeset against
    its first parent. Also, the files: list will only reflect files
    that are different from BOTH parents.

    """

    # Memoized accessor for changeset tuples:
    # (manifest, user, (time, tz), files, description, extra)
    get = util.cachefunc(lambda r: repo[r].changeset())
    changeiter, matchfn = cmdutil.walkchangerevs(ui, repo, pats, get, opts)

    limit = cmdutil.loglimit(opts)
    count = 0

    if opts.get('copies') and opts.get('rev'):
        # Only scan filelogs up to the highest requested revision.
        endrev = max(cmdutil.revrange(repo, opts.get('rev'))) + 1
    else:
        endrev = len(repo)
    rcache = {}  # filename -> {changerev: rename info}
    ncache = {}  # filename -> {filenode: rename info}
    def getrenamed(fn, rev):
        '''looks up all renames for a file (up to endrev) the first
        time the file is given. It indexes on the changerev and only
        parses the manifest if linkrev != changerev.
        Returns rename info for fn at changerev rev.'''
        if fn not in rcache:
            rcache[fn] = {}
            ncache[fn] = {}
            fl = repo.file(fn)
            for i in fl:
                node = fl.node(i)
                lr = fl.linkrev(i)
                renamed = fl.renamed(node)
                rcache[fn][lr] = renamed
                if renamed:
                    ncache[fn][node] = renamed
                if lr >= endrev:
                    break
        if rev in rcache[fn]:
            return rcache[fn][rev]

        # If linkrev != rev (i.e. rev not found in rcache) fallback to
        # filectx logic.

        try:
            return repo[rev][fn].renamed()
        except error.LookupError:
            pass
        return None

    df = False
    if opts["date"]:
        # matchdate returns a predicate over commit timestamps.
        df = util.matchdate(opts["date"])

    only_branches = opts.get('only_branch')

    displayer = cmdutil.show_changeset(ui, repo, opts, True, matchfn)
    for st, rev, fns in changeiter:
        if st == 'add':
            parents = [p for p in repo.changelog.parentrevs(rev)
                       if p != nullrev]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            if opts.get('only_merges') and len(parents) != 2:
                continue

            if only_branches:
                # extra dict lives at index 5 of the changeset tuple.
                revbranch = get(rev)[5]['branch']
                if revbranch not in only_branches:
                    continue

            if df:
                changes = get(rev)
                if not df(changes[2][0]):
                    continue

            if opts.get('keyword'):
                changes = get(rev)
                miss = 0
                # Every keyword must match user, description or a filename.
                for k in [kw.lower() for kw in opts['keyword']]:
                    if not (k in changes[1].lower() or
                            k in changes[4].lower() or
                            k in " ".join(changes[3]).lower()):
                        miss = 1
                        break
                if miss:
                    continue

            if opts['user']:
                changes = get(rev)
                # Any of the given -u values may match the committer.
                if not [k for k in opts['user'] if k in changes[1]]:
                    continue

            copies = []
            if opts.get('copies') and rev:
                for fn in get(rev)[3]:
                    rename = getrenamed(fn, rev)
                    if rename:
                        copies.append((fn, rename[0]))
            displayer.show(context.changectx(repo, rev), copies=copies)
        elif st == 'iter':
            # Count a revision against --limit only once it is flushed.
            if count == limit: break
            if displayer.flush(rev):
                count += 1
1995
1995
def manifest(ui, repo, node=None, rev=None):
    """output the current or given revision of the project manifest

    Print a list of version controlled files for the given revision.
    If no revision is given, the first parent of the working directory
    is used, or the null revision if none is checked out.

    With -v flag, print file permissions, symlink and executable bits.
    With --debug flag, print file revision hashes.
    """

    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if not node:
        node = rev

    # Verbose decoration by flag: 'l' = symlink, 'x' = executable,
    # '' = regular file.
    decor = {'l':'644 @ ', 'x':'755 * ', '':'644 '}
    ctx = repo[node]
    for f in ctx:
        if ui.debugflag:
            ui.write("%40s " % hex(ctx.manifest()[f]))
        if ui.verbose:
            ui.write(decor[ctx.flags(f)])
        ui.write("%s\n" % f)
2021
2021
def merge(ui, repo, node=None, force=None, rev=None):
    """merge working directory with another revision

    The contents of the current working directory is updated with all
    changes made in the requested revision since the last common
    predecessor revision.

    Files that changed between either parent are marked as changed for
    the next commit and a commit must be performed before any further
    updates are allowed. The next commit has two parents.

    If no revision is specified, the working directory's parent is a
    head revision, and the current branch contains exactly one other
    head, the other head is merged with by default. Otherwise, an
    explicit revision to merge with must be provided.
    """

    if rev and node:
        raise util.Abort(_("please specify just one revision"))
    if not node:
        node = rev

    if not node:
        # No explicit target: try to pick the single other head of the
        # current branch.
        branch = repo.changectx(None).branch()
        bheads = repo.branchheads(branch)
        if len(bheads) > 2:
            raise util.Abort(_("branch '%s' has %d heads - "
                               "please merge with an explicit rev") %
                             (branch, len(bheads)))

        parent = repo.dirstate.parents()[0]
        if len(bheads) == 1:
            if len(repo.heads()) > 1:
                raise util.Abort(_("branch '%s' has one head - "
                                   "please merge with an explicit rev") %
                                 branch)
            msg = _('there is nothing to merge')
            if parent != repo.lookup(repo[None].branch()):
                # Not at the branch tip: an update would suffice.
                msg = _('%s - use "hg update" instead') % msg
            raise util.Abort(msg)

        if parent not in bheads:
            raise util.Abort(_('working dir not at a head rev - '
                               'use "hg update" or merge with an explicit rev'))
        # Merge with whichever branch head is not the working parent.
        node = parent == bheads[0] and bheads[-1] or bheads[0]
    return hg.merge(repo, node, force=force)
2068
2068
def outgoing(ui, repo, dest=None, **opts):
    """show changesets not found in destination

    Show changesets not found in the specified destination repository
    or the default push location. These are the changesets that would
    be pushed if a push was requested.

    See pull for valid destination format details.
    """
    limit = cmdutil.loglimit(opts)
    # Fall back from 'default-push' to 'default' when no dest is given,
    # mirroring push's path resolution.
    dest, revs, checkout = hg.parseurl(
        ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
    cmdutil.setremoteconfig(ui, opts)
    if revs:
        revs = [repo.lookup(rev) for rev in revs]

    other = hg.repository(ui, dest)
    ui.status(_('comparing with %s\n') % url.hidepassword(dest))
    o = repo.findoutgoing(other, force=opts.get('force'))
    if not o:
        ui.status(_("no changes found\n"))
        return 1
    o = repo.changelog.nodesbetween(o, revs)[0]
    if opts.get('newest_first'):
        o.reverse()
    displayer = cmdutil.show_changeset(ui, repo, opts)
    count = 0
    for n in o:
        if count >= limit:
            break
        parents = [p for p in repo.changelog.parents(n) if p != nullid]
        if opts.get('no_merges') and len(parents) == 2:
            continue
        count += 1
        displayer.show(repo[n])
2104
2104
def parents(ui, repo, file_=None, **opts):
    """show the parents of the working directory or revision

    Print the working directory's parent revisions. If a revision is
    given via -r/--rev, the parent of that revision will be printed.
    If a file argument is given, revision in which the file was last
    changed (before the working directory revision or the argument to
    --rev if given) is printed.
    """
    rev = opts.get('rev')
    if rev:
        ctx = repo[rev]
    else:
        ctx = repo[None]

    if file_:
        m = cmdutil.match(repo, (file_,), opts)
        if m.anypats() or len(m.files()) != 1:
            raise util.Abort(_('can only specify an explicit file name'))
        file_ = m.files()[0]
        filenodes = []
        for cp in ctx.parents():
            if not cp:
                continue
            try:
                filenodes.append(cp.filenode(file_))
            except error.LookupError:
                # File absent from this parent; ignore it.
                pass
        if not filenodes:
            raise util.Abort(_("'%s' not found in manifest!") % file_)
        fl = repo.file(file_)
        # Map each file node back to the changeset that introduced it.
        p = [repo.lookup(fl.linkrev(fl.rev(fn))) for fn in filenodes]
    else:
        p = [cp.node() for cp in ctx.parents()]

    displayer = cmdutil.show_changeset(ui, repo, opts)
    for n in p:
        if n != nullid:
            displayer.show(repo[n])
2144
2144
def paths(ui, repo, search=None):
    """show aliases for remote repositories

    Show definition of symbolic path name NAME. If no name is given,
    show definition of available names.

    Path names are defined in the [paths] section of /etc/mercurial/hgrc
    and $HOME/.hgrc. If run inside a repository, .hg/hgrc is used, too.

    See 'hg help urls' for more information.
    """
    if search:
        for name, path in ui.configitems("paths"):
            if name == search:
                # hidepassword strips credentials from URLs before display.
                ui.write("%s\n" % url.hidepassword(path))
                return
        ui.warn(_("not found!\n"))
        return 1
    else:
        for name, path in ui.configitems("paths"):
            ui.write("%s = %s\n" % (name, url.hidepassword(path)))
2166
2166
def postincoming(ui, repo, modheads, optupdate, checkout):
    """Common post-pull/unbundle handling.

    modheads is the number of heads added by the incoming changes;
    optupdate is the -u/--update flag; checkout is the revision to
    update to (may be None). Updates the working directory when that
    is unambiguous, otherwise prints a hint about the next command.
    """
    if modheads == 0:
        return
    if optupdate:
        if (modheads <= 1 or len(repo.branchheads()) == 1) or checkout:
            return hg.update(repo, checkout)
        else:
            ui.status(_("not updating, since new heads added\n"))
    if modheads > 1:
        ui.status(_("(run 'hg heads' to see heads, 'hg merge' to merge)\n"))
    else:
        ui.status(_("(run 'hg update' to get a working copy)\n"))
2179
2179
def pull(ui, repo, source="default", **opts):
    """pull changes from the specified source

    Pull changes from a remote repository to the local one.

    This finds all changes from the repository at the specified path
    or URL and adds them to the local repository. By default, this
    does not update the copy of the project in the working directory.

    Use hg incoming if you want to see what will be added by the next
    pull without actually adding the changes to the repository.

    If SOURCE is omitted, the 'default' path will be used.
    See 'hg help urls' for more information.
    """
    source, revs, checkout = hg.parseurl(ui.expandpath(source), opts.get('rev'))
    cmdutil.setremoteconfig(ui, opts)

    other = hg.repository(ui, source)
    ui.status(_('pulling from %s\n') % url.hidepassword(source))
    if revs:
        try:
            revs = [other.lookup(rev) for rev in revs]
        except error.CapabilityError:
            # Old/limited servers cannot resolve -r arguments remotely.
            err = _("Other repository doesn't support revision lookup, "
                    "so a rev cannot be specified.")
            raise util.Abort(err)

    modheads = repo.pull(other, heads=revs, force=opts.get('force'))
    return postincoming(ui, repo, modheads, opts.get('update'), checkout)
2210
2210
def push(ui, repo, dest=None, **opts):
    """push changes to the specified destination

    Push changes from the local repository to the given destination.

    This is the symmetrical operation for pull. It moves changes from
    the current repository to a different one. If the destination is
    local this is identical to a pull in that directory from the
    current one.

    By default, push will refuse to run if it detects the result would
    increase the number of remote heads. This generally indicates the
    client has forgotten to pull and merge before pushing.

    If -r/--rev is used, the named revision and all its ancestors will
    be pushed to the remote repository.

    Look at the help text for URLs for important details about ssh://
    URLs. If DESTINATION is omitted, a default path will be used.
    See 'hg help urls' for more information.
    """
    dest, revs, checkout = hg.parseurl(
        ui.expandpath(dest or 'default-push', dest or 'default'), opts.get('rev'))
    cmdutil.setremoteconfig(ui, opts)

    other = hg.repository(ui, dest)
    ui.status(_('pushing to %s\n') % url.hidepassword(dest))
    if revs:
        revs = [repo.lookup(rev) for rev in revs]
    r = repo.push(other, opts.get('force'), revs=revs)
    # repo.push returns 0 on error; command exit code is the inverse.
    return r == 0
2242
2242
2243 def rawcommit(ui, repo, *pats, **opts):
2243 def rawcommit(ui, repo, *pats, **opts):
2244 """raw commit interface (DEPRECATED)
2244 """raw commit interface (DEPRECATED)
2245
2245
2246 (DEPRECATED)
2246 (DEPRECATED)
2247 Lowlevel commit, for use in helper scripts.
2247 Lowlevel commit, for use in helper scripts.
2248
2248
2249 This command is not intended to be used by normal users, as it is
2249 This command is not intended to be used by normal users, as it is
2250 primarily useful for importing from other SCMs.
2250 primarily useful for importing from other SCMs.
2251
2251
2252 This command is now deprecated and will be removed in a future
2252 This command is now deprecated and will be removed in a future
2253 release, please use debugsetparents and commit instead.
2253 release, please use debugsetparents and commit instead.
2254 """
2254 """
2255
2255
2256 ui.warn(_("(the rawcommit command is deprecated)\n"))
2256 ui.warn(_("(the rawcommit command is deprecated)\n"))
2257
2257
2258 message = cmdutil.logmessage(opts)
2258 message = cmdutil.logmessage(opts)
2259
2259
2260 files = cmdutil.match(repo, pats, opts).files()
2260 files = cmdutil.match(repo, pats, opts).files()
2261 if opts.get('files'):
2261 if opts.get('files'):
2262 files += open(opts['files']).read().splitlines()
2262 files += open(opts['files']).read().splitlines()
2263
2263
2264 parents = [repo.lookup(p) for p in opts['parent']]
2264 parents = [repo.lookup(p) for p in opts['parent']]
2265
2265
2266 try:
2266 try:
2267 repo.rawcommit(files, message, opts['user'], opts['date'], *parents)
2267 repo.rawcommit(files, message, opts['user'], opts['date'], *parents)
2268 except ValueError, inst:
2268 except ValueError, inst:
2269 raise util.Abort(str(inst))
2269 raise util.Abort(str(inst))
2270
2270
def recover(ui, repo):
    """roll back an interrupted transaction

    Recover from an interrupted commit or pull.

    This command tries to fix the repository status after an
    interrupted operation. It should only be necessary when Mercurial
    suggests it.
    """
    # A successful recovery is followed by a full verify; otherwise
    # report failure via exit code 1.
    if repo.recover():
        return hg.verify(repo)
    return 1
2283
2283
def remove(ui, repo, *pats, **opts):
    """remove the specified files on the next commit

    Schedule the indicated files for removal from the repository.

    This only removes files from the current branch, not from the
    entire project history. -A/--after can be used to remove only
    files that have already been deleted, -f/--force can be used to
    force deletion, and -Af can be used to remove files from the next
    revision without deleting them.

    The following table details the behavior of remove for different
    file states (columns) and option combinations (rows). The file
    states are Added, Clean, Modified and Missing (as reported by hg
    status). The actions are Warn, Remove (from branch) and Delete
    (from disk).

           A C M !
    none   W RD W R
    -f     R RD RD R
    -A     W W W R
    -Af    R R R R

    This command schedules the files to be removed at the next commit.
    To undo a remove before that, see hg revert.
    """

    after, force = opts.get('after'), opts.get('force')
    if not pats and not after:
        raise util.Abort(_('no files specified'))

    m = cmdutil.match(repo, pats, opts)
    s = repo.status(match=m, clean=True)
    # status tuple layout: 0=modified, 1=added, 3=deleted, 6=clean
    # (indices 2 removed and 4/5 unknown/ignored are not needed here)
    modified, added, deleted, clean = s[0], s[1], s[3], s[6]

    def warn(files, reason):
        # Explain to the user why each file is skipped rather than removed.
        for f in files:
            ui.warn(_('not removing %s: file %s (use -f to force removal)\n')
                    % (m.rel(f), reason))

    # Partition matched files per the option table in the docstring:
    # 'remove' entries are dropped from tracking (and unlinked unless
    # --after); 'forget' entries are only dropped from tracking.
    if force:
        remove, forget = modified + deleted + clean, added
    elif after:
        remove, forget = deleted, []
        warn(modified + added + clean, _('still exists'))
    else:
        remove, forget = deleted + clean, []
        warn(modified, _('is modified'))
        warn(added, _('has been marked for add'))

    # Report each file acted upon (always for patterns, verbose for
    # exact names).
    for f in util.sort(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    repo.forget(forget)
    # Do not unlink with --after: those files are already gone from disk.
    repo.remove(remove, unlink=not after)
2340
2340
def rename(ui, repo, *pats, **opts):
    """rename files; equivalent of copy + remove

    Mark dest as copies of sources; mark sources for deletion. If dest
    is a directory, copies are put in that directory. If dest is a
    file, there can only be one source.

    By default, this command copies the contents of files as they
    exist in the working directory. If invoked with -A/--after, the
    operation is recorded, but no copying is performed.

    This command takes effect at the next commit. To undo a rename
    before that, see hg revert.
    """
    # Hold the working-directory lock (non-blocking acquire) while the
    # copy/remove bookkeeping runs; always release it afterwards.
    lock = repo.wlock(False)
    try:
        # A rename is implemented as a copy that also schedules the
        # source for removal.
        return cmdutil.copy(ui, repo, pats, opts, rename=True)
    finally:
        lock.release()
2360
2360
def resolve(ui, repo, *pats, **opts):
    """retry file merges from a merge or update

    This command will cleanly retry unresolved file merges using file
    revisions preserved from the last update or merge. To attempt to
    resolve all unresolved files, use the -a/--all switch.

    If a conflict is resolved manually, please note that the changes
    will be overwritten if the merge is retried with resolve. The
    -m/--mark switch should be used to mark the file as resolved.

    This command will also allow listing resolved files and manually
    marking and unmarking files as resolved. All files must be marked
    as resolved before the new commits are permitted.

    The codes used to show the status of files are:
    U = unresolved
    R = resolved
    """

    # NOTE: 'all' shadows the builtin; kept as-is to preserve the
    # existing code exactly.
    all, mark, unmark, show = [opts.get(o) for o in 'all mark unmark list'.split()]

    # --list, --mark and --unmark are mutually exclusive.
    if (show and (mark or unmark)) or (mark and unmark):
        raise util.Abort(_("too many options specified"))
    if pats and all:
        raise util.Abort(_("can't specify --all and patterns"))
    if not (all or pats or show or mark or unmark):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to remerge all files'))

    # Merge state records every file with an unresolved/resolved flag
    # from the last merge or update.
    ms = merge_.mergestate(repo)
    m = cmdutil.match(repo, pats, opts)

    for f in ms:
        if m(f):
            if show:
                # Print "U <file>" or "R <file>" per the docstring codes.
                ui.write("%s %s\n" % (ms[f].upper(), f))
            elif mark:
                ms.mark(f, "r")
            elif unmark:
                ms.mark(f, "u")
            else:
                # Re-run the file merge against the working context and
                # its last parent.
                wctx = repo[None]
                mctx = wctx.parents()[-1]

                # backup pre-resolve (merge uses .orig for its own purposes)
                a = repo.wjoin(f)
                util.copyfile(a, a + ".resolve")

                # resolve file
                ms.resolve(f, wctx, mctx)

                # replace filemerge's .orig file with our resolve file
                util.rename(a + ".resolve", a + ".orig")
2415
2415
def revert(ui, repo, *pats, **opts):
    """restore individual files or directories to an earlier state

    (use update -r to check out earlier revisions, revert does not
    change the working directory parents)

    With no revision specified, revert the named files or directories
    to the contents they had in the parent of the working directory.
    This restores the contents of the affected files to an unmodified
    state and unschedules adds, removes, copies, and renames. If the
    working directory has two parents, you must explicitly specify the
    revision to revert to.

    Using the -r/--rev option, revert the given files or directories
    to their contents as of a specific revision. This can be helpful
    to "roll back" some or all of an earlier change. See 'hg help
    dates' for a list of formats valid for -d/--date.

    Revert modifies the working directory. It does not commit any
    changes, or change the parent of the working directory. If you
    revert to a revision other than the parent of the working
    directory, the reverted files will thus appear modified
    afterwards.

    If a file has been deleted, it is restored. If the executable mode
    of a file was changed, it is reset.

    If names are given, all files matching the names are reverted.
    If no arguments are given, no files are reverted.

    Modified files are saved with a .orig suffix before reverting.
    To disable these backups, use --no-backup.
    """

    # --date is translated into a --rev; the two are mutually exclusive.
    if opts["date"]:
        if opts["rev"]:
            raise util.Abort(_("you can't specify a revision and a date"))
        opts["rev"] = cmdutil.finddate(ui, repo, opts["date"])

    if not pats and not opts.get('all'):
        raise util.Abort(_('no files or directories specified; '
                           'use --all to revert the whole repo'))

    parent, p2 = repo.dirstate.parents()
    if not opts.get('rev') and p2 != nullid:
        raise util.Abort(_('uncommitted merge - please provide a '
                           'specific revision'))
    ctx = repo[opts.get('rev')]
    node = ctx.node()
    mf = ctx.manifest()
    # pmf: first parent's manifest; reuse mf when reverting to the
    # parent itself, otherwise load lazily below.
    if node == parent:
        pmf = mf
    else:
        pmf = None

    # need all matching names in dirstate and manifest of target rev,
    # so have to walk both. do not print errors if files exist in one
    # but not other.

    # names maps abs path -> (relative path for display, matched exactly?)
    names = {}

    wlock = repo.wlock()
    try:
        # walk dirstate.

        m = cmdutil.match(repo, pats, opts)
        # suppress "no such file" complaints on the dirstate pass
        m.bad = lambda x,y: False
        for abs in repo.walk(m):
            names[abs] = m.rel(abs), m.exact(abs)

        # walk target manifest.

        def badfn(path, msg):
            # Only warn about a path if neither it nor anything under it
            # was already found in the dirstate walk.
            if path in names:
                return False
            path_ = path + '/'
            for f in names:
                if f.startswith(path_):
                    return False
            repo.ui.warn("%s: %s\n" % (m.rel(path), msg))
            return False

        m = cmdutil.match(repo, pats, opts)
        m.bad = badfn
        for abs in repo[node].walk(m):
            if abs not in names:
                names[abs] = m.rel(abs), m.exact(abs)

        m = cmdutil.matchfiles(repo, names)
        # First four status fields: modified, added, removed, deleted.
        changes = repo.status(match=m)[:4]
        modified, added, removed, deleted = map(set, changes)

        # if f is a rename, also revert the source
        cwd = repo.getcwd()
        for f in added:
            src = repo.dirstate.copied(f)
            if src and src not in names and repo.dirstate[src] == 'r':
                removed.add(src)
                names[src] = (repo.pathto(src, cwd), True)

        def removeforget(abs):
            # Message factory: added files are "forgotten", others "removed".
            if repo.dirstate[abs] == 'a':
                return _('forgetting %s\n')
            return _('removing %s\n')

        # Each action is (accumulated file list, message or message factory).
        revert = ([], _('reverting %s\n'))
        add = ([], _('adding %s\n'))
        remove = ([], removeforget)
        undelete = ([], _('undeleting %s\n'))

        disptable = (
            # dispatch table:
            #   file state
            #   action if in target manifest
            #   action if not in target manifest
            #   make backup if in target manifest
            #   make backup if not in target manifest
            (modified, revert, remove, True, True),
            (added, revert, remove, True, False),
            (removed, undelete, None, False, False),
            (deleted, revert, remove, False, False),
            )

        for abs, (rel, exact) in util.sort(names.items()):
            mfentry = mf.get(abs)
            target = repo.wjoin(abs)
            def handle(xlist, dobackup):
                # Queue abs on the chosen action list, optionally saving a
                # .orig backup, and report unless it was an exact quiet match.
                xlist[0].append(abs)
                if dobackup and not opts.get('no_backup') and util.lexists(target):
                    bakname = "%s.orig" % rel
                    ui.note(_('saving current version of %s as %s\n') %
                            (rel, bakname))
                    if not opts.get('dry_run'):
                        util.copyfile(target, bakname)
                if ui.verbose or not exact:
                    msg = xlist[1]
                    if not isinstance(msg, basestring):
                        msg = msg(abs)
                    ui.status(msg % rel)
            for table, hitlist, misslist, backuphit, backupmiss in disptable:
                if abs not in table: continue
                # file has changed in dirstate
                if mfentry:
                    handle(hitlist, backuphit)
                elif misslist is not None:
                    handle(misslist, backupmiss)
                break
            else:
                # for/else: abs appeared in none of the status sets.
                if abs not in repo.dirstate:
                    if mfentry:
                        handle(add, True)
                    elif exact:
                        ui.warn(_('file not managed: %s\n') % rel)
                    continue
                # file has not changed in dirstate
                if node == parent:
                    if exact: ui.warn(_('no changes needed to %s\n') % rel)
                    continue
                if pmf is None:
                    # only need parent manifest in this unlikely case,
                    # so do not read by default
                    pmf = repo[parent].manifest()
                if abs in pmf:
                    if mfentry:
                        # if version of file is same in parent and target
                        # manifests, do nothing
                        if (pmf[abs] != mfentry or
                            pmf.flags(abs) != mf.flags(abs)):
                            handle(revert, False)
                    else:
                        handle(remove, False)

        if not opts.get('dry_run'):
            def checkout(f):
                # Write f's content and flags from the target context into
                # the working directory.
                fc = ctx[f]
                repo.wwrite(f, fc.data(), fc.flags())

            audit_path = util.path_auditor(repo.root)
            for f in remove[0]:
                if repo.dirstate[f] == 'a':
                    repo.dirstate.forget(f)
                    continue
                audit_path(f)
                try:
                    util.unlink(repo.wjoin(f))
                except OSError:
                    # best-effort: file may already be gone
                    pass
                repo.dirstate.remove(f)

            normal = None
            if node == parent:
                # We're reverting to our parent. If possible, we'd like status
                # to report the file as clean. We have to use normallookup for
                # merges to avoid losing information about merged/dirty files.
                if p2 != nullid:
                    normal = repo.dirstate.normallookup
                else:
                    normal = repo.dirstate.normal
            for f in revert[0]:
                checkout(f)
                if normal:
                    normal(f)

            for f in add[0]:
                checkout(f)
                repo.dirstate.add(f)

            normal = repo.dirstate.normallookup
            if node == parent and p2 == nullid:
                normal = repo.dirstate.normal
            for f in undelete[0]:
                checkout(f)
                normal(f)

    finally:
        wlock.release()
2632
2632
def rollback(ui, repo):
    """roll back the last transaction

    This command should be used with care. There is only one level of
    rollback, and there is no way to undo a rollback. It will also
    restore the dirstate at the time of the last transaction, losing
    any dirstate changes since that time.

    Transactions are used to encapsulate the effects of all commands
    that create new changesets or propagate existing changesets into a
    repository. For example, the following commands are transactional,
    and their effects can be rolled back:

    commit
    import
    pull
    push (with this repository as destination)
    unbundle

    This command is not intended for use on public repositories. Once
    changes are visible for pull by other users, rolling a transaction
    back locally is ineffective (someone else may already have pulled
    the changes). Furthermore, a race is possible with readers of the
    repository; for example an in-progress pull from the repository
    may fail if a rollback is performed.
    """
    # All the work lives in localrepo; this command is a thin wrapper.
    repo.rollback()
2660
2660
def root(ui, repo):
    """print the root (top) of the current working directory

    Print the root directory of the current repository.
    """
    # Emit the repository root followed by a newline.
    ui.write("%s\n" % repo.root)
2667
2667
def serve(ui, repo, **opts):
    """export the repository via HTTP

    Start a local HTTP repository browser and pull server.

    By default, the server logs accesses to stdout and errors to
    stderr. Use the -A and -E options to log to files.
    """

    if opts["stdio"]:
        # --stdio: speak the ssh wire protocol over stdin/stdout instead
        # of serving HTTP; requires a local repository.
        if repo is None:
            raise error.RepoError(_("There is no Mercurial repository here"
                                    " (.hg not found)"))
        s = sshserver.sshserver(ui, repo)
        s.serve_forever()

    parentui = ui.parentui or ui
    # Mirror the relevant command-line options into the [web] config
    # section so hgweb picks them up.
    optlist = ("name templates style address port prefix ipv6"
               " accesslog errorlog webdir_conf certificate")
    for o in optlist.split():
        if opts[o]:
            parentui.setconfig("web", o, str(opts[o]))
            if (repo is not None) and (repo.ui != parentui):
                repo.ui.setconfig("web", o, str(opts[o]))

    # Without a local repository, a webdir_conf (multi-repo config) is
    # the only way to know what to serve.
    if repo is None and not ui.config("web", "webdir_conf"):
        raise error.RepoError(_("There is no Mercurial repository here"
                                " (.hg not found)"))

    class service:
        def init(self):
            # Install signal handlers and create the HTTP server; the
            # rest of this method only prints the listening address.
            util.set_signal_handler()
            self.httpd = hgweb.server.create_server(parentui, repo)

            if not ui.verbose: return

            if self.httpd.prefix:
                prefix = self.httpd.prefix.strip('/') + '/'
            else:
                prefix = ''

            # Omit the port in the printed URL when it is the HTTP default.
            port = ':%d' % self.httpd.port
            if port == ':80':
                port = ''

            bindaddr = self.httpd.addr
            if bindaddr == '0.0.0.0':
                bindaddr = '*'
            elif ':' in bindaddr: # IPv6
                bindaddr = '[%s]' % bindaddr

            fqaddr = self.httpd.fqaddr
            if ':' in fqaddr:
                fqaddr = '[%s]' % fqaddr
            ui.status(_('listening at http://%s%s/%s (bound to %s:%d)\n') %
                      (fqaddr, port, prefix, bindaddr, self.httpd.port))

        def run(self):
            self.httpd.serve_forever()

    service = service()

    # cmdutil.service handles daemonizing / pid files around init+run.
    cmdutil.service(opts, initfn=service.init, runfn=service.run)
2731
2731
2732 def status(ui, repo, *pats, **opts):
2732 def status(ui, repo, *pats, **opts):
2733 """show changed files in the working directory
2733 """show changed files in the working directory
2734
2734
2735 Show status of files in the repository. If names are given, only
2735 Show status of files in the repository. If names are given, only
2736 files that match are shown. Files that are clean or ignored or
2736 files that match are shown. Files that are clean or ignored or
2737 source of a copy/move operation, are not listed unless -c/--clean,
2737 source of a copy/move operation, are not listed unless -c/--clean,
2738 -i/--ignored, -C/--copies or -A/--all is given. Unless options
2738 -i/--ignored, -C/--copies or -A/--all is given. Unless options
2739 described with "show only ..." are given, the options -mardu are
2739 described with "show only ..." are given, the options -mardu are
2740 used.
2740 used.
2741
2741
2742 Option -q/--quiet hides untracked (unknown and ignored) files
2742 Option -q/--quiet hides untracked (unknown and ignored) files
2743 unless explicitly requested with -u/--unknown or -i/--ignored.
2743 unless explicitly requested with -u/--unknown or -i/--ignored.
2744
2744
2745 NOTE: status may appear to disagree with diff if permissions have
2745 NOTE: status may appear to disagree with diff if permissions have
2746 changed or a merge has occurred. The standard diff format does not
2746 changed or a merge has occurred. The standard diff format does not
2747 report permission changes and diff only reports changes relative
2747 report permission changes and diff only reports changes relative
2748 to one merge parent.
2748 to one merge parent.
2749
2749
2750 If one revision is given, it is used as the base revision.
2750 If one revision is given, it is used as the base revision.
2751 If two revisions are given, the difference between them is shown.
2751 If two revisions are given, the difference between them is shown.
2752
2752
2753 The codes used to show the status of files are:
2753 The codes used to show the status of files are:
2754 M = modified
2754 M = modified
2755 A = added
2755 A = added
2756 R = removed
2756 R = removed
2757 C = clean
2757 C = clean
2758 ! = missing (deleted by non-hg command, but still tracked)
2758 ! = missing (deleted by non-hg command, but still tracked)
2759 ? = not tracked
2759 ? = not tracked
2760 I = ignored
2760 I = ignored
2761 = the previous added file was copied from here
2761 = the previous added file was copied from here
2762 """
2762 """
2763
2763
2764 node1, node2 = cmdutil.revpair(repo, opts.get('rev'))
2764 node1, node2 = cmdutil.revpair(repo, opts.get('rev'))
2765 cwd = (pats and repo.getcwd()) or ''
2765 cwd = (pats and repo.getcwd()) or ''
2766 end = opts.get('print0') and '\0' or '\n'
2766 end = opts.get('print0') and '\0' or '\n'
2767 copy = {}
2767 copy = {}
2768 states = 'modified added removed deleted unknown ignored clean'.split()
2768 states = 'modified added removed deleted unknown ignored clean'.split()
2769 show = [k for k in states if opts.get(k)]
2769 show = [k for k in states if opts.get(k)]
2770 if opts.get('all'):
2770 if opts.get('all'):
2771 show += ui.quiet and (states[:4] + ['clean']) or states
2771 show += ui.quiet and (states[:4] + ['clean']) or states
2772 if not show:
2772 if not show:
2773 show = ui.quiet and states[:4] or states[:5]
2773 show = ui.quiet and states[:4] or states[:5]
2774
2774
2775 stat = repo.status(node1, node2, cmdutil.match(repo, pats, opts),
2775 stat = repo.status(node1, node2, cmdutil.match(repo, pats, opts),
2776 'ignored' in show, 'clean' in show, 'unknown' in show)
2776 'ignored' in show, 'clean' in show, 'unknown' in show)
2777 changestates = zip(states, 'MAR!?IC', stat)
2777 changestates = zip(states, 'MAR!?IC', stat)
2778
2778
2779 if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
2779 if (opts.get('all') or opts.get('copies')) and not opts.get('no_status'):
2780 ctxn = repo[nullid]
2780 ctxn = repo[nullid]
2781 ctx1 = repo[node1]
2781 ctx1 = repo[node1]
2782 ctx2 = repo[node2]
2782 ctx2 = repo[node2]
2783 added = stat[1]
2783 added = stat[1]
2784 if node2 is None:
2784 if node2 is None:
2785 added = stat[0] + stat[1] # merged?
2785 added = stat[0] + stat[1] # merged?
2786
2786
2787 for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
2787 for k, v in copies.copies(repo, ctx1, ctx2, ctxn)[0].iteritems():
2788 if k in added:
2788 if k in added:
2789 copy[k] = v
2789 copy[k] = v
2790 elif v in added:
2790 elif v in added:
2791 copy[v] = k
2791 copy[v] = k
2792
2792
2793 for state, char, files in changestates:
2793 for state, char, files in changestates:
2794 if state in show:
2794 if state in show:
2795 format = "%s %%s%s" % (char, end)
2795 format = "%s %%s%s" % (char, end)
2796 if opts.get('no_status'):
2796 if opts.get('no_status'):
2797 format = "%%s%s" % end
2797 format = "%%s%s" % end
2798
2798
2799 for f in files:
2799 for f in files:
2800 ui.write(format % repo.pathto(f, cwd))
2800 ui.write(format % repo.pathto(f, cwd))
2801 if f in copy:
2801 if f in copy:
2802 ui.write(' %s%s' % (repo.pathto(copy[f], cwd), end))
2802 ui.write(' %s%s' % (repo.pathto(copy[f], cwd), end))
2803
2803
def tag(ui, repo, name1, *names, **opts):
    """add one or more tags for the current or given revision

    Name a particular revision using <name>.

    Tags are used to name particular revisions of the repository and are
    very useful to compare different revisions, to go back to significant
    earlier versions or to mark branch points as releases, etc.

    If no revision is given, the parent of the working directory is
    used, or tip if no revision is checked out.

    To facilitate version control, distribution, and merging of tags,
    they are stored as a file named ".hgtags" which is managed
    similarly to other project files and can be hand-edited if
    necessary. The file '.hg/localtags' is used for local tags (not
    shared among repositories).

    See 'hg help dates' for a list of formats valid for -d/--date.
    """

    rev_ = "."
    names = (name1,) + names
    # A real set detects duplicates directly (replaces the old
    # dict.fromkeys() set-like-dict idiom).
    if len(names) != len(set(names)):
        raise util.Abort(_('tag names must be unique'))
    for n in names:
        # Reserved revision identifiers can never be tag names.
        if n in ['tip', '.', 'null']:
            raise util.Abort(_('the name \'%s\' is reserved') % n)
    if opts.get('rev') and opts.get('remove'):
        raise util.Abort(_("--rev and --remove are incompatible"))
    if opts.get('rev'):
        rev_ = opts['rev']
    message = opts.get('message')
    if opts.get('remove'):
        # Removing a tag: every name must exist and be of the expected
        # kind (local vs. global); removal is recorded by tagging nullid.
        expectedtype = opts.get('local') and 'local' or 'global'
        for n in names:
            if not repo.tagtype(n):
                raise util.Abort(_('tag \'%s\' does not exist') % n)
            if repo.tagtype(n) != expectedtype:
                if expectedtype == 'global':
                    raise util.Abort(_('tag \'%s\' is not a global tag') % n)
                else:
                    raise util.Abort(_('tag \'%s\' is not a local tag') % n)
        rev_ = nullid
        if not message:
            message = _('Removed tag %s') % ', '.join(names)
    elif not opts.get('force'):
        for n in names:
            if n in repo.tags():
                raise util.Abort(_('tag \'%s\' already exists '
                                   '(use -f to force)') % n)
    # Refuse to pick an implicit revision while a merge is uncommitted.
    if not rev_ and repo.dirstate.parents()[1] != nullid:
        raise util.Abort(_('uncommitted merge - please provide a '
                           'specific revision'))
    r = repo[rev_].node()

    if not message:
        message = (_('Added tag %s for changeset %s') %
                   (', '.join(names), short(r)))

    date = opts.get('date')
    if date:
        date = util.parsedate(date)

    repo.tag(names, r, message, opts.get('local'), opts.get('user'), date)
2869
2869
def tags(ui, repo):
    """list repository tags

    This lists both regular and local tags. When the -v/--verbose
    switch is used, a third column "local" is printed for local tags.
    """

    l = repo.tagslist()
    l.reverse()
    # Full hashes under --debug, short ones otherwise.
    hexfunc = ui.debugflag and hex or short
    tagtype = ""

    for t, n in l:
        if ui.quiet:
            # Quiet mode: names only.
            ui.write("%s\n" % t)
            continue

        try:
            hn = hexfunc(n)
            r = "%5d:%s" % (repo.changelog.rev(n), hn)
        except error.LookupError:
            # Tag points at a node missing from the changelog.
            r = "    ?:%s" % hn
        else:
            # NOTE(review): output happens only on the success path here;
            # confirm against upstream whether unresolvable tags should
            # also be printed.
            spaces = " " * (30 - encoding.colwidth(t))
            if ui.verbose:
                if repo.tagtype(t) == 'local':
                    tagtype = " local"
                else:
                    tagtype = ""
            ui.write("%s%s %s%s\n" % (t, spaces, r, tagtype))
2900
2900
def tip(ui, repo, **opts):
    """show the tip revision

    The tip revision (usually just called the tip) is the most
    recently added changeset in the repository, the most recently
    changed head.

    If you have just made a commit, that commit will be the tip. If
    you have just pulled changes from another repository, the tip of
    that repository becomes the current tip. The "tip" tag is special
    and cannot be renamed or assigned to a different changeset.
    """
    # The tip is the highest-numbered revision: len(repo) - 1.
    cmdutil.show_changeset(ui, repo, opts).show(repo[len(repo) - 1])
2914
2914
def unbundle(ui, repo, fname1, *fnames, **opts):
    """apply one or more changegroup files

    Apply one or more compressed changegroup files generated by the
    bundle command.
    """
    fnames = (fname1,) + fnames

    # Hold the repository lock across all bundle applications.
    lock = repo.lock()
    try:
        for fname in fnames:
            f = url.open(ui, fname)
            gen = changegroup.readbundle(f, fname)
            modheads = repo.addchangegroup(gen, 'unbundle', 'bundle:' + fname)
    finally:
        lock.release()

    # modheads from the last bundle drives the post-pull report/update.
    return postincoming(ui, repo, modheads, opts.get('update'), None)
2933
2933
def update(ui, repo, node=None, rev=None, clean=False, date=None):
    """update working directory

    Update the repository's working directory to the specified
    revision, or the tip of the current branch if none is specified.
    Use null as the revision to remove the working copy (like 'hg
    clone -U').

    When the working directory contains no uncommitted changes, it
    will be replaced by the state of the requested revision from the
    repository. When the requested revision is on a different branch,
    the working directory will additionally be switched to that
    branch.

    When there are uncommitted changes, use option -C to discard them,
    forcibly replacing the state of the working directory with the
    requested revision.

    When there are uncommitted changes and option -C is not used, and
    the parent revision and requested revision are on the same branch,
    and one of them is an ancestor of the other, then the new working
    directory will contain the requested revision merged with the
    uncommitted changes. Otherwise, the update will fail with a
    suggestion to use 'merge' or 'update -C' instead.

    If you want to update just one file to an older revision, use
    revert.

    See 'hg help dates' for a list of formats valid for -d/--date.
    """
    # The target may arrive as a positional argument (node) or via
    # -r/--rev; accepting both at once would be ambiguous.
    if rev and node:
        raise util.Abort(_("please specify just one revision"))

    if not rev:
        rev = node

    if date:
        if rev:
            raise util.Abort(_("you can't specify a revision and a date"))
        # Resolve the date to a concrete revision.
        rev = cmdutil.finddate(ui, repo, date)

    if clean:
        # -C/--clean: discard uncommitted changes.
        return hg.clean(repo, rev)
    else:
        return hg.update(repo, rev)
2979
2979
def verify(ui, repo):
    """verify the integrity of the repository

    Verify the integrity of the current repository.

    This will perform an extensive check of the repository's
    integrity, validating the hashes and checksums of each entry in
    the changelog, manifest, and tracked files, as well as the
    integrity of their crosslinks and indices.
    """
    # All checking is delegated to hg.verify; its result is the exit status.
    return hg.verify(repo)
2991
2991
def version_(ui):
    """output version and copyright information"""
    # Version goes to normal output; the copyright blurb uses ui.status
    # so it is suppressed under --quiet.
    ui.write(_("Mercurial Distributed SCM (version %s)\n")
             % util.version())
    ui.status(_(
        "\nCopyright (C) 2005-2009 Matt Mackall <mpm@selenic.com> and others\n"
        "This is free software; see the source for copying conditions. "
        "There is NO\nwarranty; "
        "not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.\n"
    ))
3002
3002
3003 # Command options and aliases are listed here, alphabetically
3003 # Command options and aliases are listed here, alphabetically
3004
3004
# Shared option tables: each entry is (short flag, long flag, default,
# help text). These lists are composed into per-command option lists in
# the command table below.

globalopts = [
    ('R', 'repository', '',
     _('repository root directory or symbolic path name')),
    ('', 'cwd', '', _('change working directory')),
    ('y', 'noninteractive', None,
     _('do not prompt, assume \'yes\' for any required answers')),
    ('q', 'quiet', None, _('suppress output')),
    ('v', 'verbose', None, _('enable additional output')),
    ('', 'config', [], _('set/override config option')),
    ('', 'debug', None, _('enable debugging output')),
    ('', 'debugger', None, _('start debugger')),
    ('', 'encoding', encoding.encoding, _('set the charset encoding')),
    ('', 'encodingmode', encoding.encodingmode,
     _('set the charset encoding mode')),
    ('', 'traceback', None, _('print traceback on exception')),
    ('', 'time', None, _('time how long the command takes')),
    ('', 'profile', None, _('print command execution profile')),
    ('', 'version', None, _('output version information and exit')),
    ('h', 'help', None, _('display help and exit')),
]

dryrunopts = [('n', 'dry-run', None,
               _('do not perform actions, just print output'))]

remoteopts = [
    ('e', 'ssh', '', _('specify ssh command to use')),
    ('', 'remotecmd', '', _('specify hg command to run on the remote side')),
]

walkopts = [
    ('I', 'include', [], _('include names matching the given patterns')),
    ('X', 'exclude', [], _('exclude names matching the given patterns')),
]

commitopts = [
    ('m', 'message', '', _('use <text> as commit message')),
    ('l', 'logfile', '', _('read commit message from <file>')),
]

commitopts2 = [
    ('d', 'date', '', _('record datecode as commit date')),
    ('u', 'user', '', _('record user as committer')),
]

templateopts = [
    ('', 'style', '', _('display using template map file')),
    ('', 'template', '', _('display with template')),
]

logopts = [
    ('p', 'patch', None, _('show patch')),
    ('g', 'git', None, _('use git extended diff format')),
    ('l', 'limit', '', _('limit number of changes displayed')),
    ('M', 'no-merges', None, _('do not show merges')),
] + templateopts

diffopts = [
    ('a', 'text', None, _('treat all files as text')),
    ('g', 'git', None, _('use git extended diff format')),
    ('', 'nodates', None, _("don't include dates in diff headers"))
]

diffopts2 = [
    ('p', 'show-function', None, _('show which function each change is in')),
    ('w', 'ignore-all-space', None,
     _('ignore white space when comparing lines')),
    ('b', 'ignore-space-change', None,
     _('ignore changes in the amount of white space')),
    ('B', 'ignore-blank-lines', None,
     _('ignore changes whose lines are all blank')),
    ('U', 'unified', '', _('number of lines of context to show'))
]

similarityopts = [
    ('s', 'similarity', '',
     _('guess renamed files by similarity (0<=s<=100)'))
]
3082
3082
3083 table = {
3083 table = {
3084 "^add": (add, walkopts + dryrunopts, _('[OPTION]... [FILE]...')),
3084 "^add": (add, walkopts + dryrunopts, _('[OPTION]... [FILE]...')),
3085 "addremove":
3085 "addremove":
3086 (addremove, similarityopts + walkopts + dryrunopts,
3086 (addremove, similarityopts + walkopts + dryrunopts,
3087 _('[OPTION]... [FILE]...')),
3087 _('[OPTION]... [FILE]...')),
3088 "^annotate|blame":
3088 "^annotate|blame":
3089 (annotate,
3089 (annotate,
3090 [('r', 'rev', '', _('annotate the specified revision')),
3090 [('r', 'rev', '', _('annotate the specified revision')),
3091 ('f', 'follow', None, _('follow file copies and renames')),
3091 ('f', 'follow', None, _('follow file copies and renames')),
3092 ('a', 'text', None, _('treat all files as text')),
3092 ('a', 'text', None, _('treat all files as text')),
3093 ('u', 'user', None, _('list the author (long with -v)')),
3093 ('u', 'user', None, _('list the author (long with -v)')),
3094 ('d', 'date', None, _('list the date (short with -q)')),
3094 ('d', 'date', None, _('list the date (short with -q)')),
3095 ('n', 'number', None, _('list the revision number (default)')),
3095 ('n', 'number', None, _('list the revision number (default)')),
3096 ('c', 'changeset', None, _('list the changeset')),
3096 ('c', 'changeset', None, _('list the changeset')),
3097 ('l', 'line-number', None,
3097 ('l', 'line-number', None,
3098 _('show line number at the first appearance'))
3098 _('show line number at the first appearance'))
3099 ] + walkopts,
3099 ] + walkopts,
3100 _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')),
3100 _('[-r REV] [-f] [-a] [-u] [-d] [-n] [-c] [-l] FILE...')),
3101 "archive":
3101 "archive":
3102 (archive,
3102 (archive,
3103 [('', 'no-decode', None, _('do not pass files through decoders')),
3103 [('', 'no-decode', None, _('do not pass files through decoders')),
3104 ('p', 'prefix', '', _('directory prefix for files in archive')),
3104 ('p', 'prefix', '', _('directory prefix for files in archive')),
3105 ('r', 'rev', '', _('revision to distribute')),
3105 ('r', 'rev', '', _('revision to distribute')),
3106 ('t', 'type', '', _('type of distribution to create')),
3106 ('t', 'type', '', _('type of distribution to create')),
3107 ] + walkopts,
3107 ] + walkopts,
3108 _('[OPTION]... DEST')),
3108 _('[OPTION]... DEST')),
3109 "backout":
3109 "backout":
3110 (backout,
3110 (backout,
3111 [('', 'merge', None,
3111 [('', 'merge', None,
3112 _('merge with old dirstate parent after backout')),
3112 _('merge with old dirstate parent after backout')),
3113 ('', 'parent', '', _('parent to choose when backing out merge')),
3113 ('', 'parent', '', _('parent to choose when backing out merge')),
3114 ('r', 'rev', '', _('revision to backout')),
3114 ('r', 'rev', '', _('revision to backout')),
3115 ] + walkopts + commitopts + commitopts2,
3115 ] + walkopts + commitopts + commitopts2,
3116 _('[OPTION]... [-r] REV')),
3116 _('[OPTION]... [-r] REV')),
3117 "bisect":
3117 "bisect":
3118 (bisect,
3118 (bisect,
3119 [('r', 'reset', False, _('reset bisect state')),
3119 [('r', 'reset', False, _('reset bisect state')),
3120 ('g', 'good', False, _('mark changeset good')),
3120 ('g', 'good', False, _('mark changeset good')),
3121 ('b', 'bad', False, _('mark changeset bad')),
3121 ('b', 'bad', False, _('mark changeset bad')),
3122 ('s', 'skip', False, _('skip testing changeset')),
3122 ('s', 'skip', False, _('skip testing changeset')),
3123 ('c', 'command', '', _('use command to check changeset state')),
3123 ('c', 'command', '', _('use command to check changeset state')),
3124 ('U', 'noupdate', False, _('do not update to target'))],
3124 ('U', 'noupdate', False, _('do not update to target'))],
3125 _("[-gbsr] [-c CMD] [REV]")),
3125 _("[-gbsr] [-c CMD] [REV]")),
3126 "branch":
3126 "branch":
3127 (branch,
3127 (branch,
3128 [('f', 'force', None,
3128 [('f', 'force', None,
3129 _('set branch name even if it shadows an existing branch')),
3129 _('set branch name even if it shadows an existing branch')),
3130 ('C', 'clean', None, _('reset branch name to parent branch name'))],
3130 ('C', 'clean', None, _('reset branch name to parent branch name'))],
3131 _('[-fC] [NAME]')),
3131 _('[-fC] [NAME]')),
3132 "branches":
3132 "branches":
3133 (branches,
3133 (branches,
3134 [('a', 'active', False,
3134 [('a', 'active', False,
3135 _('show only branches that have unmerged heads'))],
3135 _('show only branches that have unmerged heads'))],
3136 _('[-a]')),
3136 _('[-a]')),
3137 "bundle":
3137 "bundle":
3138 (bundle,
3138 (bundle,
3139 [('f', 'force', None,
3139 [('f', 'force', None,
3140 _('run even when remote repository is unrelated')),
3140 _('run even when remote repository is unrelated')),
3141 ('r', 'rev', [],
3141 ('r', 'rev', [],
3142 _('a changeset up to which you would like to bundle')),
3142 _('a changeset up to which you would like to bundle')),
3143 ('', 'base', [],
3143 ('', 'base', [],
3144 _('a base changeset to specify instead of a destination')),
3144 _('a base changeset to specify instead of a destination')),
3145 ('a', 'all', None, _('bundle all changesets in the repository')),
3145 ('a', 'all', None, _('bundle all changesets in the repository')),
3146 ('t', 'type', 'bzip2', _('bundle compression type to use')),
3146 ('t', 'type', 'bzip2', _('bundle compression type to use')),
3147 ] + remoteopts,
3147 ] + remoteopts,
3148 _('[-f] [-a] [-r REV]... [--base REV]... FILE [DEST]')),
3148 _('[-f] [-a] [-r REV]... [--base REV]... FILE [DEST]')),
3149 "cat":
3149 "cat":
3150 (cat,
3150 (cat,
3151 [('o', 'output', '', _('print output to file with formatted name')),
3151 [('o', 'output', '', _('print output to file with formatted name')),
3152 ('r', 'rev', '', _('print the given revision')),
3152 ('r', 'rev', '', _('print the given revision')),
3153 ('', 'decode', None, _('apply any matching decode filter')),
3153 ('', 'decode', None, _('apply any matching decode filter')),
3154 ] + walkopts,
3154 ] + walkopts,
3155 _('[OPTION]... FILE...')),
3155 _('[OPTION]... FILE...')),
3156 "^clone":
3156 "^clone":
3157 (clone,
3157 (clone,
3158 [('U', 'noupdate', None,
3158 [('U', 'noupdate', None,
3159 _('the clone will only contain a repository (no working copy)')),
3159 _('the clone will only contain a repository (no working copy)')),
3160 ('r', 'rev', [],
3160 ('r', 'rev', [],
3161 _('a changeset you would like to have after cloning')),
3161 _('a changeset you would like to have after cloning')),
3162 ('', 'pull', None, _('use pull protocol to copy metadata')),
3162 ('', 'pull', None, _('use pull protocol to copy metadata')),
3163 ('', 'uncompressed', None,
3163 ('', 'uncompressed', None,
3164 _('use uncompressed transfer (fast over LAN)')),
3164 _('use uncompressed transfer (fast over LAN)')),
3165 ] + remoteopts,
3165 ] + remoteopts,
3166 _('[OPTION]... SOURCE [DEST]')),
3166 _('[OPTION]... SOURCE [DEST]')),
3167 "^commit|ci":
3167 "^commit|ci":
3168 (commit,
3168 (commit,
3169 [('A', 'addremove', None,
3169 [('A', 'addremove', None,
3170 _('mark new/missing files as added/removed before committing')),
3170 _('mark new/missing files as added/removed before committing')),
3171 ('', 'close-branch', None,
3171 ('', 'close-branch', None,
3172 _('mark a branch as closed, hiding it from the branch list')),
3172 _('mark a branch as closed, hiding it from the branch list')),
3173 ] + walkopts + commitopts + commitopts2,
3173 ] + walkopts + commitopts + commitopts2,
3174 _('[OPTION]... [FILE]...')),
3174 _('[OPTION]... [FILE]...')),
3175 "copy|cp":
3175 "copy|cp":
3176 (copy,
3176 (copy,
3177 [('A', 'after', None, _('record a copy that has already occurred')),
3177 [('A', 'after', None, _('record a copy that has already occurred')),
3178 ('f', 'force', None,
3178 ('f', 'force', None,
3179 _('forcibly copy over an existing managed file')),
3179 _('forcibly copy over an existing managed file')),
3180 ] + walkopts + dryrunopts,
3180 ] + walkopts + dryrunopts,
3181 _('[OPTION]... [SOURCE]... DEST')),
3181 _('[OPTION]... [SOURCE]... DEST')),
3182 "debugancestor": (debugancestor, [], _('[INDEX] REV1 REV2')),
3182 "debugancestor": (debugancestor, [], _('[INDEX] REV1 REV2')),
3183 "debugcheckstate": (debugcheckstate, []),
3183 "debugcheckstate": (debugcheckstate, []),
3184 "debugcommands": (debugcommands, [], _('[COMMAND]')),
3184 "debugcommands": (debugcommands, [], _('[COMMAND]')),
3185 "debugcomplete":
3185 "debugcomplete":
3186 (debugcomplete,
3186 (debugcomplete,
3187 [('o', 'options', None, _('show the command options'))],
3187 [('o', 'options', None, _('show the command options'))],
3188 _('[-o] CMD')),
3188 _('[-o] CMD')),
3189 "debugdate":
3189 "debugdate":
3190 (debugdate,
3190 (debugdate,
3191 [('e', 'extended', None, _('try extended date formats'))],
3191 [('e', 'extended', None, _('try extended date formats'))],
3192 _('[-e] DATE [RANGE]')),
3192 _('[-e] DATE [RANGE]')),
3193 "debugdata": (debugdata, [], _('FILE REV')),
3193 "debugdata": (debugdata, [], _('FILE REV')),
3194 "debugfsinfo": (debugfsinfo, [], _('[PATH]')),
3194 "debugfsinfo": (debugfsinfo, [], _('[PATH]')),
3195 "debugindex": (debugindex, [], _('FILE')),
3195 "debugindex": (debugindex, [], _('FILE')),
3196 "debugindexdot": (debugindexdot, [], _('FILE')),
3196 "debugindexdot": (debugindexdot, [], _('FILE')),
3197 "debuginstall": (debuginstall, []),
3197 "debuginstall": (debuginstall, []),
3198 "debugrawcommit|rawcommit":
3198 "debugrawcommit|rawcommit":
3199 (rawcommit,
3199 (rawcommit,
3200 [('p', 'parent', [], _('parent')),
3200 [('p', 'parent', [], _('parent')),
3201 ('F', 'files', '', _('file list'))
3201 ('F', 'files', '', _('file list'))
3202 ] + commitopts + commitopts2,
3202 ] + commitopts + commitopts2,
3203 _('[OPTION]... [FILE]...')),
3203 _('[OPTION]... [FILE]...')),
3204 "debugrebuildstate":
3204 "debugrebuildstate":
3205 (debugrebuildstate,
3205 (debugrebuildstate,
3206 [('r', 'rev', '', _('revision to rebuild to'))],
3206 [('r', 'rev', '', _('revision to rebuild to'))],
3207 _('[-r REV] [REV]')),
3207 _('[-r REV] [REV]')),
3208 "debugrename":
3208 "debugrename":
3209 (debugrename,
3209 (debugrename,
3210 [('r', 'rev', '', _('revision to debug'))],
3210 [('r', 'rev', '', _('revision to debug'))],
3211 _('[-r REV] FILE')),
3211 _('[-r REV] FILE')),
3212 "debugsetparents":
3212 "debugsetparents":
3213 (debugsetparents, [], _('REV1 [REV2]')),
3213 (debugsetparents, [], _('REV1 [REV2]')),
3214 "debugstate":
3214 "debugstate":
3215 (debugstate,
3215 (debugstate,
3216 [('', 'nodates', None, _('do not display the saved mtime'))],
3216 [('', 'nodates', None, _('do not display the saved mtime'))],
3217 _('[OPTION]...')),
3217 _('[OPTION]...')),
3218 "debugwalk": (debugwalk, walkopts, _('[OPTION]... [FILE]...')),
3218 "debugwalk": (debugwalk, walkopts, _('[OPTION]... [FILE]...')),
3219 "^diff":
3219 "^diff":
3220 (diff,
3220 (diff,
3221 [('r', 'rev', [], _('revision')),
3221 [('r', 'rev', [], _('revision')),
3222 ('c', 'change', '', _('change made by revision'))
3222 ('c', 'change', '', _('change made by revision'))
3223 ] + diffopts + diffopts2 + walkopts,
3223 ] + diffopts + diffopts2 + walkopts,
3224 _('[OPTION]... [-r REV1 [-r REV2]] [FILE]...')),
3224 _('[OPTION]... [-r REV1 [-r REV2]] [FILE]...')),
3225 "^export":
3225 "^export":
3226 (export,
3226 (export,
3227 [('o', 'output', '', _('print output to file with formatted name')),
3227 [('o', 'output', '', _('print output to file with formatted name')),
3228 ('', 'switch-parent', None, _('diff against the second parent'))
3228 ('', 'switch-parent', None, _('diff against the second parent'))
3229 ] + diffopts,
3229 ] + diffopts,
3230 _('[OPTION]... [-o OUTFILESPEC] REV...')),
3230 _('[OPTION]... [-o OUTFILESPEC] REV...')),
3231 "grep":
3231 "grep":
3232 (grep,
3232 (grep,
3233 [('0', 'print0', None, _('end fields with NUL')),
3233 [('0', 'print0', None, _('end fields with NUL')),
3234 ('', 'all', None, _('print all revisions that match')),
3234 ('', 'all', None, _('print all revisions that match')),
3235 ('f', 'follow', None,
3235 ('f', 'follow', None,
3236 _('follow changeset history, or file history across copies and renames')),
3236 _('follow changeset history, or file history across copies and renames')),
3237 ('i', 'ignore-case', None, _('ignore case when matching')),
3237 ('i', 'ignore-case', None, _('ignore case when matching')),
3238 ('l', 'files-with-matches', None,
3238 ('l', 'files-with-matches', None,
3239 _('print only filenames and revisions that match')),
3239 _('print only filenames and revisions that match')),
3240 ('n', 'line-number', None, _('print matching line numbers')),
3240 ('n', 'line-number', None, _('print matching line numbers')),
3241 ('r', 'rev', [], _('search in given revision range')),
3241 ('r', 'rev', [], _('search in given revision range')),
3242 ('u', 'user', None, _('list the author (long with -v)')),
3242 ('u', 'user', None, _('list the author (long with -v)')),
3243 ('d', 'date', None, _('list the date (short with -q)')),
3243 ('d', 'date', None, _('list the date (short with -q)')),
3244 ] + walkopts,
3244 ] + walkopts,
3245 _('[OPTION]... PATTERN [FILE]...')),
3245 _('[OPTION]... PATTERN [FILE]...')),
3246 "heads":
3246 "heads":
3247 (heads,
3247 (heads,
3248 [('r', 'rev', '', _('show only heads which are descendants of REV')),
3248 [('r', 'rev', '', _('show only heads which are descendants of REV')),
3249 ('a', 'active', False,
3249 ('a', 'active', False,
3250 _('show only the active heads from open branches')),
3250 _('show only the active heads from open branches')),
3251 ] + templateopts,
3251 ] + templateopts,
3252 _('[-r REV] [REV]...')),
3252 _('[-r REV] [REV]...')),
3253 "help": (help_, [], _('[TOPIC]')),
3253 "help": (help_, [], _('[TOPIC]')),
3254 "identify|id":
3254 "identify|id":
3255 (identify,
3255 (identify,
3256 [('r', 'rev', '', _('identify the specified revision')),
3256 [('r', 'rev', '', _('identify the specified revision')),
3257 ('n', 'num', None, _('show local revision number')),
3257 ('n', 'num', None, _('show local revision number')),
3258 ('i', 'id', None, _('show global revision id')),
3258 ('i', 'id', None, _('show global revision id')),
3259 ('b', 'branch', None, _('show branch')),
3259 ('b', 'branch', None, _('show branch')),
3260 ('t', 'tags', None, _('show tags'))],
3260 ('t', 'tags', None, _('show tags'))],
3261 _('[-nibt] [-r REV] [SOURCE]')),
3261 _('[-nibt] [-r REV] [SOURCE]')),
3262 "import|patch":
3262 "import|patch":
3263 (import_,
3263 (import_,
3264 [('p', 'strip', 1,
3264 [('p', 'strip', 1,
3265 _('directory strip option for patch. This has the same '
3265 _('directory strip option for patch. This has the same '
3266 'meaning as the corresponding patch option')),
3266 'meaning as the corresponding patch option')),
3267 ('b', 'base', '', _('base path')),
3267 ('b', 'base', '', _('base path')),
3268 ('f', 'force', None,
3268 ('f', 'force', None,
3269 _('skip check for outstanding uncommitted changes')),
3269 _('skip check for outstanding uncommitted changes')),
3270 ('', 'no-commit', None, _("don't commit, just update the working directory")),
3270 ('', 'no-commit', None, _("don't commit, just update the working directory")),
3271 ('', 'exact', None,
3271 ('', 'exact', None,
3272 _('apply patch to the nodes from which it was generated')),
3272 _('apply patch to the nodes from which it was generated')),
3273 ('', 'import-branch', None,
3273 ('', 'import-branch', None,
3274 _('use any branch information in patch (implied by --exact)'))] +
3274 _('use any branch information in patch (implied by --exact)'))] +
3275 commitopts + commitopts2 + similarityopts,
3275 commitopts + commitopts2 + similarityopts,
3276 _('[OPTION]... PATCH...')),
3276 _('[OPTION]... PATCH...')),
3277 "incoming|in":
3277 "incoming|in":
3278 (incoming,
3278 (incoming,
3279 [('f', 'force', None,
3279 [('f', 'force', None,
3280 _('run even when remote repository is unrelated')),
3280 _('run even when remote repository is unrelated')),
3281 ('n', 'newest-first', None, _('show newest record first')),
3281 ('n', 'newest-first', None, _('show newest record first')),
3282 ('', 'bundle', '', _('file to store the bundles into')),
3282 ('', 'bundle', '', _('file to store the bundles into')),
3283 ('r', 'rev', [],
3283 ('r', 'rev', [],
3284 _('a specific revision up to which you would like to pull')),
3284 _('a specific revision up to which you would like to pull')),
3285 ] + logopts + remoteopts,
3285 ] + logopts + remoteopts,
3286 _('[-p] [-n] [-M] [-f] [-r REV]...'
3286 _('[-p] [-n] [-M] [-f] [-r REV]...'
3287 ' [--bundle FILENAME] [SOURCE]')),
3287 ' [--bundle FILENAME] [SOURCE]')),
3288 "^init":
3288 "^init":
3289 (init,
3289 (init,
3290 remoteopts,
3290 remoteopts,
3291 _('[-e CMD] [--remotecmd CMD] [DEST]')),
3291 _('[-e CMD] [--remotecmd CMD] [DEST]')),
3292 "locate":
3292 "locate":
3293 (locate,
3293 (locate,
3294 [('r', 'rev', '', _('search the repository as it stood at REV')),
3294 [('r', 'rev', '', _('search the repository as it stood at REV')),
3295 ('0', 'print0', None,
3295 ('0', 'print0', None,
3296 _('end filenames with NUL, for use with xargs')),
3296 _('end filenames with NUL, for use with xargs')),
3297 ('f', 'fullpath', None,
3297 ('f', 'fullpath', None,
3298 _('print complete paths from the filesystem root')),
3298 _('print complete paths from the filesystem root')),
3299 ] + walkopts,
3299 ] + walkopts,
3300 _('[OPTION]... [PATTERN]...')),
3300 _('[OPTION]... [PATTERN]...')),
3301 "^log|history":
3301 "^log|history":
3302 (log,
3302 (log,
3303 [('f', 'follow', None,
3303 [('f', 'follow', None,
3304 _('follow changeset history, or file history across copies and renames')),
3304 _('follow changeset history, or file history across copies and renames')),
3305 ('', 'follow-first', None,
3305 ('', 'follow-first', None,
3306 _('only follow the first parent of merge changesets')),
3306 _('only follow the first parent of merge changesets')),
3307 ('d', 'date', '', _('show revisions matching date spec')),
3307 ('d', 'date', '', _('show revisions matching date spec')),
3308 ('C', 'copies', None, _('show copied files')),
3308 ('C', 'copies', None, _('show copied files')),
3309 ('k', 'keyword', [], _('do case-insensitive search for a keyword')),
3309 ('k', 'keyword', [], _('do case-insensitive search for a keyword')),
3310 ('r', 'rev', [], _('show the specified revision or range')),
3310 ('r', 'rev', [], _('show the specified revision or range')),
3311 ('', 'removed', None, _('include revisions where files were removed')),
3311 ('', 'removed', None, _('include revisions where files were removed')),
3312 ('m', 'only-merges', None, _('show only merges')),
3312 ('m', 'only-merges', None, _('show only merges')),
3313 ('u', 'user', [], _('revisions committed by user')),
3313 ('u', 'user', [], _('revisions committed by user')),
3314 ('b', 'only-branch', [],
3314 ('b', 'only-branch', [],
3315 _('show only changesets within the given named branch')),
3315 _('show only changesets within the given named branch')),
3316 ('P', 'prune', [], _('do not display revision or any of its ancestors')),
3316 ('P', 'prune', [], _('do not display revision or any of its ancestors')),
3317 ] + logopts + walkopts,
3317 ] + logopts + walkopts,
3318 _('[OPTION]... [FILE]')),
3318 _('[OPTION]... [FILE]')),
3319 "manifest":
3319 "manifest":
3320 (manifest,
3320 (manifest,
3321 [('r', 'rev', '', _('revision to display'))],
3321 [('r', 'rev', '', _('revision to display'))],
3322 _('[-r REV]')),
3322 _('[-r REV]')),
3323 "^merge":
3323 "^merge":
3324 (merge,
3324 (merge,
3325 [('f', 'force', None, _('force a merge with outstanding changes')),
3325 [('f', 'force', None, _('force a merge with outstanding changes')),
3326 ('r', 'rev', '', _('revision to merge')),
3326 ('r', 'rev', '', _('revision to merge')),
3327 ],
3327 ],
3328 _('[-f] [[-r] REV]')),
3328 _('[-f] [[-r] REV]')),
3329 "outgoing|out":
3329 "outgoing|out":
3330 (outgoing,
3330 (outgoing,
3331 [('f', 'force', None,
3331 [('f', 'force', None,
3332 _('run even when remote repository is unrelated')),
3332 _('run even when remote repository is unrelated')),
3333 ('r', 'rev', [],
3333 ('r', 'rev', [],
3334 _('a specific revision up to which you would like to push')),
3334 _('a specific revision up to which you would like to push')),
3335 ('n', 'newest-first', None, _('show newest record first')),
3335 ('n', 'newest-first', None, _('show newest record first')),
3336 ] + logopts + remoteopts,
3336 ] + logopts + remoteopts,
3337 _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')),
3337 _('[-M] [-p] [-n] [-f] [-r REV]... [DEST]')),
3338 "^parents":
3338 "^parents":
3339 (parents,
3339 (parents,
3340 [('r', 'rev', '', _('show parents from the specified revision')),
3340 [('r', 'rev', '', _('show parents from the specified revision')),
3341 ] + templateopts,
3341 ] + templateopts,
3342 _('hg parents [-r REV] [FILE]')),
3342 _('hg parents [-r REV] [FILE]')),
3343 "paths": (paths, [], _('[NAME]')),
3343 "paths": (paths, [], _('[NAME]')),
3344 "^pull":
3344 "^pull":
3345 (pull,
3345 (pull,
3346 [('u', 'update', None,
3346 [('u', 'update', None,
3347 _('update to new tip if changesets were pulled')),
3347 _('update to new tip if changesets were pulled')),
3348 ('f', 'force', None,
3348 ('f', 'force', None,
3349 _('run even when remote repository is unrelated')),
3349 _('run even when remote repository is unrelated')),
3350 ('r', 'rev', [],
3350 ('r', 'rev', [],
3351 _('a specific revision up to which you would like to pull')),
3351 _('a specific revision up to which you would like to pull')),
3352 ] + remoteopts,
3352 ] + remoteopts,
3353 _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')),
3353 _('[-u] [-f] [-r REV]... [-e CMD] [--remotecmd CMD] [SOURCE]')),
3354 "^push":
3354 "^push":
3355 (push,
3355 (push,
3356 [('f', 'force', None, _('force push')),
3356 [('f', 'force', None, _('force push')),
3357 ('r', 'rev', [],
3357 ('r', 'rev', [],
3358 _('a specific revision up to which you would like to push')),
3358 _('a specific revision up to which you would like to push')),
3359 ] + remoteopts,
3359 ] + remoteopts,
3360 _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')),
3360 _('[-f] [-r REV]... [-e CMD] [--remotecmd CMD] [DEST]')),
3361 "recover": (recover, []),
3361 "recover": (recover, []),
3362 "^remove|rm":
3362 "^remove|rm":
3363 (remove,
3363 (remove,
3364 [('A', 'after', None, _('record delete for missing files')),
3364 [('A', 'after', None, _('record delete for missing files')),
3365 ('f', 'force', None,
3365 ('f', 'force', None,
3366 _('remove (and delete) file even if added or modified')),
3366 _('remove (and delete) file even if added or modified')),
3367 ] + walkopts,
3367 ] + walkopts,
3368 _('[OPTION]... FILE...')),
3368 _('[OPTION]... FILE...')),
3369 "rename|mv":
3369 "rename|mv":
3370 (rename,
3370 (rename,
3371 [('A', 'after', None, _('record a rename that has already occurred')),
3371 [('A', 'after', None, _('record a rename that has already occurred')),
3372 ('f', 'force', None,
3372 ('f', 'force', None,
3373 _('forcibly copy over an existing managed file')),
3373 _('forcibly copy over an existing managed file')),
3374 ] + walkopts + dryrunopts,
3374 ] + walkopts + dryrunopts,
3375 _('[OPTION]... SOURCE... DEST')),
3375 _('[OPTION]... SOURCE... DEST')),
3376 "resolve":
3376 "resolve":
3377 (resolve,
3377 (resolve,
3378 [('a', 'all', None, _('remerge all unresolved files')),
3378 [('a', 'all', None, _('remerge all unresolved files')),
3379 ('l', 'list', None, _('list state of files needing merge')),
3379 ('l', 'list', None, _('list state of files needing merge')),
3380 ('m', 'mark', None, _('mark files as resolved')),
3380 ('m', 'mark', None, _('mark files as resolved')),
3381 ('u', 'unmark', None, _('unmark files as resolved'))]
3381 ('u', 'unmark', None, _('unmark files as resolved'))]
3382 + walkopts,
3382 + walkopts,
3383 _('[OPTION]... [FILE]...')),
3383 _('[OPTION]... [FILE]...')),
3384 "revert":
3384 "revert":
3385 (revert,
3385 (revert,
3386 [('a', 'all', None, _('revert all changes when no arguments given')),
3386 [('a', 'all', None, _('revert all changes when no arguments given')),
3387 ('d', 'date', '', _('tipmost revision matching date')),
3387 ('d', 'date', '', _('tipmost revision matching date')),
3388 ('r', 'rev', '', _('revision to revert to')),
3388 ('r', 'rev', '', _('revision to revert to')),
3389 ('', 'no-backup', None, _('do not save backup copies of files')),
3389 ('', 'no-backup', None, _('do not save backup copies of files')),
3390 ] + walkopts + dryrunopts,
3390 ] + walkopts + dryrunopts,
3391 _('[OPTION]... [-r REV] [NAME]...')),
3391 _('[OPTION]... [-r REV] [NAME]...')),
3392 "rollback": (rollback, []),
3392 "rollback": (rollback, []),
3393 "root": (root, []),
3393 "root": (root, []),
3394 "^serve":
3394 "^serve":
3395 (serve,
3395 (serve,
3396 [('A', 'accesslog', '', _('name of access log file to write to')),
3396 [('A', 'accesslog', '', _('name of access log file to write to')),
3397 ('d', 'daemon', None, _('run server in background')),
3397 ('d', 'daemon', None, _('run server in background')),
3398 ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
3398 ('', 'daemon-pipefds', '', _('used internally by daemon mode')),
3399 ('E', 'errorlog', '', _('name of error log file to write to')),
3399 ('E', 'errorlog', '', _('name of error log file to write to')),
3400 ('p', 'port', 0, _('port to listen on (default: 8000)')),
3400 ('p', 'port', 0, _('port to listen on (default: 8000)')),
3401 ('a', 'address', '', _('address to listen on (default: all interfaces)')),
3401 ('a', 'address', '', _('address to listen on (default: all interfaces)')),
3402 ('', 'prefix', '', _('prefix path to serve from (default: server root)')),
3402 ('', 'prefix', '', _('prefix path to serve from (default: server root)')),
3403 ('n', 'name', '',
3403 ('n', 'name', '',
3404 _('name to show in web pages (default: working directory)')),
3404 _('name to show in web pages (default: working directory)')),
3405 ('', 'webdir-conf', '', _('name of the webdir config file'
3405 ('', 'webdir-conf', '', _('name of the webdir config file'
3406 ' (serve more than one repository)')),
3406 ' (serve more than one repository)')),
3407 ('', 'pid-file', '', _('name of file to write process ID to')),
3407 ('', 'pid-file', '', _('name of file to write process ID to')),
3408 ('', 'stdio', None, _('for remote clients')),
3408 ('', 'stdio', None, _('for remote clients')),
3409 ('t', 'templates', '', _('web templates to use')),
3409 ('t', 'templates', '', _('web templates to use')),
3410 ('', 'style', '', _('template style to use')),
3410 ('', 'style', '', _('template style to use')),
3411 ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
3411 ('6', 'ipv6', None, _('use IPv6 in addition to IPv4')),
3412 ('', 'certificate', '', _('SSL certificate file'))],
3412 ('', 'certificate', '', _('SSL certificate file'))],
3413 _('[OPTION]...')),
3413 _('[OPTION]...')),
3414 "showconfig|debugconfig":
3414 "showconfig|debugconfig":
3415 (showconfig,
3415 (showconfig,
3416 [('u', 'untrusted', None, _('show untrusted configuration options'))],
3416 [('u', 'untrusted', None, _('show untrusted configuration options'))],
3417 _('[-u] [NAME]...')),
3417 _('[-u] [NAME]...')),
3418 "^status|st":
3418 "^status|st":
3419 (status,
3419 (status,
3420 [('A', 'all', None, _('show status of all files')),
3420 [('A', 'all', None, _('show status of all files')),
3421 ('m', 'modified', None, _('show only modified files')),
3421 ('m', 'modified', None, _('show only modified files')),
3422 ('a', 'added', None, _('show only added files')),
3422 ('a', 'added', None, _('show only added files')),
3423 ('r', 'removed', None, _('show only removed files')),
3423 ('r', 'removed', None, _('show only removed files')),
3424 ('d', 'deleted', None, _('show only deleted (but tracked) files')),
3424 ('d', 'deleted', None, _('show only deleted (but tracked) files')),
3425 ('c', 'clean', None, _('show only files without changes')),
3425 ('c', 'clean', None, _('show only files without changes')),
3426 ('u', 'unknown', None, _('show only unknown (not tracked) files')),
3426 ('u', 'unknown', None, _('show only unknown (not tracked) files')),
3427 ('i', 'ignored', None, _('show only ignored files')),
3427 ('i', 'ignored', None, _('show only ignored files')),
3428 ('n', 'no-status', None, _('hide status prefix')),
3428 ('n', 'no-status', None, _('hide status prefix')),
3429 ('C', 'copies', None, _('show source of copied files')),
3429 ('C', 'copies', None, _('show source of copied files')),
3430 ('0', 'print0', None,
3430 ('0', 'print0', None,
3431 _('end filenames with NUL, for use with xargs')),
3431 _('end filenames with NUL, for use with xargs')),
3432 ('', 'rev', [], _('show difference from revision')),
3432 ('', 'rev', [], _('show difference from revision')),
3433 ] + walkopts,
3433 ] + walkopts,
3434 _('[OPTION]... [FILE]...')),
3434 _('[OPTION]... [FILE]...')),
3435 "tag":
3435 "tag":
3436 (tag,
3436 (tag,
3437 [('f', 'force', None, _('replace existing tag')),
3437 [('f', 'force', None, _('replace existing tag')),
3438 ('l', 'local', None, _('make the tag local')),
3438 ('l', 'local', None, _('make the tag local')),
3439 ('r', 'rev', '', _('revision to tag')),
3439 ('r', 'rev', '', _('revision to tag')),
3440 ('', 'remove', None, _('remove a tag')),
3440 ('', 'remove', None, _('remove a tag')),
3441 # -l/--local is already there, commitopts cannot be used
3441 # -l/--local is already there, commitopts cannot be used
3442 ('m', 'message', '', _('use <text> as commit message')),
3442 ('m', 'message', '', _('use <text> as commit message')),
3443 ] + commitopts2,
3443 ] + commitopts2,
3444 _('[-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')),
3444 _('[-l] [-m TEXT] [-d DATE] [-u USER] [-r REV] NAME...')),
3445 "tags": (tags, []),
3445 "tags": (tags, []),
3446 "tip":
3446 "tip":
3447 (tip,
3447 (tip,
3448 [('p', 'patch', None, _('show patch')),
3448 [('p', 'patch', None, _('show patch')),
3449 ('g', 'git', None, _('use git extended diff format')),
3449 ('g', 'git', None, _('use git extended diff format')),
3450 ] + templateopts,
3450 ] + templateopts,
3451 _('[-p]')),
3451 _('[-p]')),
3452 "unbundle":
3452 "unbundle":
3453 (unbundle,
3453 (unbundle,
3454 [('u', 'update', None,
3454 [('u', 'update', None,
3455 _('update to new tip if changesets were unbundled'))],
3455 _('update to new tip if changesets were unbundled'))],
3456 _('[-u] FILE...')),
3456 _('[-u] FILE...')),
3457 "^update|up|checkout|co":
3457 "^update|up|checkout|co":
3458 (update,
3458 (update,
3459 [('C', 'clean', None, _('overwrite locally modified files (no backup)')),
3459 [('C', 'clean', None, _('overwrite locally modified files (no backup)')),
3460 ('d', 'date', '', _('tipmost revision matching date')),
3460 ('d', 'date', '', _('tipmost revision matching date')),
3461 ('r', 'rev', '', _('revision'))],
3461 ('r', 'rev', '', _('revision'))],
3462 _('[-C] [-d DATE] [[-r] REV]')),
3462 _('[-C] [-d DATE] [[-r] REV]')),
3463 "verify": (verify, []),
3463 "verify": (verify, []),
3464 "version": (version_, []),
3464 "version": (version_, []),
3465 }
3465 }
3466
3466
3467 norepo = ("clone init version help debugcommands debugcomplete debugdata"
3467 norepo = ("clone init version help debugcommands debugcomplete debugdata"
3468 " debugindex debugindexdot debugdate debuginstall debugfsinfo")
3468 " debugindex debugindexdot debugdate debuginstall debugfsinfo")
3469 optionalrepo = ("identify paths serve showconfig debugancestor")
3469 optionalrepo = ("identify paths serve showconfig debugancestor")
@@ -1,232 +1,232 b''
1 # copies.py - copy detection for Mercurial
1 # copies.py - copy detection for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from i18n import _
8 from i18n import _
9 import util, heapq
9 import util, heapq
10
10
11 def _nonoverlap(d1, d2, d3):
11 def _nonoverlap(d1, d2, d3):
12 "Return list of elements in d1 not in d2 or d3"
12 "Return list of elements in d1 not in d2 or d3"
13 return util.sort([d for d in d1 if d not in d3 and d not in d2])
13 return util.sort([d for d in d1 if d not in d3 and d not in d2])
14
14
15 def _dirname(f):
15 def _dirname(f):
16 s = f.rfind("/")
16 s = f.rfind("/")
17 if s == -1:
17 if s == -1:
18 return ""
18 return ""
19 return f[:s]
19 return f[:s]
20
20
21 def _dirs(files):
21 def _dirs(files):
22 d = {}
22 d = {}
23 for f in files:
23 for f in files:
24 f = _dirname(f)
24 f = _dirname(f)
25 while f not in d:
25 while f not in d:
26 d[f] = True
26 d[f] = True
27 f = _dirname(f)
27 f = _dirname(f)
28 return d
28 return d
29
29
30 def _findoldnames(fctx, limit):
30 def _findoldnames(fctx, limit):
31 "find files that path was copied from, back to linkrev limit"
31 "find files that path was copied from, back to linkrev limit"
32 old = {}
32 old = {}
33 seen = {}
33 seen = {}
34 orig = fctx.path()
34 orig = fctx.path()
35 visit = [(fctx, 0)]
35 visit = [(fctx, 0)]
36 while visit:
36 while visit:
37 fc, depth = visit.pop()
37 fc, depth = visit.pop()
38 s = str(fc)
38 s = str(fc)
39 if s in seen:
39 if s in seen:
40 continue
40 continue
41 seen[s] = 1
41 seen[s] = 1
42 if fc.path() != orig and fc.path() not in old:
42 if fc.path() != orig and fc.path() not in old:
43 old[fc.path()] = (depth, fc.path()) # remember depth
43 old[fc.path()] = (depth, fc.path()) # remember depth
44 if fc.rev() < limit and fc.rev() is not None:
44 if fc.rev() < limit and fc.rev() is not None:
45 continue
45 continue
46 visit += [(p, depth - 1) for p in fc.parents()]
46 visit += [(p, depth - 1) for p in fc.parents()]
47
47
48 # return old names sorted by depth
48 # return old names sorted by depth
49 return [o[1] for o in util.sort(old.values())]
49 return [o[1] for o in util.sort(old.values())]
50
50
51 def _findlimit(repo, a, b):
51 def _findlimit(repo, a, b):
52 "find the earliest revision that's an ancestor of a or b but not both"
52 "find the earliest revision that's an ancestor of a or b but not both"
53 # basic idea:
53 # basic idea:
54 # - mark a and b with different sides
54 # - mark a and b with different sides
55 # - if a parent's children are all on the same side, the parent is
55 # - if a parent's children are all on the same side, the parent is
56 # on that side, otherwise it is on no side
56 # on that side, otherwise it is on no side
57 # - walk the graph in topological order with the help of a heap;
57 # - walk the graph in topological order with the help of a heap;
58 # - add unseen parents to side map
58 # - add unseen parents to side map
59 # - clear side of any parent that has children on different sides
59 # - clear side of any parent that has children on different sides
60 # - track number of interesting revs that might still be on a side
60 # - track number of interesting revs that might still be on a side
61 # - track the lowest interesting rev seen
61 # - track the lowest interesting rev seen
62 # - quit when interesting revs is zero
62 # - quit when interesting revs is zero
63
63
64 cl = repo.changelog
64 cl = repo.changelog
65 working = len(cl) # pseudo rev for the working directory
65 working = len(cl) # pseudo rev for the working directory
66 if a is None:
66 if a is None:
67 a = working
67 a = working
68 if b is None:
68 if b is None:
69 b = working
69 b = working
70
70
71 side = {a: -1, b: 1}
71 side = {a: -1, b: 1}
72 visit = [-a, -b]
72 visit = [-a, -b]
73 heapq.heapify(visit)
73 heapq.heapify(visit)
74 interesting = len(visit)
74 interesting = len(visit)
75 limit = working
75 limit = working
76
76
77 while interesting:
77 while interesting:
78 r = -heapq.heappop(visit)
78 r = -heapq.heappop(visit)
79 if r == working:
79 if r == working:
80 parents = [cl.rev(p) for p in repo.dirstate.parents()]
80 parents = [cl.rev(p) for p in repo.dirstate.parents()]
81 else:
81 else:
82 parents = cl.parentrevs(r)
82 parents = cl.parentrevs(r)
83 for p in parents:
83 for p in parents:
84 if p not in side:
84 if p not in side:
85 # first time we see p; add it to visit
85 # first time we see p; add it to visit
86 side[p] = side[r]
86 side[p] = side[r]
87 if side[p]:
87 if side[p]:
88 interesting += 1
88 interesting += 1
89 heapq.heappush(visit, -p)
89 heapq.heappush(visit, -p)
90 elif side[p] and side[p] != side[r]:
90 elif side[p] and side[p] != side[r]:
91 # p was interesting but now we know better
91 # p was interesting but now we know better
92 side[p] = 0
92 side[p] = 0
93 interesting -= 1
93 interesting -= 1
94 if side[r]:
94 if side[r]:
95 limit = r # lowest rev visited
95 limit = r # lowest rev visited
96 interesting -= 1
96 interesting -= 1
97 return limit
97 return limit
98
98
99 def copies(repo, c1, c2, ca, checkdirs=False):
99 def copies(repo, c1, c2, ca, checkdirs=False):
100 """
100 """
101 Find moves and copies between context c1 and c2
101 Find moves and copies between context c1 and c2
102 """
102 """
103 # avoid silly behavior for update from empty dir
103 # avoid silly behavior for update from empty dir
104 if not c1 or not c2 or c1 == c2:
104 if not c1 or not c2 or c1 == c2:
105 return {}, {}
105 return {}, {}
106
106
107 # avoid silly behavior for parent -> working dir
107 # avoid silly behavior for parent -> working dir
108 if c2.node() == None and c1.node() == repo.dirstate.parents()[0]:
108 if c2.node() == None and c1.node() == repo.dirstate.parents()[0]:
109 return repo.dirstate.copies(), {}
109 return repo.dirstate.copies(), {}
110
110
111 limit = _findlimit(repo, c1.rev(), c2.rev())
111 limit = _findlimit(repo, c1.rev(), c2.rev())
112 m1 = c1.manifest()
112 m1 = c1.manifest()
113 m2 = c2.manifest()
113 m2 = c2.manifest()
114 ma = ca.manifest()
114 ma = ca.manifest()
115
115
116 def makectx(f, n):
116 def makectx(f, n):
117 if len(n) != 20: # in a working context?
117 if len(n) != 20: # in a working context?
118 if c1.rev() is None:
118 if c1.rev() is None:
119 return c1.filectx(f)
119 return c1.filectx(f)
120 return c2.filectx(f)
120 return c2.filectx(f)
121 return repo.filectx(f, fileid=n)
121 return repo.filectx(f, fileid=n)
122 ctx = util.cachefunc(makectx)
122 ctx = util.cachefunc(makectx)
123
123
124 copy = {}
124 copy = {}
125 fullcopy = {}
125 fullcopy = {}
126 diverge = {}
126 diverge = {}
127
127
128 def checkcopies(f, m1, m2):
128 def checkcopies(f, m1, m2):
129 '''check possible copies of f from m1 to m2'''
129 '''check possible copies of f from m1 to m2'''
130 c1 = ctx(f, m1[f])
130 c1 = ctx(f, m1[f])
131 for of in _findoldnames(c1, limit):
131 for of in _findoldnames(c1, limit):
132 fullcopy[f] = of # remember for dir rename detection
132 fullcopy[f] = of # remember for dir rename detection
133 if of in m2: # original file not in other manifest?
133 if of in m2: # original file not in other manifest?
134 # if the original file is unchanged on the other branch,
134 # if the original file is unchanged on the other branch,
135 # no merge needed
135 # no merge needed
136 if m2[of] != ma.get(of):
136 if m2[of] != ma.get(of):
137 c2 = ctx(of, m2[of])
137 c2 = ctx(of, m2[of])
138 ca = c1.ancestor(c2)
138 ca = c1.ancestor(c2)
139 # related and named changed on only one side?
139 # related and named changed on only one side?
140 if ca and (ca.path() == f or ca.path() == c2.path()):
140 if ca and (ca.path() == f or ca.path() == c2.path()):
141 if c1 != ca or c2 != ca: # merge needed?
141 if c1 != ca or c2 != ca: # merge needed?
142 copy[f] = of
142 copy[f] = of
143 elif of in ma:
143 elif of in ma:
144 diverge.setdefault(of, []).append(f)
144 diverge.setdefault(of, []).append(f)
145
145
146 repo.ui.debug(_(" searching for copies back to rev %d\n") % limit)
146 repo.ui.debug(_(" searching for copies back to rev %d\n") % limit)
147
147
148 u1 = _nonoverlap(m1, m2, ma)
148 u1 = _nonoverlap(m1, m2, ma)
149 u2 = _nonoverlap(m2, m1, ma)
149 u2 = _nonoverlap(m2, m1, ma)
150
150
151 if u1:
151 if u1:
152 repo.ui.debug(_(" unmatched files in local:\n %s\n")
152 repo.ui.debug(_(" unmatched files in local:\n %s\n")
153 % "\n ".join(u1))
153 % "\n ".join(u1))
154 if u2:
154 if u2:
155 repo.ui.debug(_(" unmatched files in other:\n %s\n")
155 repo.ui.debug(_(" unmatched files in other:\n %s\n")
156 % "\n ".join(u2))
156 % "\n ".join(u2))
157
157
158 for f in u1:
158 for f in u1:
159 checkcopies(f, m1, m2)
159 checkcopies(f, m1, m2)
160 for f in u2:
160 for f in u2:
161 checkcopies(f, m2, m1)
161 checkcopies(f, m2, m1)
162
162
163 diverge2 = {}
163 diverge2 = set()
164 for of, fl in diverge.items():
164 for of, fl in diverge.items():
165 if len(fl) == 1:
165 if len(fl) == 1:
166 del diverge[of] # not actually divergent
166 del diverge[of] # not actually divergent
167 else:
167 else:
168 diverge2.update(dict.fromkeys(fl)) # reverse map for below
168 diverge2.update(fl) # reverse map for below
169
169
170 if fullcopy:
170 if fullcopy:
171 repo.ui.debug(_(" all copies found (* = to merge, ! = divergent):\n"))
171 repo.ui.debug(_(" all copies found (* = to merge, ! = divergent):\n"))
172 for f in fullcopy:
172 for f in fullcopy:
173 note = ""
173 note = ""
174 if f in copy: note += "*"
174 if f in copy: note += "*"
175 if f in diverge2: note += "!"
175 if f in diverge2: note += "!"
176 repo.ui.debug(_(" %s -> %s %s\n") % (f, fullcopy[f], note))
176 repo.ui.debug(_(" %s -> %s %s\n") % (f, fullcopy[f], note))
177 del diverge2
177 del diverge2
178
178
179 if not fullcopy or not checkdirs:
179 if not fullcopy or not checkdirs:
180 return copy, diverge
180 return copy, diverge
181
181
182 repo.ui.debug(_(" checking for directory renames\n"))
182 repo.ui.debug(_(" checking for directory renames\n"))
183
183
184 # generate a directory move map
184 # generate a directory move map
185 d1, d2 = _dirs(m1), _dirs(m2)
185 d1, d2 = _dirs(m1), _dirs(m2)
186 invalid = {}
186 invalid = {}
187 dirmove = {}
187 dirmove = {}
188
188
189 # examine each file copy for a potential directory move, which is
189 # examine each file copy for a potential directory move, which is
190 # when all the files in a directory are moved to a new directory
190 # when all the files in a directory are moved to a new directory
191 for dst, src in fullcopy.iteritems():
191 for dst, src in fullcopy.iteritems():
192 dsrc, ddst = _dirname(src), _dirname(dst)
192 dsrc, ddst = _dirname(src), _dirname(dst)
193 if dsrc in invalid:
193 if dsrc in invalid:
194 # already seen to be uninteresting
194 # already seen to be uninteresting
195 continue
195 continue
196 elif dsrc in d1 and ddst in d1:
196 elif dsrc in d1 and ddst in d1:
197 # directory wasn't entirely moved locally
197 # directory wasn't entirely moved locally
198 invalid[dsrc] = True
198 invalid[dsrc] = True
199 elif dsrc in d2 and ddst in d2:
199 elif dsrc in d2 and ddst in d2:
200 # directory wasn't entirely moved remotely
200 # directory wasn't entirely moved remotely
201 invalid[dsrc] = True
201 invalid[dsrc] = True
202 elif dsrc in dirmove and dirmove[dsrc] != ddst:
202 elif dsrc in dirmove and dirmove[dsrc] != ddst:
203 # files from the same directory moved to two different places
203 # files from the same directory moved to two different places
204 invalid[dsrc] = True
204 invalid[dsrc] = True
205 else:
205 else:
206 # looks good so far
206 # looks good so far
207 dirmove[dsrc + "/"] = ddst + "/"
207 dirmove[dsrc + "/"] = ddst + "/"
208
208
209 for i in invalid:
209 for i in invalid:
210 if i in dirmove:
210 if i in dirmove:
211 del dirmove[i]
211 del dirmove[i]
212 del d1, d2, invalid
212 del d1, d2, invalid
213
213
214 if not dirmove:
214 if not dirmove:
215 return copy, diverge
215 return copy, diverge
216
216
217 for d in dirmove:
217 for d in dirmove:
218 repo.ui.debug(_(" dir %s -> %s\n") % (d, dirmove[d]))
218 repo.ui.debug(_(" dir %s -> %s\n") % (d, dirmove[d]))
219
219
220 # check unaccounted nonoverlapping files against directory moves
220 # check unaccounted nonoverlapping files against directory moves
221 for f in u1 + u2:
221 for f in u1 + u2:
222 if f not in fullcopy:
222 if f not in fullcopy:
223 for d in dirmove:
223 for d in dirmove:
224 if f.startswith(d):
224 if f.startswith(d):
225 # new file added in a directory that was moved, move it
225 # new file added in a directory that was moved, move it
226 df = dirmove[d] + f[len(d):]
226 df = dirmove[d] + f[len(d):]
227 if df not in copy:
227 if df not in copy:
228 copy[f] = df
228 copy[f] = df
229 repo.ui.debug(_(" file %s -> %s\n") % (f, copy[f]))
229 repo.ui.debug(_(" file %s -> %s\n") % (f, copy[f]))
230 break
230 break
231
231
232 return copy, diverge
232 return copy, diverge
@@ -1,144 +1,144 b''
1 # changelog bisection for mercurial
1 # changelog bisection for mercurial
2 #
2 #
3 # Copyright 2007 Matt Mackall
3 # Copyright 2007 Matt Mackall
4 # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
4 # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
5 # Inspired by git bisect, extension skeleton taken from mq.py.
5 # Inspired by git bisect, extension skeleton taken from mq.py.
6 #
6 #
7 # This software may be used and distributed according to the terms
7 # This software may be used and distributed according to the terms
8 # of the GNU General Public License, incorporated herein by reference.
8 # of the GNU General Public License, incorporated herein by reference.
9
9
10 import os
10 import os
11 from i18n import _
11 from i18n import _
12 from node import short, hex
12 from node import short, hex
13 import util
13 import util
14
14
15 def bisect(changelog, state):
15 def bisect(changelog, state):
16 """find the next node (if any) for testing during a bisect search.
16 """find the next node (if any) for testing during a bisect search.
17 returns a (nodes, number, good) tuple.
17 returns a (nodes, number, good) tuple.
18
18
19 'nodes' is the final result of the bisect if 'number' is 0.
19 'nodes' is the final result of the bisect if 'number' is 0.
20 Otherwise 'number' indicates the remaining possible candidates for
20 Otherwise 'number' indicates the remaining possible candidates for
21 the search and 'nodes' contains the next bisect target.
21 the search and 'nodes' contains the next bisect target.
22 'good' is True if bisect is searching for a first good changeset, False
22 'good' is True if bisect is searching for a first good changeset, False
23 if searching for a first bad one.
23 if searching for a first bad one.
24 """
24 """
25
25
26 clparents = changelog.parentrevs
26 clparents = changelog.parentrevs
27 skip = dict.fromkeys([changelog.rev(n) for n in state['skip']])
27 skip = set([changelog.rev(n) for n in state['skip']])
28
28
29 def buildancestors(bad, good):
29 def buildancestors(bad, good):
30 # only the earliest bad revision matters
30 # only the earliest bad revision matters
31 badrev = min([changelog.rev(n) for n in bad])
31 badrev = min([changelog.rev(n) for n in bad])
32 goodrevs = [changelog.rev(n) for n in good]
32 goodrevs = [changelog.rev(n) for n in good]
33 # build ancestors array
33 # build ancestors array
34 ancestors = [[]] * (len(changelog) + 1) # an extra for [-1]
34 ancestors = [[]] * (len(changelog) + 1) # an extra for [-1]
35
35
36 # clear good revs from array
36 # clear good revs from array
37 for node in goodrevs:
37 for node in goodrevs:
38 ancestors[node] = None
38 ancestors[node] = None
39 for rev in xrange(len(changelog), -1, -1):
39 for rev in xrange(len(changelog), -1, -1):
40 if ancestors[rev] is None:
40 if ancestors[rev] is None:
41 for prev in clparents(rev):
41 for prev in clparents(rev):
42 ancestors[prev] = None
42 ancestors[prev] = None
43
43
44 if ancestors[badrev] is None:
44 if ancestors[badrev] is None:
45 return badrev, None
45 return badrev, None
46 return badrev, ancestors
46 return badrev, ancestors
47
47
48 good = 0
48 good = 0
49 badrev, ancestors = buildancestors(state['bad'], state['good'])
49 badrev, ancestors = buildancestors(state['bad'], state['good'])
50 if not ancestors: # looking for bad to good transition?
50 if not ancestors: # looking for bad to good transition?
51 good = 1
51 good = 1
52 badrev, ancestors = buildancestors(state['good'], state['bad'])
52 badrev, ancestors = buildancestors(state['good'], state['bad'])
53 bad = changelog.node(badrev)
53 bad = changelog.node(badrev)
54 if not ancestors: # now we're confused
54 if not ancestors: # now we're confused
55 raise util.Abort(_("Inconsistent state, %s:%s is good and bad")
55 raise util.Abort(_("Inconsistent state, %s:%s is good and bad")
56 % (badrev, short(bad)))
56 % (badrev, short(bad)))
57
57
58 # build children dict
58 # build children dict
59 children = {}
59 children = {}
60 visit = [badrev]
60 visit = [badrev]
61 candidates = []
61 candidates = []
62 while visit:
62 while visit:
63 rev = visit.pop(0)
63 rev = visit.pop(0)
64 if ancestors[rev] == []:
64 if ancestors[rev] == []:
65 candidates.append(rev)
65 candidates.append(rev)
66 for prev in clparents(rev):
66 for prev in clparents(rev):
67 if prev != -1:
67 if prev != -1:
68 if prev in children:
68 if prev in children:
69 children[prev].append(rev)
69 children[prev].append(rev)
70 else:
70 else:
71 children[prev] = [rev]
71 children[prev] = [rev]
72 visit.append(prev)
72 visit.append(prev)
73
73
74 candidates.sort()
74 candidates.sort()
75 # have we narrowed it down to one entry?
75 # have we narrowed it down to one entry?
76 # or have all other possible candidates besides 'bad' have been skipped?
76 # or have all other possible candidates besides 'bad' have been skipped?
77 tot = len(candidates)
77 tot = len(candidates)
78 unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
78 unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
79 if tot == 1 or not unskipped:
79 if tot == 1 or not unskipped:
80 return ([changelog.node(rev) for rev in candidates], 0, good)
80 return ([changelog.node(rev) for rev in candidates], 0, good)
81 perfect = tot // 2
81 perfect = tot // 2
82
82
83 # find the best node to test
83 # find the best node to test
84 best_rev = None
84 best_rev = None
85 best_len = -1
85 best_len = -1
86 poison = {}
86 poison = {}
87 for rev in candidates:
87 for rev in candidates:
88 if rev in poison:
88 if rev in poison:
89 for c in children.get(rev, []):
89 for c in children.get(rev, []):
90 poison[c] = True # poison children
90 poison[c] = True # poison children
91 continue
91 continue
92
92
93 a = ancestors[rev] or [rev]
93 a = ancestors[rev] or [rev]
94 ancestors[rev] = None
94 ancestors[rev] = None
95
95
96 x = len(a) # number of ancestors
96 x = len(a) # number of ancestors
97 y = tot - x # number of non-ancestors
97 y = tot - x # number of non-ancestors
98 value = min(x, y) # how good is this test?
98 value = min(x, y) # how good is this test?
99 if value > best_len and rev not in skip:
99 if value > best_len and rev not in skip:
100 best_len = value
100 best_len = value
101 best_rev = rev
101 best_rev = rev
102 if value == perfect: # found a perfect candidate? quit early
102 if value == perfect: # found a perfect candidate? quit early
103 break
103 break
104
104
105 if y < perfect and rev not in skip: # all downhill from here?
105 if y < perfect and rev not in skip: # all downhill from here?
106 for c in children.get(rev, []):
106 for c in children.get(rev, []):
107 poison[c] = True # poison children
107 poison[c] = True # poison children
108 continue
108 continue
109
109
110 for c in children.get(rev, []):
110 for c in children.get(rev, []):
111 if ancestors[c]:
111 if ancestors[c]:
112 ancestors[c] = dict.fromkeys(ancestors[c] + a).keys()
112 ancestors[c] = list(set(ancestors[c] + a))
113 else:
113 else:
114 ancestors[c] = a + [c]
114 ancestors[c] = a + [c]
115
115
116 assert best_rev is not None
116 assert best_rev is not None
117 best_node = changelog.node(best_rev)
117 best_node = changelog.node(best_rev)
118
118
119 return ([best_node], tot, good)
119 return ([best_node], tot, good)
120
120
121
121
122 def load_state(repo):
122 def load_state(repo):
123 state = {'good': [], 'bad': [], 'skip': []}
123 state = {'good': [], 'bad': [], 'skip': []}
124 if os.path.exists(repo.join("bisect.state")):
124 if os.path.exists(repo.join("bisect.state")):
125 for l in repo.opener("bisect.state"):
125 for l in repo.opener("bisect.state"):
126 kind, node = l[:-1].split()
126 kind, node = l[:-1].split()
127 node = repo.lookup(node)
127 node = repo.lookup(node)
128 if kind not in state:
128 if kind not in state:
129 raise util.Abort(_("unknown bisect kind %s") % kind)
129 raise util.Abort(_("unknown bisect kind %s") % kind)
130 state[kind].append(node)
130 state[kind].append(node)
131 return state
131 return state
132
132
133
133
134 def save_state(repo, state):
134 def save_state(repo, state):
135 f = repo.opener("bisect.state", "w", atomictemp=True)
135 f = repo.opener("bisect.state", "w", atomictemp=True)
136 wlock = repo.wlock()
136 wlock = repo.wlock()
137 try:
137 try:
138 for kind in state:
138 for kind in state:
139 for node in state[kind]:
139 for node in state[kind]:
140 f.write("%s %s\n" % (kind, hex(node)))
140 f.write("%s %s\n" % (kind, hex(node)))
141 f.rename()
141 f.rename()
142 finally:
142 finally:
143 wlock.release()
143 wlock.release()
144
144
@@ -1,2174 +1,2174 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui, store, encoding
12 import lock, transaction, stat, errno, ui, store, encoding
13 import os, time, util, extensions, hook, inspect, error
13 import os, time, util, extensions, hook, inspect, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16
16
17 from lock import release
17 from lock import release
18
18
19 class localrepository(repo.repository):
19 class localrepository(repo.repository):
20 capabilities = set(('lookup', 'changegroupsubset'))
20 capabilities = set(('lookup', 'changegroupsubset'))
21 supported = ('revlogv1', 'store', 'fncache')
21 supported = ('revlogv1', 'store', 'fncache')
22
22
23 def __init__(self, parentui, path=None, create=0):
23 def __init__(self, parentui, path=None, create=0):
24 repo.repository.__init__(self)
24 repo.repository.__init__(self)
25 self.root = os.path.realpath(path)
25 self.root = os.path.realpath(path)
26 self.path = os.path.join(self.root, ".hg")
26 self.path = os.path.join(self.root, ".hg")
27 self.origroot = path
27 self.origroot = path
28 self.opener = util.opener(self.path)
28 self.opener = util.opener(self.path)
29 self.wopener = util.opener(self.root)
29 self.wopener = util.opener(self.root)
30
30
31 if not os.path.isdir(self.path):
31 if not os.path.isdir(self.path):
32 if create:
32 if create:
33 if not os.path.exists(path):
33 if not os.path.exists(path):
34 os.mkdir(path)
34 os.mkdir(path)
35 os.mkdir(self.path)
35 os.mkdir(self.path)
36 requirements = ["revlogv1"]
36 requirements = ["revlogv1"]
37 if parentui.configbool('format', 'usestore', True):
37 if parentui.configbool('format', 'usestore', True):
38 os.mkdir(os.path.join(self.path, "store"))
38 os.mkdir(os.path.join(self.path, "store"))
39 requirements.append("store")
39 requirements.append("store")
40 if parentui.configbool('format', 'usefncache', True):
40 if parentui.configbool('format', 'usefncache', True):
41 requirements.append("fncache")
41 requirements.append("fncache")
42 # create an invalid changelog
42 # create an invalid changelog
43 self.opener("00changelog.i", "a").write(
43 self.opener("00changelog.i", "a").write(
44 '\0\0\0\2' # represents revlogv2
44 '\0\0\0\2' # represents revlogv2
45 ' dummy changelog to prevent using the old repo layout'
45 ' dummy changelog to prevent using the old repo layout'
46 )
46 )
47 reqfile = self.opener("requires", "w")
47 reqfile = self.opener("requires", "w")
48 for r in requirements:
48 for r in requirements:
49 reqfile.write("%s\n" % r)
49 reqfile.write("%s\n" % r)
50 reqfile.close()
50 reqfile.close()
51 else:
51 else:
52 raise error.RepoError(_("repository %s not found") % path)
52 raise error.RepoError(_("repository %s not found") % path)
53 elif create:
53 elif create:
54 raise error.RepoError(_("repository %s already exists") % path)
54 raise error.RepoError(_("repository %s already exists") % path)
55 else:
55 else:
56 # find requirements
56 # find requirements
57 requirements = []
57 requirements = []
58 try:
58 try:
59 requirements = self.opener("requires").read().splitlines()
59 requirements = self.opener("requires").read().splitlines()
60 for r in requirements:
60 for r in requirements:
61 if r not in self.supported:
61 if r not in self.supported:
62 raise error.RepoError(_("requirement '%s' not supported") % r)
62 raise error.RepoError(_("requirement '%s' not supported") % r)
63 except IOError, inst:
63 except IOError, inst:
64 if inst.errno != errno.ENOENT:
64 if inst.errno != errno.ENOENT:
65 raise
65 raise
66
66
67 self.store = store.store(requirements, self.path, util.opener)
67 self.store = store.store(requirements, self.path, util.opener)
68 self.spath = self.store.path
68 self.spath = self.store.path
69 self.sopener = self.store.opener
69 self.sopener = self.store.opener
70 self.sjoin = self.store.join
70 self.sjoin = self.store.join
71 self.opener.createmode = self.store.createmode
71 self.opener.createmode = self.store.createmode
72
72
73 self.ui = ui.ui(parentui=parentui)
73 self.ui = ui.ui(parentui=parentui)
74 try:
74 try:
75 self.ui.readconfig(self.join("hgrc"), self.root)
75 self.ui.readconfig(self.join("hgrc"), self.root)
76 extensions.loadall(self.ui)
76 extensions.loadall(self.ui)
77 except IOError:
77 except IOError:
78 pass
78 pass
79
79
80 self.tagscache = None
80 self.tagscache = None
81 self._tagstypecache = None
81 self._tagstypecache = None
82 self.branchcache = None
82 self.branchcache = None
83 self._ubranchcache = None # UTF-8 version of branchcache
83 self._ubranchcache = None # UTF-8 version of branchcache
84 self._branchcachetip = None
84 self._branchcachetip = None
85 self.nodetagscache = None
85 self.nodetagscache = None
86 self.filterpats = {}
86 self.filterpats = {}
87 self._datafilters = {}
87 self._datafilters = {}
88 self._transref = self._lockref = self._wlockref = None
88 self._transref = self._lockref = self._wlockref = None
89
89
90 def __getattr__(self, name):
90 def __getattr__(self, name):
91 if name == 'changelog':
91 if name == 'changelog':
92 self.changelog = changelog.changelog(self.sopener)
92 self.changelog = changelog.changelog(self.sopener)
93 if 'HG_PENDING' in os.environ:
93 if 'HG_PENDING' in os.environ:
94 p = os.environ['HG_PENDING']
94 p = os.environ['HG_PENDING']
95 if p.startswith(self.root):
95 if p.startswith(self.root):
96 self.changelog.readpending('00changelog.i.a')
96 self.changelog.readpending('00changelog.i.a')
97 self.sopener.defversion = self.changelog.version
97 self.sopener.defversion = self.changelog.version
98 return self.changelog
98 return self.changelog
99 if name == 'manifest':
99 if name == 'manifest':
100 self.changelog
100 self.changelog
101 self.manifest = manifest.manifest(self.sopener)
101 self.manifest = manifest.manifest(self.sopener)
102 return self.manifest
102 return self.manifest
103 if name == 'dirstate':
103 if name == 'dirstate':
104 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
104 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
105 return self.dirstate
105 return self.dirstate
106 else:
106 else:
107 raise AttributeError(name)
107 raise AttributeError(name)
108
108
109 def __getitem__(self, changeid):
109 def __getitem__(self, changeid):
110 if changeid == None:
110 if changeid == None:
111 return context.workingctx(self)
111 return context.workingctx(self)
112 return context.changectx(self, changeid)
112 return context.changectx(self, changeid)
113
113
114 def __nonzero__(self):
114 def __nonzero__(self):
115 return True
115 return True
116
116
117 def __len__(self):
117 def __len__(self):
118 return len(self.changelog)
118 return len(self.changelog)
119
119
120 def __iter__(self):
120 def __iter__(self):
121 for i in xrange(len(self)):
121 for i in xrange(len(self)):
122 yield i
122 yield i
123
123
124 def url(self):
124 def url(self):
125 return 'file:' + self.root
125 return 'file:' + self.root
126
126
127 def hook(self, name, throw=False, **args):
127 def hook(self, name, throw=False, **args):
128 return hook.hook(self.ui, self, name, throw, **args)
128 return hook.hook(self.ui, self, name, throw, **args)
129
129
130 tag_disallowed = ':\r\n'
130 tag_disallowed = ':\r\n'
131
131
132 def _tag(self, names, node, message, local, user, date, parent=None,
132 def _tag(self, names, node, message, local, user, date, parent=None,
133 extra={}):
133 extra={}):
134 use_dirstate = parent is None
134 use_dirstate = parent is None
135
135
136 if isinstance(names, str):
136 if isinstance(names, str):
137 allchars = names
137 allchars = names
138 names = (names,)
138 names = (names,)
139 else:
139 else:
140 allchars = ''.join(names)
140 allchars = ''.join(names)
141 for c in self.tag_disallowed:
141 for c in self.tag_disallowed:
142 if c in allchars:
142 if c in allchars:
143 raise util.Abort(_('%r cannot be used in a tag name') % c)
143 raise util.Abort(_('%r cannot be used in a tag name') % c)
144
144
145 for name in names:
145 for name in names:
146 self.hook('pretag', throw=True, node=hex(node), tag=name,
146 self.hook('pretag', throw=True, node=hex(node), tag=name,
147 local=local)
147 local=local)
148
148
149 def writetags(fp, names, munge, prevtags):
149 def writetags(fp, names, munge, prevtags):
150 fp.seek(0, 2)
150 fp.seek(0, 2)
151 if prevtags and prevtags[-1] != '\n':
151 if prevtags and prevtags[-1] != '\n':
152 fp.write('\n')
152 fp.write('\n')
153 for name in names:
153 for name in names:
154 m = munge and munge(name) or name
154 m = munge and munge(name) or name
155 if self._tagstypecache and name in self._tagstypecache:
155 if self._tagstypecache and name in self._tagstypecache:
156 old = self.tagscache.get(name, nullid)
156 old = self.tagscache.get(name, nullid)
157 fp.write('%s %s\n' % (hex(old), m))
157 fp.write('%s %s\n' % (hex(old), m))
158 fp.write('%s %s\n' % (hex(node), m))
158 fp.write('%s %s\n' % (hex(node), m))
159 fp.close()
159 fp.close()
160
160
161 prevtags = ''
161 prevtags = ''
162 if local:
162 if local:
163 try:
163 try:
164 fp = self.opener('localtags', 'r+')
164 fp = self.opener('localtags', 'r+')
165 except IOError:
165 except IOError:
166 fp = self.opener('localtags', 'a')
166 fp = self.opener('localtags', 'a')
167 else:
167 else:
168 prevtags = fp.read()
168 prevtags = fp.read()
169
169
170 # local tags are stored in the current charset
170 # local tags are stored in the current charset
171 writetags(fp, names, None, prevtags)
171 writetags(fp, names, None, prevtags)
172 for name in names:
172 for name in names:
173 self.hook('tag', node=hex(node), tag=name, local=local)
173 self.hook('tag', node=hex(node), tag=name, local=local)
174 return
174 return
175
175
176 if use_dirstate:
176 if use_dirstate:
177 try:
177 try:
178 fp = self.wfile('.hgtags', 'rb+')
178 fp = self.wfile('.hgtags', 'rb+')
179 except IOError:
179 except IOError:
180 fp = self.wfile('.hgtags', 'ab')
180 fp = self.wfile('.hgtags', 'ab')
181 else:
181 else:
182 prevtags = fp.read()
182 prevtags = fp.read()
183 else:
183 else:
184 try:
184 try:
185 prevtags = self.filectx('.hgtags', parent).data()
185 prevtags = self.filectx('.hgtags', parent).data()
186 except error.LookupError:
186 except error.LookupError:
187 pass
187 pass
188 fp = self.wfile('.hgtags', 'wb')
188 fp = self.wfile('.hgtags', 'wb')
189 if prevtags:
189 if prevtags:
190 fp.write(prevtags)
190 fp.write(prevtags)
191
191
192 # committed tags are stored in UTF-8
192 # committed tags are stored in UTF-8
193 writetags(fp, names, encoding.fromlocal, prevtags)
193 writetags(fp, names, encoding.fromlocal, prevtags)
194
194
195 if use_dirstate and '.hgtags' not in self.dirstate:
195 if use_dirstate and '.hgtags' not in self.dirstate:
196 self.add(['.hgtags'])
196 self.add(['.hgtags'])
197
197
198 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
198 tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
199 extra=extra)
199 extra=extra)
200
200
201 for name in names:
201 for name in names:
202 self.hook('tag', node=hex(node), tag=name, local=local)
202 self.hook('tag', node=hex(node), tag=name, local=local)
203
203
204 return tagnode
204 return tagnode
205
205
206 def tag(self, names, node, message, local, user, date):
206 def tag(self, names, node, message, local, user, date):
207 '''tag a revision with one or more symbolic names.
207 '''tag a revision with one or more symbolic names.
208
208
209 names is a list of strings or, when adding a single tag, names may be a
209 names is a list of strings or, when adding a single tag, names may be a
210 string.
210 string.
211
211
212 if local is True, the tags are stored in a per-repository file.
212 if local is True, the tags are stored in a per-repository file.
213 otherwise, they are stored in the .hgtags file, and a new
213 otherwise, they are stored in the .hgtags file, and a new
214 changeset is committed with the change.
214 changeset is committed with the change.
215
215
216 keyword arguments:
216 keyword arguments:
217
217
218 local: whether to store tags in non-version-controlled file
218 local: whether to store tags in non-version-controlled file
219 (default False)
219 (default False)
220
220
221 message: commit message to use if committing
221 message: commit message to use if committing
222
222
223 user: name of user to use if committing
223 user: name of user to use if committing
224
224
225 date: date tuple to use if committing'''
225 date: date tuple to use if committing'''
226
226
227 for x in self.status()[:5]:
227 for x in self.status()[:5]:
228 if '.hgtags' in x:
228 if '.hgtags' in x:
229 raise util.Abort(_('working copy of .hgtags is changed '
229 raise util.Abort(_('working copy of .hgtags is changed '
230 '(please commit .hgtags manually)'))
230 '(please commit .hgtags manually)'))
231
231
232 self.tags() # instantiate the cache
232 self.tags() # instantiate the cache
233 self._tag(names, node, message, local, user, date)
233 self._tag(names, node, message, local, user, date)
234
234
235 def tags(self):
235 def tags(self):
236 '''return a mapping of tag to node'''
236 '''return a mapping of tag to node'''
237 if self.tagscache:
237 if self.tagscache:
238 return self.tagscache
238 return self.tagscache
239
239
240 globaltags = {}
240 globaltags = {}
241 tagtypes = {}
241 tagtypes = {}
242
242
243 def readtags(lines, fn, tagtype):
243 def readtags(lines, fn, tagtype):
244 filetags = {}
244 filetags = {}
245 count = 0
245 count = 0
246
246
247 def warn(msg):
247 def warn(msg):
248 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
248 self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
249
249
250 for l in lines:
250 for l in lines:
251 count += 1
251 count += 1
252 if not l:
252 if not l:
253 continue
253 continue
254 s = l.split(" ", 1)
254 s = l.split(" ", 1)
255 if len(s) != 2:
255 if len(s) != 2:
256 warn(_("cannot parse entry"))
256 warn(_("cannot parse entry"))
257 continue
257 continue
258 node, key = s
258 node, key = s
259 key = encoding.tolocal(key.strip()) # stored in UTF-8
259 key = encoding.tolocal(key.strip()) # stored in UTF-8
260 try:
260 try:
261 bin_n = bin(node)
261 bin_n = bin(node)
262 except TypeError:
262 except TypeError:
263 warn(_("node '%s' is not well formed") % node)
263 warn(_("node '%s' is not well formed") % node)
264 continue
264 continue
265 if bin_n not in self.changelog.nodemap:
265 if bin_n not in self.changelog.nodemap:
266 warn(_("tag '%s' refers to unknown node") % key)
266 warn(_("tag '%s' refers to unknown node") % key)
267 continue
267 continue
268
268
269 h = []
269 h = []
270 if key in filetags:
270 if key in filetags:
271 n, h = filetags[key]
271 n, h = filetags[key]
272 h.append(n)
272 h.append(n)
273 filetags[key] = (bin_n, h)
273 filetags[key] = (bin_n, h)
274
274
275 for k, nh in filetags.iteritems():
275 for k, nh in filetags.iteritems():
276 if k not in globaltags:
276 if k not in globaltags:
277 globaltags[k] = nh
277 globaltags[k] = nh
278 tagtypes[k] = tagtype
278 tagtypes[k] = tagtype
279 continue
279 continue
280
280
281 # we prefer the global tag if:
281 # we prefer the global tag if:
282 # it supercedes us OR
282 # it supercedes us OR
283 # mutual supercedes and it has a higher rank
283 # mutual supercedes and it has a higher rank
284 # otherwise we win because we're tip-most
284 # otherwise we win because we're tip-most
285 an, ah = nh
285 an, ah = nh
286 bn, bh = globaltags[k]
286 bn, bh = globaltags[k]
287 if (bn != an and an in bh and
287 if (bn != an and an in bh and
288 (bn not in ah or len(bh) > len(ah))):
288 (bn not in ah or len(bh) > len(ah))):
289 an = bn
289 an = bn
290 ah.extend([n for n in bh if n not in ah])
290 ah.extend([n for n in bh if n not in ah])
291 globaltags[k] = an, ah
291 globaltags[k] = an, ah
292 tagtypes[k] = tagtype
292 tagtypes[k] = tagtype
293
293
294 # read the tags file from each head, ending with the tip
294 # read the tags file from each head, ending with the tip
295 f = None
295 f = None
296 for rev, node, fnode in self._hgtagsnodes():
296 for rev, node, fnode in self._hgtagsnodes():
297 f = (f and f.filectx(fnode) or
297 f = (f and f.filectx(fnode) or
298 self.filectx('.hgtags', fileid=fnode))
298 self.filectx('.hgtags', fileid=fnode))
299 readtags(f.data().splitlines(), f, "global")
299 readtags(f.data().splitlines(), f, "global")
300
300
301 try:
301 try:
302 data = encoding.fromlocal(self.opener("localtags").read())
302 data = encoding.fromlocal(self.opener("localtags").read())
303 # localtags are stored in the local character set
303 # localtags are stored in the local character set
304 # while the internal tag table is stored in UTF-8
304 # while the internal tag table is stored in UTF-8
305 readtags(data.splitlines(), "localtags", "local")
305 readtags(data.splitlines(), "localtags", "local")
306 except IOError:
306 except IOError:
307 pass
307 pass
308
308
309 self.tagscache = {}
309 self.tagscache = {}
310 self._tagstypecache = {}
310 self._tagstypecache = {}
311 for k, nh in globaltags.iteritems():
311 for k, nh in globaltags.iteritems():
312 n = nh[0]
312 n = nh[0]
313 if n != nullid:
313 if n != nullid:
314 self.tagscache[k] = n
314 self.tagscache[k] = n
315 self._tagstypecache[k] = tagtypes[k]
315 self._tagstypecache[k] = tagtypes[k]
316 self.tagscache['tip'] = self.changelog.tip()
316 self.tagscache['tip'] = self.changelog.tip()
317 return self.tagscache
317 return self.tagscache
318
318
319 def tagtype(self, tagname):
319 def tagtype(self, tagname):
320 '''
320 '''
321 return the type of the given tag. result can be:
321 return the type of the given tag. result can be:
322
322
323 'local' : a local tag
323 'local' : a local tag
324 'global' : a global tag
324 'global' : a global tag
325 None : tag does not exist
325 None : tag does not exist
326 '''
326 '''
327
327
328 self.tags()
328 self.tags()
329
329
330 return self._tagstypecache.get(tagname)
330 return self._tagstypecache.get(tagname)
331
331
332 def _hgtagsnodes(self):
332 def _hgtagsnodes(self):
333 heads = self.heads()
333 heads = self.heads()
334 heads.reverse()
334 heads.reverse()
335 last = {}
335 last = {}
336 ret = []
336 ret = []
337 for node in heads:
337 for node in heads:
338 c = self[node]
338 c = self[node]
339 rev = c.rev()
339 rev = c.rev()
340 try:
340 try:
341 fnode = c.filenode('.hgtags')
341 fnode = c.filenode('.hgtags')
342 except error.LookupError:
342 except error.LookupError:
343 continue
343 continue
344 ret.append((rev, node, fnode))
344 ret.append((rev, node, fnode))
345 if fnode in last:
345 if fnode in last:
346 ret[last[fnode]] = None
346 ret[last[fnode]] = None
347 last[fnode] = len(ret) - 1
347 last[fnode] = len(ret) - 1
348 return [item for item in ret if item]
348 return [item for item in ret if item]
349
349
350 def tagslist(self):
350 def tagslist(self):
351 '''return a list of tags ordered by revision'''
351 '''return a list of tags ordered by revision'''
352 l = []
352 l = []
353 for t, n in self.tags().iteritems():
353 for t, n in self.tags().iteritems():
354 try:
354 try:
355 r = self.changelog.rev(n)
355 r = self.changelog.rev(n)
356 except:
356 except:
357 r = -2 # sort to the beginning of the list if unknown
357 r = -2 # sort to the beginning of the list if unknown
358 l.append((r, t, n))
358 l.append((r, t, n))
359 return [(t, n) for r, t, n in util.sort(l)]
359 return [(t, n) for r, t, n in util.sort(l)]
360
360
361 def nodetags(self, node):
361 def nodetags(self, node):
362 '''return the tags associated with a node'''
362 '''return the tags associated with a node'''
363 if not self.nodetagscache:
363 if not self.nodetagscache:
364 self.nodetagscache = {}
364 self.nodetagscache = {}
365 for t, n in self.tags().iteritems():
365 for t, n in self.tags().iteritems():
366 self.nodetagscache.setdefault(n, []).append(t)
366 self.nodetagscache.setdefault(n, []).append(t)
367 return self.nodetagscache.get(node, [])
367 return self.nodetagscache.get(node, [])
368
368
369 def _branchtags(self, partial, lrev):
369 def _branchtags(self, partial, lrev):
370 # TODO: rename this function?
370 # TODO: rename this function?
371 tiprev = len(self) - 1
371 tiprev = len(self) - 1
372 if lrev != tiprev:
372 if lrev != tiprev:
373 self._updatebranchcache(partial, lrev+1, tiprev+1)
373 self._updatebranchcache(partial, lrev+1, tiprev+1)
374 self._writebranchcache(partial, self.changelog.tip(), tiprev)
374 self._writebranchcache(partial, self.changelog.tip(), tiprev)
375
375
376 return partial
376 return partial
377
377
378 def _branchheads(self):
378 def _branchheads(self):
379 tip = self.changelog.tip()
379 tip = self.changelog.tip()
380 if self.branchcache is not None and self._branchcachetip == tip:
380 if self.branchcache is not None and self._branchcachetip == tip:
381 return self.branchcache
381 return self.branchcache
382
382
383 oldtip = self._branchcachetip
383 oldtip = self._branchcachetip
384 self._branchcachetip = tip
384 self._branchcachetip = tip
385 if self.branchcache is None:
385 if self.branchcache is None:
386 self.branchcache = {} # avoid recursion in changectx
386 self.branchcache = {} # avoid recursion in changectx
387 else:
387 else:
388 self.branchcache.clear() # keep using the same dict
388 self.branchcache.clear() # keep using the same dict
389 if oldtip is None or oldtip not in self.changelog.nodemap:
389 if oldtip is None or oldtip not in self.changelog.nodemap:
390 partial, last, lrev = self._readbranchcache()
390 partial, last, lrev = self._readbranchcache()
391 else:
391 else:
392 lrev = self.changelog.rev(oldtip)
392 lrev = self.changelog.rev(oldtip)
393 partial = self._ubranchcache
393 partial = self._ubranchcache
394
394
395 self._branchtags(partial, lrev)
395 self._branchtags(partial, lrev)
396 # this private cache holds all heads (not just tips)
396 # this private cache holds all heads (not just tips)
397 self._ubranchcache = partial
397 self._ubranchcache = partial
398
398
399 # the branch cache is stored on disk as UTF-8, but in the local
399 # the branch cache is stored on disk as UTF-8, but in the local
400 # charset internally
400 # charset internally
401 for k, v in partial.iteritems():
401 for k, v in partial.iteritems():
402 self.branchcache[encoding.tolocal(k)] = v
402 self.branchcache[encoding.tolocal(k)] = v
403 return self.branchcache
403 return self.branchcache
404
404
405
405
406 def branchtags(self):
406 def branchtags(self):
407 '''return a dict where branch names map to the tipmost head of
407 '''return a dict where branch names map to the tipmost head of
408 the branch, open heads come before closed'''
408 the branch, open heads come before closed'''
409 bt = {}
409 bt = {}
410 for bn, heads in self._branchheads().iteritems():
410 for bn, heads in self._branchheads().iteritems():
411 head = None
411 head = None
412 for i in range(len(heads)-1, -1, -1):
412 for i in range(len(heads)-1, -1, -1):
413 h = heads[i]
413 h = heads[i]
414 if 'close' not in self.changelog.read(h)[5]:
414 if 'close' not in self.changelog.read(h)[5]:
415 head = h
415 head = h
416 break
416 break
417 # no open heads were found
417 # no open heads were found
418 if head is None:
418 if head is None:
419 head = heads[-1]
419 head = heads[-1]
420 bt[bn] = head
420 bt[bn] = head
421 return bt
421 return bt
422
422
423
423
424 def _readbranchcache(self):
424 def _readbranchcache(self):
425 partial = {}
425 partial = {}
426 try:
426 try:
427 f = self.opener("branchheads.cache")
427 f = self.opener("branchheads.cache")
428 lines = f.read().split('\n')
428 lines = f.read().split('\n')
429 f.close()
429 f.close()
430 except (IOError, OSError):
430 except (IOError, OSError):
431 return {}, nullid, nullrev
431 return {}, nullid, nullrev
432
432
433 try:
433 try:
434 last, lrev = lines.pop(0).split(" ", 1)
434 last, lrev = lines.pop(0).split(" ", 1)
435 last, lrev = bin(last), int(lrev)
435 last, lrev = bin(last), int(lrev)
436 if lrev >= len(self) or self[lrev].node() != last:
436 if lrev >= len(self) or self[lrev].node() != last:
437 # invalidate the cache
437 # invalidate the cache
438 raise ValueError('invalidating branch cache (tip differs)')
438 raise ValueError('invalidating branch cache (tip differs)')
439 for l in lines:
439 for l in lines:
440 if not l: continue
440 if not l: continue
441 node, label = l.split(" ", 1)
441 node, label = l.split(" ", 1)
442 partial.setdefault(label.strip(), []).append(bin(node))
442 partial.setdefault(label.strip(), []).append(bin(node))
443 except KeyboardInterrupt:
443 except KeyboardInterrupt:
444 raise
444 raise
445 except Exception, inst:
445 except Exception, inst:
446 if self.ui.debugflag:
446 if self.ui.debugflag:
447 self.ui.warn(str(inst), '\n')
447 self.ui.warn(str(inst), '\n')
448 partial, last, lrev = {}, nullid, nullrev
448 partial, last, lrev = {}, nullid, nullrev
449 return partial, last, lrev
449 return partial, last, lrev
450
450
451 def _writebranchcache(self, branches, tip, tiprev):
451 def _writebranchcache(self, branches, tip, tiprev):
452 try:
452 try:
453 f = self.opener("branchheads.cache", "w", atomictemp=True)
453 f = self.opener("branchheads.cache", "w", atomictemp=True)
454 f.write("%s %s\n" % (hex(tip), tiprev))
454 f.write("%s %s\n" % (hex(tip), tiprev))
455 for label, nodes in branches.iteritems():
455 for label, nodes in branches.iteritems():
456 for node in nodes:
456 for node in nodes:
457 f.write("%s %s\n" % (hex(node), label))
457 f.write("%s %s\n" % (hex(node), label))
458 f.rename()
458 f.rename()
459 except (IOError, OSError):
459 except (IOError, OSError):
460 pass
460 pass
461
461
462 def _updatebranchcache(self, partial, start, end):
462 def _updatebranchcache(self, partial, start, end):
463 for r in xrange(start, end):
463 for r in xrange(start, end):
464 c = self[r]
464 c = self[r]
465 b = c.branch()
465 b = c.branch()
466 bheads = partial.setdefault(b, [])
466 bheads = partial.setdefault(b, [])
467 bheads.append(c.node())
467 bheads.append(c.node())
468 for p in c.parents():
468 for p in c.parents():
469 pn = p.node()
469 pn = p.node()
470 if pn in bheads:
470 if pn in bheads:
471 bheads.remove(pn)
471 bheads.remove(pn)
472
472
473 def lookup(self, key):
473 def lookup(self, key):
474 if isinstance(key, int):
474 if isinstance(key, int):
475 return self.changelog.node(key)
475 return self.changelog.node(key)
476 elif key == '.':
476 elif key == '.':
477 return self.dirstate.parents()[0]
477 return self.dirstate.parents()[0]
478 elif key == 'null':
478 elif key == 'null':
479 return nullid
479 return nullid
480 elif key == 'tip':
480 elif key == 'tip':
481 return self.changelog.tip()
481 return self.changelog.tip()
482 n = self.changelog._match(key)
482 n = self.changelog._match(key)
483 if n:
483 if n:
484 return n
484 return n
485 if key in self.tags():
485 if key in self.tags():
486 return self.tags()[key]
486 return self.tags()[key]
487 if key in self.branchtags():
487 if key in self.branchtags():
488 return self.branchtags()[key]
488 return self.branchtags()[key]
489 n = self.changelog._partialmatch(key)
489 n = self.changelog._partialmatch(key)
490 if n:
490 if n:
491 return n
491 return n
492 try:
492 try:
493 if len(key) == 20:
493 if len(key) == 20:
494 key = hex(key)
494 key = hex(key)
495 except:
495 except:
496 pass
496 pass
497 raise error.RepoError(_("unknown revision '%s'") % key)
497 raise error.RepoError(_("unknown revision '%s'") % key)
498
498
499 def local(self):
499 def local(self):
500 return True
500 return True
501
501
502 def join(self, f):
502 def join(self, f):
503 return os.path.join(self.path, f)
503 return os.path.join(self.path, f)
504
504
505 def wjoin(self, f):
505 def wjoin(self, f):
506 return os.path.join(self.root, f)
506 return os.path.join(self.root, f)
507
507
508 def rjoin(self, f):
508 def rjoin(self, f):
509 return os.path.join(self.root, util.pconvert(f))
509 return os.path.join(self.root, util.pconvert(f))
510
510
511 def file(self, f):
511 def file(self, f):
512 if f[0] == '/':
512 if f[0] == '/':
513 f = f[1:]
513 f = f[1:]
514 return filelog.filelog(self.sopener, f)
514 return filelog.filelog(self.sopener, f)
515
515
516 def changectx(self, changeid):
516 def changectx(self, changeid):
517 return self[changeid]
517 return self[changeid]
518
518
519 def parents(self, changeid=None):
519 def parents(self, changeid=None):
520 '''get list of changectxs for parents of changeid'''
520 '''get list of changectxs for parents of changeid'''
521 return self[changeid].parents()
521 return self[changeid].parents()
522
522
523 def filectx(self, path, changeid=None, fileid=None):
523 def filectx(self, path, changeid=None, fileid=None):
524 """changeid can be a changeset revision, node, or tag.
524 """changeid can be a changeset revision, node, or tag.
525 fileid can be a file revision or node."""
525 fileid can be a file revision or node."""
526 return context.filectx(self, path, changeid, fileid)
526 return context.filectx(self, path, changeid, fileid)
527
527
528 def getcwd(self):
528 def getcwd(self):
529 return self.dirstate.getcwd()
529 return self.dirstate.getcwd()
530
530
531 def pathto(self, f, cwd=None):
531 def pathto(self, f, cwd=None):
532 return self.dirstate.pathto(f, cwd)
532 return self.dirstate.pathto(f, cwd)
533
533
534 def wfile(self, f, mode='r'):
534 def wfile(self, f, mode='r'):
535 return self.wopener(f, mode)
535 return self.wopener(f, mode)
536
536
537 def _link(self, f):
537 def _link(self, f):
538 return os.path.islink(self.wjoin(f))
538 return os.path.islink(self.wjoin(f))
539
539
540 def _filter(self, filter, filename, data):
540 def _filter(self, filter, filename, data):
541 if filter not in self.filterpats:
541 if filter not in self.filterpats:
542 l = []
542 l = []
543 for pat, cmd in self.ui.configitems(filter):
543 for pat, cmd in self.ui.configitems(filter):
544 if cmd == '!':
544 if cmd == '!':
545 continue
545 continue
546 mf = util.matcher(self.root, "", [pat], [], [])[1]
546 mf = util.matcher(self.root, "", [pat], [], [])[1]
547 fn = None
547 fn = None
548 params = cmd
548 params = cmd
549 for name, filterfn in self._datafilters.iteritems():
549 for name, filterfn in self._datafilters.iteritems():
550 if cmd.startswith(name):
550 if cmd.startswith(name):
551 fn = filterfn
551 fn = filterfn
552 params = cmd[len(name):].lstrip()
552 params = cmd[len(name):].lstrip()
553 break
553 break
554 if not fn:
554 if not fn:
555 fn = lambda s, c, **kwargs: util.filter(s, c)
555 fn = lambda s, c, **kwargs: util.filter(s, c)
556 # Wrap old filters not supporting keyword arguments
556 # Wrap old filters not supporting keyword arguments
557 if not inspect.getargspec(fn)[2]:
557 if not inspect.getargspec(fn)[2]:
558 oldfn = fn
558 oldfn = fn
559 fn = lambda s, c, **kwargs: oldfn(s, c)
559 fn = lambda s, c, **kwargs: oldfn(s, c)
560 l.append((mf, fn, params))
560 l.append((mf, fn, params))
561 self.filterpats[filter] = l
561 self.filterpats[filter] = l
562
562
563 for mf, fn, cmd in self.filterpats[filter]:
563 for mf, fn, cmd in self.filterpats[filter]:
564 if mf(filename):
564 if mf(filename):
565 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
565 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
566 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
566 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
567 break
567 break
568
568
569 return data
569 return data
570
570
571 def adddatafilter(self, name, filter):
571 def adddatafilter(self, name, filter):
572 self._datafilters[name] = filter
572 self._datafilters[name] = filter
573
573
574 def wread(self, filename):
574 def wread(self, filename):
575 if self._link(filename):
575 if self._link(filename):
576 data = os.readlink(self.wjoin(filename))
576 data = os.readlink(self.wjoin(filename))
577 else:
577 else:
578 data = self.wopener(filename, 'r').read()
578 data = self.wopener(filename, 'r').read()
579 return self._filter("encode", filename, data)
579 return self._filter("encode", filename, data)
580
580
581 def wwrite(self, filename, data, flags):
581 def wwrite(self, filename, data, flags):
582 data = self._filter("decode", filename, data)
582 data = self._filter("decode", filename, data)
583 try:
583 try:
584 os.unlink(self.wjoin(filename))
584 os.unlink(self.wjoin(filename))
585 except OSError:
585 except OSError:
586 pass
586 pass
587 if 'l' in flags:
587 if 'l' in flags:
588 self.wopener.symlink(data, filename)
588 self.wopener.symlink(data, filename)
589 else:
589 else:
590 self.wopener(filename, 'w').write(data)
590 self.wopener(filename, 'w').write(data)
591 if 'x' in flags:
591 if 'x' in flags:
592 util.set_flags(self.wjoin(filename), False, True)
592 util.set_flags(self.wjoin(filename), False, True)
593
593
594 def wwritedata(self, filename, data):
594 def wwritedata(self, filename, data):
595 return self._filter("decode", filename, data)
595 return self._filter("decode", filename, data)
596
596
597 def transaction(self):
597 def transaction(self):
598 tr = self._transref and self._transref() or None
598 tr = self._transref and self._transref() or None
599 if tr and tr.running():
599 if tr and tr.running():
600 return tr.nest()
600 return tr.nest()
601
601
602 # abort here if the journal already exists
602 # abort here if the journal already exists
603 if os.path.exists(self.sjoin("journal")):
603 if os.path.exists(self.sjoin("journal")):
604 raise error.RepoError(_("journal already exists - run hg recover"))
604 raise error.RepoError(_("journal already exists - run hg recover"))
605
605
606 # save dirstate for rollback
606 # save dirstate for rollback
607 try:
607 try:
608 ds = self.opener("dirstate").read()
608 ds = self.opener("dirstate").read()
609 except IOError:
609 except IOError:
610 ds = ""
610 ds = ""
611 self.opener("journal.dirstate", "w").write(ds)
611 self.opener("journal.dirstate", "w").write(ds)
612 self.opener("journal.branch", "w").write(self.dirstate.branch())
612 self.opener("journal.branch", "w").write(self.dirstate.branch())
613
613
614 renames = [(self.sjoin("journal"), self.sjoin("undo")),
614 renames = [(self.sjoin("journal"), self.sjoin("undo")),
615 (self.join("journal.dirstate"), self.join("undo.dirstate")),
615 (self.join("journal.dirstate"), self.join("undo.dirstate")),
616 (self.join("journal.branch"), self.join("undo.branch"))]
616 (self.join("journal.branch"), self.join("undo.branch"))]
617 tr = transaction.transaction(self.ui.warn, self.sopener,
617 tr = transaction.transaction(self.ui.warn, self.sopener,
618 self.sjoin("journal"),
618 self.sjoin("journal"),
619 aftertrans(renames),
619 aftertrans(renames),
620 self.store.createmode)
620 self.store.createmode)
621 self._transref = weakref.ref(tr)
621 self._transref = weakref.ref(tr)
622 return tr
622 return tr
623
623
624 def recover(self):
624 def recover(self):
625 lock = self.lock()
625 lock = self.lock()
626 try:
626 try:
627 if os.path.exists(self.sjoin("journal")):
627 if os.path.exists(self.sjoin("journal")):
628 self.ui.status(_("rolling back interrupted transaction\n"))
628 self.ui.status(_("rolling back interrupted transaction\n"))
629 transaction.rollback(self.sopener, self.sjoin("journal"))
629 transaction.rollback(self.sopener, self.sjoin("journal"))
630 self.invalidate()
630 self.invalidate()
631 return True
631 return True
632 else:
632 else:
633 self.ui.warn(_("no interrupted transaction available\n"))
633 self.ui.warn(_("no interrupted transaction available\n"))
634 return False
634 return False
635 finally:
635 finally:
636 lock.release()
636 lock.release()
637
637
638 def rollback(self):
638 def rollback(self):
639 wlock = lock = None
639 wlock = lock = None
640 try:
640 try:
641 wlock = self.wlock()
641 wlock = self.wlock()
642 lock = self.lock()
642 lock = self.lock()
643 if os.path.exists(self.sjoin("undo")):
643 if os.path.exists(self.sjoin("undo")):
644 self.ui.status(_("rolling back last transaction\n"))
644 self.ui.status(_("rolling back last transaction\n"))
645 transaction.rollback(self.sopener, self.sjoin("undo"))
645 transaction.rollback(self.sopener, self.sjoin("undo"))
646 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
646 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
647 try:
647 try:
648 branch = self.opener("undo.branch").read()
648 branch = self.opener("undo.branch").read()
649 self.dirstate.setbranch(branch)
649 self.dirstate.setbranch(branch)
650 except IOError:
650 except IOError:
651 self.ui.warn(_("Named branch could not be reset, "
651 self.ui.warn(_("Named branch could not be reset, "
652 "current branch still is: %s\n")
652 "current branch still is: %s\n")
653 % encoding.tolocal(self.dirstate.branch()))
653 % encoding.tolocal(self.dirstate.branch()))
654 self.invalidate()
654 self.invalidate()
655 self.dirstate.invalidate()
655 self.dirstate.invalidate()
656 else:
656 else:
657 self.ui.warn(_("no rollback information available\n"))
657 self.ui.warn(_("no rollback information available\n"))
658 finally:
658 finally:
659 release(lock, wlock)
659 release(lock, wlock)
660
660
661 def invalidate(self):
661 def invalidate(self):
662 for a in "changelog manifest".split():
662 for a in "changelog manifest".split():
663 if a in self.__dict__:
663 if a in self.__dict__:
664 delattr(self, a)
664 delattr(self, a)
665 self.tagscache = None
665 self.tagscache = None
666 self._tagstypecache = None
666 self._tagstypecache = None
667 self.nodetagscache = None
667 self.nodetagscache = None
668 self.branchcache = None
668 self.branchcache = None
669 self._ubranchcache = None
669 self._ubranchcache = None
670 self._branchcachetip = None
670 self._branchcachetip = None
671
671
672 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
672 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
673 try:
673 try:
674 l = lock.lock(lockname, 0, releasefn, desc=desc)
674 l = lock.lock(lockname, 0, releasefn, desc=desc)
675 except error.LockHeld, inst:
675 except error.LockHeld, inst:
676 if not wait:
676 if not wait:
677 raise
677 raise
678 self.ui.warn(_("waiting for lock on %s held by %r\n") %
678 self.ui.warn(_("waiting for lock on %s held by %r\n") %
679 (desc, inst.locker))
679 (desc, inst.locker))
680 # default to 600 seconds timeout
680 # default to 600 seconds timeout
681 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
681 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
682 releasefn, desc=desc)
682 releasefn, desc=desc)
683 if acquirefn:
683 if acquirefn:
684 acquirefn()
684 acquirefn()
685 return l
685 return l
686
686
687 def lock(self, wait=True):
687 def lock(self, wait=True):
688 l = self._lockref and self._lockref()
688 l = self._lockref and self._lockref()
689 if l is not None and l.held:
689 if l is not None and l.held:
690 l.lock()
690 l.lock()
691 return l
691 return l
692
692
693 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
693 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
694 _('repository %s') % self.origroot)
694 _('repository %s') % self.origroot)
695 self._lockref = weakref.ref(l)
695 self._lockref = weakref.ref(l)
696 return l
696 return l
697
697
698 def wlock(self, wait=True):
698 def wlock(self, wait=True):
699 l = self._wlockref and self._wlockref()
699 l = self._wlockref and self._wlockref()
700 if l is not None and l.held:
700 if l is not None and l.held:
701 l.lock()
701 l.lock()
702 return l
702 return l
703
703
704 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
704 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
705 self.dirstate.invalidate, _('working directory of %s') %
705 self.dirstate.invalidate, _('working directory of %s') %
706 self.origroot)
706 self.origroot)
707 self._wlockref = weakref.ref(l)
707 self._wlockref = weakref.ref(l)
708 return l
708 return l
709
709
710 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
710 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
711 """
711 """
712 commit an individual file as part of a larger transaction
712 commit an individual file as part of a larger transaction
713 """
713 """
714
714
715 fn = fctx.path()
715 fn = fctx.path()
716 t = fctx.data()
716 t = fctx.data()
717 fl = self.file(fn)
717 fl = self.file(fn)
718 fp1 = manifest1.get(fn, nullid)
718 fp1 = manifest1.get(fn, nullid)
719 fp2 = manifest2.get(fn, nullid)
719 fp2 = manifest2.get(fn, nullid)
720
720
721 meta = {}
721 meta = {}
722 cp = fctx.renamed()
722 cp = fctx.renamed()
723 if cp and cp[0] != fn:
723 if cp and cp[0] != fn:
724 # Mark the new revision of this file as a copy of another
724 # Mark the new revision of this file as a copy of another
725 # file. This copy data will effectively act as a parent
725 # file. This copy data will effectively act as a parent
726 # of this new revision. If this is a merge, the first
726 # of this new revision. If this is a merge, the first
727 # parent will be the nullid (meaning "look up the copy data")
727 # parent will be the nullid (meaning "look up the copy data")
728 # and the second one will be the other parent. For example:
728 # and the second one will be the other parent. For example:
729 #
729 #
730 # 0 --- 1 --- 3 rev1 changes file foo
730 # 0 --- 1 --- 3 rev1 changes file foo
731 # \ / rev2 renames foo to bar and changes it
731 # \ / rev2 renames foo to bar and changes it
732 # \- 2 -/ rev3 should have bar with all changes and
732 # \- 2 -/ rev3 should have bar with all changes and
733 # should record that bar descends from
733 # should record that bar descends from
734 # bar in rev2 and foo in rev1
734 # bar in rev2 and foo in rev1
735 #
735 #
736 # this allows this merge to succeed:
736 # this allows this merge to succeed:
737 #
737 #
738 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
738 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
739 # \ / merging rev3 and rev4 should use bar@rev2
739 # \ / merging rev3 and rev4 should use bar@rev2
740 # \- 2 --- 4 as the merge base
740 # \- 2 --- 4 as the merge base
741 #
741 #
742
742
743 cf = cp[0]
743 cf = cp[0]
744 cr = manifest1.get(cf)
744 cr = manifest1.get(cf)
745 nfp = fp2
745 nfp = fp2
746
746
747 if manifest2: # branch merge
747 if manifest2: # branch merge
748 if fp2 == nullid or cr is None: # copied on remote side
748 if fp2 == nullid or cr is None: # copied on remote side
749 if cf in manifest2:
749 if cf in manifest2:
750 cr = manifest2[cf]
750 cr = manifest2[cf]
751 nfp = fp1
751 nfp = fp1
752
752
753 # find source in nearest ancestor if we've lost track
753 # find source in nearest ancestor if we've lost track
754 if not cr:
754 if not cr:
755 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
755 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
756 (fn, cf))
756 (fn, cf))
757 for a in self['.'].ancestors():
757 for a in self['.'].ancestors():
758 if cf in a:
758 if cf in a:
759 cr = a[cf].filenode()
759 cr = a[cf].filenode()
760 break
760 break
761
761
762 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
762 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
763 meta["copy"] = cf
763 meta["copy"] = cf
764 meta["copyrev"] = hex(cr)
764 meta["copyrev"] = hex(cr)
765 fp1, fp2 = nullid, nfp
765 fp1, fp2 = nullid, nfp
766 elif fp2 != nullid:
766 elif fp2 != nullid:
767 # is one parent an ancestor of the other?
767 # is one parent an ancestor of the other?
768 fpa = fl.ancestor(fp1, fp2)
768 fpa = fl.ancestor(fp1, fp2)
769 if fpa == fp1:
769 if fpa == fp1:
770 fp1, fp2 = fp2, nullid
770 fp1, fp2 = fp2, nullid
771 elif fpa == fp2:
771 elif fpa == fp2:
772 fp2 = nullid
772 fp2 = nullid
773
773
774 # is the file unmodified from the parent? report existing entry
774 # is the file unmodified from the parent? report existing entry
775 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
775 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
776 return fp1
776 return fp1
777
777
778 changelist.append(fn)
778 changelist.append(fn)
779 return fl.add(t, meta, tr, linkrev, fp1, fp2)
779 return fl.add(t, meta, tr, linkrev, fp1, fp2)
780
780
781 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
781 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
782 if p1 is None:
782 if p1 is None:
783 p1, p2 = self.dirstate.parents()
783 p1, p2 = self.dirstate.parents()
784 return self.commit(files=files, text=text, user=user, date=date,
784 return self.commit(files=files, text=text, user=user, date=date,
785 p1=p1, p2=p2, extra=extra, empty_ok=True)
785 p1=p1, p2=p2, extra=extra, empty_ok=True)
786
786
787 def commit(self, files=None, text="", user=None, date=None,
787 def commit(self, files=None, text="", user=None, date=None,
788 match=None, force=False, force_editor=False,
788 match=None, force=False, force_editor=False,
789 p1=None, p2=None, extra={}, empty_ok=False):
789 p1=None, p2=None, extra={}, empty_ok=False):
790 wlock = lock = None
790 wlock = lock = None
791 if extra.get("close"):
791 if extra.get("close"):
792 force = True
792 force = True
793 if files:
793 if files:
794 files = list(set(files))
794 files = list(set(files))
795 try:
795 try:
796 wlock = self.wlock()
796 wlock = self.wlock()
797 lock = self.lock()
797 lock = self.lock()
798 use_dirstate = (p1 is None) # not rawcommit
798 use_dirstate = (p1 is None) # not rawcommit
799
799
800 if use_dirstate:
800 if use_dirstate:
801 p1, p2 = self.dirstate.parents()
801 p1, p2 = self.dirstate.parents()
802 update_dirstate = True
802 update_dirstate = True
803
803
804 if (not force and p2 != nullid and
804 if (not force and p2 != nullid and
805 (match and (match.files() or match.anypats()))):
805 (match and (match.files() or match.anypats()))):
806 raise util.Abort(_('cannot partially commit a merge '
806 raise util.Abort(_('cannot partially commit a merge '
807 '(do not specify files or patterns)'))
807 '(do not specify files or patterns)'))
808
808
809 if files:
809 if files:
810 modified, removed = [], []
810 modified, removed = [], []
811 for f in files:
811 for f in files:
812 s = self.dirstate[f]
812 s = self.dirstate[f]
813 if s in 'nma':
813 if s in 'nma':
814 modified.append(f)
814 modified.append(f)
815 elif s == 'r':
815 elif s == 'r':
816 removed.append(f)
816 removed.append(f)
817 else:
817 else:
818 self.ui.warn(_("%s not tracked!\n") % f)
818 self.ui.warn(_("%s not tracked!\n") % f)
819 changes = [modified, [], removed, [], []]
819 changes = [modified, [], removed, [], []]
820 else:
820 else:
821 changes = self.status(match=match)
821 changes = self.status(match=match)
822 else:
822 else:
823 p1, p2 = p1, p2 or nullid
823 p1, p2 = p1, p2 or nullid
824 update_dirstate = (self.dirstate.parents()[0] == p1)
824 update_dirstate = (self.dirstate.parents()[0] == p1)
825 changes = [files, [], [], [], []]
825 changes = [files, [], [], [], []]
826
826
827 ms = merge_.mergestate(self)
827 ms = merge_.mergestate(self)
828 for f in changes[0]:
828 for f in changes[0]:
829 if f in ms and ms[f] == 'u':
829 if f in ms and ms[f] == 'u':
830 raise util.Abort(_("unresolved merge conflicts "
830 raise util.Abort(_("unresolved merge conflicts "
831 "(see hg resolve)"))
831 "(see hg resolve)"))
832 wctx = context.workingctx(self, (p1, p2), text, user, date,
832 wctx = context.workingctx(self, (p1, p2), text, user, date,
833 extra, changes)
833 extra, changes)
834 r = self._commitctx(wctx, force, force_editor, empty_ok,
834 r = self._commitctx(wctx, force, force_editor, empty_ok,
835 use_dirstate, update_dirstate)
835 use_dirstate, update_dirstate)
836 ms.reset()
836 ms.reset()
837 return r
837 return r
838
838
839 finally:
839 finally:
840 release(lock, wlock)
840 release(lock, wlock)
841
841
842 def commitctx(self, ctx):
842 def commitctx(self, ctx):
843 """Add a new revision to current repository.
843 """Add a new revision to current repository.
844
844
845 Revision information is passed in the context.memctx argument.
845 Revision information is passed in the context.memctx argument.
846 commitctx() does not touch the working directory.
846 commitctx() does not touch the working directory.
847 """
847 """
848 wlock = lock = None
848 wlock = lock = None
849 try:
849 try:
850 wlock = self.wlock()
850 wlock = self.wlock()
851 lock = self.lock()
851 lock = self.lock()
852 return self._commitctx(ctx, force=True, force_editor=False,
852 return self._commitctx(ctx, force=True, force_editor=False,
853 empty_ok=True, use_dirstate=False,
853 empty_ok=True, use_dirstate=False,
854 update_dirstate=False)
854 update_dirstate=False)
855 finally:
855 finally:
856 release(lock, wlock)
856 release(lock, wlock)
857
857
858 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
858 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
859 use_dirstate=True, update_dirstate=True):
859 use_dirstate=True, update_dirstate=True):
860 tr = None
860 tr = None
861 valid = 0 # don't save the dirstate if this isn't set
861 valid = 0 # don't save the dirstate if this isn't set
862 try:
862 try:
863 commit = util.sort(wctx.modified() + wctx.added())
863 commit = util.sort(wctx.modified() + wctx.added())
864 remove = wctx.removed()
864 remove = wctx.removed()
865 extra = wctx.extra().copy()
865 extra = wctx.extra().copy()
866 branchname = extra['branch']
866 branchname = extra['branch']
867 user = wctx.user()
867 user = wctx.user()
868 text = wctx.description()
868 text = wctx.description()
869
869
870 p1, p2 = [p.node() for p in wctx.parents()]
870 p1, p2 = [p.node() for p in wctx.parents()]
871 c1 = self.changelog.read(p1)
871 c1 = self.changelog.read(p1)
872 c2 = self.changelog.read(p2)
872 c2 = self.changelog.read(p2)
873 m1 = self.manifest.read(c1[0]).copy()
873 m1 = self.manifest.read(c1[0]).copy()
874 m2 = self.manifest.read(c2[0])
874 m2 = self.manifest.read(c2[0])
875
875
876 if use_dirstate:
876 if use_dirstate:
877 oldname = c1[5].get("branch") # stored in UTF-8
877 oldname = c1[5].get("branch") # stored in UTF-8
878 if (not commit and not remove and not force and p2 == nullid
878 if (not commit and not remove and not force and p2 == nullid
879 and branchname == oldname):
879 and branchname == oldname):
880 self.ui.status(_("nothing changed\n"))
880 self.ui.status(_("nothing changed\n"))
881 return None
881 return None
882
882
883 xp1 = hex(p1)
883 xp1 = hex(p1)
884 if p2 == nullid: xp2 = ''
884 if p2 == nullid: xp2 = ''
885 else: xp2 = hex(p2)
885 else: xp2 = hex(p2)
886
886
887 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
887 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
888
888
889 tr = self.transaction()
889 tr = self.transaction()
890 trp = weakref.proxy(tr)
890 trp = weakref.proxy(tr)
891
891
892 # check in files
892 # check in files
893 new = {}
893 new = {}
894 changed = []
894 changed = []
895 linkrev = len(self)
895 linkrev = len(self)
896 for f in commit:
896 for f in commit:
897 self.ui.note(f + "\n")
897 self.ui.note(f + "\n")
898 try:
898 try:
899 fctx = wctx.filectx(f)
899 fctx = wctx.filectx(f)
900 newflags = fctx.flags()
900 newflags = fctx.flags()
901 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
901 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
902 if ((not changed or changed[-1] != f) and
902 if ((not changed or changed[-1] != f) and
903 m2.get(f) != new[f]):
903 m2.get(f) != new[f]):
904 # mention the file in the changelog if some
904 # mention the file in the changelog if some
905 # flag changed, even if there was no content
905 # flag changed, even if there was no content
906 # change.
906 # change.
907 if m1.flags(f) != newflags:
907 if m1.flags(f) != newflags:
908 changed.append(f)
908 changed.append(f)
909 m1.set(f, newflags)
909 m1.set(f, newflags)
910 if use_dirstate:
910 if use_dirstate:
911 self.dirstate.normal(f)
911 self.dirstate.normal(f)
912
912
913 except (OSError, IOError):
913 except (OSError, IOError):
914 if use_dirstate:
914 if use_dirstate:
915 self.ui.warn(_("trouble committing %s!\n") % f)
915 self.ui.warn(_("trouble committing %s!\n") % f)
916 raise
916 raise
917 else:
917 else:
918 remove.append(f)
918 remove.append(f)
919
919
920 updated, added = [], []
920 updated, added = [], []
921 for f in util.sort(changed):
921 for f in util.sort(changed):
922 if f in m1 or f in m2:
922 if f in m1 or f in m2:
923 updated.append(f)
923 updated.append(f)
924 else:
924 else:
925 added.append(f)
925 added.append(f)
926
926
927 # update manifest
927 # update manifest
928 m1.update(new)
928 m1.update(new)
929 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
929 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
930 removed1 = []
930 removed1 = []
931
931
932 for f in removed:
932 for f in removed:
933 if f in m1:
933 if f in m1:
934 del m1[f]
934 del m1[f]
935 removed1.append(f)
935 removed1.append(f)
936 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
936 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
937 (new, removed1))
937 (new, removed1))
938
938
939 # add changeset
939 # add changeset
940 if (not empty_ok and not text) or force_editor:
940 if (not empty_ok and not text) or force_editor:
941 edittext = []
941 edittext = []
942 if text:
942 if text:
943 edittext.append(text)
943 edittext.append(text)
944 edittext.append("")
944 edittext.append("")
945 edittext.append("") # Empty line between message and comments.
945 edittext.append("") # Empty line between message and comments.
946 edittext.append(_("HG: Enter commit message."
946 edittext.append(_("HG: Enter commit message."
947 " Lines beginning with 'HG:' are removed."))
947 " Lines beginning with 'HG:' are removed."))
948 edittext.append("HG: --")
948 edittext.append("HG: --")
949 edittext.append("HG: user: %s" % user)
949 edittext.append("HG: user: %s" % user)
950 if p2 != nullid:
950 if p2 != nullid:
951 edittext.append("HG: branch merge")
951 edittext.append("HG: branch merge")
952 if branchname:
952 if branchname:
953 edittext.append("HG: branch '%s'"
953 edittext.append("HG: branch '%s'"
954 % encoding.tolocal(branchname))
954 % encoding.tolocal(branchname))
955 edittext.extend(["HG: added %s" % f for f in added])
955 edittext.extend(["HG: added %s" % f for f in added])
956 edittext.extend(["HG: changed %s" % f for f in updated])
956 edittext.extend(["HG: changed %s" % f for f in updated])
957 edittext.extend(["HG: removed %s" % f for f in removed])
957 edittext.extend(["HG: removed %s" % f for f in removed])
958 if not added and not updated and not removed:
958 if not added and not updated and not removed:
959 edittext.append("HG: no files changed")
959 edittext.append("HG: no files changed")
960 edittext.append("")
960 edittext.append("")
961 # run editor in the repository root
961 # run editor in the repository root
962 olddir = os.getcwd()
962 olddir = os.getcwd()
963 os.chdir(self.root)
963 os.chdir(self.root)
964 text = self.ui.edit("\n".join(edittext), user)
964 text = self.ui.edit("\n".join(edittext), user)
965 os.chdir(olddir)
965 os.chdir(olddir)
966
966
967 lines = [line.rstrip() for line in text.rstrip().splitlines()]
967 lines = [line.rstrip() for line in text.rstrip().splitlines()]
968 while lines and not lines[0]:
968 while lines and not lines[0]:
969 del lines[0]
969 del lines[0]
970 if not lines and use_dirstate:
970 if not lines and use_dirstate:
971 raise util.Abort(_("empty commit message"))
971 raise util.Abort(_("empty commit message"))
972 text = '\n'.join(lines)
972 text = '\n'.join(lines)
973
973
974 self.changelog.delayupdate()
974 self.changelog.delayupdate()
975 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
975 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
976 user, wctx.date(), extra)
976 user, wctx.date(), extra)
977 p = lambda: self.changelog.writepending() and self.root or ""
977 p = lambda: self.changelog.writepending() and self.root or ""
978 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
978 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
979 parent2=xp2, pending=p)
979 parent2=xp2, pending=p)
980 self.changelog.finalize(trp)
980 self.changelog.finalize(trp)
981 tr.close()
981 tr.close()
982
982
983 if self.branchcache:
983 if self.branchcache:
984 self.branchtags()
984 self.branchtags()
985
985
986 if use_dirstate or update_dirstate:
986 if use_dirstate or update_dirstate:
987 self.dirstate.setparents(n)
987 self.dirstate.setparents(n)
988 if use_dirstate:
988 if use_dirstate:
989 for f in removed:
989 for f in removed:
990 self.dirstate.forget(f)
990 self.dirstate.forget(f)
991 valid = 1 # our dirstate updates are complete
991 valid = 1 # our dirstate updates are complete
992
992
993 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
993 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
994 return n
994 return n
995 finally:
995 finally:
996 if not valid: # don't save our updated dirstate
996 if not valid: # don't save our updated dirstate
997 self.dirstate.invalidate()
997 self.dirstate.invalidate()
998 del tr
998 del tr
999
999
1000 def walk(self, match, node=None):
1000 def walk(self, match, node=None):
1001 '''
1001 '''
1002 walk recursively through the directory tree or a given
1002 walk recursively through the directory tree or a given
1003 changeset, finding all files matched by the match
1003 changeset, finding all files matched by the match
1004 function
1004 function
1005 '''
1005 '''
1006 return self[node].walk(match)
1006 return self[node].walk(match)
1007
1007
1008 def status(self, node1='.', node2=None, match=None,
1008 def status(self, node1='.', node2=None, match=None,
1009 ignored=False, clean=False, unknown=False):
1009 ignored=False, clean=False, unknown=False):
1010 """return status of files between two nodes or node and working directory
1010 """return status of files between two nodes or node and working directory
1011
1011
1012 If node1 is None, use the first dirstate parent instead.
1012 If node1 is None, use the first dirstate parent instead.
1013 If node2 is None, compare node1 with working directory.
1013 If node2 is None, compare node1 with working directory.
1014 """
1014 """
1015
1015
1016 def mfmatches(ctx):
1016 def mfmatches(ctx):
1017 mf = ctx.manifest().copy()
1017 mf = ctx.manifest().copy()
1018 for fn in mf.keys():
1018 for fn in mf.keys():
1019 if not match(fn):
1019 if not match(fn):
1020 del mf[fn]
1020 del mf[fn]
1021 return mf
1021 return mf
1022
1022
1023 if isinstance(node1, context.changectx):
1023 if isinstance(node1, context.changectx):
1024 ctx1 = node1
1024 ctx1 = node1
1025 else:
1025 else:
1026 ctx1 = self[node1]
1026 ctx1 = self[node1]
1027 if isinstance(node2, context.changectx):
1027 if isinstance(node2, context.changectx):
1028 ctx2 = node2
1028 ctx2 = node2
1029 else:
1029 else:
1030 ctx2 = self[node2]
1030 ctx2 = self[node2]
1031
1031
1032 working = ctx2.rev() is None
1032 working = ctx2.rev() is None
1033 parentworking = working and ctx1 == self['.']
1033 parentworking = working and ctx1 == self['.']
1034 match = match or match_.always(self.root, self.getcwd())
1034 match = match or match_.always(self.root, self.getcwd())
1035 listignored, listclean, listunknown = ignored, clean, unknown
1035 listignored, listclean, listunknown = ignored, clean, unknown
1036
1036
1037 # load earliest manifest first for caching reasons
1037 # load earliest manifest first for caching reasons
1038 if not working and ctx2.rev() < ctx1.rev():
1038 if not working and ctx2.rev() < ctx1.rev():
1039 ctx2.manifest()
1039 ctx2.manifest()
1040
1040
1041 if not parentworking:
1041 if not parentworking:
1042 def bad(f, msg):
1042 def bad(f, msg):
1043 if f not in ctx1:
1043 if f not in ctx1:
1044 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1044 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1045 return False
1045 return False
1046 match.bad = bad
1046 match.bad = bad
1047
1047
1048 if working: # we need to scan the working dir
1048 if working: # we need to scan the working dir
1049 s = self.dirstate.status(match, listignored, listclean, listunknown)
1049 s = self.dirstate.status(match, listignored, listclean, listunknown)
1050 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1050 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1051
1051
1052 # check for any possibly clean files
1052 # check for any possibly clean files
1053 if parentworking and cmp:
1053 if parentworking and cmp:
1054 fixup = []
1054 fixup = []
1055 # do a full compare of any files that might have changed
1055 # do a full compare of any files that might have changed
1056 for f in cmp:
1056 for f in cmp:
1057 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1057 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1058 or ctx1[f].cmp(ctx2[f].data())):
1058 or ctx1[f].cmp(ctx2[f].data())):
1059 modified.append(f)
1059 modified.append(f)
1060 else:
1060 else:
1061 fixup.append(f)
1061 fixup.append(f)
1062
1062
1063 if listclean:
1063 if listclean:
1064 clean += fixup
1064 clean += fixup
1065
1065
1066 # update dirstate for files that are actually clean
1066 # update dirstate for files that are actually clean
1067 if fixup:
1067 if fixup:
1068 wlock = None
1068 wlock = None
1069 try:
1069 try:
1070 try:
1070 try:
1071 # updating the dirstate is optional
1071 # updating the dirstate is optional
1072 # so we dont wait on the lock
1072 # so we dont wait on the lock
1073 wlock = self.wlock(False)
1073 wlock = self.wlock(False)
1074 for f in fixup:
1074 for f in fixup:
1075 self.dirstate.normal(f)
1075 self.dirstate.normal(f)
1076 except error.LockError:
1076 except error.LockError:
1077 pass
1077 pass
1078 finally:
1078 finally:
1079 release(wlock)
1079 release(wlock)
1080
1080
1081 if not parentworking:
1081 if not parentworking:
1082 mf1 = mfmatches(ctx1)
1082 mf1 = mfmatches(ctx1)
1083 if working:
1083 if working:
1084 # we are comparing working dir against non-parent
1084 # we are comparing working dir against non-parent
1085 # generate a pseudo-manifest for the working dir
1085 # generate a pseudo-manifest for the working dir
1086 mf2 = mfmatches(self['.'])
1086 mf2 = mfmatches(self['.'])
1087 for f in cmp + modified + added:
1087 for f in cmp + modified + added:
1088 mf2[f] = None
1088 mf2[f] = None
1089 mf2.set(f, ctx2.flags(f))
1089 mf2.set(f, ctx2.flags(f))
1090 for f in removed:
1090 for f in removed:
1091 if f in mf2:
1091 if f in mf2:
1092 del mf2[f]
1092 del mf2[f]
1093 else:
1093 else:
1094 # we are comparing two revisions
1094 # we are comparing two revisions
1095 deleted, unknown, ignored = [], [], []
1095 deleted, unknown, ignored = [], [], []
1096 mf2 = mfmatches(ctx2)
1096 mf2 = mfmatches(ctx2)
1097
1097
1098 modified, added, clean = [], [], []
1098 modified, added, clean = [], [], []
1099 for fn in mf2:
1099 for fn in mf2:
1100 if fn in mf1:
1100 if fn in mf1:
1101 if (mf1.flags(fn) != mf2.flags(fn) or
1101 if (mf1.flags(fn) != mf2.flags(fn) or
1102 (mf1[fn] != mf2[fn] and
1102 (mf1[fn] != mf2[fn] and
1103 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1103 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1104 modified.append(fn)
1104 modified.append(fn)
1105 elif listclean:
1105 elif listclean:
1106 clean.append(fn)
1106 clean.append(fn)
1107 del mf1[fn]
1107 del mf1[fn]
1108 else:
1108 else:
1109 added.append(fn)
1109 added.append(fn)
1110 removed = mf1.keys()
1110 removed = mf1.keys()
1111
1111
1112 r = modified, added, removed, deleted, unknown, ignored, clean
1112 r = modified, added, removed, deleted, unknown, ignored, clean
1113 [l.sort() for l in r]
1113 [l.sort() for l in r]
1114 return r
1114 return r
1115
1115
1116 def add(self, list):
1116 def add(self, list):
1117 wlock = self.wlock()
1117 wlock = self.wlock()
1118 try:
1118 try:
1119 rejected = []
1119 rejected = []
1120 for f in list:
1120 for f in list:
1121 p = self.wjoin(f)
1121 p = self.wjoin(f)
1122 try:
1122 try:
1123 st = os.lstat(p)
1123 st = os.lstat(p)
1124 except:
1124 except:
1125 self.ui.warn(_("%s does not exist!\n") % f)
1125 self.ui.warn(_("%s does not exist!\n") % f)
1126 rejected.append(f)
1126 rejected.append(f)
1127 continue
1127 continue
1128 if st.st_size > 10000000:
1128 if st.st_size > 10000000:
1129 self.ui.warn(_("%s: files over 10MB may cause memory and"
1129 self.ui.warn(_("%s: files over 10MB may cause memory and"
1130 " performance problems\n"
1130 " performance problems\n"
1131 "(use 'hg revert %s' to unadd the file)\n")
1131 "(use 'hg revert %s' to unadd the file)\n")
1132 % (f, f))
1132 % (f, f))
1133 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1133 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1134 self.ui.warn(_("%s not added: only files and symlinks "
1134 self.ui.warn(_("%s not added: only files and symlinks "
1135 "supported currently\n") % f)
1135 "supported currently\n") % f)
1136 rejected.append(p)
1136 rejected.append(p)
1137 elif self.dirstate[f] in 'amn':
1137 elif self.dirstate[f] in 'amn':
1138 self.ui.warn(_("%s already tracked!\n") % f)
1138 self.ui.warn(_("%s already tracked!\n") % f)
1139 elif self.dirstate[f] == 'r':
1139 elif self.dirstate[f] == 'r':
1140 self.dirstate.normallookup(f)
1140 self.dirstate.normallookup(f)
1141 else:
1141 else:
1142 self.dirstate.add(f)
1142 self.dirstate.add(f)
1143 return rejected
1143 return rejected
1144 finally:
1144 finally:
1145 wlock.release()
1145 wlock.release()
1146
1146
1147 def forget(self, list):
1147 def forget(self, list):
1148 wlock = self.wlock()
1148 wlock = self.wlock()
1149 try:
1149 try:
1150 for f in list:
1150 for f in list:
1151 if self.dirstate[f] != 'a':
1151 if self.dirstate[f] != 'a':
1152 self.ui.warn(_("%s not added!\n") % f)
1152 self.ui.warn(_("%s not added!\n") % f)
1153 else:
1153 else:
1154 self.dirstate.forget(f)
1154 self.dirstate.forget(f)
1155 finally:
1155 finally:
1156 wlock.release()
1156 wlock.release()
1157
1157
1158 def remove(self, list, unlink=False):
1158 def remove(self, list, unlink=False):
1159 wlock = None
1159 wlock = None
1160 try:
1160 try:
1161 if unlink:
1161 if unlink:
1162 for f in list:
1162 for f in list:
1163 try:
1163 try:
1164 util.unlink(self.wjoin(f))
1164 util.unlink(self.wjoin(f))
1165 except OSError, inst:
1165 except OSError, inst:
1166 if inst.errno != errno.ENOENT:
1166 if inst.errno != errno.ENOENT:
1167 raise
1167 raise
1168 wlock = self.wlock()
1168 wlock = self.wlock()
1169 for f in list:
1169 for f in list:
1170 if unlink and os.path.exists(self.wjoin(f)):
1170 if unlink and os.path.exists(self.wjoin(f)):
1171 self.ui.warn(_("%s still exists!\n") % f)
1171 self.ui.warn(_("%s still exists!\n") % f)
1172 elif self.dirstate[f] == 'a':
1172 elif self.dirstate[f] == 'a':
1173 self.dirstate.forget(f)
1173 self.dirstate.forget(f)
1174 elif f not in self.dirstate:
1174 elif f not in self.dirstate:
1175 self.ui.warn(_("%s not tracked!\n") % f)
1175 self.ui.warn(_("%s not tracked!\n") % f)
1176 else:
1176 else:
1177 self.dirstate.remove(f)
1177 self.dirstate.remove(f)
1178 finally:
1178 finally:
1179 release(wlock)
1179 release(wlock)
1180
1180
1181 def undelete(self, list):
1181 def undelete(self, list):
1182 manifests = [self.manifest.read(self.changelog.read(p)[0])
1182 manifests = [self.manifest.read(self.changelog.read(p)[0])
1183 for p in self.dirstate.parents() if p != nullid]
1183 for p in self.dirstate.parents() if p != nullid]
1184 wlock = self.wlock()
1184 wlock = self.wlock()
1185 try:
1185 try:
1186 for f in list:
1186 for f in list:
1187 if self.dirstate[f] != 'r':
1187 if self.dirstate[f] != 'r':
1188 self.ui.warn(_("%s not removed!\n") % f)
1188 self.ui.warn(_("%s not removed!\n") % f)
1189 else:
1189 else:
1190 m = f in manifests[0] and manifests[0] or manifests[1]
1190 m = f in manifests[0] and manifests[0] or manifests[1]
1191 t = self.file(f).read(m[f])
1191 t = self.file(f).read(m[f])
1192 self.wwrite(f, t, m.flags(f))
1192 self.wwrite(f, t, m.flags(f))
1193 self.dirstate.normal(f)
1193 self.dirstate.normal(f)
1194 finally:
1194 finally:
1195 wlock.release()
1195 wlock.release()
1196
1196
1197 def copy(self, source, dest):
1197 def copy(self, source, dest):
1198 p = self.wjoin(dest)
1198 p = self.wjoin(dest)
1199 if not (os.path.exists(p) or os.path.islink(p)):
1199 if not (os.path.exists(p) or os.path.islink(p)):
1200 self.ui.warn(_("%s does not exist!\n") % dest)
1200 self.ui.warn(_("%s does not exist!\n") % dest)
1201 elif not (os.path.isfile(p) or os.path.islink(p)):
1201 elif not (os.path.isfile(p) or os.path.islink(p)):
1202 self.ui.warn(_("copy failed: %s is not a file or a "
1202 self.ui.warn(_("copy failed: %s is not a file or a "
1203 "symbolic link\n") % dest)
1203 "symbolic link\n") % dest)
1204 else:
1204 else:
1205 wlock = self.wlock()
1205 wlock = self.wlock()
1206 try:
1206 try:
1207 if self.dirstate[dest] in '?r':
1207 if self.dirstate[dest] in '?r':
1208 self.dirstate.add(dest)
1208 self.dirstate.add(dest)
1209 self.dirstate.copy(source, dest)
1209 self.dirstate.copy(source, dest)
1210 finally:
1210 finally:
1211 wlock.release()
1211 wlock.release()
1212
1212
1213 def heads(self, start=None, closed=True):
1213 def heads(self, start=None, closed=True):
1214 heads = self.changelog.heads(start)
1214 heads = self.changelog.heads(start)
1215 def display(head):
1215 def display(head):
1216 if closed:
1216 if closed:
1217 return True
1217 return True
1218 extras = self.changelog.read(head)[5]
1218 extras = self.changelog.read(head)[5]
1219 return ('close' not in extras)
1219 return ('close' not in extras)
1220 # sort the output in rev descending order
1220 # sort the output in rev descending order
1221 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1221 heads = [(-self.changelog.rev(h), h) for h in heads if display(h)]
1222 return [n for (r, n) in util.sort(heads)]
1222 return [n for (r, n) in util.sort(heads)]
1223
1223
1224 def branchheads(self, branch=None, start=None, closed=True):
1224 def branchheads(self, branch=None, start=None, closed=True):
1225 if branch is None:
1225 if branch is None:
1226 branch = self[None].branch()
1226 branch = self[None].branch()
1227 branches = self._branchheads()
1227 branches = self._branchheads()
1228 if branch not in branches:
1228 if branch not in branches:
1229 return []
1229 return []
1230 bheads = branches[branch]
1230 bheads = branches[branch]
1231 # the cache returns heads ordered lowest to highest
1231 # the cache returns heads ordered lowest to highest
1232 bheads.reverse()
1232 bheads.reverse()
1233 if start is not None:
1233 if start is not None:
1234 # filter out the heads that cannot be reached from startrev
1234 # filter out the heads that cannot be reached from startrev
1235 bheads = self.changelog.nodesbetween([start], bheads)[2]
1235 bheads = self.changelog.nodesbetween([start], bheads)[2]
1236 if not closed:
1236 if not closed:
1237 bheads = [h for h in bheads if
1237 bheads = [h for h in bheads if
1238 ('close' not in self.changelog.read(h)[5])]
1238 ('close' not in self.changelog.read(h)[5])]
1239 return bheads
1239 return bheads
1240
1240
1241 def branches(self, nodes):
1241 def branches(self, nodes):
1242 if not nodes:
1242 if not nodes:
1243 nodes = [self.changelog.tip()]
1243 nodes = [self.changelog.tip()]
1244 b = []
1244 b = []
1245 for n in nodes:
1245 for n in nodes:
1246 t = n
1246 t = n
1247 while 1:
1247 while 1:
1248 p = self.changelog.parents(n)
1248 p = self.changelog.parents(n)
1249 if p[1] != nullid or p[0] == nullid:
1249 if p[1] != nullid or p[0] == nullid:
1250 b.append((t, n, p[0], p[1]))
1250 b.append((t, n, p[0], p[1]))
1251 break
1251 break
1252 n = p[0]
1252 n = p[0]
1253 return b
1253 return b
1254
1254
1255 def between(self, pairs):
1255 def between(self, pairs):
1256 r = []
1256 r = []
1257
1257
1258 for top, bottom in pairs:
1258 for top, bottom in pairs:
1259 n, l, i = top, [], 0
1259 n, l, i = top, [], 0
1260 f = 1
1260 f = 1
1261
1261
1262 while n != bottom and n != nullid:
1262 while n != bottom and n != nullid:
1263 p = self.changelog.parents(n)[0]
1263 p = self.changelog.parents(n)[0]
1264 if i == f:
1264 if i == f:
1265 l.append(n)
1265 l.append(n)
1266 f = f * 2
1266 f = f * 2
1267 n = p
1267 n = p
1268 i += 1
1268 i += 1
1269
1269
1270 r.append(l)
1270 r.append(l)
1271
1271
1272 return r
1272 return r
1273
1273
1274 def findincoming(self, remote, base=None, heads=None, force=False):
1274 def findincoming(self, remote, base=None, heads=None, force=False):
1275 """Return list of roots of the subsets of missing nodes from remote
1275 """Return list of roots of the subsets of missing nodes from remote
1276
1276
1277 If base dict is specified, assume that these nodes and their parents
1277 If base dict is specified, assume that these nodes and their parents
1278 exist on the remote side and that no child of a node of base exists
1278 exist on the remote side and that no child of a node of base exists
1279 in both remote and self.
1279 in both remote and self.
1280 Furthermore base will be updated to include the nodes that exists
1280 Furthermore base will be updated to include the nodes that exists
1281 in self and remote but no children exists in self and remote.
1281 in self and remote but no children exists in self and remote.
1282 If a list of heads is specified, return only nodes which are heads
1282 If a list of heads is specified, return only nodes which are heads
1283 or ancestors of these heads.
1283 or ancestors of these heads.
1284
1284
1285 All the ancestors of base are in self and in remote.
1285 All the ancestors of base are in self and in remote.
1286 All the descendants of the list returned are missing in self.
1286 All the descendants of the list returned are missing in self.
1287 (and so we know that the rest of the nodes are missing in remote, see
1287 (and so we know that the rest of the nodes are missing in remote, see
1288 outgoing)
1288 outgoing)
1289 """
1289 """
1290 return self.findcommonincoming(remote, base, heads, force)[1]
1290 return self.findcommonincoming(remote, base, heads, force)[1]
1291
1291
1292 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1292 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1293 """Return a tuple (common, missing roots, heads) used to identify
1293 """Return a tuple (common, missing roots, heads) used to identify
1294 missing nodes from remote.
1294 missing nodes from remote.
1295
1295
1296 If base dict is specified, assume that these nodes and their parents
1296 If base dict is specified, assume that these nodes and their parents
1297 exist on the remote side and that no child of a node of base exists
1297 exist on the remote side and that no child of a node of base exists
1298 in both remote and self.
1298 in both remote and self.
1299 Furthermore base will be updated to include the nodes that exists
1299 Furthermore base will be updated to include the nodes that exists
1300 in self and remote but no children exists in self and remote.
1300 in self and remote but no children exists in self and remote.
1301 If a list of heads is specified, return only nodes which are heads
1301 If a list of heads is specified, return only nodes which are heads
1302 or ancestors of these heads.
1302 or ancestors of these heads.
1303
1303
1304 All the ancestors of base are in self and in remote.
1304 All the ancestors of base are in self and in remote.
1305 """
1305 """
1306 m = self.changelog.nodemap
1306 m = self.changelog.nodemap
1307 search = []
1307 search = []
1308 fetch = {}
1308 fetch = {}
1309 seen = {}
1309 seen = {}
1310 seenbranch = {}
1310 seenbranch = {}
1311 if base == None:
1311 if base == None:
1312 base = {}
1312 base = {}
1313
1313
1314 if not heads:
1314 if not heads:
1315 heads = remote.heads()
1315 heads = remote.heads()
1316
1316
1317 if self.changelog.tip() == nullid:
1317 if self.changelog.tip() == nullid:
1318 base[nullid] = 1
1318 base[nullid] = 1
1319 if heads != [nullid]:
1319 if heads != [nullid]:
1320 return [nullid], [nullid], list(heads)
1320 return [nullid], [nullid], list(heads)
1321 return [nullid], [], []
1321 return [nullid], [], []
1322
1322
1323 # assume we're closer to the tip than the root
1323 # assume we're closer to the tip than the root
1324 # and start by examining the heads
1324 # and start by examining the heads
1325 self.ui.status(_("searching for changes\n"))
1325 self.ui.status(_("searching for changes\n"))
1326
1326
1327 unknown = []
1327 unknown = []
1328 for h in heads:
1328 for h in heads:
1329 if h not in m:
1329 if h not in m:
1330 unknown.append(h)
1330 unknown.append(h)
1331 else:
1331 else:
1332 base[h] = 1
1332 base[h] = 1
1333
1333
1334 heads = unknown
1334 heads = unknown
1335 if not unknown:
1335 if not unknown:
1336 return base.keys(), [], []
1336 return base.keys(), [], []
1337
1337
1338 req = dict.fromkeys(unknown)
1338 req = set(unknown)
1339 reqcnt = 0
1339 reqcnt = 0
1340
1340
1341 # search through remote branches
1341 # search through remote branches
1342 # a 'branch' here is a linear segment of history, with four parts:
1342 # a 'branch' here is a linear segment of history, with four parts:
1343 # head, root, first parent, second parent
1343 # head, root, first parent, second parent
1344 # (a branch always has two parents (or none) by definition)
1344 # (a branch always has two parents (or none) by definition)
1345 unknown = remote.branches(unknown)
1345 unknown = remote.branches(unknown)
1346 while unknown:
1346 while unknown:
1347 r = []
1347 r = []
1348 while unknown:
1348 while unknown:
1349 n = unknown.pop(0)
1349 n = unknown.pop(0)
1350 if n[0] in seen:
1350 if n[0] in seen:
1351 continue
1351 continue
1352
1352
1353 self.ui.debug(_("examining %s:%s\n")
1353 self.ui.debug(_("examining %s:%s\n")
1354 % (short(n[0]), short(n[1])))
1354 % (short(n[0]), short(n[1])))
1355 if n[0] == nullid: # found the end of the branch
1355 if n[0] == nullid: # found the end of the branch
1356 pass
1356 pass
1357 elif n in seenbranch:
1357 elif n in seenbranch:
1358 self.ui.debug(_("branch already found\n"))
1358 self.ui.debug(_("branch already found\n"))
1359 continue
1359 continue
1360 elif n[1] and n[1] in m: # do we know the base?
1360 elif n[1] and n[1] in m: # do we know the base?
1361 self.ui.debug(_("found incomplete branch %s:%s\n")
1361 self.ui.debug(_("found incomplete branch %s:%s\n")
1362 % (short(n[0]), short(n[1])))
1362 % (short(n[0]), short(n[1])))
1363 search.append(n[0:2]) # schedule branch range for scanning
1363 search.append(n[0:2]) # schedule branch range for scanning
1364 seenbranch[n] = 1
1364 seenbranch[n] = 1
1365 else:
1365 else:
1366 if n[1] not in seen and n[1] not in fetch:
1366 if n[1] not in seen and n[1] not in fetch:
1367 if n[2] in m and n[3] in m:
1367 if n[2] in m and n[3] in m:
1368 self.ui.debug(_("found new changeset %s\n") %
1368 self.ui.debug(_("found new changeset %s\n") %
1369 short(n[1]))
1369 short(n[1]))
1370 fetch[n[1]] = 1 # earliest unknown
1370 fetch[n[1]] = 1 # earliest unknown
1371 for p in n[2:4]:
1371 for p in n[2:4]:
1372 if p in m:
1372 if p in m:
1373 base[p] = 1 # latest known
1373 base[p] = 1 # latest known
1374
1374
1375 for p in n[2:4]:
1375 for p in n[2:4]:
1376 if p not in req and p not in m:
1376 if p not in req and p not in m:
1377 r.append(p)
1377 r.append(p)
1378 req[p] = 1
1378 req.add(p)
1379 seen[n[0]] = 1
1379 seen[n[0]] = 1
1380
1380
1381 if r:
1381 if r:
1382 reqcnt += 1
1382 reqcnt += 1
1383 self.ui.debug(_("request %d: %s\n") %
1383 self.ui.debug(_("request %d: %s\n") %
1384 (reqcnt, " ".join(map(short, r))))
1384 (reqcnt, " ".join(map(short, r))))
1385 for p in xrange(0, len(r), 10):
1385 for p in xrange(0, len(r), 10):
1386 for b in remote.branches(r[p:p+10]):
1386 for b in remote.branches(r[p:p+10]):
1387 self.ui.debug(_("received %s:%s\n") %
1387 self.ui.debug(_("received %s:%s\n") %
1388 (short(b[0]), short(b[1])))
1388 (short(b[0]), short(b[1])))
1389 unknown.append(b)
1389 unknown.append(b)
1390
1390
1391 # do binary search on the branches we found
1391 # do binary search on the branches we found
1392 while search:
1392 while search:
1393 newsearch = []
1393 newsearch = []
1394 reqcnt += 1
1394 reqcnt += 1
1395 for n, l in zip(search, remote.between(search)):
1395 for n, l in zip(search, remote.between(search)):
1396 l.append(n[1])
1396 l.append(n[1])
1397 p = n[0]
1397 p = n[0]
1398 f = 1
1398 f = 1
1399 for i in l:
1399 for i in l:
1400 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1400 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1401 if i in m:
1401 if i in m:
1402 if f <= 2:
1402 if f <= 2:
1403 self.ui.debug(_("found new branch changeset %s\n") %
1403 self.ui.debug(_("found new branch changeset %s\n") %
1404 short(p))
1404 short(p))
1405 fetch[p] = 1
1405 fetch[p] = 1
1406 base[i] = 1
1406 base[i] = 1
1407 else:
1407 else:
1408 self.ui.debug(_("narrowed branch search to %s:%s\n")
1408 self.ui.debug(_("narrowed branch search to %s:%s\n")
1409 % (short(p), short(i)))
1409 % (short(p), short(i)))
1410 newsearch.append((p, i))
1410 newsearch.append((p, i))
1411 break
1411 break
1412 p, f = i, f * 2
1412 p, f = i, f * 2
1413 search = newsearch
1413 search = newsearch
1414
1414
1415 # sanity check our fetch list
1415 # sanity check our fetch list
1416 for f in fetch.keys():
1416 for f in fetch.keys():
1417 if f in m:
1417 if f in m:
1418 raise error.RepoError(_("already have changeset ")
1418 raise error.RepoError(_("already have changeset ")
1419 + short(f[:4]))
1419 + short(f[:4]))
1420
1420
1421 if base.keys() == [nullid]:
1421 if base.keys() == [nullid]:
1422 if force:
1422 if force:
1423 self.ui.warn(_("warning: repository is unrelated\n"))
1423 self.ui.warn(_("warning: repository is unrelated\n"))
1424 else:
1424 else:
1425 raise util.Abort(_("repository is unrelated"))
1425 raise util.Abort(_("repository is unrelated"))
1426
1426
1427 self.ui.debug(_("found new changesets starting at ") +
1427 self.ui.debug(_("found new changesets starting at ") +
1428 " ".join([short(f) for f in fetch]) + "\n")
1428 " ".join([short(f) for f in fetch]) + "\n")
1429
1429
1430 self.ui.debug(_("%d total queries\n") % reqcnt)
1430 self.ui.debug(_("%d total queries\n") % reqcnt)
1431
1431
1432 return base.keys(), fetch.keys(), heads
1432 return base.keys(), fetch.keys(), heads
1433
1433
1434 def findoutgoing(self, remote, base=None, heads=None, force=False):
1434 def findoutgoing(self, remote, base=None, heads=None, force=False):
1435 """Return list of nodes that are roots of subsets not in remote
1435 """Return list of nodes that are roots of subsets not in remote
1436
1436
1437 If base dict is specified, assume that these nodes and their parents
1437 If base dict is specified, assume that these nodes and their parents
1438 exist on the remote side.
1438 exist on the remote side.
1439 If a list of heads is specified, return only nodes which are heads
1439 If a list of heads is specified, return only nodes which are heads
1440 or ancestors of these heads, and return a second element which
1440 or ancestors of these heads, and return a second element which
1441 contains all remote heads which get new children.
1441 contains all remote heads which get new children.
1442 """
1442 """
1443 if base == None:
1443 if base == None:
1444 base = {}
1444 base = {}
1445 self.findincoming(remote, base, heads, force=force)
1445 self.findincoming(remote, base, heads, force=force)
1446
1446
1447 self.ui.debug(_("common changesets up to ")
1447 self.ui.debug(_("common changesets up to ")
1448 + " ".join(map(short, base.keys())) + "\n")
1448 + " ".join(map(short, base.keys())) + "\n")
1449
1449
1450 remain = dict.fromkeys(self.changelog.nodemap)
1450 remain = set(self.changelog.nodemap)
1451
1451
1452 # prune everything remote has from the tree
1452 # prune everything remote has from the tree
1453 del remain[nullid]
1453 remain.remove(nullid)
1454 remove = base.keys()
1454 remove = base.keys()
1455 while remove:
1455 while remove:
1456 n = remove.pop(0)
1456 n = remove.pop(0)
1457 if n in remain:
1457 if n in remain:
1458 del remain[n]
1458 remain.remove(n)
1459 for p in self.changelog.parents(n):
1459 for p in self.changelog.parents(n):
1460 remove.append(p)
1460 remove.append(p)
1461
1461
1462 # find every node whose parents have been pruned
1462 # find every node whose parents have been pruned
1463 subset = []
1463 subset = []
1464 # find every remote head that will get new children
1464 # find every remote head that will get new children
1465 updated_heads = {}
1465 updated_heads = {}
1466 for n in remain:
1466 for n in remain:
1467 p1, p2 = self.changelog.parents(n)
1467 p1, p2 = self.changelog.parents(n)
1468 if p1 not in remain and p2 not in remain:
1468 if p1 not in remain and p2 not in remain:
1469 subset.append(n)
1469 subset.append(n)
1470 if heads:
1470 if heads:
1471 if p1 in heads:
1471 if p1 in heads:
1472 updated_heads[p1] = True
1472 updated_heads[p1] = True
1473 if p2 in heads:
1473 if p2 in heads:
1474 updated_heads[p2] = True
1474 updated_heads[p2] = True
1475
1475
1476 # this is the set of all roots we have to push
1476 # this is the set of all roots we have to push
1477 if heads:
1477 if heads:
1478 return subset, updated_heads.keys()
1478 return subset, updated_heads.keys()
1479 else:
1479 else:
1480 return subset
1480 return subset
1481
1481
1482 def pull(self, remote, heads=None, force=False):
1482 def pull(self, remote, heads=None, force=False):
1483 lock = self.lock()
1483 lock = self.lock()
1484 try:
1484 try:
1485 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1485 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1486 force=force)
1486 force=force)
1487 if fetch == [nullid]:
1487 if fetch == [nullid]:
1488 self.ui.status(_("requesting all changes\n"))
1488 self.ui.status(_("requesting all changes\n"))
1489
1489
1490 if not fetch:
1490 if not fetch:
1491 self.ui.status(_("no changes found\n"))
1491 self.ui.status(_("no changes found\n"))
1492 return 0
1492 return 0
1493
1493
1494 if heads is None and remote.capable('changegroupsubset'):
1494 if heads is None and remote.capable('changegroupsubset'):
1495 heads = rheads
1495 heads = rheads
1496
1496
1497 if heads is None:
1497 if heads is None:
1498 cg = remote.changegroup(fetch, 'pull')
1498 cg = remote.changegroup(fetch, 'pull')
1499 else:
1499 else:
1500 if not remote.capable('changegroupsubset'):
1500 if not remote.capable('changegroupsubset'):
1501 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1501 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1502 cg = remote.changegroupsubset(fetch, heads, 'pull')
1502 cg = remote.changegroupsubset(fetch, heads, 'pull')
1503 return self.addchangegroup(cg, 'pull', remote.url())
1503 return self.addchangegroup(cg, 'pull', remote.url())
1504 finally:
1504 finally:
1505 lock.release()
1505 lock.release()
1506
1506
1507 def push(self, remote, force=False, revs=None):
1507 def push(self, remote, force=False, revs=None):
1508 # there are two ways to push to remote repo:
1508 # there are two ways to push to remote repo:
1509 #
1509 #
1510 # addchangegroup assumes local user can lock remote
1510 # addchangegroup assumes local user can lock remote
1511 # repo (local filesystem, old ssh servers).
1511 # repo (local filesystem, old ssh servers).
1512 #
1512 #
1513 # unbundle assumes local user cannot lock remote repo (new ssh
1513 # unbundle assumes local user cannot lock remote repo (new ssh
1514 # servers, http servers).
1514 # servers, http servers).
1515
1515
1516 if remote.capable('unbundle'):
1516 if remote.capable('unbundle'):
1517 return self.push_unbundle(remote, force, revs)
1517 return self.push_unbundle(remote, force, revs)
1518 return self.push_addchangegroup(remote, force, revs)
1518 return self.push_addchangegroup(remote, force, revs)
1519
1519
1520 def prepush(self, remote, force, revs):
1520 def prepush(self, remote, force, revs):
1521 common = {}
1521 common = {}
1522 remote_heads = remote.heads()
1522 remote_heads = remote.heads()
1523 inc = self.findincoming(remote, common, remote_heads, force=force)
1523 inc = self.findincoming(remote, common, remote_heads, force=force)
1524
1524
1525 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1525 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1526 if revs is not None:
1526 if revs is not None:
1527 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1527 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1528 else:
1528 else:
1529 bases, heads = update, self.changelog.heads()
1529 bases, heads = update, self.changelog.heads()
1530
1530
1531 if not bases:
1531 if not bases:
1532 self.ui.status(_("no changes found\n"))
1532 self.ui.status(_("no changes found\n"))
1533 return None, 1
1533 return None, 1
1534 elif not force:
1534 elif not force:
1535 # check if we're creating new remote heads
1535 # check if we're creating new remote heads
1536 # to be a remote head after push, node must be either
1536 # to be a remote head after push, node must be either
1537 # - unknown locally
1537 # - unknown locally
1538 # - a local outgoing head descended from update
1538 # - a local outgoing head descended from update
1539 # - a remote head that's known locally and not
1539 # - a remote head that's known locally and not
1540 # ancestral to an outgoing head
1540 # ancestral to an outgoing head
1541
1541
1542 warn = 0
1542 warn = 0
1543
1543
1544 if remote_heads == [nullid]:
1544 if remote_heads == [nullid]:
1545 warn = 0
1545 warn = 0
1546 elif not revs and len(heads) > len(remote_heads):
1546 elif not revs and len(heads) > len(remote_heads):
1547 warn = 1
1547 warn = 1
1548 else:
1548 else:
1549 newheads = list(heads)
1549 newheads = list(heads)
1550 for r in remote_heads:
1550 for r in remote_heads:
1551 if r in self.changelog.nodemap:
1551 if r in self.changelog.nodemap:
1552 desc = self.changelog.heads(r, heads)
1552 desc = self.changelog.heads(r, heads)
1553 l = [h for h in heads if h in desc]
1553 l = [h for h in heads if h in desc]
1554 if not l:
1554 if not l:
1555 newheads.append(r)
1555 newheads.append(r)
1556 else:
1556 else:
1557 newheads.append(r)
1557 newheads.append(r)
1558 if len(newheads) > len(remote_heads):
1558 if len(newheads) > len(remote_heads):
1559 warn = 1
1559 warn = 1
1560
1560
1561 if warn:
1561 if warn:
1562 self.ui.warn(_("abort: push creates new remote heads!\n"))
1562 self.ui.warn(_("abort: push creates new remote heads!\n"))
1563 self.ui.status(_("(did you forget to merge?"
1563 self.ui.status(_("(did you forget to merge?"
1564 " use push -f to force)\n"))
1564 " use push -f to force)\n"))
1565 return None, 0
1565 return None, 0
1566 elif inc:
1566 elif inc:
1567 self.ui.warn(_("note: unsynced remote changes!\n"))
1567 self.ui.warn(_("note: unsynced remote changes!\n"))
1568
1568
1569
1569
1570 if revs is None:
1570 if revs is None:
1571 # use the fast path, no race possible on push
1571 # use the fast path, no race possible on push
1572 cg = self._changegroup(common.keys(), 'push')
1572 cg = self._changegroup(common.keys(), 'push')
1573 else:
1573 else:
1574 cg = self.changegroupsubset(update, revs, 'push')
1574 cg = self.changegroupsubset(update, revs, 'push')
1575 return cg, remote_heads
1575 return cg, remote_heads
1576
1576
1577 def push_addchangegroup(self, remote, force, revs):
1577 def push_addchangegroup(self, remote, force, revs):
1578 lock = remote.lock()
1578 lock = remote.lock()
1579 try:
1579 try:
1580 ret = self.prepush(remote, force, revs)
1580 ret = self.prepush(remote, force, revs)
1581 if ret[0] is not None:
1581 if ret[0] is not None:
1582 cg, remote_heads = ret
1582 cg, remote_heads = ret
1583 return remote.addchangegroup(cg, 'push', self.url())
1583 return remote.addchangegroup(cg, 'push', self.url())
1584 return ret[1]
1584 return ret[1]
1585 finally:
1585 finally:
1586 lock.release()
1586 lock.release()
1587
1587
1588 def push_unbundle(self, remote, force, revs):
1588 def push_unbundle(self, remote, force, revs):
1589 # local repo finds heads on server, finds out what revs it
1589 # local repo finds heads on server, finds out what revs it
1590 # must push. once revs transferred, if server finds it has
1590 # must push. once revs transferred, if server finds it has
1591 # different heads (someone else won commit/push race), server
1591 # different heads (someone else won commit/push race), server
1592 # aborts.
1592 # aborts.
1593
1593
1594 ret = self.prepush(remote, force, revs)
1594 ret = self.prepush(remote, force, revs)
1595 if ret[0] is not None:
1595 if ret[0] is not None:
1596 cg, remote_heads = ret
1596 cg, remote_heads = ret
1597 if force: remote_heads = ['force']
1597 if force: remote_heads = ['force']
1598 return remote.unbundle(cg, remote_heads, 'push')
1598 return remote.unbundle(cg, remote_heads, 'push')
1599 return ret[1]
1599 return ret[1]
1600
1600
1601 def changegroupinfo(self, nodes, source):
1601 def changegroupinfo(self, nodes, source):
1602 if self.ui.verbose or source == 'bundle':
1602 if self.ui.verbose or source == 'bundle':
1603 self.ui.status(_("%d changesets found\n") % len(nodes))
1603 self.ui.status(_("%d changesets found\n") % len(nodes))
1604 if self.ui.debugflag:
1604 if self.ui.debugflag:
1605 self.ui.debug(_("list of changesets:\n"))
1605 self.ui.debug(_("list of changesets:\n"))
1606 for node in nodes:
1606 for node in nodes:
1607 self.ui.debug("%s\n" % hex(node))
1607 self.ui.debug("%s\n" % hex(node))
1608
1608
1609 def changegroupsubset(self, bases, heads, source, extranodes=None):
1609 def changegroupsubset(self, bases, heads, source, extranodes=None):
1610 """This function generates a changegroup consisting of all the nodes
1610 """This function generates a changegroup consisting of all the nodes
1611 that are descendents of any of the bases, and ancestors of any of
1611 that are descendents of any of the bases, and ancestors of any of
1612 the heads.
1612 the heads.
1613
1613
1614 It is fairly complex as determining which filenodes and which
1614 It is fairly complex as determining which filenodes and which
1615 manifest nodes need to be included for the changeset to be complete
1615 manifest nodes need to be included for the changeset to be complete
1616 is non-trivial.
1616 is non-trivial.
1617
1617
1618 Another wrinkle is doing the reverse, figuring out which changeset in
1618 Another wrinkle is doing the reverse, figuring out which changeset in
1619 the changegroup a particular filenode or manifestnode belongs to.
1619 the changegroup a particular filenode or manifestnode belongs to.
1620
1620
1621 The caller can specify some nodes that must be included in the
1621 The caller can specify some nodes that must be included in the
1622 changegroup using the extranodes argument. It should be a dict
1622 changegroup using the extranodes argument. It should be a dict
1623 where the keys are the filenames (or 1 for the manifest), and the
1623 where the keys are the filenames (or 1 for the manifest), and the
1624 values are lists of (node, linknode) tuples, where node is a wanted
1624 values are lists of (node, linknode) tuples, where node is a wanted
1625 node and linknode is the changelog node that should be transmitted as
1625 node and linknode is the changelog node that should be transmitted as
1626 the linkrev.
1626 the linkrev.
1627 """
1627 """
1628
1628
1629 if extranodes is None:
1629 if extranodes is None:
1630 # can we go through the fast path ?
1630 # can we go through the fast path ?
1631 heads.sort()
1631 heads.sort()
1632 allheads = self.heads()
1632 allheads = self.heads()
1633 allheads.sort()
1633 allheads.sort()
1634 if heads == allheads:
1634 if heads == allheads:
1635 common = []
1635 common = []
1636 # parents of bases are known from both sides
1636 # parents of bases are known from both sides
1637 for n in bases:
1637 for n in bases:
1638 for p in self.changelog.parents(n):
1638 for p in self.changelog.parents(n):
1639 if p != nullid:
1639 if p != nullid:
1640 common.append(p)
1640 common.append(p)
1641 return self._changegroup(common, source)
1641 return self._changegroup(common, source)
1642
1642
1643 self.hook('preoutgoing', throw=True, source=source)
1643 self.hook('preoutgoing', throw=True, source=source)
1644
1644
1645 # Set up some initial variables
1645 # Set up some initial variables
1646 # Make it easy to refer to self.changelog
1646 # Make it easy to refer to self.changelog
1647 cl = self.changelog
1647 cl = self.changelog
1648 # msng is short for missing - compute the list of changesets in this
1648 # msng is short for missing - compute the list of changesets in this
1649 # changegroup.
1649 # changegroup.
1650 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1650 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1651 self.changegroupinfo(msng_cl_lst, source)
1651 self.changegroupinfo(msng_cl_lst, source)
1652 # Some bases may turn out to be superfluous, and some heads may be
1652 # Some bases may turn out to be superfluous, and some heads may be
1653 # too. nodesbetween will return the minimal set of bases and heads
1653 # too. nodesbetween will return the minimal set of bases and heads
1654 # necessary to re-create the changegroup.
1654 # necessary to re-create the changegroup.
1655
1655
1656 # Known heads are the list of heads that it is assumed the recipient
1656 # Known heads are the list of heads that it is assumed the recipient
1657 # of this changegroup will know about.
1657 # of this changegroup will know about.
1658 knownheads = {}
1658 knownheads = {}
1659 # We assume that all parents of bases are known heads.
1659 # We assume that all parents of bases are known heads.
1660 for n in bases:
1660 for n in bases:
1661 for p in cl.parents(n):
1661 for p in cl.parents(n):
1662 if p != nullid:
1662 if p != nullid:
1663 knownheads[p] = 1
1663 knownheads[p] = 1
1664 knownheads = knownheads.keys()
1664 knownheads = knownheads.keys()
1665 if knownheads:
1665 if knownheads:
1666 # Now that we know what heads are known, we can compute which
1666 # Now that we know what heads are known, we can compute which
1667 # changesets are known. The recipient must know about all
1667 # changesets are known. The recipient must know about all
1668 # changesets required to reach the known heads from the null
1668 # changesets required to reach the known heads from the null
1669 # changeset.
1669 # changeset.
1670 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1670 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1671 junk = None
1671 junk = None
1672 # Transform the list into an ersatz set.
1672 # Transform the list into an ersatz set.
1673 has_cl_set = dict.fromkeys(has_cl_set)
1673 has_cl_set = set(has_cl_set)
1674 else:
1674 else:
1675 # If there were no known heads, the recipient cannot be assumed to
1675 # If there were no known heads, the recipient cannot be assumed to
1676 # know about any changesets.
1676 # know about any changesets.
1677 has_cl_set = {}
1677 has_cl_set = set()
1678
1678
1679 # Make it easy to refer to self.manifest
1679 # Make it easy to refer to self.manifest
1680 mnfst = self.manifest
1680 mnfst = self.manifest
1681 # We don't know which manifests are missing yet
1681 # We don't know which manifests are missing yet
1682 msng_mnfst_set = {}
1682 msng_mnfst_set = {}
1683 # Nor do we know which filenodes are missing.
1683 # Nor do we know which filenodes are missing.
1684 msng_filenode_set = {}
1684 msng_filenode_set = {}
1685
1685
1686 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1686 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1687 junk = None
1687 junk = None
1688
1688
1689 # A changeset always belongs to itself, so the changenode lookup
1689 # A changeset always belongs to itself, so the changenode lookup
1690 # function for a changenode is identity.
1690 # function for a changenode is identity.
1691 def identity(x):
1691 def identity(x):
1692 return x
1692 return x
1693
1693
1694 # A function generating function. Sets up an environment for the
1694 # A function generating function. Sets up an environment for the
1695 # inner function.
1695 # inner function.
1696 def cmp_by_rev_func(revlog):
1696 def cmp_by_rev_func(revlog):
1697 # Compare two nodes by their revision number in the environment's
1697 # Compare two nodes by their revision number in the environment's
1698 # revision history. Since the revision number both represents the
1698 # revision history. Since the revision number both represents the
1699 # most efficient order to read the nodes in, and represents a
1699 # most efficient order to read the nodes in, and represents a
1700 # topological sorting of the nodes, this function is often useful.
1700 # topological sorting of the nodes, this function is often useful.
1701 def cmp_by_rev(a, b):
1701 def cmp_by_rev(a, b):
1702 return cmp(revlog.rev(a), revlog.rev(b))
1702 return cmp(revlog.rev(a), revlog.rev(b))
1703 return cmp_by_rev
1703 return cmp_by_rev
1704
1704
1705 # If we determine that a particular file or manifest node must be a
1705 # If we determine that a particular file or manifest node must be a
1706 # node that the recipient of the changegroup will already have, we can
1706 # node that the recipient of the changegroup will already have, we can
1707 # also assume the recipient will have all the parents. This function
1707 # also assume the recipient will have all the parents. This function
1708 # prunes them from the set of missing nodes.
1708 # prunes them from the set of missing nodes.
1709 def prune_parents(revlog, hasset, msngset):
1709 def prune_parents(revlog, hasset, msngset):
1710 haslst = hasset.keys()
1710 haslst = hasset.keys()
1711 haslst.sort(cmp_by_rev_func(revlog))
1711 haslst.sort(cmp_by_rev_func(revlog))
1712 for node in haslst:
1712 for node in haslst:
1713 parentlst = [p for p in revlog.parents(node) if p != nullid]
1713 parentlst = [p for p in revlog.parents(node) if p != nullid]
1714 while parentlst:
1714 while parentlst:
1715 n = parentlst.pop()
1715 n = parentlst.pop()
1716 if n not in hasset:
1716 if n not in hasset:
1717 hasset[n] = 1
1717 hasset[n] = 1
1718 p = [p for p in revlog.parents(n) if p != nullid]
1718 p = [p for p in revlog.parents(n) if p != nullid]
1719 parentlst.extend(p)
1719 parentlst.extend(p)
1720 for n in hasset:
1720 for n in hasset:
1721 msngset.pop(n, None)
1721 msngset.pop(n, None)
1722
1722
1723 # This is a function generating function used to set up an environment
1723 # This is a function generating function used to set up an environment
1724 # for the inner function to execute in.
1724 # for the inner function to execute in.
1725 def manifest_and_file_collector(changedfileset):
1725 def manifest_and_file_collector(changedfileset):
1726 # This is an information gathering function that gathers
1726 # This is an information gathering function that gathers
1727 # information from each changeset node that goes out as part of
1727 # information from each changeset node that goes out as part of
1728 # the changegroup. The information gathered is a list of which
1728 # the changegroup. The information gathered is a list of which
1729 # manifest nodes are potentially required (the recipient may
1729 # manifest nodes are potentially required (the recipient may
1730 # already have them) and total list of all files which were
1730 # already have them) and total list of all files which were
1731 # changed in any changeset in the changegroup.
1731 # changed in any changeset in the changegroup.
1732 #
1732 #
1733 # We also remember the first changenode we saw any manifest
1733 # We also remember the first changenode we saw any manifest
1734 # referenced by so we can later determine which changenode 'owns'
1734 # referenced by so we can later determine which changenode 'owns'
1735 # the manifest.
1735 # the manifest.
1736 def collect_manifests_and_files(clnode):
1736 def collect_manifests_and_files(clnode):
1737 c = cl.read(clnode)
1737 c = cl.read(clnode)
1738 for f in c[3]:
1738 for f in c[3]:
1739 # This is to make sure we only have one instance of each
1739 # This is to make sure we only have one instance of each
1740 # filename string for each filename.
1740 # filename string for each filename.
1741 changedfileset.setdefault(f, f)
1741 changedfileset.setdefault(f, f)
1742 msng_mnfst_set.setdefault(c[0], clnode)
1742 msng_mnfst_set.setdefault(c[0], clnode)
1743 return collect_manifests_and_files
1743 return collect_manifests_and_files
1744
1744
1745 # Figure out which manifest nodes (of the ones we think might be part
1745 # Figure out which manifest nodes (of the ones we think might be part
1746 # of the changegroup) the recipient must know about and remove them
1746 # of the changegroup) the recipient must know about and remove them
1747 # from the changegroup.
1747 # from the changegroup.
1748 def prune_manifests():
1748 def prune_manifests():
1749 has_mnfst_set = {}
1749 has_mnfst_set = {}
1750 for n in msng_mnfst_set:
1750 for n in msng_mnfst_set:
1751 # If a 'missing' manifest thinks it belongs to a changenode
1751 # If a 'missing' manifest thinks it belongs to a changenode
1752 # the recipient is assumed to have, obviously the recipient
1752 # the recipient is assumed to have, obviously the recipient
1753 # must have that manifest.
1753 # must have that manifest.
1754 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1754 linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
1755 if linknode in has_cl_set:
1755 if linknode in has_cl_set:
1756 has_mnfst_set[n] = 1
1756 has_mnfst_set[n] = 1
1757 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1757 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1758
1758
1759 # Use the information collected in collect_manifests_and_files to say
1759 # Use the information collected in collect_manifests_and_files to say
1760 # which changenode any manifestnode belongs to.
1760 # which changenode any manifestnode belongs to.
1761 def lookup_manifest_link(mnfstnode):
1761 def lookup_manifest_link(mnfstnode):
1762 return msng_mnfst_set[mnfstnode]
1762 return msng_mnfst_set[mnfstnode]
1763
1763
1764 # A function generating function that sets up the initial environment
1764 # A function generating function that sets up the initial environment
1765 # the inner function.
1765 # the inner function.
1766 def filenode_collector(changedfiles):
1766 def filenode_collector(changedfiles):
1767 next_rev = [0]
1767 next_rev = [0]
1768 # This gathers information from each manifestnode included in the
1768 # This gathers information from each manifestnode included in the
1769 # changegroup about which filenodes the manifest node references
1769 # changegroup about which filenodes the manifest node references
1770 # so we can include those in the changegroup too.
1770 # so we can include those in the changegroup too.
1771 #
1771 #
1772 # It also remembers which changenode each filenode belongs to. It
1772 # It also remembers which changenode each filenode belongs to. It
1773 # does this by assuming the a filenode belongs to the changenode
1773 # does this by assuming the a filenode belongs to the changenode
1774 # the first manifest that references it belongs to.
1774 # the first manifest that references it belongs to.
1775 def collect_msng_filenodes(mnfstnode):
1775 def collect_msng_filenodes(mnfstnode):
1776 r = mnfst.rev(mnfstnode)
1776 r = mnfst.rev(mnfstnode)
1777 if r == next_rev[0]:
1777 if r == next_rev[0]:
1778 # If the last rev we looked at was the one just previous,
1778 # If the last rev we looked at was the one just previous,
1779 # we only need to see a diff.
1779 # we only need to see a diff.
1780 deltamf = mnfst.readdelta(mnfstnode)
1780 deltamf = mnfst.readdelta(mnfstnode)
1781 # For each line in the delta
1781 # For each line in the delta
1782 for f, fnode in deltamf.iteritems():
1782 for f, fnode in deltamf.iteritems():
1783 f = changedfiles.get(f, None)
1783 f = changedfiles.get(f, None)
1784 # And if the file is in the list of files we care
1784 # And if the file is in the list of files we care
1785 # about.
1785 # about.
1786 if f is not None:
1786 if f is not None:
1787 # Get the changenode this manifest belongs to
1787 # Get the changenode this manifest belongs to
1788 clnode = msng_mnfst_set[mnfstnode]
1788 clnode = msng_mnfst_set[mnfstnode]
1789 # Create the set of filenodes for the file if
1789 # Create the set of filenodes for the file if
1790 # there isn't one already.
1790 # there isn't one already.
1791 ndset = msng_filenode_set.setdefault(f, {})
1791 ndset = msng_filenode_set.setdefault(f, {})
1792 # And set the filenode's changelog node to the
1792 # And set the filenode's changelog node to the
1793 # manifest's if it hasn't been set already.
1793 # manifest's if it hasn't been set already.
1794 ndset.setdefault(fnode, clnode)
1794 ndset.setdefault(fnode, clnode)
1795 else:
1795 else:
1796 # Otherwise we need a full manifest.
1796 # Otherwise we need a full manifest.
1797 m = mnfst.read(mnfstnode)
1797 m = mnfst.read(mnfstnode)
1798 # For every file in we care about.
1798 # For every file in we care about.
1799 for f in changedfiles:
1799 for f in changedfiles:
1800 fnode = m.get(f, None)
1800 fnode = m.get(f, None)
1801 # If it's in the manifest
1801 # If it's in the manifest
1802 if fnode is not None:
1802 if fnode is not None:
1803 # See comments above.
1803 # See comments above.
1804 clnode = msng_mnfst_set[mnfstnode]
1804 clnode = msng_mnfst_set[mnfstnode]
1805 ndset = msng_filenode_set.setdefault(f, {})
1805 ndset = msng_filenode_set.setdefault(f, {})
1806 ndset.setdefault(fnode, clnode)
1806 ndset.setdefault(fnode, clnode)
1807 # Remember the revision we hope to see next.
1807 # Remember the revision we hope to see next.
1808 next_rev[0] = r + 1
1808 next_rev[0] = r + 1
1809 return collect_msng_filenodes
1809 return collect_msng_filenodes
1810
1810
1811 # We have a list of filenodes we think we need for a file, lets remove
1811 # We have a list of filenodes we think we need for a file, lets remove
1812 # all those we now the recipient must have.
1812 # all those we now the recipient must have.
1813 def prune_filenodes(f, filerevlog):
1813 def prune_filenodes(f, filerevlog):
1814 msngset = msng_filenode_set[f]
1814 msngset = msng_filenode_set[f]
1815 hasset = {}
1815 hasset = {}
1816 # If a 'missing' filenode thinks it belongs to a changenode we
1816 # If a 'missing' filenode thinks it belongs to a changenode we
1817 # assume the recipient must have, then the recipient must have
1817 # assume the recipient must have, then the recipient must have
1818 # that filenode.
1818 # that filenode.
1819 for n in msngset:
1819 for n in msngset:
1820 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1820 clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
1821 if clnode in has_cl_set:
1821 if clnode in has_cl_set:
1822 hasset[n] = 1
1822 hasset[n] = 1
1823 prune_parents(filerevlog, hasset, msngset)
1823 prune_parents(filerevlog, hasset, msngset)
1824
1824
1825 # A function generator function that sets up the a context for the
1825 # A function generator function that sets up the a context for the
1826 # inner function.
1826 # inner function.
1827 def lookup_filenode_link_func(fname):
1827 def lookup_filenode_link_func(fname):
1828 msngset = msng_filenode_set[fname]
1828 msngset = msng_filenode_set[fname]
1829 # Lookup the changenode the filenode belongs to.
1829 # Lookup the changenode the filenode belongs to.
1830 def lookup_filenode_link(fnode):
1830 def lookup_filenode_link(fnode):
1831 return msngset[fnode]
1831 return msngset[fnode]
1832 return lookup_filenode_link
1832 return lookup_filenode_link
1833
1833
1834 # Add the nodes that were explicitly requested.
1834 # Add the nodes that were explicitly requested.
1835 def add_extra_nodes(name, nodes):
1835 def add_extra_nodes(name, nodes):
1836 if not extranodes or name not in extranodes:
1836 if not extranodes or name not in extranodes:
1837 return
1837 return
1838
1838
1839 for node, linknode in extranodes[name]:
1839 for node, linknode in extranodes[name]:
1840 if node not in nodes:
1840 if node not in nodes:
1841 nodes[node] = linknode
1841 nodes[node] = linknode
1842
1842
1843 # Now that we have all theses utility functions to help out and
1843 # Now that we have all theses utility functions to help out and
1844 # logically divide up the task, generate the group.
1844 # logically divide up the task, generate the group.
1845 def gengroup():
1845 def gengroup():
1846 # The set of changed files starts empty.
1846 # The set of changed files starts empty.
1847 changedfiles = {}
1847 changedfiles = {}
1848 # Create a changenode group generator that will call our functions
1848 # Create a changenode group generator that will call our functions
1849 # back to lookup the owning changenode and collect information.
1849 # back to lookup the owning changenode and collect information.
1850 group = cl.group(msng_cl_lst, identity,
1850 group = cl.group(msng_cl_lst, identity,
1851 manifest_and_file_collector(changedfiles))
1851 manifest_and_file_collector(changedfiles))
1852 for chnk in group:
1852 for chnk in group:
1853 yield chnk
1853 yield chnk
1854
1854
1855 # The list of manifests has been collected by the generator
1855 # The list of manifests has been collected by the generator
1856 # calling our functions back.
1856 # calling our functions back.
1857 prune_manifests()
1857 prune_manifests()
1858 add_extra_nodes(1, msng_mnfst_set)
1858 add_extra_nodes(1, msng_mnfst_set)
1859 msng_mnfst_lst = msng_mnfst_set.keys()
1859 msng_mnfst_lst = msng_mnfst_set.keys()
1860 # Sort the manifestnodes by revision number.
1860 # Sort the manifestnodes by revision number.
1861 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1861 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1862 # Create a generator for the manifestnodes that calls our lookup
1862 # Create a generator for the manifestnodes that calls our lookup
1863 # and data collection functions back.
1863 # and data collection functions back.
1864 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1864 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1865 filenode_collector(changedfiles))
1865 filenode_collector(changedfiles))
1866 for chnk in group:
1866 for chnk in group:
1867 yield chnk
1867 yield chnk
1868
1868
1869 # These are no longer needed, dereference and toss the memory for
1869 # These are no longer needed, dereference and toss the memory for
1870 # them.
1870 # them.
1871 msng_mnfst_lst = None
1871 msng_mnfst_lst = None
1872 msng_mnfst_set.clear()
1872 msng_mnfst_set.clear()
1873
1873
1874 if extranodes:
1874 if extranodes:
1875 for fname in extranodes:
1875 for fname in extranodes:
1876 if isinstance(fname, int):
1876 if isinstance(fname, int):
1877 continue
1877 continue
1878 msng_filenode_set.setdefault(fname, {})
1878 msng_filenode_set.setdefault(fname, {})
1879 changedfiles[fname] = 1
1879 changedfiles[fname] = 1
1880 # Go through all our files in order sorted by name.
1880 # Go through all our files in order sorted by name.
1881 for fname in util.sort(changedfiles):
1881 for fname in util.sort(changedfiles):
1882 filerevlog = self.file(fname)
1882 filerevlog = self.file(fname)
1883 if not len(filerevlog):
1883 if not len(filerevlog):
1884 raise util.Abort(_("empty or missing revlog for %s") % fname)
1884 raise util.Abort(_("empty or missing revlog for %s") % fname)
1885 # Toss out the filenodes that the recipient isn't really
1885 # Toss out the filenodes that the recipient isn't really
1886 # missing.
1886 # missing.
1887 if fname in msng_filenode_set:
1887 if fname in msng_filenode_set:
1888 prune_filenodes(fname, filerevlog)
1888 prune_filenodes(fname, filerevlog)
1889 add_extra_nodes(fname, msng_filenode_set[fname])
1889 add_extra_nodes(fname, msng_filenode_set[fname])
1890 msng_filenode_lst = msng_filenode_set[fname].keys()
1890 msng_filenode_lst = msng_filenode_set[fname].keys()
1891 else:
1891 else:
1892 msng_filenode_lst = []
1892 msng_filenode_lst = []
1893 # If any filenodes are left, generate the group for them,
1893 # If any filenodes are left, generate the group for them,
1894 # otherwise don't bother.
1894 # otherwise don't bother.
1895 if len(msng_filenode_lst) > 0:
1895 if len(msng_filenode_lst) > 0:
1896 yield changegroup.chunkheader(len(fname))
1896 yield changegroup.chunkheader(len(fname))
1897 yield fname
1897 yield fname
1898 # Sort the filenodes by their revision #
1898 # Sort the filenodes by their revision #
1899 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1899 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1900 # Create a group generator and only pass in a changenode
1900 # Create a group generator and only pass in a changenode
1901 # lookup function as we need to collect no information
1901 # lookup function as we need to collect no information
1902 # from filenodes.
1902 # from filenodes.
1903 group = filerevlog.group(msng_filenode_lst,
1903 group = filerevlog.group(msng_filenode_lst,
1904 lookup_filenode_link_func(fname))
1904 lookup_filenode_link_func(fname))
1905 for chnk in group:
1905 for chnk in group:
1906 yield chnk
1906 yield chnk
1907 if fname in msng_filenode_set:
1907 if fname in msng_filenode_set:
1908 # Don't need this anymore, toss it to free memory.
1908 # Don't need this anymore, toss it to free memory.
1909 del msng_filenode_set[fname]
1909 del msng_filenode_set[fname]
1910 # Signal that no more groups are left.
1910 # Signal that no more groups are left.
1911 yield changegroup.closechunk()
1911 yield changegroup.closechunk()
1912
1912
1913 if msng_cl_lst:
1913 if msng_cl_lst:
1914 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1914 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1915
1915
1916 return util.chunkbuffer(gengroup())
1916 return util.chunkbuffer(gengroup())
1917
1917
1918 def changegroup(self, basenodes, source):
1918 def changegroup(self, basenodes, source):
1919 # to avoid a race we use changegroupsubset() (issue1320)
1919 # to avoid a race we use changegroupsubset() (issue1320)
1920 return self.changegroupsubset(basenodes, self.heads(), source)
1920 return self.changegroupsubset(basenodes, self.heads(), source)
1921
1921
1922 def _changegroup(self, common, source):
1922 def _changegroup(self, common, source):
1923 """Generate a changegroup of all nodes that we have that a recipient
1923 """Generate a changegroup of all nodes that we have that a recipient
1924 doesn't.
1924 doesn't.
1925
1925
1926 This is much easier than the previous function as we can assume that
1926 This is much easier than the previous function as we can assume that
1927 the recipient has any changenode we aren't sending them.
1927 the recipient has any changenode we aren't sending them.
1928
1928
1929 common is the set of common nodes between remote and self"""
1929 common is the set of common nodes between remote and self"""
1930
1930
1931 self.hook('preoutgoing', throw=True, source=source)
1931 self.hook('preoutgoing', throw=True, source=source)
1932
1932
1933 cl = self.changelog
1933 cl = self.changelog
1934 nodes = cl.findmissing(common)
1934 nodes = cl.findmissing(common)
1935 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1935 revset = set([cl.rev(n) for n in nodes])
1936 self.changegroupinfo(nodes, source)
1936 self.changegroupinfo(nodes, source)
1937
1937
1938 def identity(x):
1938 def identity(x):
1939 return x
1939 return x
1940
1940
1941 def gennodelst(log):
1941 def gennodelst(log):
1942 for r in log:
1942 for r in log:
1943 if log.linkrev(r) in revset:
1943 if log.linkrev(r) in revset:
1944 yield log.node(r)
1944 yield log.node(r)
1945
1945
1946 def changed_file_collector(changedfileset):
1946 def changed_file_collector(changedfileset):
1947 def collect_changed_files(clnode):
1947 def collect_changed_files(clnode):
1948 c = cl.read(clnode)
1948 c = cl.read(clnode)
1949 for fname in c[3]:
1949 for fname in c[3]:
1950 changedfileset[fname] = 1
1950 changedfileset[fname] = 1
1951 return collect_changed_files
1951 return collect_changed_files
1952
1952
1953 def lookuprevlink_func(revlog):
1953 def lookuprevlink_func(revlog):
1954 def lookuprevlink(n):
1954 def lookuprevlink(n):
1955 return cl.node(revlog.linkrev(revlog.rev(n)))
1955 return cl.node(revlog.linkrev(revlog.rev(n)))
1956 return lookuprevlink
1956 return lookuprevlink
1957
1957
1958 def gengroup():
1958 def gengroup():
1959 # construct a list of all changed files
1959 # construct a list of all changed files
1960 changedfiles = {}
1960 changedfiles = {}
1961
1961
1962 for chnk in cl.group(nodes, identity,
1962 for chnk in cl.group(nodes, identity,
1963 changed_file_collector(changedfiles)):
1963 changed_file_collector(changedfiles)):
1964 yield chnk
1964 yield chnk
1965
1965
1966 mnfst = self.manifest
1966 mnfst = self.manifest
1967 nodeiter = gennodelst(mnfst)
1967 nodeiter = gennodelst(mnfst)
1968 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1968 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1969 yield chnk
1969 yield chnk
1970
1970
1971 for fname in util.sort(changedfiles):
1971 for fname in util.sort(changedfiles):
1972 filerevlog = self.file(fname)
1972 filerevlog = self.file(fname)
1973 if not len(filerevlog):
1973 if not len(filerevlog):
1974 raise util.Abort(_("empty or missing revlog for %s") % fname)
1974 raise util.Abort(_("empty or missing revlog for %s") % fname)
1975 nodeiter = gennodelst(filerevlog)
1975 nodeiter = gennodelst(filerevlog)
1976 nodeiter = list(nodeiter)
1976 nodeiter = list(nodeiter)
1977 if nodeiter:
1977 if nodeiter:
1978 yield changegroup.chunkheader(len(fname))
1978 yield changegroup.chunkheader(len(fname))
1979 yield fname
1979 yield fname
1980 lookup = lookuprevlink_func(filerevlog)
1980 lookup = lookuprevlink_func(filerevlog)
1981 for chnk in filerevlog.group(nodeiter, lookup):
1981 for chnk in filerevlog.group(nodeiter, lookup):
1982 yield chnk
1982 yield chnk
1983
1983
1984 yield changegroup.closechunk()
1984 yield changegroup.closechunk()
1985
1985
1986 if nodes:
1986 if nodes:
1987 self.hook('outgoing', node=hex(nodes[0]), source=source)
1987 self.hook('outgoing', node=hex(nodes[0]), source=source)
1988
1988
1989 return util.chunkbuffer(gengroup())
1989 return util.chunkbuffer(gengroup())
1990
1990
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - less heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            # called per incoming changeset node; len(cl) at call time
            # serves as its link revision number
            self.ui.debug(_("add changeset %s\n") % short(x))
            return len(cl)

        def revmap(x):
            # translate a changelog node into its revision number
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not see
        # inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            # weak proxy so the transaction's destructor is not kept
            # alive by the revlogs holding a reference to it
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = len(cl) - 1  # last rev before the group is applied
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = len(cl) - 1  # last rev after the group is applied
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files: a sequence of (filename chunk, revision
            # group) pairs, terminated by an empty chunk
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = len(fl)
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                # lazily flush pending changelog data so hooks can see it
                p = lambda: self.changelog.writepending() and self.root or ""
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url, pending=p)

            # make changelog see real files again
            cl.finalize(trp)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1
2094
2094
2095
2095
    def stream_in(self, remote):
        """Clone by copying raw store files streamed from the remote.

        Protocol as parsed here: a decimal status line (0 ok, 1
        forbidden, 2 remote lock failed), then a line with
        "<total_files> <total_bytes>", then for each file a
        NUL-separated "<name> <size>" header followed by exactly
        <size> bytes of data.  Returns len(self.heads()) + 1.
        """
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise error.ResponseError(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
            # sopener writes directly into the repository store
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            # avoid a division by zero on very fast transfers
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        # the store changed behind our caches' back; drop them
        self.invalidate()
        return len(self.heads()) + 1
2142
2142
2143 def clone(self, remote, heads=[], stream=False):
2143 def clone(self, remote, heads=[], stream=False):
2144 '''clone remote repository.
2144 '''clone remote repository.
2145
2145
2146 keyword arguments:
2146 keyword arguments:
2147 heads: list of revs to clone (forces use of pull)
2147 heads: list of revs to clone (forces use of pull)
2148 stream: use streaming clone if possible'''
2148 stream: use streaming clone if possible'''
2149
2149
2150 # now, all clients that can request uncompressed clones can
2150 # now, all clients that can request uncompressed clones can
2151 # read repo formats supported by all servers that can serve
2151 # read repo formats supported by all servers that can serve
2152 # them.
2152 # them.
2153
2153
2154 # if revlog format changes, client will have to check version
2154 # if revlog format changes, client will have to check version
2155 # and format flags on "stream" capability, and use
2155 # and format flags on "stream" capability, and use
2156 # uncompressed only if compatible.
2156 # uncompressed only if compatible.
2157
2157
2158 if stream and not heads and remote.capable('stream'):
2158 if stream and not heads and remote.capable('stream'):
2159 return self.stream_in(remote)
2159 return self.stream_in(remote)
2160 return self.pull(remote, heads)
2160 return self.pull(remote, heads)
2161
2161
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that performs the queued (src, dest) renames.

    The pairs are snapshotted up front, so later mutation of *files*
    does not affect the callback.
    """
    pairs = [tuple(pair) for pair in files]
    def renameall():
        for src, dest in pairs:
            util.rename(src, dest)
    return renameall
2169
2169
def instance(ui, path, create):
    # open (or, if create is true, create) a local repository at path,
    # stripping an optional leading 'file:' scheme from the path
    return localrepository(ui, util.drop_scheme('file', path), create)
2172
2172
def islocal(path):
    # repositories handled by this module are always local
    return True
@@ -1,47 +1,47 b''
1 import util
1 import util
2
2
3 class _match(object):
3 class _match(object):
4 def __init__(self, root, cwd, files, mf, ap):
4 def __init__(self, root, cwd, files, mf, ap):
5 self._root = root
5 self._root = root
6 self._cwd = cwd
6 self._cwd = cwd
7 self._files = files
7 self._files = files
8 self._fmap = dict.fromkeys(files)
8 self._fmap = set(files)
9 self.matchfn = mf
9 self.matchfn = mf
10 self._anypats = ap
10 self._anypats = ap
11 def __call__(self, fn):
11 def __call__(self, fn):
12 return self.matchfn(fn)
12 return self.matchfn(fn)
13 def __iter__(self):
13 def __iter__(self):
14 for f in self._files:
14 for f in self._files:
15 yield f
15 yield f
16 def bad(self, f, msg):
16 def bad(self, f, msg):
17 return True
17 return True
18 def dir(self, f):
18 def dir(self, f):
19 pass
19 pass
20 def missing(self, f):
20 def missing(self, f):
21 pass
21 pass
22 def exact(self, f):
22 def exact(self, f):
23 return f in self._fmap
23 return f in self._fmap
24 def rel(self, f):
24 def rel(self, f):
25 return util.pathto(self._root, self._cwd, f)
25 return util.pathto(self._root, self._cwd, f)
26 def files(self):
26 def files(self):
27 return self._files
27 return self._files
28 def anypats(self):
28 def anypats(self):
29 return self._anypats
29 return self._anypats
30
30
class always(_match):
    """Matcher that accepts every file name."""
    def __init__(self, root, cwd):
        super(always, self).__init__(root, cwd, [], lambda f: True, False)
34
34
class never(_match):
    """Matcher that rejects every file name."""
    def __init__(self, root, cwd):
        super(never, self).__init__(root, cwd, [], lambda f: False, False)
38
38
class exact(_match):
    """Matcher that accepts exactly the listed file names."""
    def __init__(self, root, cwd, files):
        super(exact, self).__init__(root, cwd, files,
                                    lambda f: f in files, False)
42
42
class match(_match):
    # pattern-based matcher: util.matcher() compiles the patterns and
    # include/exclude lists into a file list, predicate and
    # "any patterns" flag
    def __init__(self, root, cwd, patterns, include, exclude, default):
        f, mf, ap = util.matcher(root, cwd, patterns, include, exclude,
                                 None, default)
        _match.__init__(self, root, cwd, f, mf, ap)
@@ -1,507 +1,507 b''
1 # merge.py - directory-level update/merge handling for Mercurial
1 # merge.py - directory-level update/merge handling for Mercurial
2 #
2 #
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2006, 2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import nullid, nullrev, hex, bin
8 from node import nullid, nullrev, hex, bin
9 from i18n import _
9 from i18n import _
10 import errno, util, os, filemerge, copies, shutil
10 import errno, util, os, filemerge, copies, shutil
11
11
class mergestate(object):
    '''track 3-way merge state of individual files

    Per-file records live in self._state, keyed by the destination
    file name; the first field is a one-character state ('u'
    unresolved, 'r' resolved -- see mark()/resolve()).  The records
    mirror the on-disk "merge/state" file written by _write().
    '''
    def __init__(self, repo):
        self._repo = repo
        self._read()
    def reset(self, node=None):
        # drop all per-file records and any saved local file copies
        self._state = {}
        if node:
            self._local = node
        # ignore_errors=True: the merge directory may not exist
        shutil.rmtree(self._repo.join("merge"), True)
    def _read(self):
        # parse "merge/state": line 1 is the hex local node, each
        # following line is a NUL-separated record starting with the
        # destination file name
        self._state = {}
        try:
            localnode = None
            f = self._repo.opener("merge/state")
            for i, l in enumerate(f):
                if i == 0:
                    localnode = l[:-1]
                else:
                    bits = l[:-1].split("\0")
                    self._state[bits[0]] = bits[1:]
            self._local = bin(localnode)
        except IOError, err:
            # a missing state file just means no merge is in progress
            if err.errno != errno.ENOENT:
                raise
    def _write(self):
        # serialize in the format _read() parses
        f = self._repo.opener("merge/state", "w")
        f.write(hex(self._local) + "\n")
        for d, v in self._state.iteritems():
            f.write("\0".join([d] + v) + "\n")
    def add(self, fcl, fco, fca, fd, flags):
        """Register fd as unresolved; fcl/fco/fca are the local,
        other and ancestor file contexts."""
        # stash the pre-merge local contents under the SHA-1 of the
        # path so resolve() can restore them and retry later
        hash = util.sha1(fcl.path()).hexdigest()
        self._repo.opener("merge/" + hash, "w").write(fcl.data())
        self._state[fd] = ['u', hash, fcl.path(), fca.path(),
                           hex(fca.filenode()), fco.path(), flags]
        self._write()
    def __contains__(self, dfile):
        return dfile in self._state
    def __getitem__(self, dfile):
        # the file's one-character state
        return self._state[dfile][0]
    def __iter__(self):
        # yield the tracked files in sorted order
        l = self._state.keys()
        l.sort()
        for f in l:
            yield f
    def mark(self, dfile, state):
        # update a file's state and persist immediately
        self._state[dfile][0] = state
        self._write()
    def resolve(self, dfile, wctx, octx):
        """Re-run the file merge for dfile; returns 0 if already
        resolved, otherwise filemerge's result (falsy on success)."""
        if self[dfile] == 'r':
            return 0
        state, hash, lfile, afile, anode, ofile, flags = self._state[dfile]
        # restore the saved pre-merge local version before merging
        f = self._repo.opener("merge/" + hash)
        self._repo.wwrite(dfile, f.read(), flags)
        fcd = wctx[dfile]
        fco = octx[ofile]
        fca = self._repo.filectx(afile, fileid=anode)
        r = filemerge.filemerge(self._repo, self._local, lfile, fcd, fco, fca)
        if not r:
            self.mark(dfile, 'r')
        return r
73
73
74 def _checkunknown(wctx, mctx):
74 def _checkunknown(wctx, mctx):
75 "check for collisions between unknown files and files in mctx"
75 "check for collisions between unknown files and files in mctx"
76 for f in wctx.unknown():
76 for f in wctx.unknown():
77 if f in mctx and mctx[f].cmp(wctx[f].data()):
77 if f in mctx and mctx[f].cmp(wctx[f].data()):
78 raise util.Abort(_("untracked file in working directory differs"
78 raise util.Abort(_("untracked file in working directory differs"
79 " from file in requested revision: '%s'") % f)
79 " from file in requested revision: '%s'") % f)
80
80
81 def _checkcollision(mctx):
81 def _checkcollision(mctx):
82 "check for case folding collisions in the destination context"
82 "check for case folding collisions in the destination context"
83 folded = {}
83 folded = {}
84 for fn in mctx:
84 for fn in mctx:
85 fold = fn.lower()
85 fold = fn.lower()
86 if fold in folded:
86 if fold in folded:
87 raise util.Abort(_("case-folding collision between %s and %s")
87 raise util.Abort(_("case-folding collision between %s and %s")
88 % (fn, folded[fold]))
88 % (fn, folded[fold]))
89 folded[fold] = fn
89 folded[fold] = fn
90
90
91 def _forgetremoved(wctx, mctx, branchmerge):
91 def _forgetremoved(wctx, mctx, branchmerge):
92 """
92 """
93 Forget removed files
93 Forget removed files
94
94
95 If we're jumping between revisions (as opposed to merging), and if
95 If we're jumping between revisions (as opposed to merging), and if
96 neither the working directory nor the target rev has the file,
96 neither the working directory nor the target rev has the file,
97 then we need to remove it from the dirstate, to prevent the
97 then we need to remove it from the dirstate, to prevent the
98 dirstate from listing the file when it is no longer in the
98 dirstate from listing the file when it is no longer in the
99 manifest.
99 manifest.
100
100
101 If we're merging, and the other revision has removed a file
101 If we're merging, and the other revision has removed a file
102 that is not present in the working directory, we need to mark it
102 that is not present in the working directory, we need to mark it
103 as removed.
103 as removed.
104 """
104 """
105
105
106 action = []
106 action = []
107 state = branchmerge and 'r' or 'f'
107 state = branchmerge and 'r' or 'f'
108 for f in wctx.deleted():
108 for f in wctx.deleted():
109 if f not in mctx:
109 if f not in mctx:
110 action.append((f, state))
110 action.append((f, state))
111
111
112 if not branchmerge:
112 if not branchmerge:
113 for f in wctx.removed():
113 for f in wctx.removed():
114 if f not in mctx:
114 if f not in mctx:
115 action.append((f, "f"))
115 action.append((f, "f"))
116
116
117 return action
117 return action
118
118
119 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
119 def manifestmerge(repo, p1, p2, pa, overwrite, partial):
120 """
120 """
121 Merge p1 and p2 with ancestor ma and generate merge action list
121 Merge p1 and p2 with ancestor ma and generate merge action list
122
122
123 overwrite = whether we clobber working files
123 overwrite = whether we clobber working files
124 partial = function to filter file lists
124 partial = function to filter file lists
125 """
125 """
126
126
127 repo.ui.note(_("resolving manifests\n"))
127 repo.ui.note(_("resolving manifests\n"))
128 repo.ui.debug(_(" overwrite %s partial %s\n") % (overwrite, bool(partial)))
128 repo.ui.debug(_(" overwrite %s partial %s\n") % (overwrite, bool(partial)))
129 repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (pa, p1, p2))
129 repo.ui.debug(_(" ancestor %s local %s remote %s\n") % (pa, p1, p2))
130
130
131 m1 = p1.manifest()
131 m1 = p1.manifest()
132 m2 = p2.manifest()
132 m2 = p2.manifest()
133 ma = pa.manifest()
133 ma = pa.manifest()
134 backwards = (pa == p2)
134 backwards = (pa == p2)
135 action = []
135 action = []
136 copy, copied, diverge = {}, {}, {}
136 copy, copied, diverge = {}, {}, {}
137
137
138 def fmerge(f, f2=None, fa=None):
138 def fmerge(f, f2=None, fa=None):
139 """merge flags"""
139 """merge flags"""
140 if not f2:
140 if not f2:
141 f2 = f
141 f2 = f
142 fa = f
142 fa = f
143 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
143 a, m, n = ma.flags(fa), m1.flags(f), m2.flags(f2)
144 if m == n: # flags agree
144 if m == n: # flags agree
145 return m # unchanged
145 return m # unchanged
146 if m and n: # flags are set but don't agree
146 if m and n: # flags are set but don't agree
147 if not a: # both differ from parent
147 if not a: # both differ from parent
148 r = repo.ui.prompt(
148 r = repo.ui.prompt(
149 _(" conflicting flags for %s\n"
149 _(" conflicting flags for %s\n"
150 "(n)one, e(x)ec or sym(l)ink?") % f, "[nxl]", "n")
150 "(n)one, e(x)ec or sym(l)ink?") % f, "[nxl]", "n")
151 return r != "n" and r or ''
151 return r != "n" and r or ''
152 if m == a:
152 if m == a:
153 return n # changed from m to n
153 return n # changed from m to n
154 return m # changed from n to m
154 return m # changed from n to m
155 if m and m != a: # changed from a to m
155 if m and m != a: # changed from a to m
156 return m
156 return m
157 if n and n != a: # changed from a to n
157 if n and n != a: # changed from a to n
158 return n
158 return n
159 return '' # flag was cleared
159 return '' # flag was cleared
160
160
161 def act(msg, m, f, *args):
161 def act(msg, m, f, *args):
162 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
162 repo.ui.debug(" %s: %s -> %s\n" % (f, msg, m))
163 action.append((f, m) + args)
163 action.append((f, m) + args)
164
164
165 if pa and not (backwards or overwrite):
165 if pa and not (backwards or overwrite):
166 if repo.ui.configbool("merge", "followcopies", True):
166 if repo.ui.configbool("merge", "followcopies", True):
167 dirs = repo.ui.configbool("merge", "followdirs", True)
167 dirs = repo.ui.configbool("merge", "followdirs", True)
168 copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
168 copy, diverge = copies.copies(repo, p1, p2, pa, dirs)
169 copied = dict.fromkeys(copy.values())
169 copied = set(copy.values())
170 for of, fl in diverge.iteritems():
170 for of, fl in diverge.iteritems():
171 act("divergent renames", "dr", of, fl)
171 act("divergent renames", "dr", of, fl)
172
172
173 # Compare manifests
173 # Compare manifests
174 for f, n in m1.iteritems():
174 for f, n in m1.iteritems():
175 if partial and not partial(f):
175 if partial and not partial(f):
176 continue
176 continue
177 if f in m2:
177 if f in m2:
178 if overwrite or backwards:
178 if overwrite or backwards:
179 rflags = m2.flags(f)
179 rflags = m2.flags(f)
180 else:
180 else:
181 rflags = fmerge(f)
181 rflags = fmerge(f)
182 # are files different?
182 # are files different?
183 if n != m2[f]:
183 if n != m2[f]:
184 a = ma.get(f, nullid)
184 a = ma.get(f, nullid)
185 # are we clobbering?
185 # are we clobbering?
186 if overwrite:
186 if overwrite:
187 act("clobbering", "g", f, rflags)
187 act("clobbering", "g", f, rflags)
188 # or are we going back in time and clean?
188 # or are we going back in time and clean?
189 elif backwards:
189 elif backwards:
190 if not n[20:] or not p2[f].cmp(p1[f].data()):
190 if not n[20:] or not p2[f].cmp(p1[f].data()):
191 act("reverting", "g", f, rflags)
191 act("reverting", "g", f, rflags)
192 # are both different from the ancestor?
192 # are both different from the ancestor?
193 elif n != a and m2[f] != a:
193 elif n != a and m2[f] != a:
194 act("versions differ", "m", f, f, f, rflags, False)
194 act("versions differ", "m", f, f, f, rflags, False)
195 # is remote's version newer?
195 # is remote's version newer?
196 elif m2[f] != a:
196 elif m2[f] != a:
197 act("remote is newer", "g", f, rflags)
197 act("remote is newer", "g", f, rflags)
198 # local is newer, not overwrite, check mode bits
198 # local is newer, not overwrite, check mode bits
199 elif m1.flags(f) != rflags:
199 elif m1.flags(f) != rflags:
200 act("update permissions", "e", f, rflags)
200 act("update permissions", "e", f, rflags)
201 # contents same, check mode bits
201 # contents same, check mode bits
202 elif m1.flags(f) != rflags:
202 elif m1.flags(f) != rflags:
203 act("update permissions", "e", f, rflags)
203 act("update permissions", "e", f, rflags)
204 elif f in copied:
204 elif f in copied:
205 continue
205 continue
206 elif f in copy:
206 elif f in copy:
207 f2 = copy[f]
207 f2 = copy[f]
208 if f2 not in m2: # directory rename
208 if f2 not in m2: # directory rename
209 act("remote renamed directory to " + f2, "d",
209 act("remote renamed directory to " + f2, "d",
210 f, None, f2, m1.flags(f))
210 f, None, f2, m1.flags(f))
211 elif f2 in m1: # case 2 A,B/B/B
211 elif f2 in m1: # case 2 A,B/B/B
212 act("local copied to " + f2, "m",
212 act("local copied to " + f2, "m",
213 f, f2, f, fmerge(f, f2, f2), False)
213 f, f2, f, fmerge(f, f2, f2), False)
214 else: # case 4,21 A/B/B
214 else: # case 4,21 A/B/B
215 act("local moved to " + f2, "m",
215 act("local moved to " + f2, "m",
216 f, f2, f, fmerge(f, f2, f2), False)
216 f, f2, f, fmerge(f, f2, f2), False)
217 elif f in ma:
217 elif f in ma:
218 if n != ma[f] and not overwrite:
218 if n != ma[f] and not overwrite:
219 if repo.ui.prompt(
219 if repo.ui.prompt(
220 _(" local changed %s which remote deleted\n"
220 _(" local changed %s which remote deleted\n"
221 "use (c)hanged version or (d)elete?") % f,
221 "use (c)hanged version or (d)elete?") % f,
222 _("[cd]"), _("c")) == _("d"):
222 _("[cd]"), _("c")) == _("d"):
223 act("prompt delete", "r", f)
223 act("prompt delete", "r", f)
224 act("prompt keep", "a", f)
224 act("prompt keep", "a", f)
225 else:
225 else:
226 act("other deleted", "r", f)
226 act("other deleted", "r", f)
227 else:
227 else:
228 # file is created on branch or in working directory
228 # file is created on branch or in working directory
229 if (overwrite and n[20:] != "u") or (backwards and not n[20:]):
229 if (overwrite and n[20:] != "u") or (backwards and not n[20:]):
230 act("remote deleted", "r", f)
230 act("remote deleted", "r", f)
231
231
232 for f, n in m2.iteritems():
232 for f, n in m2.iteritems():
233 if partial and not partial(f):
233 if partial and not partial(f):
234 continue
234 continue
235 if f in m1:
235 if f in m1:
236 continue
236 continue
237 if f in copied:
237 if f in copied:
238 continue
238 continue
239 if f in copy:
239 if f in copy:
240 f2 = copy[f]
240 f2 = copy[f]
241 if f2 not in m1: # directory rename
241 if f2 not in m1: # directory rename
242 act("local renamed directory to " + f2, "d",
242 act("local renamed directory to " + f2, "d",
243 None, f, f2, m2.flags(f))
243 None, f, f2, m2.flags(f))
244 elif f2 in m2: # rename case 1, A/A,B/A
244 elif f2 in m2: # rename case 1, A/A,B/A
245 act("remote copied to " + f, "m",
245 act("remote copied to " + f, "m",
246 f2, f, f, fmerge(f2, f, f2), False)
246 f2, f, f, fmerge(f2, f, f2), False)
247 else: # case 3,20 A/B/A
247 else: # case 3,20 A/B/A
248 act("remote moved to " + f, "m",
248 act("remote moved to " + f, "m",
249 f2, f, f, fmerge(f2, f, f2), True)
249 f2, f, f, fmerge(f2, f, f2), True)
250 elif f in ma:
250 elif f in ma:
251 if overwrite or backwards:
251 if overwrite or backwards:
252 act("recreating", "g", f, m2.flags(f))
252 act("recreating", "g", f, m2.flags(f))
253 elif n != ma[f]:
253 elif n != ma[f]:
254 if repo.ui.prompt(
254 if repo.ui.prompt(
255 _("remote changed %s which local deleted\n"
255 _("remote changed %s which local deleted\n"
256 "use (c)hanged version or leave (d)eleted?") % f,
256 "use (c)hanged version or leave (d)eleted?") % f,
257 _("[cd]"), _("c")) == _("c"):
257 _("[cd]"), _("c")) == _("c"):
258 act("prompt recreating", "g", f, m2.flags(f))
258 act("prompt recreating", "g", f, m2.flags(f))
259 else:
259 else:
260 act("remote created", "g", f, m2.flags(f))
260 act("remote created", "g", f, m2.flags(f))
261
261
262 return action
262 return action
263
263
264 def actioncmp(a1, a2):
264 def actioncmp(a1, a2):
265 m1 = a1[1]
265 m1 = a1[1]
266 m2 = a2[1]
266 m2 = a2[1]
267 if m1 == m2:
267 if m1 == m2:
268 return cmp(a1, a2)
268 return cmp(a1, a2)
269 if m1 == 'r':
269 if m1 == 'r':
270 return -1
270 return -1
271 if m2 == 'r':
271 if m2 == 'r':
272 return 1
272 return 1
273 return cmp(a1, a2)
273 return cmp(a1, a2)
274
274
275 def applyupdates(repo, action, wctx, mctx):
275 def applyupdates(repo, action, wctx, mctx):
276 "apply the merge action list to the working directory"
276 "apply the merge action list to the working directory"
277
277
278 updated, merged, removed, unresolved = 0, 0, 0, 0
278 updated, merged, removed, unresolved = 0, 0, 0, 0
279 ms = mergestate(repo)
279 ms = mergestate(repo)
280 ms.reset(wctx.parents()[0].node())
280 ms.reset(wctx.parents()[0].node())
281 moves = []
281 moves = []
282 action.sort(actioncmp)
282 action.sort(actioncmp)
283
283
284 # prescan for merges
284 # prescan for merges
285 for a in action:
285 for a in action:
286 f, m = a[:2]
286 f, m = a[:2]
287 if m == 'm': # merge
287 if m == 'm': # merge
288 f2, fd, flags, move = a[2:]
288 f2, fd, flags, move = a[2:]
289 repo.ui.debug(_("preserving %s for resolve of %s\n") % (f, fd))
289 repo.ui.debug(_("preserving %s for resolve of %s\n") % (f, fd))
290 fcl = wctx[f]
290 fcl = wctx[f]
291 fco = mctx[f2]
291 fco = mctx[f2]
292 fca = fcl.ancestor(fco) or repo.filectx(f, fileid=nullrev)
292 fca = fcl.ancestor(fco) or repo.filectx(f, fileid=nullrev)
293 ms.add(fcl, fco, fca, fd, flags)
293 ms.add(fcl, fco, fca, fd, flags)
294 if f != fd and move:
294 if f != fd and move:
295 moves.append(f)
295 moves.append(f)
296
296
297 # remove renamed files after safely stored
297 # remove renamed files after safely stored
298 for f in moves:
298 for f in moves:
299 if util.lexists(repo.wjoin(f)):
299 if util.lexists(repo.wjoin(f)):
300 repo.ui.debug(_("removing %s\n") % f)
300 repo.ui.debug(_("removing %s\n") % f)
301 os.unlink(repo.wjoin(f))
301 os.unlink(repo.wjoin(f))
302
302
303 audit_path = util.path_auditor(repo.root)
303 audit_path = util.path_auditor(repo.root)
304
304
305 for a in action:
305 for a in action:
306 f, m = a[:2]
306 f, m = a[:2]
307 if f and f[0] == "/":
307 if f and f[0] == "/":
308 continue
308 continue
309 if m == "r": # remove
309 if m == "r": # remove
310 repo.ui.note(_("removing %s\n") % f)
310 repo.ui.note(_("removing %s\n") % f)
311 audit_path(f)
311 audit_path(f)
312 try:
312 try:
313 util.unlink(repo.wjoin(f))
313 util.unlink(repo.wjoin(f))
314 except OSError, inst:
314 except OSError, inst:
315 if inst.errno != errno.ENOENT:
315 if inst.errno != errno.ENOENT:
316 repo.ui.warn(_("update failed to remove %s: %s!\n") %
316 repo.ui.warn(_("update failed to remove %s: %s!\n") %
317 (f, inst.strerror))
317 (f, inst.strerror))
318 removed += 1
318 removed += 1
319 elif m == "m": # merge
319 elif m == "m": # merge
320 f2, fd, flags, move = a[2:]
320 f2, fd, flags, move = a[2:]
321 r = ms.resolve(fd, wctx, mctx)
321 r = ms.resolve(fd, wctx, mctx)
322 if r > 0:
322 if r > 0:
323 unresolved += 1
323 unresolved += 1
324 else:
324 else:
325 if r is None:
325 if r is None:
326 updated += 1
326 updated += 1
327 else:
327 else:
328 merged += 1
328 merged += 1
329 util.set_flags(repo.wjoin(fd), 'l' in flags, 'x' in flags)
329 util.set_flags(repo.wjoin(fd), 'l' in flags, 'x' in flags)
330 if f != fd and move and util.lexists(repo.wjoin(f)):
330 if f != fd and move and util.lexists(repo.wjoin(f)):
331 repo.ui.debug(_("removing %s\n") % f)
331 repo.ui.debug(_("removing %s\n") % f)
332 os.unlink(repo.wjoin(f))
332 os.unlink(repo.wjoin(f))
333 elif m == "g": # get
333 elif m == "g": # get
334 flags = a[2]
334 flags = a[2]
335 repo.ui.note(_("getting %s\n") % f)
335 repo.ui.note(_("getting %s\n") % f)
336 t = mctx.filectx(f).data()
336 t = mctx.filectx(f).data()
337 repo.wwrite(f, t, flags)
337 repo.wwrite(f, t, flags)
338 updated += 1
338 updated += 1
339 elif m == "d": # directory rename
339 elif m == "d": # directory rename
340 f2, fd, flags = a[2:]
340 f2, fd, flags = a[2:]
341 if f:
341 if f:
342 repo.ui.note(_("moving %s to %s\n") % (f, fd))
342 repo.ui.note(_("moving %s to %s\n") % (f, fd))
343 t = wctx.filectx(f).data()
343 t = wctx.filectx(f).data()
344 repo.wwrite(fd, t, flags)
344 repo.wwrite(fd, t, flags)
345 util.unlink(repo.wjoin(f))
345 util.unlink(repo.wjoin(f))
346 if f2:
346 if f2:
347 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
347 repo.ui.note(_("getting %s to %s\n") % (f2, fd))
348 t = mctx.filectx(f2).data()
348 t = mctx.filectx(f2).data()
349 repo.wwrite(fd, t, flags)
349 repo.wwrite(fd, t, flags)
350 updated += 1
350 updated += 1
351 elif m == "dr": # divergent renames
351 elif m == "dr": # divergent renames
352 fl = a[2]
352 fl = a[2]
353 repo.ui.warn(_("warning: detected divergent renames of %s to:\n") % f)
353 repo.ui.warn(_("warning: detected divergent renames of %s to:\n") % f)
354 for nf in fl:
354 for nf in fl:
355 repo.ui.warn(" %s\n" % nf)
355 repo.ui.warn(" %s\n" % nf)
356 elif m == "e": # exec
356 elif m == "e": # exec
357 flags = a[2]
357 flags = a[2]
358 util.set_flags(repo.wjoin(f), 'l' in flags, 'x' in flags)
358 util.set_flags(repo.wjoin(f), 'l' in flags, 'x' in flags)
359
359
360 return updated, merged, removed, unresolved
360 return updated, merged, removed, unresolved
361
361
362 def recordupdates(repo, action, branchmerge):
362 def recordupdates(repo, action, branchmerge):
363 "record merge actions to the dirstate"
363 "record merge actions to the dirstate"
364
364
365 for a in action:
365 for a in action:
366 f, m = a[:2]
366 f, m = a[:2]
367 if m == "r": # remove
367 if m == "r": # remove
368 if branchmerge:
368 if branchmerge:
369 repo.dirstate.remove(f)
369 repo.dirstate.remove(f)
370 else:
370 else:
371 repo.dirstate.forget(f)
371 repo.dirstate.forget(f)
372 elif m == "a": # re-add
372 elif m == "a": # re-add
373 if not branchmerge:
373 if not branchmerge:
374 repo.dirstate.add(f)
374 repo.dirstate.add(f)
375 elif m == "f": # forget
375 elif m == "f": # forget
376 repo.dirstate.forget(f)
376 repo.dirstate.forget(f)
377 elif m == "e": # exec change
377 elif m == "e": # exec change
378 repo.dirstate.normallookup(f)
378 repo.dirstate.normallookup(f)
379 elif m == "g": # get
379 elif m == "g": # get
380 if branchmerge:
380 if branchmerge:
381 repo.dirstate.normaldirty(f)
381 repo.dirstate.normaldirty(f)
382 else:
382 else:
383 repo.dirstate.normal(f)
383 repo.dirstate.normal(f)
384 elif m == "m": # merge
384 elif m == "m": # merge
385 f2, fd, flag, move = a[2:]
385 f2, fd, flag, move = a[2:]
386 if branchmerge:
386 if branchmerge:
387 # We've done a branch merge, mark this file as merged
387 # We've done a branch merge, mark this file as merged
388 # so that we properly record the merger later
388 # so that we properly record the merger later
389 repo.dirstate.merge(fd)
389 repo.dirstate.merge(fd)
390 if f != f2: # copy/rename
390 if f != f2: # copy/rename
391 if move:
391 if move:
392 repo.dirstate.remove(f)
392 repo.dirstate.remove(f)
393 if f != fd:
393 if f != fd:
394 repo.dirstate.copy(f, fd)
394 repo.dirstate.copy(f, fd)
395 else:
395 else:
396 repo.dirstate.copy(f2, fd)
396 repo.dirstate.copy(f2, fd)
397 else:
397 else:
398 # We've update-merged a locally modified file, so
398 # We've update-merged a locally modified file, so
399 # we set the dirstate to emulate a normal checkout
399 # we set the dirstate to emulate a normal checkout
400 # of that file some time in the past. Thus our
400 # of that file some time in the past. Thus our
401 # merge will appear as a normal local file
401 # merge will appear as a normal local file
402 # modification.
402 # modification.
403 repo.dirstate.normallookup(fd)
403 repo.dirstate.normallookup(fd)
404 if move:
404 if move:
405 repo.dirstate.forget(f)
405 repo.dirstate.forget(f)
406 elif m == "d": # directory rename
406 elif m == "d": # directory rename
407 f2, fd, flag = a[2:]
407 f2, fd, flag = a[2:]
408 if not f2 and f not in repo.dirstate:
408 if not f2 and f not in repo.dirstate:
409 # untracked file moved
409 # untracked file moved
410 continue
410 continue
411 if branchmerge:
411 if branchmerge:
412 repo.dirstate.add(fd)
412 repo.dirstate.add(fd)
413 if f:
413 if f:
414 repo.dirstate.remove(f)
414 repo.dirstate.remove(f)
415 repo.dirstate.copy(f, fd)
415 repo.dirstate.copy(f, fd)
416 if f2:
416 if f2:
417 repo.dirstate.copy(f2, fd)
417 repo.dirstate.copy(f2, fd)
418 else:
418 else:
419 repo.dirstate.normal(fd)
419 repo.dirstate.normal(fd)
420 if f:
420 if f:
421 repo.dirstate.forget(f)
421 repo.dirstate.forget(f)
422
422
423 def update(repo, node, branchmerge, force, partial):
423 def update(repo, node, branchmerge, force, partial):
424 """
424 """
425 Perform a merge between the working directory and the given node
425 Perform a merge between the working directory and the given node
426
426
427 branchmerge = whether to merge between branches
427 branchmerge = whether to merge between branches
428 force = whether to force branch merging or file overwriting
428 force = whether to force branch merging or file overwriting
429 partial = a function to filter file lists (dirstate not updated)
429 partial = a function to filter file lists (dirstate not updated)
430 """
430 """
431
431
432 wlock = repo.wlock()
432 wlock = repo.wlock()
433 try:
433 try:
434 wc = repo[None]
434 wc = repo[None]
435 if node is None:
435 if node is None:
436 # tip of current branch
436 # tip of current branch
437 try:
437 try:
438 node = repo.branchtags()[wc.branch()]
438 node = repo.branchtags()[wc.branch()]
439 except KeyError:
439 except KeyError:
440 if wc.branch() == "default": # no default branch!
440 if wc.branch() == "default": # no default branch!
441 node = repo.lookup("tip") # update to tip
441 node = repo.lookup("tip") # update to tip
442 else:
442 else:
443 raise util.Abort(_("branch %s not found") % wc.branch())
443 raise util.Abort(_("branch %s not found") % wc.branch())
444 overwrite = force and not branchmerge
444 overwrite = force and not branchmerge
445 pl = wc.parents()
445 pl = wc.parents()
446 p1, p2 = pl[0], repo[node]
446 p1, p2 = pl[0], repo[node]
447 pa = p1.ancestor(p2)
447 pa = p1.ancestor(p2)
448 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
448 fp1, fp2, xp1, xp2 = p1.node(), p2.node(), str(p1), str(p2)
449 fastforward = False
449 fastforward = False
450
450
451 ### check phase
451 ### check phase
452 if not overwrite and len(pl) > 1:
452 if not overwrite and len(pl) > 1:
453 raise util.Abort(_("outstanding uncommitted merges"))
453 raise util.Abort(_("outstanding uncommitted merges"))
454 if branchmerge:
454 if branchmerge:
455 if pa == p2:
455 if pa == p2:
456 raise util.Abort(_("can't merge with ancestor"))
456 raise util.Abort(_("can't merge with ancestor"))
457 elif pa == p1:
457 elif pa == p1:
458 if p1.branch() != p2.branch():
458 if p1.branch() != p2.branch():
459 fastforward = True
459 fastforward = True
460 else:
460 else:
461 raise util.Abort(_("nothing to merge (use 'hg update'"
461 raise util.Abort(_("nothing to merge (use 'hg update'"
462 " or check 'hg heads')"))
462 " or check 'hg heads')"))
463 if not force and (wc.files() or wc.deleted()):
463 if not force and (wc.files() or wc.deleted()):
464 raise util.Abort(_("outstanding uncommitted changes"))
464 raise util.Abort(_("outstanding uncommitted changes"))
465 elif not overwrite:
465 elif not overwrite:
466 if pa == p1 or pa == p2: # linear
466 if pa == p1 or pa == p2: # linear
467 pass # all good
467 pass # all good
468 elif p1.branch() == p2.branch():
468 elif p1.branch() == p2.branch():
469 if wc.files() or wc.deleted():
469 if wc.files() or wc.deleted():
470 raise util.Abort(_("crosses branches (use 'hg merge' or "
470 raise util.Abort(_("crosses branches (use 'hg merge' or "
471 "'hg update -C' to discard changes)"))
471 "'hg update -C' to discard changes)"))
472 raise util.Abort(_("crosses branches (use 'hg merge' "
472 raise util.Abort(_("crosses branches (use 'hg merge' "
473 "or 'hg update -C')"))
473 "or 'hg update -C')"))
474 elif wc.files() or wc.deleted():
474 elif wc.files() or wc.deleted():
475 raise util.Abort(_("crosses named branches (use "
475 raise util.Abort(_("crosses named branches (use "
476 "'hg update -C' to discard changes)"))
476 "'hg update -C' to discard changes)"))
477 else:
477 else:
478 # Allow jumping branches if there are no changes
478 # Allow jumping branches if there are no changes
479 overwrite = True
479 overwrite = True
480
480
481 ### calculate phase
481 ### calculate phase
482 action = []
482 action = []
483 if not force:
483 if not force:
484 _checkunknown(wc, p2)
484 _checkunknown(wc, p2)
485 if not util.checkcase(repo.path):
485 if not util.checkcase(repo.path):
486 _checkcollision(p2)
486 _checkcollision(p2)
487 action += _forgetremoved(wc, p2, branchmerge)
487 action += _forgetremoved(wc, p2, branchmerge)
488 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
488 action += manifestmerge(repo, wc, p2, pa, overwrite, partial)
489
489
490 ### apply phase
490 ### apply phase
491 if not branchmerge: # just jump to the new rev
491 if not branchmerge: # just jump to the new rev
492 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
492 fp1, fp2, xp1, xp2 = fp2, nullid, xp2, ''
493 if not partial:
493 if not partial:
494 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
494 repo.hook('preupdate', throw=True, parent1=xp1, parent2=xp2)
495
495
496 stats = applyupdates(repo, action, wc, p2)
496 stats = applyupdates(repo, action, wc, p2)
497
497
498 if not partial:
498 if not partial:
499 recordupdates(repo, action, branchmerge)
499 recordupdates(repo, action, branchmerge)
500 repo.dirstate.setparents(fp1, fp2)
500 repo.dirstate.setparents(fp1, fp2)
501 if not branchmerge and not fastforward:
501 if not branchmerge and not fastforward:
502 repo.dirstate.setbranch(p2.branch())
502 repo.dirstate.setbranch(p2.branch())
503 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
503 repo.hook('update', parent1=xp1, parent2=xp2, error=stats[3])
504
504
505 return stats
505 return stats
506 finally:
506 finally:
507 wlock.release()
507 wlock.release()
@@ -1,1371 +1,1369 b''
1 """
1 """
2 revlog.py - storage back-end for mercurial
2 revlog.py - storage back-end for mercurial
3
3
4 This provides efficient delta storage with O(1) retrieve and append
4 This provides efficient delta storage with O(1) retrieve and append
5 and O(changes) merge between branches
5 and O(changes) merge between branches
6
6
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
7 Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
8
8
9 This software may be used and distributed according to the terms
9 This software may be used and distributed according to the terms
10 of the GNU General Public License, incorporated herein by reference.
10 of the GNU General Public License, incorporated herein by reference.
11 """
11 """
12
12
13 # import stuff from node for others to import from revlog
13 # import stuff from node for others to import from revlog
14 from node import bin, hex, nullid, nullrev, short #@UnusedImport
14 from node import bin, hex, nullid, nullrev, short #@UnusedImport
15 from i18n import _
15 from i18n import _
16 import changegroup, errno, ancestor, mdiff, parsers
16 import changegroup, errno, ancestor, mdiff, parsers
17 import struct, util, zlib, error
17 import struct, util, zlib, error
18
18
19 _pack = struct.pack
19 _pack = struct.pack
20 _unpack = struct.unpack
20 _unpack = struct.unpack
21 _compress = zlib.compress
21 _compress = zlib.compress
22 _decompress = zlib.decompress
22 _decompress = zlib.decompress
23 _sha = util.sha1
23 _sha = util.sha1
24
24
25 # revlog flags
25 # revlog flags
26 REVLOGV0 = 0
26 REVLOGV0 = 0
27 REVLOGNG = 1
27 REVLOGNG = 1
28 REVLOGNGINLINEDATA = (1 << 16)
28 REVLOGNGINLINEDATA = (1 << 16)
29 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
29 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
30 REVLOG_DEFAULT_FORMAT = REVLOGNG
30 REVLOG_DEFAULT_FORMAT = REVLOGNG
31 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
31 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
32
32
33 RevlogError = error.RevlogError
33 RevlogError = error.RevlogError
34 LookupError = error.LookupError
34 LookupError = error.LookupError
35
35
36 def getoffset(q):
36 def getoffset(q):
37 return int(q >> 16)
37 return int(q >> 16)
38
38
39 def gettype(q):
39 def gettype(q):
40 return int(q & 0xFFFF)
40 return int(q & 0xFFFF)
41
41
42 def offset_type(offset, type):
42 def offset_type(offset, type):
43 return long(long(offset) << 16 | type)
43 return long(long(offset) << 16 | type)
44
44
45 nullhash = _sha(nullid)
45 nullhash = _sha(nullid)
46
46
47 def hash(text, p1, p2):
47 def hash(text, p1, p2):
48 """generate a hash from the given text and its parent hashes
48 """generate a hash from the given text and its parent hashes
49
49
50 This hash combines both the current file contents and its history
50 This hash combines both the current file contents and its history
51 in a manner that makes it easy to distinguish nodes with the same
51 in a manner that makes it easy to distinguish nodes with the same
52 content in the revision graph.
52 content in the revision graph.
53 """
53 """
54 # As of now, if one of the parent node is null, p2 is null
54 # As of now, if one of the parent node is null, p2 is null
55 if p2 == nullid:
55 if p2 == nullid:
56 # deep copy of a hash is faster than creating one
56 # deep copy of a hash is faster than creating one
57 s = nullhash.copy()
57 s = nullhash.copy()
58 s.update(p1)
58 s.update(p1)
59 else:
59 else:
60 # none of the parent nodes are nullid
60 # none of the parent nodes are nullid
61 l = [p1, p2]
61 l = [p1, p2]
62 l.sort()
62 l.sort()
63 s = _sha(l[0])
63 s = _sha(l[0])
64 s.update(l[1])
64 s.update(l[1])
65 s.update(text)
65 s.update(text)
66 return s.digest()
66 return s.digest()
67
67
68 def compress(text):
68 def compress(text):
69 """ generate a possibly-compressed representation of text """
69 """ generate a possibly-compressed representation of text """
70 if not text:
70 if not text:
71 return ("", text)
71 return ("", text)
72 l = len(text)
72 l = len(text)
73 bin = None
73 bin = None
74 if l < 44:
74 if l < 44:
75 pass
75 pass
76 elif l > 1000000:
76 elif l > 1000000:
77 # zlib makes an internal copy, thus doubling memory usage for
77 # zlib makes an internal copy, thus doubling memory usage for
78 # large files, so lets do this in pieces
78 # large files, so lets do this in pieces
79 z = zlib.compressobj()
79 z = zlib.compressobj()
80 p = []
80 p = []
81 pos = 0
81 pos = 0
82 while pos < l:
82 while pos < l:
83 pos2 = pos + 2**20
83 pos2 = pos + 2**20
84 p.append(z.compress(text[pos:pos2]))
84 p.append(z.compress(text[pos:pos2]))
85 pos = pos2
85 pos = pos2
86 p.append(z.flush())
86 p.append(z.flush())
87 if sum(map(len, p)) < l:
87 if sum(map(len, p)) < l:
88 bin = "".join(p)
88 bin = "".join(p)
89 else:
89 else:
90 bin = _compress(text)
90 bin = _compress(text)
91 if bin is None or len(bin) > l:
91 if bin is None or len(bin) > l:
92 if text[0] == '\0':
92 if text[0] == '\0':
93 return ("", text)
93 return ("", text)
94 return ('u', text)
94 return ('u', text)
95 return ("", bin)
95 return ("", bin)
96
96
97 def decompress(bin):
97 def decompress(bin):
98 """ decompress the given input """
98 """ decompress the given input """
99 if not bin:
99 if not bin:
100 return bin
100 return bin
101 t = bin[0]
101 t = bin[0]
102 if t == '\0':
102 if t == '\0':
103 return bin
103 return bin
104 if t == 'x':
104 if t == 'x':
105 return _decompress(bin)
105 return _decompress(bin)
106 if t == 'u':
106 if t == 'u':
107 return bin[1:]
107 return bin[1:]
108 raise RevlogError(_("unknown compression type %r") % t)
108 raise RevlogError(_("unknown compression type %r") % t)
109
109
110 class lazyparser(object):
110 class lazyparser(object):
111 """
111 """
112 this class avoids the need to parse the entirety of large indices
112 this class avoids the need to parse the entirety of large indices
113 """
113 """
114
114
115 # lazyparser is not safe to use on windows if win32 extensions not
115 # lazyparser is not safe to use on windows if win32 extensions not
116 # available. it keeps file handle open, which make it not possible
116 # available. it keeps file handle open, which make it not possible
117 # to break hardlinks on local cloned repos.
117 # to break hardlinks on local cloned repos.
118
118
119 def __init__(self, dataf, size):
119 def __init__(self, dataf, size):
120 self.dataf = dataf
120 self.dataf = dataf
121 self.s = struct.calcsize(indexformatng)
121 self.s = struct.calcsize(indexformatng)
122 self.datasize = size
122 self.datasize = size
123 self.l = size/self.s
123 self.l = size/self.s
124 self.index = [None] * self.l
124 self.index = [None] * self.l
125 self.map = {nullid: nullrev}
125 self.map = {nullid: nullrev}
126 self.allmap = 0
126 self.allmap = 0
127 self.all = 0
127 self.all = 0
128 self.mapfind_count = 0
128 self.mapfind_count = 0
129
129
130 def loadmap(self):
130 def loadmap(self):
131 """
131 """
132 during a commit, we need to make sure the rev being added is
132 during a commit, we need to make sure the rev being added is
133 not a duplicate. This requires loading the entire index,
133 not a duplicate. This requires loading the entire index,
134 which is fairly slow. loadmap can load up just the node map,
134 which is fairly slow. loadmap can load up just the node map,
135 which takes much less time.
135 which takes much less time.
136 """
136 """
137 if self.allmap:
137 if self.allmap:
138 return
138 return
139 end = self.datasize
139 end = self.datasize
140 self.allmap = 1
140 self.allmap = 1
141 cur = 0
141 cur = 0
142 count = 0
142 count = 0
143 blocksize = self.s * 256
143 blocksize = self.s * 256
144 self.dataf.seek(0)
144 self.dataf.seek(0)
145 while cur < end:
145 while cur < end:
146 data = self.dataf.read(blocksize)
146 data = self.dataf.read(blocksize)
147 off = 0
147 off = 0
148 for x in xrange(256):
148 for x in xrange(256):
149 n = data[off + ngshaoffset:off + ngshaoffset + 20]
149 n = data[off + ngshaoffset:off + ngshaoffset + 20]
150 self.map[n] = count
150 self.map[n] = count
151 count += 1
151 count += 1
152 if count >= self.l:
152 if count >= self.l:
153 break
153 break
154 off += self.s
154 off += self.s
155 cur += blocksize
155 cur += blocksize
156
156
157 def loadblock(self, blockstart, blocksize, data=None):
157 def loadblock(self, blockstart, blocksize, data=None):
158 if self.all:
158 if self.all:
159 return
159 return
160 if data is None:
160 if data is None:
161 self.dataf.seek(blockstart)
161 self.dataf.seek(blockstart)
162 if blockstart + blocksize > self.datasize:
162 if blockstart + blocksize > self.datasize:
163 # the revlog may have grown since we've started running,
163 # the revlog may have grown since we've started running,
164 # but we don't have space in self.index for more entries.
164 # but we don't have space in self.index for more entries.
165 # limit blocksize so that we don't get too much data.
165 # limit blocksize so that we don't get too much data.
166 blocksize = max(self.datasize - blockstart, 0)
166 blocksize = max(self.datasize - blockstart, 0)
167 data = self.dataf.read(blocksize)
167 data = self.dataf.read(blocksize)
168 lend = len(data) / self.s
168 lend = len(data) / self.s
169 i = blockstart / self.s
169 i = blockstart / self.s
170 off = 0
170 off = 0
171 # lazyindex supports __delitem__
171 # lazyindex supports __delitem__
172 if lend > len(self.index) - i:
172 if lend > len(self.index) - i:
173 lend = len(self.index) - i
173 lend = len(self.index) - i
174 for x in xrange(lend):
174 for x in xrange(lend):
175 if self.index[i + x] == None:
175 if self.index[i + x] == None:
176 b = data[off : off + self.s]
176 b = data[off : off + self.s]
177 self.index[i + x] = b
177 self.index[i + x] = b
178 n = b[ngshaoffset:ngshaoffset + 20]
178 n = b[ngshaoffset:ngshaoffset + 20]
179 self.map[n] = i + x
179 self.map[n] = i + x
180 off += self.s
180 off += self.s
181
181
182 def findnode(self, node):
182 def findnode(self, node):
183 """search backwards through the index file for a specific node"""
183 """search backwards through the index file for a specific node"""
184 if self.allmap:
184 if self.allmap:
185 return None
185 return None
186
186
187 # hg log will cause many many searches for the manifest
187 # hg log will cause many many searches for the manifest
188 # nodes. After we get called a few times, just load the whole
188 # nodes. After we get called a few times, just load the whole
189 # thing.
189 # thing.
190 if self.mapfind_count > 8:
190 if self.mapfind_count > 8:
191 self.loadmap()
191 self.loadmap()
192 if node in self.map:
192 if node in self.map:
193 return node
193 return node
194 return None
194 return None
195 self.mapfind_count += 1
195 self.mapfind_count += 1
196 last = self.l - 1
196 last = self.l - 1
197 while self.index[last] != None:
197 while self.index[last] != None:
198 if last == 0:
198 if last == 0:
199 self.all = 1
199 self.all = 1
200 self.allmap = 1
200 self.allmap = 1
201 return None
201 return None
202 last -= 1
202 last -= 1
203 end = (last + 1) * self.s
203 end = (last + 1) * self.s
204 blocksize = self.s * 256
204 blocksize = self.s * 256
205 while end >= 0:
205 while end >= 0:
206 start = max(end - blocksize, 0)
206 start = max(end - blocksize, 0)
207 self.dataf.seek(start)
207 self.dataf.seek(start)
208 data = self.dataf.read(end - start)
208 data = self.dataf.read(end - start)
209 findend = end - start
209 findend = end - start
210 while True:
210 while True:
211 # we're searching backwards, so we have to make sure
211 # we're searching backwards, so we have to make sure
212 # we don't find a changeset where this node is a parent
212 # we don't find a changeset where this node is a parent
213 off = data.find(node, 0, findend)
213 off = data.find(node, 0, findend)
214 findend = off
214 findend = off
215 if off >= 0:
215 if off >= 0:
216 i = off / self.s
216 i = off / self.s
217 off = i * self.s
217 off = i * self.s
218 n = data[off + ngshaoffset:off + ngshaoffset + 20]
218 n = data[off + ngshaoffset:off + ngshaoffset + 20]
219 if n == node:
219 if n == node:
220 self.map[n] = i + start / self.s
220 self.map[n] = i + start / self.s
221 return node
221 return node
222 else:
222 else:
223 break
223 break
224 end -= blocksize
224 end -= blocksize
225 return None
225 return None
226
226
227 def loadindex(self, i=None, end=None):
227 def loadindex(self, i=None, end=None):
228 if self.all:
228 if self.all:
229 return
229 return
230 all = False
230 all = False
231 if i == None:
231 if i == None:
232 blockstart = 0
232 blockstart = 0
233 blocksize = (65536 / self.s) * self.s
233 blocksize = (65536 / self.s) * self.s
234 end = self.datasize
234 end = self.datasize
235 all = True
235 all = True
236 else:
236 else:
237 if end:
237 if end:
238 blockstart = i * self.s
238 blockstart = i * self.s
239 end = end * self.s
239 end = end * self.s
240 blocksize = end - blockstart
240 blocksize = end - blockstart
241 else:
241 else:
242 blockstart = (i & ~1023) * self.s
242 blockstart = (i & ~1023) * self.s
243 blocksize = self.s * 1024
243 blocksize = self.s * 1024
244 end = blockstart + blocksize
244 end = blockstart + blocksize
245 while blockstart < end:
245 while blockstart < end:
246 self.loadblock(blockstart, blocksize)
246 self.loadblock(blockstart, blocksize)
247 blockstart += blocksize
247 blockstart += blocksize
248 if all:
248 if all:
249 self.all = True
249 self.all = True
250
250
251 class lazyindex(object):
251 class lazyindex(object):
252 """a lazy version of the index array"""
252 """a lazy version of the index array"""
253 def __init__(self, parser):
253 def __init__(self, parser):
254 self.p = parser
254 self.p = parser
255 def __len__(self):
255 def __len__(self):
256 return len(self.p.index)
256 return len(self.p.index)
257 def load(self, pos):
257 def load(self, pos):
258 if pos < 0:
258 if pos < 0:
259 pos += len(self.p.index)
259 pos += len(self.p.index)
260 self.p.loadindex(pos)
260 self.p.loadindex(pos)
261 return self.p.index[pos]
261 return self.p.index[pos]
262 def __getitem__(self, pos):
262 def __getitem__(self, pos):
263 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
263 return _unpack(indexformatng, self.p.index[pos] or self.load(pos))
264 def __setitem__(self, pos, item):
264 def __setitem__(self, pos, item):
265 self.p.index[pos] = _pack(indexformatng, *item)
265 self.p.index[pos] = _pack(indexformatng, *item)
266 def __delitem__(self, pos):
266 def __delitem__(self, pos):
267 del self.p.index[pos]
267 del self.p.index[pos]
268 def insert(self, pos, e):
268 def insert(self, pos, e):
269 self.p.index.insert(pos, _pack(indexformatng, *e))
269 self.p.index.insert(pos, _pack(indexformatng, *e))
270 def append(self, e):
270 def append(self, e):
271 self.p.index.append(_pack(indexformatng, *e))
271 self.p.index.append(_pack(indexformatng, *e))
272
272
273 class lazymap(object):
273 class lazymap(object):
274 """a lazy version of the node map"""
274 """a lazy version of the node map"""
275 def __init__(self, parser):
275 def __init__(self, parser):
276 self.p = parser
276 self.p = parser
277 def load(self, key):
277 def load(self, key):
278 n = self.p.findnode(key)
278 n = self.p.findnode(key)
279 if n == None:
279 if n == None:
280 raise KeyError(key)
280 raise KeyError(key)
281 def __contains__(self, key):
281 def __contains__(self, key):
282 if key in self.p.map:
282 if key in self.p.map:
283 return True
283 return True
284 self.p.loadmap()
284 self.p.loadmap()
285 return key in self.p.map
285 return key in self.p.map
286 def __iter__(self):
286 def __iter__(self):
287 yield nullid
287 yield nullid
288 for i in xrange(self.p.l):
288 for i in xrange(self.p.l):
289 ret = self.p.index[i]
289 ret = self.p.index[i]
290 if not ret:
290 if not ret:
291 self.p.loadindex(i)
291 self.p.loadindex(i)
292 ret = self.p.index[i]
292 ret = self.p.index[i]
293 if isinstance(ret, str):
293 if isinstance(ret, str):
294 ret = _unpack(indexformatng, ret)
294 ret = _unpack(indexformatng, ret)
295 yield ret[7]
295 yield ret[7]
296 def __getitem__(self, key):
296 def __getitem__(self, key):
297 try:
297 try:
298 return self.p.map[key]
298 return self.p.map[key]
299 except KeyError:
299 except KeyError:
300 try:
300 try:
301 self.load(key)
301 self.load(key)
302 return self.p.map[key]
302 return self.p.map[key]
303 except KeyError:
303 except KeyError:
304 raise KeyError("node " + hex(key))
304 raise KeyError("node " + hex(key))
305 def __setitem__(self, key, val):
305 def __setitem__(self, key, val):
306 self.p.map[key] = val
306 self.p.map[key] = val
307 def __delitem__(self, key):
307 def __delitem__(self, key):
308 del self.p.map[key]
308 del self.p.map[key]
309
309
310 indexformatv0 = ">4l20s20s20s"
310 indexformatv0 = ">4l20s20s20s"
311 v0shaoffset = 56
311 v0shaoffset = 56
312
312
313 class revlogoldio(object):
313 class revlogoldio(object):
314 def __init__(self):
314 def __init__(self):
315 self.size = struct.calcsize(indexformatv0)
315 self.size = struct.calcsize(indexformatv0)
316
316
317 def parseindex(self, fp, inline):
317 def parseindex(self, fp, inline):
318 s = self.size
318 s = self.size
319 index = []
319 index = []
320 nodemap = {nullid: nullrev}
320 nodemap = {nullid: nullrev}
321 n = off = 0
321 n = off = 0
322 data = fp.read()
322 data = fp.read()
323 l = len(data)
323 l = len(data)
324 while off + s <= l:
324 while off + s <= l:
325 cur = data[off:off + s]
325 cur = data[off:off + s]
326 off += s
326 off += s
327 e = _unpack(indexformatv0, cur)
327 e = _unpack(indexformatv0, cur)
328 # transform to revlogv1 format
328 # transform to revlogv1 format
329 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
329 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
330 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
330 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
331 index.append(e2)
331 index.append(e2)
332 nodemap[e[6]] = n
332 nodemap[e[6]] = n
333 n += 1
333 n += 1
334
334
335 return index, nodemap, None
335 return index, nodemap, None
336
336
337 def packentry(self, entry, node, version, rev):
337 def packentry(self, entry, node, version, rev):
338 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
338 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
339 node(entry[5]), node(entry[6]), entry[7])
339 node(entry[5]), node(entry[6]), entry[7])
340 return _pack(indexformatv0, *e2)
340 return _pack(indexformatv0, *e2)
341
341
342 # index ng:
342 # index ng:
343 # 6 bytes offset
343 # 6 bytes offset
344 # 2 bytes flags
344 # 2 bytes flags
345 # 4 bytes compressed length
345 # 4 bytes compressed length
346 # 4 bytes uncompressed length
346 # 4 bytes uncompressed length
347 # 4 bytes: base rev
347 # 4 bytes: base rev
348 # 4 bytes link rev
348 # 4 bytes link rev
349 # 4 bytes parent 1 rev
349 # 4 bytes parent 1 rev
350 # 4 bytes parent 2 rev
350 # 4 bytes parent 2 rev
351 # 32 bytes: nodeid
351 # 32 bytes: nodeid
352 indexformatng = ">Qiiiiii20s12x"
352 indexformatng = ">Qiiiiii20s12x"
353 ngshaoffset = 32
353 ngshaoffset = 32
354 versionformat = ">I"
354 versionformat = ">I"
355
355
356 class revlogio(object):
356 class revlogio(object):
357 def __init__(self):
357 def __init__(self):
358 self.size = struct.calcsize(indexformatng)
358 self.size = struct.calcsize(indexformatng)
359
359
360 def parseindex(self, fp, inline):
360 def parseindex(self, fp, inline):
361 try:
361 try:
362 size = util.fstat(fp).st_size
362 size = util.fstat(fp).st_size
363 except AttributeError:
363 except AttributeError:
364 size = 0
364 size = 0
365
365
366 if util.openhardlinks() and not inline and size > 1000000:
366 if util.openhardlinks() and not inline and size > 1000000:
367 # big index, let's parse it on demand
367 # big index, let's parse it on demand
368 parser = lazyparser(fp, size)
368 parser = lazyparser(fp, size)
369 index = lazyindex(parser)
369 index = lazyindex(parser)
370 nodemap = lazymap(parser)
370 nodemap = lazymap(parser)
371 e = list(index[0])
371 e = list(index[0])
372 type = gettype(e[0])
372 type = gettype(e[0])
373 e[0] = offset_type(0, type)
373 e[0] = offset_type(0, type)
374 index[0] = e
374 index[0] = e
375 return index, nodemap, None
375 return index, nodemap, None
376
376
377 data = fp.read()
377 data = fp.read()
378 # call the C implementation to parse the index data
378 # call the C implementation to parse the index data
379 index, nodemap, cache = parsers.parse_index(data, inline)
379 index, nodemap, cache = parsers.parse_index(data, inline)
380 return index, nodemap, cache
380 return index, nodemap, cache
381
381
382 def packentry(self, entry, node, version, rev):
382 def packentry(self, entry, node, version, rev):
383 p = _pack(indexformatng, *entry)
383 p = _pack(indexformatng, *entry)
384 if rev == 0:
384 if rev == 0:
385 p = _pack(versionformat, version) + p[4:]
385 p = _pack(versionformat, version) + p[4:]
386 return p
386 return p
387
387
388 class revlog(object):
388 class revlog(object):
389 """
389 """
390 the underlying revision storage object
390 the underlying revision storage object
391
391
392 A revlog consists of two parts, an index and the revision data.
392 A revlog consists of two parts, an index and the revision data.
393
393
394 The index is a file with a fixed record size containing
394 The index is a file with a fixed record size containing
395 information on each revision, including its nodeid (hash), the
395 information on each revision, including its nodeid (hash), the
396 nodeids of its parents, the position and offset of its data within
396 nodeids of its parents, the position and offset of its data within
397 the data file, and the revision it's based on. Finally, each entry
397 the data file, and the revision it's based on. Finally, each entry
398 contains a linkrev entry that can serve as a pointer to external
398 contains a linkrev entry that can serve as a pointer to external
399 data.
399 data.
400
400
401 The revision data itself is a linear collection of data chunks.
401 The revision data itself is a linear collection of data chunks.
402 Each chunk represents a revision and is usually represented as a
402 Each chunk represents a revision and is usually represented as a
403 delta against the previous chunk. To bound lookup time, runs of
403 delta against the previous chunk. To bound lookup time, runs of
404 deltas are limited to about 2 times the length of the original
404 deltas are limited to about 2 times the length of the original
405 version data. This makes retrieval of a version proportional to
405 version data. This makes retrieval of a version proportional to
406 its size, or O(1) relative to the number of revisions.
406 its size, or O(1) relative to the number of revisions.
407
407
408 Both pieces of the revlog are written to in an append-only
408 Both pieces of the revlog are written to in an append-only
409 fashion, which means we never need to rewrite a file to insert or
409 fashion, which means we never need to rewrite a file to insert or
410 remove data, and can use some simple techniques to avoid the need
410 remove data, and can use some simple techniques to avoid the need
411 for locking while reading.
411 for locking while reading.
412 """
412 """
413 def __init__(self, opener, indexfile):
413 def __init__(self, opener, indexfile):
414 """
414 """
415 create a revlog object
415 create a revlog object
416
416
417 opener is a function that abstracts the file opening operation
417 opener is a function that abstracts the file opening operation
418 and can be used to implement COW semantics or the like.
418 and can be used to implement COW semantics or the like.
419 """
419 """
420 self.indexfile = indexfile
420 self.indexfile = indexfile
421 self.datafile = indexfile[:-2] + ".d"
421 self.datafile = indexfile[:-2] + ".d"
422 self.opener = opener
422 self.opener = opener
423 self._cache = None
423 self._cache = None
424 self._chunkcache = None
424 self._chunkcache = None
425 self.nodemap = {nullid: nullrev}
425 self.nodemap = {nullid: nullrev}
426 self.index = []
426 self.index = []
427
427
428 v = REVLOG_DEFAULT_VERSION
428 v = REVLOG_DEFAULT_VERSION
429 if hasattr(opener, "defversion"):
429 if hasattr(opener, "defversion"):
430 v = opener.defversion
430 v = opener.defversion
431 if v & REVLOGNG:
431 if v & REVLOGNG:
432 v |= REVLOGNGINLINEDATA
432 v |= REVLOGNGINLINEDATA
433
433
434 i = ""
434 i = ""
435 try:
435 try:
436 f = self.opener(self.indexfile)
436 f = self.opener(self.indexfile)
437 i = f.read(4)
437 i = f.read(4)
438 f.seek(0)
438 f.seek(0)
439 if len(i) > 0:
439 if len(i) > 0:
440 v = struct.unpack(versionformat, i)[0]
440 v = struct.unpack(versionformat, i)[0]
441 except IOError, inst:
441 except IOError, inst:
442 if inst.errno != errno.ENOENT:
442 if inst.errno != errno.ENOENT:
443 raise
443 raise
444
444
445 self.version = v
445 self.version = v
446 self._inline = v & REVLOGNGINLINEDATA
446 self._inline = v & REVLOGNGINLINEDATA
447 flags = v & ~0xFFFF
447 flags = v & ~0xFFFF
448 fmt = v & 0xFFFF
448 fmt = v & 0xFFFF
449 if fmt == REVLOGV0 and flags:
449 if fmt == REVLOGV0 and flags:
450 raise RevlogError(_("index %s unknown flags %#04x for format v0")
450 raise RevlogError(_("index %s unknown flags %#04x for format v0")
451 % (self.indexfile, flags >> 16))
451 % (self.indexfile, flags >> 16))
452 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
452 elif fmt == REVLOGNG and flags & ~REVLOGNGINLINEDATA:
453 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
453 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
454 % (self.indexfile, flags >> 16))
454 % (self.indexfile, flags >> 16))
455 elif fmt > REVLOGNG:
455 elif fmt > REVLOGNG:
456 raise RevlogError(_("index %s unknown format %d")
456 raise RevlogError(_("index %s unknown format %d")
457 % (self.indexfile, fmt))
457 % (self.indexfile, fmt))
458
458
459 self._io = revlogio()
459 self._io = revlogio()
460 if self.version == REVLOGV0:
460 if self.version == REVLOGV0:
461 self._io = revlogoldio()
461 self._io = revlogoldio()
462 if i:
462 if i:
463 try:
463 try:
464 d = self._io.parseindex(f, self._inline)
464 d = self._io.parseindex(f, self._inline)
465 except (ValueError, IndexError), e:
465 except (ValueError, IndexError), e:
466 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
466 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
467 self.index, self.nodemap, self._chunkcache = d
467 self.index, self.nodemap, self._chunkcache = d
468
468
469 # add the magic null revision at -1 (if it hasn't been done already)
469 # add the magic null revision at -1 (if it hasn't been done already)
470 if (self.index == [] or isinstance(self.index, lazyindex) or
470 if (self.index == [] or isinstance(self.index, lazyindex) or
471 self.index[-1][7] != nullid) :
471 self.index[-1][7] != nullid) :
472 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
472 self.index.append((0, 0, 0, -1, -1, -1, -1, nullid))
473
473
474 def _loadindex(self, start, end):
474 def _loadindex(self, start, end):
475 """load a block of indexes all at once from the lazy parser"""
475 """load a block of indexes all at once from the lazy parser"""
476 if isinstance(self.index, lazyindex):
476 if isinstance(self.index, lazyindex):
477 self.index.p.loadindex(start, end)
477 self.index.p.loadindex(start, end)
478
478
479 def _loadindexmap(self):
479 def _loadindexmap(self):
480 """loads both the map and the index from the lazy parser"""
480 """loads both the map and the index from the lazy parser"""
481 if isinstance(self.index, lazyindex):
481 if isinstance(self.index, lazyindex):
482 p = self.index.p
482 p = self.index.p
483 p.loadindex()
483 p.loadindex()
484 self.nodemap = p.map
484 self.nodemap = p.map
485
485
486 def _loadmap(self):
486 def _loadmap(self):
487 """loads the map from the lazy parser"""
487 """loads the map from the lazy parser"""
488 if isinstance(self.nodemap, lazymap):
488 if isinstance(self.nodemap, lazymap):
489 self.nodemap.p.loadmap()
489 self.nodemap.p.loadmap()
490 self.nodemap = self.nodemap.p.map
490 self.nodemap = self.nodemap.p.map
491
491
492 def tip(self):
492 def tip(self):
493 return self.node(len(self.index) - 2)
493 return self.node(len(self.index) - 2)
494 def __len__(self):
494 def __len__(self):
495 return len(self.index) - 1
495 return len(self.index) - 1
496 def __iter__(self):
496 def __iter__(self):
497 for i in xrange(len(self)):
497 for i in xrange(len(self)):
498 yield i
498 yield i
499 def rev(self, node):
499 def rev(self, node):
500 try:
500 try:
501 return self.nodemap[node]
501 return self.nodemap[node]
502 except KeyError:
502 except KeyError:
503 raise LookupError(node, self.indexfile, _('no node'))
503 raise LookupError(node, self.indexfile, _('no node'))
504 def node(self, rev):
504 def node(self, rev):
505 return self.index[rev][7]
505 return self.index[rev][7]
506 def linkrev(self, rev):
506 def linkrev(self, rev):
507 return self.index[rev][4]
507 return self.index[rev][4]
508 def parents(self, node):
508 def parents(self, node):
509 i = self.index
509 i = self.index
510 d = i[self.rev(node)]
510 d = i[self.rev(node)]
511 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
511 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
512 def parentrevs(self, rev):
512 def parentrevs(self, rev):
513 return self.index[rev][5:7]
513 return self.index[rev][5:7]
514 def start(self, rev):
514 def start(self, rev):
515 return int(self.index[rev][0] >> 16)
515 return int(self.index[rev][0] >> 16)
516 def end(self, rev):
516 def end(self, rev):
517 return self.start(rev) + self.length(rev)
517 return self.start(rev) + self.length(rev)
518 def length(self, rev):
518 def length(self, rev):
519 return self.index[rev][1]
519 return self.index[rev][1]
520 def base(self, rev):
520 def base(self, rev):
521 return self.index[rev][3]
521 return self.index[rev][3]
522
522
523 def size(self, rev):
523 def size(self, rev):
524 """return the length of the uncompressed text for a given revision"""
524 """return the length of the uncompressed text for a given revision"""
525 l = self.index[rev][2]
525 l = self.index[rev][2]
526 if l >= 0:
526 if l >= 0:
527 return l
527 return l
528
528
529 t = self.revision(self.node(rev))
529 t = self.revision(self.node(rev))
530 return len(t)
530 return len(t)
531
531
532 # alternate implementation, The advantage to this code is it
532 # alternate implementation, The advantage to this code is it
533 # will be faster for a single revision. But, the results are not
533 # will be faster for a single revision. But, the results are not
534 # cached, so finding the size of every revision will be slower.
534 # cached, so finding the size of every revision will be slower.
535 """
535 """
536 if self.cache and self.cache[1] == rev:
536 if self.cache and self.cache[1] == rev:
537 return len(self.cache[2])
537 return len(self.cache[2])
538
538
539 base = self.base(rev)
539 base = self.base(rev)
540 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
540 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
541 base = self.cache[1]
541 base = self.cache[1]
542 text = self.cache[2]
542 text = self.cache[2]
543 else:
543 else:
544 text = self.revision(self.node(base))
544 text = self.revision(self.node(base))
545
545
546 l = len(text)
546 l = len(text)
547 for x in xrange(base + 1, rev + 1):
547 for x in xrange(base + 1, rev + 1):
548 l = mdiff.patchedsize(l, self.chunk(x))
548 l = mdiff.patchedsize(l, self.chunk(x))
549 return l
549 return l
550 """
550 """
551
551
552 def reachable(self, node, stop=None):
552 def reachable(self, node, stop=None):
553 """return a hash of all nodes ancestral to a given node, including
553 """return a hash of all nodes ancestral to a given node, including
554 the node itself, stopping when stop is matched"""
554 the node itself, stopping when stop is matched"""
555 reachable = {}
555 reachable = {}
556 visit = [node]
556 visit = [node]
557 reachable[node] = 1
557 reachable[node] = 1
558 if stop:
558 if stop:
559 stopn = self.rev(stop)
559 stopn = self.rev(stop)
560 else:
560 else:
561 stopn = 0
561 stopn = 0
562 while visit:
562 while visit:
563 n = visit.pop(0)
563 n = visit.pop(0)
564 if n == stop:
564 if n == stop:
565 continue
565 continue
566 if n == nullid:
566 if n == nullid:
567 continue
567 continue
568 for p in self.parents(n):
568 for p in self.parents(n):
569 if self.rev(p) < stopn:
569 if self.rev(p) < stopn:
570 continue
570 continue
571 if p not in reachable:
571 if p not in reachable:
572 reachable[p] = 1
572 reachable[p] = 1
573 visit.append(p)
573 visit.append(p)
574 return reachable
574 return reachable
575
575
576 def ancestors(self, *revs):
576 def ancestors(self, *revs):
577 'Generate the ancestors of revs using a breadth-first visit'
577 'Generate the ancestors of revs using a breadth-first visit'
578 visit = list(revs)
578 visit = list(revs)
579 seen = set([nullrev])
579 seen = set([nullrev])
580 while visit:
580 while visit:
581 for parent in self.parentrevs(visit.pop(0)):
581 for parent in self.parentrevs(visit.pop(0)):
582 if parent not in seen:
582 if parent not in seen:
583 visit.append(parent)
583 visit.append(parent)
584 seen.add(parent)
584 seen.add(parent)
585 yield parent
585 yield parent
586
586
587 def descendants(self, *revs):
587 def descendants(self, *revs):
588 'Generate the descendants of revs in topological order'
588 'Generate the descendants of revs in topological order'
589 seen = set(revs)
589 seen = set(revs)
590 for i in xrange(min(revs) + 1, len(self)):
590 for i in xrange(min(revs) + 1, len(self)):
591 for x in self.parentrevs(i):
591 for x in self.parentrevs(i):
592 if x != nullrev and x in seen:
592 if x != nullrev and x in seen:
593 seen.add(i)
593 seen.add(i)
594 yield i
594 yield i
595 break
595 break
596
596
597 def findmissing(self, common=None, heads=None):
597 def findmissing(self, common=None, heads=None):
598 '''
598 '''
599 returns the topologically sorted list of nodes from the set:
599 returns the topologically sorted list of nodes from the set:
600 missing = (ancestors(heads) \ ancestors(common))
600 missing = (ancestors(heads) \ ancestors(common))
601
601
602 where ancestors() is the set of ancestors from heads, heads included
602 where ancestors() is the set of ancestors from heads, heads included
603
603
604 if heads is None, the heads of the revlog are used
604 if heads is None, the heads of the revlog are used
605 if common is None, nullid is assumed to be a common node
605 if common is None, nullid is assumed to be a common node
606 '''
606 '''
607 if common is None:
607 if common is None:
608 common = [nullid]
608 common = [nullid]
609 if heads is None:
609 if heads is None:
610 heads = self.heads()
610 heads = self.heads()
611
611
612 common = [self.rev(n) for n in common]
612 common = [self.rev(n) for n in common]
613 heads = [self.rev(n) for n in heads]
613 heads = [self.rev(n) for n in heads]
614
614
615 # we want the ancestors, but inclusive
615 # we want the ancestors, but inclusive
616 has = dict.fromkeys(self.ancestors(*common))
616 has = set(self.ancestors(*common))
617 has[nullrev] = None
617 has.add(nullrev)
618 for r in common:
618 has.update(common)
619 has[r] = None
620
619
621 # take all ancestors from heads that aren't in has
620 # take all ancestors from heads that aren't in has
622 missing = {}
621 missing = {}
623 visit = [r for r in heads if r not in has]
622 visit = [r for r in heads if r not in has]
624 while visit:
623 while visit:
625 r = visit.pop(0)
624 r = visit.pop(0)
626 if r in missing:
625 if r in missing:
627 continue
626 continue
628 else:
627 else:
629 missing[r] = None
628 missing[r] = None
630 for p in self.parentrevs(r):
629 for p in self.parentrevs(r):
631 if p not in has:
630 if p not in has:
632 visit.append(p)
631 visit.append(p)
633 missing = missing.keys()
632 missing = missing.keys()
634 missing.sort()
633 missing.sort()
635 return [self.node(r) for r in missing]
634 return [self.node(r) for r in missing]
636
635
637 def nodesbetween(self, roots=None, heads=None):
636 def nodesbetween(self, roots=None, heads=None):
638 """Return a tuple containing three elements. Elements 1 and 2 contain
637 """Return a tuple containing three elements. Elements 1 and 2 contain
639 a final list bases and heads after all the unreachable ones have been
638 a final list bases and heads after all the unreachable ones have been
640 pruned. Element 0 contains a topologically sorted list of all
639 pruned. Element 0 contains a topologically sorted list of all
641
640
642 nodes that satisfy these constraints:
641 nodes that satisfy these constraints:
643 1. All nodes must be descended from a node in roots (the nodes on
642 1. All nodes must be descended from a node in roots (the nodes on
644 roots are considered descended from themselves).
643 roots are considered descended from themselves).
645 2. All nodes must also be ancestors of a node in heads (the nodes in
644 2. All nodes must also be ancestors of a node in heads (the nodes in
646 heads are considered to be their own ancestors).
645 heads are considered to be their own ancestors).
647
646
648 If roots is unspecified, nullid is assumed as the only root.
647 If roots is unspecified, nullid is assumed as the only root.
649 If heads is unspecified, it is taken to be the output of the
648 If heads is unspecified, it is taken to be the output of the
650 heads method (i.e. a list of all nodes in the repository that
649 heads method (i.e. a list of all nodes in the repository that
651 have no children)."""
650 have no children)."""
652 nonodes = ([], [], [])
651 nonodes = ([], [], [])
653 if roots is not None:
652 if roots is not None:
654 roots = list(roots)
653 roots = list(roots)
655 if not roots:
654 if not roots:
656 return nonodes
655 return nonodes
657 lowestrev = min([self.rev(n) for n in roots])
656 lowestrev = min([self.rev(n) for n in roots])
658 else:
657 else:
659 roots = [nullid] # Everybody's a descendent of nullid
658 roots = [nullid] # Everybody's a descendent of nullid
660 lowestrev = nullrev
659 lowestrev = nullrev
661 if (lowestrev == nullrev) and (heads is None):
660 if (lowestrev == nullrev) and (heads is None):
662 # We want _all_ the nodes!
661 # We want _all_ the nodes!
663 return ([self.node(r) for r in self], [nullid], list(self.heads()))
662 return ([self.node(r) for r in self], [nullid], list(self.heads()))
664 if heads is None:
663 if heads is None:
665 # All nodes are ancestors, so the latest ancestor is the last
664 # All nodes are ancestors, so the latest ancestor is the last
666 # node.
665 # node.
667 highestrev = len(self) - 1
666 highestrev = len(self) - 1
668 # Set ancestors to None to signal that every node is an ancestor.
667 # Set ancestors to None to signal that every node is an ancestor.
669 ancestors = None
668 ancestors = None
670 # Set heads to an empty dictionary for later discovery of heads
669 # Set heads to an empty dictionary for later discovery of heads
671 heads = {}
670 heads = {}
672 else:
671 else:
673 heads = list(heads)
672 heads = list(heads)
674 if not heads:
673 if not heads:
675 return nonodes
674 return nonodes
676 ancestors = {}
675 ancestors = {}
677 # Turn heads into a dictionary so we can remove 'fake' heads.
676 # Turn heads into a dictionary so we can remove 'fake' heads.
678 # Also, later we will be using it to filter out the heads we can't
677 # Also, later we will be using it to filter out the heads we can't
679 # find from roots.
678 # find from roots.
680 heads = dict.fromkeys(heads, 0)
679 heads = dict.fromkeys(heads, 0)
681 # Start at the top and keep marking parents until we're done.
680 # Start at the top and keep marking parents until we're done.
682 nodestotag = heads.keys()
681 nodestotag = heads.keys()
683 # Remember where the top was so we can use it as a limit later.
682 # Remember where the top was so we can use it as a limit later.
684 highestrev = max([self.rev(n) for n in nodestotag])
683 highestrev = max([self.rev(n) for n in nodestotag])
685 while nodestotag:
684 while nodestotag:
686 # grab a node to tag
685 # grab a node to tag
687 n = nodestotag.pop()
686 n = nodestotag.pop()
688 # Never tag nullid
687 # Never tag nullid
689 if n == nullid:
688 if n == nullid:
690 continue
689 continue
691 # A node's revision number represents its place in a
690 # A node's revision number represents its place in a
692 # topologically sorted list of nodes.
691 # topologically sorted list of nodes.
693 r = self.rev(n)
692 r = self.rev(n)
694 if r >= lowestrev:
693 if r >= lowestrev:
695 if n not in ancestors:
694 if n not in ancestors:
696 # If we are possibly a descendent of one of the roots
695 # If we are possibly a descendent of one of the roots
697 # and we haven't already been marked as an ancestor
696 # and we haven't already been marked as an ancestor
698 ancestors[n] = 1 # Mark as ancestor
697 ancestors[n] = 1 # Mark as ancestor
699 # Add non-nullid parents to list of nodes to tag.
698 # Add non-nullid parents to list of nodes to tag.
700 nodestotag.extend([p for p in self.parents(n) if
699 nodestotag.extend([p for p in self.parents(n) if
701 p != nullid])
700 p != nullid])
702 elif n in heads: # We've seen it before, is it a fake head?
701 elif n in heads: # We've seen it before, is it a fake head?
703 # So it is, real heads should not be the ancestors of
702 # So it is, real heads should not be the ancestors of
704 # any other heads.
703 # any other heads.
705 heads.pop(n)
704 heads.pop(n)
706 if not ancestors:
705 if not ancestors:
707 return nonodes
706 return nonodes
708 # Now that we have our set of ancestors, we want to remove any
707 # Now that we have our set of ancestors, we want to remove any
709 # roots that are not ancestors.
708 # roots that are not ancestors.
710
709
711 # If one of the roots was nullid, everything is included anyway.
710 # If one of the roots was nullid, everything is included anyway.
712 if lowestrev > nullrev:
711 if lowestrev > nullrev:
713 # But, since we weren't, let's recompute the lowest rev to not
712 # But, since we weren't, let's recompute the lowest rev to not
714 # include roots that aren't ancestors.
713 # include roots that aren't ancestors.
715
714
716 # Filter out roots that aren't ancestors of heads
715 # Filter out roots that aren't ancestors of heads
717 roots = [n for n in roots if n in ancestors]
716 roots = [n for n in roots if n in ancestors]
718 # Recompute the lowest revision
717 # Recompute the lowest revision
719 if roots:
718 if roots:
720 lowestrev = min([self.rev(n) for n in roots])
719 lowestrev = min([self.rev(n) for n in roots])
721 else:
720 else:
722 # No more roots? Return empty list
721 # No more roots? Return empty list
723 return nonodes
722 return nonodes
724 else:
723 else:
725 # We are descending from nullid, and don't need to care about
724 # We are descending from nullid, and don't need to care about
726 # any other roots.
725 # any other roots.
727 lowestrev = nullrev
726 lowestrev = nullrev
728 roots = [nullid]
727 roots = [nullid]
729 # Transform our roots list into a 'set' (i.e. a dictionary where the
728 # Transform our roots list into a set.
730 # values don't matter.
729 descendents = set(roots)
731 descendents = dict.fromkeys(roots, 1)
732 # Also, keep the original roots so we can filter out roots that aren't
730 # Also, keep the original roots so we can filter out roots that aren't
733 # 'real' roots (i.e. are descended from other roots).
731 # 'real' roots (i.e. are descended from other roots).
734 roots = descendents.copy()
732 roots = descendents.copy()
735 # Our topologically sorted list of output nodes.
733 # Our topologically sorted list of output nodes.
736 orderedout = []
734 orderedout = []
737 # Don't start at nullid since we don't want nullid in our output list,
735 # Don't start at nullid since we don't want nullid in our output list,
738 # and if nullid shows up in descedents, empty parents will look like
736 # and if nullid shows up in descedents, empty parents will look like
739 # they're descendents.
737 # they're descendents.
740 for r in xrange(max(lowestrev, 0), highestrev + 1):
738 for r in xrange(max(lowestrev, 0), highestrev + 1):
741 n = self.node(r)
739 n = self.node(r)
742 isdescendent = False
740 isdescendent = False
743 if lowestrev == nullrev: # Everybody is a descendent of nullid
741 if lowestrev == nullrev: # Everybody is a descendent of nullid
744 isdescendent = True
742 isdescendent = True
745 elif n in descendents:
743 elif n in descendents:
746 # n is already a descendent
744 # n is already a descendent
747 isdescendent = True
745 isdescendent = True
748 # This check only needs to be done here because all the roots
746 # This check only needs to be done here because all the roots
749 # will start being marked is descendents before the loop.
747 # will start being marked is descendents before the loop.
750 if n in roots:
748 if n in roots:
751 # If n was a root, check if it's a 'real' root.
749 # If n was a root, check if it's a 'real' root.
752 p = tuple(self.parents(n))
750 p = tuple(self.parents(n))
753 # If any of its parents are descendents, it's not a root.
751 # If any of its parents are descendents, it's not a root.
754 if (p[0] in descendents) or (p[1] in descendents):
752 if (p[0] in descendents) or (p[1] in descendents):
755 roots.pop(n)
753 roots.remove(n)
756 else:
754 else:
757 p = tuple(self.parents(n))
755 p = tuple(self.parents(n))
758 # A node is a descendent if either of its parents are
756 # A node is a descendent if either of its parents are
759 # descendents. (We seeded the dependents list with the roots
757 # descendents. (We seeded the dependents list with the roots
760 # up there, remember?)
758 # up there, remember?)
761 if (p[0] in descendents) or (p[1] in descendents):
759 if (p[0] in descendents) or (p[1] in descendents):
762 descendents[n] = 1
760 descendents.add(n)
763 isdescendent = True
761 isdescendent = True
764 if isdescendent and ((ancestors is None) or (n in ancestors)):
762 if isdescendent and ((ancestors is None) or (n in ancestors)):
765 # Only include nodes that are both descendents and ancestors.
763 # Only include nodes that are both descendents and ancestors.
766 orderedout.append(n)
764 orderedout.append(n)
767 if (ancestors is not None) and (n in heads):
765 if (ancestors is not None) and (n in heads):
768 # We're trying to figure out which heads are reachable
766 # We're trying to figure out which heads are reachable
769 # from roots.
767 # from roots.
770 # Mark this head as having been reached
768 # Mark this head as having been reached
771 heads[n] = 1
769 heads[n] = 1
772 elif ancestors is None:
770 elif ancestors is None:
773 # Otherwise, we're trying to discover the heads.
771 # Otherwise, we're trying to discover the heads.
774 # Assume this is a head because if it isn't, the next step
772 # Assume this is a head because if it isn't, the next step
775 # will eventually remove it.
773 # will eventually remove it.
776 heads[n] = 1
774 heads[n] = 1
777 # But, obviously its parents aren't.
775 # But, obviously its parents aren't.
778 for p in self.parents(n):
776 for p in self.parents(n):
779 heads.pop(p, None)
777 heads.pop(p, None)
780 heads = [n for n in heads.iterkeys() if heads[n] != 0]
778 heads = [n for n in heads.iterkeys() if heads[n] != 0]
781 roots = roots.keys()
779 roots = list(roots)
782 assert orderedout
780 assert orderedout
783 assert roots
781 assert roots
784 assert heads
782 assert heads
785 return (orderedout, roots, heads)
783 return (orderedout, roots, heads)
786
784
def heads(self, start=None, stop=None):
    """return the list of all nodes that have no children

    if start is specified, only heads that are descendants of
    start will be returned
    if stop is specified, it will consider all the revs from stop
    as if they had no children
    """
    if start is None and stop is None:
        # fast path: scan the whole index once, marking every rev
        # that appears as a parent; what is left unmarked is a head
        count = len(self)
        if not count:
            return [nullid]
        ishead = [1] * (count + 1)
        index = self.index
        for r in xrange(count):
            e = index[r]
            # e[5] and e[6] are the parent revs of entry r
            ishead[e[5]] = ishead[e[6]] = 0
        return [self.node(r) for r in xrange(count) if ishead[r]]

    if start is None:
        start = nullid
    if stop is None:
        stop = []
    stoprevs = set([self.rev(n) for n in stop])
    startrev = self.rev(start)
    # use real sets: these were dicts with dummy values, consistent
    # with the stoprevs conversion above
    reachable = set([startrev])
    heads = set([startrev])

    parentrevs = self.parentrevs
    for r in xrange(startrev + 1, len(self)):
        for p in parentrevs(r):
            if p in reachable:
                if r not in stoprevs:
                    reachable.add(r)
                    heads.add(r)
            if p in heads and p not in stoprevs:
                # p has a reachable child, so it is not a head
                heads.remove(p)

    return [self.node(r) for r in heads]
826
824
def children(self, node):
    """find the children of a given node"""
    rev = self.rev(node)
    kids = []
    for r in range(rev + 1, len(self)):
        # real (non-null) parents of candidate rev r
        parents = [p for p in self.parentrevs(r) if p != nullrev]
        if parents:
            # append once per matching parent slot, as before
            kids.extend(self.node(r) for p in parents if p == rev)
        elif rev == nullrev:
            # parentless revs are children of the null revision
            kids.append(self.node(r))
    return kids
840
838
def _match(self, id):
    """Resolve *id* to a binary node using exact forms only.

    Tries, in order: an integer revision, a 20-byte binary node, a
    decimal revision string, and a 40-char hex node.  Returns the
    binary node on success, None otherwise (abbreviated hex ids are
    handled separately by _partialmatch).
    """
    if isinstance(id, (long, int)):
        # rev
        return self.node(id)
    if len(id) == 20:
        # possibly a binary node
        # odds of a binary node being all hex in ASCII are 1 in 10**25
        try:
            node = id
            self.rev(node) # quick search the index
            return node
        except LookupError:
            pass # may be partial hex id
    try:
        # str(rev)
        rev = int(id)
        if str(rev) != id:
            # e.g. "007" or "1.0" are not revision strings
            raise ValueError
        if rev < 0:
            # negative revs count from the end
            rev = len(self) + rev
        if rev < 0 or rev >= len(self):
            raise ValueError
        return self.node(rev)
    except (ValueError, OverflowError):
        pass
    if len(id) == 40:
        try:
            # a full hex nodeid?
            node = bin(id)
            self.rev(node)
            return node
        except (TypeError, LookupError):
            # TypeError: id was not valid hex
            pass
874
872
def _partialmatch(self, id):
    """Resolve an abbreviated hex node id.

    Returns the unique matching binary node, or None when nothing
    matches (or *id* is 40+ chars or not valid hex).  Raises
    LookupError when the prefix matches more than one node.
    """
    if len(id) < 40:
        try:
            # hex(node)[:...]
            l = len(id) / 2 # grab an even number of digits
            bin_id = bin(id[:l*2])
            # candidates sharing the even-length binary prefix...
            nl = [n for n in self.nodemap if n[:l] == bin_id]
            # ...narrowed by the full (possibly odd-length) hex prefix
            nl = [n for n in nl if hex(n).startswith(id)]
            if len(nl) > 0:
                if len(nl) == 1:
                    return nl[0]
                raise LookupError(id, self.indexfile,
                                  _('ambiguous identifier'))
            return None
        except TypeError:
            # bin() rejected the input: not a hex string
            pass
891
889
def lookup(self, id):
    """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
    """
    # exact forms first (rev number, binary node, full hex)
    node = self._match(id)
    if node is not None:
        return node
    # then try as an abbreviated hex id
    node = self._partialmatch(id)
    if node:
        return node

    raise LookupError(id, self.indexfile, _('no match found'))
905
903
def cmp(self, node, text):
    """compare text with a given file revision

    Returns True when *text* differs from the stored revision."""
    p1, p2 = self.parents(node)
    # a revision's node is the hash of its text and parents, so
    # recomputing it is enough to detect a difference
    expected = hash(text, p1, p2)
    return expected != node
910
908
def chunk(self, rev, df=None):
    """Return the decompressed raw data chunk for revision *rev*.

    df is an optional already-open index/data file; when absent the
    right file is opened on demand.  Reads go through a single-slab
    cache (self._chunkcache) of at least 64k to batch small reads.
    """
    def loadcache(df):
        # (re)fill the chunk cache with cache_length bytes at `start`
        if not df:
            if self._inline:
                # inline revlogs keep data inside the index file
                df = self.opener(self.indexfile)
            else:
                df = self.opener(self.datafile)
        df.seek(start)
        self._chunkcache = (start, df.read(cache_length))

    start, length = self.start(rev), self.length(rev)
    if self._inline:
        # skip the interleaved index entries preceding this chunk
        start += (rev + 1) * self._io.size
    end = start + length

    offset = 0
    if not self._chunkcache:
        cache_length = max(65536, length)
        loadcache(df)
    else:
        cache_start = self._chunkcache[0]
        cache_length = len(self._chunkcache[1])
        cache_end = cache_start + cache_length
        if start >= cache_start and end <= cache_end:
            # it is cached
            offset = start - cache_start
        else:
            cache_length = max(65536, length)
            loadcache(df)

    # avoid copying large chunks
    c = self._chunkcache[1]
    if cache_length != length:
        c = c[offset:offset + length]

    return decompress(c)
947
945
def revdiff(self, rev1, rev2):
    """return or calculate a delta between two revisions"""
    # adjacent revs on the same delta chain: the stored chunk for
    # rev2 already is the delta against rev1
    if rev2 == rev1 + 1 and self.base(rev1) == self.base(rev2):
        return self.chunk(rev2)

    old = self.revision(self.node(rev1))
    new = self.revision(self.node(rev2))
    return mdiff.textdiff(old, new)
955
953
def revision(self, node):
    """return an uncompressed revision of a given node

    Reconstructs the text by starting from the chain base (or the
    cached revision, when usable) and applying stored deltas, then
    verifies the result against the node hash.
    """
    if node == nullid:
        return ""
    if self._cache and self._cache[0] == node:
        # exact cache hit; str() forces buffers to real strings
        return str(self._cache[2])

    # look up what we need to read
    text = None
    rev = self.rev(node)
    base = self.base(rev)

    # check rev flags
    if self.index[rev][0] & 0xFFFF:
        raise RevlogError(_('incompatible revision flag %x') %
                          (self.index[rev][0] & 0xFFFF))

    df = None

    # do we have useful data cached?
    if self._cache and self._cache[1] >= base and self._cache[1] < rev:
        # cached rev lies on this chain: patch forward from it
        # instead of from the chain base
        base = self._cache[1]
        text = str(self._cache[2])
        self._loadindex(base, rev + 1)
        if not self._inline and rev > base + 1:
            df = self.opener(self.datafile)
    else:
        self._loadindex(base, rev + 1)
        if not self._inline and rev > base:
            df = self.opener(self.datafile)
        text = self.chunk(base, df=df)

    # apply the remaining deltas of the chain in one pass
    bins = [self.chunk(r, df) for r in xrange(base + 1, rev + 1)]
    text = mdiff.patches(text, bins)
    p1, p2 = self.parents(node)
    if node != hash(text, p1, p2):
        raise RevlogError(_("integrity check failed on %s:%d")
                          % (self.datafile, rev))

    self._cache = (node, rev, text)
    return text
997
995
def checkinlinesize(self, tr, fp=None):
    """Migrate an inline revlog to separate index/data files once it
    outgrows 128k.

    tr is the active transaction (used so the migration can be rolled
    back); fp is an optional already-open index file positioned at its
    end.  No-op for non-inline revlogs or while the file is small.
    """
    if not self._inline:
        return
    if not fp:
        fp = self.opener(self.indexfile, 'r')
        fp.seek(0, 2)
    size = fp.tell()
    if size < 131072:
        return
    trinfo = tr.find(self.indexfile)
    # PEP 8: compare against None with 'is', not '=='
    if trinfo is None:
        raise RevlogError(_("%s not found in the transaction")
                          % self.indexfile)

    trindex = trinfo[2]
    dataoff = self.start(trindex)

    tr.add(self.datafile, dataoff)
    df = self.opener(self.datafile, 'w')
    try:
        calc = self._io.size
        for r in self:
            # inline layout: each data chunk follows its index entry
            start = self.start(r) + (r + 1) * calc
            length = self.length(r)
            fp.seek(start)
            d = fp.read(length)
            df.write(d)
    finally:
        df.close()

    fp.close()
    fp = self.opener(self.indexfile, 'w', atomictemp=True)
    self.version &= ~(REVLOGNGINLINEDATA)
    self._inline = False
    for i in self:
        e = self._io.packentry(self.index[i], self.node, self.version, i)
        fp.write(e)

    # if we don't call rename, the temp file will never replace the
    # real index
    fp.rename()

    tr.replace(self.indexfile, trindex * calc)
    self._chunkcache = None
1042
1040
def addrevision(self, text, transaction, link, p1, p2, d=None):
    """add a revision to the log

    text - the revision data to add
    transaction - the transaction object used for rollback
    link - the linkrev data to add
    p1, p2 - the parent nodeids of the revision
    d - an optional precomputed delta
    """
    # inline revlogs keep everything in the index file, so no
    # separate data handle is needed
    dfh = None if self._inline else self.opener(self.datafile, "a")
    ifh = self.opener(self.indexfile, "a+")
    try:
        return self._addrevision(text, transaction, link, p1, p2, d,
                                 ifh, dfh)
    finally:
        if dfh:
            dfh.close()
        ifh.close()
1062
1060
def _addrevision(self, text, transaction, link, p1, p2, d, ifh, dfh):
    """Internal worker for addrevision: write one revision using the
    already-open index (ifh) and data (dfh) file handles.

    Stores either a compressed delta against the previous revision or
    a compressed full text when the delta chain would grow past twice
    the uncompressed size.  Returns the new node (or the existing one
    when the revision is already present).
    """
    node = hash(text, p1, p2)
    if node in self.nodemap:
        # already stored, nothing to do
        return node

    curr = len(self)
    prev = curr - 1
    base = self.base(prev)
    offset = self.end(prev)

    if curr:
        if not d:
            ptext = self.revision(self.node(prev))
            d = mdiff.textdiff(ptext, text)
        data = compress(d)
        l = len(data[1]) + len(data[0])
        # total chain length if we append this delta
        dist = l + offset - self.start(base)

    # full versions are inserted when the needed deltas
    # become comparable to the uncompressed text
    if not curr or dist > len(text) * 2:
        data = compress(text)
        l = len(data[1]) + len(data[0])
        base = curr

    e = (offset_type(offset, 0), l, len(text),
         base, link, self.rev(p1), self.rev(p2), node)
    self.index.insert(-1, e)
    self.nodemap[node] = curr

    entry = self._io.packentry(e, self.node, self.version, curr)
    if not self._inline:
        transaction.add(self.datafile, offset)
        transaction.add(self.indexfile, curr * len(entry))
        if data[0]:
            # data[0] is the compression header, may be empty
            dfh.write(data[0])
        dfh.write(data[1])
        dfh.flush()
        ifh.write(entry)
    else:
        # inline: entry and data interleaved in the index file
        offset += curr * self._io.size
        transaction.add(self.indexfile, offset, curr)
        ifh.write(entry)
        ifh.write(data[0])
        ifh.write(data[1])
        # may convert the revlog to non-inline once it grows too big
        self.checkinlinesize(transaction, ifh)

    self._cache = (node, curr, text)
    return node
1112
1110
def ancestor(self, a, b):
    """calculate the least common ancestor of nodes a and b"""

    def pfunc(rev):
        # parent revs, with null parents filtered out
        return [p for p in self.parentrevs(rev) if p != nullrev]

    anc = ancestor.ancestor(self.rev(a), self.rev(b), pfunc)
    return nullid if anc is None else self.node(anc)
1124
1122
def group(self, nodelist, lookup, infocollect=None):
    """calculate a delta group

    Given a list of changeset revs, return a set of deltas and
    metadata corresponding to nodes. the first delta is
    parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
    have this parent as it has all history before these
    changesets. parent is parent[0]

    Yields changegroup-framed chunks; lookup maps a node to its
    linknode, infocollect (optional) is called with each node
    before its delta is produced.
    """
    revs = [self.rev(n) for n in nodelist]

    # if we don't have any revisions touched by these changesets, bail
    if not revs:
        yield changegroup.closechunk()
        return

    # add the parent of the first rev
    p = self.parents(self.node(revs[0]))[0]
    revs.insert(0, self.rev(p))

    # build deltas
    for d in xrange(0, len(revs) - 1):
        a, b = revs[d], revs[d + 1]
        nb = self.node(b)

        if infocollect is not None:
            infocollect(nb)

        p = self.parents(nb)
        # chunk metadata: node, both parents, linknode
        meta = nb + p[0] + p[1] + lookup(nb)
        if a == -1:
            # no base available: emit full text with a trivial header
            d = self.revision(nb)
            meta += mdiff.trivialdiffheader(len(d))
        else:
            d = self.revdiff(a, b)
        yield changegroup.chunkheader(len(meta) + len(d))
        yield meta
        if len(d) > 2**20:
            # stream large deltas in 256k slices to bound memory use
            pos = 0
            while pos < len(d):
                pos2 = pos + 2 ** 18
                yield d[pos:pos2]
                pos = pos2
        else:
            yield d

    yield changegroup.closechunk()
1172
1170
def addgroup(self, revs, linkmapper, transaction):
    """
    add a delta group

    given a set of deltas, add them to the revision log. the
    first delta is against its parent, which should be in our
    log, the rest are against the previous delta.

    revs yields changegroup chunks (80-byte header + delta);
    linkmapper maps a changeset node to a linkrev; transaction is
    used for rollback.  Returns the last node added (or seen).
    """

    #track the base of the current delta log
    r = len(self)
    t = r - 1
    node = None

    base = prev = nullrev
    start = end = textlen = 0
    if r:
        end = self.end(t)

    ifh = self.opener(self.indexfile, "a+")
    isize = r * self._io.size
    if self._inline:
        transaction.add(self.indexfile, end + isize, r)
        dfh = None
    else:
        transaction.add(self.indexfile, isize, r)
        transaction.add(self.datafile, end)
        dfh = self.opener(self.datafile, "a")

    try:
        # loop through our set of deltas
        chain = None
        for chunk in revs:
            # chunk header: node, p1, p2, changeset node
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                chain = node
                continue
            # zero-copy view of the delta payload
            delta = buffer(chunk, 80)
            del chunk

            for p in (p1, p2):
                if not p in self.nodemap:
                    raise LookupError(p, self.indexfile, _('unknown parent'))

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise LookupError(chain, self.indexfile, _('unknown base'))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                cdelta = compress(delta)
                cdeltalen = len(cdelta[0]) + len(cdelta[1])
                textlen = mdiff.patchedsize(textlen, delta)

            if chain != prev or (end - start + cdeltalen) > textlen * 2:
                # flush our writes here so we can read it in revision
                if dfh:
                    dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                if len(text) == 0:
                    # skip over trivial delta header
                    text = buffer(delta, 12)
                else:
                    text = mdiff.patches(text, [delta])
                del delta
                chk = self._addrevision(text, transaction, link, p1, p2, None,
                                        ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
                if chk != node:
                    raise RevlogError(_("consistency error adding group"))
                textlen = len(text)
            else:
                # cheap path: append the compressed delta directly
                e = (offset_type(end, 0), cdeltalen, textlen, base,
                     link, self.rev(p1), self.rev(p2), node)
                self.index.insert(-1, e)
                self.nodemap[node] = r
                entry = self._io.packentry(e, self.node, self.version, r)
                if self._inline:
                    ifh.write(entry)
                    ifh.write(cdelta[0])
                    ifh.write(cdelta[1])
                    self.checkinlinesize(transaction, ifh)
                    if not self._inline:
                        # checkinlinesize migrated us: reopen handles
                        dfh = self.opener(self.datafile, "a")
                        ifh = self.opener(self.indexfile, "a")
                else:
                    dfh.write(cdelta[0])
                    dfh.write(cdelta[1])
                    ifh.write(entry)

            t, r, chain, prev = r, r + 1, node, node
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
    finally:
        if dfh:
            dfh.close()
        ifh.close()

    return node
1287
1285
def strip(self, minlink, transaction):
    """truncate the revlog on the first revision with a linkrev >= minlink

    This function is called when we're stripping revision minlink and
    its descendants from the repository.

    We have to remove all revisions with linkrev >= minlink, because
    the equivalent changelog revisions will be renumbered after the
    strip.

    So we truncate the revlog on the first of these revisions, and
    trust that the caller has saved the revisions that shouldn't be
    removed and that it'll readd them after this truncation.
    """
    if len(self) == 0:
        return

    if isinstance(self.index, lazyindex):
        # make sure the whole index is loaded before we mutate it
        self._loadindexmap()

    for rev in self:
        # index entry field 4 is the linkrev
        if self.index[rev][4] >= minlink:
            break
    else:
        # no revision to strip
        return

    # first truncate the files on disk
    end = self.start(rev)
    if not self._inline:
        transaction.add(self.datafile, end)
        end = rev * self._io.size
    else:
        # inline: index entries and data interleaved in one file
        end += rev * self._io.size

    transaction.add(self.indexfile, end)

    # then reset internal state in memory to forget those revisions
    self._cache = None
    self._chunkcache = None
    for x in xrange(rev, len(self)):
        del self.nodemap[self.node(x)]

    del self.index[rev:-1]
1331
1329
def checksize(self):
    """Sanity-check the on-disk sizes of the data and index files.

    Returns (dd, di): the byte difference between actual and expected
    sizes of the data file and index file respectively; both 0 means
    the revlog looks consistent.  Missing files count as empty.
    """
    expected = 0
    if len(self):
        # expected data size is the end offset of the last revision
        expected = max(0, self.end(len(self) - 1))

    try:
        f = self.opener(self.datafile)
        f.seek(0, 2)
        actual = f.tell()
        dd = actual - expected
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        # no data file at all: treat as empty
        dd = 0

    try:
        f = self.opener(self.indexfile)
        f.seek(0, 2)
        actual = f.tell()
        s = self._io.size
        i = max(0, actual / s)
        # di is the leftover after whole index entries
        di = actual - (i * s)
        if self._inline:
            databytes = 0
            for r in self:
                databytes += max(0, self.length(r))
            dd = 0
            # inline: index holds both entries and chunk data
            di = actual - len(self) * s - databytes
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
        di = 0

    return (dd, di)
1366
1364
def files(self):
    """Return the list of files backing this revlog.

    Always the index file; the data file too when not inline."""
    if self._inline:
        return [self.indexfile]
    return [self.indexfile, self.datafile]
General Comments 0
You need to be logged in to leave comments. Login now