atomictempfile: make close() consistent with other file-like objects....
Greg Ward - r15057:774da712 default
@@ -1,3292 +1,3292 b''
# mq.py - patch queues for mercurial
#
# Copyright 2005, 2006 Chris Mason <mason@suse.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

'''manage a stack of patches

This extension lets you work with a stack of patches in a Mercurial
repository. It manages two stacks of patches - all known patches, and
applied patches (a subset of known patches).

Known patches are represented as patch files in the .hg/patches
directory. Applied patches are both patch files and changesets.

Common tasks (use :hg:`help command` for more details)::

  create new patch                          qnew
  import existing patch                     qimport

  print patch series                        qseries
  print applied patches                     qapplied

  add known patch to applied stack          qpush
  remove patch from applied stack           qpop
  refresh contents of top applied patch     qrefresh

By default, mq will automatically use git patches when required to
avoid losing file mode changes, copy records, binary files or empty
file creations or deletions. This behaviour can be configured with::

  [mq]
  git = auto/keep/yes/no

If set to 'keep', mq will obey the [diff] section configuration while
preserving existing git patches upon qrefresh. If set to 'yes' or
'no', mq will override the [diff] section and always generate git or
regular patches, possibly losing data in the second case.

You will by default be managing a patch queue named "patches". You can
create other, independent patch queues with the :hg:`qqueue` command.
'''

from mercurial.i18n import _
from mercurial.node import bin, hex, short, nullid, nullrev
from mercurial.lock import release
from mercurial import commands, cmdutil, hg, scmutil, util, revset
from mercurial import repair, extensions, url, error
from mercurial import patch as patchmod
import os, re, errno, shutil

commands.norepo += " qclone"

seriesopts = [('s', 'summary', None, _('print first line of patch header'))]

cmdtable = {}
command = cmdutil.command(cmdtable)

# Patch names look like unix file names.
# They must be joinable with queue directory and result in the patch path.
normname = util.normpath

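# A statusentry models one line of the status file, which records each
# applied patch as "<40-hex-node>:<patchname>"; __repr__ below emits exactly
# that format, and queue.applied parses it back with bin().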
class statusentry(object):
    def __init__(self, node, name):
        self.node, self.name = node, name
    def __repr__(self):
        return hex(self.node) + ':' + self.name

class patchheader(object):
    def __init__(self, pf, plainmode=False):
        def eatdiff(lines):
            while lines:
                l = lines[-1]
                if (l.startswith("diff -") or
                    l.startswith("Index:") or
                    l.startswith("===========")):
                    del lines[-1]
                else:
                    break
        def eatempty(lines):
            while lines:
                if not lines[-1].strip():
                    del lines[-1]
                else:
                    break

        message = []
        comments = []
        user = None
        date = None
        parent = None
        format = None
        subject = None
        branch = None
        nodeid = None
        diffstart = 0

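        # diffstart is a small state machine: 0 = still reading header
        # comments, 1 = a "--- " line was just seen, 2 = the diff body is
        # confirmed (a "diff --git" line, or a "+++ " line right after
        # state 1). self.haspatch below tests for state 2.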
        for line in file(pf):
            line = line.rstrip()
            if (line.startswith('diff --git')
                or (diffstart and line.startswith('+++ '))):
                diffstart = 2
                break
            diffstart = 0 # reset
            if line.startswith("--- "):
                diffstart = 1
                continue
            elif format == "hgpatch":
                # parse values when importing the result of an hg export
                if line.startswith("# User "):
                    user = line[7:]
                elif line.startswith("# Date "):
                    date = line[7:]
                elif line.startswith("# Parent "):
                    parent = line[9:].lstrip()
                elif line.startswith("# Branch "):
                    branch = line[9:]
                elif line.startswith("# Node ID "):
                    nodeid = line[10:]
                elif not line.startswith("# ") and line:
                    message.append(line)
                    format = None
            elif line == '# HG changeset patch':
                message = []
                format = "hgpatch"
            elif (format != "tagdone" and (line.startswith("Subject: ") or
                                           line.startswith("subject: "))):
                subject = line[9:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("From: ") or
                                           line.startswith("from: "))):
                user = line[6:]
                format = "tag"
            elif (format != "tagdone" and (line.startswith("Date: ") or
                                           line.startswith("date: "))):
                date = line[6:]
                format = "tag"
            elif format == "tag" and line == "":
                # when looking for tags (subject: from: etc) they
                # end once you find a blank line in the source
                format = "tagdone"
            elif message or line:
                message.append(line)
            comments.append(line)

        eatdiff(message)
        eatdiff(comments)
        # Remember the exact starting line of the patch diffs before consuming
        # empty lines, for external use by TortoiseHg and others
        self.diffstartline = len(comments)
        eatempty(message)
        eatempty(comments)

        # make sure message isn't empty
        if format and format.startswith("tag") and subject:
            message.insert(0, "")
            message.insert(0, subject)

        self.message = message
        self.comments = comments
        self.user = user
        self.date = date
        self.parent = parent
        # nodeid and branch are for external use by TortoiseHg and others
        self.nodeid = nodeid
        self.branch = branch
        self.haspatch = diffstart > 1
        self.plainmode = plainmode

    def setuser(self, user):
        if not self.updateheader(['From: ', '# User '], user):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# User ' + user)
            except ValueError:
                if self.plainmode or self._hasheader(['Date: ']):
                    self.comments = ['From: ' + user] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# User ' + user, '']
                    self.comments = tmp + self.comments
        self.user = user

    def setdate(self, date):
        if not self.updateheader(['Date: ', '# Date '], date):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Date ' + date)
            except ValueError:
                if self.plainmode or self._hasheader(['From: ']):
                    self.comments = ['Date: ' + date] + self.comments
                else:
                    tmp = ['# HG changeset patch', '# Date ' + date, '']
                    self.comments = tmp + self.comments
        self.date = date

    def setparent(self, parent):
        if not self.updateheader(['# Parent '], parent):
            try:
                patchheaderat = self.comments.index('# HG changeset patch')
                self.comments.insert(patchheaderat + 1, '# Parent ' + parent)
            except ValueError:
                pass
        self.parent = parent

    def setmessage(self, message):
        if self.comments:
            self._delmsg()
        self.message = [message]
        self.comments += self.message

    def updateheader(self, prefixes, new):
        '''Update all references to a field in the patch header.
        Return whether the field is present.'''
        res = False
        for prefix in prefixes:
            for i in xrange(len(self.comments)):
                if self.comments[i].startswith(prefix):
                    self.comments[i] = prefix + new
                    res = True
                    break
        return res

    def _hasheader(self, prefixes):
        '''Check if a header starts with any of the given prefixes.'''
        for prefix in prefixes:
            for comment in self.comments:
                if comment.startswith(prefix):
                    return True
        return False

    def __str__(self):
        if not self.comments:
            return ''
        return '\n'.join(self.comments) + '\n\n'

    def _delmsg(self):
        '''Remove existing message, keeping the rest of the comments fields.
        If comments contains 'subject: ', message will prepend
        the field and a blank line.'''
        if self.message:
            subj = 'subject: ' + self.message[0].lower()
            for i in xrange(len(self.comments)):
                if subj == self.comments[i].lower():
                    del self.comments[i]
                    self.message = self.message[2:]
                    break
        ci = 0
        for mi in self.message:
            while mi != self.comments[ci]:
                ci += 1
            del self.comments[ci]

class queue(object):
    def __init__(self, ui, path, patchdir=None):
        self.basepath = path
        try:
            fh = open(os.path.join(path, 'patches.queue'))
            cur = fh.read().rstrip()
            fh.close()
            if not cur:
                curpath = os.path.join(path, 'patches')
            else:
                curpath = os.path.join(path, 'patches-' + cur)
        except IOError:
            curpath = os.path.join(path, 'patches')
        self.path = patchdir or curpath
        self.opener = scmutil.opener(self.path)
        self.ui = ui
        self.applieddirty = 0
        self.seriesdirty = 0
        self.added = []
        self.seriespath = "series"
        self.statuspath = "status"
        self.guardspath = "guards"
        self.activeguards = None
        self.guardsdirty = False
        # Handle mq.git as a bool with extended values
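        # configbool() raises ConfigError for non-boolean values such as
        # 'auto' or 'keep' and returns None when the option is unset; both
        # cases fall through to the plain string read, which defaults
        # to 'auto'.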
        try:
            gitmode = ui.configbool('mq', 'git', None)
            if gitmode is None:
                raise error.ConfigError()
            self.gitmode = gitmode and 'yes' or 'no'
        except error.ConfigError:
            self.gitmode = ui.config('mq', 'git', 'auto').lower()
        self.plainmode = ui.configbool('mq', 'plain', False)

    @util.propertycache
    def applied(self):
        if os.path.exists(self.join(self.statuspath)):
            def parselines(lines):
                for l in lines:
                    entry = l.split(':', 1)
                    if len(entry) > 1:
                        n, name = entry
                        yield statusentry(bin(n), name)
                    elif l.strip():
                        self.ui.warn(_('malformed mq status line: %s\n') % entry)
                    # else we ignore empty lines
            lines = self.opener.read(self.statuspath).splitlines()
            return list(parselines(lines))
        return []

    @util.propertycache
    def fullseries(self):
        if os.path.exists(self.join(self.seriespath)):
            return self.opener.read(self.seriespath).splitlines()
        return []

    @util.propertycache
    def series(self):
        self.parseseries()
        return self.series

    @util.propertycache
    def seriesguards(self):
        self.parseseries()
        return self.seriesguards

    def invalidate(self):
        for a in 'applied fullseries series seriesguards'.split():
            if a in self.__dict__:
                delattr(self, a)
        self.applieddirty = 0
        self.seriesdirty = 0
        self.guardsdirty = False
        self.activeguards = None

    def diffopts(self, opts={}, patchfn=None):
        diffopts = patchmod.diffopts(self.ui, opts)
        if self.gitmode == 'auto':
            diffopts.upgrade = True
        elif self.gitmode == 'keep':
            pass
        elif self.gitmode in ('yes', 'no'):
            diffopts.git = self.gitmode == 'yes'
        else:
            raise util.Abort(_('mq.git option can be auto/keep/yes/no,'
                               ' got %s') % self.gitmode)
        if patchfn:
            diffopts = self.patchopts(diffopts, patchfn)
        return diffopts

    def patchopts(self, diffopts, *patches):
        """Return a copy of input diff options with git set to true if
        referenced patch is a git patch and should be preserved as such.
        """
        diffopts = diffopts.copy()
        if not diffopts.git and self.gitmode == 'keep':
            for patchfn in patches:
                patchf = self.opener(patchfn, 'r')
                # if the patch was a git patch, refresh it as a git patch
                for line in patchf:
                    if line.startswith('diff --git'):
                        diffopts.git = True
                        break
                patchf.close()
        return diffopts

    def join(self, *p):
        return os.path.join(self.path, *p)

    def findseries(self, patch):
        def matchpatch(l):
            l = l.split('#', 1)[0]
            return l.strip() == patch
        for index, l in enumerate(self.fullseries):
            if matchpatch(l):
                return index
        return None

    guard_re = re.compile(r'\s?#([-+][^-+# \t\r\n\f][^# \t\r\n\f]*)')
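    # Illustrative example (not in the original source): for a series-line
    # comment such as " #+stable #-wip", guard_re.findall() returns
    # ['+stable', '-wip'].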

    def parseseries(self):
        self.series = []
        self.seriesguards = []
        for l in self.fullseries:
            h = l.find('#')
            if h == -1:
                patch = l
                comment = ''
            elif h == 0:
                continue
            else:
                patch = l[:h]
                comment = l[h:]
            patch = patch.strip()
            if patch:
                if patch in self.series:
                    raise util.Abort(_('%s appears more than once in %s') %
                                     (patch, self.join(self.seriespath)))
                self.series.append(patch)
                self.seriesguards.append(self.guard_re.findall(comment))

    def checkguard(self, guard):
        if not guard:
            return _('guard cannot be an empty string')
        bad_chars = '# \t\r\n\f'
        first = guard[0]
        if first in '-+':
            return (_('guard %r starts with invalid character: %r') %
                    (guard, first))
        for c in bad_chars:
            if c in guard:
                return _('invalid character in guard %r: %r') % (guard, c)

    def setactive(self, guards):
        for guard in guards:
            bad = self.checkguard(guard)
            if bad:
                raise util.Abort(bad)
        guards = sorted(set(guards))
        self.ui.debug('active guards: %s\n' % ' '.join(guards))
        self.activeguards = guards
        self.guardsdirty = True

    def active(self):
        if self.activeguards is None:
            self.activeguards = []
            try:
                guards = self.opener.read(self.guardspath).split()
            except IOError, err:
                if err.errno != errno.ENOENT:
                    raise
                guards = []
            for i, guard in enumerate(guards):
                bad = self.checkguard(guard)
                if bad:
                    self.ui.warn('%s:%d: %s\n' %
                                 (self.join(self.guardspath), i + 1, bad))
                else:
                    self.activeguards.append(guard)
        return self.activeguards

    def setguards(self, idx, guards):
        for g in guards:
            if len(g) < 2:
                raise util.Abort(_('guard %r too short') % g)
            if g[0] not in '-+':
                raise util.Abort(_('guard %r starts with invalid char') % g)
            bad = self.checkguard(g[1:])
            if bad:
                raise util.Abort(bad)
        drop = self.guard_re.sub('', self.fullseries[idx])
        self.fullseries[idx] = drop + ''.join([' #' + g for g in guards])
        self.parseseries()
        self.seriesdirty = True

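    # Guard semantics, in short: an exact negative match (the patch carries
    # "-foo" while guard "foo" is active) always blocks a patch; if the
    # patch carries any positive guards, at least one of them must match an
    # active guard. Returns (pushable, reason), where reason may be None.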
    def pushable(self, idx):
        if isinstance(idx, str):
            idx = self.series.index(idx)
        patchguards = self.seriesguards[idx]
        if not patchguards:
            return True, None
        guards = self.active()
        exactneg = [g for g in patchguards if g[0] == '-' and g[1:] in guards]
        if exactneg:
            return False, repr(exactneg[0])
        pos = [g for g in patchguards if g[0] == '+']
        exactpos = [g for g in pos if g[1:] in guards]
        if pos:
            if exactpos:
                return True, repr(exactpos[0])
            return False, ' '.join(map(repr, pos))
        return True, ''

    def explainpushable(self, idx, all_patches=False):
        write = all_patches and self.ui.write or self.ui.warn
        if all_patches or self.ui.verbose:
            if isinstance(idx, str):
                idx = self.series.index(idx)
            pushable, why = self.pushable(idx)
            if all_patches and pushable:
                if why is None:
                    write(_('allowing %s - no guards in effect\n') %
                          self.series[idx])
                else:
                    if not why:
                        write(_('allowing %s - no matching negative guards\n') %
                              self.series[idx])
                    else:
                        write(_('allowing %s - guarded by %s\n') %
                              (self.series[idx], why))
            if not pushable:
                if why:
                    write(_('skipping %s - guarded by %s\n') %
                          (self.series[idx], why))
                else:
                    write(_('skipping %s - no matching guards\n') %
                          self.series[idx])

    def savedirty(self):
        def writelist(items, path):
            fp = self.opener(path, 'w')
            for i in items:
                fp.write("%s\n" % i)
            fp.close()
        if self.applieddirty:
            writelist(map(str, self.applied), self.statuspath)
        if self.seriesdirty:
            writelist(self.fullseries, self.seriespath)
        if self.guardsdirty:
            writelist(self.activeguards, self.guardspath)
        if self.added:
            qrepo = self.qrepo()
            if qrepo:
                qrepo[None].add(f for f in self.added if f not in qrepo[None])
            self.added = []

    def removeundo(self, repo):
        undo = repo.sjoin('undo')
        if not os.path.exists(undo):
            return
        try:
            os.unlink(undo)
        except OSError, inst:
            self.ui.warn(_('error removing undo: %s\n') % str(inst))

    def printdiff(self, repo, diffopts, node1, node2=None, files=None,
                  fp=None, changes=None, opts={}):
        stat = opts.get('stat')
        m = scmutil.match(repo[node1], files, opts)
        cmdutil.diffordiffstat(self.ui, repo, diffopts, node1, node2, m,
                               changes, stat, fp)

    def mergeone(self, repo, mergeq, head, patch, rev, diffopts):
        # first try just applying the patch
        (err, n) = self.apply(repo, [patch], update_status=False,
                              strict=True, merge=rev)

        if err == 0:
            return (err, n)

        if n is None:
            raise util.Abort(_("apply failed for patch %s") % patch)

        self.ui.warn(_("patch didn't work out, merging %s\n") % patch)

        # apply failed, strip away that rev and merge.
        hg.clean(repo, head)
        self.strip(repo, [n], update=False, backup='strip')

        ctx = repo[rev]
        ret = hg.merge(repo, rev)
        if ret:
            raise util.Abort(_("update returned %d") % ret)
        n = repo.commit(ctx.description(), ctx.user(), force=True)
        if n is None:
            raise util.Abort(_("repo commit failed"))
        try:
            ph = patchheader(mergeq.join(patch), self.plainmode)
        except:
            raise util.Abort(_("unable to read %s") % patch)

        diffopts = self.patchopts(diffopts, patch)
        patchf = self.opener(patch, "w")
        comments = str(ph)
        if comments:
            patchf.write(comments)
        self.printdiff(repo, diffopts, head, n, fp=patchf)
        patchf.close()
        self.removeundo(repo)
        return (0, n)

    def qparents(self, repo, rev=None):
        if rev is None:
            (p1, p2) = repo.dirstate.parents()
            if p2 == nullid:
                return p1
            if not self.applied:
                return None
            return self.applied[-1].node
        p1, p2 = repo.changelog.parents(rev)
        if p2 != nullid and p2 in [x.node for x in self.applied]:
            return p2
        return p1

    def mergepatch(self, repo, mergeq, series, diffopts):
        if not self.applied:
            # each of the patches merged in will have two parents. This
            # can confuse the qrefresh, qdiff, and strip code because it
            # needs to know which parent is actually in the patch queue.
            # so, we insert a merge marker with only one parent. This way
            # the first patch in the queue is never a merge patch
            #
            pname = ".hg.patches.merge.marker"
            n = repo.commit('[mq]: merge marker', force=True)
            self.removeundo(repo)
            self.applied.append(statusentry(n, pname))
            self.applieddirty = 1

        head = self.qparents(repo)

        for patch in series:
            patch = mergeq.lookup(patch, strict=True)
            if not patch:
                self.ui.warn(_("patch %s does not exist\n") % patch)
                return (1, None)
            pushable, reason = self.pushable(patch)
            if not pushable:
                self.explainpushable(patch, all_patches=True)
                continue
            info = mergeq.isapplied(patch)
            if not info:
                self.ui.warn(_("patch %s is not applied\n") % patch)
                return (1, None)
            rev = info[1]
            err, head = self.mergeone(repo, mergeq, head, patch, rev, diffopts)
            if head:
                self.applied.append(statusentry(head, patch))
                self.applieddirty = 1
            if err:
                return (err, head)
        self.savedirty()
        return (0, head)

    def patch(self, repo, patchfile):
        '''Apply patchfile to the working directory.
        patchfile: name of patch file'''
        files = set()
        try:
            fuzz = patchmod.patch(self.ui, repo, patchfile, strip=1,
                                  files=files, eolmode=None)
            return (True, list(files), fuzz)
        except Exception, inst:
            self.ui.note(str(inst) + '\n')
            if not self.ui.verbose:
                self.ui.warn(_("patch failed, unable to continue (try -v)\n"))
            return (False, list(files), False)

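    # apply() is the locked, transactional wrapper around _apply(): it takes
    # wlock and lock, opens a "qpush" transaction, and on any failure aborts
    # the transaction and invalidates the repo and dirstate caches before
    # re-raising.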
    def apply(self, repo, series, list=False, update_status=True,
              strict=False, patchdir=None, merge=None, all_files=None):
        wlock = lock = tr = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()
            tr = repo.transaction("qpush")
            try:
                ret = self._apply(repo, series, list, update_status,
                                  strict, patchdir, merge, all_files=all_files)
                tr.close()
                self.savedirty()
                return ret
            except:
                try:
                    tr.abort()
                finally:
                    repo.invalidate()
                    repo.dirstate.invalidate()
                raise
        finally:
            release(tr, lock, wlock)
            self.removeundo(repo)

    def _apply(self, repo, series, list=False, update_status=True,
               strict=False, patchdir=None, merge=None, all_files=None):
        '''returns (error, hash)
        error = 1 for unable to read, 2 for patch failed, 3 for patch fuzz'''
        # TODO unify with commands.py
        if not patchdir:
            patchdir = self.path
        err = 0
        n = None
        for patchname in series:
            pushable, reason = self.pushable(patchname)
            if not pushable:
                self.explainpushable(patchname, all_patches=True)
                continue
            self.ui.status(_("applying %s\n") % patchname)
            pf = os.path.join(patchdir, patchname)

            try:
                ph = patchheader(self.join(patchname), self.plainmode)
            except IOError:
                self.ui.warn(_("unable to read %s\n") % patchname)
                err = 1
                break

            message = ph.message
            if not message:
                # The commit message should not be translated
                message = "imported patch %s\n" % patchname
            else:
                if list:
                    # The commit message should not be translated
                    message.append("\nimported patch %s" % patchname)
                message = '\n'.join(message)

            if ph.haspatch:
                (patcherr, files, fuzz) = self.patch(repo, pf)
                if all_files is not None:
                    all_files.update(files)
                patcherr = not patcherr
            else:
                self.ui.warn(_("patch %s is empty\n") % patchname)
                patcherr, files, fuzz = 0, [], 0

            if merge and files:
                # Mark as removed/merged and update dirstate parent info
                removed = []
                merged = []
                for f in files:
                    if os.path.lexists(repo.wjoin(f)):
                        merged.append(f)
                    else:
                        removed.append(f)
                for f in removed:
                    repo.dirstate.remove(f)
                for f in merged:
                    repo.dirstate.merge(f)
                p1, p2 = repo.dirstate.parents()
                repo.dirstate.setparents(p1, merge)

            match = scmutil.matchfiles(repo, files or [])
            n = repo.commit(message, ph.user, ph.date, match=match, force=True)

            if n is None:
                raise util.Abort(_("repository commit failed"))

            if update_status:
                self.applied.append(statusentry(n, patchname))

            if patcherr:
                self.ui.warn(_("patch failed, rejects left in working dir\n"))
                err = 2
                break

            if fuzz and strict:
                self.ui.warn(_("fuzz found when applying patch, stopping\n"))
                err = 3
                break
        return (err, n)

    def _cleanup(self, patches, numrevs, keep=False):
        if not keep:
            r = self.qrepo()
            if r:
                r[None].forget(patches)
            for p in patches:
                os.unlink(self.join(p))

        if numrevs:
            qfinished = self.applied[:numrevs]
            del self.applied[:numrevs]
            self.applieddirty = 1

        unknown = []

        for (i, p) in sorted([(self.findseries(p), p) for p in patches],
                             reverse=True):
            if i is not None:
                del self.fullseries[i]
            else:
                unknown.append(p)

        if unknown:
            if numrevs:
                rev = dict((entry.name, entry.node) for entry in qfinished)
                for p in unknown:
                    msg = _('revision %s refers to unknown patches: %s\n')
                    self.ui.warn(msg % (short(rev[p]), p))
            else:
                msg = _('unknown patches: %s\n')
                raise util.Abort(''.join(msg % p for p in unknown))

        self.parseseries()
        self.seriesdirty = 1

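    # _revpatches() maps revisions back to the names of applied patches,
    # insisting that the revisions form a contiguous run from the bottom of
    # the applied stack (the i-th requested rev must be self.applied[i]).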
769 def _revpatches(self, repo, revs):
769 def _revpatches(self, repo, revs):
770 firstrev = repo[self.applied[0].node].rev()
770 firstrev = repo[self.applied[0].node].rev()
771 patches = []
771 patches = []
772 for i, rev in enumerate(revs):
772 for i, rev in enumerate(revs):
773
773
774 if rev < firstrev:
774 if rev < firstrev:
775 raise util.Abort(_('revision %d is not managed') % rev)
775 raise util.Abort(_('revision %d is not managed') % rev)
776
776
777 ctx = repo[rev]
777 ctx = repo[rev]
778 base = self.applied[i].node
778 base = self.applied[i].node
779 if ctx.node() != base:
779 if ctx.node() != base:
780 msg = _('cannot delete revision %d above applied patches')
780 msg = _('cannot delete revision %d above applied patches')
781 raise util.Abort(msg % rev)
781 raise util.Abort(msg % rev)
782
782
783 patch = self.applied[i].name
783 patch = self.applied[i].name
784 for fmt in ('[mq]: %s', 'imported patch %s'):
784 for fmt in ('[mq]: %s', 'imported patch %s'):
785 if ctx.description() == fmt % patch:
785 if ctx.description() == fmt % patch:
786 msg = _('patch %s finalized without changeset message\n')
786 msg = _('patch %s finalized without changeset message\n')
787 repo.ui.status(msg % patch)
787 repo.ui.status(msg % patch)
788 break
788 break
789
789
790 patches.append(patch)
790 patches.append(patch)
791 return patches
791 return patches
792
792
793 def finish(self, repo, revs):
793 def finish(self, repo, revs):
794 patches = self._revpatches(repo, sorted(revs))
794 patches = self._revpatches(repo, sorted(revs))
795 self._cleanup(patches, len(patches))
795 self._cleanup(patches, len(patches))
796
796
797 def delete(self, repo, patches, opts):
797 def delete(self, repo, patches, opts):
798 if not patches and not opts.get('rev'):
798 if not patches and not opts.get('rev'):
799 raise util.Abort(_('qdelete requires at least one revision or '
799 raise util.Abort(_('qdelete requires at least one revision or '
800 'patch name'))
800 'patch name'))
801
801
802 realpatches = []
802 realpatches = []
803 for patch in patches:
803 for patch in patches:
804 patch = self.lookup(patch, strict=True)
804 patch = self.lookup(patch, strict=True)
805 info = self.isapplied(patch)
805 info = self.isapplied(patch)
806 if info:
806 if info:
807 raise util.Abort(_("cannot delete applied patch %s") % patch)
807 raise util.Abort(_("cannot delete applied patch %s") % patch)
808 if patch not in self.series:
808 if patch not in self.series:
809 raise util.Abort(_("patch %s not in series file") % patch)
809 raise util.Abort(_("patch %s not in series file") % patch)
810 if patch not in realpatches:
810 if patch not in realpatches:
811 realpatches.append(patch)
811 realpatches.append(patch)
812
812
813 numrevs = 0
813 numrevs = 0
814 if opts.get('rev'):
814 if opts.get('rev'):
815 if not self.applied:
815 if not self.applied:
816 raise util.Abort(_('no patches applied'))
816 raise util.Abort(_('no patches applied'))
817 revs = scmutil.revrange(repo, opts.get('rev'))
817 revs = scmutil.revrange(repo, opts.get('rev'))
818 if len(revs) > 1 and revs[0] > revs[1]:
818 if len(revs) > 1 and revs[0] > revs[1]:
819 revs.reverse()
819 revs.reverse()
820 revpatches = self._revpatches(repo, revs)
820 revpatches = self._revpatches(repo, revs)
821 realpatches += revpatches
821 realpatches += revpatches
822 numrevs = len(revpatches)
822 numrevs = len(revpatches)
823
823
824 self._cleanup(realpatches, numrevs, opts.get('keep'))
824 self._cleanup(realpatches, numrevs, opts.get('keep'))
825
825
826 def checktoppatch(self, repo):
826 def checktoppatch(self, repo):
827 if self.applied:
827 if self.applied:
828 top = self.applied[-1].node
828 top = self.applied[-1].node
829 patch = self.applied[-1].name
829 patch = self.applied[-1].name
830 pp = repo.dirstate.parents()
830 pp = repo.dirstate.parents()
831 if top not in pp:
831 if top not in pp:
832 raise util.Abort(_("working directory revision is not qtip"))
832 raise util.Abort(_("working directory revision is not qtip"))
833 return top, patch
833 return top, patch
834 return None, None
834 return None, None
835
835
836 def checksubstate(self, repo):
836 def checksubstate(self, repo):
837 '''return list of subrepos at a different revision than substate.
837 '''return list of subrepos at a different revision than substate.
838 Abort if any subrepos have uncommitted changes.'''
838 Abort if any subrepos have uncommitted changes.'''
839 inclsubs = []
839 inclsubs = []
840 wctx = repo[None]
840 wctx = repo[None]
841 for s in wctx.substate:
841 for s in wctx.substate:
842 if wctx.sub(s).dirty(True):
842 if wctx.sub(s).dirty(True):
843 raise util.Abort(
843 raise util.Abort(
844 _("uncommitted changes in subrepository %s") % s)
844 _("uncommitted changes in subrepository %s") % s)
845 elif wctx.sub(s).dirty():
845 elif wctx.sub(s).dirty():
846 inclsubs.append(s)
846 inclsubs.append(s)
847 return inclsubs
847 return inclsubs
848
848
849 def localchangesfound(self, refresh=True):
849 def localchangesfound(self, refresh=True):
850 if refresh:
850 if refresh:
851 raise util.Abort(_("local changes found, refresh first"))
851 raise util.Abort(_("local changes found, refresh first"))
852 else:
852 else:
853 raise util.Abort(_("local changes found"))
853 raise util.Abort(_("local changes found"))
854
854
855 def checklocalchanges(self, repo, force=False, refresh=True):
855 def checklocalchanges(self, repo, force=False, refresh=True):
856 m, a, r, d = repo.status()[:4]
856 m, a, r, d = repo.status()[:4]
857 if (m or a or r or d) and not force:
857 if (m or a or r or d) and not force:
858 self.localchangesfound(refresh)
858 self.localchangesfound(refresh)
859 return m, a, r, d
859 return m, a, r, d
860
860
861 _reserved = ('series', 'status', 'guards', '.', '..')
861 _reserved = ('series', 'status', 'guards', '.', '..')
862 def checkreservedname(self, name):
862 def checkreservedname(self, name):
863 if name in self._reserved:
863 if name in self._reserved:
864 raise util.Abort(_('"%s" cannot be used as the name of a patch')
864 raise util.Abort(_('"%s" cannot be used as the name of a patch')
865 % name)
865 % name)
866 for prefix in ('.hg', '.mq'):
866 for prefix in ('.hg', '.mq'):
867 if name.startswith(prefix):
867 if name.startswith(prefix):
868 raise util.Abort(_('patch name cannot begin with "%s"')
868 raise util.Abort(_('patch name cannot begin with "%s"')
869 % prefix)
869 % prefix)
870 for c in ('#', ':'):
870 for c in ('#', ':'):
871 if c in name:
871 if c in name:
872 raise util.Abort(_('"%s" cannot be used in the name of a patch')
872 raise util.Abort(_('"%s" cannot be used in the name of a patch')
873 % c)
873 % c)
874
874
875 def checkpatchname(self, name, force=False):
875 def checkpatchname(self, name, force=False):
876 self.checkreservedname(name)
876 self.checkreservedname(name)
877 if not force and os.path.exists(self.join(name)):
877 if not force and os.path.exists(self.join(name)):
878 if os.path.isdir(self.join(name)):
878 if os.path.isdir(self.join(name)):
879 raise util.Abort(_('"%s" already exists as a directory')
879 raise util.Abort(_('"%s" already exists as a directory')
880 % name)
880 % name)
881 else:
881 else:
882 raise util.Abort(_('patch "%s" already exists') % name)
882 raise util.Abort(_('patch "%s" already exists') % name)
883
883
    def new(self, repo, patchfn, *pats, **opts):
        """options:
        msg: a string or a no-argument function returning a string
        """
        msg = opts.get('msg')
        user = opts.get('user')
        date = opts.get('date')
        if date:
            date = util.parsedate(date)
        diffopts = self.diffopts({'git': opts.get('git')})
        if opts.get('checkname', True):
            self.checkpatchname(patchfn)
        inclsubs = self.checksubstate(repo)
        if inclsubs:
            inclsubs.append('.hgsubstate')
        if opts.get('include') or opts.get('exclude') or pats:
            if inclsubs:
                pats = list(pats or []) + inclsubs
            match = scmutil.match(repo[None], pats, opts)
            # detect missing files in pats
            def badfn(f, msg):
                if f != '.hgsubstate': # .hgsubstate is auto-created
                    raise util.Abort('%s: %s' % (f, msg))
            match.bad = badfn
            m, a, r, d = repo.status(match=match)[:4]
        else:
            m, a, r, d = self.checklocalchanges(repo, force=True)
            match = scmutil.matchfiles(repo, m + a + r + inclsubs)
        if len(repo[None].parents()) > 1:
            raise util.Abort(_('cannot manage merge changesets'))
        commitfiles = m + a + r
        self.checktoppatch(repo)
        insert = self.fullseriesend()
        wlock = repo.wlock()
        try:
            try:
                # if patch file write fails, abort early
                p = self.opener(patchfn, "w")
            except IOError, e:
                raise util.Abort(_('cannot write patch "%s": %s')
                                 % (patchfn, e.strerror))
            try:
                if self.plainmode:
                    if user:
                        p.write("From: " + user + "\n")
                        if not date:
                            p.write("\n")
                    if date:
                        p.write("Date: %d %d\n\n" % date)
                else:
                    p.write("# HG changeset patch\n")
                    p.write("# Parent "
                            + hex(repo[None].p1().node()) + "\n")
                    if user:
                        p.write("# User " + user + "\n")
                    if date:
                        p.write("# Date %s %s\n\n" % date)
                if util.safehasattr(msg, '__call__'):
                    msg = msg()
                commitmsg = msg and msg or ("[mq]: %s" % patchfn)
                n = repo.commit(commitmsg, user, date, match=match, force=True)
                if n is None:
                    raise util.Abort(_("repo commit failed"))
                try:
                    self.fullseries[insert:insert] = [patchfn]
                    self.applied.append(statusentry(n, patchfn))
                    self.parseseries()
                    self.seriesdirty = 1
                    self.applieddirty = 1
                    if msg:
                        msg = msg + "\n\n"
                        p.write(msg)
                    if commitfiles:
                        parent = self.qparents(repo, n)
                        chunks = patchmod.diff(repo, node1=parent, node2=n,
                                               match=match, opts=diffopts)
                        for chunk in chunks:
                            p.write(chunk)
                    p.close()
                    wlock.release()
                    wlock = None
                    r = self.qrepo()
                    if r:
                        r[None].add([patchfn])
                except:
                    repo.rollback()
                    raise
            except Exception:
                patchpath = self.join(patchfn)
                try:
                    os.unlink(patchpath)
                except:
                    self.ui.warn(_('error unlinking %s\n') % patchpath)
                raise
            self.removeundo(repo)
        finally:
            release(wlock)

    def strip(self, repo, revs, update=True, backup="all", force=None):
        wlock = lock = None
        try:
            wlock = repo.wlock()
            lock = repo.lock()

            if update:
                self.checklocalchanges(repo, force=force, refresh=False)
                urev = self.qparents(repo, revs[0])
                hg.clean(repo, urev)
                repo.dirstate.write()

            self.removeundo(repo)
            for rev in revs:
                repair.strip(self.ui, repo, rev, backup)
            # strip may have unbundled a set of backed up revisions after
            # the actual strip
            self.removeundo(repo)
        finally:
            release(lock, wlock)

    def isapplied(self, patch):
        """returns (index, rev, patch)"""
        for i, a in enumerate(self.applied):
            if a.name == patch:
                return (i, a.node, a.name)
        return None

    # if the exact patch name does not exist, we try a few
    # variations. If strict is passed, we try only #1
    #
    # 1) a number to indicate an offset in the series file
    # 2) a unique substring of the patch name was given
    # 3) patchname[-+]num to indicate an offset in the series file
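    #
    # For example, with series = [foo, bar, baz] and no patch file named
    # like the argument (a sketch, not an exhaustive list):
    #   lookup("1")     -> bar  (offset into the series file)
    #   lookup("foo+2") -> baz  (two patches after foo)
    #   lookup("baz-1") -> bar  (one patch before baz)
    #   lookup("ba")    -> ambiguous (matches both bar and baz)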
    def lookup(self, patch, strict=False):
        patch = patch and str(patch)

        def partialname(s):
            if s in self.series:
                return s
            matches = [x for x in self.series if s in x]
            if len(matches) > 1:
                self.ui.warn(_('patch name "%s" is ambiguous:\n') % s)
                for m in matches:
                    self.ui.warn('  %s\n' % m)
                return None
            if matches:
                return matches[0]
            if self.series and self.applied:
                if s == 'qtip':
                    return self.series[self.seriesend(True)-1]
                if s == 'qbase':
                    return self.series[0]
            return None

        if patch is None:
            return None
        if patch in self.series:
            return patch

        if not os.path.isfile(self.join(patch)):
            try:
                sno = int(patch)
            except (ValueError, OverflowError):
                pass
            else:
                if -len(self.series) <= sno < len(self.series):
                    return self.series[sno]

            if not strict:
                res = partialname(patch)
                if res:
                    return res
                minus = patch.rfind('-')
                if minus >= 0:
                    res = partialname(patch[:minus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[minus + 1:] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i - off >= 0:
                                return self.series[i - off]
                plus = patch.rfind('+')
                if plus >= 0:
                    res = partialname(patch[:plus])
                    if res:
                        i = self.series.index(res)
                        try:
                            off = int(patch[plus + 1:] or 1)
                        except (ValueError, OverflowError):
                            pass
                        else:
                            if i + off < len(self.series):
                                return self.series[i + off]
        raise util.Abort(_("patch %s not in series") % patch)

    def push(self, repo, patch=None, force=False, list=False,
             mergeq=None, all=False, move=False, exact=False):
        diffopts = self.diffopts()
        wlock = repo.wlock()
        try:
            heads = []
            for b, ls in repo.branchmap().iteritems():
                heads += ls
            if not heads:
                heads = [nullid]
            if repo.dirstate.p1() not in heads and not exact:
                self.ui.status(_("(working directory not at a head)\n"))

            if not self.series:
                self.ui.warn(_('no patches in series\n'))
                return 0

            patch = self.lookup(patch)
            # Suppose our series file is: A B C and the current 'top'
            # patch is B. qpush C should be performed (moving forward)
            # qpush B is a NOP (no change) qpush A is an error (can't
            # go backwards with qpush)
            if patch:
                info = self.isapplied(patch)
                if info and info[0] >= len(self.applied) - 1:
                    self.ui.warn(
                        _('qpush: %s is already at the top\n') % patch)
                    return 0

                pushable, reason = self.pushable(patch)
                if pushable:
                    if self.series.index(patch) < self.seriesend():
                        raise util.Abort(
                            _("cannot push to a previous patch: %s") % patch)
                else:
                    if reason:
                        reason = _('guarded by %s') % reason
                    else:
                        reason = _('no matching guards')
                    self.ui.warn(_("cannot push '%s' - %s\n") % (patch, reason))
                    return 1
            elif all:
                patch = self.series[-1]
                if self.isapplied(patch):
                    self.ui.warn(_('all patches are currently applied\n'))
                    return 0

            # Following the above example, starting at 'top' of B:
            # qpush should be performed (pushes C), but a subsequent
            # qpush without an argument is an error (nothing to
            # apply). This allows a loop of "...while hg qpush..." to
            # work as it detects an error when done
            start = self.seriesend()
            if start == len(self.series):
                self.ui.warn(_('patch series already fully applied\n'))
                return 1
            if not force:
                self.checklocalchanges(repo, refresh=self.applied)

            if exact:
                if move:
                    raise util.Abort(_("cannot use --exact and --move together"))
                if self.applied:
                    raise util.Abort(_("cannot push --exact with applied patches"))
                root = self.series[start]
                target = patchheader(self.join(root), self.plainmode).parent
                if not target:
                    raise util.Abort(_("%s does not have a parent recorded") % root)
                if not repo[target] == repo['.']:
                    hg.update(repo, target)

            if move:
                if not patch:
                    raise util.Abort(_("please specify the patch to move"))
                for i, rpn in enumerate(self.fullseries[start:]):
                    # strip markers for patch guards
                    if self.guard_re.split(rpn, 1)[0] == patch:
                        break
                index = start + i
                assert index < len(self.fullseries)
                fullpatch = self.fullseries[index]
                del self.fullseries[index]
                self.fullseries.insert(start, fullpatch)
                self.parseseries()
                self.seriesdirty = 1

            self.applieddirty = 1
            if start > 0:
                self.checktoppatch(repo)
            if not patch:
                patch = self.series[start]
                end = start + 1
            else:
                end = self.series.index(patch, start) + 1

            s = self.series[start:end]
            all_files = set()
            try:
                if mergeq:
                    ret = self.mergepatch(repo, mergeq, s, diffopts)
                else:
                    ret = self.apply(repo, s, list, all_files=all_files)
            except:
                self.ui.warn(_('cleaning up working directory...'))
                node = repo.dirstate.p1()
                hg.revert(repo, node, None)
                # only remove unknown files that we know we touched or
                # created while patching
                for f in all_files:
                    if f not in repo.dirstate:
                        try:
                            util.unlinkpath(repo.wjoin(f))
                        except OSError, inst:
                            if inst.errno != errno.ENOENT:
                                raise
                self.ui.warn(_('done\n'))
                raise

            if not self.applied:
                return ret[0]
            top = self.applied[-1].name
            if ret[0] and ret[0] > 1:
                msg = _("errors during apply, please fix and refresh %s\n")
                self.ui.write(msg % top)
            else:
                self.ui.write(_("now at: %s\n") % top)
            return ret[0]

        finally:
            wlock.release()
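    # A qpush session driven by the logic above might look like this
    # (a sketch; actual output depends on the queue state):
    #   $ hg qpush
    #   now at: bar.patch
    #   $ hg qpush bar.patch
    #   qpush: bar.patch is already at the top
    #   $ hg qpush foo.patch
    #   abort: cannot push to a previous patch: foo.patch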

    def pop(self, repo, patch=None, force=False, update=True, all=False):
        wlock = repo.wlock()
        try:
            if patch:
                # index, rev, patch
                info = self.isapplied(patch)
                if not info:
                    patch = self.lookup(patch)
                info = self.isapplied(patch)
                if not info:
                    raise util.Abort(_("patch %s is not applied") % patch)

            if not self.applied:
                # Allow qpop -a to work repeatedly,
                # but not qpop without an argument
                self.ui.warn(_("no patches applied\n"))
                return not all

            if all:
                start = 0
            elif patch:
                start = info[0] + 1
            else:
                start = len(self.applied) - 1

            if start >= len(self.applied):
                self.ui.warn(_("qpop: %s is already at the top\n") % patch)
                return

            if not update:
                parents = repo.dirstate.parents()
                rr = [x.node for x in self.applied]
                for p in parents:
                    if p in rr:
                        self.ui.warn(_("qpop: forcing dirstate update\n"))
                        update = True
            else:
                parents = [p.node() for p in repo[None].parents()]
                needupdate = False
                for entry in self.applied[start:]:
                    if entry.node in parents:
                        needupdate = True
                        break
                update = needupdate

            if not force and update:
                self.checklocalchanges(repo)

            self.applieddirty = 1
            end = len(self.applied)
            rev = self.applied[start].node
            if update:
                top = self.checktoppatch(repo)[0]

            try:
                heads = repo.changelog.heads(rev)
            except error.LookupError:
                node = short(rev)
                raise util.Abort(_('trying to pop unknown node %s') % node)

            if heads != [self.applied[-1].node]:
                raise util.Abort(_("popping would remove a revision not "
                                   "managed by this patch queue"))

            # we know there are no local changes, so we can make a simplified
            # form of hg.update.
            if update:
                qp = self.qparents(repo, rev)
                ctx = repo[qp]
                m, a, r, d = repo.status(qp, top)[:4]
                if d:
                    raise util.Abort(_("deletions found between repo revs"))
                for f in a:
                    try:
                        util.unlinkpath(repo.wjoin(f))
                    except OSError, e:
                        if e.errno != errno.ENOENT:
                            raise
                    repo.dirstate.drop(f)
                for f in m + r:
                    fctx = ctx[f]
                    repo.wwrite(f, fctx.data(), fctx.flags())
                    repo.dirstate.normal(f)
                repo.dirstate.setparents(qp, nullid)
            for patch in reversed(self.applied[start:end]):
                self.ui.status(_("popping %s\n") % patch.name)
            del self.applied[start:end]
            self.strip(repo, [rev], update=False, backup='strip')
            if self.applied:
                self.ui.write(_("now at: %s\n") % self.applied[-1].name)
            else:
                self.ui.write(_("patch queue now empty\n"))
        finally:
            wlock.release()
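    # The matching qpop flow (sketch): popping strips the patch changesets
    # and rewinds the working directory to the patch parent:
    #   $ hg qpop
    #   popping bar.patch
    #   now at: foo.patch
    #   $ hg qpop --all
    #   popping foo.patch
    #   patch queue now empty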

    def diff(self, repo, pats, opts):
        top, patch = self.checktoppatch(repo)
        if not top:
            self.ui.write(_("no patches applied\n"))
            return
        qp = self.qparents(repo, top)
        if opts.get('reverse'):
            node1, node2 = None, qp
        else:
            node1, node2 = qp, None
        diffopts = self.diffopts(opts, patch)
        self.printdiff(repo, diffopts, node1, node2, files=pats, opts=opts)

    def refresh(self, repo, pats=None, **opts):
        if not self.applied:
            self.ui.write(_("no patches applied\n"))
            return 1
        msg = opts.get('msg', '').rstrip()
        newuser = opts.get('user')
        newdate = opts.get('date')
        if newdate:
            newdate = '%d %d' % util.parsedate(newdate)
        wlock = repo.wlock()

        try:
            self.checktoppatch(repo)
            (top, patchfn) = (self.applied[-1].node, self.applied[-1].name)
            if repo.changelog.heads(top) != [top]:
                raise util.Abort(_("cannot refresh a revision with children"))

            inclsubs = self.checksubstate(repo)

            cparents = repo.changelog.parents(top)
            patchparent = self.qparents(repo, top)
            ph = patchheader(self.join(patchfn), self.plainmode)
            diffopts = self.diffopts({'git': opts.get('git')}, patchfn)
            if msg:
                ph.setmessage(msg)
            if newuser:
                ph.setuser(newuser)
            if newdate:
                ph.setdate(newdate)
            ph.setparent(hex(patchparent))

            # only commit new patch when write is complete
            patchf = self.opener(patchfn, 'w', atomictemp=True)

            comments = str(ph)
            if comments:
                patchf.write(comments)

            # update the dirstate in place, strip off the qtip commit
            # and then commit.
            #
            # this should really read:
            #   mm, dd, aa = repo.status(top, patchparent)[:3]
            # but we do it backwards to take advantage of manifest/chlog
            # caching against the next repo.status call
            mm, aa, dd = repo.status(patchparent, top)[:3]
            changes = repo.changelog.read(top)
            man = repo.manifest.read(changes[0])
            aaa = aa[:]
            matchfn = scmutil.match(repo[None], pats, opts)
            # in short mode, we only diff the files included in the
            # patch already plus specified files
            if opts.get('short'):
                # if amending a patch, we start with existing
                # files plus specified files - unfiltered
                match = scmutil.matchfiles(repo, mm + aa + dd + matchfn.files())
                # filter with include/exclude options
                matchfn = scmutil.match(repo[None], opts=opts)
            else:
                match = scmutil.matchall(repo)
            m, a, r, d = repo.status(match=match)[:4]
            mm = set(mm)
            aa = set(aa)
            dd = set(dd)

            # we might end up with files that were added between
            # qtip and the dirstate parent, but then changed in the
            # local dirstate. in this case, we want them to only
            # show up in the added section
            for x in m:
                if x not in aa:
                    mm.add(x)
            # we might end up with files added by the local dirstate that
            # were deleted by the patch. In this case, they should only
            # show up in the changed section.
            for x in a:
                if x in dd:
                    dd.remove(x)
                    mm.add(x)
                else:
                    aa.add(x)
            # make sure any files deleted in the local dirstate
            # are not in the add or change column of the patch
            forget = []
            for x in d + r:
                if x in aa:
                    aa.remove(x)
                    forget.append(x)
                    continue
                else:
                    mm.discard(x)
                dd.add(x)

            m = list(mm)
            r = list(dd)
            a = list(aa)
            c = [filter(matchfn, l) for l in (m, a, r)]
            match = scmutil.matchfiles(repo, set(c[0] + c[1] + c[2] + inclsubs))
            chunks = patchmod.diff(repo, patchparent, match=match,
                                   changes=c, opts=diffopts)
            for chunk in chunks:
                patchf.write(chunk)

            try:
                if diffopts.git or diffopts.upgrade:
                    copies = {}
                    for dst in a:
                        src = repo.dirstate.copied(dst)
                        # during qfold, the source file for copies may
                        # be removed. Treat this as a simple add.
                        if src is not None and src in repo.dirstate:
                            copies.setdefault(src, []).append(dst)
                        repo.dirstate.add(dst)
                    # remember the copies between patchparent and qtip
                    for dst in aaa:
                        f = repo.file(dst)
                        src = f.renamed(man[dst])
                        if src:
                            copies.setdefault(src[0], []).extend(
                                copies.get(dst, []))
                            if dst in a:
                                copies[src[0]].append(dst)
                        # we can't copy a file created by the patch itself
                        if dst in copies:
                            del copies[dst]
                    for src, dsts in copies.iteritems():
                        for dst in dsts:
                            repo.dirstate.copy(src, dst)
                else:
                    for dst in a:
                        repo.dirstate.add(dst)
                    # Drop useless copy information
                    for f in list(repo.dirstate.copies()):
                        repo.dirstate.copy(None, f)
                for f in r:
                    repo.dirstate.remove(f)
                # if the patch excludes a modified file, mark that
                # file with mtime=0 so status can see it.
                mm = []
                for i in xrange(len(m)-1, -1, -1):
                    if not matchfn(m[i]):
                        mm.append(m[i])
                        del m[i]
                for f in m:
                    repo.dirstate.normal(f)
                for f in mm:
                    repo.dirstate.normallookup(f)
                for f in forget:
                    repo.dirstate.drop(f)

                if not msg:
                    if not ph.message:
                        message = "[mq]: %s\n" % patchfn
                    else:
                        message = "\n".join(ph.message)
                else:
                    message = msg

                user = ph.user or changes[1]

                # assumes strip can roll itself back if interrupted
                repo.dirstate.setparents(*cparents)
                self.applied.pop()
                self.applieddirty = 1
                self.strip(repo, [top], update=False,
                           backup='strip')
            except:
                repo.dirstate.invalidate()
                raise

            try:
                # might be nice to attempt to roll back strip after this
                n = repo.commit(message, user, ph.date, match=match,
                                force=True)
                # only write patch after a successful commit
                patchf.close()
                self.applied.append(statusentry(n, patchfn))
            except:
                ctx = repo[cparents[0]]
                repo.dirstate.rebuild(ctx.node(), ctx.manifest())
                self.savedirty()
                self.ui.warn(_('refresh interrupted while patch was popped! '
                               '(revert --all, qpush to recover)\n'))
                raise
        finally:
            wlock.release()
            self.removeundo(repo)

    def init(self, repo, create=False):
        if not create and os.path.isdir(self.path):
            raise util.Abort(_("patch queue directory already exists"))
        try:
            os.mkdir(self.path)
        except OSError, inst:
            if inst.errno != errno.EEXIST or not create:
                raise
        if create:
            return self.qrepo(create=True)

    def unapplied(self, repo, patch=None):
        if patch and patch not in self.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        if not patch:
            start = self.seriesend()
        else:
            start = self.series.index(patch) + 1
        unapplied = []
        for i in xrange(start, len(self.series)):
            pushable, reason = self.pushable(i)
            if pushable:
                unapplied.append((i, self.series[i]))
            self.explainpushable(i)
        return unapplied
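    # In verbose mode, qseries() below prefixes every name with its index
    # and a status character (A applied, U unapplied, G guarded); with
    # summary=True the first line of the patch message follows. A sketch:
    #   0 A foo.patch: fix the first bug
    #   1 U bar.patch: work in progress
    #   2 G baz.patch: guarded experiment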
    def qseries(self, repo, missing=None, start=0, length=None, status=None,
                summary=False):
        def displayname(pfx, patchname, state):
            if pfx:
                self.ui.write(pfx)
            if summary:
                ph = patchheader(self.join(patchname), self.plainmode)
                msg = ph.message and ph.message[0] or ''
                if self.ui.formatted():
                    width = self.ui.termwidth() - len(pfx) - len(patchname) - 2
                    if width > 0:
                        msg = util.ellipsis(msg, width)
                    else:
                        msg = ''
                self.ui.write(patchname, label='qseries.' + state)
                self.ui.write(': ')
                self.ui.write(msg, label='qseries.message.' + state)
            else:
                self.ui.write(patchname, label='qseries.' + state)
            self.ui.write('\n')

        applied = set([p.name for p in self.applied])
        if length is None:
            length = len(self.series) - start
        if not missing:
            if self.ui.verbose:
                idxwidth = len(str(start + length - 1))
            for i in xrange(start, start + length):
                patch = self.series[i]
                if patch in applied:
                    char, state = 'A', 'applied'
                elif self.pushable(i)[0]:
                    char, state = 'U', 'unapplied'
                else:
                    char, state = 'G', 'guarded'
                pfx = ''
                if self.ui.verbose:
                    pfx = '%*d %s ' % (idxwidth, i, char)
                elif status and status != char:
                    continue
                displayname(pfx, patch, state)
        else:
            msng_list = []
            for root, dirs, files in os.walk(self.path):
                d = root[len(self.path) + 1:]
                for f in files:
                    fl = os.path.join(d, f)
                    if (fl not in self.series and
                        fl not in (self.statuspath, self.seriespath,
                                   self.guardspath)
                        and not fl.startswith('.')):
                        msng_list.append(fl)
            for x in sorted(msng_list):
                pfx = self.ui.verbose and ('D ') or ''
                displayname(pfx, x, 'missing')

    def issaveline(self, l):
        if l.name == '.hg.patches.save.line':
            return True
1594 def qrepo(self, create=False):
1594 def qrepo(self, create=False):
1595 ui = self.ui.copy()
1595 ui = self.ui.copy()
1596 ui.setconfig('paths', 'default', '', overlay=False)
1596 ui.setconfig('paths', 'default', '', overlay=False)
1597 ui.setconfig('paths', 'default-push', '', overlay=False)
1597 ui.setconfig('paths', 'default-push', '', overlay=False)
1598 if create or os.path.isdir(self.join(".hg")):
1598 if create or os.path.isdir(self.join(".hg")):
1599 return hg.repository(ui, path=self.path, create=create)
1599 return hg.repository(ui, path=self.path, create=create)
1600
1600
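    # restore() parses the description of a changeset written by save()
    # further down. The expected layout is (a sketch, nodes abbreviated):
    #   hg patches saved state
    #   Dirstate: <p1-hex> <p2-hex>
    #
    #   Patch Data:
    #   <node-hex>:applied-patch
    #   :series-entry
    # Lines with a node before the ':' rebuild the applied stack; the
    # rest rebuild the series file. The Dirstate: line is only present
    # when a patch queue repository exists.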
    def restore(self, repo, rev, delete=None, qupdate=None):
        desc = repo[rev].description().strip()
        lines = desc.splitlines()
        i = 0
        datastart = None
        series = []
        applied = []
        qpp = None
        for i, line in enumerate(lines):
            if line == 'Patch Data:':
                datastart = i + 1
            elif line.startswith('Dirstate:'):
                l = line.rstrip()
                l = l[10:].split(' ')
                qpp = [bin(x) for x in l]
            elif datastart is not None:
                l = line.rstrip()
                n, name = l.split(':', 1)
                if n:
                    applied.append(statusentry(bin(n), name))
                else:
                    series.append(l)
        if datastart is None:
            self.ui.warn(_("No saved patch data found\n"))
            return 1
        self.ui.warn(_("restoring status: %s\n") % lines[0])
        self.fullseries = series
        self.applied = applied
        self.parseseries()
        self.seriesdirty = 1
        self.applieddirty = 1
        heads = repo.changelog.heads()
        if delete:
            if rev not in heads:
                self.ui.warn(_("save entry has children, leaving it alone\n"))
            else:
                self.ui.warn(_("removing save entry %s\n") % short(rev))
                pp = repo.dirstate.parents()
                if rev in pp:
                    update = True
                else:
                    update = False
                self.strip(repo, [rev], update=update, backup='strip')
        if qpp:
            self.ui.warn(_("saved queue repository parents: %s %s\n") %
                         (short(qpp[0]), short(qpp[1])))
            if qupdate:
                self.ui.status(_("updating queue directory\n"))
                r = self.qrepo()
                if not r:
                    self.ui.warn(_("Unable to load queue repository\n"))
                    return 1
                hg.clean(r, qpp[0])

    def save(self, repo, msg=None):
        if not self.applied:
            self.ui.warn(_("save: no patches applied, exiting\n"))
            return 1
        if self.issaveline(self.applied[-1]):
            self.ui.warn(_("status is already saved\n"))
            return 1

        if not msg:
            msg = _("hg patches saved state")
        else:
            msg = "hg patches: " + msg.rstrip('\r\n')
        r = self.qrepo()
        if r:
            pp = r.dirstate.parents()
            msg += "\nDirstate: %s %s" % (hex(pp[0]), hex(pp[1]))
        msg += "\n\nPatch Data:\n"
        msg += ''.join('%s\n' % x for x in self.applied)
        msg += ''.join(':%s\n' % x for x in self.fullseries)
        n = repo.commit(msg, force=True)
        if not n:
            self.ui.warn(_("repo commit failed\n"))
            return 1
        self.applied.append(statusentry(n, '.hg.patches.save.line'))
        self.applieddirty = 1
        self.removeundo(repo)

    def fullseriesend(self):
        if self.applied:
            p = self.applied[-1].name
            end = self.findseries(p)
            if end is None:
                return len(self.fullseries)
            return end + 1
        return 0

    def seriesend(self, all_patches=False):
        """If all_patches is False, return the index of the next pushable patch
        in the series, or the series length. If all_patches is True, return the
        index of the first patch past the last applied one.
        """
        end = 0
        def next(start):
            if all_patches or start >= len(self.series):
                return start
            for i in xrange(start, len(self.series)):
                p, reason = self.pushable(i)
                if p:
                    break
                self.explainpushable(i)
            return i
        if self.applied:
            p = self.applied[-1].name
            try:
                end = self.series.index(p)
            except ValueError:
                return 0
            return next(end + 1)
        return next(end)
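    # Example (sketch): with series = [p0, p1, p2] where p0 is applied and
    # p1 is guarded off, seriesend() skips p1 and returns 2 (the next
    # pushable patch), while seriesend(True) returns 1, the slot right
    # past the last applied patch.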

    def appliedname(self, index):
        pname = self.applied[index].name
        if not self.ui.verbose:
            p = pname
        else:
            p = str(self.series.index(pname)) + " " + pname
        return p
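    # qimport() accepts two kinds of input. With rev, existing changesets
    # are exported into patches named "<rev>.diff" and placed, already
    # applied, at the front of the series; with files, patch text is read
    # from disk, a URL, or stdin ("-") and inserted after the last applied
    # patch. Illustration (hypothetical revision numbers):
    #   hg qimport -r 1200:1202  ->  series starts with 1200.diff,
    #                                1201.diff, 1202.diff, all applied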
    def qimport(self, repo, files, patchname=None, rev=None, existing=None,
                force=None, git=False):
        def checkseries(patchname):
            if patchname in self.series:
                raise util.Abort(_('patch %s is already in the series file')
                                 % patchname)

        if rev:
            if files:
                raise util.Abort(_('option "-r" not valid when importing '
                                   'files'))
            rev = scmutil.revrange(repo, rev)
            rev.sort(reverse=True)
        if (len(files) > 1 or len(rev) > 1) and patchname:
            raise util.Abort(_('option "-n" not valid when importing multiple '
                               'patches'))
        if rev:
            # If mq patches are applied, we can only import revisions
            # that form a linear path to qbase.
            # Otherwise, they should form a linear path to a head.
            heads = repo.changelog.heads(repo.changelog.node(rev[-1]))
            if len(heads) > 1:
                raise util.Abort(_('revision %d is the root of more than one '
                                   'branch') % rev[-1])
            if self.applied:
                base = repo.changelog.node(rev[0])
                if base in [n.node for n in self.applied]:
                    raise util.Abort(_('revision %d is already managed')
                                     % rev[0])
                if heads != [self.applied[-1].node]:
                    raise util.Abort(_('revision %d is not the parent of '
                                       'the queue') % rev[0])
                base = repo.changelog.rev(self.applied[0].node)
                lastparent = repo.changelog.parentrevs(base)[0]
            else:
                if heads != [repo.changelog.node(rev[0])]:
                    raise util.Abort(_('revision %d has unmanaged children')
                                     % rev[0])
                lastparent = None

            diffopts = self.diffopts({'git': git})
            for r in rev:
                p1, p2 = repo.changelog.parentrevs(r)
                n = repo.changelog.node(r)
                if p2 != nullrev:
                    raise util.Abort(_('cannot import merge revision %d') % r)
                if lastparent and lastparent != r:
                    raise util.Abort(_('revision %d is not the parent of %d')
                                     % (r, lastparent))
                lastparent = p1

                if not patchname:
                    patchname = normname('%d.diff' % r)
                checkseries(patchname)
                self.checkpatchname(patchname, force)
                self.fullseries.insert(0, patchname)

                patchf = self.opener(patchname, "w")
                cmdutil.export(repo, [n], fp=patchf, opts=diffopts)
                patchf.close()

                se = statusentry(n, patchname)
                self.applied.insert(0, se)

                self.added.append(patchname)
                patchname = None
            self.parseseries()
            self.applieddirty = 1
            self.seriesdirty = True

        for i, filename in enumerate(files):
            if existing:
                if filename == '-':
                    raise util.Abort(_('-e is incompatible with import from -'))
                filename = normname(filename)
                self.checkreservedname(filename)
                originpath = self.join(filename)
                if not os.path.isfile(originpath):
                    raise util.Abort(_("patch %s does not exist") % filename)

                if patchname:
                    self.checkpatchname(patchname, force)

                    self.ui.write(_('renaming %s to %s\n')
                                  % (filename, patchname))
                    util.rename(originpath, self.join(patchname))
                else:
                    patchname = filename

            else:
                if filename == '-' and not patchname:
                    raise util.Abort(_('need --name to import a patch from -'))
                elif not patchname:
                    patchname = normname(os.path.basename(filename.rstrip('/')))
                self.checkpatchname(patchname, force)
                try:
                    if filename == '-':
                        text = self.ui.fin.read()
                    else:
                        fp = url.open(self.ui, filename)
                        text = fp.read()
                        fp.close()
                except (OSError, IOError):
                    raise util.Abort(_("unable to read file %s") % filename)
                patchf = self.opener(patchname, "w")
                patchf.write(text)
                patchf.close()
            if not force:
                checkseries(patchname)
            if patchname not in self.series:
                index = self.fullseriesend() + i
                self.fullseries[index:index] = [patchname]
            self.parseseries()
            self.seriesdirty = True
            self.ui.warn(_("adding %s to series file\n") % patchname)
            self.added.append(patchname)
            patchname = None

        self.removeundo(repo)

@command("qdelete|qremove|qrm",
         [('k', 'keep', None, _('keep patch file')),
          ('r', 'rev', [],
           _('stop managing a revision (DEPRECATED)'), _('REV'))],
         _('hg qdelete [-k] [PATCH]...'))
def delete(ui, repo, *patches, **opts):
    """remove patches from queue

    The patches must not be applied, and at least one patch is required. With
    -k/--keep, the patch files are preserved in the patch directory.

    To stop managing a patch and move it into permanent history,
    use the :hg:`qfinish` command."""
    q = repo.mq
    q.delete(repo, patches, opts)
    q.savedirty()
    return 0

@command("qapplied",
         [('1', 'last', None, _('show only the last patch'))
          ] + seriesopts,
         _('hg qapplied [-1] [-s] [PATCH]'))
def applied(ui, repo, patch=None, **opts):
    """print the patches already applied

    Returns 0 on success."""

    q = repo.mq

    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        end = q.series.index(patch) + 1
    else:
        end = q.seriesend(True)

    if opts.get('last') and not end:
        ui.write(_("no patches applied\n"))
        return 1
    elif opts.get('last') and end == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    elif opts.get('last'):
        start = end - 2
        end = 1
    else:
        start = 0

    q.qseries(repo, length=end, start=start, status='A',
              summary=opts.get('summary'))


@command("qunapplied",
    [('1', 'first', None, _('show only the first patch'))] + seriesopts,
    _('hg qunapplied [-1] [-s] [PATCH]'))
def unapplied(ui, repo, patch=None, **opts):
    """print the patches not yet applied

    Returns 0 on success."""

    q = repo.mq
    if patch:
        if patch not in q.series:
            raise util.Abort(_("patch %s is not in series file") % patch)
        start = q.series.index(patch) + 1
    else:
        start = q.seriesend(True)

    if start == len(q.series) and opts.get('first'):
        ui.write(_("all patches applied\n"))
        return 1

    length = opts.get('first') and 1 or None
    q.qseries(repo, start=start, length=length, status='U',
              summary=opts.get('summary'))

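# Usage sketch (illustrative; patch names are hypothetical). With patches
# base-fix and feature-x applied and feature-y still unapplied:
#
#   $ hg qapplied
#   base-fix
#   feature-x
#   $ hg qunapplied
#   feature-y
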
@command("qimport",
    [('e', 'existing', None, _('import file in patch directory')),
     ('n', 'name', '',
      _('name of patch file'), _('NAME')),
     ('f', 'force', None, _('overwrite existing files')),
     ('r', 'rev', [],
      _('place existing revisions under mq control'), _('REV')),
     ('g', 'git', None, _('use git extended diff format')),
     ('P', 'push', None, _('qpush after importing'))],
    _('hg qimport [-e] [-n NAME] [-f] [-g] [-P] [-r REV]... FILE...'))
def qimport(ui, repo, *filename, **opts):
    """import a patch

    The patch is inserted into the series after the last applied
    patch. If no patches have been applied, qimport prepends the patch
    to the series.

    The patch will have the same name as its source file unless you
    give it a new one with -n/--name.

    You can register an existing patch inside the patch directory with
    the -e/--existing flag.

    With -f/--force, an existing patch of the same name will be
    overwritten.

    An existing changeset may be placed under mq control with -r/--rev
    (e.g. qimport --rev tip -n patch will place tip under mq control).
    With -g/--git, patches imported with --rev will use the git diff
    format. See the diffs help topic for information on why this is
    important for preserving rename/copy information and permission
    changes. Use :hg:`qfinish` to remove changesets from mq control.

    To import a patch from standard input, pass - as the patch file.
    When importing from standard input, a patch name must be specified
    using the --name flag.

    To import an existing patch while renaming it::

        hg qimport -e existing-patch -n new-name

    Returns 0 if import succeeded.
    """
    q = repo.mq
    try:
        q.qimport(repo, filename, patchname=opts.get('name'),
                  existing=opts.get('existing'), force=opts.get('force'),
                  rev=opts.get('rev'), git=opts.get('git'))
    finally:
        q.savedirty()

    if opts.get('push') and not opts.get('rev'):
        return q.push(repo, None)
    return 0

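# Usage sketch (illustrative; the name and URL are hypothetical). Importing
# from standard input requires --name, and -P pushes right away:
#
#   $ curl -s https://example.com/fix.patch | hg qimport --name fix -P -
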
def qinit(ui, repo, create):
    """initialize a new queue repository

    This command also creates a series file for ordering patches, and
    an mq-specific .hgignore file in the queue repository, to exclude
    the status and guards files (these contain mostly transient state).

    Returns 0 if initialization succeeded."""
    q = repo.mq
    r = q.init(repo, create)
    q.savedirty()
    if r:
        if not os.path.exists(r.wjoin('.hgignore')):
            fp = r.wopener('.hgignore', 'w')
            fp.write('^\\.hg\n')
            fp.write('^\\.mq\n')
            fp.write('syntax: glob\n')
            fp.write('status\n')
            fp.write('guards\n')
            fp.close()
        if not os.path.exists(r.wjoin('series')):
            r.wopener('series', 'w').close()
        r[None].add(['.hgignore', 'series'])
        commands.add(ui, r)
    return 0

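# The generated .hgignore ends up looking like this (the first two patterns
# are regexes; everything after the "syntax: glob" switch is a glob):
#
#   ^\.hg
#   ^\.mq
#   syntax: glob
#   status
#   guards
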
@command("^qinit",
    [('c', 'create-repo', None, _('create queue repository'))],
    _('hg qinit [-c]'))
def init(ui, repo, **opts):
    """init a new queue repository (DEPRECATED)

    The queue repository is unversioned by default. If
    -c/--create-repo is specified, qinit will create a separate nested
    repository for patches (qinit -c may also be run later to convert
    an unversioned patch repository into a versioned one). You can use
    qcommit to commit changes to this queue repository.

    This command is deprecated. Without -c, it's implied by other relevant
    commands. With -c, use :hg:`init --mq` instead."""
    return qinit(ui, repo, create=opts.get('create_repo'))

@command("qclone",
    [('', 'pull', None, _('use pull protocol to copy metadata')),
     ('U', 'noupdate', None, _('do not update the new working directories')),
     ('', 'uncompressed', None,
      _('use uncompressed transfer (fast over LAN)')),
     ('p', 'patches', '',
      _('location of source patch repository'), _('REPO')),
    ] + commands.remoteopts,
    _('hg qclone [OPTION]... SOURCE [DEST]'))
def clone(ui, source, dest=None, **opts):
    '''clone main and patch repository at same time

    If source is local, destination will have no patches applied. If
    source is remote, this command cannot check whether patches are
    applied there, and so cannot guarantee that patches are not
    applied in the destination. If you clone a remote repository,
    make sure it has no patches applied before cloning.

    The source patch repository is looked for in <src>/.hg/patches by
    default. Use -p <url> to change it.

    The patch directory must be a nested Mercurial repository, as
    would be created by :hg:`init --mq`.

    Return 0 on success.
    '''
    def patchdir(repo):
        url = repo.url()
        if url.endswith('/'):
            url = url[:-1]
        return url + '/.hg/patches'
    if dest is None:
        dest = hg.defaultdest(source)
    sr = hg.repository(hg.remoteui(ui, opts), ui.expandpath(source))
    if opts.get('patches'):
        patchespath = ui.expandpath(opts.get('patches'))
    else:
        patchespath = patchdir(sr)
    try:
        hg.repository(ui, patchespath)
    except error.RepoError:
        raise util.Abort(_('versioned patch repository not found'
                           ' (see init --mq)'))
    qbase, destrev = None, None
    if sr.local():
        if sr.mq.applied:
            qbase = sr.mq.applied[0].node
            if not hg.islocal(dest):
                heads = set(sr.heads())
                destrev = list(heads.difference(sr.heads(qbase)))
                destrev.append(sr.changelog.parents(qbase)[0])
    elif sr.capable('lookup'):
        try:
            qbase = sr.lookup('qbase')
        except error.RepoError:
            pass
    ui.note(_('cloning main repository\n'))
    sr, dr = hg.clone(ui, opts, sr.url(), dest,
                      pull=opts.get('pull'),
                      rev=destrev,
                      update=False,
                      stream=opts.get('uncompressed'))
    ui.note(_('cloning patch repository\n'))
    hg.clone(ui, opts, opts.get('patches') or patchdir(sr), patchdir(dr),
             pull=opts.get('pull'), update=not opts.get('noupdate'),
             stream=opts.get('uncompressed'))
    if dr.local():
        if qbase:
            ui.note(_('stripping applied patches from destination '
                      'repository\n'))
            dr.mq.strip(dr, [qbase], update=False, backup=None)
        if not opts.get('noupdate'):
            ui.note(_('updating destination repository\n'))
            hg.update(dr, dr.changelog.tip())

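# Usage sketch (illustrative; URLs are hypothetical). Clone a repository
# together with its versioned patch queue, pointing -p at a non-default
# patch repository location:
#
#   $ hg qclone -p https://example.com/project-patches \
#         https://example.com/project project-with-mq
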
@command("qcommit|qci",
    commands.table["^commit|ci"][1],
    _('hg qcommit [OPTION]... [FILE]...'))
def commit(ui, repo, *pats, **opts):
    """commit changes in the queue repository (DEPRECATED)

    This command is deprecated; use :hg:`commit --mq` instead."""
    q = repo.mq
    r = q.qrepo()
    if not r:
        raise util.Abort(_('no queue repository'))
    commands.commit(r.ui, r, *pats, **opts)

@command("qseries",
    [('m', 'missing', None, _('print patches not in series')),
    ] + seriesopts,
    _('hg qseries [-ms]'))
def series(ui, repo, **opts):
    """print the entire series file

    Returns 0 on success."""
    repo.mq.qseries(repo, missing=opts.get('missing'),
                    summary=opts.get('summary'))
    return 0

@command("qtop", seriesopts, _('hg qtop [-s]'))
def top(ui, repo, **opts):
    """print the name of the current patch

    Returns 0 on success."""
    q = repo.mq
    t = q.applied and q.seriesend(True) or 0
    if t:
        q.qseries(repo, start=t - 1, length=1, status='A',
                  summary=opts.get('summary'))
    else:
        ui.write(_("no patches applied\n"))
        return 1

@command("qnext", seriesopts, _('hg qnext [-s]'))
def next(ui, repo, **opts):
    """print the name of the next patch

    Returns 0 on success."""
    q = repo.mq
    end = q.seriesend()
    if end == len(q.series):
        ui.write(_("all patches applied\n"))
        return 1
    q.qseries(repo, start=end, length=1, summary=opts.get('summary'))

@command("qprev", seriesopts, _('hg qprev [-s]'))
def prev(ui, repo, **opts):
    """print the name of the previous patch

    Returns 0 on success."""
    q = repo.mq
    l = len(q.applied)
    if l == 1:
        ui.write(_("only one patch applied\n"))
        return 1
    if not l:
        ui.write(_("no patches applied\n"))
        return 1
    q.qseries(repo, start=l - 2, length=1, status='A',
              summary=opts.get('summary'))

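# Usage sketch (illustrative; patch names are hypothetical). With patches
# a, b, c in the series and a, b applied:
#
#   $ hg qtop    # b - top of the applied stack
#   $ hg qprev   # a - applied patch just below the top
#   $ hg qnext   # c - next patch that qpush would apply
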
def setupheaderopts(ui, opts):
    if not opts.get('user') and opts.get('currentuser'):
        opts['user'] = ui.username()
    if not opts.get('date') and opts.get('currentdate'):
        opts['date'] = "%d %d" % util.makedate()

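# util.makedate() yields a (timestamp, timezone offset) pair, so a
# -D/--currentdate header is rendered as two integers, e.g. "1317758400 0"
# (illustrative values).
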
@command("^qnew",
    [('e', 'edit', None, _('edit commit message')),
     ('f', 'force', None, _('import uncommitted changes (DEPRECATED)')),
     ('g', 'git', None, _('use git extended diff format')),
     ('U', 'currentuser', None, _('add "From: <current user>" to patch')),
     ('u', 'user', '',
      _('add "From: <USER>" to patch'), _('USER')),
     ('D', 'currentdate', None, _('add "Date: <current date>" to patch')),
     ('d', 'date', '',
      _('add "Date: <DATE>" to patch'), _('DATE'))
    ] + commands.walkopts + commands.commitopts,
    _('hg qnew [-e] [-m TEXT] [-l FILE] PATCH [FILE]...'))
def new(ui, repo, patch, *args, **opts):
    """create a new patch

    qnew creates a new patch on top of the currently-applied patch (if
    any). The patch will be initialized with any outstanding changes
    in the working directory. You may also use -I/--include,
    -X/--exclude, and/or a list of files after the patch name to add
    only changes to matching files to the new patch, leaving the rest
    as uncommitted modifications.

    -u/--user and -d/--date can be used to set the (given) user and
    date, respectively. -U/--currentuser and -D/--currentdate set user
    to current user and date to current date.

    -e/--edit, -m/--message or -l/--logfile set the patch header as
    well as the commit message. If none is specified, the header is
    empty and the commit message is '[mq]: PATCH'.

    Use the -g/--git option to keep the patch in the git extended diff
    format. Read the diffs help topic for more information on why this
    is important for preserving permission changes and copy/rename
    information.

    Returns 0 on successful creation of a new patch.
    """
    msg = cmdutil.logmessage(ui, opts)
    def getmsg():
        return ui.edit(msg, opts.get('user') or ui.username())
    q = repo.mq
    if opts.get('edit'):
        # defer building the message until q.new actually needs it
        opts['msg'] = getmsg
    else:
        opts['msg'] = msg
    setupheaderopts(ui, opts)
    q.new(repo, patch, *args, **opts)
    q.savedirty()
    return 0

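# Usage sketch (illustrative; names are hypothetical). Start a patch that
# captures only changes to one file, stamping author and date headers:
#
#   $ hg qnew -U -D -m "fix parser crash" parser-fix.patch src/parser.py
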
@command("^qrefresh",
    [('e', 'edit', None, _('edit commit message')),
     ('g', 'git', None, _('use git extended diff format')),
     ('s', 'short', None,
      _('refresh only files already in the patch and specified files')),
     ('U', 'currentuser', None,
      _('add/update author field in patch with current user')),
     ('u', 'user', '',
      _('add/update author field in patch with given user'), _('USER')),
     ('D', 'currentdate', None,
      _('add/update date field in patch with current date')),
     ('d', 'date', '',
      _('add/update date field in patch with given date'), _('DATE'))
    ] + commands.walkopts + commands.commitopts,
    _('hg qrefresh [-I] [-X] [-e] [-m TEXT] [-l FILE] [-s] [FILE]...'))
def refresh(ui, repo, *pats, **opts):
    """update the current patch

    If any file patterns are provided, the refreshed patch will
    contain only the modifications that match those patterns; the
    remaining modifications will remain in the working directory.

    If -s/--short is specified, files currently included in the patch
    will be refreshed just like matched files and remain in the patch.

    If -e/--edit is specified, Mercurial will start your configured editor for
    you to enter a message. In case qrefresh fails, you will find a backup of
    your message in ``.hg/last-message.txt``.

    hg add/remove/copy/rename work as usual, though you might want to
    use git-style patches (-g/--git or [diff] git=1) to track copies
    and renames. See the diffs help topic for more information on the
    git diff format.

    Returns 0 on success.
    """
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    if opts.get('edit'):
        if not q.applied:
            ui.write(_("no patches applied\n"))
            return 1
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))
        patch = q.applied[-1].name
        ph = patchheader(q.join(patch), q.plainmode)
        message = ui.edit('\n'.join(ph.message), ph.user or ui.username())
        # We don't want to lose the patch message if qrefresh fails (issue2062)
        repo.savecommitmessage(message)
    setupheaderopts(ui, opts)
    wlock = repo.wlock()
    try:
        ret = q.refresh(repo, pats, msg=message, **opts)
        q.savedirty()
        return ret
    finally:
        wlock.release()

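# Usage sketch (illustrative; the path is hypothetical). Refresh the files
# already in the top patch plus one newly touched file, leaving all other
# working-directory edits alone:
#
#   $ hg qrefresh --short src/parser.py
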
@command("^qdiff",
    commands.diffopts + commands.diffopts2 + commands.walkopts,
    _('hg qdiff [OPTION]... [FILE]...'))
def diff(ui, repo, *pats, **opts):
    """diff of the current patch and subsequent modifications

    Shows a diff which includes the current patch as well as any
    changes which have been made in the working directory since the
    last refresh (thus showing what the current patch would become
    after a qrefresh).

    Use :hg:`diff` if you only want to see the changes made since the
    last qrefresh, or :hg:`export qtip` if you want to see changes
    made by the current patch without including changes made since the
    qrefresh.

    Returns 0 on success.
    """
    repo.mq.diff(repo, pats, opts)
    return 0

@command('qfold',
    [('e', 'edit', None, _('edit patch header')),
     ('k', 'keep', None, _('keep folded patch files')),
    ] + commands.commitopts,
    _('hg qfold [-e] [-k] [-m TEXT] [-l FILE] PATCH...'))
def fold(ui, repo, *files, **opts):
    """fold the named patches into the current patch

    Patches must not yet be applied. Each patch will be successively
    applied to the current patch in the order given. If all the
    patches apply successfully, the current patch will be refreshed
    with the new cumulative patch, and the folded patches will be
    deleted. With -k/--keep, the folded patch files will not be
    removed afterwards.

    The header for each folded patch will be concatenated with the
    current patch header, separated by a line of ``* * *``.

    Returns 0 on success."""

    q = repo.mq

    if not files:
        raise util.Abort(_('qfold requires at least one patch name'))
    if not q.checktoppatch(repo)[0]:
        raise util.Abort(_('no patches applied'))
    q.checklocalchanges(repo)

    message = cmdutil.logmessage(ui, opts)
    if opts.get('edit'):
        if message:
            raise util.Abort(_('option "-e" incompatible with "-m" or "-l"'))

    parent = q.lookup('qtip')
    patches = []
    messages = []
    for f in files:
        p = q.lookup(f)
        if p in patches or p == parent:
            ui.warn(_('Skipping already folded patch %s\n') % p)
            continue  # really skip it rather than folding it a second time
        if q.isapplied(p):
            raise util.Abort(_('qfold cannot fold already applied patch %s')
                             % p)
        patches.append(p)

    for p in patches:
        if not message:
            ph = patchheader(q.join(p), q.plainmode)
            if ph.message:
                messages.append(ph.message)
        pf = q.join(p)
        (patchsuccess, files, fuzz) = q.patch(repo, pf)
        if not patchsuccess:
            raise util.Abort(_('error folding patch %s') % p)

    if not message:
        ph = patchheader(q.join(parent), q.plainmode)
        message, user = ph.message, ph.user
        for msg in messages:
            message.append('* * *')
            message.extend(msg)
        message = '\n'.join(message)

    if opts.get('edit'):
        message = ui.edit(message, user or ui.username())

    diffopts = q.patchopts(q.diffopts(), *patches)
    wlock = repo.wlock()
    try:
        q.refresh(repo, msg=message, git=diffopts.git)
        q.delete(repo, patches, opts)
        q.savedirty()
    finally:
        wlock.release()

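# Usage sketch (illustrative; patch names are hypothetical). Merge two
# follow-up patches into the current top patch, keeping their files:
#
#   $ hg qfold -k followup-1 followup-2
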
@command("qgoto",
    [('f', 'force', None, _('overwrite any local changes'))],
    _('hg qgoto [OPTION]... PATCH'))
def goto(ui, repo, patch, **opts):
    '''push or pop patches until named patch is at top of stack

    Returns 0 on success.'''
    q = repo.mq
    patch = q.lookup(patch)
    if q.isapplied(patch):
        ret = q.pop(repo, patch, force=opts.get('force'))
    else:
        ret = q.push(repo, patch, force=opts.get('force'))
    q.savedirty()
    return ret

@command("qguard",
    [('l', 'list', None, _('list all patches and guards')),
     ('n', 'none', None, _('drop all guards'))],
    _('hg qguard [-l] [-n] [PATCH] [-- [+GUARD]... [-GUARD]...]'))
def guard(ui, repo, *args, **opts):
    '''set or print guards for a patch

    Guards control whether a patch can be pushed. A patch with no
    guards is always pushed. A patch with a positive guard ("+foo") is
    pushed only if the :hg:`qselect` command has activated it. A patch with
    a negative guard ("-foo") is never pushed if the :hg:`qselect` command
    has activated it.

    With no arguments, print the currently active guards.
    With arguments, set guards for the named patch.

    .. note::
       Specifying negative guards now requires '--'.

    To set guards on another patch::

        hg qguard other.patch -- +2.6.17 -stable

    Returns 0 on success.
    '''
    def status(idx):
        guards = q.seriesguards[idx] or ['unguarded']
        if q.series[idx] in applied:
            state = 'applied'
        elif q.pushable(idx)[0]:
            state = 'unapplied'
        else:
            state = 'guarded'
        label = 'qguard.patch qguard.%s qseries.%s' % (state, state)
        ui.write('%s: ' % ui.label(q.series[idx], label))

        for i, guard in enumerate(guards):
            if guard.startswith('+'):
                ui.write(guard, label='qguard.positive')
            elif guard.startswith('-'):
                ui.write(guard, label='qguard.negative')
            else:
                ui.write(guard, label='qguard.unguarded')
            if i != len(guards) - 1:
                ui.write(' ')
        ui.write('\n')
    q = repo.mq
    applied = set(p.name for p in q.applied)
    patch = None
    args = list(args)
    if opts.get('list'):
        if args or opts.get('none'):
            raise util.Abort(_('cannot mix -l/--list with options or '
                               'arguments'))
        for i in xrange(len(q.series)):
            status(i)
        return
    if not args or args[0][0:1] in '-+':
        if not q.applied:
            raise util.Abort(_('no patches applied'))
        patch = q.applied[-1].name
    if patch is None and args[0][0:1] not in '-+':
        patch = args.pop(0)
    if patch is None:
        raise util.Abort(_('no patch to work with'))
    if args or opts.get('none'):
        idx = q.findseries(patch)
        if idx is None:
            raise util.Abort(_('no patch named %s') % patch)
        q.setguards(idx, args)
        q.savedirty()
    else:
        status(q.series.index(q.lookup(patch)))

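# Usage sketch (illustrative; patch and guard names are hypothetical).
# Guard a patch so it is pushed only when the "stable" guard is selected:
#
#   $ hg qguard backport.patch -- +stable
#   $ hg qselect stable      # now qpush will apply backport.patch
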
@command("qheader", [], _('hg qheader [PATCH]'))
def header(ui, repo, patch=None):
    """print the header of the topmost or specified patch

    Returns 0 on success."""
    q = repo.mq

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return 1
        patch = q.lookup('qtip')
    ph = patchheader(q.join(patch), q.plainmode)

    ui.write('\n'.join(ph.message) + '\n')

def lastsavename(path):
    (directory, base) = os.path.split(path)
    names = os.listdir(directory)
    namere = re.compile("%s.([0-9]+)" % base)
    maxindex = None
    maxname = None
    for f in names:
        m = namere.match(f)
        if m:
            index = int(m.group(1))
            if maxindex is None or index > maxindex:
                maxindex = index
                maxname = f
    if maxname:
        return (os.path.join(directory, maxname), maxindex)
    return (None, None)

def savename(path):
    (last, index) = lastsavename(path)
    if last is None:
        index = 0
    newpath = path + ".%d" % (index + 1)
    return newpath

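# Worked example (illustrative): with .hg/patches.1 and .hg/patches.2 on
# disk next to .hg/patches,
#
#   lastsavename('.hg/patches')  ->  ('.hg/patches.2', 2)
#   savename('.hg/patches')      ->  '.hg/patches.3'
#
# Note the regex "%s.([0-9]+)" leaves the dot unescaped, so it would also
# match names like "patchesX1"; in practice save directories are always
# created as "<base>.<n>", so this is harmless here.
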
@command("^qpush",
    [('f', 'force', None, _('apply on top of local changes')),
     ('e', 'exact', None, _('apply the target patch to its recorded parent')),
     ('l', 'list', None, _('list patch name in commit text')),
     ('a', 'all', None, _('apply all patches')),
     ('m', 'merge', None, _('merge from another queue (DEPRECATED)')),
     ('n', 'name', '',
      _('merge queue name (DEPRECATED)'), _('NAME')),
     ('', 'move', None, _('reorder patch series and apply only the patch'))],
    _('hg qpush [-f] [-l] [-a] [--move] [PATCH | INDEX]'))
def push(ui, repo, patch=None, **opts):
    """push the next patch onto the stack

    When -f/--force is used, all local changes in patched files
    will be lost.

    Return 0 on success.
    """
    q = repo.mq
    mergeq = None

    if opts.get('merge'):
        if opts.get('name'):
            newpath = repo.join(opts.get('name'))
        else:
            newpath, i = lastsavename(q.path)
        if not newpath:
            ui.warn(_("no saved queues found, please use -n\n"))
            return 1
        mergeq = queue(ui, repo.join(""), newpath)
        ui.warn(_("merging with queue at: %s\n") % mergeq.path)
    ret = q.push(repo, patch, force=opts.get('force'), list=opts.get('list'),
                 mergeq=mergeq, all=opts.get('all'), move=opts.get('move'),
                 exact=opts.get('exact'))
    return ret

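# Usage sketch (illustrative; the patch name is hypothetical). Move a patch
# to the front of the unapplied series and apply just that one:
#
#   $ hg qpush --move urgent-fix
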
@command("^qpop",
    [('a', 'all', None, _('pop all patches')),
     ('n', 'name', '',
      _('queue name to pop (DEPRECATED)'), _('NAME')),
     ('f', 'force', None, _('forget any local changes to patched files'))],
    _('hg qpop [-a] [-f] [PATCH | INDEX]'))
def pop(ui, repo, patch=None, **opts):
    """pop the current patch off the stack

    By default, pops off the top of the patch stack. If given a patch
    name, keeps popping off patches until the named patch is at the
    top of the stack.

    Return 0 on success.
    """
    localupdate = True
    if opts.get('name'):
        q = queue(ui, repo.join(""), repo.join(opts.get('name')))
        ui.warn(_('using patch queue: %s\n') % q.path)
        localupdate = False
    else:
        q = repo.mq
    ret = q.pop(repo, patch, force=opts.get('force'), update=localupdate,
                all=opts.get('all'))
    q.savedirty()
    return ret

@command("qrename|qmv", [], _('hg qrename PATCH1 [PATCH2]'))
def rename(ui, repo, patch, name=None, **opts):
    """rename a patch

    With one argument, renames the current patch to PATCH1.
    With two arguments, renames PATCH1 to PATCH2.

    Returns 0 on success."""

    q = repo.mq

    if not name:
        name = patch
        patch = None

    if patch:
        patch = q.lookup(patch)
    else:
        if not q.applied:
            ui.write(_('no patches applied\n'))
            return
        patch = q.lookup('qtip')
    absdest = q.join(name)
    if os.path.isdir(absdest):
        name = normname(os.path.join(name, os.path.basename(patch)))
        absdest = q.join(name)
    q.checkpatchname(name)

    ui.note(_('renaming %s to %s\n') % (patch, name))
    i = q.findseries(patch)
    guards = q.guard_re.findall(q.fullseries[i])
    q.fullseries[i] = name + ''.join([' #' + g for g in guards])
    q.parseseries()
    q.seriesdirty = True

    info = q.isapplied(patch)
    if info:
        q.applied[info[0]] = statusentry(info[1], name)
        q.applieddirty = True

    destdir = os.path.dirname(absdest)
    if not os.path.isdir(destdir):
        os.makedirs(destdir)
    util.rename(q.join(patch), absdest)
    r = q.qrepo()
    if r and patch in r.dirstate:
        wctx = r[None]
        wlock = r.wlock()
        try:
            if r.dirstate[patch] == 'a':
                r.dirstate.drop(patch)
                r.dirstate.add(name)
            else:
                if r.dirstate[name] == 'r':
                    wctx.undelete([name])
                wctx.copy(patch, name)
                wctx.forget([patch])
        finally:
            wlock.release()

    q.savedirty()

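# Usage sketch (illustrative; names are hypothetical). Rename the current
# top patch, or an arbitrary one:
#
#   $ hg qrename better-name.patch
#   $ hg qrename old-name.patch new-name.patch
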
@command("qrestore",
    [('d', 'delete', None, _('delete save entry')),
     ('u', 'update', None, _('update queue working directory'))],
    _('hg qrestore [-d] [-u] REV'))
def restore(ui, repo, rev, **opts):
    """restore the queue state saved by a revision (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    rev = repo.lookup(rev)
    q = repo.mq
    q.restore(repo, rev, delete=opts.get('delete'),
              qupdate=opts.get('update'))
    q.savedirty()
    return 0

@command("qsave",
    [('c', 'copy', None, _('copy patch directory')),
     ('n', 'name', '',
      _('copy directory name'), _('NAME')),
     ('e', 'empty', None, _('clear queue status file')),
     ('f', 'force', None, _('force copy'))] + commands.commitopts,
    _('hg qsave [-m TEXT] [-l FILE] [-c] [-n NAME] [-e] [-f]'))
def save(ui, repo, **opts):
    """save current queue state (DEPRECATED)

    This command is deprecated, use :hg:`rebase` instead."""
    q = repo.mq
    message = cmdutil.logmessage(ui, opts)
    ret = q.save(repo, msg=message)
    if ret:
        return ret
    q.savedirty()
    if opts.get('copy'):
        path = q.path
        if opts.get('name'):
            newpath = os.path.join(q.basepath, opts.get('name'))
            if os.path.exists(newpath):
                if not os.path.isdir(newpath):
                    raise util.Abort(_('destination %s exists and is not '
                                       'a directory') % newpath)
                if not opts.get('force'):
                    raise util.Abort(_('destination %s exists, '
                                       'use -f to force') % newpath)
        else:
            newpath = savename(path)
        ui.warn(_("copy %s to %s\n") % (path, newpath))
        util.copyfiles(path, newpath)
    if opts.get('empty'):
        try:
            os.unlink(q.join(q.statuspath))
        except OSError:  # the status file may not exist; that's fine
            pass
    return 0

@command("strip",
    [('f', 'force', None, _('force removal of changesets, discard '
                            'uncommitted changes (no backup)')),
     ('b', 'backup', None, _('bundle only changesets with local revision'
                             ' number greater than REV which are not'
                             ' descendants of REV (DEPRECATED)')),
     ('n', 'no-backup', None, _('no backups')),
     ('', 'nobackup', None, _('no backups (DEPRECATED)')),
     ('k', 'keep', None, _("do not modify working copy during strip"))],
    _('hg strip [-k] [-f] [-n] REV...'))
def strip(ui, repo, *revs, **opts):
    """strip changesets and all their descendants from the repository

    The strip command removes the specified changesets and all their
    descendants. If the working directory has uncommitted changes, the
    operation is aborted unless the --force flag is supplied, in which
    case changes will be discarded.

    If a parent of the working directory is stripped, then the working
    directory will automatically be updated to the most recent
    available ancestor of the stripped parent after the operation
    completes.

    Any stripped changesets are stored in ``.hg/strip-backup`` as a
    bundle (see :hg:`help bundle` and :hg:`help unbundle`). They can
    be restored by running :hg:`unbundle .hg/strip-backup/BUNDLE`,
    where BUNDLE is the bundle file created by the strip. Note that
    the local revision numbers will in general be different after the
    restore.

    Use the --no-backup option to discard the backup bundle once the
    operation completes.

    Return 0 on success.
    """
    backup = 'all'
    if opts.get('backup'):
        backup = 'strip'
    elif opts.get('no_backup') or opts.get('nobackup'):
        backup = 'none'

    cl = repo.changelog
    revs = set(scmutil.revrange(repo, revs))
    if not revs:
        raise util.Abort(_('empty revision set'))

    descendants = set(cl.descendants(*revs))
    strippedrevs = revs.union(descendants)
    roots = revs.difference(descendants)

    update = False
    # if one of the wdir parents is stripped we'll need
    # to update away to an earlier revision
    for p in repo.dirstate.parents():
        if p != nullid and cl.rev(p) in strippedrevs:
            update = True
            break

    rootnodes = set(cl.node(r) for r in roots)

    q = repo.mq
    if q.applied:
        # refresh queue state if we're about to strip
        # applied patches
        if cl.rev(repo.lookup('qtip')) in strippedrevs:
            q.applieddirty = True
            start = 0
            end = len(q.applied)
            for i, statusentry in enumerate(q.applied):
                if statusentry.node in rootnodes:
                    # if one of the stripped roots is an applied
                    # patch, only part of the queue is stripped
                    start = i
                    break
            del q.applied[start:end]
2751 del q.applied[start:end]
2752 q.savedirty()
2752 q.savedirty()
2753
2753
2754 revs = list(rootnodes)
2754 revs = list(rootnodes)
2755 if update and opts.get('keep'):
2755 if update and opts.get('keep'):
2756 wlock = repo.wlock()
2756 wlock = repo.wlock()
2757 try:
2757 try:
2758 urev = repo.mq.qparents(repo, revs[0])
2758 urev = repo.mq.qparents(repo, revs[0])
2759 repo.dirstate.rebuild(urev, repo[urev].manifest())
2759 repo.dirstate.rebuild(urev, repo[urev].manifest())
2760 repo.dirstate.write()
2760 repo.dirstate.write()
2761 update = False
2761 update = False
2762 finally:
2762 finally:
2763 wlock.release()
2763 wlock.release()
2764
2764
2765 repo.mq.strip(repo, revs, backup=backup, update=update,
2765 repo.mq.strip(repo, revs, backup=backup, update=update,
2766 force=opts.get('force'))
2766 force=opts.get('force'))
2767 return 0
2767 return 0
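# Illustrative session (not part of the original file); the revision number
# is hypothetical, and BUNDLE stands for the backup file named in the
# docstring above:
#
#   hg strip 42                          # removes rev 42 and its descendants
#   hg unbundle .hg/strip-backup/BUNDLE  # restores them from the backup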

@command("qselect",
         [('n', 'none', None, _('disable all guards')),
          ('s', 'series', None, _('list all guards in series file')),
          ('', 'pop', None, _('pop to before first guarded applied patch')),
          ('', 'reapply', None, _('pop, then reapply patches'))],
         _('hg qselect [OPTION]... [GUARD]...'))
def select(ui, repo, *args, **opts):
    '''set or print guarded patches to push

    Use the :hg:`qguard` command to set or print guards on a patch, then use
    qselect to tell mq which guards to use. A patch will be pushed if
    it has no guards or any positive guards match the currently
    selected guard, but will not be pushed if any negative guards
    match the current guard. For example::

        qguard foo.patch -- -stable    (negative guard)
        qguard bar.patch    +stable    (positive guard)
        qselect stable

    This activates the "stable" guard. mq will skip foo.patch (because
    it has a negative match) but push bar.patch (because it has a
    positive match).

    With no arguments, prints the currently active guards.
    With one argument, sets the active guard.

    Use -n/--none to deactivate guards (no other arguments needed).
    When no guards are active, patches with positive guards are
    skipped and patches with negative guards are pushed.

    qselect can change the guards on applied patches. It does not pop
    guarded patches by default. Use --pop to pop back to the last
    applied patch that is not guarded. Use --reapply (which implies
    --pop) to push back to the current patch afterwards, but skip
    guarded patches.

    Use -s/--series to print a list of all guards in the series file
    (no other arguments needed). Use -v for more information.

    Returns 0 on success.'''

    q = repo.mq
    guards = q.active()
    if args or opts.get('none'):
        old_unapplied = q.unapplied(repo)
        old_guarded = [i for i in xrange(len(q.applied)) if
                       not q.pushable(i)[0]]
        q.setactive(args)
        q.savedirty()
        if not args:
            ui.status(_('guards deactivated\n'))
        if not opts.get('pop') and not opts.get('reapply'):
            unapplied = q.unapplied(repo)
            guarded = [i for i in xrange(len(q.applied))
                       if not q.pushable(i)[0]]
            if len(unapplied) != len(old_unapplied):
                ui.status(_('number of unguarded, unapplied patches has '
                            'changed from %d to %d\n') %
                          (len(old_unapplied), len(unapplied)))
            if len(guarded) != len(old_guarded):
                ui.status(_('number of guarded, applied patches has changed '
                            'from %d to %d\n') %
                          (len(old_guarded), len(guarded)))
    elif opts.get('series'):
        guards = {}
        noguards = 0
        for gs in q.seriesguards:
            if not gs:
                noguards += 1
            for g in gs:
                guards.setdefault(g, 0)
                guards[g] += 1
        if ui.verbose:
            guards['NONE'] = noguards
        guards = guards.items()
        guards.sort(key=lambda x: x[0][1:])
        if guards:
            ui.note(_('guards in series file:\n'))
            for guard, count in guards:
                ui.note('%2d ' % count)
                ui.write(guard, '\n')
        else:
            ui.note(_('no guards in series file\n'))
    else:
        if guards:
            ui.note(_('active guards:\n'))
            for g in guards:
                ui.write(g, '\n')
        else:
            ui.write(_('no active guards\n'))
    reapply = opts.get('reapply') and q.applied and q.appliedname(-1)
    popped = False
    if opts.get('pop') or opts.get('reapply'):
        for i in xrange(len(q.applied)):
            pushable, reason = q.pushable(i)
            if not pushable:
                ui.status(_('popping guarded patches\n'))
                popped = True
                if i == 0:
                    q.pop(repo, all=True)
                else:
                    q.pop(repo, i - 1)
                break
    if popped:
        try:
            if reapply:
                ui.status(_('reapplying unguarded patches\n'))
                q.push(repo, reapply)
        finally:
            q.savedirty()

@command("qfinish",
         [('a', 'applied', None, _('finish all applied changesets'))],
         _('hg qfinish [-a] [REV]...'))
def finish(ui, repo, *revrange, **opts):
    """move applied patches into repository history

    Finishes the specified revisions (corresponding to applied
    patches) by moving them out of mq control into regular repository
    history.

    Accepts a revision range or the -a/--applied option. If --applied
    is specified, all applied mq revisions are removed from mq
    control. Otherwise, the given revisions must be at the base of the
    stack of applied patches.

    This can be especially useful if your changes have been applied to
    an upstream repository, or if you are about to push your changes
    to upstream.

    Returns 0 on success.
    """
    if not opts.get('applied') and not revrange:
        raise util.Abort(_('no revisions specified'))
    elif opts.get('applied'):
        revrange = ('qbase::qtip',) + revrange

    q = repo.mq
    if not q.applied:
        ui.status(_('no patches applied\n'))
        return 0

    revs = scmutil.revrange(repo, revrange)
    q.finish(repo, revs)
    q.savedirty()
    return 0

@command("qqueue",
         [('l', 'list', False, _('list all available queues')),
          ('', 'active', False, _('print name of active queue')),
          ('c', 'create', False, _('create new queue')),
          ('', 'rename', False, _('rename active queue')),
          ('', 'delete', False, _('delete reference to queue')),
          ('', 'purge', False, _('delete queue, and remove patch dir')),
          ],
         _('[OPTION] [QUEUE]'))
def qqueue(ui, repo, name=None, **opts):
    '''manage multiple patch queues

    Supports switching between different patch queues, as well as creating
    new patch queues and deleting existing ones.

    Omitting a queue name or specifying -l/--list will show you the registered
    queues - by default the "normal" patches queue is registered. The currently
    active queue will be marked with "(active)". Specifying --active will print
    only the name of the active queue.

    To create a new queue, use -c/--create. The queue is automatically made
    active, except when there are applied patches from the currently active
    queue in the repository. In that case the queue will only be created,
    and switching to it will fail.

    To delete an existing queue, use --delete. You cannot delete the currently
    active queue.

    Returns 0 on success.
    '''

    q = repo.mq

    _defaultqueue = 'patches'
    _allqueues = 'patches.queues'
    _activequeue = 'patches.queue'

    def _getcurrent():
        cur = os.path.basename(q.path)
        if cur.startswith('patches-'):
            cur = cur[8:]
        return cur

    def _noqueues():
        try:
            fh = repo.opener(_allqueues, 'r')
            fh.close()
        except IOError:
            return True

        return False

    def _getqueues():
        current = _getcurrent()

        try:
            fh = repo.opener(_allqueues, 'r')
            queues = [queue.strip() for queue in fh if queue.strip()]
            fh.close()
            if current not in queues:
                queues.append(current)
        except IOError:
            queues = [_defaultqueue]

        return sorted(queues)

    def _setactive(name):
        if q.applied:
            raise util.Abort(_('patches applied - cannot set new queue active'))
        _setactivenocheck(name)

    def _setactivenocheck(name):
        fh = repo.opener(_activequeue, 'w')
        if name != 'patches':
            fh.write(name)
        fh.close()

    def _addqueue(name):
        fh = repo.opener(_allqueues, 'a')
        fh.write('%s\n' % (name,))
        fh.close()

    def _queuedir(name):
        if name == 'patches':
            return repo.join('patches')
        else:
            return repo.join('patches-' + name)

    def _validname(name):
        for n in name:
            if n in ':\\/.':
                return False
        return True

    def _delete(name):
        if name not in existing:
            raise util.Abort(_('cannot delete queue that does not exist'))

        current = _getcurrent()

        if name == current:
            raise util.Abort(_('cannot delete currently active queue'))

        fh = repo.opener('patches.queues.new', 'w')
        for queue in existing:
            if queue == name:
                continue
            fh.write('%s\n' % (queue,))
        fh.close()
        util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))

    if not name or opts.get('list') or opts.get('active'):
        current = _getcurrent()
        if opts.get('active'):
            ui.write('%s\n' % (current,))
            return
        for queue in _getqueues():
            ui.write('%s' % (queue,))
            if queue == current and not ui.quiet:
                ui.write(_(' (active)\n'))
            else:
                ui.write('\n')
        return

    if not _validname(name):
        raise util.Abort(
            _('invalid queue name, may not contain the characters ":\\/."'))

    existing = _getqueues()

    if opts.get('create'):
        if name in existing:
            raise util.Abort(_('queue "%s" already exists') % name)
        if _noqueues():
            _addqueue(_defaultqueue)
        _addqueue(name)
        _setactive(name)
    elif opts.get('rename'):
        current = _getcurrent()
        if name == current:
            raise util.Abort(_('can\'t rename "%s" to its current name') % name)
        if name in existing:
            raise util.Abort(_('queue "%s" already exists') % name)

        olddir = _queuedir(current)
        newdir = _queuedir(name)

        if os.path.exists(newdir):
            raise util.Abort(_('non-queue directory "%s" already exists') %
                             newdir)

        fh = repo.opener('patches.queues.new', 'w')
        for queue in existing:
            if queue == current:
                fh.write('%s\n' % (name,))
                if os.path.exists(olddir):
                    util.rename(olddir, newdir)
            else:
                fh.write('%s\n' % (queue,))
        fh.close()
        util.rename(repo.join('patches.queues.new'), repo.join(_allqueues))
        _setactivenocheck(name)
    elif opts.get('delete'):
        _delete(name)
    elif opts.get('purge'):
        if name in existing:
            _delete(name)
        qdir = _queuedir(name)
        if os.path.exists(qdir):
            shutil.rmtree(qdir)
    else:
        if name not in existing:
            raise util.Abort(_('use --create to create a new queue'))
        _setactive(name)
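# Illustrative session (not part of the original file); the queue name
# "stable-fixes" is hypothetical, and the --list output is inferred from
# the code above:
#
#   hg qqueue --create stable-fixes   # create a new queue and switch to it
#   hg qqueue --list                  # patches / stable-fixes (active)
#   hg qqueue patches                 # switch back (fails if patches applied)
#   hg qqueue --delete stable-fixes   # drop the now-inactive queue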

def reposetup(ui, repo):
    class mqrepo(repo.__class__):
        @util.propertycache
        def mq(self):
            return queue(self.ui, self.join(""))

        def abortifwdirpatched(self, errmsg, force=False):
            if self.mq.applied and not force:
                parents = self.dirstate.parents()
                patches = [s.node for s in self.mq.applied]
                if parents[0] in patches or parents[1] in patches:
                    raise util.Abort(errmsg)

        def commit(self, text="", user=None, date=None, match=None,
                   force=False, editor=False, extra={}):
            self.abortifwdirpatched(
                _('cannot commit over an applied mq patch'),
                force)

            return super(mqrepo, self).commit(text, user, date, match, force,
                                              editor, extra)

        def checkpush(self, force, revs):
            if self.mq.applied and not force:
                haspatches = True
                if revs:
                    # Assume applied patches have no non-patch descendants
                    # and are not on remote already. If they appear in the
                    # set of resolved 'revs', bail out.
                    applied = set(e.node for e in self.mq.applied)
                    haspatches = bool([n for n in revs if n in applied])
                if haspatches:
                    raise util.Abort(_('source has mq patches applied'))
            super(mqrepo, self).checkpush(force, revs)

        def _findtags(self):
            '''augment tags from base class with patch tags'''
            result = super(mqrepo, self)._findtags()

            q = self.mq
            if not q.applied:
                return result

            mqtags = [(patch.node, patch.name) for patch in q.applied]

            try:
                self.changelog.rev(mqtags[-1][0])
            except error.LookupError:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(mqtags[-1][0]))
                return result

            mqtags.append((mqtags[-1][0], 'qtip'))
            mqtags.append((mqtags[0][0], 'qbase'))
            mqtags.append((self.changelog.parents(mqtags[0][0])[0], 'qparent'))
            tags = result[0]
            for patch in mqtags:
                if patch[1] in tags:
                    self.ui.warn(_('Tag %s overrides mq patch of the same '
                                   'name\n') % patch[1])
                else:
                    tags[patch[1]] = patch[0]

            return result

        def _branchtags(self, partial, lrev):
            q = self.mq
            if not q.applied:
                return super(mqrepo, self)._branchtags(partial, lrev)

            cl = self.changelog
            qbasenode = q.applied[0].node
            try:
                qbase = cl.rev(qbasenode)
            except error.LookupError:
                self.ui.warn(_('mq status file refers to unknown node %s\n')
                             % short(qbasenode))
                return super(mqrepo, self)._branchtags(partial, lrev)

            start = lrev + 1
            if start < qbase:
                # update the cache (excluding the patches) and save it
                ctxgen = (self[r] for r in xrange(lrev + 1, qbase))
                self._updatebranchcache(partial, ctxgen)
                self._writebranchcache(partial, cl.node(qbase - 1), qbase - 1)
                start = qbase
            # if start = qbase, the cache is as updated as it should be.
            # if start > qbase, the cache includes (part of) the patches.
            # we might as well use it, but we won't save it.

            # update the cache up to the tip
            ctxgen = (self[r] for r in xrange(start, len(cl)))
            self._updatebranchcache(partial, ctxgen)

            return partial

    if repo.local():
        repo.__class__ = mqrepo

def mqimport(orig, ui, repo, *args, **kwargs):
    if (hasattr(repo, 'abortifwdirpatched')
        and not kwargs.get('no_commit', False)):
        repo.abortifwdirpatched(_('cannot import over an applied patch'),
                                kwargs.get('force'))
    return orig(ui, repo, *args, **kwargs)

def mqinit(orig, ui, *args, **kwargs):
    mq = kwargs.pop('mq', None)

    if not mq:
        return orig(ui, *args, **kwargs)

    if args:
        repopath = args[0]
        if not hg.islocal(repopath):
            raise util.Abort(_('only a local queue repository '
                               'may be initialized'))
    else:
        repopath = cmdutil.findrepo(os.getcwd())
        if not repopath:
            raise util.Abort(_('there is no Mercurial repository here '
                               '(.hg not found)'))
    repo = hg.repository(ui, repopath)
    return qinit(ui, repo, True)

def mqcommand(orig, ui, repo, *args, **kwargs):
    """Add --mq option to operate on patch repository instead of main"""

    # some commands do not like getting unknown options
    mq = kwargs.pop('mq', None)

    if not mq:
        return orig(ui, repo, *args, **kwargs)

    q = repo.mq
    r = q.qrepo()
    if not r:
        raise util.Abort(_('no queue repository'))
    return orig(r.ui, r, *args, **kwargs)
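# Illustrative effect of the wrapper above (not part of the original file):
# with a versioned patch queue, "hg log --mq" runs log against the patch
# repository under .hg/patches instead of the main repository.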

def summary(orig, ui, repo, *args, **kwargs):
    r = orig(ui, repo, *args, **kwargs)
    q = repo.mq
    m = []
    a, u = len(q.applied), len(q.unapplied(repo))
    if a:
        m.append(ui.label(_("%d applied"), 'qseries.applied') % a)
    if u:
        m.append(ui.label(_("%d unapplied"), 'qseries.unapplied') % u)
    if m:
        ui.write("mq: %s\n" % ', '.join(m))
    else:
        ui.note(_("mq: (empty queue)\n"))
    return r
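# Illustrative line added to "hg summary" output by the wrapper above
# (the counts are hypothetical):
#
#   mq: 3 applied, 2 unapplied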

def revsetmq(repo, subset, x):
    """``mq()``
    Changesets managed by MQ.
    """
    revset.getargs(x, 0, 0, _("mq takes no arguments"))
    applied = set([repo[r.node].rev() for r in repo.mq.applied])
    return [r for r in subset if r in applied]
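# Illustrative use of the mq() revset defined above (not part of the
# original file):
#
#   hg log -r "mq()"    # show only changesets currently managed by mq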

def extsetup(ui):
    revset.symbols['mq'] = revsetmq

# tell hggettext to extract docstrings from these functions:
i18nfunctions = [revsetmq]

def uisetup(ui):
    mqopt = [('', 'mq', None, _("operate on patch repository"))]

    extensions.wrapcommand(commands.table, 'import', mqimport)
    extensions.wrapcommand(commands.table, 'summary', summary)

    entry = extensions.wrapcommand(commands.table, 'init', mqinit)
    entry[1].extend(mqopt)

    nowrap = set(commands.norepo.split(" "))

    def dotable(cmdtable):
        for cmd in cmdtable.keys():
            cmd = cmdutil.parsealiases(cmd)[0]
            if cmd in nowrap:
                continue
            entry = extensions.wrapcommand(cmdtable, cmd, mqcommand)
            entry[1].extend(mqopt)

    dotable(commands.table)

    for extname, extmodule in extensions.extensions():
        if extmodule.__file__ != __file__:
            dotable(getattr(extmodule, 'cmdtable', {}))


colortable = {'qguard.negative': 'red',
              'qguard.positive': 'yellow',
              'qguard.unguarded': 'green',
              'qseries.applied': 'blue bold underline',
              'qseries.guarded': 'black bold',
              'qseries.missing': 'red bold',
              'qseries.unapplied': 'black bold'}
@@ -1,284 +1,284 b''
# archival.py - revision archival for mercurial
#
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
from node import hex
import cmdutil
import scmutil, util, encoding
import cStringIO, os, tarfile, time, zipfile
import zlib, gzip

def tidyprefix(dest, kind, prefix):
    '''choose prefix to use for names in archive. make sure prefix is
    safe for consumers.'''

    if prefix:
        prefix = util.normpath(prefix)
    else:
        if not isinstance(dest, str):
            raise ValueError('dest must be string if no prefix')
        prefix = os.path.basename(dest)
    lower = prefix.lower()
    for sfx in exts.get(kind, []):
        if lower.endswith(sfx):
            prefix = prefix[:-len(sfx)]
            break
    lpfx = os.path.normpath(util.localpath(prefix))
    prefix = util.pconvert(lpfx)
    if not prefix.endswith('/'):
        prefix += '/'
    if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix:
        raise util.Abort(_('archive prefix contains illegal components'))
    return prefix

exts = {
    'tar': ['.tar'],
    'tbz2': ['.tbz2', '.tar.bz2'],
    'tgz': ['.tgz', '.tar.gz'],
    'zip': ['.zip'],
    }

def guesskind(dest):
    for kind, extensions in exts.iteritems():
        if util.any(dest.endswith(ext) for ext in extensions):
            return kind
    return None
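# Illustrative behaviour of guesskind, following the exts table above
# (the file names are made up):
#
#   guesskind('project-1.0.tar.gz')  ->  'tgz'
#   guesskind('project-1.0.txt')     ->  None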


class tarit(object):
    '''write archive to tar file or stream. can write uncompressed,
    or compress with gzip or bzip2.'''

    class GzipFileWithTime(gzip.GzipFile):

        def __init__(self, *args, **kw):
            timestamp = None
            if 'timestamp' in kw:
                timestamp = kw.pop('timestamp')
            if timestamp is None:
                self.timestamp = time.time()
            else:
                self.timestamp = timestamp
            gzip.GzipFile.__init__(self, *args, **kw)

        def _write_gzip_header(self):
            self.fileobj.write('\037\213')  # magic header
            self.fileobj.write('\010')      # compression method
            # Python 2.6 deprecates self.filename
            fname = getattr(self, 'name', None) or self.filename
            if fname and fname.endswith('.gz'):
                fname = fname[:-3]
            flags = 0
            if fname:
                flags = gzip.FNAME
            self.fileobj.write(chr(flags))
            gzip.write32u(self.fileobj, long(self.timestamp))
            self.fileobj.write('\002')
            self.fileobj.write('\377')
            if fname:
                self.fileobj.write(fname + '\000')

    def __init__(self, dest, mtime, kind=''):
        self.mtime = mtime
        self.fileobj = None

        def taropen(name, mode, fileobj=None):
            if kind == 'gz':
                mode = mode[0]
                if not fileobj:
                    fileobj = open(name, mode + 'b')
                gzfileobj = self.GzipFileWithTime(name, mode + 'b',
                                                  zlib.Z_BEST_COMPRESSION,
                                                  fileobj, timestamp=mtime)
                self.fileobj = gzfileobj
                return tarfile.TarFile.taropen(name, mode, gzfileobj)
            else:
                self.fileobj = fileobj
                return tarfile.open(name, mode + kind, fileobj)

        if isinstance(dest, str):
            self.z = taropen(dest, mode='w:')
        else:
            # Python 2.5-2.5.1 have a regression that requires a name arg
            self.z = taropen(name='', mode='w|', fileobj=dest)

    def addfile(self, name, mode, islink, data):
        i = tarfile.TarInfo(name)
        i.mtime = self.mtime
        i.size = len(data)
        if islink:
            i.type = tarfile.SYMTYPE
            i.mode = 0777
            i.linkname = data
            data = None
            i.size = 0
        else:
            i.mode = mode
            data = cStringIO.StringIO(data)
        self.z.addfile(i, data)

    def done(self):
        self.z.close()
        if self.fileobj:
            self.fileobj.close()

class tellable(object):
    '''provide tell method for zipfile.ZipFile when writing to http
    response file object.'''

    def __init__(self, fp):
        self.fp = fp
        self.offset = 0

    def __getattr__(self, key):
        return getattr(self.fp, key)

    def write(self, s):
        self.fp.write(s)
        self.offset += len(s)

    def tell(self):
        return self.offset
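# Note on the class above: zipfile.ZipFile calls tell() on its output file
# to compute member offsets. tellable lets a non-seekable stream (such as
# an HTTP response object) satisfy that by counting the bytes it has
# written itself.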

class zipit(object):
    '''write archive to zip file or stream. can write uncompressed,
    or compressed with deflate.'''

    def __init__(self, dest, mtime, compress=True):
        if not isinstance(dest, str):
            try:
                dest.tell()
            except (AttributeError, IOError):
                dest = tellable(dest)
        self.z = zipfile.ZipFile(dest, 'w',
                                 compress and zipfile.ZIP_DEFLATED or
                                 zipfile.ZIP_STORED)

        # Python's zipfile module emits deprecation warnings if we try
        # to store files with a date before 1980.
        epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0))
        if mtime < epoch:
            mtime = epoch

        self.date_time = time.gmtime(mtime)[:6]

    def addfile(self, name, mode, islink, data):
        i = zipfile.ZipInfo(name, self.date_time)
        i.compress_type = self.z.compression
        # unzip will not honor unix file modes unless file creator is
        # set to unix (id 3).
        i.create_system = 3
        ftype = 0x8000 # UNX_IFREG in unzip source code
        if islink:
            mode = 0777
            ftype = 0xa000 # UNX_IFLNK in unzip source code
        i.external_attr = (mode | ftype) << 16L
        self.z.writestr(i, data)

    def done(self):
        self.z.close()

class fileit(object):
    '''write archive as files in directory.'''

    def __init__(self, name, mtime):
        self.basedir = name
        self.opener = scmutil.opener(self.basedir)

    def addfile(self, name, mode, islink, data):
        if islink:
            self.opener.symlink(data, name)
            return
        f = self.opener(name, "w", atomictemp=True)
        f.write(data)
        f.close()   # changed by this commit from f.rename()
        destfile = os.path.join(self.basedir, name)
        os.chmod(destfile, mode)

    def done(self):
        pass

archivers = {
    'files': fileit,
    'tar': tarit,
    'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'),
    'tgz': lambda name, mtime: tarit(name, mtime, 'gz'),
    'uzip': lambda name, mtime: zipit(name, mtime, False),
    'zip': zipit,
    }

def archive(repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=False):
    '''create archive of repo as it was at node.

    dest can be name of directory, name of archive file, or file
    object to write archive to.

    kind is type of archive to create.

    decode tells whether to put files through decode filters from
    hgrc.

    matchfn is function to filter names of files to write to archive.

    prefix is name of path to put before every archive member.'''

    if kind == 'files':
        if prefix:
            raise util.Abort(_('cannot give prefix when archiving to files'))
    else:
        prefix = tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    if kind not in archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]
    archiver = archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                repo[0].hex(), hex(node), encoding.fromlocal(ctx.branch()))

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    total = len(ctx.manifest())
    repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total)
    for i, f in enumerate(ctx):
        ff = ctx.flags(f)
        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, ctx[f].data)
        repo.ui.progress(_('archiving'), i + 1, item=f,
                         unit=_('files'), total=total)
    repo.ui.progress(_('archiving'), None)

    if subrepos:
        for subpath in ctx.substate:
            sub = ctx.sub(subpath)
            sub.archive(repo.ui, archiver, prefix)

    archiver.done()
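# Illustrative call (not part of the original file); the destination name
# and the use of repo.lookup('tip') are assumptions for the sketch:
#
#   node = repo.lookup('tip')
#   archive(repo, 'snapshot.tar.gz', node, 'tgz')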
@@ -1,213 +1,213 b''
# Mercurial bookmark support code
#
# Copyright 2008 David Soria Parra <dsp@php.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from mercurial.i18n import _
from mercurial.node import hex
from mercurial import encoding, error, util
import errno, os

def valid(mark):
    for c in (':', '\0', '\n', '\r'):
        if c in mark:
            return False
    return True

def read(repo):
    '''Parse .hg/bookmarks file and return a dictionary

    Bookmarks are stored as {HASH}\\s{NAME}\\n (localtags format) values
    in the .hg/bookmarks file.
    Read the file and return a (name=>nodeid) dictionary
    '''
    bookmarks = {}
    try:
        for line in repo.opener('bookmarks'):
            line = line.strip()
            if not line:
                continue
            if ' ' not in line:
                repo.ui.warn(_('malformed line in .hg/bookmarks: %r\n') % line)
                continue
            sha, refspec = line.split(' ', 1)
            refspec = encoding.tolocal(refspec)
            try:
                bookmarks[refspec] = repo.changelog.lookup(sha)
            except error.RepoLookupError:
                pass
    except IOError, inst:
        if inst.errno != errno.ENOENT:
            raise
    return bookmarks
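# Illustrative .hg/bookmarks line in the format parsed above (the hash and
# the name are made up):
#
#   0123456789abcdef0123456789abcdef01234567 my-feature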
45
45
46 def readcurrent(repo):
46 def readcurrent(repo):
47 '''Get the current bookmark
47 '''Get the current bookmark
48
48
49 If we use gittish branches we have a current bookmark that
49 If we use gittish branches we have a current bookmark that
50 we are on. This function returns the name of the bookmark. It
50 we are on. This function returns the name of the bookmark. It
51 is stored in .hg/bookmarks.current
51 is stored in .hg/bookmarks.current
52 '''
52 '''
53 mark = None
53 mark = None
54 try:
54 try:
55 file = repo.opener('bookmarks.current')
55 file = repo.opener('bookmarks.current')
56 except IOError, inst:
56 except IOError, inst:
57 if inst.errno != errno.ENOENT:
57 if inst.errno != errno.ENOENT:
58 raise
58 raise
59 return None
59 return None
60 try:
60 try:
61 # No readline() in posixfile_nt, reading everything is cheap
61 # No readline() in posixfile_nt, reading everything is cheap
62 mark = encoding.tolocal((file.readlines() or [''])[0])
62 mark = encoding.tolocal((file.readlines() or [''])[0])
63 if mark == '' or mark not in repo._bookmarks:
63 if mark == '' or mark not in repo._bookmarks:
64 mark = None
64 mark = None
65 finally:
65 finally:
66 file.close()
66 file.close()
67 return mark
67 return mark
68
68
69 def write(repo):
69 def write(repo):
70 '''Write bookmarks
70 '''Write bookmarks
71
71
72 Write the given bookmark => hash dictionary to the .hg/bookmarks file
72 Write the given bookmark => hash dictionary to the .hg/bookmarks file
73 in a format equal to that of localtags.
73 in a format equal to that of localtags.
74
74
75 We also store a backup of the previous state in undo.bookmarks that
75 We also store a backup of the previous state in undo.bookmarks that
76 can be copied back on rollback.
76 can be copied back on rollback.
77 '''
77 '''
78 refs = repo._bookmarks
78 refs = repo._bookmarks
79
79
80 if repo._bookmarkcurrent not in refs:
80 if repo._bookmarkcurrent not in refs:
81 setcurrent(repo, None)
81 setcurrent(repo, None)
82 for mark in refs.keys():
82 for mark in refs.keys():
83 if not valid(mark):
83 if not valid(mark):
84 raise util.Abort(_("bookmark '%s' contains illegal "
84 raise util.Abort(_("bookmark '%s' contains illegal "
85 "character" % mark))
85 "character" % mark))
86
86
87 wlock = repo.wlock()
87 wlock = repo.wlock()
88 try:
88 try:
89
89
90 file = repo.opener('bookmarks', 'w', atomictemp=True)
90 file = repo.opener('bookmarks', 'w', atomictemp=True)
91 for refspec, node in refs.iteritems():
91 for refspec, node in refs.iteritems():
92 file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
92 file.write("%s %s\n" % (hex(node), encoding.fromlocal(refspec)))
93 file.rename()
93 file.close()
94
94
95 # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
95 # touch 00changelog.i so hgweb reloads bookmarks (no lock needed)
96 try:
96 try:
97 os.utime(repo.sjoin('00changelog.i'), None)
97 os.utime(repo.sjoin('00changelog.i'), None)
98 except OSError:
98 except OSError:
99 pass
99 pass
100
100
101 finally:
101 finally:
102 wlock.release()
102 wlock.release()
103
103
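The write-then-close() sequence above relies on an atomictemp file: data goes to a temporary file, and close() renames it into place, so readers never observe a half-written bookmarks file (which is why the hunk above changes file.rename() to file.close()). A minimal standalone sketch of the same pattern, with a hypothetical helper name, not Mercurial's actual implementation:

    import os, tempfile

    def atomicwrite(path, data):
        # write into a temp file in the target directory, then rename;
        # os.rename() is atomic on POSIX for same-filesystem paths, so
        # concurrent readers see either the old or the new file, never a mix
        dirname = os.path.dirname(path) or '.'
        fd, tmp = tempfile.mkstemp(dir=dirname, prefix='.tmp-')
        try:
            os.write(fd, data)
            os.close(fd)
            os.rename(tmp, path)  # the "close() renames" step
        except Exception:
            os.unlink(tmp)
            raise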
104 def setcurrent(repo, mark):
104 def setcurrent(repo, mark):
105 '''Set the name of the bookmark that we are currently on
105 '''Set the name of the bookmark that we are currently on
106
106
107 Set the name of the bookmark that we are on (hg update <bookmark>).
107 Set the name of the bookmark that we are on (hg update <bookmark>).
108 The name is recorded in .hg/bookmarks.current
108 The name is recorded in .hg/bookmarks.current
109 '''
109 '''
110 current = repo._bookmarkcurrent
110 current = repo._bookmarkcurrent
111 if current == mark:
111 if current == mark:
112 return
112 return
113
113
114 if mark not in repo._bookmarks:
114 if mark not in repo._bookmarks:
115 mark = ''
115 mark = ''
116 if not valid(mark):
116 if not valid(mark):
117 raise util.Abort(_("bookmark '%s' contains illegal "
117 raise util.Abort(_("bookmark '%s' contains illegal "
118 "character" % mark))
118 "character" % mark))
119
119
120 wlock = repo.wlock()
120 wlock = repo.wlock()
121 try:
121 try:
122 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
122 file = repo.opener('bookmarks.current', 'w', atomictemp=True)
123 file.write(encoding.fromlocal(mark))
123 file.write(encoding.fromlocal(mark))
124 file.rename()
124 file.close()
125 finally:
125 finally:
126 wlock.release()
126 wlock.release()
127 repo._bookmarkcurrent = mark
127 repo._bookmarkcurrent = mark
128
128
129 def updatecurrentbookmark(repo, oldnode, curbranch):
129 def updatecurrentbookmark(repo, oldnode, curbranch):
130 try:
130 try:
131 update(repo, oldnode, repo.branchtags()[curbranch])
131 update(repo, oldnode, repo.branchtags()[curbranch])
132 except KeyError:
132 except KeyError:
133 if curbranch == "default": # no default branch!
133 if curbranch == "default": # no default branch!
134 update(repo, oldnode, repo.lookup("tip"))
134 update(repo, oldnode, repo.lookup("tip"))
135 else:
135 else:
136 raise util.Abort(_("branch %s not found") % curbranch)
136 raise util.Abort(_("branch %s not found") % curbranch)
137
137
138 def update(repo, parents, node):
138 def update(repo, parents, node):
139 marks = repo._bookmarks
139 marks = repo._bookmarks
140 update = False
140 update = False
141 mark = repo._bookmarkcurrent
141 mark = repo._bookmarkcurrent
142 if mark and marks[mark] in parents:
142 if mark and marks[mark] in parents:
143 old = repo[marks[mark]]
143 old = repo[marks[mark]]
144 new = repo[node]
144 new = repo[node]
145 if new in old.descendants():
145 if new in old.descendants():
146 marks[mark] = new.node()
146 marks[mark] = new.node()
147 update = True
147 update = True
148 if update:
148 if update:
149 write(repo)
149 write(repo)
150
150
151 def listbookmarks(repo):
151 def listbookmarks(repo):
152 # We may try to list bookmarks on a repo type that does not
152 # We may try to list bookmarks on a repo type that does not
153 # support it (e.g., statichttprepository).
153 # support it (e.g., statichttprepository).
154 marks = getattr(repo, '_bookmarks', {})
154 marks = getattr(repo, '_bookmarks', {})
155
155
156 d = {}
156 d = {}
157 for k, v in marks.iteritems():
157 for k, v in marks.iteritems():
158 d[k] = hex(v)
158 d[k] = hex(v)
159 return d
159 return d
160
160
161 def pushbookmark(repo, key, old, new):
161 def pushbookmark(repo, key, old, new):
162 w = repo.wlock()
162 w = repo.wlock()
163 try:
163 try:
164 marks = repo._bookmarks
164 marks = repo._bookmarks
165 if hex(marks.get(key, '')) != old:
165 if hex(marks.get(key, '')) != old:
166 return False
166 return False
167 if new == '':
167 if new == '':
168 del marks[key]
168 del marks[key]
169 else:
169 else:
170 if new not in repo:
170 if new not in repo:
171 return False
171 return False
172 marks[key] = repo[new].node()
172 marks[key] = repo[new].node()
173 write(repo)
173 write(repo)
174 return True
174 return True
175 finally:
175 finally:
176 w.release()
176 w.release()
177
177
178 def updatefromremote(ui, repo, remote):
178 def updatefromremote(ui, repo, remote):
179 ui.debug("checking for updated bookmarks\n")
179 ui.debug("checking for updated bookmarks\n")
180 rb = remote.listkeys('bookmarks')
180 rb = remote.listkeys('bookmarks')
181 changed = False
181 changed = False
182 for k in rb.keys():
182 for k in rb.keys():
183 if k in repo._bookmarks:
183 if k in repo._bookmarks:
184 nr, nl = rb[k], repo._bookmarks[k]
184 nr, nl = rb[k], repo._bookmarks[k]
185 if nr in repo:
185 if nr in repo:
186 cr = repo[nr]
186 cr = repo[nr]
187 cl = repo[nl]
187 cl = repo[nl]
188 if cl.rev() >= cr.rev():
188 if cl.rev() >= cr.rev():
189 continue
189 continue
190 if cr in cl.descendants():
190 if cr in cl.descendants():
191 repo._bookmarks[k] = cr.node()
191 repo._bookmarks[k] = cr.node()
192 changed = True
192 changed = True
193 ui.status(_("updating bookmark %s\n") % k)
193 ui.status(_("updating bookmark %s\n") % k)
194 else:
194 else:
195 ui.warn(_("not updating divergent"
195 ui.warn(_("not updating divergent"
196 " bookmark %s\n") % k)
196 " bookmark %s\n") % k)
197 if changed:
197 if changed:
198 write(repo)
198 write(repo)
199
199
200 def diff(ui, repo, remote):
200 def diff(ui, repo, remote):
201 ui.status(_("searching for changed bookmarks\n"))
201 ui.status(_("searching for changed bookmarks\n"))
202
202
203 lmarks = repo.listkeys('bookmarks')
203 lmarks = repo.listkeys('bookmarks')
204 rmarks = remote.listkeys('bookmarks')
204 rmarks = remote.listkeys('bookmarks')
205
205
206 diff = sorted(set(rmarks) - set(lmarks))
206 diff = sorted(set(rmarks) - set(lmarks))
207 for k in diff:
207 for k in diff:
208 ui.write(" %-25s %s\n" % (k, rmarks[k][:12]))
208 ui.write(" %-25s %s\n" % (k, rmarks[k][:12]))
209
209
210 if not diff:
210 if not diff:
211 ui.status(_("no changed bookmarks found\n"))
211 ui.status(_("no changed bookmarks found\n"))
212 return 1
212 return 1
213 return 0
213 return 0
@@ -1,721 +1,721 b''
1 # dirstate.py - working directory tracking for mercurial
1 # dirstate.py - working directory tracking for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import nullid
8 from node import nullid
9 from i18n import _
9 from i18n import _
10 import scmutil, util, ignore, osutil, parsers, encoding
10 import scmutil, util, ignore, osutil, parsers, encoding
11 import struct, os, stat, errno
11 import struct, os, stat, errno
12 import cStringIO
12 import cStringIO
13
13
14 _format = ">cllll"
14 _format = ">cllll"
15 propertycache = util.propertycache
15 propertycache = util.propertycache
16
16
17 def _finddirs(path):
17 def _finddirs(path):
18 pos = path.rfind('/')
18 pos = path.rfind('/')
19 while pos != -1:
19 while pos != -1:
20 yield path[:pos]
20 yield path[:pos]
21 pos = path.rfind('/', 0, pos)
21 pos = path.rfind('/', 0, pos)
22
22
23 def _incdirs(dirs, path):
23 def _incdirs(dirs, path):
24 for base in _finddirs(path):
24 for base in _finddirs(path):
25 if base in dirs:
25 if base in dirs:
26 dirs[base] += 1
26 dirs[base] += 1
27 return
27 return
28 dirs[base] = 1
28 dirs[base] = 1
29
29
30 def _decdirs(dirs, path):
30 def _decdirs(dirs, path):
31 for base in _finddirs(path):
31 for base in _finddirs(path):
32 if dirs[base] > 1:
32 if dirs[base] > 1:
33 dirs[base] -= 1
33 dirs[base] -= 1
34 return
34 return
35 del dirs[base]
35 del dirs[base]
36
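For orientation, _finddirs yields each ancestor directory of a path from deepest to shallowest, and _incdirs/_decdirs keep reference counts on those prefixes. A small usage sketch, grounded in the definitions above:

    # list(_finddirs('a/b/c.txt')) == ['a/b', 'a']
    dirs = {}
    _incdirs(dirs, 'a/b/c.txt')   # dirs == {'a/b': 1, 'a': 1}
    _incdirs(dirs, 'a/d.txt')     # 'a' already counted: dirs == {'a/b': 1, 'a': 2}
    _decdirs(dirs, 'a/b/c.txt')   # drops 'a/b', decrements 'a': dirs == {'a': 1}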
36
37 class dirstate(object):
37 class dirstate(object):
38
38
39 def __init__(self, opener, ui, root, validate):
39 def __init__(self, opener, ui, root, validate):
40 '''Create a new dirstate object.
40 '''Create a new dirstate object.
41
41
42 opener is an open()-like callable that can be used to open the
42 opener is an open()-like callable that can be used to open the
43 dirstate file; root is the root of the directory tracked by
43 dirstate file; root is the root of the directory tracked by
44 the dirstate.
44 the dirstate.
45 '''
45 '''
46 self._opener = opener
46 self._opener = opener
47 self._validate = validate
47 self._validate = validate
48 self._root = root
48 self._root = root
49 self._rootdir = os.path.join(root, '')
49 self._rootdir = os.path.join(root, '')
50 self._dirty = False
50 self._dirty = False
51 self._dirtypl = False
51 self._dirtypl = False
52 self._lastnormaltime = None
52 self._lastnormaltime = None
53 self._ui = ui
53 self._ui = ui
54
54
55 @propertycache
55 @propertycache
56 def _map(self):
56 def _map(self):
57 '''Return the dirstate contents as a map from filename to
57 '''Return the dirstate contents as a map from filename to
58 (state, mode, size, time).'''
58 (state, mode, size, time).'''
59 self._read()
59 self._read()
60 return self._map
60 return self._map
61
61
62 @propertycache
62 @propertycache
63 def _copymap(self):
63 def _copymap(self):
64 self._read()
64 self._read()
65 return self._copymap
65 return self._copymap
66
66
67 @propertycache
67 @propertycache
68 def _foldmap(self):
68 def _foldmap(self):
69 f = {}
69 f = {}
70 for name in self._map:
70 for name in self._map:
71 f[os.path.normcase(name)] = name
71 f[os.path.normcase(name)] = name
72 return f
72 return f
73
73
74 @propertycache
74 @propertycache
75 def _branch(self):
75 def _branch(self):
76 try:
76 try:
77 return self._opener.read("branch").strip() or "default"
77 return self._opener.read("branch").strip() or "default"
78 except IOError:
78 except IOError:
79 return "default"
79 return "default"
80
80
81 @propertycache
81 @propertycache
82 def _pl(self):
82 def _pl(self):
83 try:
83 try:
84 fp = self._opener("dirstate")
84 fp = self._opener("dirstate")
85 st = fp.read(40)
85 st = fp.read(40)
86 fp.close()
86 fp.close()
87 l = len(st)
87 l = len(st)
88 if l == 40:
88 if l == 40:
89 return st[:20], st[20:40]
89 return st[:20], st[20:40]
90 elif l > 0 and l < 40:
90 elif l > 0 and l < 40:
91 raise util.Abort(_('working directory state appears damaged!'))
91 raise util.Abort(_('working directory state appears damaged!'))
92 except IOError, err:
92 except IOError, err:
93 if err.errno != errno.ENOENT:
93 if err.errno != errno.ENOENT:
94 raise
94 raise
95 return [nullid, nullid]
95 return [nullid, nullid]
96
96
97 @propertycache
97 @propertycache
98 def _dirs(self):
98 def _dirs(self):
99 dirs = {}
99 dirs = {}
100 for f, s in self._map.iteritems():
100 for f, s in self._map.iteritems():
101 if s[0] != 'r':
101 if s[0] != 'r':
102 _incdirs(dirs, f)
102 _incdirs(dirs, f)
103 return dirs
103 return dirs
104
104
105 @propertycache
105 @propertycache
106 def _ignore(self):
106 def _ignore(self):
107 files = [self._join('.hgignore')]
107 files = [self._join('.hgignore')]
108 for name, path in self._ui.configitems("ui"):
108 for name, path in self._ui.configitems("ui"):
109 if name == 'ignore' or name.startswith('ignore.'):
109 if name == 'ignore' or name.startswith('ignore.'):
110 files.append(util.expandpath(path))
110 files.append(util.expandpath(path))
111 return ignore.ignore(self._root, files, self._ui.warn)
111 return ignore.ignore(self._root, files, self._ui.warn)
112
112
113 @propertycache
113 @propertycache
114 def _slash(self):
114 def _slash(self):
115 return self._ui.configbool('ui', 'slash') and os.sep != '/'
115 return self._ui.configbool('ui', 'slash') and os.sep != '/'
116
116
117 @propertycache
117 @propertycache
118 def _checklink(self):
118 def _checklink(self):
119 return util.checklink(self._root)
119 return util.checklink(self._root)
120
120
121 @propertycache
121 @propertycache
122 def _checkexec(self):
122 def _checkexec(self):
123 return util.checkexec(self._root)
123 return util.checkexec(self._root)
124
124
125 @propertycache
125 @propertycache
126 def _checkcase(self):
126 def _checkcase(self):
127 return not util.checkcase(self._join('.hg'))
127 return not util.checkcase(self._join('.hg'))
128
128
129 def _join(self, f):
129 def _join(self, f):
130 # much faster than os.path.join()
130 # much faster than os.path.join()
131 # it's safe because f is always a relative path
131 # it's safe because f is always a relative path
132 return self._rootdir + f
132 return self._rootdir + f
133
133
134 def flagfunc(self, fallback):
134 def flagfunc(self, fallback):
135 if self._checklink:
135 if self._checklink:
136 if self._checkexec:
136 if self._checkexec:
137 def f(x):
137 def f(x):
138 p = self._join(x)
138 p = self._join(x)
139 if os.path.islink(p):
139 if os.path.islink(p):
140 return 'l'
140 return 'l'
141 if util.isexec(p):
141 if util.isexec(p):
142 return 'x'
142 return 'x'
143 return ''
143 return ''
144 return f
144 return f
145 def f(x):
145 def f(x):
146 if os.path.islink(self._join(x)):
146 if os.path.islink(self._join(x)):
147 return 'l'
147 return 'l'
148 if 'x' in fallback(x):
148 if 'x' in fallback(x):
149 return 'x'
149 return 'x'
150 return ''
150 return ''
151 return f
151 return f
152 if self._checkexec:
152 if self._checkexec:
153 def f(x):
153 def f(x):
154 if 'l' in fallback(x):
154 if 'l' in fallback(x):
155 return 'l'
155 return 'l'
156 if util.isexec(self._join(x)):
156 if util.isexec(self._join(x)):
157 return 'x'
157 return 'x'
158 return ''
158 return ''
159 return f
159 return f
160 return fallback
160 return fallback
161
161
162 def getcwd(self):
162 def getcwd(self):
163 cwd = os.getcwd()
163 cwd = os.getcwd()
164 if cwd == self._root:
164 if cwd == self._root:
165 return ''
165 return ''
166 # self._root ends with a path separator if self._root is '/' or 'C:\'
166 # self._root ends with a path separator if self._root is '/' or 'C:\'
167 rootsep = self._root
167 rootsep = self._root
168 if not util.endswithsep(rootsep):
168 if not util.endswithsep(rootsep):
169 rootsep += os.sep
169 rootsep += os.sep
170 if cwd.startswith(rootsep):
170 if cwd.startswith(rootsep):
171 return cwd[len(rootsep):]
171 return cwd[len(rootsep):]
172 else:
172 else:
173 # we're outside the repo. return an absolute path.
173 # we're outside the repo. return an absolute path.
174 return cwd
174 return cwd
175
175
176 def pathto(self, f, cwd=None):
176 def pathto(self, f, cwd=None):
177 if cwd is None:
177 if cwd is None:
178 cwd = self.getcwd()
178 cwd = self.getcwd()
179 path = util.pathto(self._root, cwd, f)
179 path = util.pathto(self._root, cwd, f)
180 if self._slash:
180 if self._slash:
181 return util.normpath(path)
181 return util.normpath(path)
182 return path
182 return path
183
183
184 def __getitem__(self, key):
184 def __getitem__(self, key):
185 '''Return the current state of key (a filename) in the dirstate.
185 '''Return the current state of key (a filename) in the dirstate.
186
186
187 States are:
187 States are:
188 n normal
188 n normal
189 m needs merging
189 m needs merging
190 r marked for removal
190 r marked for removal
191 a marked for addition
191 a marked for addition
192 ? not tracked
192 ? not tracked
193 '''
193 '''
194 return self._map.get(key, ("?",))[0]
194 return self._map.get(key, ("?",))[0]
195
195
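Given the state table in the docstring above, a quick sketch of what __getitem__ returns for a dirstate ds (hypothetical filenames):

    # ds['hello.c']     -> 'n'   tracked and believed clean
    # ds['newfile.c']   -> 'a'   scheduled for addition
    # ds['gone.c']      -> 'r'   scheduled for removal
    # ds['scratch.tmp'] -> '?'   untracked; the ("?",) default above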
196 def __contains__(self, key):
196 def __contains__(self, key):
197 return key in self._map
197 return key in self._map
198
198
199 def __iter__(self):
199 def __iter__(self):
200 for x in sorted(self._map):
200 for x in sorted(self._map):
201 yield x
201 yield x
202
202
203 def parents(self):
203 def parents(self):
204 return [self._validate(p) for p in self._pl]
204 return [self._validate(p) for p in self._pl]
205
205
206 def p1(self):
206 def p1(self):
207 return self._validate(self._pl[0])
207 return self._validate(self._pl[0])
208
208
209 def p2(self):
209 def p2(self):
210 return self._validate(self._pl[1])
210 return self._validate(self._pl[1])
211
211
212 def branch(self):
212 def branch(self):
213 return encoding.tolocal(self._branch)
213 return encoding.tolocal(self._branch)
214
214
215 def setparents(self, p1, p2=nullid):
215 def setparents(self, p1, p2=nullid):
216 self._dirty = self._dirtypl = True
216 self._dirty = self._dirtypl = True
217 self._pl = p1, p2
217 self._pl = p1, p2
218
218
219 def setbranch(self, branch):
219 def setbranch(self, branch):
220 if branch in ['tip', '.', 'null']:
220 if branch in ['tip', '.', 'null']:
221 raise util.Abort(_('the name \'%s\' is reserved') % branch)
221 raise util.Abort(_('the name \'%s\' is reserved') % branch)
222 self._branch = encoding.fromlocal(branch)
222 self._branch = encoding.fromlocal(branch)
223 self._opener.write("branch", self._branch + '\n')
223 self._opener.write("branch", self._branch + '\n')
224
224
225 def _read(self):
225 def _read(self):
226 self._map = {}
226 self._map = {}
227 self._copymap = {}
227 self._copymap = {}
228 try:
228 try:
229 st = self._opener.read("dirstate")
229 st = self._opener.read("dirstate")
230 except IOError, err:
230 except IOError, err:
231 if err.errno != errno.ENOENT:
231 if err.errno != errno.ENOENT:
232 raise
232 raise
233 return
233 return
234 if not st:
234 if not st:
235 return
235 return
236
236
237 p = parsers.parse_dirstate(self._map, self._copymap, st)
237 p = parsers.parse_dirstate(self._map, self._copymap, st)
238 if not self._dirtypl:
238 if not self._dirtypl:
239 self._pl = p
239 self._pl = p
240
240
241 def invalidate(self):
241 def invalidate(self):
242 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
242 for a in ("_map", "_copymap", "_foldmap", "_branch", "_pl", "_dirs",
243 "_ignore"):
243 "_ignore"):
244 if a in self.__dict__:
244 if a in self.__dict__:
245 delattr(self, a)
245 delattr(self, a)
246 self._lastnormaltime = None
246 self._lastnormaltime = None
247 self._dirty = False
247 self._dirty = False
248
248
249 def copy(self, source, dest):
249 def copy(self, source, dest):
250 """Mark dest as a copy of source. Unmark dest if source is None."""
250 """Mark dest as a copy of source. Unmark dest if source is None."""
251 if source == dest:
251 if source == dest:
252 return
252 return
253 self._dirty = True
253 self._dirty = True
254 if source is not None:
254 if source is not None:
255 self._copymap[dest] = source
255 self._copymap[dest] = source
256 elif dest in self._copymap:
256 elif dest in self._copymap:
257 del self._copymap[dest]
257 del self._copymap[dest]
258
258
259 def copied(self, file):
259 def copied(self, file):
260 return self._copymap.get(file, None)
260 return self._copymap.get(file, None)
261
261
262 def copies(self):
262 def copies(self):
263 return self._copymap
263 return self._copymap
264
264
265 def _droppath(self, f):
265 def _droppath(self, f):
266 if self[f] not in "?r" and "_dirs" in self.__dict__:
266 if self[f] not in "?r" and "_dirs" in self.__dict__:
267 _decdirs(self._dirs, f)
267 _decdirs(self._dirs, f)
268
268
269 def _addpath(self, f, check=False):
269 def _addpath(self, f, check=False):
270 oldstate = self[f]
270 oldstate = self[f]
271 if check or oldstate == "r":
271 if check or oldstate == "r":
272 scmutil.checkfilename(f)
272 scmutil.checkfilename(f)
273 if f in self._dirs:
273 if f in self._dirs:
274 raise util.Abort(_('directory %r already in dirstate') % f)
274 raise util.Abort(_('directory %r already in dirstate') % f)
275 # a parent directory of f must not already be tracked as a file
275 # a parent directory of f must not already be tracked as a file
276 for d in _finddirs(f):
276 for d in _finddirs(f):
277 if d in self._dirs:
277 if d in self._dirs:
278 break
278 break
279 if d in self._map and self[d] != 'r':
279 if d in self._map and self[d] != 'r':
280 raise util.Abort(
280 raise util.Abort(
281 _('file %r in dirstate clashes with %r') % (d, f))
281 _('file %r in dirstate clashes with %r') % (d, f))
282 if oldstate in "?r" and "_dirs" in self.__dict__:
282 if oldstate in "?r" and "_dirs" in self.__dict__:
283 _incdirs(self._dirs, f)
283 _incdirs(self._dirs, f)
284
284
285 def normal(self, f):
285 def normal(self, f):
286 '''Mark a file normal and clean.'''
286 '''Mark a file normal and clean.'''
287 self._dirty = True
287 self._dirty = True
288 self._addpath(f)
288 self._addpath(f)
289 s = os.lstat(self._join(f))
289 s = os.lstat(self._join(f))
290 mtime = int(s.st_mtime)
290 mtime = int(s.st_mtime)
291 self._map[f] = ('n', s.st_mode, s.st_size, mtime)
291 self._map[f] = ('n', s.st_mode, s.st_size, mtime)
292 if f in self._copymap:
292 if f in self._copymap:
293 del self._copymap[f]
293 del self._copymap[f]
294 if mtime > self._lastnormaltime:
294 if mtime > self._lastnormaltime:
295 # Remember the most recent modification timeslot for status(),
295 # Remember the most recent modification timeslot for status(),
296 # to make sure we won't miss future size-preserving file content
296 # to make sure we won't miss future size-preserving file content
297 # modifications that happen within the same timeslot.
297 # modifications that happen within the same timeslot.
298 self._lastnormaltime = mtime
298 self._lastnormaltime = mtime
299
299
300 def normallookup(self, f):
300 def normallookup(self, f):
301 '''Mark a file normal, but possibly dirty.'''
301 '''Mark a file normal, but possibly dirty.'''
302 if self._pl[1] != nullid and f in self._map:
302 if self._pl[1] != nullid and f in self._map:
303 # if there is a merge going on and the file was either
303 # if there is a merge going on and the file was either
304 # in state 'm' (-1) or coming from other parent (-2) before
304 # in state 'm' (-1) or coming from other parent (-2) before
305 # being removed, restore that state.
305 # being removed, restore that state.
306 entry = self._map[f]
306 entry = self._map[f]
307 if entry[0] == 'r' and entry[2] in (-1, -2):
307 if entry[0] == 'r' and entry[2] in (-1, -2):
308 source = self._copymap.get(f)
308 source = self._copymap.get(f)
309 if entry[2] == -1:
309 if entry[2] == -1:
310 self.merge(f)
310 self.merge(f)
311 elif entry[2] == -2:
311 elif entry[2] == -2:
312 self.otherparent(f)
312 self.otherparent(f)
313 if source:
313 if source:
314 self.copy(source, f)
314 self.copy(source, f)
315 return
315 return
316 if entry[0] == 'm' or (entry[0] == 'n' and entry[2] == -2):
316 if entry[0] == 'm' or (entry[0] == 'n' and entry[2] == -2):
317 return
317 return
318 self._dirty = True
318 self._dirty = True
319 self._addpath(f)
319 self._addpath(f)
320 self._map[f] = ('n', 0, -1, -1)
320 self._map[f] = ('n', 0, -1, -1)
321 if f in self._copymap:
321 if f in self._copymap:
322 del self._copymap[f]
322 del self._copymap[f]
323
323
324 def otherparent(self, f):
324 def otherparent(self, f):
325 '''Mark as coming from the other parent, always dirty.'''
325 '''Mark as coming from the other parent, always dirty.'''
326 if self._pl[1] == nullid:
326 if self._pl[1] == nullid:
327 raise util.Abort(_("setting %r to other parent "
327 raise util.Abort(_("setting %r to other parent "
328 "only allowed in merges") % f)
328 "only allowed in merges") % f)
329 self._dirty = True
329 self._dirty = True
330 self._addpath(f)
330 self._addpath(f)
331 self._map[f] = ('n', 0, -2, -1)
331 self._map[f] = ('n', 0, -2, -1)
332 if f in self._copymap:
332 if f in self._copymap:
333 del self._copymap[f]
333 del self._copymap[f]
334
334
335 def add(self, f):
335 def add(self, f):
336 '''Mark a file added.'''
336 '''Mark a file added.'''
337 self._dirty = True
337 self._dirty = True
338 self._addpath(f, True)
338 self._addpath(f, True)
339 self._map[f] = ('a', 0, -1, -1)
339 self._map[f] = ('a', 0, -1, -1)
340 if f in self._copymap:
340 if f in self._copymap:
341 del self._copymap[f]
341 del self._copymap[f]
342
342
343 def remove(self, f):
343 def remove(self, f):
344 '''Mark a file removed.'''
344 '''Mark a file removed.'''
345 self._dirty = True
345 self._dirty = True
346 self._droppath(f)
346 self._droppath(f)
347 size = 0
347 size = 0
348 if self._pl[1] != nullid and f in self._map:
348 if self._pl[1] != nullid and f in self._map:
349 # backup the previous state
349 # backup the previous state
350 entry = self._map[f]
350 entry = self._map[f]
351 if entry[0] == 'm': # merge
351 if entry[0] == 'm': # merge
352 size = -1
352 size = -1
353 elif entry[0] == 'n' and entry[2] == -2: # other parent
353 elif entry[0] == 'n' and entry[2] == -2: # other parent
354 size = -2
354 size = -2
355 self._map[f] = ('r', 0, size, 0)
355 self._map[f] = ('r', 0, size, 0)
356 if size == 0 and f in self._copymap:
356 if size == 0 and f in self._copymap:
357 del self._copymap[f]
357 del self._copymap[f]
358
358
359 def merge(self, f):
359 def merge(self, f):
360 '''Mark a file merged.'''
360 '''Mark a file merged.'''
361 self._dirty = True
361 self._dirty = True
362 s = os.lstat(self._join(f))
362 s = os.lstat(self._join(f))
363 self._addpath(f)
363 self._addpath(f)
364 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
364 self._map[f] = ('m', s.st_mode, s.st_size, int(s.st_mtime))
365 if f in self._copymap:
365 if f in self._copymap:
366 del self._copymap[f]
366 del self._copymap[f]
367
367
368 def drop(self, f):
368 def drop(self, f):
369 '''Drop a file from the dirstate'''
369 '''Drop a file from the dirstate'''
370 self._dirty = True
370 self._dirty = True
371 self._droppath(f)
371 self._droppath(f)
372 del self._map[f]
372 del self._map[f]
373
373
374 def _normalize(self, path, isknown):
374 def _normalize(self, path, isknown):
375 normed = os.path.normcase(path)
375 normed = os.path.normcase(path)
376 folded = self._foldmap.get(normed, None)
376 folded = self._foldmap.get(normed, None)
377 if folded is None:
377 if folded is None:
378 if isknown or not os.path.lexists(os.path.join(self._root, path)):
378 if isknown or not os.path.lexists(os.path.join(self._root, path)):
379 folded = path
379 folded = path
380 else:
380 else:
381 folded = self._foldmap.setdefault(normed,
381 folded = self._foldmap.setdefault(normed,
382 util.fspath(path, self._root))
382 util.fspath(path, self._root))
383 return folded
383 return folded
384
384
385 def normalize(self, path, isknown=False):
385 def normalize(self, path, isknown=False):
386 '''
386 '''
387 normalize the case of a pathname when on a casefolding filesystem
387 normalize the case of a pathname when on a casefolding filesystem
388
388
389 isknown specifies whether the filename came from walking the
389 isknown specifies whether the filename came from walking the
390 disk, to avoid extra filesystem access
390 disk, to avoid extra filesystem access
391
391
392 The normalized case is determined based on the following precedence:
392 The normalized case is determined based on the following precedence:
393
393
394 - version of name already stored in the dirstate
394 - version of name already stored in the dirstate
395 - version of name stored on disk
395 - version of name stored on disk
396 - version provided via command arguments
396 - version provided via command arguments
397 '''
397 '''
398
398
399 if self._checkcase:
399 if self._checkcase:
400 return self._normalize(path, isknown)
400 return self._normalize(path, isknown)
401 return path
401 return path
402
402
403 def clear(self):
403 def clear(self):
404 self._map = {}
404 self._map = {}
405 if "_dirs" in self.__dict__:
405 if "_dirs" in self.__dict__:
406 delattr(self, "_dirs")
406 delattr(self, "_dirs")
407 self._copymap = {}
407 self._copymap = {}
408 self._pl = [nullid, nullid]
408 self._pl = [nullid, nullid]
409 self._lastnormaltime = None
409 self._lastnormaltime = None
410 self._dirty = True
410 self._dirty = True
411
411
412 def rebuild(self, parent, files):
412 def rebuild(self, parent, files):
413 self.clear()
413 self.clear()
414 for f in files:
414 for f in files:
415 if 'x' in files.flags(f):
415 if 'x' in files.flags(f):
416 self._map[f] = ('n', 0777, -1, 0)
416 self._map[f] = ('n', 0777, -1, 0)
417 else:
417 else:
418 self._map[f] = ('n', 0666, -1, 0)
418 self._map[f] = ('n', 0666, -1, 0)
419 self._pl = (parent, nullid)
419 self._pl = (parent, nullid)
420 self._dirty = True
420 self._dirty = True
421
421
422 def write(self):
422 def write(self):
423 if not self._dirty:
423 if not self._dirty:
424 return
424 return
425 st = self._opener("dirstate", "w", atomictemp=True)
425 st = self._opener("dirstate", "w", atomictemp=True)
426
426
427 # use the modification time of the newly created temporary file as the
427 # use the modification time of the newly created temporary file as the
428 # filesystem's notion of 'now'
428 # filesystem's notion of 'now'
429 now = int(util.fstat(st).st_mtime)
429 now = int(util.fstat(st).st_mtime)
430
430
431 cs = cStringIO.StringIO()
431 cs = cStringIO.StringIO()
432 copymap = self._copymap
432 copymap = self._copymap
433 pack = struct.pack
433 pack = struct.pack
434 write = cs.write
434 write = cs.write
435 write("".join(self._pl))
435 write("".join(self._pl))
436 for f, e in self._map.iteritems():
436 for f, e in self._map.iteritems():
437 if e[0] == 'n' and e[3] == now:
437 if e[0] == 'n' and e[3] == now:
438 # The file was last modified "simultaneously" with the current
438 # The file was last modified "simultaneously" with the current
439 # write to dirstate (i.e. within the same second for file-
439 # write to dirstate (i.e. within the same second for file-
440 # systems with a granularity of 1 sec). This commonly happens
440 # systems with a granularity of 1 sec). This commonly happens
441 # for at least a couple of files on 'update'.
441 # for at least a couple of files on 'update'.
442 # The user could change the file without changing its size
442 # The user could change the file without changing its size
443 # within the same second. Invalidate the file's stat data in
443 # within the same second. Invalidate the file's stat data in
444 # dirstate, forcing future 'status' calls to compare the
444 # dirstate, forcing future 'status' calls to compare the
445 # contents of the file. This prevents mistakenly treating such
445 # contents of the file. This prevents mistakenly treating such
446 # files as clean.
446 # files as clean.
447 e = (e[0], 0, -1, -1) # mark entry as 'unset'
447 e = (e[0], 0, -1, -1) # mark entry as 'unset'
448 self._map[f] = e
448 self._map[f] = e
449
449
450 if f in copymap:
450 if f in copymap:
451 f = "%s\0%s" % (f, copymap[f])
451 f = "%s\0%s" % (f, copymap[f])
452 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
452 e = pack(_format, e[0], e[1], e[2], e[3], len(f))
453 write(e)
453 write(e)
454 write(f)
454 write(f)
455 st.write(cs.getvalue())
455 st.write(cs.getvalue())
456 st.rename()
456 st.close()
457 self._lastnormaltime = None
457 self._lastnormaltime = None
458 self._dirty = self._dirtypl = False
458 self._dirty = self._dirtypl = False
459
459
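Concretely, each record written above is the _format = ">cllll" header (state, mode, size, mtime, name length) followed by the filename, where the name becomes "dest\0source" for files in copymap. A small sketch of packing and unpacking one entry with hypothetical values (Python 2 octal literal, as in this codebase):

    import struct

    _format = ">cllll"
    f = "src/foo.c"
    e = ('n', 0644, 1234, 1300000000)           # state, mode, size, mtime
    rec = struct.pack(_format, e[0], e[1], e[2], e[3], len(f)) + f

    # unpacking recovers the fields; the name follows the fixed-size header
    hdr = struct.calcsize(_format)              # 17 bytes: 1 + 4 * 4
    state, mode, size, mtime, flen = struct.unpack(_format, rec[:hdr])
    assert (state, rec[hdr:hdr + flen]) == ('n', f)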
460 def _dirignore(self, f):
460 def _dirignore(self, f):
461 if f == '.':
461 if f == '.':
462 return False
462 return False
463 if self._ignore(f):
463 if self._ignore(f):
464 return True
464 return True
465 for p in _finddirs(f):
465 for p in _finddirs(f):
466 if self._ignore(p):
466 if self._ignore(p):
467 return True
467 return True
468 return False
468 return False
469
469
470 def walk(self, match, subrepos, unknown, ignored):
470 def walk(self, match, subrepos, unknown, ignored):
471 '''
471 '''
472 Walk recursively through the directory tree, finding all files
472 Walk recursively through the directory tree, finding all files
473 matched by match.
473 matched by match.
474
474
475 Return a dict mapping filename to stat-like object (either
475 Return a dict mapping filename to stat-like object (either
476 mercurial.osutil.stat instance or return value of os.stat()).
476 mercurial.osutil.stat instance or return value of os.stat()).
477 '''
477 '''
478
478
479 def fwarn(f, msg):
479 def fwarn(f, msg):
480 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
480 self._ui.warn('%s: %s\n' % (self.pathto(f), msg))
481 return False
481 return False
482
482
483 def badtype(mode):
483 def badtype(mode):
484 kind = _('unknown')
484 kind = _('unknown')
485 if stat.S_ISCHR(mode):
485 if stat.S_ISCHR(mode):
486 kind = _('character device')
486 kind = _('character device')
487 elif stat.S_ISBLK(mode):
487 elif stat.S_ISBLK(mode):
488 kind = _('block device')
488 kind = _('block device')
489 elif stat.S_ISFIFO(mode):
489 elif stat.S_ISFIFO(mode):
490 kind = _('fifo')
490 kind = _('fifo')
491 elif stat.S_ISSOCK(mode):
491 elif stat.S_ISSOCK(mode):
492 kind = _('socket')
492 kind = _('socket')
493 elif stat.S_ISDIR(mode):
493 elif stat.S_ISDIR(mode):
494 kind = _('directory')
494 kind = _('directory')
495 return _('unsupported file type (type is %s)') % kind
495 return _('unsupported file type (type is %s)') % kind
496
496
497 ignore = self._ignore
497 ignore = self._ignore
498 dirignore = self._dirignore
498 dirignore = self._dirignore
499 if ignored:
499 if ignored:
500 ignore = util.never
500 ignore = util.never
501 dirignore = util.never
501 dirignore = util.never
502 elif not unknown:
502 elif not unknown:
503 # if unknown and ignored are False, skip step 2
503 # if unknown and ignored are False, skip step 2
504 ignore = util.always
504 ignore = util.always
505 dirignore = util.always
505 dirignore = util.always
506
506
507 matchfn = match.matchfn
507 matchfn = match.matchfn
508 badfn = match.bad
508 badfn = match.bad
509 dmap = self._map
509 dmap = self._map
510 normpath = util.normpath
510 normpath = util.normpath
511 listdir = osutil.listdir
511 listdir = osutil.listdir
512 lstat = os.lstat
512 lstat = os.lstat
513 getkind = stat.S_IFMT
513 getkind = stat.S_IFMT
514 dirkind = stat.S_IFDIR
514 dirkind = stat.S_IFDIR
515 regkind = stat.S_IFREG
515 regkind = stat.S_IFREG
516 lnkkind = stat.S_IFLNK
516 lnkkind = stat.S_IFLNK
517 join = self._join
517 join = self._join
518 work = []
518 work = []
519 wadd = work.append
519 wadd = work.append
520
520
521 exact = skipstep3 = False
521 exact = skipstep3 = False
522 if matchfn == match.exact: # match.exact
522 if matchfn == match.exact: # match.exact
523 exact = True
523 exact = True
524 dirignore = util.always # skip step 2
524 dirignore = util.always # skip step 2
525 elif match.files() and not match.anypats(): # match.match, no patterns
525 elif match.files() and not match.anypats(): # match.match, no patterns
526 skipstep3 = True
526 skipstep3 = True
527
527
528 if self._checkcase:
528 if self._checkcase:
529 normalize = self._normalize
529 normalize = self._normalize
530 skipstep3 = False
530 skipstep3 = False
531 else:
531 else:
532 normalize = lambda x, y: x
532 normalize = lambda x, y: x
533
533
534 files = sorted(match.files())
534 files = sorted(match.files())
535 subrepos.sort()
535 subrepos.sort()
536 i, j = 0, 0
536 i, j = 0, 0
537 while i < len(files) and j < len(subrepos):
537 while i < len(files) and j < len(subrepos):
538 subpath = subrepos[j] + "/"
538 subpath = subrepos[j] + "/"
539 if files[i] < subpath:
539 if files[i] < subpath:
540 i += 1
540 i += 1
541 continue
541 continue
542 while i < len(files) and files[i].startswith(subpath):
542 while i < len(files) and files[i].startswith(subpath):
543 del files[i]
543 del files[i]
544 j += 1
544 j += 1
545
545
546 if not files or '.' in files:
546 if not files or '.' in files:
547 files = ['']
547 files = ['']
548 results = dict.fromkeys(subrepos)
548 results = dict.fromkeys(subrepos)
549 results['.hg'] = None
549 results['.hg'] = None
550
550
551 # step 1: find all explicit files
551 # step 1: find all explicit files
552 for ff in files:
552 for ff in files:
553 nf = normalize(normpath(ff), False)
553 nf = normalize(normpath(ff), False)
554 if nf in results:
554 if nf in results:
555 continue
555 continue
556
556
557 try:
557 try:
558 st = lstat(join(nf))
558 st = lstat(join(nf))
559 kind = getkind(st.st_mode)
559 kind = getkind(st.st_mode)
560 if kind == dirkind:
560 if kind == dirkind:
561 skipstep3 = False
561 skipstep3 = False
562 if nf in dmap:
562 if nf in dmap:
563 # file deleted on disk but still in dirstate
563 # file deleted on disk but still in dirstate
564 results[nf] = None
564 results[nf] = None
565 match.dir(nf)
565 match.dir(nf)
566 if not dirignore(nf):
566 if not dirignore(nf):
567 wadd(nf)
567 wadd(nf)
568 elif kind == regkind or kind == lnkkind:
568 elif kind == regkind or kind == lnkkind:
569 results[nf] = st
569 results[nf] = st
570 else:
570 else:
571 badfn(ff, badtype(kind))
571 badfn(ff, badtype(kind))
572 if nf in dmap:
572 if nf in dmap:
573 results[nf] = None
573 results[nf] = None
574 except OSError, inst:
574 except OSError, inst:
575 if nf in dmap: # does it exactly match a file?
575 if nf in dmap: # does it exactly match a file?
576 results[nf] = None
576 results[nf] = None
577 else: # does it match a directory?
577 else: # does it match a directory?
578 prefix = nf + "/"
578 prefix = nf + "/"
579 for fn in dmap:
579 for fn in dmap:
580 if fn.startswith(prefix):
580 if fn.startswith(prefix):
581 match.dir(nf)
581 match.dir(nf)
582 skipstep3 = False
582 skipstep3 = False
583 break
583 break
584 else:
584 else:
585 badfn(ff, inst.strerror)
585 badfn(ff, inst.strerror)
586
586
587 # step 2: visit subdirectories
587 # step 2: visit subdirectories
588 while work:
588 while work:
589 nd = work.pop()
589 nd = work.pop()
590 skip = None
590 skip = None
591 if nd == '.':
591 if nd == '.':
592 nd = ''
592 nd = ''
593 else:
593 else:
594 skip = '.hg'
594 skip = '.hg'
595 try:
595 try:
596 entries = listdir(join(nd), stat=True, skip=skip)
596 entries = listdir(join(nd), stat=True, skip=skip)
597 except OSError, inst:
597 except OSError, inst:
598 if inst.errno == errno.EACCES:
598 if inst.errno == errno.EACCES:
599 fwarn(nd, inst.strerror)
599 fwarn(nd, inst.strerror)
600 continue
600 continue
601 raise
601 raise
602 for f, kind, st in entries:
602 for f, kind, st in entries:
603 nf = normalize(nd and (nd + "/" + f) or f, True)
603 nf = normalize(nd and (nd + "/" + f) or f, True)
604 if nf not in results:
604 if nf not in results:
605 if kind == dirkind:
605 if kind == dirkind:
606 if not ignore(nf):
606 if not ignore(nf):
607 match.dir(nf)
607 match.dir(nf)
608 wadd(nf)
608 wadd(nf)
609 if nf in dmap and matchfn(nf):
609 if nf in dmap and matchfn(nf):
610 results[nf] = None
610 results[nf] = None
611 elif kind == regkind or kind == lnkkind:
611 elif kind == regkind or kind == lnkkind:
612 if nf in dmap:
612 if nf in dmap:
613 if matchfn(nf):
613 if matchfn(nf):
614 results[nf] = st
614 results[nf] = st
615 elif matchfn(nf) and not ignore(nf):
615 elif matchfn(nf) and not ignore(nf):
616 results[nf] = st
616 results[nf] = st
617 elif nf in dmap and matchfn(nf):
617 elif nf in dmap and matchfn(nf):
618 results[nf] = None
618 results[nf] = None
619
619
620 # step 3: report unseen items in the dmap hash
620 # step 3: report unseen items in the dmap hash
621 if not skipstep3 and not exact:
621 if not skipstep3 and not exact:
622 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
622 visit = sorted([f for f in dmap if f not in results and matchfn(f)])
623 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
623 for nf, st in zip(visit, util.statfiles([join(i) for i in visit])):
624 if st is not None and getkind(st.st_mode) not in (regkind, lnkkind):
624 if st is not None and getkind(st.st_mode) not in (regkind, lnkkind):
625 st = None
625 st = None
626 results[nf] = st
626 results[nf] = st
627 for s in subrepos:
627 for s in subrepos:
628 del results[s]
628 del results[s]
629 del results['.hg']
629 del results['.hg']
630 return results
630 return results
631
631
632 def status(self, match, subrepos, ignored, clean, unknown):
632 def status(self, match, subrepos, ignored, clean, unknown):
633 '''Determine the status of the working copy relative to the
633 '''Determine the status of the working copy relative to the
634 dirstate and return a tuple of lists (unsure, modified, added,
634 dirstate and return a tuple of lists (unsure, modified, added,
635 removed, deleted, unknown, ignored, clean), where:
635 removed, deleted, unknown, ignored, clean), where:
636
636
637 unsure:
637 unsure:
638 files that might have been modified since the dirstate was
638 files that might have been modified since the dirstate was
639 written, but need to be read to be sure (size is the same
639 written, but need to be read to be sure (size is the same
640 but mtime differs)
640 but mtime differs)
641 modified:
641 modified:
642 files that have definitely been modified since the dirstate
642 files that have definitely been modified since the dirstate
643 was written (different size or mode)
643 was written (different size or mode)
644 added:
644 added:
645 files that have been explicitly added with hg add
645 files that have been explicitly added with hg add
646 removed:
646 removed:
647 files that have been explicitly removed with hg remove
647 files that have been explicitly removed with hg remove
648 deleted:
648 deleted:
649 files that have been deleted through other means ("missing")
649 files that have been deleted through other means ("missing")
650 unknown:
650 unknown:
651 files not in the dirstate that are not ignored
651 files not in the dirstate that are not ignored
652 ignored:
652 ignored:
653 files not in the dirstate that are ignored
653 files not in the dirstate that are ignored
654 (by _dirignore())
654 (by _dirignore())
655 clean:
655 clean:
656 files that have definitely not been modified since the
656 files that have definitely not been modified since the
657 dirstate was written
657 dirstate was written
658 '''
658 '''
659 listignored, listclean, listunknown = ignored, clean, unknown
659 listignored, listclean, listunknown = ignored, clean, unknown
660 lookup, modified, added, unknown, ignored = [], [], [], [], []
660 lookup, modified, added, unknown, ignored = [], [], [], [], []
661 removed, deleted, clean = [], [], []
661 removed, deleted, clean = [], [], []
662
662
663 dmap = self._map
663 dmap = self._map
664 ladd = lookup.append # aka "unsure"
664 ladd = lookup.append # aka "unsure"
665 madd = modified.append
665 madd = modified.append
666 aadd = added.append
666 aadd = added.append
667 uadd = unknown.append
667 uadd = unknown.append
668 iadd = ignored.append
668 iadd = ignored.append
669 radd = removed.append
669 radd = removed.append
670 dadd = deleted.append
670 dadd = deleted.append
671 cadd = clean.append
671 cadd = clean.append
672
672
673 lnkkind = stat.S_IFLNK
673 lnkkind = stat.S_IFLNK
674
674
675 for fn, st in self.walk(match, subrepos, listunknown,
675 for fn, st in self.walk(match, subrepos, listunknown,
676 listignored).iteritems():
676 listignored).iteritems():
677 if fn not in dmap:
677 if fn not in dmap:
678 if (listignored or match.exact(fn)) and self._dirignore(fn):
678 if (listignored or match.exact(fn)) and self._dirignore(fn):
679 if listignored:
679 if listignored:
680 iadd(fn)
680 iadd(fn)
681 elif listunknown:
681 elif listunknown:
682 uadd(fn)
682 uadd(fn)
683 continue
683 continue
684
684
685 state, mode, size, time = dmap[fn]
685 state, mode, size, time = dmap[fn]
686
686
687 if not st and state in "nma":
687 if not st and state in "nma":
688 dadd(fn)
688 dadd(fn)
689 elif state == 'n':
689 elif state == 'n':
690 # The "mode & lnkkind != lnkkind or self._checklink"
690 # The "mode & lnkkind != lnkkind or self._checklink"
691 # lines are an expansion of "islink => checklink"
691 # lines are an expansion of "islink => checklink"
692 # where islink means "is this a link?" and checklink
692 # where islink means "is this a link?" and checklink
693 # means "can we check links?".
693 # means "can we check links?".
694 mtime = int(st.st_mtime)
694 mtime = int(st.st_mtime)
695 if (size >= 0 and
695 if (size >= 0 and
696 (size != st.st_size
696 (size != st.st_size
697 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
697 or ((mode ^ st.st_mode) & 0100 and self._checkexec))
698 and (mode & lnkkind != lnkkind or self._checklink)
698 and (mode & lnkkind != lnkkind or self._checklink)
699 or size == -2 # other parent
699 or size == -2 # other parent
700 or fn in self._copymap):
700 or fn in self._copymap):
701 madd(fn)
701 madd(fn)
702 elif (mtime != time
702 elif (mtime != time
703 and (mode & lnkkind != lnkkind or self._checklink)):
703 and (mode & lnkkind != lnkkind or self._checklink)):
704 ladd(fn)
704 ladd(fn)
705 elif mtime == self._lastnormaltime:
705 elif mtime == self._lastnormaltime:
706 # fn may have been changed in the same timeslot without
706 # fn may have been changed in the same timeslot without
707 # changing its size. This can happen if we quickly do
707 # changing its size. This can happen if we quickly do
708 # multiple commits in a single transaction.
708 # multiple commits in a single transaction.
709 # Force lookup, so we don't miss such a racy file change.
709 # Force lookup, so we don't miss such a racy file change.
710 ladd(fn)
710 ladd(fn)
711 elif listclean:
711 elif listclean:
712 cadd(fn)
712 cadd(fn)
713 elif state == 'm':
713 elif state == 'm':
714 madd(fn)
714 madd(fn)
715 elif state == 'a':
715 elif state == 'a':
716 aadd(fn)
716 aadd(fn)
717 elif state == 'r':
717 elif state == 'r':
718 radd(fn)
718 radd(fn)
719
719
720 return (lookup, modified, added, removed, deleted, unknown, ignored,
720 return (lookup, modified, added, removed, deleted, unknown, ignored,
721 clean)
721 clean)
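A usage sketch of the eight-tuple returned by status(), matching the docstring above (the matcher construction is elided; names here are hypothetical):

    # 'm' is a matcher covering the paths of interest
    lookup, modified, added, removed, deleted, unknown, ignored, clean = \
        repo.dirstate.status(m, [], False, False, True)
    # positional booleans follow the signature above: ignored, clean, unknown
    for f in lookup:
        pass  # "unsure": same size but different mtime; needs a content compare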
@@ -1,156 +1,156 b''
1 # changelog bisection for mercurial
1 # changelog bisection for mercurial
2 #
2 #
3 # Copyright 2007 Matt Mackall
3 # Copyright 2007 Matt Mackall
4 # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
4 # Copyright 2005, 2006 Benoit Boissinot <benoit.boissinot@ens-lyon.org>
5 #
5 #
6 # Inspired by git bisect, extension skeleton taken from mq.py.
6 # Inspired by git bisect, extension skeleton taken from mq.py.
7 #
7 #
8 # This software may be used and distributed according to the terms of the
8 # This software may be used and distributed according to the terms of the
9 # GNU General Public License version 2 or any later version.
9 # GNU General Public License version 2 or any later version.
10
10
11 import os
11 import os
12 from i18n import _
12 from i18n import _
13 from node import short, hex
13 from node import short, hex
14 import util
14 import util
15
15
16 def bisect(changelog, state):
16 def bisect(changelog, state):
17 """find the next node (if any) for testing during a bisect search.
17 """find the next node (if any) for testing during a bisect search.
18 Returns a (nodes, number, good) tuple.
18 Returns a (nodes, number, good) tuple.
19
19
20 'nodes' is the final result of the bisect if 'number' is 0.
20 'nodes' is the final result of the bisect if 'number' is 0.
21 Otherwise 'number' indicates the remaining possible candidates for
21 Otherwise 'number' indicates the remaining possible candidates for
22 the search and 'nodes' contains the next bisect target.
22 the search and 'nodes' contains the next bisect target.
23 'good' is True if bisect is searching for a first good changeset, False
23 'good' is True if bisect is searching for a first good changeset, False
24 if searching for a first bad one.
24 if searching for a first bad one.
25 """
25 """
26
26
27 clparents = changelog.parentrevs
27 clparents = changelog.parentrevs
28 skip = set([changelog.rev(n) for n in state['skip']])
28 skip = set([changelog.rev(n) for n in state['skip']])
29
29
30 def buildancestors(bad, good):
30 def buildancestors(bad, good):
31 # only the earliest bad revision matters
31 # only the earliest bad revision matters
32 badrev = min([changelog.rev(n) for n in bad])
32 badrev = min([changelog.rev(n) for n in bad])
33 goodrevs = [changelog.rev(n) for n in good]
33 goodrevs = [changelog.rev(n) for n in good]
34 goodrev = min(goodrevs)
34 goodrev = min(goodrevs)
35 # build visit array
35 # build visit array
36 ancestors = [None] * (len(changelog) + 1) # an extra for [-1]
36 ancestors = [None] * (len(changelog) + 1) # an extra for [-1]
37
37
38 # set nodes descended from goodrevs
38 # set nodes descended from goodrevs
39 for rev in goodrevs:
39 for rev in goodrevs:
40 ancestors[rev] = []
40 ancestors[rev] = []
41 for rev in xrange(goodrev + 1, len(changelog)):
41 for rev in xrange(goodrev + 1, len(changelog)):
42 for prev in clparents(rev):
42 for prev in clparents(rev):
43 if ancestors[prev] == []:
43 if ancestors[prev] == []:
44 ancestors[rev] = []
44 ancestors[rev] = []
45
45
46 # clear good revs from array
46 # clear good revs from array
47 for rev in goodrevs:
47 for rev in goodrevs:
48 ancestors[rev] = None
48 ancestors[rev] = None
49 for rev in xrange(len(changelog), goodrev, -1):
49 for rev in xrange(len(changelog), goodrev, -1):
50 if ancestors[rev] is None:
50 if ancestors[rev] is None:
51 for prev in clparents(rev):
51 for prev in clparents(rev):
52 ancestors[prev] = None
52 ancestors[prev] = None
53
53
54 if ancestors[badrev] is None:
54 if ancestors[badrev] is None:
55 return badrev, None
55 return badrev, None
56 return badrev, ancestors
56 return badrev, ancestors
57
57
58 good = False
58 good = False
59 badrev, ancestors = buildancestors(state['bad'], state['good'])
59 badrev, ancestors = buildancestors(state['bad'], state['good'])
60 if not ancestors: # looking for bad to good transition?
60 if not ancestors: # looking for bad to good transition?
61 good = True
61 good = True
62 badrev, ancestors = buildancestors(state['good'], state['bad'])
62 badrev, ancestors = buildancestors(state['good'], state['bad'])
63 bad = changelog.node(badrev)
63 bad = changelog.node(badrev)
64 if not ancestors: # now we're confused
64 if not ancestors: # now we're confused
65 if len(state['bad']) == 1 and len(state['good']) == 1:
65 if len(state['bad']) == 1 and len(state['good']) == 1:
66 raise util.Abort(_("starting revisions are not directly related"))
66 raise util.Abort(_("starting revisions are not directly related"))
67 raise util.Abort(_("inconsistent state, %s:%s is good and bad")
67 raise util.Abort(_("inconsistent state, %s:%s is good and bad")
68 % (badrev, short(bad)))
68 % (badrev, short(bad)))
69
69
70 # build children dict
70 # build children dict
71 children = {}
71 children = {}
72 visit = [badrev]
72 visit = [badrev]
73 candidates = []
73 candidates = []
74 while visit:
74 while visit:
75 rev = visit.pop(0)
75 rev = visit.pop(0)
76 if ancestors[rev] == []:
76 if ancestors[rev] == []:
77 candidates.append(rev)
77 candidates.append(rev)
78 for prev in clparents(rev):
78 for prev in clparents(rev):
79 if prev != -1:
79 if prev != -1:
80 if prev in children:
80 if prev in children:
81 children[prev].append(rev)
81 children[prev].append(rev)
82 else:
82 else:
83 children[prev] = [rev]
83 children[prev] = [rev]
84 visit.append(prev)
84 visit.append(prev)
85
85
86 candidates.sort()
86 candidates.sort()
87 # have we narrowed it down to one entry?
87 # have we narrowed it down to one entry?
88 # or have all other possible candidates besides 'bad' been skipped?
88 # or have all other possible candidates besides 'bad' been skipped?
89 tot = len(candidates)
89 tot = len(candidates)
90 unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
90 unskipped = [c for c in candidates if (c not in skip) and (c != badrev)]
91 if tot == 1 or not unskipped:
91 if tot == 1 or not unskipped:
92 return ([changelog.node(rev) for rev in candidates], 0, good)
92 return ([changelog.node(rev) for rev in candidates], 0, good)
93 perfect = tot // 2
93 perfect = tot // 2
94
94
95 # find the best node to test
95 # find the best node to test
96 best_rev = None
96 best_rev = None
97 best_len = -1
97 best_len = -1
98 poison = set()
98 poison = set()
99 for rev in candidates:
99 for rev in candidates:
100 if rev in poison:
100 if rev in poison:
101 # poison children
101 # poison children
102 poison.update(children.get(rev, []))
102 poison.update(children.get(rev, []))
103 continue
103 continue
104
104
105 a = ancestors[rev] or [rev]
105 a = ancestors[rev] or [rev]
106 ancestors[rev] = None
106 ancestors[rev] = None
107
107
108 x = len(a) # number of ancestors
108 x = len(a) # number of ancestors
109 y = tot - x # number of non-ancestors
109 y = tot - x # number of non-ancestors
110 value = min(x, y) # how good is this test?
110 value = min(x, y) # how good is this test?
111 if value > best_len and rev not in skip:
111 if value > best_len and rev not in skip:
112 best_len = value
112 best_len = value
113 best_rev = rev
113 best_rev = rev
114 if value == perfect: # found a perfect candidate? quit early
114 if value == perfect: # found a perfect candidate? quit early
115 break
115 break
116
116
117 if y < perfect and rev not in skip: # all downhill from here?
117 if y < perfect and rev not in skip: # all downhill from here?
118 # poison children
118 # poison children
119 poison.update(children.get(rev, []))
119 poison.update(children.get(rev, []))
120 continue
120 continue
121
121
122 for c in children.get(rev, []):
122 for c in children.get(rev, []):
123 if ancestors[c]:
123 if ancestors[c]:
124 ancestors[c] = list(set(ancestors[c] + a))
124 ancestors[c] = list(set(ancestors[c] + a))
125 else:
125 else:
126 ancestors[c] = a + [c]
126 ancestors[c] = a + [c]
127
127
128 assert best_rev is not None
128 assert best_rev is not None
129 best_node = changelog.node(best_rev)
129 best_node = changelog.node(best_rev)
130
130
131 return ([best_node], tot, good)
131 return ([best_node], tot, good)
132
132
133
133
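A worked example of the candidate scoring above, assuming a linear history: with tot = 7 candidates, perfect = 7 // 2 = 3, and a revision with x candidate ancestors scores value = min(x, tot - x), so the midpoint is the best test point and each round roughly halves the search space:

    tot = 7
    perfect = tot // 2                                 # == 3
    scores = [min(x, tot - x) for x in range(1, tot)]
    assert scores == [1, 2, 3, 3, 2, 1]                # x == 3 or 4 hits 'perfect'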
134 def load_state(repo):
134 def load_state(repo):
135 state = {'good': [], 'bad': [], 'skip': []}
135 state = {'good': [], 'bad': [], 'skip': []}
136 if os.path.exists(repo.join("bisect.state")):
136 if os.path.exists(repo.join("bisect.state")):
137 for l in repo.opener("bisect.state"):
137 for l in repo.opener("bisect.state"):
138 kind, node = l[:-1].split()
138 kind, node = l[:-1].split()
139 node = repo.lookup(node)
139 node = repo.lookup(node)
140 if kind not in state:
140 if kind not in state:
141 raise util.Abort(_("unknown bisect kind %s") % kind)
141 raise util.Abort(_("unknown bisect kind %s") % kind)
142 state[kind].append(node)
142 state[kind].append(node)
143 return state
143 return state
144
144
145
145
146 def save_state(repo, state):
146 def save_state(repo, state):
147 f = repo.opener("bisect.state", "w", atomictemp=True)
147 f = repo.opener("bisect.state", "w", atomictemp=True)
148 wlock = repo.wlock()
148 wlock = repo.wlock()
149 try:
149 try:
150 for kind in state:
150 for kind in state:
151 for node in state[kind]:
151 for node in state[kind]:
152 f.write("%s %s\n" % (kind, hex(node)))
152 f.write("%s %s\n" % (kind, hex(node)))
153 f.rename()
153 f.close()
154 finally:
154 finally:
155 wlock.release()
155 wlock.release()
156
156
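
This is the change the commit describes: the atomic temp file is now renamed into place by close(), like any other file-like object, instead of by a separate rename() call. A minimal standalone sketch of the underlying write-then-rename pattern (the atomicwrite helper is an illustration, not Mercurial's actual atomictempfile):

    import os
    import tempfile

    def atomicwrite(path, data):
        # write to a temp file in the target directory, then rename it
        # into place so readers never observe a half-written file
        dirname = os.path.dirname(path) or '.'
        fd, tmp = tempfile.mkstemp(dir=dirname, prefix='.tmp-')
        try:
            os.write(fd, data)
            os.close(fd)
            os.rename(tmp, path)  # atomic on POSIX filesystems
        except Exception:
            os.unlink(tmp)
            raise
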
@@ -1,2058 +1,2058 b''
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup, subrepo, discovery, pushkey
10 import repo, changegroup, subrepo, discovery, pushkey
11 import changelog, dirstate, filelog, manifest, context, bookmarks
11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 import lock, transaction, store, encoding
12 import lock, transaction, store, encoding
13 import scmutil, util, extensions, hook, error, revset
13 import scmutil, util, extensions, hook, error, revset
14 import match as matchmod
14 import match as matchmod
15 import merge as mergemod
15 import merge as mergemod
16 import tags as tagsmod
16 import tags as tagsmod
17 from lock import release
17 from lock import release
18 import weakref, errno, os, time, inspect
18 import weakref, errno, os, time, inspect
19 propertycache = util.propertycache
19 propertycache = util.propertycache
20 filecache = scmutil.filecache
20 filecache = scmutil.filecache
21
21
22 class localrepository(repo.repository):
22 class localrepository(repo.repository):
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
23 capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
24 'known', 'getbundle'))
24 'known', 'getbundle'))
25 supportedformats = set(('revlogv1', 'generaldelta'))
25 supportedformats = set(('revlogv1', 'generaldelta'))
26 supported = supportedformats | set(('store', 'fncache', 'shared',
26 supported = supportedformats | set(('store', 'fncache', 'shared',
27 'dotencode'))
27 'dotencode'))
28
28
29 def __init__(self, baseui, path=None, create=False):
29 def __init__(self, baseui, path=None, create=False):
30 repo.repository.__init__(self)
30 repo.repository.__init__(self)
31 self.root = os.path.realpath(util.expandpath(path))
31 self.root = os.path.realpath(util.expandpath(path))
32 self.path = os.path.join(self.root, ".hg")
32 self.path = os.path.join(self.root, ".hg")
33 self.origroot = path
33 self.origroot = path
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
34 self.auditor = scmutil.pathauditor(self.root, self._checknested)
35 self.opener = scmutil.opener(self.path)
35 self.opener = scmutil.opener(self.path)
36 self.wopener = scmutil.opener(self.root)
36 self.wopener = scmutil.opener(self.root)
37 self.baseui = baseui
37 self.baseui = baseui
38 self.ui = baseui.copy()
38 self.ui = baseui.copy()
39
39
40 try:
40 try:
41 self.ui.readconfig(self.join("hgrc"), self.root)
41 self.ui.readconfig(self.join("hgrc"), self.root)
42 extensions.loadall(self.ui)
42 extensions.loadall(self.ui)
43 except IOError:
43 except IOError:
44 pass
44 pass
45
45
46 if not os.path.isdir(self.path):
46 if not os.path.isdir(self.path):
47 if create:
47 if create:
48 if not os.path.exists(path):
48 if not os.path.exists(path):
49 util.makedirs(path)
49 util.makedirs(path)
50 util.makedir(self.path, notindexed=True)
50 util.makedir(self.path, notindexed=True)
51 requirements = ["revlogv1"]
51 requirements = ["revlogv1"]
52 if self.ui.configbool('format', 'usestore', True):
52 if self.ui.configbool('format', 'usestore', True):
53 os.mkdir(os.path.join(self.path, "store"))
53 os.mkdir(os.path.join(self.path, "store"))
54 requirements.append("store")
54 requirements.append("store")
55 if self.ui.configbool('format', 'usefncache', True):
55 if self.ui.configbool('format', 'usefncache', True):
56 requirements.append("fncache")
56 requirements.append("fncache")
57 if self.ui.configbool('format', 'dotencode', True):
57 if self.ui.configbool('format', 'dotencode', True):
58 requirements.append('dotencode')
58 requirements.append('dotencode')
59 # create an invalid changelog
59 # create an invalid changelog
60 self.opener.append(
60 self.opener.append(
61 "00changelog.i",
61 "00changelog.i",
62 '\0\0\0\2' # represents revlogv2
62 '\0\0\0\2' # represents revlogv2
63 ' dummy changelog to prevent using the old repo layout'
63 ' dummy changelog to prevent using the old repo layout'
64 )
64 )
65 if self.ui.configbool('format', 'generaldelta', False):
65 if self.ui.configbool('format', 'generaldelta', False):
66 requirements.append("generaldelta")
66 requirements.append("generaldelta")
67 requirements = set(requirements)
67 requirements = set(requirements)
68 else:
68 else:
69 raise error.RepoError(_("repository %s not found") % path)
69 raise error.RepoError(_("repository %s not found") % path)
70 elif create:
70 elif create:
71 raise error.RepoError(_("repository %s already exists") % path)
71 raise error.RepoError(_("repository %s already exists") % path)
72 else:
72 else:
73 try:
73 try:
74 requirements = scmutil.readrequires(self.opener, self.supported)
74 requirements = scmutil.readrequires(self.opener, self.supported)
75 except IOError, inst:
75 except IOError, inst:
76 if inst.errno != errno.ENOENT:
76 if inst.errno != errno.ENOENT:
77 raise
77 raise
78 requirements = set()
78 requirements = set()
79
79
80 self.sharedpath = self.path
80 self.sharedpath = self.path
81 try:
81 try:
82 s = os.path.realpath(self.opener.read("sharedpath"))
82 s = os.path.realpath(self.opener.read("sharedpath"))
83 if not os.path.exists(s):
83 if not os.path.exists(s):
84 raise error.RepoError(
84 raise error.RepoError(
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
85 _('.hg/sharedpath points to nonexistent directory %s') % s)
86 self.sharedpath = s
86 self.sharedpath = s
87 except IOError, inst:
87 except IOError, inst:
88 if inst.errno != errno.ENOENT:
88 if inst.errno != errno.ENOENT:
89 raise
89 raise
90
90
91 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
91 self.store = store.store(requirements, self.sharedpath, scmutil.opener)
92 self.spath = self.store.path
92 self.spath = self.store.path
93 self.sopener = self.store.opener
93 self.sopener = self.store.opener
94 self.sjoin = self.store.join
94 self.sjoin = self.store.join
95 self.opener.createmode = self.store.createmode
95 self.opener.createmode = self.store.createmode
96 self._applyrequirements(requirements)
96 self._applyrequirements(requirements)
97 if create:
97 if create:
98 self._writerequirements()
98 self._writerequirements()
99
99
100
100
101 self._branchcache = None
101 self._branchcache = None
102 self._branchcachetip = None
102 self._branchcachetip = None
103 self.filterpats = {}
103 self.filterpats = {}
104 self._datafilters = {}
104 self._datafilters = {}
105 self._transref = self._lockref = self._wlockref = None
105 self._transref = self._lockref = self._wlockref = None
106
106
107 # A cache for various files under .hg/ that tracks file changes,
107 # A cache for various files under .hg/ that tracks file changes,
108 # (used by the filecache decorator)
108 # (used by the filecache decorator)
109 #
109 #
110 # Maps a property name to its util.filecacheentry
110 # Maps a property name to its util.filecacheentry
111 self._filecache = {}
111 self._filecache = {}
112
112
113 def _applyrequirements(self, requirements):
113 def _applyrequirements(self, requirements):
114 self.requirements = requirements
114 self.requirements = requirements
115 openerreqs = set(('revlogv1', 'generaldelta'))
115 openerreqs = set(('revlogv1', 'generaldelta'))
116 self.sopener.options = dict((r, 1) for r in requirements
116 self.sopener.options = dict((r, 1) for r in requirements
117 if r in openerreqs)
117 if r in openerreqs)
118
118
119 def _writerequirements(self):
119 def _writerequirements(self):
120 reqfile = self.opener("requires", "w")
120 reqfile = self.opener("requires", "w")
121 for r in self.requirements:
121 for r in self.requirements:
122 reqfile.write("%s\n" % r)
122 reqfile.write("%s\n" % r)
123 reqfile.close()
123 reqfile.close()
124
124
125 def _checknested(self, path):
125 def _checknested(self, path):
126 """Determine if path is a legal nested repository."""
126 """Determine if path is a legal nested repository."""
127 if not path.startswith(self.root):
127 if not path.startswith(self.root):
128 return False
128 return False
129 subpath = path[len(self.root) + 1:]
129 subpath = path[len(self.root) + 1:]
130
130
131 # XXX: Checking against the current working copy is wrong in
131 # XXX: Checking against the current working copy is wrong in
132 # the sense that it can reject things like
132 # the sense that it can reject things like
133 #
133 #
134 # $ hg cat -r 10 sub/x.txt
134 # $ hg cat -r 10 sub/x.txt
135 #
135 #
136 # if sub/ is no longer a subrepository in the working copy
136 # if sub/ is no longer a subrepository in the working copy
137 # parent revision.
137 # parent revision.
138 #
138 #
139 # However, it can of course also allow things that would have
139 # However, it can of course also allow things that would have
140 # been rejected before, such as the above cat command if sub/
140 # been rejected before, such as the above cat command if sub/
141 # is a subrepository now, but was a normal directory before.
141 # is a subrepository now, but was a normal directory before.
142 # The old path auditor would have rejected by mistake since it
142 # The old path auditor would have rejected by mistake since it
143 # panics when it sees sub/.hg/.
143 # panics when it sees sub/.hg/.
144 #
144 #
145 # All in all, checking against the working copy seems sensible
145 # All in all, checking against the working copy seems sensible
146 # since we want to prevent access to nested repositories on
146 # since we want to prevent access to nested repositories on
147 # the filesystem *now*.
147 # the filesystem *now*.
148 ctx = self[None]
148 ctx = self[None]
149 parts = util.splitpath(subpath)
149 parts = util.splitpath(subpath)
150 while parts:
150 while parts:
151 prefix = os.sep.join(parts)
151 prefix = os.sep.join(parts)
152 if prefix in ctx.substate:
152 if prefix in ctx.substate:
153 if prefix == subpath:
153 if prefix == subpath:
154 return True
154 return True
155 else:
155 else:
156 sub = ctx.sub(prefix)
156 sub = ctx.sub(prefix)
157 return sub.checknested(subpath[len(prefix) + 1:])
157 return sub.checknested(subpath[len(prefix) + 1:])
158 else:
158 else:
159 parts.pop()
159 parts.pop()
160 return False
160 return False
161
161
162 @filecache('bookmarks')
162 @filecache('bookmarks')
163 def _bookmarks(self):
163 def _bookmarks(self):
164 return bookmarks.read(self)
164 return bookmarks.read(self)
165
165
166 @filecache('bookmarks.current')
166 @filecache('bookmarks.current')
167 def _bookmarkcurrent(self):
167 def _bookmarkcurrent(self):
168 return bookmarks.readcurrent(self)
168 return bookmarks.readcurrent(self)
169
169
170 @filecache('00changelog.i', True)
170 @filecache('00changelog.i', True)
171 def changelog(self):
171 def changelog(self):
172 c = changelog.changelog(self.sopener)
172 c = changelog.changelog(self.sopener)
173 if 'HG_PENDING' in os.environ:
173 if 'HG_PENDING' in os.environ:
174 p = os.environ['HG_PENDING']
174 p = os.environ['HG_PENDING']
175 if p.startswith(self.root):
175 if p.startswith(self.root):
176 c.readpending('00changelog.i.a')
176 c.readpending('00changelog.i.a')
177 return c
177 return c
178
178
179 @filecache('00manifest.i', True)
179 @filecache('00manifest.i', True)
180 def manifest(self):
180 def manifest(self):
181 return manifest.manifest(self.sopener)
181 return manifest.manifest(self.sopener)
182
182
183 @filecache('dirstate')
183 @filecache('dirstate')
184 def dirstate(self):
184 def dirstate(self):
185 warned = [0]
185 warned = [0]
186 def validate(node):
186 def validate(node):
187 try:
187 try:
188 self.changelog.rev(node)
188 self.changelog.rev(node)
189 return node
189 return node
190 except error.LookupError:
190 except error.LookupError:
191 if not warned[0]:
191 if not warned[0]:
192 warned[0] = True
192 warned[0] = True
193 self.ui.warn(_("warning: ignoring unknown"
193 self.ui.warn(_("warning: ignoring unknown"
194 " working parent %s!\n") % short(node))
194 " working parent %s!\n") % short(node))
195 return nullid
195 return nullid
196
196
197 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
197 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
198
198
199 def __getitem__(self, changeid):
199 def __getitem__(self, changeid):
200 if changeid is None:
200 if changeid is None:
201 return context.workingctx(self)
201 return context.workingctx(self)
202 return context.changectx(self, changeid)
202 return context.changectx(self, changeid)
203
203
204 def __contains__(self, changeid):
204 def __contains__(self, changeid):
205 try:
205 try:
206 return bool(self.lookup(changeid))
206 return bool(self.lookup(changeid))
207 except error.RepoLookupError:
207 except error.RepoLookupError:
208 return False
208 return False
209
209
210 def __nonzero__(self):
210 def __nonzero__(self):
211 return True
211 return True
212
212
213 def __len__(self):
213 def __len__(self):
214 return len(self.changelog)
214 return len(self.changelog)
215
215
216 def __iter__(self):
216 def __iter__(self):
217 for i in xrange(len(self)):
217 for i in xrange(len(self)):
218 yield i
218 yield i
219
219
220 def set(self, expr, *args):
220 def set(self, expr, *args):
221 '''
221 '''
222 Yield a context for each matching revision, after doing arg
222 Yield a context for each matching revision, after doing arg
223 replacement via revset.formatspec
223 replacement via revset.formatspec
224 '''
224 '''
225
225
226 expr = revset.formatspec(expr, *args)
226 expr = revset.formatspec(expr, *args)
227 m = revset.match(None, expr)
227 m = revset.match(None, expr)
228 for r in m(self, range(len(self))):
228 for r in m(self, range(len(self))):
229 yield self[r]
229 yield self[r]
230
230
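
A caller might use it like this (illustrative usage; 'default' is just a sample branch name, and formatspec quotes the argument before splicing it into the expression):

    for ctx in repo.set('branch(%s) and head()', 'default'):
        print ctx.rev(), ctx.branch()
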
231 def url(self):
231 def url(self):
232 return 'file:' + self.root
232 return 'file:' + self.root
233
233
234 def hook(self, name, throw=False, **args):
234 def hook(self, name, throw=False, **args):
235 return hook.hook(self.ui, self, name, throw, **args)
235 return hook.hook(self.ui, self, name, throw, **args)
236
236
237 tag_disallowed = ':\r\n'
237 tag_disallowed = ':\r\n'
238
238
239 def _tag(self, names, node, message, local, user, date, extra={}):
239 def _tag(self, names, node, message, local, user, date, extra={}):
240 if isinstance(names, str):
240 if isinstance(names, str):
241 allchars = names
241 allchars = names
242 names = (names,)
242 names = (names,)
243 else:
243 else:
244 allchars = ''.join(names)
244 allchars = ''.join(names)
245 for c in self.tag_disallowed:
245 for c in self.tag_disallowed:
246 if c in allchars:
246 if c in allchars:
247 raise util.Abort(_('%r cannot be used in a tag name') % c)
247 raise util.Abort(_('%r cannot be used in a tag name') % c)
248
248
249 branches = self.branchmap()
249 branches = self.branchmap()
250 for name in names:
250 for name in names:
251 self.hook('pretag', throw=True, node=hex(node), tag=name,
251 self.hook('pretag', throw=True, node=hex(node), tag=name,
252 local=local)
252 local=local)
253 if name in branches:
253 if name in branches:
254 self.ui.warn(_("warning: tag %s conflicts with existing"
254 self.ui.warn(_("warning: tag %s conflicts with existing"
255 " branch name\n") % name)
255 " branch name\n") % name)
256
256
257 def writetags(fp, names, munge, prevtags):
257 def writetags(fp, names, munge, prevtags):
258 fp.seek(0, 2)
258 fp.seek(0, 2)
259 if prevtags and prevtags[-1] != '\n':
259 if prevtags and prevtags[-1] != '\n':
260 fp.write('\n')
260 fp.write('\n')
261 for name in names:
261 for name in names:
262 m = munge and munge(name) or name
262 m = munge and munge(name) or name
263 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
263 if self._tagscache.tagtypes and name in self._tagscache.tagtypes:
264 old = self.tags().get(name, nullid)
264 old = self.tags().get(name, nullid)
265 fp.write('%s %s\n' % (hex(old), m))
265 fp.write('%s %s\n' % (hex(old), m))
266 fp.write('%s %s\n' % (hex(node), m))
266 fp.write('%s %s\n' % (hex(node), m))
267 fp.close()
267 fp.close()
268
268
269 prevtags = ''
269 prevtags = ''
270 if local:
270 if local:
271 try:
271 try:
272 fp = self.opener('localtags', 'r+')
272 fp = self.opener('localtags', 'r+')
273 except IOError:
273 except IOError:
274 fp = self.opener('localtags', 'a')
274 fp = self.opener('localtags', 'a')
275 else:
275 else:
276 prevtags = fp.read()
276 prevtags = fp.read()
277
277
278 # local tags are stored in the current charset
278 # local tags are stored in the current charset
279 writetags(fp, names, None, prevtags)
279 writetags(fp, names, None, prevtags)
280 for name in names:
280 for name in names:
281 self.hook('tag', node=hex(node), tag=name, local=local)
281 self.hook('tag', node=hex(node), tag=name, local=local)
282 return
282 return
283
283
284 try:
284 try:
285 fp = self.wfile('.hgtags', 'rb+')
285 fp = self.wfile('.hgtags', 'rb+')
286 except IOError, e:
286 except IOError, e:
287 if e.errno != errno.ENOENT:
287 if e.errno != errno.ENOENT:
288 raise
288 raise
289 fp = self.wfile('.hgtags', 'ab')
289 fp = self.wfile('.hgtags', 'ab')
290 else:
290 else:
291 prevtags = fp.read()
291 prevtags = fp.read()
292
292
293 # committed tags are stored in UTF-8
293 # committed tags are stored in UTF-8
294 writetags(fp, names, encoding.fromlocal, prevtags)
294 writetags(fp, names, encoding.fromlocal, prevtags)
295
295
296 fp.close()
296 fp.close()
297
297
298 if '.hgtags' not in self.dirstate:
298 if '.hgtags' not in self.dirstate:
299 self[None].add(['.hgtags'])
299 self[None].add(['.hgtags'])
300
300
301 m = matchmod.exact(self.root, '', ['.hgtags'])
301 m = matchmod.exact(self.root, '', ['.hgtags'])
302 tagnode = self.commit(message, user, date, extra=extra, match=m)
302 tagnode = self.commit(message, user, date, extra=extra, match=m)
303
303
304 for name in names:
304 for name in names:
305 self.hook('tag', node=hex(node), tag=name, local=local)
305 self.hook('tag', node=hex(node), tag=name, local=local)
306
306
307 return tagnode
307 return tagnode
308
308
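
writetags appends one 'hex-node tag-name' pair per line, writing the tag's previous node first when it already exists, so the last entry for a name wins. A hypothetical .hgtags fragment after retagging stable-1.0 (hashes invented):

    6344043924497cd06d781d9014c66802285072e4 stable-1.0
    d2b3f1a7dec3e04dbe165549a2b00b702fb0e39c stable-1.0
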
309 def tag(self, names, node, message, local, user, date):
309 def tag(self, names, node, message, local, user, date):
310 '''tag a revision with one or more symbolic names.
310 '''tag a revision with one or more symbolic names.
311
311
312 names is a list of strings or, when adding a single tag, names may be a
312 names is a list of strings or, when adding a single tag, names may be a
313 string.
313 string.
314
314
315 if local is True, the tags are stored in a per-repository file.
315 if local is True, the tags are stored in a per-repository file.
316 otherwise, they are stored in the .hgtags file, and a new
316 otherwise, they are stored in the .hgtags file, and a new
317 changeset is committed with the change.
317 changeset is committed with the change.
318
318
319 keyword arguments:
319 keyword arguments:
320
320
321 local: whether to store tags in non-version-controlled file
321 local: whether to store tags in non-version-controlled file
322 (default False)
322 (default False)
323
323
324 message: commit message to use if committing
324 message: commit message to use if committing
325
325
326 user: name of user to use if committing
326 user: name of user to use if committing
327
327
328 date: date tuple to use if committing'''
328 date: date tuple to use if committing'''
329
329
330 if not local:
330 if not local:
331 for x in self.status()[:5]:
331 for x in self.status()[:5]:
332 if '.hgtags' in x:
332 if '.hgtags' in x:
333 raise util.Abort(_('working copy of .hgtags is changed '
333 raise util.Abort(_('working copy of .hgtags is changed '
334 '(please commit .hgtags manually)'))
334 '(please commit .hgtags manually)'))
335
335
336 self.tags() # instantiate the cache
336 self.tags() # instantiate the cache
337 self._tag(names, node, message, local, user, date)
337 self._tag(names, node, message, local, user, date)
338
338
339 @propertycache
339 @propertycache
340 def _tagscache(self):
340 def _tagscache(self):
341 '''Returns a tagscache object that contains various tag-related caches.'''
341 '''Returns a tagscache object that contains various tag-related caches.'''
342
342
343 # This simplifies its cache management by having one decorated
343 # This simplifies its cache management by having one decorated
344 # function (this one) and the rest simply fetch things from it.
344 # function (this one) and the rest simply fetch things from it.
345 class tagscache(object):
345 class tagscache(object):
346 def __init__(self):
346 def __init__(self):
347 # These two define the set of tags for this repository. tags
347 # These two define the set of tags for this repository. tags
348 # maps tag name to node; tagtypes maps tag name to 'global' or
348 # maps tag name to node; tagtypes maps tag name to 'global' or
349 # 'local'. (Global tags are defined by .hgtags across all
349 # 'local'. (Global tags are defined by .hgtags across all
350 # heads, and local tags are defined in .hg/localtags.)
350 # heads, and local tags are defined in .hg/localtags.)
351 # They constitute the in-memory cache of tags.
351 # They constitute the in-memory cache of tags.
352 self.tags = self.tagtypes = None
352 self.tags = self.tagtypes = None
353
353
354 self.nodetagscache = self.tagslist = None
354 self.nodetagscache = self.tagslist = None
355
355
356 cache = tagscache()
356 cache = tagscache()
357 cache.tags, cache.tagtypes = self._findtags()
357 cache.tags, cache.tagtypes = self._findtags()
358
358
359 return cache
359 return cache
360
360
361 def tags(self):
361 def tags(self):
362 '''return a mapping of tag to node'''
362 '''return a mapping of tag to node'''
363 return self._tagscache.tags
363 return self._tagscache.tags
364
364
365 def _findtags(self):
365 def _findtags(self):
366 '''Do the hard work of finding tags. Return a pair of dicts
366 '''Do the hard work of finding tags. Return a pair of dicts
367 (tags, tagtypes) where tags maps tag name to node, and tagtypes
367 (tags, tagtypes) where tags maps tag name to node, and tagtypes
368 maps tag name to a string like \'global\' or \'local\'.
368 maps tag name to a string like \'global\' or \'local\'.
369 Subclasses or extensions are free to add their own tags, but
369 Subclasses or extensions are free to add their own tags, but
370 should be aware that the returned dicts will be retained for the
370 should be aware that the returned dicts will be retained for the
371 duration of the localrepo object.'''
371 duration of the localrepo object.'''
372
372
373 # XXX what tagtype should subclasses/extensions use? Currently
373 # XXX what tagtype should subclasses/extensions use? Currently
374 # mq and bookmarks add tags, but do not set the tagtype at all.
374 # mq and bookmarks add tags, but do not set the tagtype at all.
375 # Should each extension invent its own tag type? Should there
375 # Should each extension invent its own tag type? Should there
376 # be one tagtype for all such "virtual" tags? Or is the status
376 # be one tagtype for all such "virtual" tags? Or is the status
377 # quo fine?
377 # quo fine?
378
378
379 alltags = {} # map tag name to (node, hist)
379 alltags = {} # map tag name to (node, hist)
380 tagtypes = {}
380 tagtypes = {}
381
381
382 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
382 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
383 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
383 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
384
384
385 # Build the return dicts. Have to re-encode tag names because
385 # Build the return dicts. Have to re-encode tag names because
386 # the tags module always uses UTF-8 (in order not to lose info
386 # the tags module always uses UTF-8 (in order not to lose info
387 # writing to the cache), but the rest of Mercurial wants them in
387 # writing to the cache), but the rest of Mercurial wants them in
388 # local encoding.
388 # local encoding.
389 tags = {}
389 tags = {}
390 for (name, (node, hist)) in alltags.iteritems():
390 for (name, (node, hist)) in alltags.iteritems():
391 if node != nullid:
391 if node != nullid:
392 try:
392 try:
393 # ignore tags to unknown nodes
393 # ignore tags to unknown nodes
394 self.changelog.lookup(node)
394 self.changelog.lookup(node)
395 tags[encoding.tolocal(name)] = node
395 tags[encoding.tolocal(name)] = node
396 except error.LookupError:
396 except error.LookupError:
397 pass
397 pass
398 tags['tip'] = self.changelog.tip()
398 tags['tip'] = self.changelog.tip()
399 tagtypes = dict([(encoding.tolocal(name), value)
399 tagtypes = dict([(encoding.tolocal(name), value)
400 for (name, value) in tagtypes.iteritems()])
400 for (name, value) in tagtypes.iteritems()])
401 return (tags, tagtypes)
401 return (tags, tagtypes)
402
402
403 def tagtype(self, tagname):
403 def tagtype(self, tagname):
404 '''
404 '''
405 return the type of the given tag. result can be:
405 return the type of the given tag. result can be:
406
406
407 'local' : a local tag
407 'local' : a local tag
408 'global' : a global tag
408 'global' : a global tag
409 None : tag does not exist
409 None : tag does not exist
410 '''
410 '''
411
411
412 return self._tagscache.tagtypes.get(tagname)
412 return self._tagscache.tagtypes.get(tagname)
413
413
414 def tagslist(self):
414 def tagslist(self):
415 '''return a list of tags ordered by revision'''
415 '''return a list of tags ordered by revision'''
416 if not self._tagscache.tagslist:
416 if not self._tagscache.tagslist:
417 l = []
417 l = []
418 for t, n in self.tags().iteritems():
418 for t, n in self.tags().iteritems():
419 r = self.changelog.rev(n)
419 r = self.changelog.rev(n)
420 l.append((r, t, n))
420 l.append((r, t, n))
421 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
421 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
422
422
423 return self._tagscache.tagslist
423 return self._tagscache.tagslist
424
424
425 def nodetags(self, node):
425 def nodetags(self, node):
426 '''return the tags associated with a node'''
426 '''return the tags associated with a node'''
427 if not self._tagscache.nodetagscache:
427 if not self._tagscache.nodetagscache:
428 nodetagscache = {}
428 nodetagscache = {}
429 for t, n in self.tags().iteritems():
429 for t, n in self.tags().iteritems():
430 nodetagscache.setdefault(n, []).append(t)
430 nodetagscache.setdefault(n, []).append(t)
431 for tags in nodetagscache.itervalues():
431 for tags in nodetagscache.itervalues():
432 tags.sort()
432 tags.sort()
433 self._tagscache.nodetagscache = nodetagscache
433 self._tagscache.nodetagscache = nodetagscache
434 return self._tagscache.nodetagscache.get(node, [])
434 return self._tagscache.nodetagscache.get(node, [])
435
435
436 def nodebookmarks(self, node):
436 def nodebookmarks(self, node):
437 marks = []
437 marks = []
438 for bookmark, n in self._bookmarks.iteritems():
438 for bookmark, n in self._bookmarks.iteritems():
439 if n == node:
439 if n == node:
440 marks.append(bookmark)
440 marks.append(bookmark)
441 return sorted(marks)
441 return sorted(marks)
442
442
443 def _branchtags(self, partial, lrev):
443 def _branchtags(self, partial, lrev):
444 # TODO: rename this function?
444 # TODO: rename this function?
445 tiprev = len(self) - 1
445 tiprev = len(self) - 1
446 if lrev != tiprev:
446 if lrev != tiprev:
447 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
447 ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
448 self._updatebranchcache(partial, ctxgen)
448 self._updatebranchcache(partial, ctxgen)
449 self._writebranchcache(partial, self.changelog.tip(), tiprev)
449 self._writebranchcache(partial, self.changelog.tip(), tiprev)
450
450
451 return partial
451 return partial
452
452
453 def updatebranchcache(self):
453 def updatebranchcache(self):
454 tip = self.changelog.tip()
454 tip = self.changelog.tip()
455 if self._branchcache is not None and self._branchcachetip == tip:
455 if self._branchcache is not None and self._branchcachetip == tip:
456 return self._branchcache
456 return self._branchcache
457
457
458 oldtip = self._branchcachetip
458 oldtip = self._branchcachetip
459 self._branchcachetip = tip
459 self._branchcachetip = tip
460 if oldtip is None or oldtip not in self.changelog.nodemap:
460 if oldtip is None or oldtip not in self.changelog.nodemap:
461 partial, last, lrev = self._readbranchcache()
461 partial, last, lrev = self._readbranchcache()
462 else:
462 else:
463 lrev = self.changelog.rev(oldtip)
463 lrev = self.changelog.rev(oldtip)
464 partial = self._branchcache
464 partial = self._branchcache
465
465
466 self._branchtags(partial, lrev)
466 self._branchtags(partial, lrev)
467 # this private cache holds all heads (not just tips)
467 # this private cache holds all heads (not just tips)
468 self._branchcache = partial
468 self._branchcache = partial
469
469
470 def branchmap(self):
470 def branchmap(self):
471 '''returns a dictionary {branch: [branchheads]}'''
471 '''returns a dictionary {branch: [branchheads]}'''
472 self.updatebranchcache()
472 self.updatebranchcache()
473 return self._branchcache
473 return self._branchcache
474
474
475 def branchtags(self):
475 def branchtags(self):
476 '''return a dict where branch names map to the tipmost head of
476 '''return a dict where branch names map to the tipmost head of
477 the branch; open heads come before closed'''
477 the branch; open heads come before closed'''
478 bt = {}
478 bt = {}
479 for bn, heads in self.branchmap().iteritems():
479 for bn, heads in self.branchmap().iteritems():
480 tip = heads[-1]
480 tip = heads[-1]
481 for h in reversed(heads):
481 for h in reversed(heads):
482 if 'close' not in self.changelog.read(h)[5]:
482 if 'close' not in self.changelog.read(h)[5]:
483 tip = h
483 tip = h
484 break
484 break
485 bt[bn] = tip
485 bt[bn] = tip
486 return bt
486 return bt
487
487
488 def _readbranchcache(self):
488 def _readbranchcache(self):
489 partial = {}
489 partial = {}
490 try:
490 try:
491 f = self.opener("cache/branchheads")
491 f = self.opener("cache/branchheads")
492 lines = f.read().split('\n')
492 lines = f.read().split('\n')
493 f.close()
493 f.close()
494 except (IOError, OSError):
494 except (IOError, OSError):
495 return {}, nullid, nullrev
495 return {}, nullid, nullrev
496
496
497 try:
497 try:
498 last, lrev = lines.pop(0).split(" ", 1)
498 last, lrev = lines.pop(0).split(" ", 1)
499 last, lrev = bin(last), int(lrev)
499 last, lrev = bin(last), int(lrev)
500 if lrev >= len(self) or self[lrev].node() != last:
500 if lrev >= len(self) or self[lrev].node() != last:
501 # invalidate the cache
501 # invalidate the cache
502 raise ValueError('invalidating branch cache (tip differs)')
502 raise ValueError('invalidating branch cache (tip differs)')
503 for l in lines:
503 for l in lines:
504 if not l:
504 if not l:
505 continue
505 continue
506 node, label = l.split(" ", 1)
506 node, label = l.split(" ", 1)
507 label = encoding.tolocal(label.strip())
507 label = encoding.tolocal(label.strip())
508 partial.setdefault(label, []).append(bin(node))
508 partial.setdefault(label, []).append(bin(node))
509 except KeyboardInterrupt:
509 except KeyboardInterrupt:
510 raise
510 raise
511 except Exception, inst:
511 except Exception, inst:
512 if self.ui.debugflag:
512 if self.ui.debugflag:
513 self.ui.warn(str(inst), '\n')
513 self.ui.warn(str(inst), '\n')
514 partial, last, lrev = {}, nullid, nullrev
514 partial, last, lrev = {}, nullid, nullrev
515 return partial, last, lrev
515 return partial, last, lrev
516
516
517 def _writebranchcache(self, branches, tip, tiprev):
517 def _writebranchcache(self, branches, tip, tiprev):
518 try:
518 try:
519 f = self.opener("cache/branchheads", "w", atomictemp=True)
519 f = self.opener("cache/branchheads", "w", atomictemp=True)
520 f.write("%s %s\n" % (hex(tip), tiprev))
520 f.write("%s %s\n" % (hex(tip), tiprev))
521 for label, nodes in branches.iteritems():
521 for label, nodes in branches.iteritems():
522 for node in nodes:
522 for node in nodes:
523 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
523 f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
524 f.rename()
524 f.close()
525 except (IOError, OSError):
525 except (IOError, OSError):
526 pass
526 pass
527
527
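
Together, _readbranchcache and _writebranchcache define the on-disk layout of .hg/cache/branchheads: a header line with the cached tip node and its revision, then one 'node branch-label' pair per head. A hypothetical example (hashes invented):

    c2e0b7f4a1d35b6e9d0c8a7f6e5d4c3b2a190807 1342
    0d1e2f3a4b5c6d7e8f9a0b1c2d3e4f5a6b7c8d9e default
    9f8e7d6c5b4a392817060f5e4d3c2b1a09f8e7d6 stable
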
528 def _updatebranchcache(self, partial, ctxgen):
528 def _updatebranchcache(self, partial, ctxgen):
529 # collect new branch entries
529 # collect new branch entries
530 newbranches = {}
530 newbranches = {}
531 for c in ctxgen:
531 for c in ctxgen:
532 newbranches.setdefault(c.branch(), []).append(c.node())
532 newbranches.setdefault(c.branch(), []).append(c.node())
533 # if older branchheads are reachable from new ones, they aren't
533 # if older branchheads are reachable from new ones, they aren't
534 # really branchheads. Note checking parents is insufficient:
534 # really branchheads. Note checking parents is insufficient:
535 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
535 # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
536 for branch, newnodes in newbranches.iteritems():
536 for branch, newnodes in newbranches.iteritems():
537 bheads = partial.setdefault(branch, [])
537 bheads = partial.setdefault(branch, [])
538 bheads.extend(newnodes)
538 bheads.extend(newnodes)
539 if len(bheads) <= 1:
539 if len(bheads) <= 1:
540 continue
540 continue
541 bheads = sorted(bheads, key=lambda x: self[x].rev())
541 bheads = sorted(bheads, key=lambda x: self[x].rev())
542 # starting from tip means fewer passes over reachable
542 # starting from tip means fewer passes over reachable
543 while newnodes:
543 while newnodes:
544 latest = newnodes.pop()
544 latest = newnodes.pop()
545 if latest not in bheads:
545 if latest not in bheads:
546 continue
546 continue
547 minbhrev = self[bheads[0]].node()
547 minbhrev = self[bheads[0]].node()
548 reachable = self.changelog.reachable(latest, minbhrev)
548 reachable = self.changelog.reachable(latest, minbhrev)
549 reachable.remove(latest)
549 reachable.remove(latest)
550 if reachable:
550 if reachable:
551 bheads = [b for b in bheads if b not in reachable]
551 bheads = [b for b in bheads if b not in reachable]
552 partial[branch] = bheads
552 partial[branch] = bheads
553
553
554 def lookup(self, key):
554 def lookup(self, key):
555 if isinstance(key, int):
555 if isinstance(key, int):
556 return self.changelog.node(key)
556 return self.changelog.node(key)
557 elif key == '.':
557 elif key == '.':
558 return self.dirstate.p1()
558 return self.dirstate.p1()
559 elif key == 'null':
559 elif key == 'null':
560 return nullid
560 return nullid
561 elif key == 'tip':
561 elif key == 'tip':
562 return self.changelog.tip()
562 return self.changelog.tip()
563 n = self.changelog._match(key)
563 n = self.changelog._match(key)
564 if n:
564 if n:
565 return n
565 return n
566 if key in self._bookmarks:
566 if key in self._bookmarks:
567 return self._bookmarks[key]
567 return self._bookmarks[key]
568 if key in self.tags():
568 if key in self.tags():
569 return self.tags()[key]
569 return self.tags()[key]
570 if key in self.branchtags():
570 if key in self.branchtags():
571 return self.branchtags()[key]
571 return self.branchtags()[key]
572 n = self.changelog._partialmatch(key)
572 n = self.changelog._partialmatch(key)
573 if n:
573 if n:
574 return n
574 return n
575
575
576 # can't find key, check if it might have come from damaged dirstate
576 # can't find key, check if it might have come from damaged dirstate
577 if key in self.dirstate.parents():
577 if key in self.dirstate.parents():
578 raise error.Abort(_("working directory has unknown parent '%s'!")
578 raise error.Abort(_("working directory has unknown parent '%s'!")
579 % short(key))
579 % short(key))
580 try:
580 try:
581 if len(key) == 20:
581 if len(key) == 20:
582 key = hex(key)
582 key = hex(key)
583 except TypeError:
583 except TypeError:
584 pass
584 pass
585 raise error.RepoLookupError(_("unknown revision '%s'") % key)
585 raise error.RepoLookupError(_("unknown revision '%s'") % key)
586
586
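
So a bare key is resolved in a fixed order before lookup gives up: revision number, '.', 'null', 'tip', exact node, bookmark, tag, branch, then unambiguous node prefix. Illustrative calls (the prefix is made up):

    node = repo.lookup(0)           # revision number
    node = repo.lookup('tip')       # symbolic name
    node = repo.lookup('c2e0b7f4')  # node prefix, if unambiguous
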
587 def lookupbranch(self, key, remote=None):
587 def lookupbranch(self, key, remote=None):
588 repo = remote or self
588 repo = remote or self
589 if key in repo.branchmap():
589 if key in repo.branchmap():
590 return key
590 return key
591
591
592 repo = (remote and remote.local()) and remote or self
592 repo = (remote and remote.local()) and remote or self
593 return repo[key].branch()
593 return repo[key].branch()
594
594
595 def known(self, nodes):
595 def known(self, nodes):
596 nm = self.changelog.nodemap
596 nm = self.changelog.nodemap
597 return [(n in nm) for n in nodes]
597 return [(n in nm) for n in nodes]
598
598
599 def local(self):
599 def local(self):
600 return self
600 return self
601
601
602 def join(self, f):
602 def join(self, f):
603 return os.path.join(self.path, f)
603 return os.path.join(self.path, f)
604
604
605 def wjoin(self, f):
605 def wjoin(self, f):
606 return os.path.join(self.root, f)
606 return os.path.join(self.root, f)
607
607
608 def file(self, f):
608 def file(self, f):
609 if f[0] == '/':
609 if f[0] == '/':
610 f = f[1:]
610 f = f[1:]
611 return filelog.filelog(self.sopener, f)
611 return filelog.filelog(self.sopener, f)
612
612
613 def changectx(self, changeid):
613 def changectx(self, changeid):
614 return self[changeid]
614 return self[changeid]
615
615
616 def parents(self, changeid=None):
616 def parents(self, changeid=None):
617 '''get list of changectxs for parents of changeid'''
617 '''get list of changectxs for parents of changeid'''
618 return self[changeid].parents()
618 return self[changeid].parents()
619
619
620 def filectx(self, path, changeid=None, fileid=None):
620 def filectx(self, path, changeid=None, fileid=None):
621 """changeid can be a changeset revision, node, or tag.
621 """changeid can be a changeset revision, node, or tag.
622 fileid can be a file revision or node."""
622 fileid can be a file revision or node."""
623 return context.filectx(self, path, changeid, fileid)
623 return context.filectx(self, path, changeid, fileid)
624
624
625 def getcwd(self):
625 def getcwd(self):
626 return self.dirstate.getcwd()
626 return self.dirstate.getcwd()
627
627
628 def pathto(self, f, cwd=None):
628 def pathto(self, f, cwd=None):
629 return self.dirstate.pathto(f, cwd)
629 return self.dirstate.pathto(f, cwd)
630
630
631 def wfile(self, f, mode='r'):
631 def wfile(self, f, mode='r'):
632 return self.wopener(f, mode)
632 return self.wopener(f, mode)
633
633
634 def _link(self, f):
634 def _link(self, f):
635 return os.path.islink(self.wjoin(f))
635 return os.path.islink(self.wjoin(f))
636
636
637 def _loadfilter(self, filter):
637 def _loadfilter(self, filter):
638 if filter not in self.filterpats:
638 if filter not in self.filterpats:
639 l = []
639 l = []
640 for pat, cmd in self.ui.configitems(filter):
640 for pat, cmd in self.ui.configitems(filter):
641 if cmd == '!':
641 if cmd == '!':
642 continue
642 continue
643 mf = matchmod.match(self.root, '', [pat])
643 mf = matchmod.match(self.root, '', [pat])
644 fn = None
644 fn = None
645 params = cmd
645 params = cmd
646 for name, filterfn in self._datafilters.iteritems():
646 for name, filterfn in self._datafilters.iteritems():
647 if cmd.startswith(name):
647 if cmd.startswith(name):
648 fn = filterfn
648 fn = filterfn
649 params = cmd[len(name):].lstrip()
649 params = cmd[len(name):].lstrip()
650 break
650 break
651 if not fn:
651 if not fn:
652 fn = lambda s, c, **kwargs: util.filter(s, c)
652 fn = lambda s, c, **kwargs: util.filter(s, c)
653 # Wrap old filters not supporting keyword arguments
653 # Wrap old filters not supporting keyword arguments
654 if not inspect.getargspec(fn)[2]:
654 if not inspect.getargspec(fn)[2]:
655 oldfn = fn
655 oldfn = fn
656 fn = lambda s, c, **kwargs: oldfn(s, c)
656 fn = lambda s, c, **kwargs: oldfn(s, c)
657 l.append((mf, fn, params))
657 l.append((mf, fn, params))
658 self.filterpats[filter] = l
658 self.filterpats[filter] = l
659 return self.filterpats[filter]
659 return self.filterpats[filter]
660
660
661 def _filter(self, filterpats, filename, data):
661 def _filter(self, filterpats, filename, data):
662 for mf, fn, cmd in filterpats:
662 for mf, fn, cmd in filterpats:
663 if mf(filename):
663 if mf(filename):
664 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
664 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
665 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
665 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
666 break
666 break
667
667
668 return data
668 return data
669
669
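
These filters are configured through [encode] and [decode] sections in hgrc, mapping a file pattern to a command. A hypothetical fragment (the commands are examples, not defaults):

    [encode]
    # normalize line endings when data enters the repository
    **.txt = pipe: dos2unix

    [decode]
    # and restore them when writing to the working copy
    **.txt = pipe: unix2dos
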
670 @propertycache
670 @propertycache
671 def _encodefilterpats(self):
671 def _encodefilterpats(self):
672 return self._loadfilter('encode')
672 return self._loadfilter('encode')
673
673
674 @propertycache
674 @propertycache
675 def _decodefilterpats(self):
675 def _decodefilterpats(self):
676 return self._loadfilter('decode')
676 return self._loadfilter('decode')
677
677
678 def adddatafilter(self, name, filter):
678 def adddatafilter(self, name, filter):
679 self._datafilters[name] = filter
679 self._datafilters[name] = filter
680
680
681 def wread(self, filename):
681 def wread(self, filename):
682 if self._link(filename):
682 if self._link(filename):
683 data = os.readlink(self.wjoin(filename))
683 data = os.readlink(self.wjoin(filename))
684 else:
684 else:
685 data = self.wopener.read(filename)
685 data = self.wopener.read(filename)
686 return self._filter(self._encodefilterpats, filename, data)
686 return self._filter(self._encodefilterpats, filename, data)
687
687
688 def wwrite(self, filename, data, flags):
688 def wwrite(self, filename, data, flags):
689 data = self._filter(self._decodefilterpats, filename, data)
689 data = self._filter(self._decodefilterpats, filename, data)
690 if 'l' in flags:
690 if 'l' in flags:
691 self.wopener.symlink(data, filename)
691 self.wopener.symlink(data, filename)
692 else:
692 else:
693 self.wopener.write(filename, data)
693 self.wopener.write(filename, data)
694 if 'x' in flags:
694 if 'x' in flags:
695 util.setflags(self.wjoin(filename), False, True)
695 util.setflags(self.wjoin(filename), False, True)
696
696
697 def wwritedata(self, filename, data):
697 def wwritedata(self, filename, data):
698 return self._filter(self._decodefilterpats, filename, data)
698 return self._filter(self._decodefilterpats, filename, data)
699
699
700 def transaction(self, desc):
700 def transaction(self, desc):
701 tr = self._transref and self._transref() or None
701 tr = self._transref and self._transref() or None
702 if tr and tr.running():
702 if tr and tr.running():
703 return tr.nest()
703 return tr.nest()
704
704
705 # abort here if the journal already exists
705 # abort here if the journal already exists
706 if os.path.exists(self.sjoin("journal")):
706 if os.path.exists(self.sjoin("journal")):
707 raise error.RepoError(
707 raise error.RepoError(
708 _("abandoned transaction found - run hg recover"))
708 _("abandoned transaction found - run hg recover"))
709
709
710 journalfiles = self._writejournal(desc)
710 journalfiles = self._writejournal(desc)
711 renames = [(x, undoname(x)) for x in journalfiles]
711 renames = [(x, undoname(x)) for x in journalfiles]
712
712
713 tr = transaction.transaction(self.ui.warn, self.sopener,
713 tr = transaction.transaction(self.ui.warn, self.sopener,
714 self.sjoin("journal"),
714 self.sjoin("journal"),
715 aftertrans(renames),
715 aftertrans(renames),
716 self.store.createmode)
716 self.store.createmode)
717 self._transref = weakref.ref(tr)
717 self._transref = weakref.ref(tr)
718 return tr
718 return tr
719
719
720 def _writejournal(self, desc):
720 def _writejournal(self, desc):
721 # save dirstate for rollback
721 # save dirstate for rollback
722 try:
722 try:
723 ds = self.opener.read("dirstate")
723 ds = self.opener.read("dirstate")
724 except IOError:
724 except IOError:
725 ds = ""
725 ds = ""
726 self.opener.write("journal.dirstate", ds)
726 self.opener.write("journal.dirstate", ds)
727 self.opener.write("journal.branch",
727 self.opener.write("journal.branch",
728 encoding.fromlocal(self.dirstate.branch()))
728 encoding.fromlocal(self.dirstate.branch()))
729 self.opener.write("journal.desc",
729 self.opener.write("journal.desc",
730 "%d\n%s\n" % (len(self), desc))
730 "%d\n%s\n" % (len(self), desc))
731
731
732 bkname = self.join('bookmarks')
732 bkname = self.join('bookmarks')
733 if os.path.exists(bkname):
733 if os.path.exists(bkname):
734 util.copyfile(bkname, self.join('journal.bookmarks'))
734 util.copyfile(bkname, self.join('journal.bookmarks'))
735 else:
735 else:
736 self.opener.write('journal.bookmarks', '')
736 self.opener.write('journal.bookmarks', '')
737
737
738 return (self.sjoin('journal'), self.join('journal.dirstate'),
738 return (self.sjoin('journal'), self.join('journal.dirstate'),
739 self.join('journal.branch'), self.join('journal.desc'),
739 self.join('journal.branch'), self.join('journal.desc'),
740 self.join('journal.bookmarks'))
740 self.join('journal.bookmarks'))
741
741
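
For reference, journal.desc (renamed to undo.desc after the transaction and read back by rollback) holds the pre-transaction changelog length on the first line and the transaction description on the second; a hypothetical example:

    1342
    commit
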
742 def recover(self):
742 def recover(self):
743 lock = self.lock()
743 lock = self.lock()
744 try:
744 try:
745 if os.path.exists(self.sjoin("journal")):
745 if os.path.exists(self.sjoin("journal")):
746 self.ui.status(_("rolling back interrupted transaction\n"))
746 self.ui.status(_("rolling back interrupted transaction\n"))
747 transaction.rollback(self.sopener, self.sjoin("journal"),
747 transaction.rollback(self.sopener, self.sjoin("journal"),
748 self.ui.warn)
748 self.ui.warn)
749 self.invalidate()
749 self.invalidate()
750 return True
750 return True
751 else:
751 else:
752 self.ui.warn(_("no interrupted transaction available\n"))
752 self.ui.warn(_("no interrupted transaction available\n"))
753 return False
753 return False
754 finally:
754 finally:
755 lock.release()
755 lock.release()
756
756
757 def rollback(self, dryrun=False):
757 def rollback(self, dryrun=False):
758 wlock = lock = None
758 wlock = lock = None
759 try:
759 try:
760 wlock = self.wlock()
760 wlock = self.wlock()
761 lock = self.lock()
761 lock = self.lock()
762 if os.path.exists(self.sjoin("undo")):
762 if os.path.exists(self.sjoin("undo")):
763 try:
763 try:
764 args = self.opener.read("undo.desc").splitlines()
764 args = self.opener.read("undo.desc").splitlines()
765 if len(args) >= 3 and self.ui.verbose:
765 if len(args) >= 3 and self.ui.verbose:
766 desc = _("repository tip rolled back to revision %s"
766 desc = _("repository tip rolled back to revision %s"
767 " (undo %s: %s)\n") % (
767 " (undo %s: %s)\n") % (
768 int(args[0]) - 1, args[1], args[2])
768 int(args[0]) - 1, args[1], args[2])
769 elif len(args) >= 2:
769 elif len(args) >= 2:
770 desc = _("repository tip rolled back to revision %s"
770 desc = _("repository tip rolled back to revision %s"
771 " (undo %s)\n") % (
771 " (undo %s)\n") % (
772 int(args[0]) - 1, args[1])
772 int(args[0]) - 1, args[1])
773 except IOError:
773 except IOError:
774 desc = _("rolling back unknown transaction\n")
774 desc = _("rolling back unknown transaction\n")
775 self.ui.status(desc)
775 self.ui.status(desc)
776 if dryrun:
776 if dryrun:
777 return
777 return
778 transaction.rollback(self.sopener, self.sjoin("undo"),
778 transaction.rollback(self.sopener, self.sjoin("undo"),
779 self.ui.warn)
779 self.ui.warn)
780 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
780 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
781 if os.path.exists(self.join('undo.bookmarks')):
781 if os.path.exists(self.join('undo.bookmarks')):
782 util.rename(self.join('undo.bookmarks'),
782 util.rename(self.join('undo.bookmarks'),
783 self.join('bookmarks'))
783 self.join('bookmarks'))
784 try:
784 try:
785 branch = self.opener.read("undo.branch")
785 branch = self.opener.read("undo.branch")
786 self.dirstate.setbranch(branch)
786 self.dirstate.setbranch(branch)
787 except IOError:
787 except IOError:
788 self.ui.warn(_("named branch could not be reset, "
788 self.ui.warn(_("named branch could not be reset, "
789 "current branch is still: %s\n")
789 "current branch is still: %s\n")
790 % self.dirstate.branch())
790 % self.dirstate.branch())
791 self.invalidate()
791 self.invalidate()
792 self.dirstate.invalidate()
792 self.dirstate.invalidate()
793 self.destroyed()
793 self.destroyed()
794 parents = tuple([p.rev() for p in self.parents()])
794 parents = tuple([p.rev() for p in self.parents()])
795 if len(parents) > 1:
795 if len(parents) > 1:
796 self.ui.status(_("working directory now based on "
796 self.ui.status(_("working directory now based on "
797 "revisions %d and %d\n") % parents)
797 "revisions %d and %d\n") % parents)
798 else:
798 else:
799 self.ui.status(_("working directory now based on "
799 self.ui.status(_("working directory now based on "
800 "revision %d\n") % parents)
800 "revision %d\n") % parents)
801 else:
801 else:
802 self.ui.warn(_("no rollback information available\n"))
802 self.ui.warn(_("no rollback information available\n"))
803 return 1
803 return 1
804 finally:
804 finally:
805 release(lock, wlock)
805 release(lock, wlock)
806
806
807 def invalidatecaches(self):
807 def invalidatecaches(self):
808 try:
808 try:
809 delattr(self, '_tagscache')
809 delattr(self, '_tagscache')
810 except AttributeError:
810 except AttributeError:
811 pass
811 pass
812
812
813 self._branchcache = None # in UTF-8
813 self._branchcache = None # in UTF-8
814 self._branchcachetip = None
814 self._branchcachetip = None
815
815
816 def invalidatedirstate(self):
816 def invalidatedirstate(self):
817 '''Invalidates the dirstate, causing the next call to dirstate
817 '''Invalidates the dirstate, causing the next call to dirstate
818 to check if it was modified since the last time it was read,
818 to check if it was modified since the last time it was read,
819 rereading it if it has.
819 rereading it if it has.
820
820
821 This is different from dirstate.invalidate() in that it doesn't
821 This is different from dirstate.invalidate() in that it doesn't
822 always reread the dirstate. Use dirstate.invalidate() if you want to
822 always reread the dirstate. Use dirstate.invalidate() if you want to
823 explicitly read the dirstate again (i.e. restoring it to a previous
823 explicitly read the dirstate again (i.e. restoring it to a previous
824 known good state).'''
824 known good state).'''
825 try:
825 try:
826 delattr(self, 'dirstate')
826 delattr(self, 'dirstate')
827 except AttributeError:
827 except AttributeError:
828 pass
828 pass
829
829
830 def invalidate(self):
830 def invalidate(self):
831 for k in self._filecache:
831 for k in self._filecache:
832 # dirstate is invalidated separately in invalidatedirstate()
832 # dirstate is invalidated separately in invalidatedirstate()
833 if k == 'dirstate':
833 if k == 'dirstate':
834 continue
834 continue
835
835
836 try:
836 try:
837 delattr(self, k)
837 delattr(self, k)
838 except AttributeError:
838 except AttributeError:
839 pass
839 pass
840 self.invalidatecaches()
840 self.invalidatecaches()
841
841
842 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
842 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
843 try:
843 try:
844 l = lock.lock(lockname, 0, releasefn, desc=desc)
844 l = lock.lock(lockname, 0, releasefn, desc=desc)
845 except error.LockHeld, inst:
845 except error.LockHeld, inst:
846 if not wait:
846 if not wait:
847 raise
847 raise
848 self.ui.warn(_("waiting for lock on %s held by %r\n") %
848 self.ui.warn(_("waiting for lock on %s held by %r\n") %
849 (desc, inst.locker))
849 (desc, inst.locker))
850 # default to 600 seconds timeout
850 # default to 600 seconds timeout
851 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
851 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
852 releasefn, desc=desc)
852 releasefn, desc=desc)
853 if acquirefn:
853 if acquirefn:
854 acquirefn()
854 acquirefn()
855 return l
855 return l
856
856
857 def lock(self, wait=True):
857 def lock(self, wait=True):
858 '''Lock the repository store (.hg/store) and return the lock.
858 '''Lock the repository store (.hg/store) and return the lock.
859 Use this before modifying the store (e.g. committing or
859 Use this before modifying the store (e.g. committing or
860 stripping). If you are opening a transaction, get a lock as well.'''
860 stripping). If you are opening a transaction, get a lock as well.'''
861 l = self._lockref and self._lockref()
861 l = self._lockref and self._lockref()
862 if l is not None and l.held:
862 if l is not None and l.held:
863 l.lock()
863 l.lock()
864 return l
864 return l
865
865
866 def unlock():
866 def unlock():
867 self.store.write()
867 self.store.write()
868 for k, ce in self._filecache.items():
868 for k, ce in self._filecache.items():
869 if k == 'dirstate':
869 if k == 'dirstate':
870 continue
870 continue
871 ce.refresh()
871 ce.refresh()
872
872
873 l = self._lock(self.sjoin("lock"), wait, unlock,
873 l = self._lock(self.sjoin("lock"), wait, unlock,
874 self.invalidate, _('repository %s') % self.origroot)
874 self.invalidate, _('repository %s') % self.origroot)
875 self._lockref = weakref.ref(l)
875 self._lockref = weakref.ref(l)
876 return l
876 return l
877
877
878 def wlock(self, wait=True):
878 def wlock(self, wait=True):
879 '''Lock the non-store parts of the repository (everything under
879 '''Lock the non-store parts of the repository (everything under
880 .hg except .hg/store) and return the lock.
880 .hg except .hg/store) and return the lock.
881 Use this before modifying files in .hg.'''
881 Use this before modifying files in .hg.'''
882 l = self._wlockref and self._wlockref()
882 l = self._wlockref and self._wlockref()
883 if l is not None and l.held:
883 if l is not None and l.held:
884 l.lock()
884 l.lock()
885 return l
885 return l
886
886
887 def unlock():
887 def unlock():
888 self.dirstate.write()
888 self.dirstate.write()
889 ce = self._filecache.get('dirstate')
889 ce = self._filecache.get('dirstate')
890 if ce:
890 if ce:
891 ce.refresh()
891 ce.refresh()
892
892
893 l = self._lock(self.join("wlock"), wait, unlock,
893 l = self._lock(self.join("wlock"), wait, unlock,
894 self.invalidatedirstate, _('working directory of %s') %
894 self.invalidatedirstate, _('working directory of %s') %
895 self.origroot)
895 self.origroot)
896 self._wlockref = weakref.ref(l)
896 self._wlockref = weakref.ref(l)
897 return l
897 return l
898
898
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1

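    # Sketch of the copy metadata produced above (illustrative, with made-up
    # names): a rename of 'foo' to 'bar' is recorded in the new filelog
    # revision's metadata rather than in its parents, roughly as
    #
    #     meta = {'copy': 'foo', 'copyrev': '0123abcd...'}  # 40-digit hex
    #
    # with fparent1 set to nullid so readers know to consult the copy data.
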
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra={}):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory;
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.
        """

        def fail(f, msg):
            raise util.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.dir = vdirs.append
            match.bad = fail

        wlock = self.wlock()
        try:
            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if (not force and merge and match and
                (match.files() or match.anypats())):
                raise util.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            changes = self.status(match=match, clean=force)
            if force:
                changes[0].extend(changes[6]) # mq may commit unchanged files

            # check subrepos
            subs = []
            removedsubs = set()
            if '.hgsub' in wctx:
                # only manage subrepos and .hgsubstate if .hgsub is present
                for p in wctx.parents():
                    removedsubs.update(s for s in p.substate if match(s))
                for s in wctx.substate:
                    removedsubs.discard(s)
                    if match(s) and wctx.sub(s).dirty():
                        subs.append(s)
                if (subs or removedsubs):
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise util.Abort(
                            _("can't commit subrepos without .hgsub"))
                    if '.hgsubstate' not in changes[0]:
                        changes[0].insert(0, '.hgsubstate')
                    if '.hgsubstate' in changes[2]:
                        changes[2].remove('.hgsubstate')
            elif '.hgsub' in changes[2]:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in changes[0] + changes[1] + changes[2]):
                    changes[2].insert(0, '.hgsubstate')

            if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
                changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
                if changedsubs:
                    raise util.Abort(_("uncommitted changes in subrepo %s")
                                     % changedsubs[0])

            # make sure all explicit patterns are matched
            if not force and match.files():
                matched = set(changes[0] + changes[1] + changes[2])

                for f in match.files():
                    if f == '.' or f in matched or f in wctx.substate:
                        continue
                    if f in changes[3]: # missing
                        fail(f, _('file not found!'))
                    if f in vdirs: # visited directory
                        d = f + '/'
                        for mf in matched:
                            if mf.startswith(d):
                                break
                        else:
                            fail(f, _("no match under directory!"))
                    elif f not in self.dirstate:
                        fail(f, _("file not tracked!"))

            if (not force and not extra.get("close") and not merge
                and not (changes[0] or changes[1] or changes[2])
                and wctx.branch() == wctx.p1().branch()):
                return None

            ms = mergemod.mergestate(self)
            for f in changes[0]:
                if f in ms and ms[f] == 'u':
                    raise util.Abort(_("unresolved merge conflicts "
                                       "(see hg help resolve)"))

            cctx = context.workingctx(self, text, user, date, extra, changes)
            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # commit subs
            if subs or removedsubs:
                state = wctx.substate.copy()
                for s in sorted(subs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    state[s] = (state[s][0], sr)
                subrepo.writestate(self, state)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
                ret = self.commitctx(cctx, True)
            except:
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise

            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, p1, ret)
            for f in changes[0] + changes[1]:
                self.dirstate.normal(f)
            for f in changes[2]:
                self.dirstate.drop(f)
            self.dirstate.setparents(ret)
            ms.reset()
        finally:
            wlock.release()

        self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
        return ret

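    # Illustrative usage sketch (assumes an existing `repo` object and a
    # dirty working directory; not part of the original file):
    #
    #     node = repo.commit(text='fix the frobnicator', user='alice')
    #     if node is None:
    #         pass  # nothing changed, no commit was created
    #
    # The None return mirrors the early exit above when nothing is modified,
    # added or removed and the branch is unchanged.
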
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.
        """

        tr = lock = None
        removed = list(ctx.removed())
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest().copy()
                m2 = p2.manifest()

                # check in files
                new = {}
                changed = []
                linkrev = len(self)
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
                                                  changed)
                        m1.set(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise
                        else:
                            removed.append(f)

                # update manifest
                m1.update(new)
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m1]
                for f in drop:
                    del m1[f]
                mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
                                       p2.manifestnode(), (new, drop))
                files = changed + removed
            else:
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.changelog.delayupdate()
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: self.changelog.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            self.changelog.finalize(trp)
            tr.close()

            if self._branchcache:
                self.updatebranchcache()
            return n
        finally:
            if tr:
                tr.release()
            lock.release()

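    # Sketch of programmatic use via an in-memory context (assumption: the
    # reader's Mercurial provides context.memctx/memfilectx with these
    # signatures; the file name and contents below are hypothetical):
    #
    #     from mercurial import context
    #     def getfile(repo, memctx, path):
    #         return context.memfilectx(path, 'new contents\n')
    #     mctx = context.memctx(repo, (repo['tip'].node(), None),
    #                           'log message', ['a.txt'], getfile,
    #                           user='alice')
    #     node = repo.commitctx(mctx)
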
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.'''
        # XXX it might be nice if we could take the list of destroyed
        # nodes, but I don't see an easy way for rollback() to do that

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidatecaches()

    def walk(self, match, node=None):
        '''
        walk recursively through the directory tree or a given
        changeset, finding all files matched by the match
        function
        '''
        return self[node].walk(match)

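    # Illustrative sketch (assumes an existing `repo` and the match API used
    # elsewhere in this module; the pattern is made up):
    #
    #     m = matchmod.match(repo.root, repo.getcwd(), ['glob:**.py'])
    #     for f in repo.walk(m, node='tip'):
    #         print f
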
    def status(self, node1='.', node2=None, match=None,
               ignored=False, clean=False, unknown=False,
               listsubrepos=False):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def mfmatches(ctx):
            mf = ctx.manifest().copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        if isinstance(node1, context.changectx):
            ctx1 = node1
        else:
            ctx1 = self[node1]
        if isinstance(node2, context.changectx):
            ctx2 = node2
        else:
            ctx2 = self[node2]

        working = ctx2.rev() is None
        parentworking = working and ctx1 == self['.']
        match = match or matchmod.always(self.root, self.getcwd())
        listignored, listclean, listunknown = ignored, clean, unknown

        # load earliest manifest first for caching reasons
        if not working and ctx2.rev() < ctx1.rev():
            ctx2.manifest()

        if not parentworking:
            def bad(f, msg):
                if f not in ctx1:
                    self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
            match.bad = bad

        if working: # we need to scan the working dir
            subrepos = []
            if '.hgsub' in self.dirstate:
                subrepos = ctx2.substate.keys()
            s = self.dirstate.status(match, subrepos, listignored,
                                     listclean, listunknown)
            cmp, modified, added, removed, deleted, unknown, ignored, clean = s

            # check for any possibly clean files
            if parentworking and cmp:
                fixup = []
                # do a full compare of any files that might have changed
                for f in sorted(cmp):
                    if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
                        or ctx1[f].cmp(ctx2[f])):
                        modified.append(f)
                    else:
                        fixup.append(f)

                # update dirstate for files that are actually clean
                if fixup:
                    if listclean:
                        clean += fixup

                    try:
                        # updating the dirstate is optional
                        # so we don't wait on the lock
                        wlock = self.wlock(False)
                        try:
                            for f in fixup:
                                self.dirstate.normal(f)
                        finally:
                            wlock.release()
                    except error.LockError:
                        pass

        if not parentworking:
            mf1 = mfmatches(ctx1)
            if working:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                mf2 = mfmatches(self['.'])
                for f in cmp + modified + added:
                    mf2[f] = None
                    mf2.set(f, ctx2.flags(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]
            else:
                # we are comparing two revisions
                deleted, unknown, ignored = [], [], []
                mf2 = mfmatches(ctx2)

            modified, added, clean = [], [], []
            for fn in mf2:
                if fn in mf1:
                    if (fn not in deleted and
                        (mf1.flags(fn) != mf2.flags(fn) or
                         (mf1[fn] != mf2[fn] and
                          (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
                        modified.append(fn)
                    elif listclean:
                        clean.append(fn)
                    del mf1[fn]
                elif fn not in deleted:
                    added.append(fn)
            removed = mf1.keys()

        r = modified, added, removed, deleted, unknown, ignored, clean

        if listsubrepos:
            for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
                if working:
                    rev2 = None
                else:
                    rev2 = ctx2.substate[subpath][1]
                try:
                    submatch = matchmod.narrowmatcher(subpath, match)
                    s = sub.status(rev2, match=submatch, ignored=listignored,
                                   clean=listclean, unknown=listunknown,
                                   listsubrepos=True)
                    for rfiles, sfiles in zip(r, s):
                        rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
                except error.LookupError:
                    self.ui.status(_("skipping missing subrepository: %s\n")
                                   % subpath)

        for l in r:
            l.sort()
        return r

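    # Sketch of consuming the 7-tuple built above (illustrative):
    #
    #     (modified, added, removed, deleted,
    #      unknown, ignored, clean) = repo.status(unknown=True, clean=True)
    #
    # The unknown, ignored and clean lists stay empty unless the matching
    # keyword arguments are set, as the listunknown/listignored/listclean
    # flags above show.
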
    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        return sorted(heads, key=self.changelog.rev, reverse=True)

    def branchheads(self, branch=None, start=None, closed=False):
        '''return a (possibly filtered) list of heads for the given branch

        Heads are returned in topological order, from newest to oldest.
        If branch is None, use the dirstate branch.
        If start is not None, return only heads reachable from start.
        If closed is True, return heads that are marked as closed as well.
        '''
        if branch is None:
            branch = self[None].branch()
        branches = self.branchmap()
        if branch not in branches:
            return []
        # the cache returns heads ordered lowest to highest
        bheads = list(reversed(branches[branch]))
        if start is not None:
            # filter out the heads that cannot be reached from startrev
            fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
            bheads = [h for h in bheads if h in fbheads]
        if not closed:
            bheads = [h for h in bheads if
                      ('close' not in self.changelog.read(h)[5])]
        return bheads

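    # Illustrative sketch: heads of the 'default' branch, newest first,
    # including closed heads (assumes an existing `repo`):
    #
    #     for h in repo.branchheads('default', closed=True):
    #         print short(h)
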
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        '''For each (top, bottom) pair, return a sample of the nodes on the
        first-parent path from top towards bottom, taken at exponentially
        increasing distances (1, 2, 4, 8, ... steps away from top).'''
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom and n != nullid:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

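    # Worked example (illustrative): with a linear history t, t-1, t-2, ...
    # and pairs=[(t, nullid)], the loop above hits i == f at i = 1, 2, 4,
    # 8, ..., so it collects the nodes 1, 2, 4 and 8 first-parent steps
    # away from t, stopping once bottom or nullid is reached.
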
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            tmp = discovery.findcommonincoming(self, remote, heads=heads,
                                               force=force)
            common, fetch, rheads = tmp
            if not fetch:
                self.ui.status(_("no changes found\n"))
                result = 0
            else:
                if heads is None and list(common) == [nullid]:
                    self.ui.status(_("requesting all changes\n"))
                elif heads is None and remote.capable('changegroupsubset'):
                    # issue1320, avoid a race if remote changed after discovery
                    heads = rheads

                if remote.capable('getbundle'):
                    cg = remote.getbundle('pull', common=common,
                                          heads=heads or rheads)
                elif heads is None:
                    cg = remote.changegroup(fetch, 'pull')
                elif not remote.capable('changegroupsubset'):
                    raise util.Abort(_("partial pull cannot be done because "
                                       "other repository doesn't support "
                                       "changegroupsubset."))
                else:
                    cg = remote.changegroupsubset(fetch, heads, 'pull')
                result = self.addchangegroup(cg, 'pull', remote.url(),
                                             lock=lock)
        finally:
            lock.release()

        return result

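    # Illustrative sketch (assumes the mercurial.hg helper; paths and URL
    # are made up):
    #
    #     from mercurial import ui as uimod, hg
    #     u = uimod.ui()
    #     repo = hg.repository(u, '/path/to/local/repo')
    #     other = hg.repository(u, 'http://example.com/hg/project')
    #     repo.pull(other)  # fetch everything new from `other`
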
    def checkpush(self, force, revs):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override the
        push command.
        """
        pass

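    # Sketch of an extension override (hypothetical subclass; the mq
    # extension does something similar to refuse pushing a repository with
    # patches applied):
    #
    #     class myrepo(localrepository):
    #         def checkpush(self, force, revs):
    #             if not force and self.mq.applied:  # self.mq is mq-specific
    #                 raise util.Abort(_('source has mq patches applied'))
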
    def push(self, remote, force=False, revs=None, newbranch=False):
        '''Push outgoing changesets (limited by revs) from the current
        repository to remote. Return an integer:
          - 0 means HTTP error *or* nothing to push
          - 1 means we pushed and remote head count is unchanged *or*
            we have outgoing changesets but refused to push
          - other values as described by addchangegroup()
        '''
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        self.checkpush(force, revs)
        lock = None
        unbundle = remote.capable('unbundle')
        if not unbundle:
            lock = remote.lock()
        try:
            cg, remote_heads = discovery.prepush(self, remote, force, revs,
                                                 newbranch)
            ret = remote_heads
            if cg is not None:
                if unbundle:
                    # local repo finds heads on server, finds out what
                    # revs it must push. once revs transferred, if server
                    # finds it has different heads (someone else won
                    # commit/push race), server aborts.
                    if force:
                        remote_heads = ['force']
                    # ssh: return remote's addchangegroup()
                    # http: return remote's addchangegroup() or 0 for error
                    ret = remote.unbundle(cg, remote_heads, 'push')
                else:
                    # we return an integer indicating remote head count change
                    ret = remote.addchangegroup(cg, 'push', self.url(),
                                                lock=lock)
        finally:
            if lock is not None:
                lock.release()

        self.ui.debug("checking for updated bookmarks\n")
        rb = remote.listkeys('bookmarks')
        for k in rb.keys():
            if k in self._bookmarks:
                nr, nl = rb[k], hex(self._bookmarks[k])
                if nr in self:
                    cr = self[nr]
                    cl = self[nl]
                    if cl in cr.descendants():
                        r = remote.pushkey('bookmarks', k, nr, nl)
                        if r:
                            self.ui.status(_("updating bookmark %s\n") % k)
                        else:
                            self.ui.warn(_('updating bookmark %s'
                                           ' failed!\n') % k)

        return ret

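    # Illustrative reading of the return value (sketch):
    #
    #     ret = repo.push(other)
    #     if ret == 0:
    #         pass  # HTTP error, or nothing to push
    #     elif ret == 1:
    #         pass  # pushed with head count unchanged, or push refused
    #     # other values follow the addchangegroup() convention below
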
    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug("list of changesets:\n")
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source):
        """Compute a changegroup consisting of all the nodes that are
        descendants of any of the bases and ancestors of any of the heads.
        Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.
        """
        cl = self.changelog
        if not bases:
            bases = [nullid]
        csets, bases, heads = cl.nodesbetween(bases, heads)
        # We assume that all ancestors of bases are known
        common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
        return self._changegroupsubset(common, csets, heads, source)

    def getbundle(self, source, heads=None, common=None):
        """Like changegroupsubset, but returns the set difference between the
        ancestors of heads and the ancestors of common.

        If heads is None, use the local heads. If common is None, use [nullid].

        The nodes in common might not all be known locally due to the way the
        current discovery protocol works.
        """
        cl = self.changelog
        if common:
            nm = cl.nodemap
            common = [n for n in common if n in nm]
        else:
            common = [nullid]
        if not heads:
            heads = cl.heads()
        common, missing = cl.findcommonmissing(common, heads)
        if not missing:
            return None
        return self._changegroupsubset(common, missing, heads, source)

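    # Sketch (illustrative): a full bundle is the difference between the
    # ancestors of the local heads and the ancestors of nullid:
    #
    #     cg = repo.getbundle('pull', heads=repo.heads(), common=[nullid])
    #     # cg is None when nothing is missing, else an object whose
    #     # read() yields successive changegroup chunks
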
    def _changegroupsubset(self, commonrevs, csets, heads, source):

        cl = self.changelog
        mf = self.manifest
        mfs = {} # needed manifests
        fnodes = {} # needed file nodes
        changedfiles = set()
        fstate = ['', {}]
        count = [0]

        # can we go through the fast path ?
        heads.sort()
        if heads == sorted(self.heads()):
            return self._changegroup(csets, source)

        # slow path
        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(csets, source)

        # filter any nodes that claim to be part of the known set
        def prune(revlog, missing):
            return [n for n in missing
                    if revlog.linkrev(revlog.rev(n)) not in commonrevs]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(csets))
                return x
            elif revlog == mf:
                clnode = mfs[x]
                mdata = mf.readfast(x)
                for f in changedfiles:
                    if f in mdata:
                        fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return mfs[x]
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    unit=_('files'), total=len(changedfiles))
                return fstate[1][x]

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            for chunk in cl.group(csets, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            count[0] = 0
            for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            mfs.clear()

            # Go through all our files in order sorted by name.
            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                fstate[1] = fnodes.pop(fname, {})

                nodelist = prune(filerevlog, fstate[1])
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk

            # Signal that no more groups are left.
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if csets:
            self.hook('outgoing', node=hex(csets[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

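    # The 'bundle.reorder' knob read above comes from the configuration,
    # e.g. in an hgrc (illustrative excerpt):
    #
    #     [bundle]
    #     reorder = auto        # or any boolean accepted by util.parsebool
    #
    # 'auto' (reorder = None here) leaves the decision to each revlog
    # group; an explicit boolean forces reordering on or off throughout.
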
    def changegroup(self, basenodes, source):
        # to avoid a race we use changegroupsubset() (issue1320)
        return self.changegroupsubset(basenodes, self.heads(), source)

    def _changegroup(self, nodes, source):
        """Compute the changegroup of all nodes that we have that a recipient
        doesn't. Return a chunkbuffer object whose read() method will return
        successive changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them.

        nodes is the set of nodes to send"""

        cl = self.changelog
        mf = self.manifest
        mfs = {}
        changedfiles = set()
        fstate = ['']
        count = [0]

        self.hook('preoutgoing', throw=True, source=source)
        self.changegroupinfo(nodes, source)

        revset = set([cl.rev(n) for n in nodes])

        def gennodelst(log):
            return [log.node(r) for r in log if log.linkrev(r) in revset]

        def lookup(revlog, x):
            if revlog == cl:
                c = cl.read(x)
                changedfiles.update(c[3])
                mfs.setdefault(c[0], x)
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('changesets'), total=len(nodes))
                return x
            elif revlog == mf:
                count[0] += 1
                self.ui.progress(_('bundling'), count[0],
                                 unit=_('manifests'), total=len(mfs))
                return cl.node(revlog.linkrev(revlog.rev(x)))
            else:
                self.ui.progress(
                    _('bundling'), count[0], item=fstate[0],
                    total=len(changedfiles), unit=_('files'))
                return cl.node(revlog.linkrev(revlog.rev(x)))

        bundler = changegroup.bundle10(lookup)
        reorder = self.ui.config('bundle', 'reorder', 'auto')
        if reorder == 'auto':
            reorder = None
        else:
            reorder = util.parsebool(reorder)

        def gengroup():
            '''yield a sequence of changegroup chunks (strings)'''
            # construct a list of all changed files

            for chunk in cl.group(nodes, bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
                yield chunk
            self.ui.progress(_('bundling'), None)

            count[0] = 0
            for fname in sorted(changedfiles):
                filerevlog = self.file(fname)
                if not len(filerevlog):
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                fstate[0] = fname
                nodelist = gennodelst(filerevlog)
                if nodelist:
                    count[0] += 1
                    yield bundler.fileheader(fname)
                    for chunk in filerevlog.group(nodelist, bundler, reorder):
                        yield chunk
            yield bundler.close()
            self.ui.progress(_('bundling'), None)

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')

    def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
        """Add the changegroup returned by source.read() to this repo.
        srctype is a string like 'push', 'pull', or 'unbundle'. url is
        the URL of the repo where this changegroup is coming from.
        If lock is not None, the function takes ownership of the lock
        and releases it after the changegroup is added.

        Return an integer summarizing the change to this repo:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug("add changeset %s\n" % short(x))
            return len(cl)

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0
        efiles = set()

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = cl.heads()

        tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            clstart = len(cl)
            class prog(object):
                step = _('changesets')
                count = 1
                ui = self.ui
                total = None
                def __call__(self):
                    self.ui.progress(self.step, self.count, unit=_('chunks'),
                                     total=self.total)
                    self.count += 1
            pr = prog()
            source.callback = pr

            source.changelogheader()
            if (cl.addgroup(source, csmap, trp) is None
                and not emptyok):
                raise util.Abort(_("received changelog group is empty"))
            clend = len(cl)
            changesets = clend - clstart
            for c in xrange(clstart, clend):
                efiles.update(self[c].files())
            efiles = len(efiles) # from here on, efiles is a plain count
            self.ui.progress(_('changesets'), None)

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            pr.step = _('manifests')
            pr.count = 1
            pr.total = changesets # manifests <= changesets
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            source.manifestheader()
            self.manifest.addgroup(source, revmap, trp)
            self.ui.progress(_('manifests'), None)

            needfiles = {}
            if self.ui.configbool('server', 'validate', default=False):
                # validate incoming csets have their manifests
                for cset in xrange(clstart, clend):
                    mfest = self.changelog.read(self.changelog.node(cset))[0]
                    mfest = self.manifest.readdelta(mfest)
                    # store file nodes we must see
                    for f, n in mfest.iteritems():
                        needfiles.setdefault(f, set()).add(n)

            # process the files
            self.ui.status(_("adding file changes\n"))
            pr.step = _('files')
            pr.count = 1
            pr.total = efiles
            source.callback = None

            while True:
                chunkdata = source.filelogheader()
                if not chunkdata:
                    break
                f = chunkdata["filename"]
                self.ui.debug("adding %s revisions\n" % f)
                pr()
                fl = self.file(f)
                o = len(fl)
                if fl.addgroup(source, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += len(fl) - o
                files += 1
                if f in needfiles:
1861 if f in needfiles:
1862 needs = needfiles[f]
1862 needs = needfiles[f]
1863 for new in xrange(o, len(fl)):
1863 for new in xrange(o, len(fl)):
1864 n = fl.node(new)
1864 n = fl.node(new)
1865 if n in needs:
1865 if n in needs:
1866 needs.remove(n)
1866 needs.remove(n)
1867 if not needs:
1867 if not needs:
1868 del needfiles[f]
1868 del needfiles[f]
1869 self.ui.progress(_('files'), None)
1869 self.ui.progress(_('files'), None)
1870
1870
1871 for f, needs in needfiles.iteritems():
1871 for f, needs in needfiles.iteritems():
1872 fl = self.file(f)
1872 fl = self.file(f)
1873 for n in needs:
1873 for n in needs:
1874 try:
1874 try:
1875 fl.rev(n)
1875 fl.rev(n)
1876 except error.LookupError:
1876 except error.LookupError:
1877 raise util.Abort(
1877 raise util.Abort(
1878 _('missing file data for %s:%s - run hg verify') %
1878 _('missing file data for %s:%s - run hg verify') %
1879 (f, hex(n)))
1879 (f, hex(n)))
1880
1880
1881 dh = 0
1881 dh = 0
1882 if oldheads:
1882 if oldheads:
1883 heads = cl.heads()
1883 heads = cl.heads()
1884 dh = len(heads) - len(oldheads)
1884 dh = len(heads) - len(oldheads)
1885 for h in heads:
1885 for h in heads:
1886 if h not in oldheads and 'close' in self[h].extra():
1886 if h not in oldheads and 'close' in self[h].extra():
1887 dh -= 1
1887 dh -= 1
1888 htext = ""
1888 htext = ""
1889 if dh:
1889 if dh:
1890 htext = _(" (%+d heads)") % dh
1890 htext = _(" (%+d heads)") % dh
1891
1891
1892 self.ui.status(_("added %d changesets"
1892 self.ui.status(_("added %d changesets"
1893 " with %d changes to %d files%s\n")
1893 " with %d changes to %d files%s\n")
1894 % (changesets, revisions, files, htext))
1894 % (changesets, revisions, files, htext))
1895
1895
1896 if changesets > 0:
1896 if changesets > 0:
1897 p = lambda: cl.writepending() and self.root or ""
1897 p = lambda: cl.writepending() and self.root or ""
1898 self.hook('pretxnchangegroup', throw=True,
1898 self.hook('pretxnchangegroup', throw=True,
1899 node=hex(cl.node(clstart)), source=srctype,
1899 node=hex(cl.node(clstart)), source=srctype,
1900 url=url, pending=p)
1900 url=url, pending=p)
1901
1901
1902 # make changelog see real files again
1902 # make changelog see real files again
1903 cl.finalize(trp)
1903 cl.finalize(trp)
1904
1904
1905 tr.close()
1905 tr.close()
1906 finally:
1906 finally:
1907 tr.release()
1907 tr.release()
1908 if lock:
1908 if lock:
1909 lock.release()
1909 lock.release()
1910
1910
1911 if changesets > 0:
1911 if changesets > 0:
1912 # forcefully update the on-disk branch cache
1912 # forcefully update the on-disk branch cache
1913 self.ui.debug("updating the branch cache\n")
1913 self.ui.debug("updating the branch cache\n")
1914 self.updatebranchcache()
1914 self.updatebranchcache()
1915 self.hook("changegroup", node=hex(cl.node(clstart)),
1915 self.hook("changegroup", node=hex(cl.node(clstart)),
1916 source=srctype, url=url)
1916 source=srctype, url=url)
1917
1917
1918 for i in xrange(clstart, clend):
1918 for i in xrange(clstart, clend):
1919 self.hook("incoming", node=hex(cl.node(i)),
1919 self.hook("incoming", node=hex(cl.node(i)),
1920 source=srctype, url=url)
1920 source=srctype, url=url)
1921
1921
1922 # never return 0 here:
1922 # never return 0 here:
1923 if dh < 0:
1923 if dh < 0:
1924 return dh - 1
1924 return dh - 1
1925 else:
1925 else:
1926 return dh + 1
1926 return dh + 1
1927
1927
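    # Illustrative decoding of addchangegroup()'s return value (a sketch,
    # not a Mercurial API; 'describechange' is a hypothetical helper):
    #
    #   def describechange(ret):
    #       if ret == 0:
    #           return "no changesets added"
    #       if ret > 1:
    #           return "%d head(s) added" % (ret - 1)        # 2..n
    #       if ret < 0:
    #           return "%d head(s) removed" % (-ret - 1)     # -2..-n
    #       return "changesets added, head count unchanged"  # ret == 1
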
    def stream_in(self, remote, requirements):
        lock = self.lock()
        try:
            fp = remote.stream_out()
            l = fp.readline()
            try:
                resp = int(l)
            except ValueError:
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            if resp == 1:
                raise util.Abort(_('operation forbidden by server'))
            elif resp == 2:
                raise util.Abort(_('locking the remote repository failed'))
            elif resp != 0:
                raise util.Abort(_('the server sent an unknown error code'))
            self.ui.status(_('streaming all changes\n'))
            l = fp.readline()
            try:
                total_files, total_bytes = map(int, l.split(' ', 1))
            except (ValueError, TypeError):
                raise error.ResponseError(
                    _('Unexpected response from remote server:'), l)
            self.ui.status(_('%d files to transfer, %s of data\n') %
                           (total_files, util.bytecount(total_bytes)))
            start = time.time()
            for i in xrange(total_files):
                # XXX doesn't support '\n' or '\r' in filenames
                l = fp.readline()
                try:
                    name, size = l.split('\0', 1)
                    size = int(size)
                except (ValueError, TypeError):
                    raise error.ResponseError(
                        _('Unexpected response from remote server:'), l)
                self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
                # for backwards compat, name was partially encoded
                ofp = self.sopener(store.decodedir(name), 'w')
                for chunk in util.filechunkiter(fp, limit=size):
                    ofp.write(chunk)
                ofp.close()
            elapsed = time.time() - start
            if elapsed <= 0:
                elapsed = 0.001
            self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                           (util.bytecount(total_bytes), elapsed,
                            util.bytecount(total_bytes / elapsed)))

            # new requirements = old non-format requirements + new
            # format-related requirements from the streamed-in repository
            requirements.update(set(self.requirements) - self.supportedformats)
            self._applyrequirements(requirements)
            self._writerequirements()

            self.invalidate()
            return len(self.heads()) + 1
        finally:
            lock.release()

    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads:
            # 'stream' means remote revlog format is revlogv1 only
            if remote.capable('stream'):
                return self.stream_in(remote, set(('revlogv1',)))
            # otherwise, 'streamreqs' contains the remote revlog format
            streamreqs = remote.capable('streamreqs')
            if streamreqs:
                streamreqs = set(streamreqs.split(','))
                # if we support it, stream in and adjust our requirements
                if not streamreqs - self.supportedformats:
                    return self.stream_in(remote, streamreqs)
        return self.pull(remote, heads)

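    # Illustrative sketch (not part of the original class): how the stream
    # negotiation above plays out from a caller's side. 'repo' and 'remote'
    # stand in for real local-repo and peer objects.
    #
    #   repo.clone(remote, stream=True)
    #     -> stream_in(...)    if remote advertises 'stream' (revlogv1), or
    #                          if every format in remote's 'streamreqs' is
    #                          in repo.supportedformats
    #     -> repo.pull(remote) otherwise (and always when heads are given)
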
    def pushkey(self, namespace, key, old, new):
        self.hook('prepushkey', throw=True, namespace=namespace, key=key,
                  old=old, new=new)
        ret = pushkey.push(self, namespace, key, old, new)
        self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
                  ret=ret)
        return ret

    def listkeys(self, namespace):
        self.hook('prelistkeys', throw=True, namespace=namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)

    def savecommitmessage(self, text):
        fp = self.opener('last-message.txt', 'wb')
        try:
            fp.write(text)
        finally:
            fp.close()
        return self.pathto(fp.name[len(self.root)+1:])

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a

def undoname(fn):
    base, name = os.path.split(fn)
    assert name.startswith('journal')
    return os.path.join(base, name.replace('journal', 'undo', 1))

def instance(ui, path, create):
    return localrepository(ui, util.urllocalpath(path), create)

def islocal(path):
    return True
@@ -1,1279 +1,1279 b''
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

# import stuff from node for others to import from revlog
from node import bin, hex, nullid, nullrev
from i18n import _
import ancestor, mdiff, parsers, error, util, dagutil
import struct, zlib, errno

_pack = struct.pack
_unpack = struct.unpack
_compress = zlib.compress
_decompress = zlib.decompress
_sha = util.sha1

# revlog header flags
REVLOGV0 = 0
REVLOGNG = 1
REVLOGNGINLINEDATA = (1 << 16)
REVLOGGENERALDELTA = (1 << 17)
REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
REVLOG_DEFAULT_FORMAT = REVLOGNG
REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA

# revlog index flags
REVIDX_KNOWN_FLAGS = 0

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

RevlogError = error.RevlogError
LookupError = error.LookupError

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    return long(long(offset) << 16 | type)

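# Sanity sketch (illustrative, not part of the original module): the first
# index field multiplexes the 48-bit data-file offset with 16 flag bits in
# a single integer, and getoffset()/gettype() recover the two halves.
assert offset_type(8, 1) == (8 << 16) | 1
assert getoffset(offset_type(8, 1)) == 8
assert gettype(offset_type(8, 1)) == 1
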
nullhash = _sha(nullid)

def hash(text, p1, p2):
    """generate a hash from the given text and its parent hashes

    This hash combines both the current file contents and its history
    in a manner that makes it easy to distinguish nodes with the same
    content in the revision graph.
    """
    # As of now, if one of the parent nodes is null, p2 is null
    if p2 == nullid:
        # deep copy of a hash is faster than creating one
        s = nullhash.copy()
        s.update(p1)
    else:
        # none of the parent nodes are nullid
        l = [p1, p2]
        l.sort()
        s = _sha(l[0])
        s.update(l[1])
    s.update(text)
    return s.digest()

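# Worked property checks (illustrative, not part of the original module):
# parents are sorted before hashing, so parent order never changes a
# nodeid, and a revision with only null parents hashes the two null ids
# followed by the text.
assert hash("data", "a" * 20, "b" * 20) == hash("data", "b" * 20, "a" * 20)
assert hash("data", nullid, nullid) == _sha(nullid + nullid + "data").digest()
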
def compress(text):
    """ generate a possibly-compressed representation of text """
    if not text:
        return ("", text)
    l = len(text)
    bin = None
    if l < 44:
        pass
    elif l > 1000000:
        # zlib makes an internal copy, thus doubling memory usage for
        # large files, so let's do this in pieces
        z = zlib.compressobj()
        p = []
        pos = 0
        while pos < l:
            pos2 = pos + 2**20
            p.append(z.compress(text[pos:pos2]))
            pos = pos2
        p.append(z.flush())
        if sum(map(len, p)) < l:
            bin = "".join(p)
    else:
        bin = _compress(text)
    if bin is None or len(bin) > l:
        if text[0] == '\0':
            return ("", text)
        return ('u', text)
    return ("", bin)

def decompress(bin):
    """ decompress the given input """
    if not bin:
        return bin
    t = bin[0]
    if t == '\0':
        return bin
    if t == 'x':
        return _decompress(bin)
    if t == 'u':
        return bin[1:]
    raise RevlogError(_("unknown compression type %r") % t)

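# Round-trip property (illustrative, not part of the original module):
# compress() returns a (tag, data) pair whose concatenation is what gets
# stored, and decompress() dispatches on the first byte ('x' = zlib
# stream, 'u' = stored verbatim, '\0' or empty input = returned as-is).
assert decompress("".join(compress("plain text"))) == "plain text"
assert decompress("".join(compress("\0binary"))) == "\0binary"
assert decompress("".join(compress("x" * 100000))) == "x" * 100000
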
indexformatv0 = ">4l20s20s20s"
v0shaoffset = 56

class revlogoldio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatv0)

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = _unpack(indexformatv0, cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        # add the magic null revision at -1
        index.append((0, 0, 0, -1, -1, -1, -1, nullid))

        return index, nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise RevlogError(_("index entry flags need RevlogNG"))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return _pack(indexformatv0, *e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = ">Qiiiiii20s12x"
ngshaoffset = 32
versionformat = ">I"

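# Size check (illustrative, not part of the original module): each
# RevlogNG index entry is a fixed 64 bytes -- an 8-byte offset/flags
# word, six 4-byte integers, a 20-byte nodeid and 12 bytes of padding.
assert struct.calcsize(indexformatng) == 64
assert struct.calcsize(versionformat) == 4
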
class revlogio(object):
    def __init__(self):
        self.size = struct.calcsize(indexformatng)

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, None, cache

    def packentry(self, entry, node, version, rev):
        p = _pack(indexformatng, *entry)
        if rev == 0:
            p = _pack(versionformat, version) + p[4:]
        return p

class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.
    """
    def __init__(self, opener, indexfile):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.
        """
        self.indexfile = indexfile
        self.datafile = indexfile[:-2] + ".d"
        self.opener = opener
        self._cache = None
        self._basecache = (0, 0)
        self._chunkcache = (0, '')
        self.index = []
        self._pcache = {}
        self._nodecache = {nullid: nullrev}
        self._nodepos = None

        v = REVLOG_DEFAULT_VERSION
        opts = getattr(opener, 'options', None)
        if opts is not None:
            if 'revlogv1' in opts:
                if 'generaldelta' in opts:
                    v |= REVLOGGENERALDELTA
            else:
                v = 0

        i = ''
        self._initempty = True
        try:
            f = self.opener(self.indexfile)
            i = f.read()
            f.close()
            if len(i) > 0:
                v = struct.unpack(versionformat, i[:4])[0]
                self._initempty = False
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.version = v
        self._inline = v & REVLOGNGINLINEDATA
        self._generaldelta = v & REVLOGGENERALDELTA
        flags = v & ~0xFFFF
        fmt = v & 0xFFFF
        if fmt == REVLOGV0 and flags:
            raise RevlogError(_("index %s unknown flags %#04x for format v0")
                              % (self.indexfile, flags >> 16))
        elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
            raise RevlogError(_("index %s unknown flags %#04x for revlogng")
                              % (self.indexfile, flags >> 16))
        elif fmt > REVLOGNG:
            raise RevlogError(_("index %s unknown format %d")
                              % (self.indexfile, fmt))

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(i, self._inline)
        except (ValueError, IndexError):
            raise RevlogError(_("index %s is corrupted") % (self.indexfile))
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()

    def tip(self):
        return self.node(len(self.index) - 2)
    def __len__(self):
        return len(self.index) - 1
    def __iter__(self):
        for i in xrange(len(self)):
            yield i

    @util.propertycache
    def nodemap(self):
        self.rev(self.node(0))
        return self._nodecache

    def rev(self, node):
        try:
            return self._nodecache[node]
        except KeyError:
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 2
            for r in xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            raise LookupError(node, self.indexfile, _('no node'))

    def node(self, rev):
        return self.index[rev][7]
    def linkrev(self, rev):
        return self.index[rev][4]
    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
    def parentrevs(self, rev):
        return self.index[rev][5:7]
    def start(self, rev):
        return int(self.index[rev][0] >> 16)
    def end(self, rev):
        return self.start(rev) + self.length(rev)
    def length(self, rev):
        return self.index[rev][1]
    def chainbase(self, rev):
        index = self.index
        base = index[rev][3]
        while base != rev:
            rev = base
            base = index[rev][3]
        return base
    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF
    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.revision(self.node(rev))
        return len(t)
    size = rawsize

    def reachable(self, node, stop=None):
        """return the set of all nodes ancestral to a given node, including
        the node itself, stopping when stop is matched"""
        reachable = set((node,))
        visit = [node]
        if stop:
            stopn = self.rev(stop)
        else:
            stopn = 0
        while visit:
            n = visit.pop(0)
            if n == stop:
                continue
            if n == nullid:
                continue
            for p in self.parents(n):
                if self.rev(p) < stopn:
                    continue
                if p not in reachable:
                    reachable.add(p)
                    visit.append(p)
        return reachable

    def ancestors(self, *revs):
        """Generate the ancestors of 'revs' in reverse topological order.

        Yield a sequence of revision numbers starting with the parents
        of each revision in revs, i.e., each revision is *not* considered
        an ancestor of itself. Results are in breadth-first order:
        parents of each rev in revs, then parents of those, etc. Result
        does not include the null revision."""
        visit = list(revs)
        seen = set([nullrev])
        while visit:
            for parent in self.parentrevs(visit.pop(0)):
                if parent not in seen:
                    visit.append(parent)
                    seen.add(parent)
                    yield parent

    def descendants(self, *revs):
        """Generate the descendants of 'revs' in revision order.

        Yield a sequence of revision numbers starting with a child of
        some rev in revs, i.e., each revision is *not* considered a
        descendant of itself. Results are ordered by revision number (a
        topological sort)."""
        first = min(revs)
        if first == nullrev:
            for i in self:
                yield i
            return

        seen = set(revs)
        for i in xrange(first + 1, len(self)):
            for x in self.parentrevs(i):
                if x != nullrev and x in seen:
                    seen.add(i)
                    yield i
                    break

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common.

        More specifically, the second element is a list of nodes N such that
        every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        has = set(self.ancestors(*common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = [r for r in heads if r not in has]
        while visit:
            r = visit.pop(0)
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(r) for r in missing]

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        _common, missing = self.findcommonmissing(common, heads)
        return missing

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid] # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n) # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update([p for p in self.parents(n) if
                                           p != nullid])
                    elif n in heads: # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [n for n in roots if n in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(n) for n in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in xrange(max(lowestrev, 0), highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev: # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [n for n, flag in heads.iteritems() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        ishead = [1] * (count + 1)
        index = self.index
        for r in xrange(count):
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0
        return [r for r in xrange(count) if ishead[r]]

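    # Worked example (illustrative, not part of the original class):
    # headrevs() clears the "is a head" bit of every revision that appears
    # as a parent, so only never-referenced revisions survive. With parent
    # links 1->0, 2->1 and 3->1:
    #
    #     rl.headrevs() == [2, 3]
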
633 def heads(self, start=None, stop=None):
633 def heads(self, start=None, stop=None):
634 """return the list of all nodes that have no children
634 """return the list of all nodes that have no children
635
635
636 if start is specified, only heads that are descendants of
636 if start is specified, only heads that are descendants of
637 start will be returned
637 start will be returned
638 if stop is specified, it will consider all the revs from stop
638 if stop is specified, it will consider all the revs from stop
639 as if they had no children
639 as if they had no children
640 """
640 """
641 if start is None and stop is None:
641 if start is None and stop is None:
642 if not len(self):
642 if not len(self):
643 return [nullid]
643 return [nullid]
644 return [self.node(r) for r in self.headrevs()]
644 return [self.node(r) for r in self.headrevs()]
645
645
646 if start is None:
646 if start is None:
647 start = nullid
647 start = nullid
648 if stop is None:
648 if stop is None:
649 stop = []
649 stop = []
650 stoprevs = set([self.rev(n) for n in stop])
650 stoprevs = set([self.rev(n) for n in stop])
651 startrev = self.rev(start)
651 startrev = self.rev(start)
652 reachable = set((startrev,))
652 reachable = set((startrev,))
653 heads = set((startrev,))
653 heads = set((startrev,))
654
654
655 parentrevs = self.parentrevs
655 parentrevs = self.parentrevs
656 for r in xrange(startrev + 1, len(self)):
656 for r in xrange(startrev + 1, len(self)):
657 for p in parentrevs(r):
657 for p in parentrevs(r):
658 if p in reachable:
658 if p in reachable:
659 if r not in stoprevs:
659 if r not in stoprevs:
660 reachable.add(r)
660 reachable.add(r)
661 heads.add(r)
661 heads.add(r)
662 if p in heads and p not in stoprevs:
662 if p in heads and p not in stoprevs:
663 heads.remove(p)
663 heads.remove(p)
664
664
665 return [self.node(r) for r in heads]
665 return [self.node(r) for r in heads]
666
666
667 def children(self, node):
667 def children(self, node):
668 """find the children of a given node"""
668 """find the children of a given node"""
669 c = []
669 c = []
670 p = self.rev(node)
670 p = self.rev(node)
671 for r in range(p + 1, len(self)):
671 for r in range(p + 1, len(self)):
672 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
672 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
673 if prevs:
673 if prevs:
674 for pr in prevs:
674 for pr in prevs:
675 if pr == p:
675 if pr == p:
676 c.append(self.node(r))
676 c.append(self.node(r))
677 elif p == nullrev:
677 elif p == nullrev:
678 c.append(self.node(r))
678 c.append(self.node(r))
679 return c
679 return c
680
680
681 def descendant(self, start, end):
681 def descendant(self, start, end):
682 if start == nullrev:
682 if start == nullrev:
683 return True
683 return True
684 for i in self.descendants(start):
684 for i in self.descendants(start):
685 if i == end:
685 if i == end:
686 return True
686 return True
687 elif i > end:
687 elif i > end:
688 break
688 break
689 return False
689 return False
690
690
691 def ancestor(self, a, b):
691 def ancestor(self, a, b):
692 """calculate the least common ancestor of nodes a and b"""
692 """calculate the least common ancestor of nodes a and b"""
693
693
694 # fast path, check if it is a descendant
694 # fast path, check if it is a descendant
695 a, b = self.rev(a), self.rev(b)
695 a, b = self.rev(a), self.rev(b)
696 start, end = sorted((a, b))
696 start, end = sorted((a, b))
697 if self.descendant(start, end):
697 if self.descendant(start, end):
698 return self.node(start)
698 return self.node(start)
699
699
700 def parents(rev):
700 def parents(rev):
701 return [p for p in self.parentrevs(rev) if p != nullrev]
701 return [p for p in self.parentrevs(rev) if p != nullrev]
702
702
703 c = ancestor.ancestor(a, b, parents)
703 c = ancestor.ancestor(a, b, parents)
704 if c is None:
704 if c is None:
705 return nullid
705 return nullid
706
706
707 return self.node(c)
707 return self.node(c)
708
708
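# Illustrative sketch (not part of revlog.py): ancestor() above first tries
# the cheap "one side is a descendant of the other" test, then falls back
# to a full common-ancestor search. Because a parent always has a lower
# revision number than its children, the common ancestor with the highest
# rev has no descendant in the common set, so it is a valid answer. Toy
# names below are hypothetical:

def toy_ancestor(parents, a, b):
    def ancestorset(r):
        seen, stack = set(), [r]
        while stack:
            cur = stack.pop()
            if cur != -1 and cur not in seen:
                seen.add(cur)
                stack.extend(parents[cur])
        return seen
    common = ancestorset(a) & ancestorset(b)
    return max(common) if common else -1

# 0 <- 1 <- 2 and 0 <- 3: the only common ancestor of 2 and 3 is 0
assert toy_ancestor([[-1], [0], [1], [0]], 2, 3) == 0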
709 def _match(self, id):
709 def _match(self, id):
710 if isinstance(id, (long, int)):
710 if isinstance(id, (long, int)):
711 # rev
711 # rev
712 return self.node(id)
712 return self.node(id)
713 if len(id) == 20:
713 if len(id) == 20:
714 # possibly a binary node
714 # possibly a binary node
715 # odds of a binary node being all hex in ASCII are 1 in 10**25
715 # odds of a binary node being all hex in ASCII are 1 in 10**25
716 try:
716 try:
717 node = id
717 node = id
718 self.rev(node) # quick search the index
718 self.rev(node) # quick search the index
719 return node
719 return node
720 except LookupError:
720 except LookupError:
721 pass # may be partial hex id
721 pass # may be partial hex id
722 try:
722 try:
723 # str(rev)
723 # str(rev)
724 rev = int(id)
724 rev = int(id)
725 if str(rev) != id:
725 if str(rev) != id:
726 raise ValueError
726 raise ValueError
727 if rev < 0:
727 if rev < 0:
728 rev = len(self) + rev
728 rev = len(self) + rev
729 if rev < 0 or rev >= len(self):
729 if rev < 0 or rev >= len(self):
730 raise ValueError
730 raise ValueError
731 return self.node(rev)
731 return self.node(rev)
732 except (ValueError, OverflowError):
732 except (ValueError, OverflowError):
733 pass
733 pass
734 if len(id) == 40:
734 if len(id) == 40:
735 try:
735 try:
736 # a full hex nodeid?
736 # a full hex nodeid?
737 node = bin(id)
737 node = bin(id)
738 self.rev(node)
738 self.rev(node)
739 return node
739 return node
740 except (TypeError, LookupError):
740 except (TypeError, LookupError):
741 pass
741 pass
742
742
743 def _partialmatch(self, id):
743 def _partialmatch(self, id):
744 if id in self._pcache:
744 if id in self._pcache:
745 return self._pcache[id]
745 return self._pcache[id]
746
746
747 if len(id) < 40:
747 if len(id) < 40:
748 try:
748 try:
749 # hex(node)[:...]
749 # hex(node)[:...]
750 l = len(id) // 2 # grab an even number of digits
750 l = len(id) // 2 # grab an even number of digits
751 prefix = bin(id[:l * 2])
751 prefix = bin(id[:l * 2])
752 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
752 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
753 nl = [n for n in nl if hex(n).startswith(id)]
753 nl = [n for n in nl if hex(n).startswith(id)]
754 if len(nl) > 0:
754 if len(nl) > 0:
755 if len(nl) == 1:
755 if len(nl) == 1:
756 self._pcache[id] = nl[0]
756 self._pcache[id] = nl[0]
757 return nl[0]
757 return nl[0]
758 raise LookupError(id, self.indexfile,
758 raise LookupError(id, self.indexfile,
759 _('ambiguous identifier'))
759 _('ambiguous identifier'))
760 return None
760 return None
761 except TypeError:
761 except TypeError:
762 pass
762 pass
763
763
764 def lookup(self, id):
764 def lookup(self, id):
765 """locate a node based on:
765 """locate a node based on:
766 - revision number or str(revision number)
766 - revision number or str(revision number)
767 - nodeid or subset of hex nodeid
767 - nodeid or subset of hex nodeid
768 """
768 """
769 n = self._match(id)
769 n = self._match(id)
770 if n is not None:
770 if n is not None:
771 return n
771 return n
772 n = self._partialmatch(id)
772 n = self._partialmatch(id)
773 if n:
773 if n:
774 return n
774 return n
775
775
776 raise LookupError(id, self.indexfile, _('no match found'))
776 raise LookupError(id, self.indexfile, _('no match found'))
777
777
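# Illustrative sketch (not part of revlog.py): the prefix half of lookup()
# boils down to "exactly one node may start with the given hex digits".
# A hypothetical standalone version over plain hex strings:

def toy_partialmatch(hexnodes, prefix):
    matches = [n for n in hexnodes if n.startswith(prefix)]
    if not matches:
        raise LookupError('no match found: %s' % prefix)
    if len(matches) > 1:
        raise LookupError('ambiguous identifier: %s' % prefix)
    return matches[0]

nodes = ['a1b2c3', 'a1d4e5', 'ff0011']
assert toy_partialmatch(nodes, 'ff') == 'ff0011'
# toy_partialmatch(nodes, 'a1') would raise: two nodes share that prefix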
778 def cmp(self, node, text):
778 def cmp(self, node, text):
779 """compare text with a given file revision
779 """compare text with a given file revision
780
780
781 returns True if text is different from what is stored.
781 returns True if text is different from what is stored.
782 """
782 """
783 p1, p2 = self.parents(node)
783 p1, p2 = self.parents(node)
784 return hash(text, p1, p2) != node
784 return hash(text, p1, p2) != node
785
785
786 def _addchunk(self, offset, data):
786 def _addchunk(self, offset, data):
787 o, d = self._chunkcache
787 o, d = self._chunkcache
788 # try to add to existing cache
788 # try to add to existing cache
789 if o + len(d) == offset and len(d) + len(data) < _chunksize:
789 if o + len(d) == offset and len(d) + len(data) < _chunksize:
790 self._chunkcache = o, d + data
790 self._chunkcache = o, d + data
791 else:
791 else:
792 self._chunkcache = offset, data
792 self._chunkcache = offset, data
793
793
794 def _loadchunk(self, offset, length):
794 def _loadchunk(self, offset, length):
795 if self._inline:
795 if self._inline:
796 df = self.opener(self.indexfile)
796 df = self.opener(self.indexfile)
797 else:
797 else:
798 df = self.opener(self.datafile)
798 df = self.opener(self.datafile)
799
799
800 readahead = max(65536, length)
800 readahead = max(65536, length)
801 df.seek(offset)
801 df.seek(offset)
802 d = df.read(readahead)
802 d = df.read(readahead)
803 self._addchunk(offset, d)
803 self._addchunk(offset, d)
804 if readahead > length:
804 if readahead > length:
805 return d[:length]
805 return d[:length]
806 return d
806 return d
807
807
808 def _getchunk(self, offset, length):
808 def _getchunk(self, offset, length):
809 o, d = self._chunkcache
809 o, d = self._chunkcache
810 l = len(d)
810 l = len(d)
811
811
812 # is it in the cache?
812 # is it in the cache?
813 cachestart = offset - o
813 cachestart = offset - o
814 cacheend = cachestart + length
814 cacheend = cachestart + length
815 if cachestart >= 0 and cacheend <= l:
815 if cachestart >= 0 and cacheend <= l:
816 if cachestart == 0 and cacheend == l:
816 if cachestart == 0 and cacheend == l:
817 return d # avoid a copy
817 return d # avoid a copy
818 return d[cachestart:cacheend]
818 return d[cachestart:cacheend]
819
819
820 return self._loadchunk(offset, length)
820 return self._loadchunk(offset, length)
821
821
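# Illustrative sketch (not part of revlog.py): the chunk cache above is a
# single contiguous window (offset, data); a read is served from it only
# when it falls entirely inside that window, otherwise _loadchunk() refills
# it with at least 64kB of readahead. Hypothetical helper:

def cached_slice(winoffset, windata, offset, length):
    start = offset - winoffset
    end = start + length
    if start >= 0 and end <= len(windata):
        return windata[start:end]
    return None  # miss: caller falls back to reading from disk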
822 def _chunkraw(self, startrev, endrev):
822 def _chunkraw(self, startrev, endrev):
823 start = self.start(startrev)
823 start = self.start(startrev)
824 length = self.end(endrev) - start
824 length = self.end(endrev) - start
825 if self._inline:
825 if self._inline:
826 start += (startrev + 1) * self._io.size
826 start += (startrev + 1) * self._io.size
827 return self._getchunk(start, length)
827 return self._getchunk(start, length)
828
828
829 def _chunk(self, rev):
829 def _chunk(self, rev):
830 return decompress(self._chunkraw(rev, rev))
830 return decompress(self._chunkraw(rev, rev))
831
831
832 def _chunkbase(self, rev):
832 def _chunkbase(self, rev):
833 return self._chunk(rev)
833 return self._chunk(rev)
834
834
835 def _chunkclear(self):
835 def _chunkclear(self):
836 self._chunkcache = (0, '')
836 self._chunkcache = (0, '')
837
837
838 def deltaparent(self, rev):
838 def deltaparent(self, rev):
839 """return deltaparent of the given revision"""
839 """return deltaparent of the given revision"""
840 base = self.index[rev][3]
840 base = self.index[rev][3]
841 if base == rev:
841 if base == rev:
842 return nullrev
842 return nullrev
843 elif self._generaldelta:
843 elif self._generaldelta:
844 return base
844 return base
845 else:
845 else:
846 return rev - 1
846 return rev - 1
847
847
848 def revdiff(self, rev1, rev2):
848 def revdiff(self, rev1, rev2):
849 """return or calculate a delta between two revisions"""
849 """return or calculate a delta between two revisions"""
850 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
850 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
851 return self._chunk(rev2)
851 return self._chunk(rev2)
852
852
853 return mdiff.textdiff(self.revision(self.node(rev1)),
853 return mdiff.textdiff(self.revision(self.node(rev1)),
854 self.revision(self.node(rev2)))
854 self.revision(self.node(rev2)))
855
855
856 def revision(self, node):
856 def revision(self, node):
857 """return an uncompressed revision of a given node"""
857 """return an uncompressed revision of a given node"""
858 cachedrev = None
858 cachedrev = None
859 if node == nullid:
859 if node == nullid:
860 return ""
860 return ""
861 if self._cache:
861 if self._cache:
862 if self._cache[0] == node:
862 if self._cache[0] == node:
863 return self._cache[2]
863 return self._cache[2]
864 cachedrev = self._cache[1]
864 cachedrev = self._cache[1]
865
865
866 # look up what we need to read
866 # look up what we need to read
867 text = None
867 text = None
868 rev = self.rev(node)
868 rev = self.rev(node)
869
869
870 # check rev flags
870 # check rev flags
871 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
871 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
872 raise RevlogError(_('incompatible revision flag %x') %
872 raise RevlogError(_('incompatible revision flag %x') %
873 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
873 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
874
874
875 # build delta chain
875 # build delta chain
876 chain = []
876 chain = []
877 index = self.index # for performance
877 index = self.index # for performance
878 generaldelta = self._generaldelta
878 generaldelta = self._generaldelta
879 iterrev = rev
879 iterrev = rev
880 e = index[iterrev]
880 e = index[iterrev]
881 while iterrev != e[3] and iterrev != cachedrev:
881 while iterrev != e[3] and iterrev != cachedrev:
882 chain.append(iterrev)
882 chain.append(iterrev)
883 if generaldelta:
883 if generaldelta:
884 iterrev = e[3]
884 iterrev = e[3]
885 else:
885 else:
886 iterrev -= 1
886 iterrev -= 1
887 e = index[iterrev]
887 e = index[iterrev]
888 chain.reverse()
888 chain.reverse()
889 base = iterrev
889 base = iterrev
890
890
891 if iterrev == cachedrev:
891 if iterrev == cachedrev:
892 # cache hit
892 # cache hit
893 text = self._cache[2]
893 text = self._cache[2]
894
894
895 # drop cache to save memory
895 # drop cache to save memory
896 self._cache = None
896 self._cache = None
897
897
898 self._chunkraw(base, rev)
898 self._chunkraw(base, rev)
899 if text is None:
899 if text is None:
900 text = self._chunkbase(base)
900 text = self._chunkbase(base)
901
901
902 bins = [self._chunk(r) for r in chain]
902 bins = [self._chunk(r) for r in chain]
903 text = mdiff.patches(text, bins)
903 text = mdiff.patches(text, bins)
904
904
905 text = self._checkhash(text, node, rev)
905 text = self._checkhash(text, node, rev)
906
906
907 self._cache = (node, rev, text)
907 self._cache = (node, rev, text)
908 return text
908 return text
909
909
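# Illustrative sketch (not part of revlog.py): revision() above walks back
# to the chain base (a full snapshot), then applies the stacked deltas
# oldest first. The shape of the algorithm with a generic patch function
# (all names hypothetical; mdiff.patches plays this role in the real code):

def toy_revision(store, base_of, rev, patch):
    # base_of[r] == r means r is stored as a full snapshot
    chain = []
    while base_of[rev] != rev:
        chain.append(rev)
        rev = base_of[rev]
    text = store[rev]              # the snapshot at the chain base
    for r in reversed(chain):      # apply deltas oldest first
        text = patch(text, store[r])
    return text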
910 def _checkhash(self, text, node, rev):
910 def _checkhash(self, text, node, rev):
911 p1, p2 = self.parents(node)
911 p1, p2 = self.parents(node)
912 if node != hash(text, p1, p2):
912 if node != hash(text, p1, p2):
913 raise RevlogError(_("integrity check failed on %s:%d")
913 raise RevlogError(_("integrity check failed on %s:%d")
914 % (self.indexfile, rev))
914 % (self.indexfile, rev))
915 return text
915 return text
916
916
917 def checkinlinesize(self, tr, fp=None):
917 def checkinlinesize(self, tr, fp=None):
918 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
918 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
919 return
919 return
920
920
921 trinfo = tr.find(self.indexfile)
921 trinfo = tr.find(self.indexfile)
922 if trinfo is None:
922 if trinfo is None:
923 raise RevlogError(_("%s not found in the transaction")
923 raise RevlogError(_("%s not found in the transaction")
924 % self.indexfile)
924 % self.indexfile)
925
925
926 trindex = trinfo[2]
926 trindex = trinfo[2]
927 dataoff = self.start(trindex)
927 dataoff = self.start(trindex)
928
928
929 tr.add(self.datafile, dataoff)
929 tr.add(self.datafile, dataoff)
930
930
931 if fp:
931 if fp:
932 fp.flush()
932 fp.flush()
933 fp.close()
933 fp.close()
934
934
935 df = self.opener(self.datafile, 'w')
935 df = self.opener(self.datafile, 'w')
936 try:
936 try:
937 for r in self:
937 for r in self:
938 df.write(self._chunkraw(r, r))
938 df.write(self._chunkraw(r, r))
939 finally:
939 finally:
940 df.close()
940 df.close()
941
941
942 fp = self.opener(self.indexfile, 'w', atomictemp=True)
942 fp = self.opener(self.indexfile, 'w', atomictemp=True)
943 self.version &= ~(REVLOGNGINLINEDATA)
943 self.version &= ~(REVLOGNGINLINEDATA)
944 self._inline = False
944 self._inline = False
945 for i in self:
945 for i in self:
946 e = self._io.packentry(self.index[i], self.node, self.version, i)
946 e = self._io.packentry(self.index[i], self.node, self.version, i)
947 fp.write(e)
947 fp.write(e)
948
948
949 # if we don't call rename, the temp file will never replace the
949 # if we don't call close, the temp file will never replace the
950 # real index
950 # real index
951 fp.rename()
951 fp.close()
952
952
953 tr.replace(self.indexfile, trindex * self._io.size)
953 tr.replace(self.indexfile, trindex * self._io.size)
954 self._chunkclear()
954 self._chunkclear()
955
955
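# Illustrative sketch (not Mercurial's util.atomictempfile): the index
# rewrite above relies on close() renaming the temporary file into place,
# which is the behaviour this changeset introduces. A minimal POSIX-only
# version of the idea:

import os, tempfile

class toyatomictempfile(object):
    def __init__(self, name):
        self._name = name
        fd, self._tempname = tempfile.mkstemp(
            prefix='.%s-' % os.path.basename(name),
            dir=os.path.dirname(name) or '.')
        self._fp = os.fdopen(fd, 'wb')
        self.write = self._fp.write

    def close(self):
        # flush everything, then atomically replace the target;
        # until the rename happens the real file is untouched
        self._fp.close()
        os.rename(self._tempname, self._name)

    def discard(self):
        # abandon the pending contents, leaving the target as it was
        self._fp.close()
        os.unlink(self._tempname)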
956 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
956 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
957 """add a revision to the log
957 """add a revision to the log
958
958
959 text - the revision data to add
959 text - the revision data to add
960 transaction - the transaction object used for rollback
960 transaction - the transaction object used for rollback
961 link - the linkrev data to add
961 link - the linkrev data to add
962 p1, p2 - the parent nodeids of the revision
962 p1, p2 - the parent nodeids of the revision
963 cachedelta - an optional precomputed delta
963 cachedelta - an optional precomputed delta
964 """
964 """
965 node = hash(text, p1, p2)
965 node = hash(text, p1, p2)
966 if node in self.nodemap:
966 if node in self.nodemap:
967 return node
967 return node
968
968
969 dfh = None
969 dfh = None
970 if not self._inline:
970 if not self._inline:
971 dfh = self.opener(self.datafile, "a")
971 dfh = self.opener(self.datafile, "a")
972 ifh = self.opener(self.indexfile, "a+")
972 ifh = self.opener(self.indexfile, "a+")
973 try:
973 try:
974 return self._addrevision(node, text, transaction, link, p1, p2,
974 return self._addrevision(node, text, transaction, link, p1, p2,
975 cachedelta, ifh, dfh)
975 cachedelta, ifh, dfh)
976 finally:
976 finally:
977 if dfh:
977 if dfh:
978 dfh.close()
978 dfh.close()
979 ifh.close()
979 ifh.close()
980
980
981 def _addrevision(self, node, text, transaction, link, p1, p2,
981 def _addrevision(self, node, text, transaction, link, p1, p2,
982 cachedelta, ifh, dfh):
982 cachedelta, ifh, dfh):
983 """internal function to add revisions to the log
983 """internal function to add revisions to the log
984
984
985 see addrevision for argument descriptions.
985 see addrevision for argument descriptions.
986 invariants:
986 invariants:
987 - text is optional (can be None); if not set, cachedelta must be set.
987 - text is optional (can be None); if not set, cachedelta must be set.
988 if both are set, they must correspond to each other.
988 if both are set, they must correspond to each other.
989 """
989 """
990 btext = [text]
990 btext = [text]
991 def buildtext():
991 def buildtext():
992 if btext[0] is not None:
992 if btext[0] is not None:
993 return btext[0]
993 return btext[0]
994 # flush any pending writes here so revision() can read them back
994 # flush any pending writes here so revision() can read them back
995 if dfh:
995 if dfh:
996 dfh.flush()
996 dfh.flush()
997 ifh.flush()
997 ifh.flush()
998 basetext = self.revision(self.node(cachedelta[0]))
998 basetext = self.revision(self.node(cachedelta[0]))
999 btext[0] = mdiff.patch(basetext, cachedelta[1])
999 btext[0] = mdiff.patch(basetext, cachedelta[1])
1000 chk = hash(btext[0], p1, p2)
1000 chk = hash(btext[0], p1, p2)
1001 if chk != node:
1001 if chk != node:
1002 raise RevlogError(_("consistency error in delta"))
1002 raise RevlogError(_("consistency error in delta"))
1003 return btext[0]
1003 return btext[0]
1004
1004
1005 def builddelta(rev):
1005 def builddelta(rev):
1006 # can we use the cached delta?
1006 # can we use the cached delta?
1007 if cachedelta and cachedelta[0] == rev:
1007 if cachedelta and cachedelta[0] == rev:
1008 delta = cachedelta[1]
1008 delta = cachedelta[1]
1009 else:
1009 else:
1010 t = buildtext()
1010 t = buildtext()
1011 ptext = self.revision(self.node(rev))
1011 ptext = self.revision(self.node(rev))
1012 delta = mdiff.textdiff(ptext, t)
1012 delta = mdiff.textdiff(ptext, t)
1013 data = compress(delta)
1013 data = compress(delta)
1014 l = len(data[1]) + len(data[0])
1014 l = len(data[1]) + len(data[0])
1015 if basecache[0] == rev:
1015 if basecache[0] == rev:
1016 chainbase = basecache[1]
1016 chainbase = basecache[1]
1017 else:
1017 else:
1018 chainbase = self.chainbase(rev)
1018 chainbase = self.chainbase(rev)
1019 dist = l + offset - self.start(chainbase)
1019 dist = l + offset - self.start(chainbase)
1020 if self._generaldelta:
1020 if self._generaldelta:
1021 base = rev
1021 base = rev
1022 else:
1022 else:
1023 base = chainbase
1023 base = chainbase
1024 return dist, l, data, base, chainbase
1024 return dist, l, data, base, chainbase
1025
1025
1026 curr = len(self)
1026 curr = len(self)
1027 prev = curr - 1
1027 prev = curr - 1
1028 base = chainbase = curr
1028 base = chainbase = curr
1029 offset = self.end(prev)
1029 offset = self.end(prev)
1030 flags = 0
1030 flags = 0
1031 d = None
1031 d = None
1032 basecache = self._basecache
1032 basecache = self._basecache
1033 p1r, p2r = self.rev(p1), self.rev(p2)
1033 p1r, p2r = self.rev(p1), self.rev(p2)
1034
1034
1035 # should we try to build a delta?
1035 # should we try to build a delta?
1036 if prev != nullrev:
1036 if prev != nullrev:
1037 if self._generaldelta:
1037 if self._generaldelta:
1038 if p1r >= basecache[1]:
1038 if p1r >= basecache[1]:
1039 d = builddelta(p1r)
1039 d = builddelta(p1r)
1040 elif p2r >= basecache[1]:
1040 elif p2r >= basecache[1]:
1041 d = builddelta(p2r)
1041 d = builddelta(p2r)
1042 else:
1042 else:
1043 d = builddelta(prev)
1043 d = builddelta(prev)
1044 else:
1044 else:
1045 d = builddelta(prev)
1045 d = builddelta(prev)
1046 dist, l, data, base, chainbase = d
1046 dist, l, data, base, chainbase = d
1047
1047
1048 # full versions are inserted when the needed deltas
1048 # full versions are inserted when the needed deltas
1049 # become comparable to the uncompressed text
1049 # become comparable to the uncompressed text
1050 if text is None:
1050 if text is None:
1051 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1051 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1052 cachedelta[1])
1052 cachedelta[1])
1053 else:
1053 else:
1054 textlen = len(text)
1054 textlen = len(text)
1055 if d is None or dist > textlen * 2:
1055 if d is None or dist > textlen * 2:
1056 text = buildtext()
1056 text = buildtext()
1057 data = compress(text)
1057 data = compress(text)
1058 l = len(data[1]) + len(data[0])
1058 l = len(data[1]) + len(data[0])
1059 base = chainbase = curr
1059 base = chainbase = curr
1060
1060
1061 e = (offset_type(offset, flags), l, textlen,
1061 e = (offset_type(offset, flags), l, textlen,
1062 base, link, p1r, p2r, node)
1062 base, link, p1r, p2r, node)
1063 self.index.insert(-1, e)
1063 self.index.insert(-1, e)
1064 self.nodemap[node] = curr
1064 self.nodemap[node] = curr
1065
1065
1066 entry = self._io.packentry(e, self.node, self.version, curr)
1066 entry = self._io.packentry(e, self.node, self.version, curr)
1067 if not self._inline:
1067 if not self._inline:
1068 transaction.add(self.datafile, offset)
1068 transaction.add(self.datafile, offset)
1069 transaction.add(self.indexfile, curr * len(entry))
1069 transaction.add(self.indexfile, curr * len(entry))
1070 if data[0]:
1070 if data[0]:
1071 dfh.write(data[0])
1071 dfh.write(data[0])
1072 dfh.write(data[1])
1072 dfh.write(data[1])
1073 dfh.flush()
1073 dfh.flush()
1074 ifh.write(entry)
1074 ifh.write(entry)
1075 else:
1075 else:
1076 offset += curr * self._io.size
1076 offset += curr * self._io.size
1077 transaction.add(self.indexfile, offset, curr)
1077 transaction.add(self.indexfile, offset, curr)
1078 ifh.write(entry)
1078 ifh.write(entry)
1079 ifh.write(data[0])
1079 ifh.write(data[0])
1080 ifh.write(data[1])
1080 ifh.write(data[1])
1081 self.checkinlinesize(transaction, ifh)
1081 self.checkinlinesize(transaction, ifh)
1082
1082
1083 if type(text) == str: # only accept immutable objects
1083 if type(text) == str: # only accept immutable objects
1084 self._cache = (node, curr, text)
1084 self._cache = (node, curr, text)
1085 self._basecache = (curr, chainbase)
1085 self._basecache = (curr, chainbase)
1086 return node
1086 return node
1087
1087
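# Illustrative sketch (not part of revlog.py): the "dist > textlen * 2"
# test above bounds how much compressed chain data a reader must fetch
# before it can reconstruct a revision; once the chain since the last
# snapshot outgrows twice the full text, a fresh snapshot is stored
# instead of another delta. Hypothetical predicate:

def should_snapshot(chainsize, textlen):
    # chainsize: compressed bytes from the chain base through the new
    # delta (l + offset - self.start(chainbase) in the real code)
    return chainsize > 2 * textlen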
1088 def group(self, nodelist, bundler, reorder=None):
1088 def group(self, nodelist, bundler, reorder=None):
1089 """Calculate a delta group, yielding a sequence of changegroup chunks
1089 """Calculate a delta group, yielding a sequence of changegroup chunks
1090 (strings).
1090 (strings).
1091
1091
1092 Given a list of changeset revs, return a set of deltas and
1092 Given a list of changeset revs, return a set of deltas and
1093 metadata corresponding to nodes. The first delta is
1093 metadata corresponding to nodes. The first delta is
1094 first parent(nodelist[0]) -> nodelist[0], the receiver is
1094 first parent(nodelist[0]) -> nodelist[0], the receiver is
1095 guaranteed to have this parent as it has all history before
1095 guaranteed to have this parent as it has all history before
1096 these changesets. When firstparent is nullrev, the
1096 these changesets. When firstparent is nullrev, the
1097 changegroup starts with a full revision.
1097 changegroup starts with a full revision.
1098 """
1098 """
1099
1099
1100 # if we don't have any revisions touched by these changesets, bail
1100 # if we don't have any revisions touched by these changesets, bail
1101 if len(nodelist) == 0:
1101 if len(nodelist) == 0:
1102 yield bundler.close()
1102 yield bundler.close()
1103 return
1103 return
1104
1104
1105 # for generaldelta revlogs, we linearize the revs; this will both be
1105 # for generaldelta revlogs, we linearize the revs; this will both be
1106 # much quicker and generate a much smaller bundle
1106 # much quicker and generate a much smaller bundle
1107 if (self._generaldelta and reorder is not False) or reorder:
1107 if (self._generaldelta and reorder is not False) or reorder:
1108 dag = dagutil.revlogdag(self)
1108 dag = dagutil.revlogdag(self)
1109 revs = set(self.rev(n) for n in nodelist)
1109 revs = set(self.rev(n) for n in nodelist)
1110 revs = dag.linearize(revs)
1110 revs = dag.linearize(revs)
1111 else:
1111 else:
1112 revs = sorted([self.rev(n) for n in nodelist])
1112 revs = sorted([self.rev(n) for n in nodelist])
1113
1113
1114 # add the parent of the first rev
1114 # add the parent of the first rev
1115 p = self.parentrevs(revs[0])[0]
1115 p = self.parentrevs(revs[0])[0]
1116 revs.insert(0, p)
1116 revs.insert(0, p)
1117
1117
1118 # build deltas
1118 # build deltas
1119 for r in xrange(len(revs) - 1):
1119 for r in xrange(len(revs) - 1):
1120 prev, curr = revs[r], revs[r + 1]
1120 prev, curr = revs[r], revs[r + 1]
1121 for c in bundler.revchunk(self, curr, prev):
1121 for c in bundler.revchunk(self, curr, prev):
1122 yield c
1122 yield c
1123
1123
1124 yield bundler.close()
1124 yield bundler.close()
1125
1125
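# Illustrative sketch (not part of revlog.py): after prepending the first
# parent, group() emits one delta per consecutive pair of revisions, so
# the base of every chunk is already known to the receiver. Hypothetical
# reduction:

def toy_group(revs, firstparent, makedelta):
    revs = [firstparent] + list(revs)
    for prev, curr in zip(revs, revs[1:]):
        yield makedelta(prev, curr)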
1126 def addgroup(self, bundle, linkmapper, transaction):
1126 def addgroup(self, bundle, linkmapper, transaction):
1127 """
1127 """
1128 add a delta group
1128 add a delta group
1129
1129
1130 given a set of deltas, add them to the revision log. the
1130 given a set of deltas, add them to the revision log. the
1131 first delta is against its parent, which should be in our
1131 first delta is against its parent, which should be in our
1132 log, the rest are against the previous delta.
1132 log, the rest are against the previous delta.
1133 """
1133 """
1134
1134
1135 # track the base of the current delta log
1135 # track the base of the current delta log
1136 node = None
1136 node = None
1137
1137
1138 r = len(self)
1138 r = len(self)
1139 end = 0
1139 end = 0
1140 if r:
1140 if r:
1141 end = self.end(r - 1)
1141 end = self.end(r - 1)
1142 ifh = self.opener(self.indexfile, "a+")
1142 ifh = self.opener(self.indexfile, "a+")
1143 isize = r * self._io.size
1143 isize = r * self._io.size
1144 if self._inline:
1144 if self._inline:
1145 transaction.add(self.indexfile, end + isize, r)
1145 transaction.add(self.indexfile, end + isize, r)
1146 dfh = None
1146 dfh = None
1147 else:
1147 else:
1148 transaction.add(self.indexfile, isize, r)
1148 transaction.add(self.indexfile, isize, r)
1149 transaction.add(self.datafile, end)
1149 transaction.add(self.datafile, end)
1150 dfh = self.opener(self.datafile, "a")
1150 dfh = self.opener(self.datafile, "a")
1151
1151
1152 try:
1152 try:
1153 # loop through our set of deltas
1153 # loop through our set of deltas
1154 chain = None
1154 chain = None
1155 while True:
1155 while True:
1156 chunkdata = bundle.deltachunk(chain)
1156 chunkdata = bundle.deltachunk(chain)
1157 if not chunkdata:
1157 if not chunkdata:
1158 break
1158 break
1159 node = chunkdata['node']
1159 node = chunkdata['node']
1160 p1 = chunkdata['p1']
1160 p1 = chunkdata['p1']
1161 p2 = chunkdata['p2']
1161 p2 = chunkdata['p2']
1162 cs = chunkdata['cs']
1162 cs = chunkdata['cs']
1163 deltabase = chunkdata['deltabase']
1163 deltabase = chunkdata['deltabase']
1164 delta = chunkdata['delta']
1164 delta = chunkdata['delta']
1165
1165
1166 link = linkmapper(cs)
1166 link = linkmapper(cs)
1167 if node in self.nodemap:
1167 if node in self.nodemap:
1168 # this can happen if two branches make the same change
1168 # this can happen if two branches make the same change
1169 chain = node
1169 chain = node
1170 continue
1170 continue
1171
1171
1172 for p in (p1, p2):
1172 for p in (p1, p2):
1173 if p not in self.nodemap:
1173 if p not in self.nodemap:
1174 raise LookupError(p, self.indexfile,
1174 raise LookupError(p, self.indexfile,
1175 _('unknown parent'))
1175 _('unknown parent'))
1176
1176
1177 if deltabase not in self.nodemap:
1177 if deltabase not in self.nodemap:
1178 raise LookupError(deltabase, self.indexfile,
1178 raise LookupError(deltabase, self.indexfile,
1179 _('unknown delta base'))
1179 _('unknown delta base'))
1180
1180
1181 baserev = self.rev(deltabase)
1181 baserev = self.rev(deltabase)
1182 chain = self._addrevision(node, None, transaction, link,
1182 chain = self._addrevision(node, None, transaction, link,
1183 p1, p2, (baserev, delta), ifh, dfh)
1183 p1, p2, (baserev, delta), ifh, dfh)
1184 if not dfh and not self._inline:
1184 if not dfh and not self._inline:
1185 # addrevision switched from inline to conventional
1185 # addrevision switched from inline to conventional
1186 # reopen the index
1186 # reopen the index
1187 ifh.close()
1187 ifh.close()
1188 dfh = self.opener(self.datafile, "a")
1188 dfh = self.opener(self.datafile, "a")
1189 ifh = self.opener(self.indexfile, "a")
1189 ifh = self.opener(self.indexfile, "a")
1190 finally:
1190 finally:
1191 if dfh:
1191 if dfh:
1192 dfh.close()
1192 dfh.close()
1193 ifh.close()
1193 ifh.close()
1194
1194
1195 return node
1195 return node
1196
1196
1197 def strip(self, minlink, transaction):
1197 def strip(self, minlink, transaction):
1198 """truncate the revlog on the first revision with a linkrev >= minlink
1198 """truncate the revlog on the first revision with a linkrev >= minlink
1199
1199
1200 This function is called when we're stripping revision minlink and
1200 This function is called when we're stripping revision minlink and
1201 its descendants from the repository.
1201 its descendants from the repository.
1202
1202
1203 We have to remove all revisions with linkrev >= minlink, because
1203 We have to remove all revisions with linkrev >= minlink, because
1204 the equivalent changelog revisions will be renumbered after the
1204 the equivalent changelog revisions will be renumbered after the
1205 strip.
1205 strip.
1206
1206
1207 So we truncate the revlog on the first of these revisions, and
1207 So we truncate the revlog on the first of these revisions, and
1208 trust that the caller has saved the revisions that shouldn't be
1208 trust that the caller has saved the revisions that shouldn't be
1209 removed and that it'll re-add them after this truncation.
1209 removed and that it'll re-add them after this truncation.
1210 """
1210 """
1211 if len(self) == 0:
1211 if len(self) == 0:
1212 return
1212 return
1213
1213
1214 for rev in self:
1214 for rev in self:
1215 if self.index[rev][4] >= minlink:
1215 if self.index[rev][4] >= minlink:
1216 break
1216 break
1217 else:
1217 else:
1218 return
1218 return
1219
1219
1220 # first truncate the files on disk
1220 # first truncate the files on disk
1221 end = self.start(rev)
1221 end = self.start(rev)
1222 if not self._inline:
1222 if not self._inline:
1223 transaction.add(self.datafile, end)
1223 transaction.add(self.datafile, end)
1224 end = rev * self._io.size
1224 end = rev * self._io.size
1225 else:
1225 else:
1226 end += rev * self._io.size
1226 end += rev * self._io.size
1227
1227
1228 transaction.add(self.indexfile, end)
1228 transaction.add(self.indexfile, end)
1229
1229
1230 # then reset internal state in memory to forget those revisions
1230 # then reset internal state in memory to forget those revisions
1231 self._cache = None
1231 self._cache = None
1232 self._chunkclear()
1232 self._chunkclear()
1233 for x in xrange(rev, len(self)):
1233 for x in xrange(rev, len(self)):
1234 del self.nodemap[self.node(x)]
1234 del self.nodemap[self.node(x)]
1235
1235
1236 del self.index[rev:-1]
1236 del self.index[rev:-1]
1237
1237
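# Illustrative sketch (not part of revlog.py): where strip() truncates
# depends on the layout. Separate files are cut independently (data at
# the stripped revision's data offset, index at a fixed record size per
# revision); inline revlogs interleave both in one file. Hypothetical
# helper mirroring the arithmetic above:

def truncation_points(rev, datastart, recordsize, inline):
    if inline:
        return {'index': datastart + rev * recordsize}
    return {'data': datastart, 'index': rev * recordsize}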
1238 def checksize(self):
1238 def checksize(self):
1239 expected = 0
1239 expected = 0
1240 if len(self):
1240 if len(self):
1241 expected = max(0, self.end(len(self) - 1))
1241 expected = max(0, self.end(len(self) - 1))
1242
1242
1243 try:
1243 try:
1244 f = self.opener(self.datafile)
1244 f = self.opener(self.datafile)
1245 f.seek(0, 2)
1245 f.seek(0, 2)
1246 actual = f.tell()
1246 actual = f.tell()
1247 f.close()
1247 f.close()
1248 dd = actual - expected
1248 dd = actual - expected
1249 except IOError, inst:
1249 except IOError, inst:
1250 if inst.errno != errno.ENOENT:
1250 if inst.errno != errno.ENOENT:
1251 raise
1251 raise
1252 dd = 0
1252 dd = 0
1253
1253
1254 try:
1254 try:
1255 f = self.opener(self.indexfile)
1255 f = self.opener(self.indexfile)
1256 f.seek(0, 2)
1256 f.seek(0, 2)
1257 actual = f.tell()
1257 actual = f.tell()
1258 f.close()
1258 f.close()
1259 s = self._io.size
1259 s = self._io.size
1260 i = max(0, actual // s)
1260 i = max(0, actual // s)
1261 di = actual - (i * s)
1261 di = actual - (i * s)
1262 if self._inline:
1262 if self._inline:
1263 databytes = 0
1263 databytes = 0
1264 for r in self:
1264 for r in self:
1265 databytes += max(0, self.length(r))
1265 databytes += max(0, self.length(r))
1266 dd = 0
1266 dd = 0
1267 di = actual - len(self) * s - databytes
1267 di = actual - len(self) * s - databytes
1268 except IOError, inst:
1268 except IOError, inst:
1269 if inst.errno != errno.ENOENT:
1269 if inst.errno != errno.ENOENT:
1270 raise
1270 raise
1271 di = 0
1271 di = 0
1272
1272
1273 return (dd, di)
1273 return (dd, di)
1274
1274
1275 def files(self):
1275 def files(self):
1276 res = [self.indexfile]
1276 res = [self.indexfile]
1277 if not self._inline:
1277 if not self._inline:
1278 res.append(self.datafile)
1278 res.append(self.datafile)
1279 return res
1279 return res
@@ -1,453 +1,453 b''
1 # Copyright (C) 2004, 2005 Canonical Ltd
1 # Copyright (C) 2004, 2005 Canonical Ltd
2 #
2 #
3 # This program is free software; you can redistribute it and/or modify
3 # This program is free software; you can redistribute it and/or modify
4 # it under the terms of the GNU General Public License as published by
4 # it under the terms of the GNU General Public License as published by
5 # the Free Software Foundation; either version 2 of the License, or
5 # the Free Software Foundation; either version 2 of the License, or
6 # (at your option) any later version.
6 # (at your option) any later version.
7 #
7 #
8 # This program is distributed in the hope that it will be useful,
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU General Public License for more details.
11 # GNU General Public License for more details.
12 #
12 #
13 # You should have received a copy of the GNU General Public License
13 # You should have received a copy of the GNU General Public License
14 # along with this program; if not, write to the Free Software
14 # along with this program; if not, write to the Free Software
15 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
15 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16
16
17 # mbp: "you know that thing where cvs gives you conflict markers?"
17 # mbp: "you know that thing where cvs gives you conflict markers?"
18 # s: "i hate that."
18 # s: "i hate that."
19
19
20 from i18n import _
20 from i18n import _
21 import scmutil, util, mdiff
21 import scmutil, util, mdiff
22 import sys, os
22 import sys, os
23
23
24 class CantReprocessAndShowBase(Exception):
24 class CantReprocessAndShowBase(Exception):
25 pass
25 pass
26
26
27 def intersect(ra, rb):
27 def intersect(ra, rb):
28 """Given two ranges return the range where they intersect or None.
28 """Given two ranges return the range where they intersect or None.
29
29
30 >>> intersect((0, 10), (0, 6))
30 >>> intersect((0, 10), (0, 6))
31 (0, 6)
31 (0, 6)
32 >>> intersect((0, 10), (5, 15))
32 >>> intersect((0, 10), (5, 15))
33 (5, 10)
33 (5, 10)
34 >>> intersect((0, 10), (10, 15))
34 >>> intersect((0, 10), (10, 15))
35 >>> intersect((0, 9), (10, 15))
35 >>> intersect((0, 9), (10, 15))
36 >>> intersect((0, 9), (7, 15))
36 >>> intersect((0, 9), (7, 15))
37 (7, 9)
37 (7, 9)
38 """
38 """
39 assert ra[0] <= ra[1]
39 assert ra[0] <= ra[1]
40 assert rb[0] <= rb[1]
40 assert rb[0] <= rb[1]
41
41
42 sa = max(ra[0], rb[0])
42 sa = max(ra[0], rb[0])
43 sb = min(ra[1], rb[1])
43 sb = min(ra[1], rb[1])
44 if sa < sb:
44 if sa < sb:
45 return sa, sb
45 return sa, sb
46 else:
46 else:
47 return None
47 return None
48
48
49 def compare_range(a, astart, aend, b, bstart, bend):
49 def compare_range(a, astart, aend, b, bstart, bend):
50 """Compare a[astart:aend] == b[bstart:bend], without slicing.
50 """Compare a[astart:aend] == b[bstart:bend], without slicing.
51 """
51 """
52 if (aend - astart) != (bend - bstart):
52 if (aend - astart) != (bend - bstart):
53 return False
53 return False
54 for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
54 for ia, ib in zip(xrange(astart, aend), xrange(bstart, bend)):
55 if a[ia] != b[ib]:
55 if a[ia] != b[ib]:
56 return False
56 return False
57 else:
57 else:
58 return True
58 return True
59
59
60 class Merge3Text(object):
60 class Merge3Text(object):
61 """3-way merge of texts.
61 """3-way merge of texts.
62
62
63 Given strings BASE, OTHER, THIS, tries to produce a combined text
63 Given strings BASE, OTHER, THIS, tries to produce a combined text
64 incorporating the changes from both BASE->OTHER and BASE->THIS."""
64 incorporating the changes from both BASE->OTHER and BASE->THIS."""
65 def __init__(self, basetext, atext, btext, base=None, a=None, b=None):
65 def __init__(self, basetext, atext, btext, base=None, a=None, b=None):
66 self.basetext = basetext
66 self.basetext = basetext
67 self.atext = atext
67 self.atext = atext
68 self.btext = btext
68 self.btext = btext
69 if base is None:
69 if base is None:
70 base = mdiff.splitnewlines(basetext)
70 base = mdiff.splitnewlines(basetext)
71 if a is None:
71 if a is None:
72 a = mdiff.splitnewlines(atext)
72 a = mdiff.splitnewlines(atext)
73 if b is None:
73 if b is None:
74 b = mdiff.splitnewlines(btext)
74 b = mdiff.splitnewlines(btext)
75 self.base = base
75 self.base = base
76 self.a = a
76 self.a = a
77 self.b = b
77 self.b = b
78
78
79 def merge_lines(self,
79 def merge_lines(self,
80 name_a=None,
80 name_a=None,
81 name_b=None,
81 name_b=None,
82 name_base=None,
82 name_base=None,
83 start_marker='<<<<<<<',
83 start_marker='<<<<<<<',
84 mid_marker='=======',
84 mid_marker='=======',
85 end_marker='>>>>>>>',
85 end_marker='>>>>>>>',
86 base_marker=None,
86 base_marker=None,
87 reprocess=False):
87 reprocess=False):
88 """Return merge in cvs-like form.
88 """Return merge in cvs-like form.
89 """
89 """
90 self.conflicts = False
90 self.conflicts = False
91 newline = '\n'
91 newline = '\n'
92 if len(self.a) > 0:
92 if len(self.a) > 0:
93 if self.a[0].endswith('\r\n'):
93 if self.a[0].endswith('\r\n'):
94 newline = '\r\n'
94 newline = '\r\n'
95 elif self.a[0].endswith('\r'):
95 elif self.a[0].endswith('\r'):
96 newline = '\r'
96 newline = '\r'
97 if base_marker and reprocess:
97 if base_marker and reprocess:
98 raise CantReprocessAndShowBase()
98 raise CantReprocessAndShowBase()
99 if name_a:
99 if name_a:
100 start_marker = start_marker + ' ' + name_a
100 start_marker = start_marker + ' ' + name_a
101 if name_b:
101 if name_b:
102 end_marker = end_marker + ' ' + name_b
102 end_marker = end_marker + ' ' + name_b
103 if name_base and base_marker:
103 if name_base and base_marker:
104 base_marker = base_marker + ' ' + name_base
104 base_marker = base_marker + ' ' + name_base
105 merge_regions = self.merge_regions()
105 merge_regions = self.merge_regions()
106 if reprocess is True:
106 if reprocess is True:
107 merge_regions = self.reprocess_merge_regions(merge_regions)
107 merge_regions = self.reprocess_merge_regions(merge_regions)
108 for t in merge_regions:
108 for t in merge_regions:
109 what = t[0]
109 what = t[0]
110 if what == 'unchanged':
110 if what == 'unchanged':
111 for i in range(t[1], t[2]):
111 for i in range(t[1], t[2]):
112 yield self.base[i]
112 yield self.base[i]
113 elif what == 'a' or what == 'same':
113 elif what == 'a' or what == 'same':
114 for i in range(t[1], t[2]):
114 for i in range(t[1], t[2]):
115 yield self.a[i]
115 yield self.a[i]
116 elif what == 'b':
116 elif what == 'b':
117 for i in range(t[1], t[2]):
117 for i in range(t[1], t[2]):
118 yield self.b[i]
118 yield self.b[i]
119 elif what == 'conflict':
119 elif what == 'conflict':
120 self.conflicts = True
120 self.conflicts = True
121 yield start_marker + newline
121 yield start_marker + newline
122 for i in range(t[3], t[4]):
122 for i in range(t[3], t[4]):
123 yield self.a[i]
123 yield self.a[i]
124 if base_marker is not None:
124 if base_marker is not None:
125 yield base_marker + newline
125 yield base_marker + newline
126 for i in range(t[1], t[2]):
126 for i in range(t[1], t[2]):
127 yield self.base[i]
127 yield self.base[i]
128 yield mid_marker + newline
128 yield mid_marker + newline
129 for i in range(t[5], t[6]):
129 for i in range(t[5], t[6]):
130 yield self.b[i]
130 yield self.b[i]
131 yield end_marker + newline
131 yield end_marker + newline
132 else:
132 else:
133 raise ValueError(what)
133 raise ValueError(what)
134
134
135 def merge_annotated(self):
135 def merge_annotated(self):
136 """Return merge with conflicts, showing origin of lines.
136 """Return merge with conflicts, showing origin of lines.
137
137
138 Most useful for debugging merge.
138 Most useful for debugging merge.
139 """
139 """
140 for t in self.merge_regions():
140 for t in self.merge_regions():
141 what = t[0]
141 what = t[0]
142 if what == 'unchanged':
142 if what == 'unchanged':
143 for i in range(t[1], t[2]):
143 for i in range(t[1], t[2]):
144 yield 'u | ' + self.base[i]
144 yield 'u | ' + self.base[i]
145 elif what == 'a' or what == 'same':
145 elif what == 'a' or what == 'same':
146 for i in range(t[1], t[2]):
146 for i in range(t[1], t[2]):
147 yield what[0] + ' | ' + self.a[i]
147 yield what[0] + ' | ' + self.a[i]
148 elif what == 'b':
148 elif what == 'b':
149 for i in range(t[1], t[2]):
149 for i in range(t[1], t[2]):
150 yield 'b | ' + self.b[i]
150 yield 'b | ' + self.b[i]
151 elif what == 'conflict':
151 elif what == 'conflict':
152 yield '<<<<\n'
152 yield '<<<<\n'
153 for i in range(t[3], t[4]):
153 for i in range(t[3], t[4]):
154 yield 'A | ' + self.a[i]
154 yield 'A | ' + self.a[i]
155 yield '----\n'
155 yield '----\n'
156 for i in range(t[5], t[6]):
156 for i in range(t[5], t[6]):
157 yield 'B | ' + self.b[i]
157 yield 'B | ' + self.b[i]
158 yield '>>>>\n'
158 yield '>>>>\n'
159 else:
159 else:
160 raise ValueError(what)
160 raise ValueError(what)
161
161
162 def merge_groups(self):
162 def merge_groups(self):
163 """Yield sequence of line groups. Each one is a tuple:
163 """Yield sequence of line groups. Each one is a tuple:
164
164
165 'unchanged', lines
165 'unchanged', lines
166 Lines unchanged from base
166 Lines unchanged from base
167
167
168 'a', lines
168 'a', lines
169 Lines taken from a
169 Lines taken from a
170
170
171 'same', lines
171 'same', lines
172 Lines taken from a (and equal to b)
172 Lines taken from a (and equal to b)
173
173
174 'b', lines
174 'b', lines
175 Lines taken from b
175 Lines taken from b
176
176
177 'conflict', base_lines, a_lines, b_lines
177 'conflict', base_lines, a_lines, b_lines
178 Lines from base were changed to either a or b and conflict.
178 Lines from base were changed to either a or b and conflict.
179 """
179 """
180 for t in self.merge_regions():
180 for t in self.merge_regions():
181 what = t[0]
181 what = t[0]
182 if what == 'unchanged':
182 if what == 'unchanged':
183 yield what, self.base[t[1]:t[2]]
183 yield what, self.base[t[1]:t[2]]
184 elif what == 'a' or what == 'same':
184 elif what == 'a' or what == 'same':
185 yield what, self.a[t[1]:t[2]]
185 yield what, self.a[t[1]:t[2]]
186 elif what == 'b':
186 elif what == 'b':
187 yield what, self.b[t[1]:t[2]]
187 yield what, self.b[t[1]:t[2]]
188 elif what == 'conflict':
188 elif what == 'conflict':
189 yield (what,
189 yield (what,
190 self.base[t[1]:t[2]],
190 self.base[t[1]:t[2]],
191 self.a[t[3]:t[4]],
191 self.a[t[3]:t[4]],
192 self.b[t[5]:t[6]])
192 self.b[t[5]:t[6]])
193 else:
193 else:
194 raise ValueError(what)
194 raise ValueError(what)
195
195
196 def merge_regions(self):
196 def merge_regions(self):
197 """Return sequences of matching and conflicting regions.
197 """Return sequences of matching and conflicting regions.
198
198
199 This returns tuples, where the first value says what kind we
199 This returns tuples, where the first value says what kind we
200 have:
200 have:
201
201
202 'unchanged', start, end
202 'unchanged', start, end
203 Take a region of base[start:end]
203 Take a region of base[start:end]
204
204
205 'same', astart, aend
205 'same', astart, aend
206 b and a are different from base but give the same result
206 b and a are different from base but give the same result
207
207
208 'a', start, end
208 'a', start, end
209 Non-clashing insertion from a[start:end]
209 Non-clashing insertion from a[start:end]
210
210
211 Method is as follows:
211 Method is as follows:
212
212
213 The two sequences align only on regions which match the base
213 The two sequences align only on regions which match the base
214 and both descendants. These are found by doing a two-way diff
214 and both descendants. These are found by doing a two-way diff
215 of each one against the base, and then finding the
215 of each one against the base, and then finding the
216 intersections between those regions. These "sync regions"
216 intersections between those regions. These "sync regions"
217 are by definition unchanged in both and easily dealt with.
217 are by definition unchanged in both and easily dealt with.
218
218
219 The regions in between can be in any of three cases:
219 The regions in between can be in any of three cases:
220 changed on only one side, changed the same way on both sides, or conflicted.
220 changed on only one side, changed the same way on both sides, or conflicted.
221 """
221 """
222
222
223 # section a[0:ia] has been disposed of, etc
223 # section a[0:ia] has been disposed of, etc
224 iz = ia = ib = 0
224 iz = ia = ib = 0
225
225
226 for zmatch, zend, amatch, aend, bmatch, bend in self.find_sync_regions():
226 for zmatch, zend, amatch, aend, bmatch, bend in self.find_sync_regions():
227 #print 'match base [%d:%d]' % (zmatch, zend)
227 #print 'match base [%d:%d]' % (zmatch, zend)
228
228
229 matchlen = zend - zmatch
229 matchlen = zend - zmatch
230 assert matchlen >= 0
230 assert matchlen >= 0
231 assert matchlen == (aend - amatch)
231 assert matchlen == (aend - amatch)
232 assert matchlen == (bend - bmatch)
232 assert matchlen == (bend - bmatch)
233
233
234 len_a = amatch - ia
234 len_a = amatch - ia
235 len_b = bmatch - ib
235 len_b = bmatch - ib
236 len_base = zmatch - iz
236 len_base = zmatch - iz
237 assert len_a >= 0
237 assert len_a >= 0
238 assert len_b >= 0
238 assert len_b >= 0
239 assert len_base >= 0
239 assert len_base >= 0
240
240
241 #print 'unmatched a=%d, b=%d' % (len_a, len_b)
241 #print 'unmatched a=%d, b=%d' % (len_a, len_b)
242
242
243 if len_a or len_b:
243 if len_a or len_b:
244 # try to avoid actually slicing the lists
244 # try to avoid actually slicing the lists
245 equal_a = compare_range(self.a, ia, amatch,
245 equal_a = compare_range(self.a, ia, amatch,
246 self.base, iz, zmatch)
246 self.base, iz, zmatch)
247 equal_b = compare_range(self.b, ib, bmatch,
247 equal_b = compare_range(self.b, ib, bmatch,
248 self.base, iz, zmatch)
248 self.base, iz, zmatch)
249 same = compare_range(self.a, ia, amatch,
249 same = compare_range(self.a, ia, amatch,
250 self.b, ib, bmatch)
250 self.b, ib, bmatch)
251
251
252 if same:
252 if same:
253 yield 'same', ia, amatch
253 yield 'same', ia, amatch
254 elif equal_a and not equal_b:
254 elif equal_a and not equal_b:
255 yield 'b', ib, bmatch
255 yield 'b', ib, bmatch
256 elif equal_b and not equal_a:
256 elif equal_b and not equal_a:
257 yield 'a', ia, amatch
257 yield 'a', ia, amatch
258 elif not equal_a and not equal_b:
258 elif not equal_a and not equal_b:
259 yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch
259 yield 'conflict', iz, zmatch, ia, amatch, ib, bmatch
260 else:
260 else:
261 raise AssertionError("can't handle a=b=base but unmatched")
261 raise AssertionError("can't handle a=b=base but unmatched")
262
262
263 ia = amatch
263 ia = amatch
264 ib = bmatch
264 ib = bmatch
265 iz = zmatch
265 iz = zmatch
266
266
267 # if the same part of the base was deleted on both sides
267 # if the same part of the base was deleted on both sides
268 # that's OK, we can just skip it.
268 # that's OK, we can just skip it.
269
269
270
270
271 if matchlen > 0:
271 if matchlen > 0:
272 assert ia == amatch
272 assert ia == amatch
273 assert ib == bmatch
273 assert ib == bmatch
274 assert iz == zmatch
274 assert iz == zmatch
275
275
276 yield 'unchanged', zmatch, zend
276 yield 'unchanged', zmatch, zend
277 iz = zend
277 iz = zend
278 ia = aend
278 ia = aend
279 ib = bend
279 ib = bend
280
280
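# Illustrative sketch (not part of simplemerge.py): between two sync
# regions, the unmatched text is classified by comparing each side with
# the base, exactly as the four-way test above does. Hypothetical
# reduction of that decision table:

def classify(equal_a, equal_b, same):
    # equal_a: a matches base; equal_b: b matches base; same: a matches b
    if same:
        return 'same'        # both sides made the identical change
    if equal_a and not equal_b:
        return 'b'           # only b diverged from base: take b
    if equal_b and not equal_a:
        return 'a'           # only a diverged from base: take a
    if not equal_a and not equal_b:
        return 'conflict'    # both diverged, differently
    raise AssertionError("can't handle a=b=base but unmatched")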
281 def reprocess_merge_regions(self, merge_regions):
281 def reprocess_merge_regions(self, merge_regions):
282 """Where there are conflict regions, remove the agreed lines.
282 """Where there are conflict regions, remove the agreed lines.
283
283
284 Lines where both A and B have made the same changes are
284 Lines where both A and B have made the same changes are
285 eliminated.
285 eliminated.
286 """
286 """
287 for region in merge_regions:
287 for region in merge_regions:
288 if region[0] != "conflict":
288 if region[0] != "conflict":
289 yield region
289 yield region
290 continue
290 continue
291 type, iz, zmatch, ia, amatch, ib, bmatch = region
291 type, iz, zmatch, ia, amatch, ib, bmatch = region
292 a_region = self.a[ia:amatch]
292 a_region = self.a[ia:amatch]
293 b_region = self.b[ib:bmatch]
293 b_region = self.b[ib:bmatch]
294 matches = mdiff.get_matching_blocks(''.join(a_region),
294 matches = mdiff.get_matching_blocks(''.join(a_region),
295 ''.join(b_region))
295 ''.join(b_region))
296 next_a = ia
296 next_a = ia
297 next_b = ib
297 next_b = ib
298 for region_ia, region_ib, region_len in matches[:-1]:
298 for region_ia, region_ib, region_len in matches[:-1]:
299 region_ia += ia
299 region_ia += ia
300 region_ib += ib
300 region_ib += ib
301 reg = self.mismatch_region(next_a, region_ia, next_b,
301 reg = self.mismatch_region(next_a, region_ia, next_b,
302 region_ib)
302 region_ib)
303 if reg is not None:
303 if reg is not None:
304 yield reg
304 yield reg
305 yield 'same', region_ia, region_len + region_ia
305 yield 'same', region_ia, region_len + region_ia
306 next_a = region_ia + region_len
306 next_a = region_ia + region_len
307 next_b = region_ib + region_len
307 next_b = region_ib + region_len
308 reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
308 reg = self.mismatch_region(next_a, amatch, next_b, bmatch)
309 if reg is not None:
309 if reg is not None:
310 yield reg
310 yield reg
311
311
312 def mismatch_region(next_a, region_ia, next_b, region_ib):
312 def mismatch_region(next_a, region_ia, next_b, region_ib):
313 if next_a < region_ia or next_b < region_ib:
313 if next_a < region_ia or next_b < region_ib:
314 return 'conflict', None, None, next_a, region_ia, next_b, region_ib
314 return 'conflict', None, None, next_a, region_ia, next_b, region_ib
315 mismatch_region = staticmethod(mismatch_region)
315 mismatch_region = staticmethod(mismatch_region)
316
316
317 def find_sync_regions(self):
317 def find_sync_regions(self):
318 """Return a list of sync regions, where both descendants match the base.
318 """Return a list of sync regions, where both descendants match the base.
319
319
320 Generates a list of (base1, base2, a1, a2, b1, b2). There is
320 Generates a list of (base1, base2, a1, a2, b1, b2). There is
321 always a zero-length sync region at the end of all the files.
321 always a zero-length sync region at the end of all the files.
322 """
322 """
323
323
324 ia = ib = 0
324 ia = ib = 0
325 amatches = mdiff.get_matching_blocks(self.basetext, self.atext)
325 amatches = mdiff.get_matching_blocks(self.basetext, self.atext)
326 bmatches = mdiff.get_matching_blocks(self.basetext, self.btext)
326 bmatches = mdiff.get_matching_blocks(self.basetext, self.btext)
327 len_a = len(amatches)
327 len_a = len(amatches)
328 len_b = len(bmatches)
328 len_b = len(bmatches)
329
329
330 sl = []
330 sl = []
331
331
332 while ia < len_a and ib < len_b:
332 while ia < len_a and ib < len_b:
333 abase, amatch, alen = amatches[ia]
333 abase, amatch, alen = amatches[ia]
334 bbase, bmatch, blen = bmatches[ib]
334 bbase, bmatch, blen = bmatches[ib]
335
335
336 # there is an unconflicted block at i; how long does it
336 # there is an unconflicted block at i; how long does it
337 # extend? until whichever one ends earlier.
337 # extend? until whichever one ends earlier.
338 i = intersect((abase, abase + alen), (bbase, bbase + blen))
338 i = intersect((abase, abase + alen), (bbase, bbase + blen))
339 if i:
339 if i:
340 intbase = i[0]
340 intbase = i[0]
341 intend = i[1]
341 intend = i[1]
342 intlen = intend - intbase
342 intlen = intend - intbase
343
343
344 # found a match of base[i[0], i[1]]; this may be less than
344 # found a match of base[i[0], i[1]]; this may be less than
345 # the region that matches in either one
345 # the region that matches in either one
346 assert intlen <= alen
346 assert intlen <= alen
347 assert intlen <= blen
347 assert intlen <= blen
348 assert abase <= intbase
348 assert abase <= intbase
349 assert bbase <= intbase
349 assert bbase <= intbase
350
350
351 asub = amatch + (intbase - abase)
351 asub = amatch + (intbase - abase)
352 bsub = bmatch + (intbase - bbase)
352 bsub = bmatch + (intbase - bbase)
353 aend = asub + intlen
353 aend = asub + intlen
354 bend = bsub + intlen
354 bend = bsub + intlen
355
355
356 assert self.base[intbase:intend] == self.a[asub:aend], \
356 assert self.base[intbase:intend] == self.a[asub:aend], \
357 (self.base[intbase:intend], self.a[asub:aend])
357 (self.base[intbase:intend], self.a[asub:aend])
358
358
359 assert self.base[intbase:intend] == self.b[bsub:bend]
359 assert self.base[intbase:intend] == self.b[bsub:bend]
360
360
361 sl.append((intbase, intend,
361 sl.append((intbase, intend,
362 asub, aend,
362 asub, aend,
363 bsub, bend))
363 bsub, bend))
364
364
365 # advance whichever one ends first in the base text
365 # advance whichever one ends first in the base text
366 if (abase + alen) < (bbase + blen):
366 if (abase + alen) < (bbase + blen):
367 ia += 1
367 ia += 1
368 else:
368 else:
369 ib += 1
369 ib += 1
370
370
371 intbase = len(self.base)
371 intbase = len(self.base)
372 abase = len(self.a)
372 abase = len(self.a)
373 bbase = len(self.b)
373 bbase = len(self.b)
374 sl.append((intbase, intbase, abase, abase, bbase, bbase))
374 sl.append((intbase, intbase, abase, abase, bbase, bbase))
375
375
376 return sl
376 return sl
377
377
378 def find_unconflicted(self):
378 def find_unconflicted(self):
379 """Return a list of ranges in base that are not conflicted."""
379 """Return a list of ranges in base that are not conflicted."""
380 am = mdiff.get_matching_blocks(self.basetext, self.atext)
380 am = mdiff.get_matching_blocks(self.basetext, self.atext)
381 bm = mdiff.get_matching_blocks(self.basetext, self.btext)
381 bm = mdiff.get_matching_blocks(self.basetext, self.btext)
382
382
383 unc = []
383 unc = []
384
384
385 while am and bm:
385 while am and bm:
386 # there is an unconflicted block at i; how long does it
386 # there is an unconflicted block at i; how long does it
387 # extend? until whichever one ends earlier.
387 # extend? until whichever one ends earlier.
388 a1 = am[0][0]
388 a1 = am[0][0]
389 a2 = a1 + am[0][2]
389 a2 = a1 + am[0][2]
390 b1 = bm[0][0]
390 b1 = bm[0][0]
391 b2 = b1 + bm[0][2]
391 b2 = b1 + bm[0][2]
392 i = intersect((a1, a2), (b1, b2))
392 i = intersect((a1, a2), (b1, b2))
393 if i:
393 if i:
394 unc.append(i)
394 unc.append(i)
395
395
396 if a2 < b2:
396 if a2 < b2:
397 del am[0]
397 del am[0]
398 else:
398 else:
399 del bm[0]
399 del bm[0]
400
400
401 return unc
401 return unc
402
402
403 def simplemerge(ui, local, base, other, **opts):
403 def simplemerge(ui, local, base, other, **opts):
404 def readfile(filename):
404 def readfile(filename):
405 f = open(filename, "rb")
405 f = open(filename, "rb")
406 text = f.read()
406 text = f.read()
407 f.close()
407 f.close()
408 if util.binary(text):
408 if util.binary(text):
409 msg = _("%s looks like a binary file.") % filename
409 msg = _("%s looks like a binary file.") % filename
410 if not opts.get('quiet'):
410 if not opts.get('quiet'):
411 ui.warn(_('warning: %s\n') % msg)
411 ui.warn(_('warning: %s\n') % msg)
412 if not opts.get('text'):
412 if not opts.get('text'):
413 raise util.Abort(msg)
413 raise util.Abort(msg)
414 return text
414 return text
415
415
416 name_a = local
416 name_a = local
417 name_b = other
417 name_b = other
418 labels = opts.get('label', [])
418 labels = opts.get('label', [])
419 if labels:
419 if labels:
420 name_a = labels.pop(0)
420 name_a = labels.pop(0)
421 if labels:
421 if labels:
422 name_b = labels.pop(0)
422 name_b = labels.pop(0)
423 if labels:
423 if labels:
424 raise util.Abort(_("can only specify two labels."))
424 raise util.Abort(_("can only specify two labels."))
425
425
426 try:
426 try:
427 localtext = readfile(local)
427 localtext = readfile(local)
428 basetext = readfile(base)
428 basetext = readfile(base)
429 othertext = readfile(other)
429 othertext = readfile(other)
430 except util.Abort:
430 except util.Abort:
431 return 1
431 return 1
432
432
433 local = os.path.realpath(local)
433 local = os.path.realpath(local)
434 if not opts.get('print'):
434 if not opts.get('print'):
435 opener = scmutil.opener(os.path.dirname(local))
435 opener = scmutil.opener(os.path.dirname(local))
436 out = opener(os.path.basename(local), "w", atomictemp=True)
436 out = opener(os.path.basename(local), "w", atomictemp=True)
437 else:
437 else:
438 out = sys.stdout
438 out = sys.stdout
439
439
440 reprocess = not opts.get('no_minimal')
440 reprocess = not opts.get('no_minimal')
441
441
442 m3 = Merge3Text(basetext, localtext, othertext)
442 m3 = Merge3Text(basetext, localtext, othertext)
443 for line in m3.merge_lines(name_a=name_a, name_b=name_b,
443 for line in m3.merge_lines(name_a=name_a, name_b=name_b,
444 reprocess=reprocess):
444 reprocess=reprocess):
445 out.write(line)
445 out.write(line)
446
446
447 if not opts.get('print'):
447 if not opts.get('print'):
448 out.rename()
448 out.close()
449
449
450 if m3.conflicts:
450 if m3.conflicts:
451 if not opts.get('quiet'):
451 if not opts.get('quiet'):
452 ui.warn(_("warning: conflicts during merge.\n"))
452 ui.warn(_("warning: conflicts during merge.\n"))
453 return 1
453 return 1
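
The call sites in this changeset (here, and in store.py and tags.py below) switch from rename() to close(), so an atomic temp file is finalized the way any other file-like object is. As a rough illustration only, here is a minimal sketch of the write-to-temp-then-rename pattern behind opener(..., atomictemp=True); the class and its names are hypothetical, not Mercurial's actual atomictempfile:

import os, tempfile

class _atomicfile(object):
    def __init__(self, name, mode='w+b'):
        self._name = name
        # temp file in the same directory, so the final rename cannot
        # cross a filesystem boundary
        fd, self._tempname = tempfile.mkstemp(
            prefix='.' + os.path.basename(name) + '-',
            dir=os.path.dirname(name) or '.')
        self._fp = os.fdopen(fd, mode)

    def write(self, data):
        self._fp.write(data)

    def close(self):
        # flush the temp file, then rename it over the target; on
        # POSIX the rename is atomic, so readers never see a
        # partially written file
        self._fp.close()
        os.rename(self._tempname, self._name)

    def discard(self):
        # abandon the temp file without touching the target
        self._fp.close()
        os.unlink(self._tempname)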
@@ -1,427 +1,427 b''
1 # store.py - repository store handling for Mercurial
1 # store.py - repository store handling for Mercurial
2 #
2 #
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
3 # Copyright 2008 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from i18n import _
8 from i18n import _
9 import osutil, scmutil, util
9 import osutil, scmutil, util
10 import os, stat
10 import os, stat
11
11
12 _sha = util.sha1
12 _sha = util.sha1
13
13
14 # This avoids a collision between a file named foo and a dir named
14 # This avoids a collision between a file named foo and a dir named
15 # foo.i or foo.d
15 # foo.i or foo.d
16 def encodedir(path):
16 def encodedir(path):
17 '''
17 '''
18 >>> encodedir('data/foo.i')
18 >>> encodedir('data/foo.i')
19 'data/foo.i'
19 'data/foo.i'
20 >>> encodedir('data/foo.i/bla.i')
20 >>> encodedir('data/foo.i/bla.i')
21 'data/foo.i.hg/bla.i'
21 'data/foo.i.hg/bla.i'
22 >>> encodedir('data/foo.i.hg/bla.i')
22 >>> encodedir('data/foo.i.hg/bla.i')
23 'data/foo.i.hg.hg/bla.i'
23 'data/foo.i.hg.hg/bla.i'
24 '''
24 '''
25 if not path.startswith('data/'):
25 if not path.startswith('data/'):
26 return path
26 return path
27 return (path
27 return (path
28 .replace(".hg/", ".hg.hg/")
28 .replace(".hg/", ".hg.hg/")
29 .replace(".i/", ".i.hg/")
29 .replace(".i/", ".i.hg/")
30 .replace(".d/", ".d.hg/"))
30 .replace(".d/", ".d.hg/"))
31
31
32 def decodedir(path):
32 def decodedir(path):
33 '''
33 '''
34 >>> decodedir('data/foo.i')
34 >>> decodedir('data/foo.i')
35 'data/foo.i'
35 'data/foo.i'
36 >>> decodedir('data/foo.i.hg/bla.i')
36 >>> decodedir('data/foo.i.hg/bla.i')
37 'data/foo.i/bla.i'
37 'data/foo.i/bla.i'
38 >>> decodedir('data/foo.i.hg.hg/bla.i')
38 >>> decodedir('data/foo.i.hg.hg/bla.i')
39 'data/foo.i.hg/bla.i'
39 'data/foo.i.hg/bla.i'
40 '''
40 '''
41 if not path.startswith('data/') or ".hg/" not in path:
41 if not path.startswith('data/') or ".hg/" not in path:
42 return path
42 return path
43 return (path
43 return (path
44 .replace(".d.hg/", ".d/")
44 .replace(".d.hg/", ".d/")
45 .replace(".i.hg/", ".i/")
45 .replace(".i.hg/", ".i/")
46 .replace(".hg.hg/", ".hg/"))
46 .replace(".hg.hg/", ".hg/"))
47
47
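
An illustrative collision the escape above avoids: tracking both a file 'a' and a file 'a.i/b' would otherwise require 'data/a.i' to be a file (the revlog index of 'a') and a directory at the same time. With encodedir() the two stay distinct:

# file 'a'     -> revlog index 'data/a.i'
# file 'a.i/b' -> revlog index 'data/a.i.hg/b.i'   (not 'data/a.i/b.i')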
48 def _buildencodefun():
48 def _buildencodefun():
49 '''
49 '''
50 >>> enc, dec = _buildencodefun()
50 >>> enc, dec = _buildencodefun()
51
51
52 >>> enc('nothing/special.txt')
52 >>> enc('nothing/special.txt')
53 'nothing/special.txt'
53 'nothing/special.txt'
54 >>> dec('nothing/special.txt')
54 >>> dec('nothing/special.txt')
55 'nothing/special.txt'
55 'nothing/special.txt'
56
56
57 >>> enc('HELLO')
57 >>> enc('HELLO')
58 '_h_e_l_l_o'
58 '_h_e_l_l_o'
59 >>> dec('_h_e_l_l_o')
59 >>> dec('_h_e_l_l_o')
60 'HELLO'
60 'HELLO'
61
61
62 >>> enc('hello:world?')
62 >>> enc('hello:world?')
63 'hello~3aworld~3f'
63 'hello~3aworld~3f'
64 >>> dec('hello~3aworld~3f')
64 >>> dec('hello~3aworld~3f')
65 'hello:world?'
65 'hello:world?'
66
66
67 >>> enc('the\x07quick\xADshot')
67 >>> enc('the\x07quick\xADshot')
68 'the~07quick~adshot'
68 'the~07quick~adshot'
69 >>> dec('the~07quick~adshot')
69 >>> dec('the~07quick~adshot')
70 'the\\x07quick\\xadshot'
70 'the\\x07quick\\xadshot'
71 '''
71 '''
72 e = '_'
72 e = '_'
73 winreserved = [ord(x) for x in '\\:*?"<>|']
73 winreserved = [ord(x) for x in '\\:*?"<>|']
74 cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
74 cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
75 for x in (range(32) + range(126, 256) + winreserved):
75 for x in (range(32) + range(126, 256) + winreserved):
76 cmap[chr(x)] = "~%02x" % x
76 cmap[chr(x)] = "~%02x" % x
77 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
77 for x in range(ord("A"), ord("Z")+1) + [ord(e)]:
78 cmap[chr(x)] = e + chr(x).lower()
78 cmap[chr(x)] = e + chr(x).lower()
79 dmap = {}
79 dmap = {}
80 for k, v in cmap.iteritems():
80 for k, v in cmap.iteritems():
81 dmap[v] = k
81 dmap[v] = k
82 def decode(s):
82 def decode(s):
83 i = 0
83 i = 0
84 while i < len(s):
84 while i < len(s):
85 for l in xrange(1, 4):
85 for l in xrange(1, 4):
86 try:
86 try:
87 yield dmap[s[i:i + l]]
87 yield dmap[s[i:i + l]]
88 i += l
88 i += l
89 break
89 break
90 except KeyError:
90 except KeyError:
91 pass
91 pass
92 else:
92 else:
93 raise KeyError
93 raise KeyError
94 return (lambda s: "".join([cmap[c] for c in encodedir(s)]),
94 return (lambda s: "".join([cmap[c] for c in encodedir(s)]),
95 lambda s: decodedir("".join(list(decode(s)))))
95 lambda s: decodedir("".join(list(decode(s)))))
96
96
97 encodefilename, decodefilename = _buildencodefun()
97 encodefilename, decodefilename = _buildencodefun()
98
98
99 def _buildlowerencodefun():
99 def _buildlowerencodefun():
100 '''
100 '''
101 >>> f = _buildlowerencodefun()
101 >>> f = _buildlowerencodefun()
102 >>> f('nothing/special.txt')
102 >>> f('nothing/special.txt')
103 'nothing/special.txt'
103 'nothing/special.txt'
104 >>> f('HELLO')
104 >>> f('HELLO')
105 'hello'
105 'hello'
106 >>> f('hello:world?')
106 >>> f('hello:world?')
107 'hello~3aworld~3f'
107 'hello~3aworld~3f'
108 >>> f('the\x07quick\xADshot')
108 >>> f('the\x07quick\xADshot')
109 'the~07quick~adshot'
109 'the~07quick~adshot'
110 '''
110 '''
111 winreserved = [ord(x) for x in '\\:*?"<>|']
111 winreserved = [ord(x) for x in '\\:*?"<>|']
112 cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
112 cmap = dict([(chr(x), chr(x)) for x in xrange(127)])
113 for x in (range(32) + range(126, 256) + winreserved):
113 for x in (range(32) + range(126, 256) + winreserved):
114 cmap[chr(x)] = "~%02x" % x
114 cmap[chr(x)] = "~%02x" % x
115 for x in range(ord("A"), ord("Z")+1):
115 for x in range(ord("A"), ord("Z")+1):
116 cmap[chr(x)] = chr(x).lower()
116 cmap[chr(x)] = chr(x).lower()
117 return lambda s: "".join([cmap[c] for c in s])
117 return lambda s: "".join([cmap[c] for c in s])
118
118
119 lowerencode = _buildlowerencodefun()
119 lowerencode = _buildlowerencodefun()
120
120
121 _winreservednames = '''con prn aux nul
121 _winreservednames = '''con prn aux nul
122 com1 com2 com3 com4 com5 com6 com7 com8 com9
122 com1 com2 com3 com4 com5 com6 com7 com8 com9
123 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
123 lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
124 def _auxencode(path, dotencode):
124 def _auxencode(path, dotencode):
125 '''
125 '''
126 Encodes filenames containing names reserved by Windows or which end in
126 Encodes filenames containing names reserved by Windows or which end in
127 period or space. Does not touch other single reserved characters c.
127 period or space. Does not touch other single reserved characters c.
128 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
128 Specifically, c in '\\:*?"<>|' or ord(c) <= 31 are *not* encoded here.
129 Additionally encodes space or period at the beginning, if dotencode is
129 Additionally encodes space or period at the beginning, if dotencode is
130 True.
130 True.
131 path is assumed to be all lowercase.
131 path is assumed to be all lowercase.
132
132
133 >>> _auxencode('.foo/aux.txt/txt.aux/con/prn/nul/foo.', True)
133 >>> _auxencode('.foo/aux.txt/txt.aux/con/prn/nul/foo.', True)
134 '~2efoo/au~78.txt/txt.aux/co~6e/pr~6e/nu~6c/foo~2e'
134 '~2efoo/au~78.txt/txt.aux/co~6e/pr~6e/nu~6c/foo~2e'
135 >>> _auxencode('.com1com2/lpt9.lpt4.lpt1/conprn/foo.', False)
135 >>> _auxencode('.com1com2/lpt9.lpt4.lpt1/conprn/foo.', False)
136 '.com1com2/lp~749.lpt4.lpt1/conprn/foo~2e'
136 '.com1com2/lp~749.lpt4.lpt1/conprn/foo~2e'
137 >>> _auxencode('foo. ', True)
137 >>> _auxencode('foo. ', True)
138 'foo.~20'
138 'foo.~20'
139 >>> _auxencode(' .foo', True)
139 >>> _auxencode(' .foo', True)
140 '~20.foo'
140 '~20.foo'
141 '''
141 '''
142 res = []
142 res = []
143 for n in path.split('/'):
143 for n in path.split('/'):
144 if n:
144 if n:
145 base = n.split('.')[0]
145 base = n.split('.')[0]
146 if base and (base in _winreservednames):
146 if base and (base in _winreservednames):
147 # encode third letter ('aux' -> 'au~78')
147 # encode third letter ('aux' -> 'au~78')
148 ec = "~%02x" % ord(n[2])
148 ec = "~%02x" % ord(n[2])
149 n = n[0:2] + ec + n[3:]
149 n = n[0:2] + ec + n[3:]
150 if n[-1] in '. ':
150 if n[-1] in '. ':
151 # encode last period or space ('foo...' -> 'foo..~2e')
151 # encode last period or space ('foo...' -> 'foo..~2e')
152 n = n[:-1] + "~%02x" % ord(n[-1])
152 n = n[:-1] + "~%02x" % ord(n[-1])
153 if dotencode and n[0] in '. ':
153 if dotencode and n[0] in '. ':
154 n = "~%02x" % ord(n[0]) + n[1:]
154 n = "~%02x" % ord(n[0]) + n[1:]
155 res.append(n)
155 res.append(n)
156 return '/'.join(res)
156 return '/'.join(res)
157
157
158 _maxstorepathlen = 120
158 _maxstorepathlen = 120
159 _dirprefixlen = 8
159 _dirprefixlen = 8
160 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
160 _maxshortdirslen = 8 * (_dirprefixlen + 1) - 4
161 def _hybridencode(path, auxencode):
161 def _hybridencode(path, auxencode):
162 '''encodes path with a length limit
162 '''encodes path with a length limit
163
163
164 Encodes all paths that begin with 'data/', according to the following.
164 Encodes all paths that begin with 'data/', according to the following.
165
165
166 Default encoding (reversible):
166 Default encoding (reversible):
167
167
168 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
168 Encodes all uppercase letters 'X' as '_x'. All reserved or illegal
169 characters are encoded as '~xx', where xx is the two digit hex code
169 characters are encoded as '~xx', where xx is the two digit hex code
170 of the character (see encodefilename).
170 of the character (see encodefilename).
171 Relevant path components consisting of Windows reserved filenames are
171 Relevant path components consisting of Windows reserved filenames are
172 masked by encoding the third character ('aux' -> 'au~78', see auxencode).
172 masked by encoding the third character ('aux' -> 'au~78', see auxencode).
173
173
174 Hashed encoding (not reversible):
174 Hashed encoding (not reversible):
175
175
176 If the default-encoded path is longer than _maxstorepathlen, a
176 If the default-encoded path is longer than _maxstorepathlen, a
177 non-reversible hybrid hashing of the path is done instead.
177 non-reversible hybrid hashing of the path is done instead.
178 This encoding uses up to _dirprefixlen characters of all directory
178 This encoding uses up to _dirprefixlen characters of all directory
179 levels of the lowerencoded path, but not more levels than can fit into
179 levels of the lowerencoded path, but not more levels than can fit into
180 _maxshortdirslen.
180 _maxshortdirslen.
181 Then follows the filler followed by the sha digest of the full path.
181 Then follows the filler followed by the sha digest of the full path.
182 The filler is the beginning of the basename of the lowerencoded path
182 The filler is the beginning of the basename of the lowerencoded path
183 (the basename is everything after the last path separator). The filler
183 (the basename is everything after the last path separator). The filler
184 is as long as possible, filling in characters from the basename until
184 is as long as possible, filling in characters from the basename until
185 the encoded path has _maxstorepathlen characters (or all chars of the
185 the encoded path has _maxstorepathlen characters (or all chars of the
186 basename have been taken).
186 basename have been taken).
187 The extension (e.g. '.i' or '.d') is preserved.
187 The extension (e.g. '.i' or '.d') is preserved.
188
188
189 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
189 The string 'data/' at the beginning is replaced with 'dh/', if the hashed
190 encoding was used.
190 encoding was used.
191 '''
191 '''
192 if not path.startswith('data/'):
192 if not path.startswith('data/'):
193 return path
193 return path
194 # escape directories ending with .i and .d
194 # escape directories ending with .i and .d
195 path = encodedir(path)
195 path = encodedir(path)
196 ndpath = path[len('data/'):]
196 ndpath = path[len('data/'):]
197 res = 'data/' + auxencode(encodefilename(ndpath))
197 res = 'data/' + auxencode(encodefilename(ndpath))
198 if len(res) > _maxstorepathlen:
198 if len(res) > _maxstorepathlen:
199 digest = _sha(path).hexdigest()
199 digest = _sha(path).hexdigest()
200 aep = auxencode(lowerencode(ndpath))
200 aep = auxencode(lowerencode(ndpath))
201 _root, ext = os.path.splitext(aep)
201 _root, ext = os.path.splitext(aep)
202 parts = aep.split('/')
202 parts = aep.split('/')
203 basename = parts[-1]
203 basename = parts[-1]
204 sdirs = []
204 sdirs = []
205 for p in parts[:-1]:
205 for p in parts[:-1]:
206 d = p[:_dirprefixlen]
206 d = p[:_dirprefixlen]
207 if d[-1] in '. ':
207 if d[-1] in '. ':
208 # Windows can't access dirs ending in period or space
208 # Windows can't access dirs ending in period or space
209 d = d[:-1] + '_'
209 d = d[:-1] + '_'
210 t = '/'.join(sdirs) + '/' + d
210 t = '/'.join(sdirs) + '/' + d
211 if len(t) > _maxshortdirslen:
211 if len(t) > _maxshortdirslen:
212 break
212 break
213 sdirs.append(d)
213 sdirs.append(d)
214 dirs = '/'.join(sdirs)
214 dirs = '/'.join(sdirs)
215 if len(dirs) > 0:
215 if len(dirs) > 0:
216 dirs += '/'
216 dirs += '/'
217 res = 'dh/' + dirs + digest + ext
217 res = 'dh/' + dirs + digest + ext
218 spaceleft = _maxstorepathlen - len(res)
218 spaceleft = _maxstorepathlen - len(res)
219 if spaceleft > 0:
219 if spaceleft > 0:
220 filler = basename[:spaceleft]
220 filler = basename[:spaceleft]
221 res = 'dh/' + dirs + filler + digest + ext
221 res = 'dh/' + dirs + filler + digest + ext
222 return res
222 return res
223
223
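
A simplified, standalone sketch of the hashed fallback described in the docstring above. It omits lowerencode/auxencode and the 'data/' prefix handling, so it illustrates only the length budgeting and does not reproduce Mercurial's exact output; the default 68 is _maxshortdirslen, i.e. 8 * (_dirprefixlen + 1) - 4:

import hashlib, os

def hashedpath(path, maxlen=120, dirprefixlen=8, maxshortdirslen=68):
    digest = hashlib.sha1(path).hexdigest()        # 40 hex characters
    parts = path.split('/')
    basename = parts[-1]
    ext = os.path.splitext(basename)[1]            # keep '.i' or '.d'
    sdirs = []
    for p in parts[:-1]:
        d = p[:dirprefixlen]                       # first 8 chars of each dir
        if len('/'.join(sdirs + [d])) > maxshortdirslen:
            break                                  # directory budget exhausted
        sdirs.append(d)
    dirs = '/'.join(sdirs)
    if dirs:
        dirs += '/'
    res = 'dh/' + dirs + digest + ext
    spaceleft = maxlen - len(res)
    if spaceleft > 0:
        # pad with as many basename characters as the budget allows
        res = 'dh/' + dirs + basename[:spaceleft] + digest + ext
    return res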
224 def _calcmode(path):
224 def _calcmode(path):
225 try:
225 try:
226 # files in .hg/ will be created using this mode
226 # files in .hg/ will be created using this mode
227 mode = os.stat(path).st_mode
227 mode = os.stat(path).st_mode
228 # avoid some useless chmods
228 # avoid some useless chmods
229 if (0777 & ~util.umask) == (0777 & mode):
229 if (0777 & ~util.umask) == (0777 & mode):
230 mode = None
230 mode = None
231 except OSError:
231 except OSError:
232 mode = None
232 mode = None
233 return mode
233 return mode
234
234
235 _data = 'data 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
235 _data = 'data 00manifest.d 00manifest.i 00changelog.d 00changelog.i'
236
236
237 class basicstore(object):
237 class basicstore(object):
238 '''base class for local repository stores'''
238 '''base class for local repository stores'''
239 def __init__(self, path, openertype):
239 def __init__(self, path, openertype):
240 self.path = path
240 self.path = path
241 self.createmode = _calcmode(path)
241 self.createmode = _calcmode(path)
242 op = openertype(self.path)
242 op = openertype(self.path)
243 op.createmode = self.createmode
243 op.createmode = self.createmode
244 self.opener = scmutil.filteropener(op, encodedir)
244 self.opener = scmutil.filteropener(op, encodedir)
245
245
246 def join(self, f):
246 def join(self, f):
247 return self.path + '/' + encodedir(f)
247 return self.path + '/' + encodedir(f)
248
248
249 def _walk(self, relpath, recurse):
249 def _walk(self, relpath, recurse):
250 '''yields (unencoded, encoded, size)'''
250 '''yields (unencoded, encoded, size)'''
251 path = self.path
251 path = self.path
252 if relpath:
252 if relpath:
253 path += '/' + relpath
253 path += '/' + relpath
254 striplen = len(self.path) + 1
254 striplen = len(self.path) + 1
255 l = []
255 l = []
256 if os.path.isdir(path):
256 if os.path.isdir(path):
257 visit = [path]
257 visit = [path]
258 while visit:
258 while visit:
259 p = visit.pop()
259 p = visit.pop()
260 for f, kind, st in osutil.listdir(p, stat=True):
260 for f, kind, st in osutil.listdir(p, stat=True):
261 fp = p + '/' + f
261 fp = p + '/' + f
262 if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
262 if kind == stat.S_IFREG and f[-2:] in ('.d', '.i'):
263 n = util.pconvert(fp[striplen:])
263 n = util.pconvert(fp[striplen:])
264 l.append((decodedir(n), n, st.st_size))
264 l.append((decodedir(n), n, st.st_size))
265 elif kind == stat.S_IFDIR and recurse:
265 elif kind == stat.S_IFDIR and recurse:
266 visit.append(fp)
266 visit.append(fp)
267 return sorted(l)
267 return sorted(l)
268
268
269 def datafiles(self):
269 def datafiles(self):
270 return self._walk('data', True)
270 return self._walk('data', True)
271
271
272 def walk(self):
272 def walk(self):
273 '''yields (unencoded, encoded, size)'''
273 '''yields (unencoded, encoded, size)'''
274 # yield data files first
274 # yield data files first
275 for x in self.datafiles():
275 for x in self.datafiles():
276 yield x
276 yield x
277 # yield manifest before changelog
277 # yield manifest before changelog
278 for x in reversed(self._walk('', False)):
278 for x in reversed(self._walk('', False)):
279 yield x
279 yield x
280
280
281 def copylist(self):
281 def copylist(self):
282 return ['requires'] + _data.split()
282 return ['requires'] + _data.split()
283
283
284 def write(self):
284 def write(self):
285 pass
285 pass
286
286
287 class encodedstore(basicstore):
287 class encodedstore(basicstore):
288 def __init__(self, path, openertype):
288 def __init__(self, path, openertype):
289 self.path = path + '/store'
289 self.path = path + '/store'
290 self.createmode = _calcmode(self.path)
290 self.createmode = _calcmode(self.path)
291 op = openertype(self.path)
291 op = openertype(self.path)
292 op.createmode = self.createmode
292 op.createmode = self.createmode
293 self.opener = scmutil.filteropener(op, encodefilename)
293 self.opener = scmutil.filteropener(op, encodefilename)
294
294
295 def datafiles(self):
295 def datafiles(self):
296 for a, b, size in self._walk('data', True):
296 for a, b, size in self._walk('data', True):
297 try:
297 try:
298 a = decodefilename(a)
298 a = decodefilename(a)
299 except KeyError:
299 except KeyError:
300 a = None
300 a = None
301 yield a, b, size
301 yield a, b, size
302
302
303 def join(self, f):
303 def join(self, f):
304 return self.path + '/' + encodefilename(f)
304 return self.path + '/' + encodefilename(f)
305
305
306 def copylist(self):
306 def copylist(self):
307 return (['requires', '00changelog.i'] +
307 return (['requires', '00changelog.i'] +
308 ['store/' + f for f in _data.split()])
308 ['store/' + f for f in _data.split()])
309
309
310 class fncache(object):
310 class fncache(object):
311 # the filename used to be partially encoded
311 # the filename used to be partially encoded
312 # hence the encodedir/decodedir dance
312 # hence the encodedir/decodedir dance
313 def __init__(self, opener):
313 def __init__(self, opener):
314 self.opener = opener
314 self.opener = opener
315 self.entries = None
315 self.entries = None
316 self._dirty = False
316 self._dirty = False
317
317
318 def _load(self):
318 def _load(self):
319 '''fill the entries from the fncache file'''
319 '''fill the entries from the fncache file'''
320 self.entries = set()
320 self.entries = set()
321 self._dirty = False
321 self._dirty = False
322 try:
322 try:
323 fp = self.opener('fncache', mode='rb')
323 fp = self.opener('fncache', mode='rb')
324 except IOError:
324 except IOError:
325 # skip nonexistent file
325 # skip nonexistent file
326 return
326 return
327 for n, line in enumerate(fp):
327 for n, line in enumerate(fp):
328 if (len(line) < 2) or (line[-1] != '\n'):
328 if (len(line) < 2) or (line[-1] != '\n'):
329 t = _('invalid entry in fncache, line %s') % (n + 1)
329 t = _('invalid entry in fncache, line %s') % (n + 1)
330 raise util.Abort(t)
330 raise util.Abort(t)
331 self.entries.add(decodedir(line[:-1]))
331 self.entries.add(decodedir(line[:-1]))
332 fp.close()
332 fp.close()
333
333
334 def rewrite(self, files):
334 def rewrite(self, files):
335 fp = self.opener('fncache', mode='wb')
335 fp = self.opener('fncache', mode='wb')
336 for p in files:
336 for p in files:
337 fp.write(encodedir(p) + '\n')
337 fp.write(encodedir(p) + '\n')
338 fp.close()
338 fp.close()
339 self.entries = set(files)
339 self.entries = set(files)
340 self._dirty = False
340 self._dirty = False
341
341
342 def write(self):
342 def write(self):
343 if not self._dirty:
343 if not self._dirty:
344 return
344 return
345 fp = self.opener('fncache', mode='wb', atomictemp=True)
345 fp = self.opener('fncache', mode='wb', atomictemp=True)
346 for p in self.entries:
346 for p in self.entries:
347 fp.write(encodedir(p) + '\n')
347 fp.write(encodedir(p) + '\n')
348 fp.rename()
348 fp.close()
349 self._dirty = False
349 self._dirty = False
350
350
351 def add(self, fn):
351 def add(self, fn):
352 if self.entries is None:
352 if self.entries is None:
353 self._load()
353 self._load()
354 if fn not in self.entries:
354 if fn not in self.entries:
355 self._dirty = True
355 self._dirty = True
356 self.entries.add(fn)
356 self.entries.add(fn)
357
357
358 def __contains__(self, fn):
358 def __contains__(self, fn):
359 if self.entries is None:
359 if self.entries is None:
360 self._load()
360 self._load()
361 return fn in self.entries
361 return fn in self.entries
362
362
363 def __iter__(self):
363 def __iter__(self):
364 if self.entries is None:
364 if self.entries is None:
365 self._load()
365 self._load()
366 return iter(self.entries)
366 return iter(self.entries)
367
367
368 class _fncacheopener(scmutil.abstractopener):
368 class _fncacheopener(scmutil.abstractopener):
369 def __init__(self, op, fnc, encode):
369 def __init__(self, op, fnc, encode):
370 self.opener = op
370 self.opener = op
371 self.fncache = fnc
371 self.fncache = fnc
372 self.encode = encode
372 self.encode = encode
373
373
374 def __call__(self, path, mode='r', *args, **kw):
374 def __call__(self, path, mode='r', *args, **kw):
375 if mode not in ('r', 'rb') and path.startswith('data/'):
375 if mode not in ('r', 'rb') and path.startswith('data/'):
376 self.fncache.add(path)
376 self.fncache.add(path)
377 return self.opener(self.encode(path), mode, *args, **kw)
377 return self.opener(self.encode(path), mode, *args, **kw)
378
378
379 class fncachestore(basicstore):
379 class fncachestore(basicstore):
380 def __init__(self, path, openertype, encode):
380 def __init__(self, path, openertype, encode):
381 self.encode = encode
381 self.encode = encode
382 self.path = path + '/store'
382 self.path = path + '/store'
383 self.createmode = _calcmode(self.path)
383 self.createmode = _calcmode(self.path)
384 op = openertype(self.path)
384 op = openertype(self.path)
385 op.createmode = self.createmode
385 op.createmode = self.createmode
386 fnc = fncache(op)
386 fnc = fncache(op)
387 self.fncache = fnc
387 self.fncache = fnc
388 self.opener = _fncacheopener(op, fnc, encode)
388 self.opener = _fncacheopener(op, fnc, encode)
389
389
390 def join(self, f):
390 def join(self, f):
391 return self.path + '/' + self.encode(f)
391 return self.path + '/' + self.encode(f)
392
392
393 def datafiles(self):
393 def datafiles(self):
394 rewrite = False
394 rewrite = False
395 existing = []
395 existing = []
396 spath = self.path
396 spath = self.path
397 for f in self.fncache:
397 for f in self.fncache:
398 ef = self.encode(f)
398 ef = self.encode(f)
399 try:
399 try:
400 st = os.stat(spath + '/' + ef)
400 st = os.stat(spath + '/' + ef)
401 yield f, ef, st.st_size
401 yield f, ef, st.st_size
402 existing.append(f)
402 existing.append(f)
403 except OSError:
403 except OSError:
404 # nonexistent entry
404 # nonexistent entry
405 rewrite = True
405 rewrite = True
406 if rewrite:
406 if rewrite:
407 # rewrite fncache to remove nonexistent entries
407 # rewrite fncache to remove nonexistent entries
408 # (may be caused by rollback / strip)
408 # (may be caused by rollback / strip)
409 self.fncache.rewrite(existing)
409 self.fncache.rewrite(existing)
410
410
411 def copylist(self):
411 def copylist(self):
412 d = ('data dh fncache'
412 d = ('data dh fncache'
413 ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
413 ' 00manifest.d 00manifest.i 00changelog.d 00changelog.i')
414 return (['requires', '00changelog.i'] +
414 return (['requires', '00changelog.i'] +
415 ['store/' + f for f in d.split()])
415 ['store/' + f for f in d.split()])
416
416
417 def write(self):
417 def write(self):
418 self.fncache.write()
418 self.fncache.write()
419
419
420 def store(requirements, path, openertype):
420 def store(requirements, path, openertype):
421 if 'store' in requirements:
421 if 'store' in requirements:
422 if 'fncache' in requirements:
422 if 'fncache' in requirements:
423 auxencode = lambda f: _auxencode(f, 'dotencode' in requirements)
423 auxencode = lambda f: _auxencode(f, 'dotencode' in requirements)
424 encode = lambda f: _hybridencode(f, auxencode)
424 encode = lambda f: _hybridencode(f, auxencode)
425 return fncachestore(path, openertype, encode)
425 return fncachestore(path, openertype, encode)
426 return encodedstore(path, openertype)
426 return encodedstore(path, openertype)
427 return basicstore(path, openertype)
427 return basicstore(path, openertype)
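
Hypothetical usage of the factory above, with path and opener as placeholders; the requirements set is what localrepository reads from .hg/requires:

store(set(), path, opener)                       # -> basicstore (very old repos)
store(set(['store']), path, opener)              # -> encodedstore
store(set(['store', 'fncache']), path, opener)   # -> fncachestore
store(set(['store', 'fncache', 'dotencode']),    # -> fncachestore, additionally
      path, opener)                              #    encoding leading '.' or ' '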
@@ -1,292 +1,292 b''
1 # tags.py - read tag info from local repository
1 # tags.py - read tag info from local repository
2 #
2 #
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
3 # Copyright 2009 Matt Mackall <mpm@selenic.com>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
4 # Copyright 2009 Greg Ward <greg@gerg.ca>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 # Currently this module only deals with reading and caching tags.
9 # Currently this module only deals with reading and caching tags.
10 # Eventually, it could take care of updating (adding/removing/moving)
10 # Eventually, it could take care of updating (adding/removing/moving)
11 # tags too.
11 # tags too.
12
12
13 from node import nullid, bin, hex, short
13 from node import nullid, bin, hex, short
14 from i18n import _
14 from i18n import _
15 import encoding
15 import encoding
16 import error
16 import error
17 import errno
17 import errno
18
18
19 def findglobaltags(ui, repo, alltags, tagtypes):
19 def findglobaltags(ui, repo, alltags, tagtypes):
20 '''Find global tags in repo by reading .hgtags from every head that
20 '''Find global tags in repo by reading .hgtags from every head that
21 has a distinct version of it, using a cache to avoid excess work.
21 has a distinct version of it, using a cache to avoid excess work.
22 Updates the dicts alltags, tagtypes in place: alltags maps tag name
22 Updates the dicts alltags, tagtypes in place: alltags maps tag name
23 to (node, hist) pair (see _readtags() below), and tagtypes maps tag
23 to (node, hist) pair (see _readtags() below), and tagtypes maps tag
24 name to tag type ("global" in this case).'''
24 name to tag type ("global" in this case).'''
25 # This is so we can be lazy and assume alltags contains only global
25 # This is so we can be lazy and assume alltags contains only global
26 # tags when we pass it to _writetagcache().
26 # tags when we pass it to _writetagcache().
27 assert len(alltags) == len(tagtypes) == 0, \
27 assert len(alltags) == len(tagtypes) == 0, \
28 "findglobaltags() should be called first"
28 "findglobaltags() should be called first"
29
29
30 (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
30 (heads, tagfnode, cachetags, shouldwrite) = _readtagcache(ui, repo)
31 if cachetags is not None:
31 if cachetags is not None:
32 assert not shouldwrite
32 assert not shouldwrite
33 # XXX is this really 100% correct? are there oddball special
33 # XXX is this really 100% correct? are there oddball special
34 # cases where a global tag should outrank a local tag but won't,
34 # cases where a global tag should outrank a local tag but won't,
35 # because cachetags does not contain rank info?
35 # because cachetags does not contain rank info?
36 _updatetags(cachetags, 'global', alltags, tagtypes)
36 _updatetags(cachetags, 'global', alltags, tagtypes)
37 return
37 return
38
38
39 seen = set() # set of fnode
39 seen = set() # set of fnode
40 fctx = None
40 fctx = None
41 for head in reversed(heads): # oldest to newest
41 for head in reversed(heads): # oldest to newest
42 assert head in repo.changelog.nodemap, \
42 assert head in repo.changelog.nodemap, \
43 "tag cache returned bogus head %s" % short(head)
43 "tag cache returned bogus head %s" % short(head)
44
44
45 fnode = tagfnode.get(head)
45 fnode = tagfnode.get(head)
46 if fnode and fnode not in seen:
46 if fnode and fnode not in seen:
47 seen.add(fnode)
47 seen.add(fnode)
48 if not fctx:
48 if not fctx:
49 fctx = repo.filectx('.hgtags', fileid=fnode)
49 fctx = repo.filectx('.hgtags', fileid=fnode)
50 else:
50 else:
51 fctx = fctx.filectx(fnode)
51 fctx = fctx.filectx(fnode)
52
52
53 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
53 filetags = _readtags(ui, repo, fctx.data().splitlines(), fctx)
54 _updatetags(filetags, 'global', alltags, tagtypes)
54 _updatetags(filetags, 'global', alltags, tagtypes)
55
55
56 # and update the cache (if necessary)
56 # and update the cache (if necessary)
57 if shouldwrite:
57 if shouldwrite:
58 _writetagcache(ui, repo, heads, tagfnode, alltags)
58 _writetagcache(ui, repo, heads, tagfnode, alltags)
59
59
60 def readlocaltags(ui, repo, alltags, tagtypes):
60 def readlocaltags(ui, repo, alltags, tagtypes):
61 '''Read local tags in repo. Update alltags and tagtypes.'''
61 '''Read local tags in repo. Update alltags and tagtypes.'''
62 try:
62 try:
63 data = repo.opener.read("localtags")
63 data = repo.opener.read("localtags")
64 except IOError, inst:
64 except IOError, inst:
65 if inst.errno != errno.ENOENT:
65 if inst.errno != errno.ENOENT:
66 raise
66 raise
67 return
67 return
68
68
69 # localtags is in the local encoding; re-encode to UTF-8 on
69 # localtags is in the local encoding; re-encode to UTF-8 on
70 # input for consistency with the rest of this module.
70 # input for consistency with the rest of this module.
71 filetags = _readtags(
71 filetags = _readtags(
72 ui, repo, data.splitlines(), "localtags",
72 ui, repo, data.splitlines(), "localtags",
73 recode=encoding.fromlocal)
73 recode=encoding.fromlocal)
74 _updatetags(filetags, "local", alltags, tagtypes)
74 _updatetags(filetags, "local", alltags, tagtypes)
75
75
76 def _readtags(ui, repo, lines, fn, recode=None):
76 def _readtags(ui, repo, lines, fn, recode=None):
77 '''Read tag definitions from a file (or any source of lines).
77 '''Read tag definitions from a file (or any source of lines).
78 Return a mapping from tag name to (node, hist): node is the node id
78 Return a mapping from tag name to (node, hist): node is the node id
79 from the last line read for that name, and hist is the list of node
79 from the last line read for that name, and hist is the list of node
80 ids previously associated with it (in file order). All node ids are
80 ids previously associated with it (in file order). All node ids are
81 binary, not hex.'''
81 binary, not hex.'''
82
82
83 filetags = {} # map tag name to (node, hist)
83 filetags = {} # map tag name to (node, hist)
84 count = 0
84 count = 0
85
85
86 def warn(msg):
86 def warn(msg):
87 ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
87 ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))
88
88
89 for line in lines:
89 for line in lines:
90 count += 1
90 count += 1
91 if not line:
91 if not line:
92 continue
92 continue
93 try:
93 try:
94 (nodehex, name) = line.split(" ", 1)
94 (nodehex, name) = line.split(" ", 1)
95 except ValueError:
95 except ValueError:
96 warn(_("cannot parse entry"))
96 warn(_("cannot parse entry"))
97 continue
97 continue
98 name = name.strip()
98 name = name.strip()
99 if recode:
99 if recode:
100 name = recode(name)
100 name = recode(name)
101 try:
101 try:
102 nodebin = bin(nodehex)
102 nodebin = bin(nodehex)
103 except TypeError:
103 except TypeError:
104 warn(_("node '%s' is not well formed") % nodehex)
104 warn(_("node '%s' is not well formed") % nodehex)
105 continue
105 continue
106
106
107 # update filetags
107 # update filetags
108 hist = []
108 hist = []
109 if name in filetags:
109 if name in filetags:
110 n, hist = filetags[name]
110 n, hist = filetags[name]
111 hist.append(n)
111 hist.append(n)
112 filetags[name] = (nodebin, hist)
112 filetags[name] = (nodebin, hist)
113 return filetags
113 return filetags
114
114
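
A worked example of the last-one-wins rule above, with hypothetical nodes:

# given a .hgtags (or localtags) containing
#
#   1111111111111111111111111111111111111111 release-1.0
#   2222222222222222222222222222222222222222 release-1.0
#
# _readtags() returns
#
#   {'release-1.0': (<node 2222...>, [<node 1111...>])}
#
# the node from the last line wins, while earlier nodes are kept in
# hist so _updatetags() can rank competing definitions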
115 def _updatetags(filetags, tagtype, alltags, tagtypes):
115 def _updatetags(filetags, tagtype, alltags, tagtypes):
116 '''Incorporate the tag info read from one file into the two
116 '''Incorporate the tag info read from one file into the two
117 dictionaries, alltags and tagtypes, that contain all tag
117 dictionaries, alltags and tagtypes, that contain all tag
118 info (global across all heads plus local).'''
118 info (global across all heads plus local).'''
119
119
120 for name, nodehist in filetags.iteritems():
120 for name, nodehist in filetags.iteritems():
121 if name not in alltags:
121 if name not in alltags:
122 alltags[name] = nodehist
122 alltags[name] = nodehist
123 tagtypes[name] = tagtype
123 tagtypes[name] = tagtype
124 continue
124 continue
125
125
126 # we prefer alltags[name] if:
126 # we prefer alltags[name] if:
127 # it supersedes us OR
127 # it supersedes us OR
128 # we mutually supersede and it has a higher rank
128 # we mutually supersede and it has a higher rank
129 # otherwise we win because we're tip-most
129 # otherwise we win because we're tip-most
130 anode, ahist = nodehist
130 anode, ahist = nodehist
131 bnode, bhist = alltags[name]
131 bnode, bhist = alltags[name]
132 if (bnode != anode and anode in bhist and
132 if (bnode != anode and anode in bhist and
133 (bnode not in ahist or len(bhist) > len(ahist))):
133 (bnode not in ahist or len(bhist) > len(ahist))):
134 anode = bnode
134 anode = bnode
135 ahist.extend([n for n in bhist if n not in ahist])
135 ahist.extend([n for n in bhist if n not in ahist])
136 alltags[name] = anode, ahist
136 alltags[name] = anode, ahist
137 tagtypes[name] = tagtype
137 tagtypes[name] = tagtype
138
138
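
A worked example of the preference rule above, with hypothetical nodes A and B:

# alltags['v1'] = (B, [A])    # already recorded: B supersedes A
# filetags['v1'] = (A, [])    # another source still says A
#
# A appears in B's history and B does not appear in A's, so the
# existing entry wins: alltags['v1'] stays (B, [A]). Under mutual
# supersession the entry with the longer history wins; otherwise the
# incoming, tip-most definition takes over.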
139
139
140 # The tag cache only stores info about heads, not the tag contents
140 # The tag cache only stores info about heads, not the tag contents
141 # from each head. I.e. it doesn't try to squeeze out the maximum
141 # from each head. I.e. it doesn't try to squeeze out the maximum
142 # performance, but is simpler and has a better chance of actually
142 # performance, but is simpler and has a better chance of actually
143 # working correctly. And this gives the biggest performance win: it
143 # working correctly. And this gives the biggest performance win: it
144 # avoids looking up .hgtags in the manifest for every head, and it
144 # avoids looking up .hgtags in the manifest for every head, and it
145 # can avoid calling heads() at all if there have been no changes to
145 # can avoid calling heads() at all if there have been no changes to
146 # the repo.
146 # the repo.
147
147
148 def _readtagcache(ui, repo):
148 def _readtagcache(ui, repo):
149 '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
149 '''Read the tag cache and return a tuple (heads, fnodes, cachetags,
150 shouldwrite). If the cache is completely up-to-date, cachetags is a
150 shouldwrite). If the cache is completely up-to-date, cachetags is a
151 dict of the form returned by _readtags(); otherwise, it is None and
151 dict of the form returned by _readtags(); otherwise, it is None and
152 heads and fnodes are set. In that case, heads is the list of all
152 heads and fnodes are set. In that case, heads is the list of all
153 heads currently in the repository (ordered from tip to oldest) and
153 heads currently in the repository (ordered from tip to oldest) and
154 fnodes is a mapping from head to .hgtags filenode. If those two are
154 fnodes is a mapping from head to .hgtags filenode. If those two are
155 set, caller is responsible for reading tag info from each head.'''
155 set, caller is responsible for reading tag info from each head.'''
156
156
157 try:
157 try:
158 cachefile = repo.opener('cache/tags', 'r')
158 cachefile = repo.opener('cache/tags', 'r')
159 # force reading the file for static-http
159 # force reading the file for static-http
160 cachelines = iter(cachefile)
160 cachelines = iter(cachefile)
161 except IOError:
161 except IOError:
162 cachefile = None
162 cachefile = None
163
163
164 # The cache file consists of lines like
164 # The cache file consists of lines like
165 # <headrev> <headnode> [<tagnode>]
165 # <headrev> <headnode> [<tagnode>]
166 # where <headrev> and <headnode> redundantly identify a repository
166 # where <headrev> and <headnode> redundantly identify a repository
167 # head from the time the cache was written, and <tagnode> is the
167 # head from the time the cache was written, and <tagnode> is the
168 # filenode of .hgtags on that head. Heads with no .hgtags file will
168 # filenode of .hgtags on that head. Heads with no .hgtags file will
169 # have no <tagnode>. The cache is ordered from tip to oldest (which
169 # have no <tagnode>. The cache is ordered from tip to oldest (which
170 # is part of why <headrev> is there: a quick visual check is all
170 # is part of why <headrev> is there: a quick visual check is all
171 # that's required to ensure correct order).
171 # that's required to ensure correct order).
172 #
172 #
173 # This information is enough to let us avoid the most expensive part
173 # This information is enough to let us avoid the most expensive part
174 # of finding global tags, which is looking up <tagnode> in the
174 # of finding global tags, which is looking up <tagnode> in the
175 # manifest for each head.
175 # manifest for each head.
176 cacherevs = [] # list of headrev
176 cacherevs = [] # list of headrev
177 cacheheads = [] # list of headnode
177 cacheheads = [] # list of headnode
178 cachefnode = {} # map headnode to filenode
178 cachefnode = {} # map headnode to filenode
179 if cachefile:
179 if cachefile:
180 try:
180 try:
181 for line in cachelines:
181 for line in cachelines:
182 if line == "\n":
182 if line == "\n":
183 break
183 break
184 line = line.rstrip().split()
184 line = line.rstrip().split()
185 cacherevs.append(int(line[0]))
185 cacherevs.append(int(line[0]))
186 headnode = bin(line[1])
186 headnode = bin(line[1])
187 cacheheads.append(headnode)
187 cacheheads.append(headnode)
188 if len(line) == 3:
188 if len(line) == 3:
189 fnode = bin(line[2])
189 fnode = bin(line[2])
190 cachefnode[headnode] = fnode
190 cachefnode[headnode] = fnode
191 except Exception:
191 except Exception:
192 # corruption of the tags cache, just recompute it
192 # corruption of the tags cache, just recompute it
193 ui.warn(_('.hg/cache/tags is corrupt, rebuilding it\n'))
193 ui.warn(_('.hg/cache/tags is corrupt, rebuilding it\n'))
194 cacheheads = []
194 cacheheads = []
195 cacherevs = []
195 cacherevs = []
196 cachefnode = {}
196 cachefnode = {}
197
197
198 tipnode = repo.changelog.tip()
198 tipnode = repo.changelog.tip()
199 tiprev = len(repo.changelog) - 1
199 tiprev = len(repo.changelog) - 1
200
200
201 # Case 1 (common): tip is the same, so nothing has changed.
201 # Case 1 (common): tip is the same, so nothing has changed.
202 # (Unchanged tip trivially means no changesets have been added.
202 # (Unchanged tip trivially means no changesets have been added.
203 # But, thanks to localrepository.destroyed(), it also means none
203 # But, thanks to localrepository.destroyed(), it also means none
204 # have been destroyed by strip or rollback.)
204 # have been destroyed by strip or rollback.)
205 if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
205 if cacheheads and cacheheads[0] == tipnode and cacherevs[0] == tiprev:
206 tags = _readtags(ui, repo, cachelines, cachefile.name)
206 tags = _readtags(ui, repo, cachelines, cachefile.name)
207 cachefile.close()
207 cachefile.close()
208 return (None, None, tags, False)
208 return (None, None, tags, False)
209 if cachefile:
209 if cachefile:
210 cachefile.close() # ignore rest of file
210 cachefile.close() # ignore rest of file
211
211
212 repoheads = repo.heads()
212 repoheads = repo.heads()
213 # Case 2 (uncommon): empty repo; get out quickly and don't bother
213 # Case 2 (uncommon): empty repo; get out quickly and don't bother
214 # writing an empty cache.
214 # writing an empty cache.
215 if repoheads == [nullid]:
215 if repoheads == [nullid]:
216 return ([], {}, {}, False)
216 return ([], {}, {}, False)
217
217
218 # Case 3 (uncommon): cache file missing or empty.
218 # Case 3 (uncommon): cache file missing or empty.
219
219
220 # Case 4 (uncommon): tip rev decreased. This should only happen
220 # Case 4 (uncommon): tip rev decreased. This should only happen
221 # when we're called from localrepository.destroyed(). Refresh the
221 # when we're called from localrepository.destroyed(). Refresh the
222 # cache so future invocations will not see disappeared heads in the
222 # cache so future invocations will not see disappeared heads in the
223 # cache.
223 # cache.
224
224
225 # Case 5 (common): tip has changed, so we've added/replaced heads.
225 # Case 5 (common): tip has changed, so we've added/replaced heads.
226
226
227 # As it happens, the code to handle cases 3, 4, 5 is the same.
227 # As it happens, the code to handle cases 3, 4, 5 is the same.
228
228
229 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
229 # N.B. in case 4 (nodes destroyed), "new head" really means "newly
230 # exposed".
230 # exposed".
231 newheads = [head
231 newheads = [head
232 for head in repoheads
232 for head in repoheads
233 if head not in set(cacheheads)]
233 if head not in set(cacheheads)]
234
234
235 # Now we have to look up the .hgtags filenode for every new head.
235 # Now we have to look up the .hgtags filenode for every new head.
236 # This is the most expensive part of finding tags, so performance
236 # This is the most expensive part of finding tags, so performance
237 # depends primarily on the size of newheads. Worst case: no cache
237 # depends primarily on the size of newheads. Worst case: no cache
238 # file, so newheads == repoheads.
238 # file, so newheads == repoheads.
239 for head in newheads:
239 for head in newheads:
240 cctx = repo[head]
240 cctx = repo[head]
241 try:
241 try:
242 fnode = cctx.filenode('.hgtags')
242 fnode = cctx.filenode('.hgtags')
243 cachefnode[head] = fnode
243 cachefnode[head] = fnode
244 except error.LookupError:
244 except error.LookupError:
245 # no .hgtags file on this head
245 # no .hgtags file on this head
246 pass
246 pass
247
247
248 # Caller has to iterate over all heads, but can use the filenodes in
248 # Caller has to iterate over all heads, but can use the filenodes in
249 # cachefnode to get to each .hgtags revision quickly.
249 # cachefnode to get to each .hgtags revision quickly.
250 return (repoheads, cachefnode, None, True)
250 return (repoheads, cachefnode, None, True)
251
251
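
Hypothetical cache/tags content matching the format described above (nodes abbreviated for readability; the real file stores full 40-character hex nodes):

# 4 0f426b46...2a 3b71bb68...9c    <- head rev 4, with its .hgtags filenode
# 2 ad17b9d7...11                  <- head rev 2, which has no .hgtags
#                                  <- blank line separating the sections
# 3b4d1a05...77 release-1.0        <- cached tag node/name pairs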
252 def _writetagcache(ui, repo, heads, tagfnode, cachetags):
252 def _writetagcache(ui, repo, heads, tagfnode, cachetags):
253
253
254 try:
254 try:
255 cachefile = repo.opener('cache/tags', 'w', atomictemp=True)
255 cachefile = repo.opener('cache/tags', 'w', atomictemp=True)
256 except (OSError, IOError):
256 except (OSError, IOError):
257 return
257 return
258
258
259 realheads = repo.heads() # for sanity checks below
259 realheads = repo.heads() # for sanity checks below
260 for head in heads:
260 for head in heads:
261 # temporary sanity checks; these can probably be removed
261 # temporary sanity checks; these can probably be removed
262 # once this code has been in crew for a few weeks
262 # once this code has been in crew for a few weeks
263 assert head in repo.changelog.nodemap, \
263 assert head in repo.changelog.nodemap, \
264 'trying to write non-existent node %s to tag cache' % short(head)
264 'trying to write non-existent node %s to tag cache' % short(head)
265 assert head in realheads, \
265 assert head in realheads, \
266 'trying to write non-head %s to tag cache' % short(head)
266 'trying to write non-head %s to tag cache' % short(head)
267 assert head != nullid, \
267 assert head != nullid, \
268 'trying to write nullid to tag cache'
268 'trying to write nullid to tag cache'
269
269
270 # This can't fail because of the first assert above. When/if we
270 # This can't fail because of the first assert above. When/if we
271 # remove that assert, we might want to catch LookupError here
271 # remove that assert, we might want to catch LookupError here
272 # and downgrade it to a warning.
272 # and downgrade it to a warning.
273 rev = repo.changelog.rev(head)
273 rev = repo.changelog.rev(head)
274
274
275 fnode = tagfnode.get(head)
275 fnode = tagfnode.get(head)
276 if fnode:
276 if fnode:
277 cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
277 cachefile.write('%d %s %s\n' % (rev, hex(head), hex(fnode)))
278 else:
278 else:
279 cachefile.write('%d %s\n' % (rev, hex(head)))
279 cachefile.write('%d %s\n' % (rev, hex(head)))
280
280
281 # Tag names in the cache are in UTF-8 -- which is the whole reason
281 # Tag names in the cache are in UTF-8 -- which is the whole reason
282 # we keep them in UTF-8 throughout this module. If we converted
282 # we keep them in UTF-8 throughout this module. If we converted
283 # them local encoding on input, we would lose info writing them to
283 # them local encoding on input, we would lose info writing them to
284 # the cache.
284 # the cache.
285 cachefile.write('\n')
285 cachefile.write('\n')
286 for (name, (node, hist)) in cachetags.iteritems():
286 for (name, (node, hist)) in cachetags.iteritems():
287 cachefile.write("%s %s\n" % (hex(node), name))
287 cachefile.write("%s %s\n" % (hex(node), name))
288
288
289 try:
289 try:
290 cachefile.rename()
290 cachefile.close()
291 except (OSError, IOError):
291 except (OSError, IOError):
292 pass
292 pass
@@ -1,1648 +1,1647 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specfic implementations.
10 """Mercurial utility functions and platform specfic implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from i18n import _
16 from i18n import _
17 import error, osutil, encoding
17 import error, osutil, encoding
18 import errno, re, shutil, sys, tempfile, traceback
18 import errno, re, shutil, sys, tempfile, traceback
19 import os, time, calendar, textwrap, unicodedata, signal
19 import os, time, calendar, textwrap, unicodedata, signal
20 import imp, socket, urllib
20 import imp, socket, urllib
21
21
22 if os.name == 'nt':
22 if os.name == 'nt':
23 import windows as platform
23 import windows as platform
24 else:
24 else:
25 import posix as platform
25 import posix as platform
26
26
27 cachestat = platform.cachestat
27 cachestat = platform.cachestat
28 checkexec = platform.checkexec
28 checkexec = platform.checkexec
29 checklink = platform.checklink
29 checklink = platform.checklink
30 copymode = platform.copymode
30 copymode = platform.copymode
31 executablepath = platform.executablepath
31 executablepath = platform.executablepath
32 expandglobs = platform.expandglobs
32 expandglobs = platform.expandglobs
33 explainexit = platform.explainexit
33 explainexit = platform.explainexit
34 findexe = platform.findexe
34 findexe = platform.findexe
35 gethgcmd = platform.gethgcmd
35 gethgcmd = platform.gethgcmd
36 getuser = platform.getuser
36 getuser = platform.getuser
37 groupmembers = platform.groupmembers
37 groupmembers = platform.groupmembers
38 groupname = platform.groupname
38 groupname = platform.groupname
39 hidewindow = platform.hidewindow
39 hidewindow = platform.hidewindow
40 isexec = platform.isexec
40 isexec = platform.isexec
41 isowner = platform.isowner
41 isowner = platform.isowner
42 localpath = platform.localpath
42 localpath = platform.localpath
43 lookupreg = platform.lookupreg
43 lookupreg = platform.lookupreg
44 makedir = platform.makedir
44 makedir = platform.makedir
45 nlinks = platform.nlinks
45 nlinks = platform.nlinks
46 normpath = platform.normpath
46 normpath = platform.normpath
47 nulldev = platform.nulldev
47 nulldev = platform.nulldev
48 openhardlinks = platform.openhardlinks
48 openhardlinks = platform.openhardlinks
49 oslink = platform.oslink
49 oslink = platform.oslink
50 parsepatchoutput = platform.parsepatchoutput
50 parsepatchoutput = platform.parsepatchoutput
51 pconvert = platform.pconvert
51 pconvert = platform.pconvert
52 popen = platform.popen
52 popen = platform.popen
53 posixfile = platform.posixfile
53 posixfile = platform.posixfile
54 quotecommand = platform.quotecommand
54 quotecommand = platform.quotecommand
55 realpath = platform.realpath
55 realpath = platform.realpath
56 rename = platform.rename
56 rename = platform.rename
57 samedevice = platform.samedevice
57 samedevice = platform.samedevice
58 samefile = platform.samefile
58 samefile = platform.samefile
59 samestat = platform.samestat
59 samestat = platform.samestat
60 setbinary = platform.setbinary
60 setbinary = platform.setbinary
61 setflags = platform.setflags
61 setflags = platform.setflags
62 setsignalhandler = platform.setsignalhandler
62 setsignalhandler = platform.setsignalhandler
63 shellquote = platform.shellquote
63 shellquote = platform.shellquote
64 spawndetached = platform.spawndetached
64 spawndetached = platform.spawndetached
65 sshargs = platform.sshargs
65 sshargs = platform.sshargs
66 statfiles = platform.statfiles
66 statfiles = platform.statfiles
67 termwidth = platform.termwidth
67 termwidth = platform.termwidth
68 testpid = platform.testpid
68 testpid = platform.testpid
69 umask = platform.umask
69 umask = platform.umask
70 unlink = platform.unlink
70 unlink = platform.unlink
71 unlinkpath = platform.unlinkpath
71 unlinkpath = platform.unlinkpath
72 username = platform.username
72 username = platform.username
73
73
74 # Python compatibility
74 # Python compatibility
75
75
76 def sha1(s):
76 def sha1(s):
77 return _fastsha1(s)
77 return _fastsha1(s)
78
78
79 _notset = object()
79 _notset = object()
80 def safehasattr(thing, attr):
80 def safehasattr(thing, attr):
81 return getattr(thing, attr, _notset) is not _notset
81 return getattr(thing, attr, _notset) is not _notset
82
82
83 def _fastsha1(s):
83 def _fastsha1(s):
84 # This function will import sha1 from hashlib or sha (whichever is
84 # This function will import sha1 from hashlib or sha (whichever is
85 # available) and overwrite itself with it on the first call.
85 # available) and overwrite itself with it on the first call.
86 # Subsequent calls will go directly to the imported function.
86 # Subsequent calls will go directly to the imported function.
87 if sys.version_info >= (2, 5):
87 if sys.version_info >= (2, 5):
88 from hashlib import sha1 as _sha1
88 from hashlib import sha1 as _sha1
89 else:
89 else:
90 from sha import sha as _sha1
90 from sha import sha as _sha1
91 global _fastsha1, sha1
91 global _fastsha1, sha1
92 _fastsha1 = sha1 = _sha1
92 _fastsha1 = sha1 = _sha1
93 return _sha1(s)
93 return _sha1(s)
94
94
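
The same lazy-import-and-rebind trick in isolation, with hashlib.md5 standing in for the conditionally imported function (a sketch, not part of util.py):

def _slowdigest(s):
    from hashlib import md5 as _md5  # the one-time 'expensive' import
    global _slowdigest
    _slowdigest = _md5               # replace ourselves at module level
    return _md5(s)

# the first _slowdigest() call pays for the import and rebinds the
# name; later calls go straight to hashlib.md5. As with sha1 above,
# any other module-level alias of the old function must be rebound
# too, which is why _fastsha1 also reassigns sha1.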
95 import __builtin__
95 import __builtin__
96
96
97 if sys.version_info[0] < 3:
97 if sys.version_info[0] < 3:
98 def fakebuffer(sliceable, offset=0):
98 def fakebuffer(sliceable, offset=0):
99 return sliceable[offset:]
99 return sliceable[offset:]
100 else:
100 else:
101 def fakebuffer(sliceable, offset=0):
101 def fakebuffer(sliceable, offset=0):
102 return memoryview(sliceable)[offset:]
102 return memoryview(sliceable)[offset:]
103 try:
103 try:
104 buffer
104 buffer
105 except NameError:
105 except NameError:
106 __builtin__.buffer = fakebuffer
106 __builtin__.buffer = fakebuffer
107
107
108 import subprocess
108 import subprocess
109 closefds = os.name == 'posix'
109 closefds = os.name == 'posix'
110
110
111 def popen2(cmd, env=None, newlines=False):
111 def popen2(cmd, env=None, newlines=False):
112 # Setting bufsize to -1 lets the system decide the buffer size.
112 # Setting bufsize to -1 lets the system decide the buffer size.
113 # The default for bufsize is 0, meaning unbuffered. This leads to
113 # The default for bufsize is 0, meaning unbuffered. This leads to
114 # poor performance on Mac OS X: http://bugs.python.org/issue4194
114 # poor performance on Mac OS X: http://bugs.python.org/issue4194
115 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
115 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
116 close_fds=closefds,
116 close_fds=closefds,
117 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
117 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
118 universal_newlines=newlines,
118 universal_newlines=newlines,
119 env=env)
119 env=env)
120 return p.stdin, p.stdout
120 return p.stdin, p.stdout
121
121
122 def popen3(cmd, env=None, newlines=False):
122 def popen3(cmd, env=None, newlines=False):
123 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
123 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
124 close_fds=closefds,
124 close_fds=closefds,
125 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
125 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
126 stderr=subprocess.PIPE,
126 stderr=subprocess.PIPE,
127 universal_newlines=newlines,
127 universal_newlines=newlines,
128 env=env)
128 env=env)
129 return p.stdin, p.stdout, p.stderr
129 return p.stdin, p.stdout, p.stderr
130
130
131 def version():
131 def version():
132 """Return version information if available."""
132 """Return version information if available."""
133 try:
133 try:
134 import __version__
134 import __version__
135 return __version__.version
135 return __version__.version
136 except ImportError:
136 except ImportError:
137 return 'unknown'
137 return 'unknown'
138
138
139 # used by parsedate
139 # used by parsedate
140 defaultdateformats = (
140 defaultdateformats = (
141 '%Y-%m-%d %H:%M:%S',
141 '%Y-%m-%d %H:%M:%S',
142 '%Y-%m-%d %I:%M:%S%p',
142 '%Y-%m-%d %I:%M:%S%p',
143 '%Y-%m-%d %H:%M',
143 '%Y-%m-%d %H:%M',
144 '%Y-%m-%d %I:%M%p',
144 '%Y-%m-%d %I:%M%p',
145 '%Y-%m-%d',
145 '%Y-%m-%d',
146 '%m-%d',
146 '%m-%d',
147 '%m/%d',
147 '%m/%d',
148 '%m/%d/%y',
148 '%m/%d/%y',
149 '%m/%d/%Y',
149 '%m/%d/%Y',
150 '%a %b %d %H:%M:%S %Y',
150 '%a %b %d %H:%M:%S %Y',
151 '%a %b %d %I:%M:%S%p %Y',
151 '%a %b %d %I:%M:%S%p %Y',
152 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
152 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
153 '%b %d %H:%M:%S %Y',
153 '%b %d %H:%M:%S %Y',
154 '%b %d %I:%M:%S%p %Y',
154 '%b %d %I:%M:%S%p %Y',
155 '%b %d %H:%M:%S',
155 '%b %d %H:%M:%S',
156 '%b %d %I:%M:%S%p',
156 '%b %d %I:%M:%S%p',
157 '%b %d %H:%M',
157 '%b %d %H:%M',
158 '%b %d %I:%M%p',
158 '%b %d %I:%M%p',
159 '%b %d %Y',
159 '%b %d %Y',
160 '%b %d',
160 '%b %d',
161 '%H:%M:%S',
161 '%H:%M:%S',
162 '%I:%M:%S%p',
162 '%I:%M:%S%p',
163 '%H:%M',
163 '%H:%M',
164 '%I:%M%p',
164 '%I:%M%p',
165 )
165 )
166
166
167 extendeddateformats = defaultdateformats + (
167 extendeddateformats = defaultdateformats + (
168 "%Y",
168 "%Y",
169 "%Y-%m",
169 "%Y-%m",
170 "%b",
170 "%b",
171 "%b %Y",
171 "%b %Y",
172 )
172 )
173
173
174 def cachefunc(func):
174 def cachefunc(func):
175 '''cache the result of function calls'''
175 '''cache the result of function calls'''
176 # XXX doesn't handle keywords args
176 # XXX doesn't handle keywords args
177 cache = {}
177 cache = {}
178 if func.func_code.co_argcount == 1:
178 if func.func_code.co_argcount == 1:
179 # we gain a small amount of time because
179 # we gain a small amount of time because
180 # we don't need to pack/unpack the list
180 # we don't need to pack/unpack the list
181 def f(arg):
181 def f(arg):
182 if arg not in cache:
182 if arg not in cache:
183 cache[arg] = func(arg)
183 cache[arg] = func(arg)
184 return cache[arg]
184 return cache[arg]
185 else:
185 else:
186 def f(*args):
186 def f(*args):
187 if args not in cache:
187 if args not in cache:
188 cache[args] = func(*args)
188 cache[args] = func(*args)
189 return cache[args]
189 return cache[args]
190
190
191 return f
191 return f
192
192
193 def lrucachefunc(func):
193 def lrucachefunc(func):
194 '''cache most recent results of function calls'''
194 '''cache most recent results of function calls'''
195 cache = {}
195 cache = {}
196 order = []
196 order = []
197 if func.func_code.co_argcount == 1:
197 if func.func_code.co_argcount == 1:
198 def f(arg):
198 def f(arg):
199 if arg not in cache:
199 if arg not in cache:
200 if len(cache) > 20:
200 if len(cache) > 20:
201 del cache[order.pop(0)]
201 del cache[order.pop(0)]
202 cache[arg] = func(arg)
202 cache[arg] = func(arg)
203 else:
203 else:
204 order.remove(arg)
204 order.remove(arg)
205 order.append(arg)
205 order.append(arg)
206 return cache[arg]
206 return cache[arg]
207 else:
207 else:
208 def f(*args):
208 def f(*args):
209 if args not in cache:
209 if args not in cache:
210 if len(cache) > 20:
210 if len(cache) > 20:
211 del cache[order.pop(0)]
211 del cache[order.pop(0)]
212 cache[args] = func(*args)
212 cache[args] = func(*args)
213 else:
213 else:
214 order.remove(args)
214 order.remove(args)
215 order.append(args)
215 order.append(args)
216 return cache[args]
216 return cache[args]
217
217
218 return f
218 return f
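
# An illustrative sketch of the two memoizers above ('_examplecaching'
# and 'fib' are hypothetical names for documentation only). cachefunc
# never evicts, so it suits small finite domains; lrucachefunc bounds
# the cache at ~20 entries but pays a list scan on every hit.
def _examplecaching():
    def fib(n):
        return n if n < 2 else fib(n - 1) + fib(n - 2)
    # rebinding 'fib' makes the recursive calls go through the cache too
    fib = cachefunc(fib)
    lfib = lrucachefunc(fib)    # keeps only the most recent results
    return fib(30), lfib(30)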

class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        setattr(obj, self.name, result)
        return result
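
# An illustrative sketch of propertycache ('_examplerepo' is a
# hypothetical class for documentation only): the decorated method runs
# once per instance; setattr then shadows the descriptor in the
# instance __dict__, so every later read is a plain attribute lookup.
class _examplerepo(object):
    @propertycache
    def expensive(self):
        return sum(xrange(1000000)) # computed on first access only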

def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout

def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)[0]))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
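
# An illustrative sketch of the filter table above ('_examplefilter' is
# a hypothetical helper; the shell commands are ordinary Unix tools and
# any shell filter would do): the 'pipe:' and 'tempfile:' prefixes pick
# the transport, and an unprefixed command defaults to pipefilter.
def _examplefilter():
    upper = filter('some text\n', 'pipe: tr a-z A-Z')
    out2 = filter('b\na\n', 'tempfile: sort INFILE > OUTFILE')
    plain = filter('abc\n', 'rev')    # no prefix: goes via pipefilter
    return upper, out2, plain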

def binary(s):
    """return true if a string is binary data"""
    return bool(s and '\0' in s)

def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)
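
# An illustrative sketch of increasingchunks ('_examplechunks' is a
# hypothetical helper for documentation): feeding it many tiny chunks
# yields roughly doubling block sizes up to max, which keeps per-chunk
# overhead low when streaming lots of small pieces.
def _examplechunks():
    tiny = ('x' * 100 for i in xrange(10000))
    # block sizes grow from ~1k toward the 64k ceiling
    return [len(c) for c in increasingchunks(tiny)]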

Abort = error.Abort

def always(fn):
    return True

def never(fn):
    return False

def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'

_hgexecutable = None

def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen("__main__")) # tools/freeze

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            _sethgexecutable(sys.executable)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable

def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path

def system(cmd, environ={}, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status. if ui object,
    print error message and return status, else raise onerr object as
    exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    env = dict(os.environ)
    env.update((k, py2shell(v)) for k, v in environ.iteritems())
    env['HG'] = hgexecutable()
    if out is None or out == sys.__stdout__:
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in proc.stdout:
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        try:
            onerr.warn(errmsg + '\n')
        except AttributeError:
            raise onerr(errmsg)
    return rc
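
# An illustrative sketch of system() ('_examplesystem' and its
# arguments are hypothetical, for documentation only): environ values
# pass through py2shell, so True/False/None reach the child process as
# '1'/'0'/'0'; passing a ui object as onerr reports failures via warn().
def _examplesystem(ui):
    rc = system('ls .', environ={'VERBOSE': True}, cwd='/tmp',
                onerr=ui, errprefix='listing failed')
    return rc == 0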

def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check

def copyfile(src, dest):
    "copy a file, preserving mode and atime/mtime"
    if os.path.islink(src):
        try:
            os.unlink(dest)
        except OSError:
            pass
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error, inst:
            raise Abort(str(inst))

def copyfiles(src, dst, hardlink=None):
    """Copy a directory tree using hardlinks if possible"""

    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)

    num = 0
    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            hardlink, n = copyfiles(srcname, dstname, hardlink)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1

    return hardlink, num

_winreservednames = '''con prn aux nul
    com1 com2 com3 com4 com5 com6 com7 com8 com9
    lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    '''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    '''
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        if t in '. ':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t

if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename

def makelock(info, pathname):
    try:
        return os.symlink(info, pathname)
    except OSError, why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    try:
        return os.readlink(pathname)
    except OSError, why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r

def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)

# File system features

def checkcase(path):
    """
    Check whether the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.stat(path)
    d, b = os.path.split(path)
    p2 = os.path.join(d, b.upper())
    if path == p2:
        p2 = os.path.join(d, b.lower())
    try:
        s2 = os.stat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True

_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name is either relative to root, or it is an absolute path starting
    with root. Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).
    '''
    # If name is absolute, make it relative
    if name.lower().startswith(root.lower()):
        l = len(root)
        if name[l] == os.sep or name[l] == os.altsep:
            l = l + 1
        name = name[l:]

    if not os.path.lexists(os.path.join(root, name)):
        return None

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    seps = seps.replace('\\', '\\\\')
    pattern = re.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normcase(os.path.normpath(root))
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = os.listdir(dir)
        contents = _fspathcache[dir]

        lpart = part.lower()
        lenp = len(part)
        for n in contents:
            if lenp == len(n) and n.lower() == lpart:
                result.append(n)
                break
        else:
            # Cannot happen, as the file exists!
            result.append(part)
        dir = os.path.join(dir, lpart)

    return ''.join(result)

def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        try:
            oslink(f1, f2)
        except OSError:
            return False

        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    finally:
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass

    return False

def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    return path.endswith(os.sep) or os.altsep and path.endswith(os.altsep)

def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative to a simple "path.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
    return path.split(os.sep)

def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        return os.name == "nt" or os.environ.get("DISPLAY")

def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError, inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:
        try: os.unlink(temp)
        except: pass
        raise
    return temp

class atomictempfile(object):
    '''writeable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name      # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.fileno = self._fp.fileno

    def close(self):
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
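
# An illustrative sketch of the atomictempfile contract
# ('_exampleatomicwrite' and the file name are hypothetical, for
# documentation only): close() publishes the writes atomically, while
# discard() (or destruction without close) throws them away, so readers
# never observe a half-written file.
def _exampleatomicwrite(data):
    f = atomictempfile('somefile.txt')
    try:
        f.write(data)
    except:
        f.discard()    # delete the temp copy; the original is untouched
        raise
    f.close()          # rename the temp copy over 'somefile.txt'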

def makedirs(name, mode=None):
    """recursive directory creation with parent mode inheritance"""
    parent = os.path.abspath(os.path.dirname(name))
    try:
        os.mkdir(name)
    except OSError, err:
        if err.errno == errno.EEXIST:
            return
        if not name or parent == name or err.errno != errno.ENOENT:
            raise
        makedirs(parent, mode)
        os.mkdir(name)
    if mode is not None:
        os.chmod(name, mode)

def readfile(path):
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()

def writefile(path, text):
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()

def appendfile(path, text):
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()

class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = []

    def read(self, l):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry."""
        left = l
        buf = ''
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            chunk = queue.pop(0)
            left -= len(chunk)
            if left < 0:
                queue.insert(0, chunk[left:])
                buf += chunk[:left]
            else:
                buf += chunk

        return buf
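
# An illustrative sketch of chunkbuffer ('_examplechunkbuffer' is a
# hypothetical helper, for documentation): it turns an iterator of
# arbitrarily sized chunks into a file-like read(n) interface,
# internally splitting anything over 1 MB into 256 kB pieces.
def _examplechunkbuffer():
    cb = chunkbuffer(iter(['abc', 'defgh', 'ij']))
    # yields 'abcd', then the remaining 'efghij'
    return cb.read(4), cb.read(100)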

def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s

def makedate():
    lt = time.localtime()
    if lt[8] == 1 and time.daylight:
        tz = time.altzone
    else:
        tz = time.timezone
    t = time.mktime(lt)
    if t < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % t, hint=hint)
    return t, tz

def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if the format does not contain %1
    or %2, no time zone is appended to the string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format:
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        format = format.replace("%1", "%c%02d" % (sign, minutes // 60))
        format = format.replace("%2", "%02d" % (minutes % 60))
    s = time.strftime(format, time.gmtime(float(t) - tz))
    return s

def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date."""
    return datestr(date, format='%Y-%m-%d')

def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    def timezone(string):
        tz = string.split()[-1]
        if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
            sign = (tz[0] == "+") and 1 or -1
            hours = int(tz[1:3])
            minutes = int(tz[3:5])
            return -sign * (hours * 60 + minutes) * 60
        if tz == "GMT" or tz == "UTC":
            return 0
        return None

    # NOTE: unixtime = localunixtime + offset
    offset, date = timezone(string), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset

def parsedate(date, formats=None, bias={}):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.
    """
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()
    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
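
# An illustrative sketch of parsedate ('_exampledates' and its inputs
# are hypothetical, for documentation only): missing fields are filled
# from bias/defaults, tuples pass through unchanged, and the result is
# always a (unixtime, offset) pair.
def _exampledates():
    when, offset = parsedate('2011-09-15 10:30 +0200')
    same = parsedate((when, offset))   # tuples are returned as-is
    recent = parsedate('10:30')        # today's date is assumed
    return when, offset, same, recent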

def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        d = dict(mb="1", d="1")
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        d = dict(mb="12", HI="23", M="59", S="59")
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop

def shortuser(user):
    """Return a short representation of a user name or email address."""
    f = user.find('@')
    if f >= 0:
        user = user[:f]
    f = user.find('<')
    if f >= 0:
        user = user[f + 1:]
    f = user.find(' ')
    if f >= 0:
        user = user[:f]
    f = user.find('.')
    if f >= 0:
        user = user[:f]
    return user

def email(author):
    '''get email of author.'''
    r = author.find('>')
    if r == -1:
        r = None
    return author[author.find('<') + 1:r]

def _ellipsis(text, maxlength):
    if len(text) <= maxlength:
        return text, False
    else:
        return "%s..." % (text[:maxlength - 3]), True

def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) characters."""
    try:
        # use unicode not to split at intermediate multi-byte sequence
        utext, truncated = _ellipsis(text.decode(encoding.encoding),
                                     maxlength)
        if not truncated:
            return text
        return utext.encode(encoding.encoding)
    except (UnicodeDecodeError, UnicodeEncodeError):
        return _ellipsis(text, maxlength)[0]

def bytecount(nbytes):
    '''return byte count formatted as readable string, with units'''

    units = (
        (100, 1 << 30, _('%.0f GB')),
        (10, 1 << 30, _('%.1f GB')),
        (1, 1 << 30, _('%.2f GB')),
        (100, 1 << 20, _('%.0f MB')),
        (10, 1 << 20, _('%.1f MB')),
        (1, 1 << 20, _('%.2f MB')),
        (100, 1 << 10, _('%.0f KB')),
        (10, 1 << 10, _('%.1f KB')),
        (1, 1 << 10, _('%.2f KB')),
        (1, 1, _('%.0f bytes')),
        )

    for multiplier, divisor, format in units:
        if nbytes >= divisor * multiplier:
            return format % (nbytes / float(divisor))
    return units[-1][2] % nbytes
1150
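# Sample outputs for illustration (not in the original file); the
# (multiplier, divisor) pairs pick a precision that keeps roughly three
# significant digits:
#
#   >>> bytecount(2048)
#   '2.00 KB'
#   >>> bytecount(123456789)
#   '118 MB'
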
def uirepr(s):
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')

# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for double-width characters.

        Some Asian characters use two terminal columns instead of one.
        A good example of this behavior can be seen with u'\u65e5\u672c',
        the two Japanese characters for "Japan":
        len() returns 2, but when printed to a terminal, they eat 4 columns.

        (Note that this has nothing to do whatsoever with unicode
        representation, or encoding of the underlying string)
        """
        def __init__(self, **kwargs):
            textwrap.TextWrapper.__init__(self, **kwargs)

        def _cutdown(self, ucstr, space_left):
            l = 0
            colwidth = unicodedata.east_asian_width
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i]) in 'WFA' and 2 or 1
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)

def wrap(line, width, initindent='', hangindent=''):
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)

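# Illustration only (not part of util.py): wrapping at width 10 with a
# two-space hanging indent.
#
#   >>> print wrap('aa bb cc dd ee', 10, hangindent='  ')
#   aa bb cc
#     dd ee
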
def iterlines(iterator):
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line

def expandpath(path):
    return os.path.expanduser(os.path.expandvars(path))

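# For illustration (not in the original file): environment variables are
# expanded first, then a leading '~'.
#
#   >>> os.environ['REPOROOT'] = '/srv/hg'
#   >>> expandpath('$REPOROOT/main')
#   '/srv/hg/main'
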
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        return [sys.executable]
    return gethgcmd()

def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent waits on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)

try:
    any, all = any, all
except NameError:
    def any(iterable):
        for i in iterable:
            if i:
                return True
        return False

    def all(iterable):
        for i in iterable:
            if not i:
                return False
        return True

def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using a doubled prefix
    to escape it.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = re.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)

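# A small illustration (not part of the original code): '%' as the prefix,
# with and without an fn hook applied to the replacement text.
#
#   >>> interpolate('%', {'user': 'joe'}, 'hello %user')
#   'hello joe'
#   >>> interpolate('%', {'user': 'joe'}, 'hello %user', fn=str.upper)
#   'hello JOE'
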
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, util.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)

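# Behavior sketch (illustrative, not in the original file):
#
#   >>> getport(8000)
#   8000
#   >>> getport('8000')
#   8000
#   >>> getport('http')    # falls through to socket.getservbyname()
#   80
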
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower(), None)

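# For illustration (not part of util.py): lookup is case-insensitive, and
# anything outside the table maps to None instead of raising.
#
#   >>> parsebool('Yes'), parsebool('off'), parsebool('maybe')
#   (True, False, None)
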
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)

def _urlunquote(s):
    """unquote('abc%20def') -> 'abc def'."""
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            s += '%' + item
        except UnicodeDecodeError:
            s += unichr(int(item[:2], 16)) + item[2:]
    return s

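# Illustration (not in the original source): malformed escapes fall back
# to the literal text instead of raising.
#
#   >>> _urlunquote('100%25')
#   '100%'
#   >>> _urlunquote('bad%zz')
#   'bad%zz'
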
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+"
    _matchscheme = re.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLs
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if parsefragment and '#' in path:
                path, self.fragment = path.split('#', 1)
                if not path:
                    path = None
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                    path = path
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/?foo#bar'))
        'http://user:pw@host:80/?foo#bar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')):
            s += '//'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif self.host is not None and self.path:
                path = '/' + path
            return path
        return self._origpath

def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path[1:2] == ':' and path[0:1].isalpha()

def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return str(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)

def isatty(fd):
    try:
        return fd.isatty()
    except AttributeError:
        return False
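
# Quick illustration of the two credential-scrubbing helpers above (not
# part of the original module):
#
#   >>> hidepassword('http://joe:secret@example.com/repo')
#   'http://joe:***@example.com/repo'
#   >>> removeauth('http://joe:secret@example.com/repo')
#   'http://example.com/repo'
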
@@ -1,49 +1,48 @@
 import os
 import glob
 from mercurial.util import atomictempfile

 # basic usage
 def test1_simple():
     if os.path.exists('foo'):
         os.remove('foo')
     file = atomictempfile('foo')
     (dir, basename) = os.path.split(file._tempname)
     assert not os.path.isfile('foo')
     assert basename in glob.glob('.foo-*')

     file.write('argh\n')
-    file.rename()
+    file.close()

     assert os.path.isfile('foo')
     assert basename not in glob.glob('.foo-*')
     print 'OK'

-# close() removes the temp file but does not make the write
-# permanent -- essentially discards your work (WTF?!)
-def test2_close():
+# discard() removes the temp file without making the write permanent
+def test2_discard():
     if os.path.exists('foo'):
         os.remove('foo')
     file = atomictempfile('foo')
     (dir, basename) = os.path.split(file._tempname)

     file.write('yo\n')
-    file.close()
+    file.discard()

     assert not os.path.isfile('foo')
     assert basename not in os.listdir('.')
     print 'OK'

 # if a programmer screws up and passes bad args to atomictempfile, they
 # get a plain ordinary TypeError, not infinite recursion
 def test3_oops():
     try:
         file = atomictempfile()
     except TypeError:
         print "OK"
     else:
         print "expected TypeError"

 if __name__ == '__main__':
     test1_simple()
-    test2_close()
+    test2_discard()
     test3_oops()
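
Reviewer note on the test above: with this change atomictempfile follows the
usual file-object contract. close() is what makes the write permanent (the
temp file is atomically renamed over the target), and the new discard() is
the explicit way to abandon the write. A minimal usage sketch of that
contract (illustrative only; 'somefile' is an arbitrary target name):

    from mercurial.util import atomictempfile

    f = atomictempfile('somefile')
    f.write('new contents\n')
    f.close()      # atomic rename: 'somefile' now holds the new contents

    f = atomictempfile('somefile')
    f.write('scratch\n')
    f.discard()    # temp file removed, 'somefile' left untouched
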
@@ -1,94 +1,94 @@
 import sys, os, subprocess

 if subprocess.call(['%s/hghave' % os.environ['TESTDIR'], 'cacheable']):
     sys.exit(80)

 from mercurial import util, scmutil, extensions

 filecache = scmutil.filecache

 class fakerepo(object):
     def __init__(self):
         self._filecache = {}

     def join(self, p):
         return p

     def sjoin(self, p):
         return p

     @filecache('x')
     def cached(self):
         print 'creating'

     def invalidate(self):
         for k in self._filecache:
             try:
                 delattr(self, k)
             except AttributeError:
                 pass

 def basic(repo):
     # file doesn't exist, calls function
     repo.cached

     repo.invalidate()
     # file still doesn't exist, uses cache
     repo.cached

     # create empty file
     f = open('x', 'w')
     f.close()
     repo.invalidate()
     # should recreate the object
     repo.cached

     f = open('x', 'w')
     f.write('a')
     f.close()
     repo.invalidate()
     # should recreate the object
     repo.cached

     repo.invalidate()
     # stats file again, nothing changed, reuses object
     repo.cached

     # atomic replace file, size doesn't change
     # hopefully st_mtime doesn't change as well so this doesn't use the cache
     # because of inode change
     f = scmutil.opener('.')('x', 'w', atomictemp=True)
     f.write('b')
-    f.rename()
+    f.close()

     repo.invalidate()
     repo.cached

 def fakeuncacheable():
     def wrapcacheable(orig, *args, **kwargs):
         return False

     def wrapinit(orig, *args, **kwargs):
         pass

     originit = extensions.wrapfunction(util.cachestat, '__init__', wrapinit)
     origcacheable = extensions.wrapfunction(util.cachestat, 'cacheable',
                                             wrapcacheable)

     try:
         os.remove('x')
     except:
         pass

     basic(fakerepo())

     util.cachestat.cacheable = origcacheable
     util.cachestat.__init__ = originit

 print 'basic:'
 print
 basic(fakerepo())
 print
 print 'fakeuncacheable:'
 print
 fakeuncacheable()
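
The atomic-replace step exercised above is the same pattern the opener
exposes via atomictemp=True; after this change the write is committed by the
ordinary close() call rather than rename(). A short sketch of that pattern
(illustrative only, mirroring the test's file named 'x'):

    from mercurial import scmutil

    f = scmutil.opener('.')('x', 'w', atomictemp=True)
    f.write('b')
    f.close()    # commits the write via atomic rename, as the test relies on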